code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Watchdog - Building supervision with Raspberry Pi
# (C)2017 - Norbert Huffschmid - GNU GPL V3
import os
from subprocess import Popen, PIPE
def index(req):
    # mod_python request handler: writes a plain-text "about" page to the
    # client via the request object.
    #
    # NOTE(review): written for Python 2 / mod_python. Under Python 3 the
    # Popen output below would be bytes and render as b'...' in the %s
    # formatting - confirm the target interpreter before porting.
    req.write('Watchdog - (C)2017 - Norbert Huffschmid\n\n')
    req.write('Building supervision with Raspberry Pi\n\n')
    # Version string is derived from the most recent git tag of the
    # checkout containing this file.
    process = Popen(['/usr/bin/git', 'describe', '--tags'], stdout=PIPE, stderr=PIPE, cwd=os.path.dirname(__file__))
    output, errors = process.communicate()
    req.write('Version: %s\n' % output)
    req.write('License: GNU GPL V3\n\n')
    # Kernel / platform information for diagnostics.
    process = Popen(['/bin/uname', '-a'], stdout=PIPE, stderr=PIPE)
    output, errors = process.communicate()
    req.write(output)
| long-exposure/watchdog | plugin/about/www/about.py | Python | gpl-3.0 | 669 |
class Day:
    """A single day of a conference schedule.

    Attributes set per instance:

    * ``date`` -- caller-supplied date value for the day.
    * ``index`` -- ordinal position of the day within the event.
    * ``room_objects`` -- list of room objects scheduled on this day.
    """

    def __init__(self, date=None, index=None):
        self.date = date
        self.index = index
        # A fresh list per instance; the previous class-level
        # ``room_objects = []`` default was a shared mutable that any code
        # touching the class attribute would leak across instances.
        self.room_objects = []

    def add_room(self, room):
        """Append `room` to this day's list of rooms."""
        self.room_objects.append(room)
| niranjan94/python-pentabarf-xml | pentabarf/Day.py | Python | mit | 269 |
import re
from datetime import datetime
from BeautifulSoup import BeautifulSoup
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.contrib.syndication.views import add_domain
from django.utils.safestring import mark_safe
from tagging.models import Tag
from tagging.fields import TagField
# Caption of the generated "read more" hyperlink; overridable in settings.
BLOG_READMORE_CAPTION = getattr(settings, "BLOG_READMORE_CAPTION", "Read More...")
# When True, feeds / blog listings show only the intro before the pagebreak.
BLOG_USE_FEED_INTRO = getattr(settings, "BLOG_USE_FEED_INTRO", True)
BLOG_USE_BLOG_INTRO = getattr(settings, "BLOG_USE_BLOG_INTRO", True)
# Matches the pagebreak marker, optionally wrapped in one open and/or close
# tag.  Fixed '[^>]' -> '[^>]*' so multi-character tag names such as <div>
# are recognised (the old pattern only matched single-letter tags like <p>).
# Raw strings avoid invalid '\s' escape warnings.  Group count (3) is
# unchanged, so FIND_MORE.split()/sub() callers keep working.
FIND_MORE = re.compile(r"(<[^>]*>)?(<!--[\s]*pagebreak[\s]*-->)(</[^>]*>)?")
# Rewrite relative hrefs / media src attributes to absolute ones in feeds.
RELATIVE_LINKS = re.compile(r'href="/')
RELATIVE_MEDIA = re.compile(r'src="/')
class Category(models.Model):
    """A blog category: a display name plus a unique URL slug."""

    name = models.CharField(max_length=128)
    slug = models.SlugField(unique=True)

    class Meta:
        verbose_name_plural = "Categories"
        ordering = ("name",)

    def __unicode__(self):
        # Python 2 / Django < 2.0 string representation.
        return self.name
class Post(models.Model):
    """A blog post whose body may be split by a ``<!-- pagebreak -->``
    marker into an intro part and the remainder ("Read More...")."""

    author = models.ForeignKey(User)
    title = models.CharField(max_length=255)
    slug = models.SlugField(unique=True)
    body = models.TextField(help_text="Use the page break button, to insert a 'Read More...' link.")
    pub_date = models.DateTimeField("Date", default=datetime.now)
    published = models.BooleanField(default=True)
    categories = models.ManyToManyField(Category, related_name="post", blank=True)
    tags = TagField(help_text="Tags for to this post, separated by either spaces or commas.")
    allow_comments = models.BooleanField(default=True)
    allow_trackbacks = models.BooleanField(default=True)
    allow_pingbacks = models.BooleanField(default=True)

    class Meta:
        # NOTE(review): posts are ordered alphabetically by title rather
        # than by pub_date - confirm this is intentional.
        ordering = ("title",)

    def categories_col(self):
        """
        Helper method that returns a comma separated list of the categories
        for this post (as a single string). Used for prettifying the
        categories column in list view in admin.
        """
        return ", ".join([str(category) for category in self.categories.all()])
    categories_col.short_description = "Categories"

    def tags_col(self):
        """
        Helper method that returns a comma separated list of the tags for this
        post (as a single string). Used for prettifying the tags column in
        list view in admin.
        """
        return ", ".join([tag.name for tag in Tag.objects.get_for_object(self)])
    tags_col.short_description = "Tags"

    def readmore_replace(self, match):
        # Replacement callback for FIND_MORE.sub(): removes the pagebreak
        # comment while preserving a lone surrounding open or close tag.
        openTag = match.group(1)
        closeTag = match.group(3)
        # NOTE(review): parses as `openTag and (closeTag is not None)`;
        # presumably `openTag is not None and ...` was intended - behaviour
        # coincides because group(1) is never an empty string.
        if openTag and closeTag is not None:
            return ""
        elif openTag is None:
            return closeTag
        else:
            return openTag

    def author_name(self):
        # Prefer the human-readable full name, fall back to the username.
        if self.author.first_name:
            return self.author.get_full_name()
        else:
            return str(self.author)

    def get_absolute_url(self):
        # Domain-qualified URL of this post on the current Site.
        domain = Site.objects.get(pk=settings.SITE_ID).domain
        return add_domain(domain, reverse("blog-post", args=[self.slug]))

    def full_post(self):
        """
        Returns the full post, stripping the special <!-- pagebreak --> tag.
        """
        return mark_safe(FIND_MORE.sub(self.readmore_replace, self.body, 1))

    def intro(self, full_url=False):
        """
        Returns only the part before the <!-- pagebreak --> tag (intro).
        If the BLOG_READMORE_CAPTION settings variable is set, a hyperlink
        to the full post will be included. If full_url is True, the URL to
        the full post will contain the domain of the site as well. When
        full_url is False, it will be an URL without the domain.
        """
        # NOTE(review): full_url is currently unused; the generated link is
        # always relative - confirm against the docstring's claim.
        # pieces: [before, open-tag, pagebreak-comment, close-tag, after].
        pieces = FIND_MORE.split(self.body)
        post = pieces[0]
        if len(pieces) > 1:
            if pieces[3] is not None:
                # Fixes possible unclosed tags after splitting the HTML in two parts
                pieces[3] = BeautifulSoup(pieces[3]).prettify()
                post += pieces[3]
            if pieces[2] is not None:
                post += '<p><a href="' + reverse("blog-post", args=[self.slug]) + '">' + BLOG_READMORE_CAPTION + "</a></p>"
        return mark_safe(post)

    def get_feed_intro(self, request_url):
        """
        If BLOG_USE_FEED_INTRO is set to True, returns only the part before the
        <!-- pagebreak --> tag (intro), with a hyperlink to the remainder of the
        post. If BLOG_USE_FEED_INTRO is False, returns the full post
        (but with the special <!-- pagebreak --> tag stripped). Used by
        feeds.py.
        """
        if BLOG_USE_FEED_INTRO:
            content = self.intro(True)
        else:
            content = self.full_post()
        # Feed readers need absolute links/media URLs.
        content = RELATIVE_LINKS.sub('href="%s/' % request_url, content)
        content = RELATIVE_MEDIA.sub('src="%s/' % request_url, content)
        return mark_safe(content)

    def get_blog_intro(self):
        """
        If BLOG_USE_BLOG_INTRO is set to True, only returns the part before the
        <!-- pagebreak --> tag (intro), with a hyperlink to the remainder of the
        post. If BLOG_USE_BLOG_INTRO is False, returns the full post
        (but with the special <!-- pagebreak --> tag stripped).
        """
        if BLOG_USE_BLOG_INTRO:
            return mark_safe(self.intro())
        else:
            return mark_safe(self.full_post())

    def has_categories(self):
        # True when at least one category is attached to this post.
        return self.categories.count() > 0

    def __unicode__(self):
        # Python 2 / Django < 2.0 string representation.
        return self.title
| viswimmer1/PythonGenerator | data/python_files/34091278/models.py | Python | gpl-2.0 | 5,618 |
from sympy.combinatorics import Permutation
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.homomorphisms import homomorphism, group_isomorphism, is_isomorphic
from sympy.combinatorics.free_groups import free_group
from sympy.combinatorics.fp_groups import FpGroup
from sympy.combinatorics.named_groups import AlternatingGroup, DihedralGroup, CyclicGroup
from sympy.utilities.pytest import raises
def test_homomorphism():
    """Exercise homomorphism() across the supported domain/codomain kinds."""
    # FpGroup -> PermutationGroup
    F, a, b = free_group("a, b")
    G = FpGroup(F, [a**3, b**3, (a*b)**2])
    c = Permutation(3)(0, 1, 2)
    d = Permutation(3)(1, 2, 3)
    A = AlternatingGroup(4)
    T = homomorphism(G, A, [a, b], [c, d])
    assert T(a*b**2*a**-1) == c*d**2*c**-1
    assert T.is_isomorphism()
    assert T(T.invert(Permutation(3)(0, 2, 3))) == Permutation(3)(0, 2, 3)

    # No images supplied: every generator maps to the identity, i.e. the
    # trivial homomorphism, whose kernel is the whole domain.
    T = homomorphism(G, AlternatingGroup(4), G.generators)
    assert T.is_trivial()
    assert T.kernel().order() == G.order()

    E, e = free_group("e")
    G = FpGroup(E, [e**8])
    P = PermutationGroup([Permutation(0, 1, 2, 3), Permutation(0, 2)])
    T = homomorphism(G, P, [e], [Permutation(0, 1, 2, 3)])
    assert T.image().order() == 4
    assert T(T.invert(Permutation(0, 2)(1, 3))) == Permutation(0, 2)(1, 3)

    T = homomorphism(E, AlternatingGroup(4), E.generators, [c])
    assert T.invert(c**2) == e**-1 #order(c) == 3 so c**2 == c**-1

    # FreeGroup -> FreeGroup
    T = homomorphism(F, E, [a], [e])
    # b has no image specified, so b's image is the identity.
    assert T(a**-2*b**4*a**2).is_identity

    # FreeGroup -> FpGroup
    G = FpGroup(F, [a*b*a**-1*b**-1])
    T = homomorphism(F, G, F.generators, G.generators)
    assert T.invert(a**-1*b**-1*a**2) == a*b**-1

    # PermutationGroup -> PermutationGroup
    D = DihedralGroup(8)
    p = Permutation(0, 1, 2, 3, 4, 5, 6, 7)
    P = PermutationGroup(p)
    T = homomorphism(P, D, [p], [p])
    assert T.is_injective()
    assert not T.is_isomorphism()
    assert T.invert(p**3) == p**3

    # Composition of homomorphisms: domain of the outer, codomain of the inner.
    T2 = homomorphism(F, P, [F.generators[0]], P.generators)
    T = T.compose(T2)
    assert T.domain == F
    assert T.codomain == D
    assert T(a*b) == p
def test_isomorphisms():
    """Exercise group_isomorphism()/is_isomorphic() on FpGroups and
    permutation groups."""
    F, a, b = free_group("a, b")
    E, c, d = free_group("c, d")
    # Infinite groups with differently ordered relators.
    G = FpGroup(F, [a**2, b**3])
    H = FpGroup(F, [b**3, a**2])
    assert is_isomorphic(G, H)

    # Trivial Case
    # FpGroup -> FpGroup
    H = FpGroup(F, [a**3, b**3, (a*b)**2])
    F, c, d = free_group("c, d")
    G = FpGroup(F, [c**3, d**3, (c*d)**2])
    check, T = group_isomorphism(G, H)
    assert check
    # Fixed: this comparison was previously a bare expression (its result
    # was discarded, so it verified nothing).  The generator images are not
    # pinned down by group_isomorphism, so assert the homomorphism property
    # instead, which must hold for any isomorphism T.
    assert T(c**3*d**2) == T(c)**3*T(d)**2

    # FpGroup -> PermutationGroup
    # FpGroup is converted to the equivalent isomorphic group.
    F, a, b = free_group("a, b")
    G = FpGroup(F, [a**3, b**3, (a*b)**2])
    H = AlternatingGroup(4)
    check, T = group_isomorphism(G, H)
    assert check
    assert T(b*a*b**-1*a**-1*b**-1) == Permutation(0, 2, 3)
    assert T(b*a*b*a**-1*b**-1) == Permutation(0, 3, 2)

    # PermutationGroup -> PermutationGroup
    D = DihedralGroup(8)
    p = Permutation(0, 1, 2, 3, 4, 5, 6, 7)
    P = PermutationGroup(p)
    assert not is_isomorphic(D, P)

    # Cyclic groups of different prime orders are never isomorphic.
    A = CyclicGroup(5)
    B = CyclicGroup(7)
    assert not is_isomorphic(A, B)

    # Two groups of the same prime order are isomorphic to each other.
    G = FpGroup(F, [a, b**5])
    H = CyclicGroup(5)
    assert G.order() == H.order()
    assert is_isomorphic(G, H)
def test_check_homomorphism():
    """homomorphism() must reject generator/image lists that do not
    define a valid homomorphism."""
    rotation = Permutation(1, 2, 3, 4)
    reflection = Permutation(1, 3)
    group = PermutationGroup([rotation, reflection])
    # Supplying only one of the two generators cannot define a
    # homomorphism on the whole group: a ValueError is required.
    raises(ValueError, lambda: homomorphism(group, group, [rotation], [rotation]))
| kaushik94/sympy | sympy/combinatorics/tests/test_homomorphisms.py | Python | bsd-3-clause | 3,623 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class PantsEngineIntegrationTest(PantsRunIntegrationTest):
    """Integration tests driving a real pants run with the v2 engine
    enabled."""

    def test_engine_list(self):
        # `list` over 3rdparty targets with debug logging so the engine's
        # internal messages appear on stderr.
        pants_run = self.run_pants(['-ldebug', '--enable-v2-engine', 'list', '3rdparty::'])
        self.assert_success(pants_run)
        self.assertRegexpMatches(pants_run.stderr_data, 'build_graph is: .*LegacyBuildGraph')
        # Fixed: raw strings for the regexes below - '\d' in a plain string
        # is an invalid escape sequence and only worked by accident.
        self.assertRegexpMatches(pants_run.stderr_data, r'ran \d+ scheduling iterations in')
        # The run must not have been serviced by a daemon.
        self.assertNotRegexpMatches(pants_run.stderr_data, r'pantsd is running at pid \d+')

    def test_engine_binary(self):
        self.assert_success(
            self.run_pants(
                ['--enable-v2-engine', 'binary', 'examples/src/python/example/hello/main:']
            )
        )
| gmalmquist/pants | tests/python/pants_test/engine/legacy/test_pants_engine_integration.py | Python | apache-2.0 | 1,031 |
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016,2019
"""
Schemas for streams.
********
Overview
********
A stream represents an unbounded flow of tuples with a declared schema so that each tuple on the stream complies with the schema. A stream's schema may be one of:
* :py:class:`~StreamsSchema` structured schema - a tuple is a sequence of attributes, and an attribute is a named value of a specific type.
* :py:const:`~CommonSchema.Json` a tuple is a JSON object.
* :py:const:`~CommonSchema.String` a tuple is a string.
* :py:const:`~CommonSchema.Python` a tuple is any Python object, effectively an untyped stream.
******************
Structured schemas
******************
A structured schema is a sequence of attributes, and an attribute is a named value of a specific type. For example a stream of sensor readings can be represented as a schema with three attributes ``sensor_id``, ``ts`` and ``reading`` with types of ``int64``, ``int64`` and ``float64`` respectively.
This schema can be declared a number of ways:
Python 3.6::
class SensorReading(typing.NamedTuple):
sensor_id: int
ts: int
reading: float
sensors = raw_readings.map(parse_sensor, schema=SensorReading)
Python 3::
sensors = raw_readings.map(parse_sensor,
schema='tuple<int64 sensor_id, int64 ts, float64 reading>')
The supported types are defined by IBM Streams and are listed in :py:class:`~StreamSchema`.
*************************
Nested structured schemas
*************************
A structured schema can contain nested structures that are defined separately.
Python 3.6::
class Sensor(typing.NamedTuple):
manufacturer: str
sensor_id: int
class SensorReading(typing.NamedTuple):
sensor: Sensor
ts: int
reading: float
sensors = raw_readings.map(parse_sensor, schema=SensorReading)
Python 3::
sensors = raw_readings.map(parse_sensor,
schema='tuple<tuple<rstring manufacturer, int64 sensor_id> sensor, int64 ts, float64 reading>')
Both schema definitions are equivalent.
Structured schemas provide type-safety and efficient network serialization when compared to passing a ``dict`` using :py:const:`~CommonSchema.Python` streams.
Streams with structured schemas can be interchanged with any IBM Streams application using :py:meth:`~streamsx.topology.topology.Stream.publish` and :py:meth:`~streamsx.topology.topology.Topology.subscribe` maintaining type safety.
**************************
Defining a stream's schema
**************************
Every stream within a :py:class:`~streamsx.topology.topology.Topology` has defined schema. The schema may be defined explictly (for example :py:meth:`~streamsx.topology.topology.Stream.map` or :py:meth:`~streamsx.topology.topology.Topology.subscribe`) or implicity (for example :py:meth:`~streamsx.topology.topology.Stream.filter` produces a stream with the same schema as its input stream).
Explictly defining a stream's schema is flexible and various types of values are accepted as the schema.
* Builtin types as aliases for common schema types:
* ``json`` (module) - for :py:const:`~CommonSchema.Json`
* ``str`` - for :py:const:`~CommonSchema.String`
* ``object`` - for :py:const:`~CommonSchema.Python`
* Values of the enumeration :py:class:`CommonSchema`
* An instance of ``typing.NamedTuple`` (Python 3)
* An instance of :py:class:`StreamSchema`
* A string of the format ``tuple<...>`` defining the attribute names and types. See :py:class:`StreamSchema` for details on the format and types supported.
* A string containing a namespace qualified SPL stream type (e.g. ``com.ibm.streams.geospatial::FlightPathEncounterTypes.Observation3D``)
"""
# For style dicts passed into Python from Streams C++
# are raw dicts since they are created by Python C-API code
# not the future dict in Python 2.7.
_spl_dict = dict
_spl_object = object

# Public API of this module.
__all__ = ['is_common', 'StreamSchema', 'CommonSchema']

import collections
import decimal
import datetime
import enum
import io
import itertools
import json
import sys
import token
import tokenize
import typing

import streamsx._streams._version
__version__ = streamsx._streams._version.__version__

# Alias used when mapping the SPL rstring/ustring types to Python.
_spl_str = str
def _normalize(schema, allow_none=True):
    """Normalize a schema declaration into a schema object.

    Accepts ``CommonSchema``/``StreamSchema`` instances, ``tuple<...>``
    strings, the Python aliases (``object``, ``str``, the ``json``
    module) and typed named tuples.  Raises ``ValueError`` for anything
    else (including ``None`` when `allow_none` is false).
    """
    if schema is None and allow_none:
        return None
    if isinstance(schema, (CommonSchema, StreamSchema)):
        return schema
    if isinstance(schema, str):
        return StreamSchema(schema)
    alias_map = {
        _spl_object: CommonSchema.Python,
        _spl_str: CommonSchema.String,
        json: CommonSchema.Json,
    }
    if schema in alias_map:
        return alias_map[schema]
    # A named tuple with type hints is usable as a schema definition.
    if _is_typedtuple(schema):
        return _from_named_tuple(schema)
    raise ValueError("Unknown stream schema type:" + str(schema))
def is_common(schema):
    """
    Is `schema` a common schema.

    Args:
        schema: Schema to test.

    Returns:
        bool: ``True`` if schema is a common schema, otherwise ``False``.
    """
    if isinstance(schema, CommonSchema):
        return True
    if isinstance(schema, StreamSchema):
        return schema.schema() in _SCHEMA_COMMON
    if isinstance(schema, str):
        # Parse the string and re-test the resulting schema object.
        return is_common(StreamSchema(schema))
    return False
def _is_namedtuple(cls):
    # A namedtuple class is a tuple subclass (other than tuple itself)
    # carrying the generated `_fields` attribute.
    if cls == tuple or not isinstance(cls, type):
        return False
    return issubclass(cls, tuple) and hasattr(cls, '_fields')
def _is_typedtuple(cls):
    """Return ``True`` when `cls` is a named tuple class with type hints.

    ``_field_types`` was deprecated in Python 3.8 and removed in 3.13 in
    favour of ``__annotations__``; accept either spelling so typed
    ``typing.NamedTuple`` classes are still recognised on new Pythons.
    A plain ``collections.namedtuple`` has no (non-empty) annotations
    and is rejected on both paths.
    """
    if not _is_namedtuple(cls):
        return False
    return hasattr(cls, '_field_types') or bool(getattr(cls, '__annotations__', None))
# Sentinel schema definition used while a schema is not yet resolved.
_SCHEMA_PENDING = '<pending>'

def _is_pending(schema):
    # True only for StreamSchema instances still carrying the pending sentinel.
    return isinstance(schema, StreamSchema) and schema.schema() == _SCHEMA_PENDING
# Parses a schema of the form 'tuple<...>'
# _parse returns a list of the schema attributes,
# each attribute is a python tuple of:
# (type, name)
# with type being
# primitive type (str), e.g. 'int32'
# collection type (tuple), e.g. ('list', 'int32')
# optional type (tuple), e.g. ('optional', 'int32')
# nested tuple type (tuple), e.g. ('tuple', [('int32', 'a'), ('float64', 'b')])
# This is an internal api.
#
class _SchemaParser(object):
    """Class for parsing schemas of the form ``tuple<...>``.

    ``_parse`` returns a list of the schema attributes, each attribute a
    Python tuple ``(type, name)`` with ``type`` being one of:

    * a primitive type name (``str``), e.g. ``'int32'``, or ``'rstringN'``
      for a bounded rstring
    * a collection type (``tuple``), e.g. ``('list', 'int32', None)``
    * a map type (``tuple``), e.g. ``('map', (key_type, value_type), bound)``
    * an optional type (``tuple``), e.g. ``('optional', 'int32')``
    * a nested tuple type (``tuple``), e.g. ``('tuple', [('int32', 'a')])``

    This is an internal api.
    """

    _SPL_PRIMITIVE_TYPES = {'boolean',
        'uint8', 'uint16', 'uint32', 'uint64',
        'int8', 'int16', 'int32', 'int64',
        'float32', 'float64',
        'complex32', 'complex64',
        'decimal32', 'decimal64', 'decimal128',
        'rstring', 'ustring',
        'timestamp', 'blob', 'xml'}

    _SPL_COLLECTION_TYPES = {'list', 'set'}

    def __init__(self, schema):
        self.schema = schema
        self._type = []

    def _parse_error(self, tok):
        raise SyntaxError("Invalid schema:" + self.schema + " token " + str(tok))

    def _req_op(self, which):
        # Consume the next token; it must be the operator `which`.
        tok = next(self.tokens)
        if tok[0] != tokenize.OP or which != tok[1]:
            self._parse_error(tok)

    def _parse(self):
        # Space out '>>' / '<<' which would otherwise tokenize as shift
        # operators rather than nested angle brackets.
        schema = self.schema.replace(">>", ' > > ')
        schema = schema.replace('<<', ' < < ')
        ios = io.StringIO(schema).readline
        self.tokens = tokenize.generate_tokens(ios)
        self._parse_tuple(self._type, next(self.tokens))
        endtoken = next(self.tokens)
        # Python 3.6.7 adds a newline at the end! Issue #1959
        if endtoken[0] == token.NEWLINE:
            endtoken = next(self.tokens)
        if not endtoken[0] == token.ENDMARKER:
            self._parse_error(endtoken)
        return self._type

    def _parse_tuple(self, _type, tok):
        # Expects `tok` to be the NAME token 'tuple'; fills `_type` with
        # the parsed attributes of the comma separated attribute list.
        if tok[0] != tokenize.NAME or 'tuple' != tok[1]:
            self._parse_error(tok)
        self._req_op('<')
        tok = None
        while True:
            tok = next(self.tokens)
            if tok[0] == tokenize.OP:
                if tok[1] == ',':
                    continue
                if tok[1] == '>':
                    break
                self._parse_error(tok)
            if tok[0] == tokenize.NAME:
                self._parse_attribute_type(_type, tok)
                continue
            self._parse_error(tok)

    def _parse_type(self, attr_type):
        # Parse a single SPL type starting at NAME token `attr_type`.
        if attr_type[0] != tokenize.NAME:
            self._parse_error(attr_type)
        if 'tuple' == attr_type[1]:
            nested_tuple = []
            self._parse_tuple(nested_tuple, attr_type)
            return ('tuple', nested_tuple)
        if 'map' == attr_type[1]:
            self._req_op('<')
            key_type = self._parse_type(next(self.tokens))
            self._req_op(',')
            value_type = self._parse_type(next(self.tokens))
            self._req_op('>')
            bound = self._parse_optional_bounded()
            return ('map', (key_type, value_type), bound)
        if 'optional' == attr_type[1]:
            self._req_op('<')
            value_type = self._parse_type(next(self.tokens))
            self._req_op('>')
            return ('optional', value_type)
        if attr_type[1] in _SchemaParser._SPL_PRIMITIVE_TYPES:
            if attr_type[1] == 'rstring':
                # A bounded rstring is encoded as e.g. 'rstring20'.
                bound = self._parse_optional_bounded()
                if bound is not None:
                    return 'rstring' + bound
            return attr_type[1]
        if attr_type[1] in _SchemaParser._SPL_COLLECTION_TYPES:
            self._req_op('<')
            element_type = self._parse_type(next(self.tokens))
            self._req_op('>')
            bound = self._parse_optional_bounded()
            return (attr_type[1], element_type, bound)
        self._parse_error(attr_type)

    def _parse_attribute_type(self, _type, attr_type):
        # One attribute: a type followed by its name.
        if attr_type[0] != tokenize.NAME:
            self._parse_error(attr_type)
        attr_type = self._parse_type(attr_type)
        attr = (attr_type, self._parse_attribute_name())
        _type.append(attr)

    def _parse_attribute_name(self):
        attr_name = next(self.tokens)
        if attr_name[0] != tokenize.NAME:
            self._parse_error(attr_name)
        return attr_name[1]

    def _parse_optional_bounded(self):
        # Parse an optional '[N]' bound; returns the bound as a string or
        # None, pushing back the lookahead token when no bound is present.
        tok = next(self.tokens)
        if tok[0] == tokenize.OP and '[' == tok[1]:
            bound_info = next(self.tokens)
            if bound_info[0] != tokenize.NUMBER:
                self._parse_error(bound_info)
            # Fixed: use the token text (index 1), not its type code
            # (index 0).  Previously int(bound_info[0]) converted the
            # tokenize.NUMBER constant, so every bound parsed as the same
            # bogus value instead of the declared N.
            bound = str(int(bound_info[1]))
            self._req_op(']')
            return bound
        else:
            # push back the token
            self.tokens = itertools.chain([tok], self.tokens)
            return None
def _attribute_names(types):
    # Each entry of `types` is a (type, name) pair; collect the names.
    return [attr[1] for attr in types]
# Returns a schema's types as a list of (name, python type) tuples
# suitable for use in creating a typing.NamedTuple
def _attribute_pytypes(types):
    # Each entry of `types` is a (spl type, name) pair; map the SPL type
    # to its Python equivalent and swap the pair order.
    return [(name, _type_from_spl(spl_type)) for spl_type, name in types]
# Underlying SPL tuple definitions for the common schemas.
_SCHEMA_PYTHON_OBJECT = 'tuple<blob __spl_po>'  # opaque Python object payload
_SCHEMA_STRING = 'tuple<rstring string>'
_SCHEMA_JSON = 'tuple<rstring jsonString>'
_SCHEMA_BINARY = 'tuple<blob binary>' # not yet supported
_SCHEMA_XML = 'tuple<xml document>' # not yet supported

# All schema definitions treated as "common" schemas.
_SCHEMA_COMMON = frozenset([_SCHEMA_PYTHON_OBJECT, _SCHEMA_JSON, _SCHEMA_STRING, _SCHEMA_BINARY, _SCHEMA_XML])

# Fixed tuple-passing style for each common schema.
_SCHEMA_COMMON_STYLES = {_SCHEMA_PYTHON_OBJECT:_spl_object, _SCHEMA_STRING: _spl_str, _SCHEMA_JSON: _spl_dict, _SCHEMA_BINARY:None, _SCHEMA_XML: None }
class StreamSchema(object) :
"""Defines a schema for a structured stream.
On a structured stream a tuple is a sequence of attributes,
and an attribute is a named value of a specific type.
The supported types are defined by IBM Streams and include such
types as `int8`, `int16`, `rstring` and `list<float32>`.
A structured schema can be defined using a ``typing.NamedTuple`` in
Python 3, a string with the syntax ``tuple<type name [,...]>`` or
an instance of this class.
typing.NamedTuple:
A ``typing.NamedTuple`` can be used to define a structured
schema with the field names and types mapping to the
structured schema attribute names and types.
Python types are mapped to IBM Streams types as follows:
================================== ================
Python type IBM Streams type
================================== ================
``str`` ``rstring``
``bool`` ``boolean``
``int`` ``int64``
``float`` ``float64``
``decimal.Decimal`` ``decimal128``
``complex`` ``complex64``
``bytes`` ``blob``
``streamsx.spl.types.Timestamp`` ``timestamp``
``datetime.datetime`` ``timestamp``
``typing.List[T]`` ``list<T>``
``typing.Set[T]`` ``set<T>``
``typing.Mapping[K,V]`` ``map<K,V>``
``typing.Optional[T]`` ``optional<T>``
================================== ================
.. note::
Tuples on a stream with a schema defined by a
``typing.NamedTuple`` instance are passed into callables
as instance of a named tuple with the the correct field
names and types unless the named tuple contains nested
named tuples at any nesting depth. When passed as named tuple,
there is no guarantee to be the same class
instance as the one used to declare the schema.
Tuple string:
A string of the format `tuple<type name [,...]>` can be used
to define a structured schema, where `type` is an IBM Streams type.
Example::
tuple<rstring id, timestamp ts, float64 value>
represents a schema with three attributes suitable for a sensor reading.
IBM Streams types:
============================ ============================== ========================================= =======================================
Type Description Python representation Conversion from Python
============================ ============================== ========================================= =======================================
``boolean`` True or False ``bool`` ``bool(value)``
``int8`` 8-bit signed integer ``int`` ``int(value)`` truncated to 8 bits
``int16`` 16-bit signed integer ``int`` ``int(value)`` truncated to 16 bits
``int32`` 32-bit signed integer ``int`` ``int(value)`` truncated to 32 bits
``int64`` 64-bit signed integer ``int`` ``int(value)``
``uint8`` 8-bit unsigned integer ``int`` -
``uint16`` 16-bit unsigned integer ``int`` -
``uint32`` 32-bit unsigned integer ``int`` -
``uint64`` 64-bit unsigned integer ``int`` -
``float32`` 32-bit binary floating point ``float`` ``float(value)`` truncated to 32 bits
``float64`` 64-bit binary floating point ``float`` ``float(value)``
``decimal32`` 32-bit decimal floating point ``decimal.Decimal`` ``decimal.Decimal(value)`` normalized to IEEE 754 decimal32
``decimal64`` 64-bit decimal floating point ``decimal.Decimal`` ``decimal.Decimal(value)`` normalized to IEEE 754 decimal64
``decimal128`` 128-bit decimal floating point ``decimal.Decimal`` ``decimal.Decimal(value)`` normalized to IEEE 754 decimal128
``complex32`` complex with `float32` values ``complex`` ``complex(value)`` with real and imaginary values truncated to 32 bits
``complex64`` complex with `float64` values ``complex`` ``complex(value)``
``timestamp`` Nanosecond timestamp :py:class:`~streamsx.spl.types.Timestamp` -
``rstring`` UTF-8 string ``str`` ``str(value)``
``rstring[N]`` Bounded UTF-8 string ``str`` ``str(value)``
``ustring`` UTF-16 string ``str`` ``str(value)``
``blob`` Sequence of bytes ``memoryview`` -
``list<T>`` List with elements of type `T` ``list`` -
``list<T>[N]`` Bounded list ``list`` -
``set<T>`` Set with elements of type `T` ``set`` -
``set<T>[N]`` Bounded set ``set`` -
``map<K,V>`` Map with typed keys and values ``dict`` -
    ``map<K,V>[N]``              Bounded map, limited to N pairs           ``dict``                                  -
``optional<T>`` Optional value of type `T` Value of type `T`, or None Value of for type ``T``
``enum{id [,...]}`` Enumeration Not supported Not supported
``xml`` XML value Not supported Not supported
``tuple<type name [, ...]>`` Nested tuple ``dict`` ``dict``
============================ ============================== ========================================= =======================================
.. note::
Type `optional<T>` requires IBM Streams 4.3 or later.
.. note::
Conversion to or from Python:
* Type `set<T>` is restricted to primitive types
* Type `map<K,V>` is restricted to primitive types for the key type K
Python representation is how an attribute value in a structured schema is passed into a Python function.
Conversion from Python indicates how a value from Python is converted to an attribute value in a structured schema.
For example a value ``v`` assigned to ``float64`` attribute is converted as though ``float(v)`` is called first,
thus ``v`` may be a ``float``, ``int`` or any type that has a ``__float__`` method.
When a type is not supported in Python it can only be used in a schema used for streams produced and consumed by invocation of SPL operators.
A `StreamSchema` can be created by passing a string of the
form ``tuple<...>`` or by passing the name of an SPL type from
an SPL toolkit, for example ``com.ibm.streamsx.transportation.vehicle::VehicleLocation``.
Attribute names must start with an ASCII letter or underscore, followed by ASCII letters, digits, or underscores.
When a tuple on a structured stream is passed into a Python callable it
is converted to a ``dict``, ``tuple`` or **named tuple** object containing all attributes of the stream tuple.
See :py:meth:`style`, :py:meth:`as_dict` and :py:meth:`as_tuple` for details.
.. note::
When a tuple on a structured stream, that contains **nested tuples**, is passed into a Python callable it is **always** converted to a ``dict`` object containing all attributes of the stream tuple.
When a Python object is submitted to a structured stream,
for example as the return from the function invoked in a
:py:meth:`~streamsx.topology.topology.Stream.map` with the
`schema` parameter set, it must be:
* A Python ``dict``. Attributes are set by name using value in the dict for the name. If a value does not exist (the name does not exist as a key) or is set to `None` then the attribute has its default value, zero, false, empty list or string etc.
* A Python ``tuple`` or named tuple. Attributes are set by position, with the first attribute being the value at index 0 in the Python `tuple`. If a value does not exist (the tuple has less values than the structured schema) or is set to `None` then the attribute has its default value, zero, false, empty list or string etc.
Args:
schema(str): Schema definition. Either a schema definition or the name of an SPL type.
.. versionadded:: 1.16 Support for nested tuples (conversion to SPL from Python or conversion to Python from SPL)
"""
def __init__(self, schema):
schema = schema.strip()
self._spl_type = not schema.startswith("tuple<")
self._schema=schema
if not self._spl_type:
parser = _SchemaParser(schema)
self._types = parser._parse()
self._style = self._default_style()
def _set(self, schema):
"""Set a schema from another schema"""
if isinstance(schema, CommonSchema):
self._spl_type = False
self._schema = schema.schema()
self._style = self._default_style()
else:
self._spl_type = schema._spl_type
self._schema = schema._schema
self._style = schema._style
@property
def style(self):
"""Style stream tuples will be passed into a callable.
For the common schemas the style is fixed:
* ``CommonSchema.Python`` - ``object`` - Stream tuples are arbitrary objects.
* ``CommonSchema.String`` - ``str`` - Stream tuples are unicode strings.
* ``CommonSchema.Json`` - ``dict`` - Stream tuples are a ``dict`` that represents the JSON object.
For a structured schema the supported styles are:
* ``dict`` - Stream tuples are passed as a ``dict`` with the key being the attribute name and and the value the attribute value. This is the default.
* E.g. with a schema of ``tuple<rstring id, float32 value>`` a value is passed as ``{'id':'TempSensor', 'value':20.3}``.
* ``tuple`` - Stream tuples are passed as a ``tuple`` with the value being the attributes value in order. A schema is set to pass stream tuples as tuples using :py:meth:`as_tuple`.
* E.g. with a schema of ``tuple<rstring id, float32 value>`` a value is passed as ``('TempSensor', 20.3)``.
* ``namedtuple`` - Stream tuples are passed as a named tuple (see ``collections.namedtuple``) with the value being the attributes value in order. Field names correspond to the attribute names of the schema. A schema is set to pass stream tuples as named tuples using :py:meth:`as_tuple` setting the `named` parameter.
Returns:
type: Class of tuples that will be passed into callables.
.. versionadded:: 1.8
.. versionadded:: 1.9 Support for namedtuple.
"""
return self._style
def _default_style(self):
if self._spl_type:
return _spl_dict
return _SCHEMA_COMMON_STYLES[self.schema()] if is_common(self) else _spl_dict
def _copy(self, style=None):
if style is None:
return self
if self._style is style:
return self
# Cannot change style of common schemas
if is_common(self):
return self
c = StreamSchema(self.schema())
c._style = style
return c
def _make_named_tuple(self, name):
if self._spl_type:
return tuple
if name is True:
name = 'StreamTuple'
nt = typing.NamedTuple(name, _attribute_pytypes(self._types))
nt._splpy_namedtuple = name
return nt
def as_tuple(self, named=None):
"""
Create a structured schema that will pass stream tuples into callables as ``tuple`` instances.
If this instance represents a common schema then it will be returned
without modification. Stream tuples with common schemas are
always passed according to their definition.
**Passing as tuple**
When `named` evaluates to ``False`` then each stream tuple will
be passed as a ``tuple``. For example with a structured schema
of ``tuple<rstring id, float64 value>`` a value is passed as
``('TempSensor', 27.4)`` and access to the first attribute
is ``t[0]`` and the second as ``t[1]`` where ``t`` represents
the passed value..
**Passing as named tuple**
When `named` is ``True`` or a ``str`` then each stream tuple will
be passed as a named tuple. For example with a structured schema
of ``tuple<rstring id, float64 value>`` a value is passed as
``('TempSensor', 27.4)`` and access to the first attribute
is ``t.id`` (or ``t[0]``) and the second as ``t.value`` (``t[1]``)
where ``t`` represents the passed value.
.. warning:: If an schema's attribute name is not a valid Python identifier or
starts with an underscore then it will be renamed as positional name ``_n``.
For example, with the schema ``tuple<int32 a, int32 def, int32 id>`` the
field names are ``a``, ``_1``, ``_2``.
The value of `named` is used as the name of the named tuple
class with ``StreamTuple`` used when `named` is ``True``.
It is not guaranteed that the class of the namedtuple is the
same for all callables processing tuples with the same
structured schema, only that the tuple is a named tuple
with the correct field names.
Args:
named: Pass stream tuples as a named tuple.
If not set then stream tuples are passed as
instances of ``tuple``.
Returns:
StreamSchema: Schema passing stream tuples as ``tuple`` if allowed.
.. versionadded:: 1.8
.. versionadded:: 1.9 Addition of `named` parameter.
"""
if not named:
return self._copy(tuple)
if named == True or isinstance(named, str):
return self._copy(self._make_named_tuple(name=named))
return self._copy(tuple)
def as_dict(self):
"""
Create a structured schema that will pass stream tuples into callables as ``dict`` instances.
This allows a return to the default calling style for a structured schema.
If this instance represents a common schema then it will be returned
without modification. Stream tuples with common schemas are always passed according
to their definition.
Returns:
StreamSchema: Schema passing stream tuples as ``dict`` if allowed.
.. versionadded:: 1.8
"""
return self._copy(_spl_dict)
    def schema(self):
        """Private method. May be removed at any time."""
        # Raw SPL schema definition string, e.g. "tuple<rstring id, float64 value>".
        return self._schema
    def __str__(self):
        """Private method. May be removed at any time."""
        # The string form of a schema is its SPL definition.
        return self._schema
def spl_json(self):
_splj = {}
_splj["type"] = 'spltype'
_splj["value"] = self.schema()
return _splj
def extend(self, schema):
"""
Extend a structured schema by another.
For example extending ``tuple<rstring id, timestamp ts, float64 value>``
with ``tuple<float32 score>`` results in ``tuple<rstring id, timestamp ts, float64 value, float32 score>``.
Args:
schema(StreamSchema): Schema to extend this schema by.
Returns:
StreamSchema: New schema that is an extension of this schema.
"""
if self._spl_type:
raise TypeError("Not supported for declared SPL types")
base = self.schema()
extends = schema.schema()
new_schema = base[:-1] + ',' + extends[6:]
return StreamSchema(new_schema)
    def __hash__(self):
        # Hash on the definition string so schemas that compare equal hash equally.
        return hash(self.schema())
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.schema() == other.schema():
if self.style == other.style:
return True
if _is_typedtuple(self.style) and _is_typedtuple(other.style):
return self.style._fields == other.style._fields and self.style._field_types == other.style._field_types
return False
    def __ne__(self, other):
        # Explicit inverse of __eq__ (defined rather than relying on default negation).
        return not self.__eq__(other)
@staticmethod
def _fnop_style(schema, op, name):
"""Set an operator's parameter representing the style of this schema."""
if is_common(schema):
if name in op.params:
del op.params[name]
return
if _is_pending(schema):
ntp = 'pending'
elif schema.style is tuple:
ntp = 'tuple'
elif schema.style is _spl_dict:
ntp = 'dict'
elif _is_namedtuple(schema.style) and hasattr(schema.style, '_splpy_namedtuple'):
ntp = 'namedtuple:' + schema.style._splpy_namedtuple
else:
return
op.params[name] = ntp
@enum.unique
class CommonSchema(enum.Enum):
    """
    Common stream schemas for interoperability within Streams applications.
    Streams application can publish streams that are subscribed to by other applications.
    Use of common schemas allow streams connections regardless of the application implementation language.
    Python applications publish streams using :py:meth:`~streamsx.topology.topology.Stream.publish`
    and subscribe using :py:meth:`~streamsx.topology.topology.Topology.subscribe`.
    * :py:const:`Python` - Stream contains Python objects.
    * :py:const:`Json` - Stream contains JSON objects.
    * :py:const:`String` - Stream contains strings.
    * :py:const:`Binary` - Stream contains binary tuples.
    * :py:const:`XML` - Stream contains XML documents.
    """
    # Each member wraps a StreamSchema built from a module-level schema constant.
    Python = StreamSchema(_SCHEMA_PYTHON_OBJECT)
    """
    Stream where each tuple is a Python object. Each object
    must be picklable to allow execution in a distributed
    environment where streams can connect processes
    running on the same or different resources.
    Python streams can only be used by Python applications.
    """
    Json = StreamSchema(_SCHEMA_JSON)
    """
    Stream where each tuple is logically a JSON object.
    `Json` can be used as a natural interchange format between Streams applications
    implemented in different programming languages. All languages supported by
    Streams support publishing and subscribing to JSON streams.
    A Python callable receives each tuple as a `dict` as though it was
    created from ``json.loads(json_formatted_str)`` where `json_formatted_str`
    is the JSON formatted representation of tuple.
    Python objects that are to be converted to JSON objects
    must be supported by `JSONEncoder`. If the object is not a `dict`
    then it will be converted to a JSON object with a single key `payload`
    containing the value.
    """
    String = StreamSchema(_SCHEMA_STRING)
    """
    Stream where each tuple is a string.
    `String` can be used as a natural interchange format between Streams applications
    implemented in different programming languages. All languages supported by
    Streams support publishing and subscribing to string streams.
    A Python callable receives each tuple as a `str` object.
    Python objects are converted to strings using ``str(obj)``.
    """
    Binary = StreamSchema(_SCHEMA_BINARY)
    """
    Stream where each tuple is a binary object (sequence of bytes).
    .. warning:: `Binary` is not yet supported for Python applications.
    """
    XML = StreamSchema(_SCHEMA_XML)
    """
    Stream where each tuple is an XML document.
    .. warning:: `XML` is not yet supported for Python applications.
    """
    def schema(self):
        """Private method. May be removed at any time."""
        # Delegate to the wrapped StreamSchema's definition string.
        return self.value.schema()
    def spl_json(self):
        # JSON-graph representation of the underlying StreamSchema.
        return self.value.spl_json()
    def extend(self, schema):
        """Extend a structured schema by another.
        Args:
            schema(StreamSchema): Schema to extend this schema by.
        Returns:
            StreamSchema: New schema that is an extension of this schema.
        """
        return self.value.extend(schema)
    def __str__(self):
        return str(self.schema())
def _from_named_tuple(nt):
    """Build a StreamSchema from a ``typing.NamedTuple`` class.

    The schema is returned in named-tuple passing style unless any field
    embeds a tuple type, in which case the default (dict) passing style is
    used, because nested named-tuple classes are problematic.
    """
    attributes = []
    has_nested_tuple = False
    for field in nt._fields:
        spl_type = _spl_from_type(nt._field_types[field])
        # A tuple appearing directly, inside a list, or as part of a
        # map/set value forces dict-style passing. # __NESTED_TUPLE__
        if spl_type.startswith('tuple') or spl_type.startswith('list<tuple'):
            has_nested_tuple = True
        elif spl_type.startswith(('map<', 'set<')) and 'tuple<' in spl_type:
            has_nested_tuple = True
        attributes.append(spl_type + ' ' + field)
    schema_def = 'tuple<' + ', '.join(attributes) + '>'
    # For nested tuple types use dict as preferred python object instead of
    # named tuple (problems with named tuple classes). # __NESTED_TUPLE__
    if has_nested_tuple:
        return StreamSchema(schema_def)
    return StreamSchema(schema_def).as_tuple(named=nt.__name__)
def _from_named_tuple_subclass(nt):
    """Render the attribute list (``type name, ...``) of a named tuple class.

    Used when a named tuple appears as a nested tuple type; the result has
    no surrounding ``tuple<...>`` wrapper (the caller adds it).
    """
    attributes = [_spl_from_type(nt._field_types[field]) + ' ' + field
                  for field in nt._fields]
    return ', '.join(attributes)
def _spl_from_type(type_):
    """Map a Python type (or typing construct) to its SPL type string.

    Handles plain types via the _PYTYPE_TO_SPL table, then parameterized
    typing constructs (List/Set/Mapping/Optional) recursively, then named
    tuples as nested SPL tuples. Raises ValueError for anything else.
    """
    _init_type_mappings()
    if type_ in _PYTYPE_TO_SPL:
        return _PYTYPE_TO_SPL[type_]
    # See https://bugs.python.org/issue34568
    # isinstance,issubclass no longer work in Python 3.7
    if hasattr(type_, '__origin__') and hasattr(type_, '__args__'):
        if len(type_.__args__) == 1:
            et = type_.__args__[0]
            if typing.List[et] == type_:
                return 'list<' + _spl_from_type(et) + '>'
            if typing.Set[et] == type_:
                return 'set<' + _spl_from_type(et) + '>'
        elif len(type_.__args__) == 2:
            kt = type_.__args__[0]
            vt = type_.__args__[1]
            if typing.Mapping[kt, vt] == type_:
                return 'map<' + _spl_from_type(kt) + ', ' + _spl_from_type(vt) + '>'
    # Optional[X] is Union[X, None]; find the non-None member.
    if hasattr(type_, '__args__') and len(type_.__args__) == 2:
        if type(None) in type_.__args__:
            et = type_.__args__[0] if type_.__args__[1] is type(None) else type_.__args__[1]
            if typing.Optional[et] == type_:
                return 'optional<' + _spl_from_type(et) + '>'
    if _is_namedtuple(type_):
        # special handling for nested tuple
        return 'tuple<' + _from_named_tuple_subclass(type_) + '>'
    else:
        raise ValueError("Unsupported type: " + str(type_))
def _type_from_spl(type_):
    """Map an SPL type representation back to a Python type.

    Simple SPL types arrive as strings and are looked up directly;
    parameterized types arrive as ``(kind, element)`` tuples and are
    converted recursively.

    Raises:
        ValueError: If the SPL type has no Python mapping.
    """
    _init_type_mappings()
    if type_ in _SPLTYPE_TO_PY:
        return _SPLTYPE_TO_PY[type_]
    if isinstance(type_, tuple):
        kind = type_[0]
        if kind == 'list':
            return typing.List[_type_from_spl(type_[1])]
        if kind == 'set':
            return typing.Set[_type_from_spl(type_[1])]
        if kind == 'map':
            key_type, value_type = type_[1]
            return typing.Mapping[_type_from_spl(key_type), _type_from_spl(value_type)]
        if kind == 'optional':
            return typing.Optional[_type_from_spl(type_[1])]
    raise ValueError("Unsupported type: " + type_)
# Lazily-populated bidirectional Python <-> SPL type tables; see
# _init_type_mappings below. Empty until first use.
_PYTYPE_TO_SPL = {}
_SPLTYPE_TO_PY = {}
def _init_type_mappings():
    """Populate the type mapping tables on first use.

    Deferred so that importing streamsx.spl.types (needed for Timestamp)
    happens only when a mapping is actually required.
    """
    global _PYTYPE_TO_SPL
    if not _PYTYPE_TO_SPL:
        import streamsx.spl.types
        _PYTYPE_TO_SPL = {
            str:'rstring', bool:'boolean', int:'int64', float:'float64',
            complex:'complex64', decimal.Decimal:'decimal128',
            streamsx.spl.types.Timestamp:'timestamp',
            datetime.datetime:'timestamp',
            bytes:'blob' }
        global _SPLTYPE_TO_PY
        # Reverse table is wider: several SPL widths map to one Python type.
        _SPLTYPE_TO_PY = {
            'rstring': str, 'boolean':bool,
            'int8':int, 'int16':int, 'int32':int, 'int64':int,
            'uint8':int, 'uint16':int, 'uint32':int, 'uint64':int,
            'float32':float, 'float64':float,
            'complex32':complex, 'complex64':complex,
            'decimal32':decimal.Decimal,
            'decimal64':decimal.Decimal,
            'decimal128':decimal.Decimal,
            'timestamp':streamsx.spl.types.Timestamp,
            'blob': bytes}
| IBMStreams/streamsx.topology | com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/schema.py | Python | apache-2.0 | 37,502 |
import os
import subprocess
import sys
from collections import defaultdict
from typing import Any, ClassVar, Dict, Type
from urllib.parse import urljoin
from .wptmanifest.parser import atoms
# Metadata token that resets values inherited from parent directories.
atom_reset = atoms["Reset"]
# Test types this runner knows how to execute.
enabled_tests = {"testharness", "reftest", "wdspec", "crashtest", "print-reftest"}
class Result(object):
    """Outcome of running a whole test.

    Subclasses define ``statuses`` (the set of valid status strings) and
    ``default_expected``.
    """
    def __init__(self,
                 status,
                 message,
                 expected=None,
                 extra=None,
                 stack=None,
                 known_intermittent=None):
        if status not in self.statuses:
            raise ValueError("Unrecognised status %s" % status)
        self.status = status
        self.message = message
        self.expected = expected
        # None means "no intermittent statuses" / "no extra data".
        self.known_intermittent = [] if known_intermittent is None else known_intermittent
        self.extra = {} if extra is None else extra
        self.stack = stack

    def __repr__(self):
        return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
class SubtestResult(object):
    """Outcome of a single subtest within a test.

    Subclasses define ``statuses`` and ``default_expected``.
    """
    def __init__(self, name, status, message, stack=None, expected=None, known_intermittent=None):
        self.name = name
        if status not in self.statuses:
            raise ValueError("Unrecognised status %s" % status)
        self.status = status
        self.message = message
        self.stack = stack
        self.expected = expected
        # None means "no intermittent statuses".
        self.known_intermittent = [] if known_intermittent is None else known_intermittent

    def __repr__(self):
        return "<%s.%s %s %s>" % (self.__module__, self.__class__.__name__, self.name, self.status)
class TestharnessResult(Result):
    """Harness-level result of a testharness.js test."""
    default_expected = "OK"
    statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH", "PRECONDITION_FAILED"}
class TestharnessSubtestResult(SubtestResult):
    """Result of one testharness.js subtest."""
    default_expected = "PASS"
    statuses = {"PASS", "FAIL", "TIMEOUT", "NOTRUN", "PRECONDITION_FAILED"}
class ReftestResult(Result):
    """Result of a reftest comparison."""
    default_expected = "PASS"
    statuses = {"PASS", "FAIL", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
                "CRASH"}
class WdspecResult(Result):
    """Harness-level result of a wdspec (WebDriver) test."""
    default_expected = "OK"
    statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"}
class WdspecSubtestResult(SubtestResult):
    """Result of one wdspec subtest."""
    default_expected = "PASS"
    statuses = {"PASS", "FAIL", "ERROR"}
class CrashtestResult(Result):
    """Result of a crashtest (passes unless the browser crashes/errors)."""
    default_expected = "PASS"
    statuses = {"PASS", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
                "CRASH"}
def get_run_info(metadata_root, product, **kwargs):
    # Convenience factory; keyword arguments are forwarded to RunInfo unchanged.
    return RunInfo(metadata_root, product, **kwargs)
class RunInfo(Dict[str, Any]):
    """Dict of properties describing the environment a test run executes in.

    Combines mozinfo system data, VCS revision, product/browser details and
    caller-supplied extras; when targeting Android emulators, host OS/CPU
    fields are overridden by values queried from the device via adb.
    """
    def __init__(self, metadata_root, product, debug,
                 browser_version=None,
                 browser_channel=None,
                 verify=None,
                 extras=None,
                 enable_webrender=False,
                 device_serials=None,
                 adb_binary=None):
        import mozinfo
        self._update_mozinfo(metadata_root)
        self.update(mozinfo.info)
        from .update.tree import GitTree
        try:
            # GitTree.__init__ throws if we are not in a git tree.
            rev = GitTree(log_error=False).rev
        except (OSError, subprocess.CalledProcessError):
            rev = None
        if rev:
            self["revision"] = rev.decode("utf-8")
        self["python_version"] = sys.version_info.major
        self["product"] = product
        if debug is not None:
            self["debug"] = debug
        elif "debug" not in self:
            # Default to release
            self["debug"] = False
        if browser_version:
            self["browser_version"] = browser_version
        if browser_channel:
            self["browser_channel"] = browser_channel
        self["verify"] = verify
        if "wasm" not in self:
            self["wasm"] = False
        if extras is not None:
            # Caller-supplied extras may override anything set above.
            self.update(extras)
        if "headless" not in self:
            self["headless"] = False
        self["webrender"] = enable_webrender
        if adb_binary:
            self["adb_binary"] = adb_binary
        if device_serials:
            # Assume all emulators are identical, so query an arbitrary one.
            self._update_with_emulator_info(device_serials[0])
            # Host Linux distro is meaningless when targeting Android.
            self.pop("linux_distro", None)
    def _adb_run(self, device_serial, args, **kwargs):
        # Run an adb command against the given device and return its stdout.
        adb_binary = self.get("adb_binary", "adb")
        cmd = [adb_binary, "-s", device_serial, *args]
        return subprocess.check_output(cmd, **kwargs)
    def _adb_get_property(self, device_serial, prop, **kwargs):
        # Read a single Android system property via `adb shell getprop`.
        args = ["shell", "getprop", prop]
        value = self._adb_run(device_serial, args, **kwargs)
        return value.strip()
    def _update_with_emulator_info(self, device_serial):
        """Override system info taken from the host if using an Android
        emulator."""
        try:
            self._adb_run(device_serial, ["wait-for-device"])
            emulator_info = {
                "os": "android",
                "os_version": self._adb_get_property(
                    device_serial,
                    "ro.build.version.release",
                    encoding="utf-8",
                ),
            }
            emulator_info["version"] = emulator_info["os_version"]
            # Detect CPU info (https://developer.android.com/ndk/guides/abis#sa)
            abi64, *_ = self._adb_get_property(
                device_serial,
                "ro.product.cpu.abilist64",
                encoding="utf-8",
            ).split(',')
            if abi64:
                emulator_info["processor"] = abi64
                emulator_info["bits"] = 64
            else:
                # No 64-bit ABIs advertised; fall back to the 32-bit list.
                emulator_info["processor"], *_ = self._adb_get_property(
                    device_serial,
                    "ro.product.cpu.abilist32",
                    encoding="utf-8",
                ).split(',')
                emulator_info["bits"] = 32
            self.update(emulator_info)
        except (OSError, subprocess.CalledProcessError):
            # Best-effort: keep host-derived info if the device is unreachable.
            pass
    def _update_mozinfo(self, metadata_root):
        """Add extra build information from a mozinfo.json file in a parent
        directory"""
        import mozinfo
        path = metadata_root
        dirs = set()
        # Walk ancestors up to (but not past) the home directory; the
        # membership check stops the loop once dirname() reaches a fixpoint.
        while path != os.path.expanduser('~'):
            if path in dirs:
                break
            dirs.add(str(path))
            path = os.path.dirname(path)
        mozinfo.find_and_update_from_json(*dirs)
def server_protocol(manifest_item):
    """Return the protocol a manifest item must be served over.

    Preference order is ``h2``, then ``https``, falling back to ``http``
    when the item requests neither.
    """
    for protocol in ("h2", "https"):
        if getattr(manifest_item, protocol, False):
            return protocol
    return "http"
class Test(object):
    """Base class pairing a runnable test with its expectation metadata.

    Subclasses bind ``result_cls``/``subtest_result_cls`` and ``test_type``.
    The expectation properties resolve values by walking ``itermeta``, so
    test-specific metadata overrides directory-level (inherited) metadata.
    """
    result_cls = None # type: ClassVar[Type[Result]]
    subtest_result_cls = None # type: ClassVar[Type[SubtestResult]]
    test_type = None # type: ClassVar[str]
    default_timeout = 10 # seconds
    long_timeout = 60 # seconds
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata,
                 timeout=None, path=None, protocol="http", subdomain=False):
        self.url_base = url_base
        self.tests_root = tests_root
        self.url = url
        # Directory-level metadata (outermost first) and this test's own metadata.
        self._inherit_metadata = inherit_metadata
        self._test_metadata = test_metadata
        self.timeout = timeout if timeout is not None else self.default_timeout
        self.path = path
        self.subdomain = subdomain
        self.environment = {"url_base": url_base,
                            "protocol": protocol,
                            "prefs": self.prefs}
    def __eq__(self, other):
        if not isinstance(other, Test):
            return False
        return self.id == other.id
    # Python 2 does not have this delegation, while Python 3 does.
    def __ne__(self, other):
        return not self.__eq__(other)
    def update_metadata(self, metadata=None):
        # Hook for subclasses to accumulate run metadata; base returns input unchanged.
        if metadata is None:
            metadata = {}
        return metadata
    @classmethod
    def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
        """Build a Test instance from a manifest item."""
        timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
        return cls(manifest_file.url_base,
                   manifest_file.tests_root,
                   manifest_item.url,
                   inherit_metadata,
                   test_metadata,
                   timeout=timeout,
                   path=os.path.join(manifest_file.tests_root, manifest_item.path),
                   protocol=server_protocol(manifest_item),
                   subdomain=manifest_item.subdomain)
    @property
    def id(self):
        return self.url
    @property
    def keys(self):
        # Extra metadata keys identifying variants of this test (none for the base).
        return tuple()
    @property
    def abs_path(self):
        return os.path.join(self.tests_root, self.path)
    def _get_metadata(self, subtest=None):
        # Metadata node for the test itself, or for a named subtest if given.
        if self._test_metadata is not None and subtest is not None:
            return self._test_metadata.get_subtest(subtest)
        else:
            return self._test_metadata
    def itermeta(self, subtest=None):
        """Yield metadata nodes from most to least specific."""
        if self._test_metadata is not None:
            if subtest is not None:
                subtest_meta = self._get_metadata(subtest)
                if subtest_meta is not None:
                    yield subtest_meta
            yield self._get_metadata()
        # Inherited metadata is stored outermost-first; yield innermost first.
        for metadata in reversed(self._inherit_metadata):
            yield metadata
    def disabled(self, subtest=None):
        # First (most specific) disabled reason wins; None means enabled.
        for meta in self.itermeta(subtest):
            disabled = meta.disabled
            if disabled is not None:
                return disabled
        return None
    @property
    def restart_after(self):
        for meta in self.itermeta(None):
            restart_after = meta.restart_after
            if restart_after is not None:
                return True
        return False
    @property
    def leaks(self):
        for meta in self.itermeta(None):
            leaks = meta.leaks
            if leaks is not None:
                return leaks
        return False
    @property
    def min_assertion_count(self):
        for meta in self.itermeta(None):
            count = meta.min_assertion_count
            if count is not None:
                return count
        return 0
    @property
    def max_assertion_count(self):
        for meta in self.itermeta(None):
            count = meta.max_assertion_count
            if count is not None:
                return count
        return 0
    @property
    def lsan_disabled(self):
        for meta in self.itermeta():
            if meta.lsan_disabled is not None:
                return meta.lsan_disabled
        return False
    @property
    def lsan_allowed(self):
        # Union of allowed frames across levels; a Reset atom stops inheritance.
        lsan_allowed = set()
        for meta in self.itermeta():
            lsan_allowed |= meta.lsan_allowed
            if atom_reset in lsan_allowed:
                lsan_allowed.remove(atom_reset)
                break
        return lsan_allowed
    @property
    def lsan_max_stack_depth(self):
        for meta in self.itermeta(None):
            depth = meta.lsan_max_stack_depth
            if depth is not None:
                return depth
        return None
    @property
    def mozleak_allowed(self):
        mozleak_allowed = set()
        for meta in self.itermeta():
            mozleak_allowed |= meta.leak_allowed
            if atom_reset in mozleak_allowed:
                mozleak_allowed.remove(atom_reset)
                break
        return mozleak_allowed
    @property
    def mozleak_threshold(self):
        # Most specific threshold per process wins (first writer keeps the key).
        rv = {}
        for meta in self.itermeta(None):
            threshold = meta.leak_threshold
            for key, value in threshold.items():
                if key not in rv:
                    rv[key] = value
        return rv
    @property
    def tags(self):
        tags = set()
        for meta in self.itermeta():
            meta_tags = meta.tags
            tags |= meta_tags
            if atom_reset in meta_tags:
                tags.remove(atom_reset)
                break
        # Always tag with the top-level directory of the test id.
        tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
        return tags
    @property
    def prefs(self):
        # Apply outermost metadata first so inner levels override; a Reset
        # atom discards everything collected so far.
        prefs = {}
        for meta in reversed(list(self.itermeta())):
            meta_prefs = meta.prefs
            if atom_reset in meta_prefs:
                del meta_prefs[atom_reset]
                prefs = {}
            prefs.update(meta_prefs)
        return prefs
    def expected(self, subtest=None):
        """Return the primary expected status for the test or a subtest."""
        if subtest is None:
            default = self.result_cls.default_expected
        else:
            default = self.subtest_result_cls.default_expected
        metadata = self._get_metadata(subtest)
        if metadata is None:
            return default
        try:
            expected = metadata.get("expected")
            if isinstance(expected, str):
                return expected
            elif isinstance(expected, list):
                # A list means [primary, *known_intermittent].
                return expected[0]
            elif expected is None:
                return default
        except KeyError:
            return default
    def implementation_status(self):
        implementation_status = None
        for meta in self.itermeta():
            implementation_status = meta.implementation_status
            if implementation_status:
                return implementation_status
        # assuming no specific case, we are implementing it
        return "implementing"
    def known_intermittent(self, subtest=None):
        # Everything after the primary status in an "expected" list.
        metadata = self._get_metadata(subtest)
        if metadata is None:
            return []
        try:
            expected = metadata.get("expected")
            if isinstance(expected, list):
                return expected[1:]
            return []
        except KeyError:
            return []
    def expect_any_subtest_status(self):
        metadata = self._get_metadata()
        if metadata is None:
            return False
        try:
            # This key is used by the Blink CI to ignore subtest statuses
            metadata.get("blink_expect_any_subtest_status")
            return True
        except KeyError:
            return False
    def __repr__(self):
        return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.id)
class TestharnessTest(Test):
    """A testharness.js test, possibly with testdriver support or jsshell-only."""
    result_cls = TestharnessResult
    subtest_result_cls = TestharnessSubtestResult
    test_type = "testharness"
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata,
                 timeout=None, path=None, protocol="http", testdriver=False,
                 jsshell=False, scripts=None, subdomain=False):
        Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout,
                      path, protocol, subdomain)
        self.testdriver = testdriver
        self.jsshell = jsshell
        self.scripts = scripts or []
    @classmethod
    def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
        timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
        # Older manifest items may lack these attributes; default to False.
        testdriver = manifest_item.testdriver if hasattr(manifest_item, "testdriver") else False
        jsshell = manifest_item.jsshell if hasattr(manifest_item, "jsshell") else False
        script_metadata = manifest_item.script_metadata or []
        # Collect <script src> dependencies declared in the test's metadata.
        scripts = [v for (k, v) in script_metadata
                   if k == "script"]
        return cls(manifest_file.url_base,
                   manifest_file.tests_root,
                   manifest_item.url,
                   inherit_metadata,
                   test_metadata,
                   timeout=timeout,
                   path=os.path.join(manifest_file.tests_root, manifest_item.path),
                   protocol=server_protocol(manifest_item),
                   testdriver=testdriver,
                   jsshell=jsshell,
                   scripts=scripts,
                   subdomain=manifest_item.subdomain)
    @property
    def id(self):
        return self.url
class ManualTest(Test):
    """A test requiring human interaction; has no automated result class."""
    test_type = "manual"
    @property
    def id(self):
        return self.url
class ReftestTest(Test):
    """A reftest
    A reftest should be considered to pass if one of its references matches (see below) *and* the
    reference passes if it has any references recursively.
    Attributes:
        references (List[Tuple[str, str]]): a list of alternate references, where one must match for the test to pass
        viewport_size (Optional[Tuple[int, int]]): size of the viewport for this test, if not default
        dpi (Optional[int]): dpi to use when rendering this test, if not default
    """
    result_cls = ReftestResult
    test_type = "reftest"
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
                 timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
                 protocol="http", subdomain=False):
        Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout,
                      path, protocol, subdomain)
        # Only equality ("==") and inequality ("!=") comparisons are valid.
        for _, ref_type in references:
            if ref_type not in ("==", "!="):
                raise ValueError
        self.references = references
        self.viewport_size = self.get_viewport_size(viewport_size)
        self.dpi = dpi
        self._fuzzy = fuzzy or {}
    @classmethod
    def cls_kwargs(cls, manifest_test):
        # Constructor kwargs derived from the manifest item; subclasses extend this.
        return {"viewport_size": manifest_test.viewport_size,
                "dpi": manifest_test.dpi,
                "protocol": server_protocol(manifest_test),
                "fuzzy": manifest_test.fuzzy}
    @classmethod
    def from_manifest(cls,
                      manifest_file,
                      manifest_test,
                      inherit_metadata,
                      test_metadata):
        """Build the reference graph for a reftest from its manifest item."""
        timeout = cls.long_timeout if manifest_test.timeout == "long" else cls.default_timeout
        url = manifest_test.url
        node = cls(manifest_file.url_base,
                   manifest_file.tests_root,
                   manifest_test.url,
                   inherit_metadata,
                   test_metadata,
                   [],
                   timeout=timeout,
                   path=manifest_test.path,
                   subdomain=manifest_test.subdomain,
                   **cls.cls_kwargs(manifest_test))
        refs_by_type = defaultdict(list)
        for ref_url, ref_type in manifest_test.references:
            refs_by_type[ref_type].append(ref_url)
        # Construct a list of all the mismatches, where we end up with mismatch_1 != url !=
        # mismatch_2 != url != mismatch_3 etc.
        #
        # Per the logic documented above, this means that none of the mismatches provided match,
        mismatch_walk = None
        if refs_by_type["!="]:
            mismatch_walk = ReftestTest(manifest_file.url_base,
                                        manifest_file.tests_root,
                                        refs_by_type["!="][0],
                                        [],
                                        None,
                                        [])
            cmp_ref = mismatch_walk
            # Chain subsequent mismatches through synthetic nodes for this url.
            for ref_url in refs_by_type["!="][1:]:
                cmp_self = ReftestTest(manifest_file.url_base,
                                       manifest_file.tests_root,
                                       url,
                                       [],
                                       None,
                                       [])
                cmp_ref.references.append((cmp_self, "!="))
                cmp_ref = ReftestTest(manifest_file.url_base,
                                      manifest_file.tests_root,
                                      ref_url,
                                      [],
                                      None,
                                      [])
                cmp_self.references.append((cmp_ref, "!="))
        if mismatch_walk is None:
            mismatch_refs = []
        else:
            mismatch_refs = [(mismatch_walk, "!=")]
        if refs_by_type["=="]:
            # For each == ref, add a reference to this node whose tail is the mismatch list.
            # Per the logic documented above, this means any one of the matches must pass plus all the mismatches.
            for ref_url in refs_by_type["=="]:
                ref = ReftestTest(manifest_file.url_base,
                                  manifest_file.tests_root,
                                  ref_url,
                                  [],
                                  None,
                                  mismatch_refs)
                node.references.append((ref, "=="))
        else:
            # Otherwise, we just add the mismatches directly as we are immediately into the
            # mismatch chain with no alternates.
            node.references.extend(mismatch_refs)
        return node
    def update_metadata(self, metadata):
        if "url_count" not in metadata:
            metadata["url_count"] = defaultdict(int)
        for reference, _ in self.references:
            # We assume a naive implementation in which a url with multiple
            # possible screenshots will need to take both the lhs and rhs screenshots
            # for each possible match
            metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
            reference.update_metadata(metadata)
        return metadata
    def get_viewport_size(self, override):
        return override
    @property
    def id(self):
        return self.url
    @property
    def keys(self):
        return ("reftype", "refurl")
    @property
    def fuzzy(self):
        return self._fuzzy
    @property
    def fuzzy_override(self):
        """Fuzzy-match overrides from metadata, keyed by resolved reference URL(s)."""
        values = {}
        # Apply least specific metadata first so more specific levels win.
        for meta in reversed(list(self.itermeta(None))):
            value = meta.fuzzy
            if not value:
                continue
            if atom_reset in value:
                value.remove(atom_reset)
                values = {}
            for key, data in value:
                if isinstance(key, (tuple, list)):
                    # Key is a (test, ref) pair of relative urls; resolve both.
                    key = list(key)
                    key[0] = urljoin(self.url, key[0])
                    key[1] = urljoin(self.url, key[1])
                    key = tuple(key)
                elif key:
                    # Key is just a relative url to a ref
                    key = urljoin(self.url, key)
                values[key] = data
        return values
    @property
    def page_ranges(self):
        return {}
class PrintReftestTest(ReftestTest):
    """A reftest rendered through the print pipeline, compared page by page."""
    test_type = "print-reftest"
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
                 timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
                 page_ranges=None, protocol="http", subdomain=False):
        super(PrintReftestTest, self).__init__(url_base, tests_root, url, inherit_metadata, test_metadata,
                                               references, timeout, path, viewport_size, dpi,
                                               fuzzy, protocol, subdomain=subdomain)
        self._page_ranges = page_ranges
    @classmethod
    def cls_kwargs(cls, manifest_test):
        rv = super(PrintReftestTest, cls).cls_kwargs(manifest_test)
        rv["page_ranges"] = manifest_test.page_ranges
        return rv
    def get_viewport_size(self, override):
        # Print reftests use a fixed 5in x 3in page, expressed in centimetres.
        assert override is None
        return (5*2.54, 3*2.54)
    @property
    def page_ranges(self):
        return self._page_ranges
class WdspecTest(Test):
    """A WebDriver specification test; slower, so timeouts are longer."""
    result_cls = WdspecResult
    subtest_result_cls = WdspecSubtestResult
    test_type = "wdspec"
    default_timeout = 25
    long_timeout = 180 # 3 minutes
class CrashTest(Test):
    """A test that passes as long as loading it does not crash the browser."""
    result_cls = CrashtestResult
    test_type = "crashtest"
# Dispatch table: manifest item type -> Test subclass used to run it.
manifest_test_cls = {"reftest": ReftestTest,
                     "print-reftest": PrintReftestTest,
                     "testharness": TestharnessTest,
                     "manual": ManualTest,
                     "wdspec": WdspecTest,
                     "crashtest": CrashTest}
def from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata):
    """Create the Test instance appropriate for a manifest item.

    Dispatches on ``manifest_test.item_type`` via the ``manifest_test_cls``
    table and delegates construction to that class's ``from_manifest``.
    """
    return manifest_test_cls[manifest_test.item_type].from_manifest(
        manifest_file, manifest_test, inherit_metadata, test_metadata)
| chromium/chromium | third_party/wpt_tools/wpt/tools/wptrunner/wptrunner/wpttest.py | Python | bsd-3-clause | 24,539 |
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import model_utils.fields
class Migration(migrations.Migration):
    """Initial schema: Organization plus the Organization/Group/User link table."""
    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # created/modified are auto-managed timestamps from django-model-utils.
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('name', models.CharField(max_length=255)),
                ('display_name', models.CharField(max_length=255, null=True, blank=True)),
                ('contact_name', models.CharField(max_length=255, null=True, blank=True)),
                ('contact_email', models.EmailField(max_length=255, null=True, blank=True)),
                ('contact_phone', models.CharField(max_length=50, null=True, blank=True)),
                ('logo_url', models.CharField(max_length=255, null=True, blank=True)),
                # Plain M2M relations to auth groups and users.
                ('groups', models.ManyToManyField(related_name='organizations', to='auth.Group', blank=True)),
                ('users', models.ManyToManyField(related_name='organizations', to=settings.AUTH_USER_MODEL, blank=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            # Explicit three-way join table tying a user to a group within an organization.
            name='OrganizationGroupUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('group', models.ForeignKey(to='auth.Group', on_delete=models.CASCADE)),
                ('organization', models.ForeignKey(to='edx_solutions_organizations.Organization', on_delete=models.CASCADE)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='organizationgroupuser',
            unique_together=set([('organization', 'group', 'user')]),
        ),
    ]
| edx-solutions/organizations-edx-platform-extensions | edx_solutions_organizations/migrations/0001_initial.py | Python | agpl-3.0 | 2,681 |
"""
KDTree for PySAL: Python Spatial Analysis Library.
Adds support for Arc Distance to scipy.spatial.KDTree.
"""
import math
import scipy.spatial
import numpy
from scipy import inf
from . import sphere
from .sphere import RADIUS_EARTH_KM
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
__all__ = ["DISTANCE_METRICS", "FLOAT_EPS", "KDTree"]
# Distance metrics accepted by the KDTree factory below.
DISTANCE_METRICS = ['Euclidean', 'Arc']
# Machine epsilon for float; usable as a numeric tolerance.
FLOAT_EPS = numpy.finfo(float).eps
def KDTree(data, leafsize=10, distance_metric='Euclidean',
           radius=RADIUS_EARTH_KM):
    """
    kd-tree built on top of kd-tree functionality in scipy. If using scipy 0.12
    or greater uses the scipy.spatial.cKDTree, otherwise uses
    scipy.spatial.KDTree. Offers both Arc distance and Euclidean distance.
    Note that Arc distance is only appropriate when points in latitude and
    longitude, and the radius set to meaningful value (see docs below).
    Parameters
    ----------
    data : array
        The data points to be indexed. This array is not copied,
        and so modifying this data will result in bogus results.
        Typically nx2.
    leafsize : int
        The number of points at which the algorithm switches over
        to brute-force. Has to be positive. Optional, default is 10.
    distance_metric : string
        Options: "Euclidean" (default) and "Arc".
    radius : float
        Radius of the sphere on which to compute distances.
        Assumes data in latitude and longitude. Ignored if
        distance_metric="Euclidean". Typical values:
        pysal.cg.RADIUS_EARTH_KM (default)
        pysal.cg.RADIUS_EARTH_MILES
    Raises
    ------
    ValueError
        If distance_metric is not one of "Euclidean" or "Arc".
    """
    if distance_metric.lower() == 'euclidean':
        # Compare (major, minor) numerically: the previous check only looked
        # at the minor component, so scipy >= 1.0 (e.g. "1.11") was wrongly
        # treated as older than 0.12 and fell back to the pure-Python KDTree.
        version_parts = scipy.version.version.split(".")[:2]
        if tuple(int(part) for part in version_parts) < (0, 12):
            return scipy.spatial.KDTree(data, leafsize)
        else:
            return scipy.spatial.cKDTree(data, leafsize)
    elif distance_metric.lower() == 'arc':
        return Arc_KDTree(data, leafsize, radius)
    # Previously an unknown metric silently returned None; fail loudly instead.
    raise ValueError("Unsupported distance_metric: %r" % (distance_metric,))
# Internal hack for the Arc_KDTree class inheritance: choose the kd-tree base
# class. Compare (major, minor) numerically -- the previous check looked only
# at the minor version component, so scipy >= 1.0 (e.g. "1.11" -> 11 < 12)
# incorrectly selected the slow pure-Python KDTree instead of cKDTree.
if tuple(int(part) for part in scipy.version.version.split(".")[:2]) < (0, 12):
    temp_KDTree = scipy.spatial.KDTree
else:
    temp_KDTree = scipy.spatial.cKDTree
class Arc_KDTree(temp_KDTree):
    def __init__(self, data, leafsize=10, radius=1.0):
        """
        KDTree using Arc Distance instead of Euclidean Distance.
        Returned distances are based on radius.
        For Example, pass in the radius of earth in miles to get back miles.
        Assumes data are Lng/Lat, does not account for geoids.
        For more information see docs for scipy.spatial.KDTree
        Examples
        --------
        >>> pts = [(0,90), (0,0), (180,0), (0,-90)]
        >>> kd = Arc_KDTree(pts, radius = sphere.RADIUS_EARTH_KM)
        >>> d,i = kd.query((90,0), k=4)
        >>> d
        array([10007.54339801, 10007.54339801, 10007.54339801, 10007.54339801])
        >>> circumference = 2*math.pi*sphere.RADIUS_EARTH_KM
        >>> round(d[0],5) == round(circumference/4.0,5)
        True
        """
        self.radius = radius
        self.circumference = 2 * math.pi * radius
        # Project lng/lat points onto the unit sphere; the Euclidean kd-tree
        # then indexes the 3D chord space, and distances are converted back
        # to arc lengths by the query methods.
        temp_KDTree.__init__(self, list(map(sphere.toXYZ, data)), leafsize)
def _toXYZ(self, x):
if not issubclass(type(x), numpy.ndarray):
x = numpy.array(x)
if len(x.shape) == 2 and x.shape[1] == 3: # assume point is already in XYZ
return x
if len(x.shape) == 1 and x.shape[0] == 3: # assume point is already in XYZ
return x
elif len(x.shape) == 1:
x = numpy.array(sphere.toXYZ(x))
else:
x = list(map(sphere.toXYZ, x))
return x
def count_neighbors(self, other, r, p=2):
"""
See scipy.spatial.KDTree.count_neighbors
Parameters
----------
p: ignored, kept to maintain compatibility with scipy.spatial.KDTree
Examples
--------
>>> pts = [(0,90), (0,0), (180,0), (0,-90)]
>>> kd = Arc_KDTree(pts, radius = sphere.RADIUS_EARTH_KM)
>>> kd.count_neighbors(kd,0)
4
>>> circumference = 2.0*math.pi*sphere.RADIUS_EARTH_KM
>>> kd.count_neighbors(kd,circumference/2.0)
16
"""
if r > 0.5 * self.circumference:
raise ValueError("r, must not exceed 1/2 circumference of the sphere (%f)." % self.circumference * 0.5)
r = sphere.arcdist2linear(r, self.radius)
return temp_KDTree.count_neighbors(self, other, r)
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=inf):
"""
See scipy.spatial.KDTree.query
Parameters
----------
x : array-like, last dimension self.m
query points are lng/lat.
p: ignored, kept to maintain compatibility with scipy.spatial.KDTree
Examples
--------
>>> import numpy as np
>>> pts = [(0,90), (0,0), (180,0), (0,-90)]
>>> kd = Arc_KDTree(pts, radius = sphere.RADIUS_EARTH_KM)
>>> d,i = kd.query((90,0), k=4)
>>> d
array([10007.54339801, 10007.54339801, 10007.54339801, 10007.54339801])
>>> circumference = 2*math.pi*sphere.RADIUS_EARTH_KM
>>> round(d[0],5) == round(circumference/4.0,5)
True
>>> d,i = kd.query(kd.data, k=3)
>>> d2,i2 = kd.query(pts, k=3)
>>> (d == d2).all()
True
>>> (i == i2).all()
True
"""
eps = sphere.arcdist2linear(eps, self.radius)
if distance_upper_bound != inf:
distance_upper_bound = sphere.arcdist2linear(
distance_upper_bound, self.radius)
d, i = temp_KDTree.query(self, self._toXYZ(x), k,
eps=eps, distance_upper_bound=distance_upper_bound)
dims = len(d.shape)
r = self.radius
if dims == 0:
return sphere.linear2arcdist(d, r), i
if dims == 1:
#TODO: implement linear2arcdist on numpy arrays
d = [sphere.linear2arcdist(x, r) for x in d]
elif dims == 2:
d = [[sphere.linear2arcdist(x, r) for x in row] for row in d]
return numpy.array(d), i
def query_ball_point(self, x, r, p=2, eps=0):
"""
See scipy.spatial.KDTree.query_ball_point
Parameters
----------
p: ignored, kept to maintain compatibility with scipy.spatial.KDTree
Examples
--------
>>> import numpy as np
>>> pts = [(0,90), (0,0), (180,0), (0,-90)]
>>> kd = Arc_KDTree(pts, radius = sphere.RADIUS_EARTH_KM)
>>> circumference = 2*math.pi*sphere.RADIUS_EARTH_KM
>>> kd.query_ball_point(pts, circumference/4.)
array([list([0, 1, 2]), list([0, 1, 3]), list([0, 2, 3]), list([1, 2, 3])],
dtype=object)
>>> kd.query_ball_point(pts, circumference/2.)
array([list([0, 1, 2, 3]), list([0, 1, 2, 3]), list([0, 1, 2, 3]),
list([0, 1, 2, 3])], dtype=object)
"""
eps = sphere.arcdist2linear(eps, self.radius)
#scipy.sphere.KDTree.query_ball_point appears to ignore the eps argument.
# we have some floating point errors moving back and forth between cordinate systems,
# so we'll account for that be adding some to our radius, 3*float's eps value.
if r > 0.5 * self.circumference:
raise ValueError("r, must not exceed 1/2 circumference of the sphere (%f)." % self.circumference * 0.5)
r = sphere.arcdist2linear(r, self.radius) + FLOAT_EPS * 3
return temp_KDTree.query_ball_point(self, self._toXYZ(x), r, eps=eps)
def query_ball_tree(self, other, r, p=2, eps=0):
"""
See scipy.spatial.KDTree.query_ball_tree
Parameters
----------
p: ignored, kept to maintain compatibility with scipy.spatial.KDTree
Examples
--------
>>> pts = [(0,90), (0,0), (180,0), (0,-90)]
>>> kd = Arc_KDTree(pts, radius = sphere.RADIUS_EARTH_KM)
>>> kd.query_ball_tree(kd, kd.circumference/4.) == [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
True
>>> kd.query_ball_tree(kd, kd.circumference/2.) == [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
True
"""
eps = sphere.arcdist2linear(eps, self.radius)
#scipy.sphere.KDTree.query_ball_point appears to ignore the eps argument.
# we have some floating point errors moving back and forth between cordinate systems,
# so we'll account for that be adding some to our radius, 3*float's eps value.
if self.radius != other.radius:
raise ValueError("Both trees must have the same radius.")
if r > 0.5 * self.circumference:
raise ValueError("r, must not exceed 1/2 circumference of the sphere (%f)." % self.circumference * 0.5)
r = sphere.arcdist2linear(r, self.radius) + FLOAT_EPS * 3
return temp_KDTree.query_ball_tree(self, other, r, eps=eps)
def query_pairs(self, r, p=2, eps=0):
"""
See scipy.spatial.KDTree.query_pairs
Parameters
----------
p: ignored, kept to maintain compatibility with scipy.spatial.KDTree
Examples
--------
>>> pts = [(0,90), (0,0), (180,0), (0,-90)]
>>> kd = Arc_KDTree(pts, radius = sphere.RADIUS_EARTH_KM)
>>> kd.query_pairs(kd.circumference/4.) == set([(0, 1), (1, 3), (2, 3), (0, 2)])
True
>>> kd.query_pairs(kd.circumference/2.) == set([(0, 1), (1, 2), (1, 3), (2, 3), (0, 3), (0, 2)])
True
"""
if r > 0.5 * self.circumference:
raise ValueError("r, must not exceed 1/2 circumference of the sphere (%f)." % self.circumference * 0.5)
r = sphere.arcdist2linear(r, self.radius) + FLOAT_EPS * 3
return temp_KDTree.query_pairs(self, r, eps=eps)
def sparse_distance_matrix(self, other, max_distance, p=2):
"""
See scipy.spatial.KDTree.sparse_distance_matrix
Parameters
----------
p: ignored, kept to maintain compatibility with scipy.spatial.KDTree
Examples
--------
>>> pts = [(0,90), (0,0), (180,0), (0,-90)]
>>> kd = Arc_KDTree(pts, radius = sphere.RADIUS_EARTH_KM)
>>> kd.sparse_distance_matrix(kd, kd.circumference/4.).todense()
matrix([[ 0. , 10007.54339801, 10007.54339801, 0. ],
[10007.54339801, 0. , 0. , 10007.54339801],
[10007.54339801, 0. , 0. , 10007.54339801],
[ 0. , 10007.54339801, 10007.54339801, 0. ]])
>>> kd.sparse_distance_matrix(kd, kd.circumference/2.).todense()
matrix([[ 0. , 10007.54339801, 10007.54339801, 20015.08679602],
[10007.54339801, 0. , 20015.08679602, 10007.54339801],
[10007.54339801, 20015.08679602, 0. , 10007.54339801],
[20015.08679602, 10007.54339801, 10007.54339801, 0. ]])
"""
if self.radius != other.radius:
raise ValueError("Both trees must have the same radius.")
if max_distance > 0.5 * self.circumference:
raise ValueError("max_distance, must not exceed 1/2 circumference of the sphere (%f)." % self.circumference * 0.5)
max_distance = sphere.arcdist2linear(
max_distance, self.radius) + FLOAT_EPS * 3
D = temp_KDTree.sparse_distance_matrix(
self, other, max_distance)
D = D.tocoo()
#print D.data
a2l = lambda x: sphere.linear2arcdist(x, self.radius)
#print map(a2l,D.data)
return scipy.sparse.coo_matrix((list(map(a2l, D.data)), (D.row, D.col))).todok()
| lixun910/pysal | pysal/lib/cg/kdtree.py | Python | bsd-3-clause | 11,910 |
# Term Frequency Graph based on .jsonl data from TheRealDonaldTrump
import sys
import string
import json
from collections import Counter
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
def process(text, tokenizer=TweetTokenizer(), stopwords=[]):
    """Normalise a tweet into a list of content tokens.

    The text is lowercased and tokenised; tokens appearing in
    ``stopwords`` and purely numeric tokens are discarded.

    Returns a list of strings.
    """
    kept = []
    for token in tokenizer.tokenize(text.lower()):
        # drop stopwords/punctuation supplied by the caller, and bare numbers
        if token in stopwords or token.isdigit():
            continue
        kept.append(token)
    return kept
if __name__ == '__main__':
    tweet_tokenizer = TweetTokenizer()
    # Treat punctuation plus retweet markers ('rt', 'via') as stopwords.
    punct = list(string.punctuation)
    stopword_list = stopwords.words('english') + punct + ['rt', 'via']
    # First CLI argument: path to a .jsonl file, one tweet JSON per line.
    fname = sys.argv[1]
    tf = Counter()
    with open(fname, 'r') as f:
        for line in f:
            tweet = json.loads(line)
            # Missing 'text' keys yield '' so malformed records are skipped
            # rather than raising KeyError.
            tokens = process(text=tweet.get('text', ''),
                             tokenizer=tweet_tokenizer,
                             stopwords=stopword_list)
            tf.update(tokens)
    # Bar chart of the 30 most frequent terms, written to disk.
    y = [count for tag, count in tf.most_common(30)]
    x = range(1, len(y)+1)
    plt.bar(x, y)
    plt.title("Term Frequencies")
    plt.ylabel("Frequency")
    plt.savefig('term_distribution.png')
| filkuzmanovski/tweet-science | twitter_term_frequency_graph.py | Python | gpl-3.0 | 1,326 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Amicable numbers" – Project Euler Problem No. 21
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=21
#
def get_proper_divisors(n):
    """Return the proper divisors of n (all divisors less than n), ascending.

    For n <= 2 this returns [1], matching the original behavior.
    """
    ret = [1]
    # No proper divisor other than 1 can exceed n // 2. Floor division
    # (//) keeps the bound an int under Python 3's true division as well;
    # the original `n/2` breaks there.
    for d in range(2, n // 2 + 1):
        if n % d == 0:
            ret.append(d)
    return ret
def d(n):
    """Return the sum of the proper divisors of n (Project Euler's d(n))."""
    # sum() replaces the bare `reduce`, which is a builtin only in
    # Python 2; the result is identical.
    return sum(get_proper_divisors(n))
def is_amicable_number(n):
    """Return True when n is one half of an amicable pair.

    n is amicable iff d(d(n)) == n while d(n) != n (the inequality
    excludes perfect numbers).
    """
    partner = d(n)
    return partner != n and d(partner) == n
# Testcases
assert (284 == d(220)), "Testcase failed"
# Solve
sum = 0
for n in range(1, 10000):
if is_amicable_number(n):
print "%d is amicable number with %d" % (n, d(n))
sum = sum + n
print "Solution:", sum
| fbcom/project-euler | 021_amicable_numbers.py | Python | mit | 800 |
#!/usr/bin/python
# Copyright 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that on compilers sensitive to library order on linker's command line, we
# generate the correct order.
import BoostBuild
import string
t = BoostBuild.Tester()
# a() -> b() -> c() -> d(): a call chain spread over four libraries so the
# final link succeeds only if the libraries appear in dependency order.
t.write("a.cpp", """
void b();
void a() { b(); }
""")
t.write("b.cpp", """
void c();
void b() { c(); }
""")
t.write("c.cpp", """
void d();
void c() { d(); }
""")
t.write("d.cpp", """
void d() {}
""")
# The order of libraries in 'main' is crafted so that we get error unless we do
# something about the order ourselves.
t.write("jamfile.jam", """
exe main : main.cpp libd libc libb liba ;
lib libd : d.cpp ;
lib libc : c.cpp : <link>static <use>libd ;
lib libb : b.cpp : <use>libc ;
lib liba : a.cpp : <use>libb ;
""")
t.write("main.cpp", """
void a();
int main() { a(); }
""")
t.write("jamroot.jam", """
""")
t.run_build_system("-d2")
t.expect_addition("bin/$toolset/debug/main.exe")
# Test the order between searched libraries.
# Here png uses z, so "png" must precede "zzz" on the command line; the
# build is dry-run (-n) and the verbose (-d+2) output is inspected.
t.write("jamfile.jam", """
exe main : main.cpp png z ;
lib png : z : <name>png ;
lib z : : <name>zzz ;
""")
t.run_build_system("-a -n -d+2")
t.fail_test(string.find(t.stdout(), "png") > string.find(t.stdout(), "zzz"))
# Reverse dependency: z uses png, so "zzz" must precede "png".
t.write("jamfile.jam", """
exe main : main.cpp png z ;
lib png : : <name>png ;
lib z : png : <name>zzz ;
""")
t.run_build_system("-a -n -d+2")
t.fail_test(string.find(t.stdout(), "png") < string.find(t.stdout(), "zzz"))
# Test the order between prebuilt libraries.
# Same two scenarios as above, but with <file> libraries instead of
# <name>-searched ones.
t.write("first.a", "")
t.write("second.a", "")
t.write("jamfile.jam", """
exe main : main.cpp first second ;
lib first : second : <file>first.a ;
lib second : : <file>second.a ;
""")
t.run_build_system("-a -n -d+2")
t.fail_test(string.find(t.stdout(), "first") > string.find(t.stdout(), "second"))
t.write("jamfile.jam", """
exe main : main.cpp first second ;
lib first : : <file>first.a ;
lib second : first : <file>second.a ;
""")
t.run_build_system("-a -n -d+2")
t.fail_test(string.find(t.stdout(), "first") < string.find(t.stdout(), "second"))
t.cleanup()
| mxrrow/zaicoin | src/deps/boost/tools/build/v2/test/library_order.py | Python | mit | 2,157 |
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all predefined custom targeting keys. The statement
retrieves up to the maximum page size limit of 500. To create custom
targeting keys, run create_custom_targeting_keys_and_values.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
custom_targeting_service = client.GetService(
    'CustomTargetingService', version='v201306')
# Bind the :type placeholder used in the PQL query below so that only
# predefined custom targeting keys are returned.
values = [{
    'key': 'type',
    'value': {
        'xsi_type': 'TextValue',
        'value': 'PREDEFINED'
    }
}]
# LIMIT 500 is the maximum page size the API allows per request.
filter_statement = {'query': 'WHERE type = :type LIMIT 500',
                    'values': values}
# Get custom targeting keys by statement.
response = custom_targeting_service.GetCustomTargetingKeysByStatement(
    filter_statement)[0]
# 'results' is absent when the query matched nothing.
keys = []
if 'results' in response:
  keys = response['results']
# Display results.
if keys:
  for key in keys:
    print ('Custom targeting key with id \'%s\', name \'%s\', display name '
           '\'%s\', and type \'%s\' was found.'
           % (key['id'], key['name'], key['displayName'], key['type']))
else:
  print 'No keys were found.'
| lociii/googleads-python-lib | examples/adspygoogle/dfp/v201306/get_custom_targeting_keys_by_statement.py | Python | apache-2.0 | 2,094 |
"""
===============================================================
The discount factor tuner for dlm
===============================================================
The modelTuner class provides the tuning functionality for the dlm class.
It makes use of the gradient descent to optimize the discount factor for
each component (jointly) based on the one-day ahead prediction error.
>>> import modelTuner
>>> myTuner = modelTuner()
>>> tunedDLM = myTuner(untunedDLM, maxit=100)
The tuned result will be saved in tunedDLM while the untunedDLM remains unchanged.
An alternative way to call this class is via the tuner method within dlm class.
>>> mydlm.tune(maxit=100)
This will permanently change the discounting factor in mydlm. So if the user
prefers to build a new dlm with the new discount factor without changing the
original one, they should opt to use the modelTuner class.
"""
from copy import deepcopy
from numpy import array
class modelTuner:
    """ The main class for modelTuner

    Tunes the discount factor of each dlm component (jointly) by gradient
    descent on the one-day-ahead prediction mean squared error.

    Attributes:
        method: the optimization method. Currently only 'gradient_descent'
                is supported.
        loss: the optimization loss function. Currently only 'mse' (one-day
              ahead prediction) is supported.
    """

    def __init__(self, method='gradient_descent', loss='mse'):
        self.method = method
        self.loss = loss
        self.current_mse = None   # MSE under the current discounts
        self.err = 1e-4           # finite-difference step / convergence tol
        self.discounts = None     # tuned discounts, populated by tune()

    def tune(self, untunedDLM, maxit=100, step=1.0):
        """ Main function for tuning the DLM model.

        Args:
            untunedDLM: The DLM object that needs tuning
            maxit: The maximum number of iterations for gradient descent.
            step: the moving length at each iteration.

        Returns:
            A tuned DLM object in uninitialized status.
        """
        # work on a deep copy so the caller's DLM stays unchanged
        tunedDLM = deepcopy(untunedDLM)
        tunedDLM.showInternalMessage(False)

        if not tunedDLM.initialized:
            tunedDLM.fitForwardFilter()
        discounts = array(tunedDLM._getDiscounts(), dtype='float64')
        self.current_mse = tunedDLM._getMSE()

        converged = False
        # using gradient descent
        if self.method == 'gradient_descent':
            for i in range(maxit):
                gradient = self.find_gradient(discounts, tunedDLM)
                # BUG FIX: the original loop never broke out, so its
                # post-loop check `i < maxit - 1` was always False and the
                # success message was unreachable. Stop once the proposed
                # move is negligible.
                if max(abs(gradient)) * step < self.err:
                    converged = True
                    break
                discounts = discounts - gradient * step
                discounts = array([self.cutoff(x) for x in discounts])
                tunedDLM._setDiscounts(discounts)
                tunedDLM.fitForwardFilter()
                self.current_mse = tunedDLM._getMSE()
            if converged:
                print('Converge successfully!')
            else:
                print('The algorithm stops without converging.')
                if min(discounts) <= 0.7 + self.err or \
                        max(discounts) >= 1 - 2 * self.err:
                    print('Possible reason: some discount is too close to 1 or 0.7' +
                          ' (0.7 is smallest discount that is permissible.')
                else:
                    # message typo fixed: 'maixt' -> 'maxit'
                    print('It might require more step to converge.' +
                          ' Use tune(..., maxit = <a larger number>) instead.')

        self.discounts = discounts
        tunedDLM._setDiscounts(discounts, change_component=True)
        return tunedDLM

    def getDiscounts(self):
        """ Get the tuned discounting factors. One for each component (even the
        component being multi-dimensional, only one discounting factor will
        be assigned to one component). Initialized to None.
        """
        return self.discounts

    def find_gradient(self, discounts, DLM):
        """ Estimate the gradient of the one-step-ahead MSE with respect to
        the discount factors via forward finite differences.

        Args:
            discounts: the current discount factors (sequence of floats).
            DLM: the dlm being tuned; its discounts are perturbed in place
                 and the caller is expected to reset them afterwards.

        Returns:
            A numpy array with one gradient component per discount.
        """
        if self.current_mse is None:
            self.current_mse = DLM._getMSE()
        gradient = array([0.0] * len(discounts))
        for i in range(len(discounts)):
            # BUG FIX: perturb a *copy*. The original aliased `discounts`
            # (`discounts_err = discounts`), so each coordinate's
            # perturbation permanently leaked into the caller's array and
            # corrupted every subsequent gradient component.
            discounts_err = array(discounts, dtype='float64')
            discounts_err[i] = self.cutoff(discounts_err[i] + self.err)
            DLM._setDiscounts(discounts_err)
            DLM.fitForwardFilter()
            gradient[i] = (DLM._getMSE() - self.current_mse) / self.err
        return gradient

    def cutoff(self, a):
        """ Clamp a discount factor into the permissible range [0.7, 1). """
        if a < 0.7:
            return 0.7
        if a >= 1:
            return 0.99999
        return a
| wwrechard/pydlm | pydlm/tuner/dlmTuner.py | Python | bsd-3-clause | 4,269 |
import numpy as np
import theano as th
from kaggle_utils import multiclass_log_loss
from examples.utils import make_progressbar
def validate(dataset_x, dataset_y, model, epoch, batch_size):
progress = make_progressbar('Testing epoch #{}'.format(epoch), len(dataset_x))
progress.start()
logloss = 0.
for j in range((dataset_x.shape[0] + batch_size - 1) // batch_size):
# Note: numpy correctly handles the size of the last minibatch.
mini_batch_input = dataset_x[j*batch_size : (j+1)*batch_size].astype(th.config.floatX)
mini_batch_targets = dataset_y[j*batch_size : (j+1)*batch_size].astype(th.config.floatX)
mini_batch_prediction = model.forward(mini_batch_input)
logloss += multiclass_log_loss(mini_batch_targets, mini_batch_prediction, normalize=False)
progress.update(j * batch_size + len(mini_batch_input))
progress.finish()
print("Epoch #{}, Logloss: {:.5f}".format(epoch, logloss/dataset_x.shape[0]))
| Pandoro/DeepFried2 | examples/Kaggle-Otto/test.py | Python | mit | 986 |
import time
import threading
import subprocess
import pygame.locals
vlc_path = 'C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe'
class Scene(threading.Thread):
def __init__(self, screen, games, games_manager):
threading.Thread.__init__(self)
self.screen = screen
self.games = games
self.games_manager = games_manager
class DummyScene(Scene):
def run(self):
time.sleep(5)
class VideoScene(Scene):
def __init__(self, screen, games, games_manager, filename):
Scene.__init__(self, screen, games, games_manager)
self.filename = filename
def run(self):
subprocess.call([vlc_path, self.filename, '--play-and-exit', '--fullscreen'])#, shell=True)
if __name__ == '__main__':
import pygame
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((700, 800))
s = VideoScene(screen, None, None, 'videos/p2.mpg')
s.start()
s.join()
| flowersteam/SESM | SESM/scene.py | Python | gpl-3.0 | 953 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import multiprocessing
import signal
import os
import pwd
import Queue
import random
import traceback
import tempfile
import time
import collections
import socket
import base64
import sys
import pipes
import jinja2
import subprocess
import getpass
import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible.utils import template
from ansible.utils import check_conditional
from ansible.utils import string_functions
from ansible import errors
from ansible import module_common
import poller
import connection
from return_data import ReturnData
from ansible.callbacks import DefaultRunnerCallbacks, vv
from ansible.module_common import ModuleReplacer
from ansible.module_utils.splitter import split_args, unquote
from ansible.cache import FactCache
from ansible.utils import update_hash
module_replacer = ModuleReplacer(strip_comments=False)
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
HAS_ATFORK=True
try:
from Crypto.Random import atfork
except ImportError:
HAS_ATFORK=False
multiprocessing_runner = None
OUTPUT_LOCKFILE = tempfile.TemporaryFile()
PROCESS_LOCKFILE = tempfile.TemporaryFile()
################################################
def _executor_hook(job_queue, result_queue, new_stdin):
# attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
# this function also not present in CentOS 6
if HAS_ATFORK:
atfork()
signal.signal(signal.SIGINT, signal.SIG_IGN)
while not job_queue.empty():
try:
host = job_queue.get(block=False)
return_data = multiprocessing_runner._executor(host, new_stdin)
result_queue.put(return_data)
except Queue.Empty:
pass
except:
traceback.print_exc()
class HostVars(dict):
''' A special view of vars_cache that adds values from the inventory when needed. '''
def __init__(self, vars_cache, inventory, vault_password=None):
self.vars_cache = vars_cache
self.inventory = inventory
self.lookup = {}
self.update(vars_cache)
self.vault_password = vault_password
def __getitem__(self, host):
if host not in self.lookup:
result = self.inventory.get_variables(host, vault_password=self.vault_password).copy()
result.update(self.vars_cache.get(host, {}))
self.lookup[host] = template.template('.', result, self.vars_cache)
return self.lookup[host]
class Runner(object):
''' core API interface to ansible '''
# see bin/ansible for how this is used...
def __init__(self,
host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
module_path=None, # ex: /usr/share/ansible
module_name=C.DEFAULT_MODULE_NAME, # ex: copy
module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b"
forks=C.DEFAULT_FORKS, # parallelism level
timeout=C.DEFAULT_TIMEOUT, # SSH timeout
pattern=C.DEFAULT_PATTERN, # which hosts? ex: 'all', 'acme.example.org'
remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
remote_port=None, # if SSH on different ports
private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
background=0, # async poll every X seconds, else 0 for non-async
basedir=None, # directory of playbook, if applicable
setup_cache=None, # used to share fact data w/ other tasks
vars_cache=None, # used to store variables about hosts
transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
conditional='True', # run only if this fact expression evals to true
callbacks=None, # used for output
module_vars=None, # a playbooks internals thing
play_vars=None, #
play_file_vars=None, #
role_vars=None, #
role_params=None, #
default_vars=None, #
extra_vars=None, # extra vars specified with he playbook(s)
is_playbook=False, # running from playbook or not?
inventory=None, # reference to Inventory object
subset=None, # subset pattern
check=False, # don't make any changes, just try to probe for potential changes
diff=False, # whether to show diffs for template files that change
environment=None, # environment variables (as dict) to use inside the command
complex_args=None, # structured data in addition to module_args, must be a dict
error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
accelerate=False, # use accelerated connection
accelerate_ipv6=False, # accelerated connection w/ IPv6
accelerate_port=None, # port to use with accelerated connection
vault_pass=None,
run_hosts=None, # an optional list of pre-calculated hosts to run on
no_log=False, # option to enable/disable logging for a given task
run_once=False, # option to enable/disable host bypass loop for a given task
become=False, # whether to run privelege escalation or not
become_method=C.DEFAULT_BECOME_METHOD,
become_user=C.DEFAULT_BECOME_USER, # ex: 'root'
become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None
become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo
):
# used to lock multiprocess inputs and outputs at various levels
self.output_lockfile = OUTPUT_LOCKFILE
self.process_lockfile = PROCESS_LOCKFILE
if not complex_args:
complex_args = {}
# storage & defaults
self.check = check
self.diff = diff
self.setup_cache = utils.default(setup_cache, lambda: ansible.cache.FactCache())
self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
self.basedir = utils.default(basedir, lambda: os.getcwd())
self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
self.generated_jid = str(random.randint(0, 999999999999))
self.transport = transport
self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
self.module_vars = utils.default(module_vars, lambda: {})
self.play_vars = utils.default(play_vars, lambda: {})
self.play_file_vars = utils.default(play_file_vars, lambda: {})
self.role_vars = utils.default(role_vars, lambda: {})
self.role_params = utils.default(role_params, lambda: {})
self.default_vars = utils.default(default_vars, lambda: {})
self.extra_vars = utils.default(extra_vars, lambda: {})
self.always_run = None
self.connector = connection.Connector(self)
self.conditional = conditional
self.delegate_to = None
self.module_name = module_name
self.forks = int(forks)
self.pattern = pattern
self.module_args = module_args
self.timeout = timeout
self.remote_user = remote_user
self.remote_pass = remote_pass
self.remote_port = remote_port
self.private_key_file = private_key_file
self.background = background
self.become = become
self.become_method = become_method
self.become_user_var = become_user
self.become_user = None
self.become_pass = become_pass
self.become_exe = become_exe
self.is_playbook = is_playbook
self.environment = environment
self.complex_args = complex_args
self.error_on_undefined_vars = error_on_undefined_vars
self.accelerate = accelerate
self.accelerate_port = accelerate_port
self.accelerate_ipv6 = accelerate_ipv6
self.callbacks.runner = self
self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
self.vault_pass = vault_pass
self.no_log = no_log
self.run_once = run_once
if self.transport == 'smart':
# If the transport is 'smart', check to see if certain conditions
# would prevent us from using ssh, and fallback to paramiko.
# 'smart' is the default since 1.2.1/1.3
self.transport = "ssh"
if sys.platform.startswith('darwin') and self.remote_pass:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when a SSH password is specified
self.transport = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if "Bad configuration option" in err:
self.transport = "paramiko"
# save the original transport, in case it gets
# changed later via options like accelerate
self.original_transport = self.transport
# misc housekeeping
if subset and self.inventory._subset is None:
# don't override subset when passed from playbook
self.inventory.subset(subset)
# If we get a pre-built list of hosts to run on, from say a playbook, use them.
# Also where we will store the hosts to run on once discovered
self.run_hosts = run_hosts
if self.transport == 'local':
self.remote_user = pwd.getpwuid(os.geteuid())[0]
if module_path is not None:
for i in module_path.split(os.pathsep):
utils.plugins.module_finder.add_directory(i)
utils.plugins.push_basedir(self.basedir)
# ensure we are using unique tmp paths
random.seed()
# *****************************************************
def _complex_args_hack(self, complex_args, module_args):
"""
ansible-playbook both allows specifying key=value string arguments and complex arguments
however not all modules use our python common module system and cannot
access these. An example might be a Bash module. This hack allows users to still pass "args"
as a hash of simple scalars to those arguments and is short term. We could technically
just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented
it does mean values in 'args' have LOWER priority than those on the key=value line, allowing
args to provide yet another way to have pluggable defaults.
"""
if complex_args is None:
return module_args
if not isinstance(complex_args, dict):
raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args)
for (k,v) in complex_args.iteritems():
if isinstance(v, basestring):
module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
return module_args
# *****************************************************
def _transfer_str(self, conn, tmp, name, data):
''' transfer string to remote file '''
if type(data) == dict:
data = utils.jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
if not isinstance(data, unicode):
#ensure the data is valid UTF-8
data.decode('utf-8')
else:
data = data.encode('utf-8')
afo.write(data)
except:
raise errors.AnsibleError("failure encoding into utf-8")
afo.flush()
afo.close()
remote = conn.shell.join_path(tmp, name)
try:
conn.put_file(afile, remote)
finally:
os.unlink(afile)
return remote
# *****************************************************
def _compute_environment_string(self, conn, inject=None):
''' what environment variables to use when running the command? '''
enviro = {}
if self.environment:
enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
enviro = utils.safe_eval(enviro)
if type(enviro) != dict:
raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
return conn.shell.env_prefix(**enviro)
# *****************************************************
def _compute_delegate(self, password, remote_inject):
""" Build a dictionary of all attributes for the delegate host """
delegate = {}
# allow delegated host to be templated
delegate['inject'] = remote_inject.copy()
# set any interpreters
interpreters = []
for i in delegate['inject']:
if i.startswith("ansible_") and i.endswith("_interpreter"):
interpreters.append(i)
for i in interpreters:
del delegate['inject'][i]
port = C.DEFAULT_REMOTE_PORT
# get the vars for the delegate by its name
try:
this_info = delegate['inject']['hostvars'][self.delegate_to]
except:
# make sure the inject is empty for non-inventory hosts
this_info = {}
# get the real ssh_address for the delegate
# and allow ansible_ssh_host to be templated
delegate['ssh_host'] = template.template(
self.basedir,
this_info.get('ansible_ssh_host', self.delegate_to),
this_info,
fail_on_undefined=True
)
delegate['port'] = this_info.get('ansible_ssh_port', port)
delegate['user'] = self._compute_delegate_user(self.delegate_to, delegate['inject'])
delegate['pass'] = this_info.get('ansible_ssh_pass', password)
delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file)
delegate['transport'] = this_info.get('ansible_connection', self.transport)
delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass))
# Last chance to get private_key_file from global variables.
# this is useful if delegated host is not defined in the inventory
if delegate['private_key_file'] is None:
delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
if delegate['private_key_file'] is not None:
delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
for i in this_info:
if i.startswith("ansible_") and i.endswith("_interpreter"):
delegate['inject'][i] = this_info[i]
return delegate
def _compute_delegate_user(self, host, inject):
""" Calculate the remote user based on an order of preference """
# inventory > playbook > original_host
actual_user = inject.get('ansible_ssh_user', self.remote_user)
thisuser = None
try:
if host in inject['hostvars']:
if inject['hostvars'][host].get('ansible_ssh_user'):
# user for delegate host in inventory
thisuser = inject['hostvars'][host].get('ansible_ssh_user')
else:
# look up the variables for the host directly from inventory
host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
if 'ansible_ssh_user' in host_vars:
thisuser = host_vars['ansible_ssh_user']
except errors.AnsibleError, e:
# the hostname was not found in the inventory, so
# we just ignore this and try the next method
pass
if thisuser is None and self.remote_user:
# user defined by play/runner
thisuser = self.remote_user
if thisuser is not None:
actual_user = thisuser
else:
# fallback to the inventory user of the play host
#actual_user = inject.get('ansible_ssh_user', actual_user)
actual_user = inject.get('ansible_ssh_user', self.remote_user)
return actual_user
def _count_module_args(self, args, allow_dupes=False):
'''
Count the number of k=v pairs in the supplied module args. This is
basically a specialized version of parse_kv() from utils with a few
minor changes.
'''
options = {}
if args is not None:
try:
vargs = split_args(args)
except Exception, e:
if "unbalanced jinja2 block or quotes" in str(e):
raise errors.AnsibleError("error parsing argument string '%s', try quoting the entire line." % args)
else:
raise
for x in vargs:
quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
if "=" in x and not quoted:
k, v = x.split("=",1)
is_shell_module = self.module_name in ('command', 'shell')
is_shell_param = k in ('creates', 'removes', 'chdir', 'executable')
if k in options and not allow_dupes:
if not(is_shell_module and not is_shell_param):
raise errors.AnsibleError("a duplicate parameter was found in the argument string (%s)" % k)
if is_shell_module and is_shell_param or not is_shell_module:
options[k] = v
return len(options)
# *****************************************************
    def _execute_module(self, conn, tmp, module_name, args,
        async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True):
        ''' transfer and run a module along with its arguments on the remote side

        conn          -- established connection object (must expose .shell)
        tmp           -- remote temporary directory path ('' if none created yet)
        module_name   -- module to execute
        args          -- key=value argument string for the module
        async_jid / async_module / async_limit -- set when the module is being
                         wrapped for background (async) execution
        inject        -- variable dictionary used for templating
        persist_files -- when True, never delete the remote tmp directory
        complex_args  -- structured (YAML) module arguments, if any
        delete_remote_tmp -- allow cleanup of the remote tmp directory

        Returns a ReturnData holding the parsed module result.
        '''
        # hack to support fireball mode
        if module_name == 'fireball':
            args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
            if 'port' not in args:
                args += " port=%s" % C.ZEROMQ_PORT
        (
            module_style,
            shebang,
            module_data
        ) = self._configure_module(conn, module_name, args, inject, complex_args)
        # a remote tmp path may be necessary and not already created
        if self._late_needs_tmp_path(conn, tmp, module_style):
            tmp = self._make_tmp_path(conn)
        remote_module_path = conn.shell.join_path(tmp, module_name)
        # transfer the module file unless it can be piped over stdin (pipelining)
        if (module_style != 'new'
            or async_jid is not None
            or not conn.has_pipelining
            or not C.ANSIBLE_SSH_PIPELINING
            or C.DEFAULT_KEEP_REMOTE_FILES
            or self.become_method == 'su'):
            self._transfer_str(conn, tmp, module_name, module_data)
        environment_string = self._compute_environment_string(conn, inject)
        if "tmp" in tmp and (self.become and self.become_user != 'root'):
            # deal with possible umask issues once you become another user
            self._remote_chmod(conn, 'a+r', remote_module_path, tmp)
        cmd = ""
        in_data = None
        if module_style != 'new':
            if 'CHECKMODE=True' in args:
                # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to
                # do --check mode, so to be safe we will not run it.
                return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules"))
            elif 'NO_LOG' in args:
                return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules"))
            args = template.template(self.basedir, args, inject)
            # decide whether we need to transfer JSON or key=value
            argsfile = None
            if module_style == 'non_native_want_json':
                if complex_args:
                    complex_args.update(utils.parse_kv(args))
                    argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args))
                else:
                    argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args)))
            else:
                argsfile = self._transfer_str(conn, tmp, 'arguments', args)
            if self.become and self.become_user != 'root':
                # deal with possible umask issues once become another user
                self._remote_chmod(conn, 'a+r', argsfile, tmp)
            if async_jid is None:
                cmd = "%s %s" % (remote_module_path, argsfile)
            else:
                cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
        else:
            if async_jid is None:
                if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su':
                    # pipelining: module source is fed to the interpreter on stdin
                    in_data = module_data
                else:
                    cmd = "%s" % (remote_module_path)
            else:
                cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
        if not shebang:
            raise errors.AnsibleError("module is missing interpreter line")
        rm_tmp = None
        if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self.become or self.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp
        cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
        cmd = cmd.strip()
        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the become_user
            sudoable = False
        res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data)
        if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self.become and self.become_user != 'root':
                # not becoming root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                cmd2 = conn.shell.remove(tmp, recurse=True)
                self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)
        data = utils.parse_json(res['stdout'], from_remote=True, no_exceptions=True)
        if 'parsed' in data and data['parsed'] == False:
            # module output was not valid JSON; surface stderr to aid debugging
            data['msg'] += res['stderr']
        return ReturnData(conn=conn, result=data)
# *****************************************************
def _executor(self, host, new_stdin):
''' handler for multiprocessing library '''
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
try:
self._new_stdin = new_stdin
if not new_stdin and fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
except OSError, e:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
exec_rc = self._executor_internal(host, new_stdin)
if type(exec_rc) != ReturnData:
raise Exception("unexpected return type: %s" % type(exec_rc))
# redundant, right?
if not exec_rc.comm_ok:
self.callbacks.on_unreachable(host, exec_rc.result)
return exec_rc
except errors.AnsibleError, ae:
msg = str(ae)
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
except Exception:
msg = traceback.format_exc()
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
# *****************************************************
def get_combined_cache(self):
# merge the VARS and SETUP caches for this host
combined_cache = self.setup_cache.copy()
return utils.merge_hash(combined_cache, self.vars_cache)
    def get_inject_vars(self, host):
        '''Assemble the "inject" dictionary of variables used for all
        templating operations for the given host.

        Sources are layered in strictly increasing priority: defaults,
        inventory host vars, setup (fact) cache, play vars / vars_files,
        role vars, module vars, vars_cache (set_fact / include_vars),
        role params, and finally extra (-e) vars. A handful of magic keys
        (group_names, groups, vars, defaults, environment, playbook_dir,
        omit, combined_cache) are added on top.
        '''
        host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
        combined_cache = self.get_combined_cache()
        # use combined_cache and host_variables to template the module_vars
        # we update the inject variables with the data we're about to template
        # since some of the variables we'll be replacing may be contained there too
        module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {}))
        module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject)
        module_vars = template.template(self.basedir, self.module_vars, module_vars_inject)
        # remove bad variables from the module vars, which may be in there due
        # the way role declarations are specified in playbooks
        if 'tags' in module_vars:
            del module_vars['tags']
        if 'when' in module_vars:
            del module_vars['when']
        # start building the dictionary of injected variables
        inject = {}
        # default vars are the lowest priority
        inject = utils.combine_vars(inject, self.default_vars)
        # next come inventory variables for the host
        inject = utils.combine_vars(inject, host_variables)
        # then the setup_cache which contains facts gathered
        inject = utils.combine_vars(inject, self.setup_cache.get(host, {}))
        # next come variables from vars and vars files
        inject = utils.combine_vars(inject, self.play_vars)
        inject = utils.combine_vars(inject, self.play_file_vars)
        # next come variables from role vars/main.yml files
        inject = utils.combine_vars(inject, self.role_vars)
        # then come the module variables
        inject = utils.combine_vars(inject, module_vars)
        # followed by vars_cache things (set_fact, include_vars, and
        # vars_files which had host-specific templating done)
        inject = utils.combine_vars(inject, self.vars_cache.get(host, {}))
        # role parameters next
        inject = utils.combine_vars(inject, self.role_params)
        # and finally -e vars are the highest priority
        inject = utils.combine_vars(inject, self.extra_vars)
        # and then special vars
        inject.setdefault('ansible_ssh_user', self.remote_user)
        inject['group_names'] = host_variables.get('group_names', [])
        inject['groups'] = self.inventory.groups_list()
        inject['vars'] = self.module_vars
        inject['defaults'] = self.default_vars
        inject['environment'] = self.environment
        inject['playbook_dir'] = os.path.abspath(self.basedir)
        inject['omit'] = self.omit_token
        inject['combined_cache'] = combined_cache
        return inject
    def _executor_internal(self, host, new_stdin):
        ''' executes any module one or more times

        Builds the inject dictionary for the host, resolves any with_*
        lookup items, and then either makes a single call to
        _executor_internal_inner or one call per item (aggregating the
        per-item results into a single ReturnData).
        '''
        # We build the proper injected dictionary for all future
        # templating operations in this run
        inject = self.get_inject_vars(host)
        # Then we selectively merge some variable dictionaries down to a
        # single dictionary, used to template the HostVars for this host
        temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
        temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
        temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
        temp_vars = utils.combine_vars(temp_vars, self.play_vars)
        temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
        temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
        hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
        # and we save the HostVars in the injected dictionary so they
        # may be referenced from playbooks/templates
        inject['hostvars'] = hostvars
        host_connection = inject.get('ansible_connection', self.transport)
        if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]:
            port = hostvars.get('ansible_ssh_port', self.remote_port)
            if port is None:
                port = C.DEFAULT_REMOTE_PORT
        else:
            # fireball, local, etc
            port = self.remote_port
        if self.inventory.basedir() is not None:
            inject['inventory_dir'] = self.inventory.basedir()
        if self.inventory.src() is not None:
            inject['inventory_file'] = self.inventory.src()
        # could be already set by playbook code
        inject.setdefault('ansible_version', utils.version_info(gitinfo=False))
        # allow with_foo to work in playbooks...
        items = None
        items_plugin = self.module_vars.get('items_lookup_plugin', None)
        if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
            basedir = self.basedir
            if '_original_file' in inject:
                basedir = os.path.dirname(inject['_original_file'])
                filesdir = os.path.join(basedir, '..', 'files')
                if os.path.exists(filesdir):
                    basedir = filesdir
            try:
                items_terms = self.module_vars.get('items_lookup_terms', '')
                items_terms = template.template(basedir, items_terms, inject)
                items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
            except errors.AnsibleUndefinedVariable, e:
                if 'has no attribute' in str(e):
                    # the undefined variable was an attribute of a variable that does
                    # exist, so try and run this through the conditional check to see
                    # if the user wanted to skip something on being undefined
                    if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True):
                        # the conditional check passed, so we have to fail here
                        raise
                    else:
                        # the conditional failed, so we skip this task
                        result = utils.jsonify(dict(changed=False, skipped=True))
                        self.callbacks.on_skipped(host, None)
                        return ReturnData(host=host, result=result)
            except errors.AnsibleError, e:
                raise
            except Exception, e:
                raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e))
            # strip out any jinja2 template syntax within
            # the data returned by the lookup plugin
            items = utils._clean_data_struct(items, from_remote=True)
            if items is None:
                items = []
            else:
                if type(items) != list:
                    raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
                if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]:
                    # hack for apt, yum, and pkgng so that with_items maps back into a single module call
                    use_these_items = []
                    for x in items:
                        inject['item'] = x
                        if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
                            use_these_items.append(x)
                    inject['item'] = ",".join(use_these_items)
                    items = None
        def _safe_template_complex_args(args, inject):
            # Ensure the complex args here are a dictionary, but
            # first template them if they contain a variable
            returned_args = args
            if isinstance(args, basestring):
                # If the complex_args were evaluated to a dictionary and there are
                # more keys in the templated version than the evaled version, some
                # param inserted additional keys (the template() call also runs
                # safe_eval on the var if it looks like it's a datastructure). If the
                # evaled_args are not a dict, it's most likely a whole variable (ie.
                # args: {{var}}), in which case there's no way to detect the proper
                # count of params in the dictionary.
                templated_args = template.template(self.basedir, args, inject, convert_bare=True)
                evaled_args = utils.safe_eval(args)
                if isinstance(evaled_args, dict) and len(evaled_args) > 0 and len(evaled_args) != len(templated_args):
                    raise errors.AnsibleError("a variable tried to insert extra parameters into the args for this task")
                # set the returned_args to the templated_args
                returned_args = templated_args
            # and a final check to make sure the complex args are a dict
            if returned_args is not None and not isinstance(returned_args, dict):
                raise errors.AnsibleError("args must be a dictionary, received %s" % returned_args)
            return returned_args
        # logic to decide how to run things depends on whether with_items is used
        if items is None:
            complex_args = _safe_template_complex_args(self.complex_args, inject)
            return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
        elif len(items) > 0:
            # executing using with_items, so make multiple calls
            # TODO: refactor
            if self.background > 0:
                raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")
            all_comm_ok = True
            all_changed = False
            all_failed = False
            results = []
            for x in items:
                # use a fresh inject for each item
                this_inject = inject.copy()
                this_inject['item'] = x
                complex_args = _safe_template_complex_args(self.complex_args, this_inject)
                result = self._executor_internal_inner(
                    host,
                    self.module_name,
                    self.module_args,
                    this_inject,
                    port,
                    complex_args=complex_args
                )
                if 'stdout' in result.result and 'stdout_lines' not in result.result:
                    result.result['stdout_lines'] = result.result['stdout'].splitlines()
                results.append(result.result)
                if result.comm_ok == False:
                    all_comm_ok = False
                    all_failed = True
                    break
            for x in results:
                if x.get('changed') == True:
                    all_changed = True
                if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]:
                    all_failed = True
                    break
            msg = 'All items completed'
            if all_failed:
                msg = "One or more items failed."
            rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
            if not all_failed:
                del rd_result['failed']
            return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
        else:
            # with_items resolved to an empty list: nothing to do, task is skipped
            self.callbacks.on_skipped(host, None)
            return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))
# *****************************************************
    def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
        ''' decides how to invoke a module

        Resolves the action handler, evaluates task conditionals, works out
        the actual connection parameters (including delegate_to and become
        settings), opens the connection, runs the module (with until/retries
        support), applies changed_when/failed_when and fires the appropriate
        callbacks. Returns a ReturnData.
        '''
        # late processing of parameterized become_user (with_items,..)
        if self.become_user_var is not None:
            self.become_user = template.template(self.basedir, self.become_user_var, inject)
        # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
        module_name = template.template(self.basedir, module_name, inject)
        if module_name in utils.plugins.action_loader:
            if self.background != 0:
                raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
            handler = utils.plugins.action_loader.get(module_name, self)
        elif self.background == 0:
            handler = utils.plugins.action_loader.get('normal', self)
        else:
            handler = utils.plugins.action_loader.get('async', self)
        if type(self.conditional) != list:
            self.conditional = [ self.conditional ]
        for cond in self.conditional:
            if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
                result = dict(changed=False, skipped=True)
                if self.no_log:
                    result = utils.censor_unlogged_data(result)
                    self.callbacks.on_skipped(host, result)
                else:
                    self.callbacks.on_skipped(host, inject.get('item',None))
                return ReturnData(host=host, result=utils.jsonify(result))
        if getattr(handler, 'setup', None) is not None:
            handler.setup(module_name, inject)
        conn = None
        actual_host = inject.get('ansible_ssh_host', host)
        # allow ansible_ssh_host to be templated
        actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
        actual_port = port
        actual_user = inject.get('ansible_ssh_user', self.remote_user)
        actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
        actual_transport = inject.get('ansible_connection', self.transport)
        actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
        actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
        # become settings may be overridden per-host (legacy sudo/su spellings included)
        self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
        self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
        self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
        self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
        self.become_method = inject.get('ansible_become_method', self.become_method)
        # select default root user in case self.become requested
        # but no user specified; happens e.g. in host vars when
        # just ansible_become=True is specified
        if self.become and self.become_user is None:
            self.become_user = 'root'
        if actual_private_key_file is not None:
            actual_private_key_file = os.path.expanduser(actual_private_key_file)
        if self.accelerate and actual_transport != 'local':
            #Fix to get the inventory name of the host to accelerate plugin
            if inject.get('ansible_ssh_host', None):
                self.accelerate_inventory_host = host
            else:
                self.accelerate_inventory_host = None
            # if we're using accelerated mode, force the
            # transport to accelerate
            actual_transport = "accelerate"
            if not self.accelerate_port:
                self.accelerate_port = C.ACCELERATE_PORT
        actual_port = inject.get('ansible_ssh_port', port)
        # the delegated host may have different SSH port configured, etc
        # and we need to transfer those, and only those, variables
        self.delegate_to = inject.get('delegate_to', None)
        if self.delegate_to:
            self.delegate_to = template.template(self.basedir, self.delegate_to, inject)
        if self.delegate_to is not None:
            delegate = self._compute_delegate(actual_pass, inject)
            actual_transport = delegate['transport']
            actual_host = delegate['ssh_host']
            actual_port = delegate['port']
            actual_user = delegate['user']
            actual_pass = delegate['pass']
            actual_private_key_file = delegate['private_key_file']
            self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass'))
            inject = delegate['inject']
            # set resolved delegate_to into inject so modules can call _remote_checksum
            inject['delegate_to'] = self.delegate_to
        # user/pass may still contain variables at this stage
        actual_user = template.template(self.basedir, actual_user, inject)
        actual_pass = template.template(self.basedir, actual_pass, inject)
        self.become_pass = template.template(self.basedir, self.become_pass, inject)
        # make actual_user available as __magic__ ansible_ssh_user variable
        inject['ansible_ssh_user'] = actual_user
        try:
            if actual_transport == 'accelerate':
                # for accelerate, we stuff both ports into a single
                # variable so that we don't have to mangle other function
                # calls just to accommodate this one case
                actual_port = [actual_port, self.accelerate_port]
            elif actual_port is not None:
                actual_port = int(template.template(self.basedir, actual_port, inject))
        except ValueError, e:
            result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port)
            return ReturnData(host=host, comm_ok=False, result=result)
        try:
            if self.delegate_to or host != actual_host:
                delegate_host = host
            else:
                delegate_host = None
            conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file, delegate_host)
            # pick a shell plugin matching the remote shell (fall back to 'sh')
            default_shell = getattr(conn, 'default_shell', '')
            shell_type = inject.get('ansible_shell_type')
            if not shell_type:
                if default_shell:
                    shell_type = default_shell
                else:
                    shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
            shell_plugin = utils.plugins.shell_loader.get(shell_type)
            if shell_plugin is None:
                shell_plugin = utils.plugins.shell_loader.get('sh')
            conn.shell = shell_plugin
        except errors.AnsibleConnectionFailed, e:
            result = dict(failed=True, msg="FAILED: %s" % str(e))
            return ReturnData(host=host, comm_ok=False, result=result)
        tmp = ''
        # action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir
        if self._early_needs_tmp_path(module_name, handler):
            tmp = self._make_tmp_path(conn)
        # allow module args to work as a dictionary
        # though it is usually a string
        if isinstance(module_args, dict):
            module_args = utils.serialize_args(module_args)
        # render module_args and complex_args templates
        try:
            # When templating module_args, we need to be careful to ensure
            # that no variables inadvertently (or maliciously) add params
            # to the list of args. We do this by counting the number of k=v
            # pairs before and after templating.
            num_args_pre = self._count_module_args(module_args, allow_dupes=True)
            module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars)
            num_args_post = self._count_module_args(module_args)
            if num_args_pre != num_args_post:
                raise errors.AnsibleError("A variable inserted a new parameter into the module args. " + \
                    "Be sure to quote variables if they contain equal signs (for example: \"{{var}}\").")
            # And we also make sure nothing added in special flags for things
            # like the command/shell module (ie. #USE_SHELL)
            if '#USE_SHELL' in module_args:
                raise errors.AnsibleError("A variable tried to add #USE_SHELL to the module arguments.")
            complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars)
        except jinja2.exceptions.UndefinedError, e:
            raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
        # filter omitted arguments out from complex_args
        if complex_args:
            complex_args = dict(filter(lambda x: x[1] != self.omit_token, complex_args.iteritems()))
        # Filter omitted arguments out from module_args.
        # We do this with split_args instead of parse_kv to ensure
        # that things are not unquoted/requoted incorrectly
        args = split_args(module_args)
        final_args = []
        for arg in args:
            if '=' in arg:
                k,v = arg.split('=', 1)
                if unquote(v) != self.omit_token:
                    final_args.append(arg)
            else:
                # not a k=v param, append it
                final_args.append(arg)
        module_args = ' '.join(final_args)
        result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
        # Code for do until feature
        until = self.module_vars.get('until', None)
        if until is not None and result.comm_ok:
            inject[self.module_vars.get('register')] = result.result
            cond = template.template(self.basedir, until, inject, expand_lists=False)
            if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
                retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False)
                delay = self.module_vars.get('delay')
                for x in range(1, int(retries) + 1):
                    # template the delay, cast to float and sleep
                    delay = template.template(self.basedir, delay, inject, expand_lists=False)
                    delay = float(delay)
                    time.sleep(delay)
                    tmp = ''
                    if self._early_needs_tmp_path(module_name, handler):
                        tmp = self._make_tmp_path(conn)
                    result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
                    result.result['attempts'] = x
                    vv("Result from run %i is: %s" % (x, result.result))
                    inject[self.module_vars.get('register')] = result.result
                    cond = template.template(self.basedir, until, inject, expand_lists=False)
                    if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
                        break
                if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
                    result.result['failed'] = True
                    result.result['msg'] = "Task failed as maximum retries was encountered"
        else:
            result.result['attempts'] = 0
        conn.close()
        if not result.comm_ok:
            # connection or parsing errors...
            self.callbacks.on_unreachable(host, result.result)
        else:
            data = result.result
            # https://github.com/ansible/ansible/issues/4958
            if hasattr(sys.stdout, "isatty"):
                if "stdout" in data and sys.stdout.isatty():
                    if not string_functions.isprintable(data['stdout']):
                        data['stdout'] = ''
            if 'item' in inject:
                result.result['item'] = inject['item']
            result.result['invocation'] = dict(
                module_args=module_args,
                module_name=module_name
            )
            changed_when = self.module_vars.get('changed_when')
            failed_when = self.module_vars.get('failed_when')
            if (changed_when is not None or failed_when is not None) and self.background == 0:
                register = self.module_vars.get('register')
                if register is not None:
                    if 'stdout' in data:
                        data['stdout_lines'] = data['stdout'].splitlines()
                    inject[register] = data
                # only run the final checks if the async_status has finished,
                # or if we're not running an async_status check at all
                if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status':
                    if changed_when is not None and 'skipped' not in data:
                        data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
                    if failed_when is not None and 'skipped' not in data:
                        data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
            if is_chained:
                # no callbacks
                return result
            if 'skipped' in data:
                self.callbacks.on_skipped(host, inject.get('item',None))
            if self.no_log:
                data = utils.censor_unlogged_data(data)
            if not result.is_successful():
                ignore_errors = self.module_vars.get('ignore_errors', False)
                self.callbacks.on_failed(host, data, ignore_errors)
            else:
                if self.diff:
                    self.callbacks.on_file_diff(conn.host, result.diff)
                self.callbacks.on_ok(host, data)
        return result
def _early_needs_tmp_path(self, module_name, handler):
''' detect if a tmp path should be created before the handler is called '''
if module_name in utils.plugins.action_loader:
return getattr(handler, 'TRANSFERS_FILES', False)
# other modules never need tmp path at early stage
return False
def _late_needs_tmp_path(self, conn, tmp, module_style):
if "tmp" in tmp:
# tmp has already been created
return False
if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su':
# tmp is necessary to store module source code
return True
if not conn.has_pipelining:
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
if module_style != "new":
# even when conn has pipelining, old style modules need tmp to store arguments
return True
return False
# *****************************************************
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False,
executable=None, become=False, in_data=None):
''' execute a command string over SSH, return the output '''
# this can be skipped with powershell modules when there is no analog to a Windows command (like chmod)
if cmd:
if executable is None:
executable = C.DEFAULT_EXECUTABLE
become_user = self.become_user
# compare connection user to (su|sudo)_user and disable if the same
# assume connection type is local if no user attribute
this_user = getattr(conn, 'user', getpass.getuser())
if (not become and this_user == become_user):
sudoable = False
become = False
rc, stdin, stdout, stderr = conn.exec_command(cmd,
tmp,
become_user=become_user,
sudoable=sudoable,
executable=executable,
in_data=in_data)
if type(stdout) not in [ str, unicode ]:
out = ''.join(stdout.readlines())
else:
out = stdout
if type(stderr) not in [ str, unicode ]:
err = ''.join(stderr.readlines())
else:
err = stderr
if rc is not None:
return dict(rc=rc, stdout=out, stderr=err)
else:
return dict(stdout=out, stderr=err)
return dict(rc=None, stdout='', stderr='')
# *****************************************************
def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False):
''' issue a remote chmod command '''
cmd = conn.shell.chmod(mode, path)
return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become)
# *****************************************************
def _remote_expand_user(self, conn, path, tmp):
''' takes a remote path and performs tilde expansion on the remote host '''
if not path.startswith('~'):
return path
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
if self.become and self.become_user:
expand_path = '~%s' % self.become_user
cmd = conn.shell.expand_user(expand_path)
data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False)
initial_fragment = utils.last_non_blank_line(data['stdout'])
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Return
# the original string
return path
if len(split_path) > 1:
return conn.shell.join_path(initial_fragment, *split_path[1:])
else:
return initial_fragment
# *****************************************************
    def _remote_checksum(self, conn, tmp, path, inject):
        ''' takes a remote checksum and returns 1 if no file

        The checksum command is built for the python interpreter configured
        for the host (or, when delegate_to is set, for the delegate). Returns
        the checksum string from the remote command output, or the sentinel
        "INVALIDCHECKSUM" when it could not be computed.
        '''
        # Lookup the python interp from the host or delegate
        # host == inven_host when there is no delegate
        host = inject['inventory_hostname']
        if 'delegate_to' in inject:
            delegate = inject['delegate_to']
            if delegate:
                # host == None when the delegate is not in inventory
                host = None
                # delegate set, check whether the delegate has inventory vars
                delegate = template.template(self.basedir, delegate, inject)
                if delegate in inject['hostvars']:
                    # host == delegate if we need to lookup the
                    # python_interpreter from the delegate's inventory vars
                    host = delegate
        if host:
            python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python')
        else:
            python_interp = 'python'
        cmd = conn.shell.checksum(path, python_interp)
        #TODO: remove this horrible hack and find way to get checksum to work with other privilege escalation methods
        if self.become_method == 'sudo':
            sudoable = True
        else:
            sudoable = False
        data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable)
        data2 = utils.last_non_blank_line(data['stdout'])
        try:
            if data2 == '':
                # this may happen if the connection to the remote server
                # failed, so just return "INVALIDCHECKSUM" to avoid errors
                return "INVALIDCHECKSUM"
            else:
                return data2.split()[0]
        except IndexError:
            # unexpected output shape; report it but keep the run going
            sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
            sys.stderr.write("command: %s\n" % cmd)
            sys.stderr.write("----\n")
            sys.stderr.write("output: %s\n" % data)
            sys.stderr.write("----\n")
            # this will signal that it changed and allow things to keep going
            return "INVALIDCHECKSUM"
# *****************************************************
def _make_tmp_path(self, conn):
''' make and return a temporary path on a remote box '''
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
if self.become and self.become_user != 'root':
use_system_tmp = True
tmp_mode = None
if self.remote_user != 'root' or (self.become and self.become_user != 'root'):
tmp_mode = 'a+rx'
cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self.transport in ['ssh']:
if utils.VERBOSITY > 3:
output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
else:
output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
elif 'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
if 'stdout' in result and result['stdout'] != '':
output = output + ": %s" % result['stdout']
raise errors.AnsibleError(output)
rc = conn.shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basetmp, cmd))
return rc
# *****************************************************
def _remove_tmp_path(self, conn, tmp_path):
''' Remove a tmp_path. '''
if "-tmp-" in tmp_path:
cmd = conn.shell.remove(tmp_path, recurse=True)
self._low_level_exec_command(conn, cmd, None, sudoable=False)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
# *****************************************************
def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None):
''' transfer a module over SFTP, does not run it '''
(
module_style,
module_shebang,
module_data
) = self._configure_module(conn, module_name, module_args, inject, complex_args)
module_remote_path = conn.shell.join_path(tmp, module_name)
self._transfer_str(conn, tmp, module_name, module_data)
return (module_remote_path, module_style, module_shebang)
# *****************************************************
    def _configure_module(self, conn, module_name, module_args, inject, complex_args=None):
        ''' Locate *module_name* on the configured module paths and prepare it.

        Shared boilerplate and the supplied arguments are baked into the
        module source via module_replacer. Note the return order is
        (module_style, module_shebang, module_data) -- deliberately different
        from modify_module's (data, style, shebang) order.

        Raises AnsibleFileNotFound when the module (or the whole core module
        tree) cannot be located.
        '''
        # Search module path(s) for named module.
        module_suffixes = getattr(conn, 'default_suffixes', None)
        module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes)
        if module_path is None:
            # Probe for a known core module to distinguish "this module is
            # missing" from "the whole core checkout is missing".
            module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes)
            if module_path2 is not None:
                raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name))
            else:
                raise errors.AnsibleFileNotFound("module %s not found in configured module paths.  Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name))
        # insert shared code and arguments into the module
        (module_data, module_style, module_shebang) = module_replacer.modify_module(
            module_path, complex_args, module_args, inject
        )
        return (module_style, module_shebang, module_data)
# *****************************************************
    def _parallel_exec(self, hosts):
        ''' handles mulitprocessing when more than 1 fork is required

        Spawns self.forks worker processes that pull hosts off a shared job
        queue (via the module-level _executor_hook) and push ReturnData onto a
        shared result queue. Each worker gets a duplicate of stdin when one
        can be dup'd, so interactive prompts still work under forking.

        Returns the drained list of results; raises AnsibleError when the
        result queue read is interrupted.
        '''
        manager = multiprocessing.Manager()
        job_queue = manager.Queue()
        for host in hosts:
            job_queue.put(host)
        result_queue = manager.Queue()
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            # stdin is not a real file (e.g. redirected); workers get None.
            fileno = None
        workers = []
        for i in range(self.forks):
            new_stdin = None
            if fileno is not None:
                try:
                    new_stdin = os.fdopen(os.dup(fileno))
                except OSError, e:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass
            prc = multiprocessing.Process(target=_executor_hook,
                args=(job_queue, result_queue, new_stdin))
            prc.start()
            workers.append(prc)
        try:
            for worker in workers:
                worker.join()
        except KeyboardInterrupt:
            # Ctrl+C: tear down all workers before collecting what finished.
            for worker in workers:
                worker.terminate()
                worker.join()
        results = []
        try:
            while not result_queue.empty():
                results.append(result_queue.get(block=False))
        except socket.error:
            raise errors.AnsibleError("<interrupted>")
        return results
# *****************************************************
def _partition_results(self, results):
''' separate results by ones we contacted & ones we didn't '''
if results is None:
return None
results2 = dict(contacted={}, dark={})
for result in results:
host = result.host
if host is None:
raise Exception("internal error, host not set")
if result.communicated_ok():
results2["contacted"][host] = result.result
else:
results2["dark"][host] = result.result
# hosts which were contacted but never got a chance to return
for host in self.run_hosts:
if not (host in results2['dark'] or host in results2['contacted']):
results2["dark"][host] = {}
return results2
# *****************************************************
    def run(self):
        ''' xfer & run module on all matched hosts

        Resolves the host pattern, dispatches execution either once per
        host group (for BYPASS_HOST_LOOP action plugins and run_once tasks),
        in parallel via forked workers, or serially, and returns the results
        partitioned into 'contacted' and 'dark' dicts.
        '''
        # find hosts that match the pattern
        if not self.run_hosts:
            self.run_hosts = self.inventory.list_hosts(self.pattern)
        hosts = self.run_hosts
        if len(hosts) == 0:
            self.callbacks.on_no_hosts()
            return dict(contacted={}, dark={})
        # _executor_hook (used by the forked workers) reaches the runner
        # through this module-level global.
        global multiprocessing_runner
        multiprocessing_runner = self
        results = None
        # Check if this is an action plugin. Some of them are designed
        # to be ran once per group of hosts. Example module: pause,
        # run once per hostgroup, rather than pausing once per each
        # host.
        p = utils.plugins.action_loader.get(self.module_name, self)
        # Never fork more workers than there are hosts to serve.
        if self.forks == 0 or self.forks > len(hosts):
            self.forks = len(hosts)
        if (p and (getattr(p, 'BYPASS_HOST_LOOP', None)) or self.run_once):
            # Expose the current hostgroup to the bypassing plugins
            self.host_set = hosts
            # We aren't iterating over all the hosts in this
            # group. So, just choose the "delegate_to" host if that is defined and is
            # one of the targeted hosts, otherwise pick the first host in our group to
            # construct the conn object with.
            if self.delegate_to is not None and self.delegate_to in hosts:
                host = self.delegate_to
            else:
                host = hosts[0]
            result_data = self._executor(host, None).result
            # Create a ResultData item for each host in this group
            # using the returned result. If we didn't do this we would
            # get false reports of dark hosts.
            results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
                           for h in hosts ]
            del self.host_set
        elif self.forks > 1:
            try:
                results = self._parallel_exec(hosts)
            except IOError, ie:
                print ie.errno
                if ie.errno == 32:
                    # broken pipe from Ctrl+C
                    raise errors.AnsibleError("interrupted")
                raise
        else:
            # Single fork: execute serially in-process.
            results = [ self._executor(h, None) for h in hosts ]
        return self._partition_results(results)
# *****************************************************
def run_async(self, time_limit):
''' Run this module asynchronously and return a poller. '''
self.background = time_limit
results = self.run()
return results, poller.AsyncPoller(results, self)
# *****************************************************
    def noop_on_check(self, inject):
        ''' Should the runner run in check mode or not ?

        Returns True when --check is active and the task is not marked
        always_run. The always_run conditional is resolved lazily on the
        first call and cached on the runner; the two-step assignment below is
        deliberate (the raw inventory value is stored before the conditional
        is evaluated).
        '''
        # initialize self.always_run on first call
        if self.always_run is None:
            self.always_run = self.module_vars.get('always_run', False)
            self.always_run = check_conditional(
                self.always_run, self.basedir, inject, fail_on_undefined=True)
        return (self.check and not self.always_run)
| devopservices/ansible | lib/ansible/runner/__init__.py | Python | gpl-3.0 | 69,424 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from rackspace.monitoring import monitoring_service
class NotificationType(resource.Resource):
    """Read-only Rackspace Cloud Monitoring notification type.

    Purely declarative: the base Resource class drives all behaviour from
    these class attributes. Only list and retrieve operations are allowed.
    """
    base_path = 'notification_types'
    resources_key = 'values'
    service = monitoring_service.MonitoringService()
    # capabilities
    allow_list = True
    allow_retrieve = True
    # Properties
    #: Details specific to the notification type. *Type: list*
    details = resource.prop('fields', type=list)
| briancurtin/rackspace-sdk-plugin | rackspace/monitoring/v1/notification_type.py | Python | apache-2.0 | 991 |
import sys
from ftplib import FTP
ftp = FTP()


class FTPClient():
    """Convenience wrapper around the single module-level ftplib.FTP session.

    All methods operate on the shared ``ftp`` connection. Any failure is
    printed and aborts the program via sys.exit(1), matching the behaviour
    of a simple command-line utility.
    """

    # Suffixes transferred in ASCII (line) mode; everything else is binary.
    # BUG FIX: the original tested filename.endswith(('.*')) -- that argument
    # is just the two-character string ".*", so text mode was never selected.
    TEXT_EXTENSIONS = ('.txt', '.csv', '.log', '.htm', '.html', '.xml', '.json')

    # Most recent status message; log_message() replaces it per call.
    message_array = []

    def __init__(self):
        """No connection is made here; call connect() explicitly."""
        pass

    def log_message(self, message, clear=True):
        """
        Logs the message to the message_array, from where it is retrieved to display
        :param message: The message string.
        :param clear: When True, replace the buffered message; when False the
                      call is a no-op (kept for backward compatibility).
        """
        if clear:
            self.message_array = message

    def get_message(self):
        """
        Returns the logged message to the console
        :return: Return the message.
        """
        return self.message_array

    def connect(self, server, ftp_user, ftp_password, port):
        """
        Connects the remote host to the server from the information provided.
        On success a status message is logged; on failure the error is printed
        and program execution halts.
        :param server: The address of the server
        :param ftp_user: The FTP user id.
        :param ftp_password: The FTP password.
        :param port: The port number.
        """
        try:
            ftp.connect(server, port)
            ftp.login(user=ftp_user, passwd=ftp_password)
            self.log_message("Connect to {0} for {1} on port {2}".format(server, ftp_user, port))
        except Exception as e:
            print(e)
            sys.exit(1)

    def make_directory(self, directory):
        """
        Create a new directory on the connected server (relative to the
        current working directory).
        :param directory: Directory name to create.
        """
        try:
            ftp.mkd(directory)
            self.log_message("Directory {0} created successfully".format(directory))
        except Exception as e:
            print(e)
            sys.exit(1)

    def upload_file(self, filename):
        """
        Upload *filename* to the server's current directory, choosing ASCII
        or binary transfer mode from the file extension.
        :param filename: Name of the file to upload.
        """
        try:
            if filename.lower().endswith(self.TEXT_EXTENSIONS):
                with open(filename, 'r') as f:
                    # BUG FIX: the FTP store command is STOR (RFC 959); the
                    # original sent the invalid verb 'STOP' and always failed.
                    ftp.storlines('STOR {}'.format(filename), f)
            else:
                with open(filename, 'rb') as f:
                    ftp.storbinary('STOR {}'.format(filename), f)
            self.log_message("Uploaded {0} in {1}".format(filename, ftp.pwd()))
        except Exception as e:
            print(e)
            sys.exit(1)

    def change_directory(self, directory):
        """
        CD into the given remote directory.
        :param directory: Directory name to change to it.
        """
        try:
            ftp.cwd(directory)
            self.log_message("Current Directory is now {0}".format(ftp.pwd()))
        except Exception as e:
            print(e)
            sys.exit(1)

    def get_directory_listing(self):
        """
        Print a listing of the remote current working directory.
        """
        data = []
        ftp.dir(data.append)
        for line in data:
            print("-", line)
        self.log_message("Listed all the files in {0}".format(ftp.pwd()))

    def download_file(self, filename):
        """
        Download *filename* from the server into the local working directory.
        :param filename: Name of the file to download.
        """
        try:
            ftp.retrbinary("RETR " + filename, open(filename, 'wb').write)
            self.log_message("Downloaded {0}".format(filename))
        except Exception as e:
            print(e)
            sys.exit(1)

    def directory_exists(self, directory_name):
        """
        CD into *directory_name*, creating it first when it does not already
        exist in the remote current directory.
        :param directory_name: Directory name to check its existence.
        """
        try:
            new_dir_name = directory_name.strip("/")
            if new_dir_name in ftp.nlst():
                self.change_directory(directory_name)
            else:
                self.make_directory(directory_name)
                self.change_directory(directory_name)
        except Exception as e:
            print(e)
            sys.exit(1)

    def __del__(self):
        """Close the FTP connection; errors during teardown are ignored
        (the session may never have been connected)."""
        try:
            ftp.close()
        except Exception:
            pass
| yekeqiang/mypython | myftp/mypackage/ftp_module.py | Python | gpl-2.0 | 4,731 |
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import reactor
from p2ner.abstract.pipeelement import PipeElement
from p2ner.base.Peer import Peer
import time
from messages.rttmessage import RTTMessage
class FlowControlElement(PipeElement):
    """Pipe element that adapts per-peer bandwidth from send results.

    The increase/decrease methods implement an AIMD-style scheme, but both
    are currently short-circuited (see the NOTE comments below).
    """
    def initElement(self,bw=200000,inFactor=1.1,decFactor=0.8):
        # bw: initial per-peer bandwidth; inFactor: multiplicative increase
        # factor; decFactor: multiplicative decrease factor.
        self.inFactor=inFactor
        self.inDFactor=inFactor
        self.decFactor=decFactor
        self.bw=bw
        self.registerMessages()
    def registerMessages(self):
        # Messages this element understands (RTT probes).
        self.messages = []
        self.messages.append(RTTMessage())
    def send(self, res, msg, data, peer):
        # res[0] is the value forwarded down the pipe; res[1] signals whether
        # the transmit failed and drives the bandwidth update.
        ret=res[0]
        self.updateBandwidth(res[1],peer)
        return ret
    def updateBandwidth(self,failed,peer):
        if not peer.bw:
            # First contact with this peer: seed its flow-control state.
            peer.bw=self.bw
            peer.lastTransmit=time.time()
            peer.prevFailed=False
            peer.reset=True
            peer.prevBw=0
            peer.thresBw=[]
            return
        if not failed:
            self.increaseBandwidth(peer)
        else:
            self.decreaseBandwidth(peer,failed)
    def increaseBandwidth(self,peer):
        # NOTE(review): this unconditional return disables the additive
        # increase logic below -- everything after it is dead code,
        # presumably left in while the algorithm was being tuned. Confirm
        # before re-enabling.
        return
        if time.time()-peer.lastTransmit>0.3 and not peer.prevFailed:
            peer.prevBw=peer.bw
            peer.bw=self.inFactor*peer.bw
            peer.lastTransmit=time.time()
            peer.prevFailed=False
            # NOTE(review): self-assignment below is a no-op.
            self.inFactor=self.inFactor
            #print 'increase bw for ',peer,' from ',peer.prevBw,' to ',peer.bw
        elif peer.prevFailed and peer.reset:
            peer.reset=False
            reactor.callLater(3,self.resetFailed,peer)
    def decreaseBandwidth(self,peer,failed):
        # NOTE(review): dead code below this return as well (see above).
        return
        if not peer.prevFailed and peer.prevBw:
            # Remember the last-known-good bandwidth (bounded history of 10).
            peer.thresBw.append(peer.prevBw)
            peer.thresBw=peer.thresBw[-10:]
            peer.prevFailed=True
            #print 'first decrease bw for ',peer,' from ',peer.prevBw,' to ',peer.bw
            #print peer.thresBw
            sum=0
            for b in peer.thresBw:
                sum +=b
            average=sum/len(peer.thresBw)
            print 'average:',sum/len(peer.thresBw)
            peer.bw=0.9*average
            peer.bwAverage=average
            self.inFactor=self.inDFactor
        else:
            peer.prevBw=peer.bw
            peer.bw=peer.bw*self.decFactor
            #print 'decrease bw for ',peer,' from ',peer.prevBw,' to ',peer.bw
            #self.breakCall()
    def resetFailed(self,peer):
        # Called from the reactor 3s after a failure to re-arm the peer.
        peer.prevFailed=False
        peer.reset=True
        print 'resetinggggggggg'
| schristakidis/p2ner | p2ner/components/pipeelement/flowcontrolelement/flowcontrolelement/flowcontrol.py | Python | apache-2.0 | 3,252 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add nullable social-network URL fields to UserProfile.

    Declarative Django migration: adds ``sn_facebook`` and ``sn_google``
    CharFields (max 1000 chars, NULL allowed) to the management app's
    UserProfile model.
    """
    dependencies = [
        ('management', '0005_remove_userprofile_photos'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='sn_facebook',
            field=models.CharField(max_length=1000, null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='userprofile',
            name='sn_google',
            field=models.CharField(max_length=1000, null=True),
            preserve_default=True,
        ),
    ]
| trivago-ggarrido/PsyAna | management/migrations/0006_auto_20150118_0911.py | Python | gpl-2.0 | 675 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *
import os
import re
import ast
from ub import config
import ub
session = None
cc_exceptions = ['datetime', 'int', 'comments', 'float', 'composite', 'series']
cc_classes = None
engine = None
# user defined sort function for calibre databases (Series, etc.)
def title_sort(title):
    """Return *title* rewritten for sorting, calibre style.

    A leading article matched by ``config.config_title_regex`` (for example
    "The ") is moved to the end: "The Hobbit" -> "Hobbit, The". Titles with
    no leading article are returned stripped but otherwise unchanged.
    """
    # calibre sort stuff
    title_pat = re.compile(config.config_title_regex, re.IGNORECASE)
    match = title_pat.search(title)
    if match:
        prep = match.group(1)
        # BUG FIX: the original used title.replace(prep, ''), which removed
        # *every* occurrence of the article ("The Theory" became " ory").
        # Only the matched span is removed now.
        title = (title[:match.start()] + title[match.end():]) + ', ' + prep
    return title.strip()
# Declarative base shared by every mapped class in this module.
Base = declarative_base()

# Many-to-many association tables linking books to their metadata rows.
# Each row pairs a books.id with the id of the related entity.
books_authors_link = Table('books_authors_link', Base.metadata,
                           Column('book', Integer, ForeignKey('books.id'), primary_key=True),
                           Column('author', Integer, ForeignKey('authors.id'), primary_key=True)
                           )
books_tags_link = Table('books_tags_link', Base.metadata,
                        Column('book', Integer, ForeignKey('books.id'), primary_key=True),
                        Column('tag', Integer, ForeignKey('tags.id'), primary_key=True)
                        )
books_series_link = Table('books_series_link', Base.metadata,
                          Column('book', Integer, ForeignKey('books.id'), primary_key=True),
                          Column('series', Integer, ForeignKey('series.id'), primary_key=True)
                          )
books_ratings_link = Table('books_ratings_link', Base.metadata,
                           Column('book', Integer, ForeignKey('books.id'), primary_key=True),
                           Column('rating', Integer, ForeignKey('ratings.id'), primary_key=True)
                           )
books_languages_link = Table('books_languages_link', Base.metadata,
                             Column('book', Integer, ForeignKey('books.id'), primary_key=True),
                             Column('lang_code', Integer, ForeignKey('languages.id'), primary_key=True)
                             )
books_publishers_link = Table('books_publishers_link', Base.metadata,
                              Column('book', Integer, ForeignKey('books.id'), primary_key=True),
                              Column('publisher', Integer, ForeignKey('publishers.id'), primary_key=True)
                              )
class Identifiers(Base):
    """A single external identifier (ISBN, DOI, Amazon id, ...) of a book."""
    __tablename__ = 'identifiers'
    id = Column(Integer, primary_key=True)
    type = Column(String)
    val = Column(String)
    book = Column(Integer, ForeignKey('books.id'))
    def __init__(self, val, type, book):
        self.val = val
        self.type = type
        self.book = book
    def formatType(self):
        """Return a human-readable label for this identifier's type."""
        labels = {
            "amazon": u"Amazon",
            "isbn": u"ISBN",
            "doi": u"DOI",
            "goodreads": u"Goodreads",
        }
        # Unknown types fall back to the raw type string.
        return labels.get(self.type, self.type)
    def __repr__(self):
        """Return the canonical lookup URL for this identifier (u'' if unknown)."""
        url_templates = {
            "amazon": u"https://amzn.com/{0}",
            "isbn": u"http://www.worldcat.org/isbn/{0}",
            "doi": u"http://dx.doi.org/{0}",
            "goodreads": u"http://www.goodreads.com/book/show/{0}",
            "douban": u"https://book.douban.com/subject/{0}",
        }
        template = url_templates.get(self.type)
        if template is None:
            return u""
        return template.format(self.val)
class Comments(Base):
    """Description/review text attached to a single book."""
    __tablename__ = 'comments'
    id = Column(Integer, primary_key=True)
    text = Column(String)
    book = Column(Integer, ForeignKey('books.id'))  # owning book
    def __init__(self, text, book):
        self.text = text
        self.book = book
    def __repr__(self):
        return u"<Comments({0})>".format(self.text)
class Tags(Base):
    """A tag name; linked to books via books_tags_link."""
    __tablename__ = 'tags'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String)
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return u"<Tags('{0})>".format(self.name)
class Authors(Base):
    """An author; linked to books via books_authors_link."""
    __tablename__ = 'authors'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    sort = Column(String)  # sortable form of the name ("Last, First")
    link = Column(String)  # optional URL for the author
    def __init__(self, name, sort, link):
        self.name = name
        self.sort = sort
        self.link = link
    def __repr__(self):
        return u"<Authors('{0},{1}{2}')>".format(self.name, self.sort, self.link)
class Series(Base):
    """A book series; linked to books via books_series_link."""
    __tablename__ = 'series'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    sort = Column(String)  # sortable form of the series name
    def __init__(self, name, sort):
        self.name = name
        self.sort = sort
    def __repr__(self):
        return u"<Series('{0},{1}')>".format(self.name, self.sort)
class Ratings(Base):
    """A rating value; linked to books via books_ratings_link."""
    __tablename__ = 'ratings'
    id = Column(Integer, primary_key=True)
    rating = Column(Integer)
    def __init__(self, rating):
        self.rating = rating
    def __repr__(self):
        return u"<Ratings('{0}')>".format(self.rating)
class Languages(Base):
    """A language code; linked to books via books_languages_link."""
    __tablename__ = 'languages'
    id = Column(Integer, primary_key=True)
    lang_code = Column(String)
    def __init__(self, lang_code):
        self.lang_code = lang_code
    def __repr__(self):
        return u"<Languages('{0}')>".format(self.lang_code)
class Publishers(Base):
    """A publisher; linked to books via books_publishers_link."""
    __tablename__ = 'publishers'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    sort = Column(String)  # sortable form of the publisher name
    def __init__(self, name,sort):
        self.name = name
        self.sort = sort
    def __repr__(self):
        return u"<Publishers('{0},{1}')>".format(self.name, self.sort)
class Data(Base):
    """One stored file format (EPUB, MOBI, ...) of a book."""
    __tablename__ = 'data'
    id = Column(Integer, primary_key=True)
    book = Column(Integer, ForeignKey('books.id'))  # owning book
    format = Column(String)             # upper-case format name in calibre dbs
    uncompressed_size = Column(Integer)  # size in bytes
    name = Column(String)                # file name without extension
    def __init__(self, book, format, uncompressed_size, name):
        self.book = book
        self.format = format
        self.uncompressed_size = uncompressed_size
        self.name = name
    def __repr__(self):
        return u"<Data('{0},{1}{2}{3}')>".format(self.book, self.format, self.uncompressed_size, self.name)
class Books(Base):
    """Central book record of the calibre metadata.db.

    Related metadata (authors, tags, series, ...) is mapped through the
    association tables declared above; custom columns are attached
    dynamically by setup_db().
    """
    __tablename__ = 'books'
    id = Column(Integer, primary_key=True)
    title = Column(String)
    sort = Column(String)          # sortable form of the title
    author_sort = Column(String)
    timestamp = Column(String)
    pubdate = Column(String)
    series_index = Column(String)
    last_modified = Column(String)
    path = Column(String)          # book directory relative to the library root
    has_cover = Column(Integer)
    uuid = Column(String)
    authors = relationship('Authors', secondary=books_authors_link, backref='books')
    tags = relationship('Tags', secondary=books_tags_link, backref='books')
    comments = relationship('Comments', backref='books')
    data = relationship('Data', backref='books')
    series = relationship('Series', secondary=books_series_link, backref='books')
    ratings = relationship('Ratings', secondary=books_ratings_link, backref='books')
    languages = relationship('Languages', secondary=books_languages_link, backref='books')
    publishers = relationship('Publishers', secondary=books_publishers_link, backref='books')
    identifiers = relationship('Identifiers', backref='books')
    def __init__(self, title, sort, author_sort, timestamp, pubdate, series_index, last_modified, path, has_cover,
                 authors, tags, languages = None):
        # NOTE(review): authors, tags and languages are accepted but never
        # assigned; the relationships appear to be populated separately by
        # callers -- confirm before relying on these parameters.
        self.title = title
        self.sort = sort
        self.author_sort = author_sort
        self.timestamp = timestamp
        self.pubdate = pubdate
        self.series_index = series_index
        self.last_modified = last_modified
        self.path = path
        self.has_cover = has_cover
    def __repr__(self):
        return u"<Books('{0},{1}{2}{3}{4}{5}{6}{7}{8}')>".format(self.title, self.sort, self.author_sort,
                                                                 self.timestamp, self.pubdate, self.series_index,
                                                                 self.last_modified, self.path, self.has_cover)
class Custom_Columns(Base):
    """Definition row for a user-defined calibre column."""
    __tablename__ = 'custom_columns'
    id = Column(Integer, primary_key=True)
    label = Column(String)
    name = Column(String)
    datatype = Column(String)   # e.g. 'bool', 'text', 'series', ...
    mark_for_delete = Column(Boolean)
    editable = Column(Boolean)
    display = Column(String)    # python-literal dict stored as a string
    is_multiple = Column(Boolean)
    normalized = Column(Boolean)
    def get_display_dict(self):
        """Parse the stored ``display`` string into a dict (safe literal eval)."""
        display_dict = ast.literal_eval(self.display)
        return display_dict
def setup_db():
    """Connect to the calibre metadata.db and finish building the ORM layer.

    Creates the module-level engine and session, validates the configured
    calibre directory (resetting the stored configuration when it cannot be
    opened), registers calibre's title_sort() SQL function, and dynamically
    generates mapped classes plus Books relationships for the library's
    custom columns.

    Returns True on success, False when no usable calibre dir is configured.
    """
    global engine
    global session
    global cc_classes
    if config.config_calibre_dir is None or config.config_calibre_dir == u'':
        return False
    dbpath = os.path.join(config.config_calibre_dir, "metadata.db")
    engine = create_engine('sqlite:///' + dbpath, echo=False, isolation_level="SERIALIZABLE")
    try:
        conn = engine.connect()
    except Exception:
        # The database could not be opened: forget the configured calibre
        # dir so the UI asks for it again.
        content = ub.session.query(ub.Settings).first()
        content.config_calibre_dir = None
        content.db_configured = False
        ub.session.commit()
        config.loadSettings()
        return False
    content = ub.session.query(ub.Settings).first()
    content.db_configured = True
    ub.session.commit()
    config.loadSettings()
    # Expose calibre's python title_sort() to SQL expressions in this db.
    conn.connection.create_function('title_sort', 1, title_sort)
    if not cc_classes:
        cc = conn.execute("SELECT id, datatype FROM custom_columns")
        cc_ids = []
        books_custom_column_links = {}
        cc_classes = {}
        for row in cc:
            # Datatypes in cc_exceptions are handled elsewhere or unsupported.
            if row.datatype not in cc_exceptions:
                books_custom_column_links[row.id] = Table('books_custom_column_' + str(row.id) + '_link', Base.metadata,
                                                          Column('book', Integer, ForeignKey('books.id'),
                                                                 primary_key=True),
                                                          Column('value', Integer,
                                                                 ForeignKey('custom_column_' + str(row.id) + '.id'),
                                                                 primary_key=True)
                                                          )
                cc_ids.append([row.id, row.datatype])
                if row.datatype == 'bool':
                    # Bool columns reference the book directly instead of
                    # going through a link table.
                    ccdict = {'__tablename__': 'custom_column_' + str(row.id),
                              'id': Column(Integer, primary_key=True),
                              'book': Column(Integer, ForeignKey('books.id')),
                              'value': Column(Boolean)}
                else:
                    ccdict = {'__tablename__': 'custom_column_' + str(row.id),
                              'id': Column(Integer, primary_key=True),
                              'value': Column(String)}
                cc_classes[row.id] = type('Custom_Column_' + str(row.id), (Base,), ccdict)
        # Attach a relationship per custom column onto Books.
        # (Loop variable renamed from 'id' to avoid shadowing the builtin.)
        for cc_id, cc_datatype in cc_ids:
            if cc_datatype == 'bool':
                setattr(Books, 'custom_column_' + str(cc_id),
                        relationship(cc_classes[cc_id],
                                     primaryjoin=(Books.id == cc_classes[cc_id].book),
                                     backref='books'))
            else:
                setattr(Books, 'custom_column_' + str(cc_id),
                        relationship(cc_classes[cc_id],
                                     secondary=books_custom_column_links[cc_id],
                                     backref='books'))
    Session = sessionmaker()
    Session.configure(bind=engine)
    session = Session()
    return True
import xmlrpclib
server = xmlrpclib.ServerProxy("http://effbot.org/rpc/echo.cgi")
# Python 2 demo script: calls the remote echo() endpoint over XML-RPC,
# printing the expected value followed by the actual round-tripped value.
print "'testing'"
print repr(server.echo("testing"))
# Multiple mixed-type arguments come back as a list.
print "['testing', 'testing', 1, 2.0, [3]]"
print repr(server.echo("testing", "testing", 1, 2.0, [3]))
| Yinxiaoli/iros2015_folding | src/folding_control/src/xmlrpclib-1.0.1/echotest.py | Python | mit | 248 |
import os
import sys
import codecs
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    BUG FIX: the original leaked the file handle; it is now closed
    deterministically. No explicit encoding is passed, preserving the
    original read behaviour.
    """
    f = codecs.open(os.path.join(os.path.dirname(__file__), fname))
    try:
        return f.read()
    finally:
        f.close()
# Provided as an attribute, so you can append to these instead
# of replicating them:
# Default glob patterns excluded from package data (sources, backups,
# hidden files) and from directory traversal (VCS/build/egg dirs).
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
                                './dist', 'EGG-INFO', '*.egg-info')
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
    where='.', package='',
    exclude=standard_exclude,
    exclude_directories=standard_exclude_directories,
    only_in_packages=True,
    show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.

    (Python 2 code: uses the ``print >> sys.stderr`` statement form.)
    """
    out = {}
    # Iterative directory walk: each stack entry carries the directory, the
    # path prefix inside the package, the dotted package name, and whether
    # non-package dirs should still be skipped at this level.
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "Directory %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                if (os.path.isfile(os.path.join(fn, '__init__.py'))
                    and not prefix):
                    # Subdirectory is itself a package: recurse with a new
                    # dotted package name and an empty prefix.
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # Plain data directory: keep the package, extend the prefix.
                    stack.append((fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "File %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
# Package definition. The version string is resolved at build time by
# versiontools from html5video's package metadata.
setup(
    name="html5video",
    version=":versiontools:html5video:",
    url='http://html5video.readthedocs.org/',
    license='MIT',
    description="Converts a source video file into the multiple video container and codecs required to support HTML5's <video>tag.",
    long_description=read('README.rst'),
    author='Dougal Matthews',
    author_email='dougal85@gmail.com',
    packages=find_packages(exclude=['tests', 'tests.*']),
    package_data=find_package_data('html5video', only_in_packages=False),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
    ],
    scripts=['html5video/bin/html5video'],
    zip_safe=False,
    install_requires=[
        'Unipath >= 0.2.1',
    ],
    setup_requires=[
        'Unipath >= 0.2.1',
        'versiontools >= 1.6',
    ],
)
| d0ugal-archive/html5video | setup.py | Python | mit | 5,057 |
# Copyright (c) 2009, Tim Cuthbertson # All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the organisation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import sys
import linecache
import re
import time
colorama = None
if os.name == 'nt':
import colorama
import nose
import termstyle
# Labels used when reporting each test outcome in verbose mode.
failure = 'FAILED'
error = 'ERROR'
success = 'passed'
skip = 'skipped'
# Number of single-character results printed per row in non-verbose mode.
line_length = 77
# True when running under Python 3, where every ``str`` is already unicode.
PY3 = sys.version_info[0] >= 3
if PY3:
    # Nothing to coerce on Python 3.
    to_unicode = str
else:
    def to_unicode(s):
        """Best-effort coercion of *s* to ``unicode`` (Python 2 only)."""
        try:
            return unicode(s)
        except UnicodeDecodeError:
            pass
        raw = str(s)
        try:
            # Most byte strings in the wild are UTF-8 encoded.
            return unicode(raw, 'UTF-8')
        except UnicodeDecodeError:
            # Undecodable bytes: fall back to their ``repr``.
            return unicode(repr(raw))
# Regexes matched against the *calling* file's name; ``write`` calls coming
# from these files are suppressed so nose/unittest default output does not
# interleave with rednose's own report (see FilteringStream below).
BLACKLISTED_WRITERS = [
    'nose[\\/]result\\.pyc?$',
    'unittest[\\/]runner\\.pyc?$'
]
# Overwritten from the REDNOSE_DEBUG environment variable in RedNose.options().
REDNOSE_DEBUG = False
class RedNose(nose.plugins.Plugin):
    """Nose plugin that renders test results in colour (red/yellow/green)."""
    env_opt = 'NOSE_REDNOSE'
    env_opt_color = 'NOSE_REDNOSE_COLOR'
    score = 199 # just under the `coverage` module
    def __init__(self, *args):
        super(RedNose, self).__init__(*args)
        # Collected (type, test, err) tuples for the end-of-run report.
        self.reports = []
        # Per-outcome counters.
        self.error = self.success = self.failure = self.skip = 0
        self.total = 0
        self.stream = None
        self.verbose = False
        self.enabled = False
        self.tree = False
    def options(self, parser, env=os.environ):
        """Register command-line options and read the env-var defaults."""
        global REDNOSE_DEBUG
        rednose_on = bool(env.get(self.env_opt, False))
        rednose_color = env.get(self.env_opt_color, 'auto')
        REDNOSE_DEBUG = bool(env.get('REDNOSE_DEBUG', False))
        parser.add_option(
            "--rednose",
            action="store_true",
            default=rednose_on,
            dest="rednose",
            help="enable colour output (alternatively, set $%s=1)" % (self.env_opt,)
        )
        parser.add_option(
            "--no-color",
            action="store_false",
            dest="rednose",
            help="disable colour output"
        )
        parser.add_option(
            "--force-color",
            action="store_const",
            dest='rednose_color',
            default=rednose_color,
            const='force',
            help="force colour output when not using a TTY (alternatively, set $%s=force)" % (self.env_opt_color,)
        )
        parser.add_option(
            "--immediate",
            action="store_true",
            default=False,
            help="print errors and failures as they happen, as well as at the end"
        )
    def configure(self, options, conf):
        """Enable the plugin and initialise termstyle's colour mode."""
        if options.rednose:
            self.enabled = True
            termstyle_init = {
                'force': termstyle.enable,
                'off': termstyle.disable
            }.get(options.rednose_color, termstyle.auto)
            termstyle_init()
            self.immediate = options.immediate
            self.verbose = options.verbosity >= 2
    def begin(self):
        # Remember the start time so _summarize() can report a duration.
        self.start_time = time.time()
        self._in_test = False
    def _format_test_name(self, test):
        # Prefer the test docstring; fall back to its string representation.
        return test.shortDescription() or to_unicode(test)
    def prepareTestResult(self, result):
        # Replace nose's stream so blacklisted default writers are silenced.
        result.stream = FilteringStream(self.stream, BLACKLISTED_WRITERS)
    def beforeTest(self, test):
        self._in_test = True
        if self.verbose:
            self._out(self._format_test_name(test) + ' ... ')
    def afterTest(self, test):
        # A test that produced no explicit result is counted as skipped.
        if self._in_test:
            self.addSkip()
    def _print_test(self, type_, color):
        """Print one result: the full label (verbose) or a single character."""
        self.total += 1
        if self.verbose:
            self._outln(color(type_))
        else:
            if type_ == failure:
                short_ = 'F'
            elif type_ == error:
                short_ = 'X'
            elif type_ == skip:
                short_ = '-'
            else:
                short_ = '.'
            self._out(color(short_))
            # Wrap the dot-line after `line_length` results.
            if self.total % line_length == 0:
                self._outln()
        self._in_test = False
    def _add_report(self, report):
        """Record a failure/error; print it right away in --immediate mode."""
        failure_type, test, err = report
        self.reports.append(report)
        if self.immediate:
            self._outln()
            self._report_test(len(self.reports), *report)
    def addFailure(self, test, err):
        self.failure += 1
        self._add_report((failure, test, err))
        self._print_test(failure, termstyle.red)
    def addError(self, test, err):
        # nose reports SkipTest as an error; reclassify it as a skip.
        if err[0].__name__ == 'SkipTest':
            self.addSkip(test, err)
            return
        self.error += 1
        self._add_report((error, test, err))
        self._print_test(error, termstyle.yellow)
    def addSuccess(self, test):
        self.success += 1
        self._print_test(success, termstyle.green)
    def addSkip(self, test=None, err=None):
        self.skip += 1
        self._print_test(skip, termstyle.blue)
    def setOutputStream(self, stream):
        # On Windows, wrap the stream so ANSI colour codes are translated.
        if colorama:
            stream = colorama.initialise.wrap_stream(stream, convert=True, strip=False, autoreset=False, wrap=True)
        self.stream = stream
    def report(self, stream):
        """report on all registered failures and errors"""
        self._outln()
        if self.immediate:
            # Visually separate the final report from the immediate output.
            for x in range(0, 5):
                self._outln()
        report_num = 0
        if len(self.reports) > 0:
            for report_num, report in enumerate(self.reports):
                self._report_test(report_num + 1, *report)
            self._outln()
        self._summarize()
    def _summarize(self):
        """summarize all tests - the number of failures, errors and successes"""
        self._line(termstyle.black)
        self._out("%s test%s run in %0.1f seconds" % (
            self.total,
            self._plural(self.total),
            time.time() - self.start_time))
        # Anything that is not a success gets an itemised breakdown.
        if self.total > self.success:
            self._outln(". ")
            additionals = []
            if self.failure > 0:
                additionals.append(termstyle.red("%s FAILED" % (
                    self.failure,)))
            if self.error > 0:
                additionals.append(termstyle.yellow("%s error%s" % (
                    self.error,
                    self._plural(self.error) )))
            if self.skip > 0:
                additionals.append(termstyle.blue("%s skipped" % (
                    self.skip)))
            self._out(', '.join(additionals))
        self._out(termstyle.green(" (%s test%s passed)" % (
            self.success,
            self._plural(self.success) )))
        self._outln()
    def _report_test(self, report_num, type_, test, err):
        """report the results of a single (failing or errored) test"""
        self._line(termstyle.black)
        self._out("%s) " % (report_num))
        if type_ == failure:
            color = termstyle.red
            self._outln(color('FAIL: %s' % (self._format_test_name(test),)))
        else:
            color = termstyle.yellow
            self._outln(color('ERROR: %s' % (self._format_test_name(test),)))
        exc_type, exc_instance, exc_trace = err
        self._outln()
        self._outln(self._fmt_traceback(exc_trace))
        self._out(color(' ', termstyle.bold(color(exc_type.__name__)), ": "))
        self._outln(self._fmt_message(exc_instance, color))
        self._outln()
    def _relative_path(self, path):
        """
        If path is a child of the current working directory, the relative
        path is returned surrounded by bold xterm escape sequences.
        If path is not a child of the working directory, path is returned
        """
        try:
            here = os.path.abspath(os.path.realpath(os.getcwd()))
            fullpath = os.path.abspath(os.path.realpath(path))
        except OSError:
            return path
        if fullpath.startswith(here):
            return termstyle.bold(fullpath[len(here)+1:])
        return path
    def _file_line(self, tb):
        """formats the file / lineno / function line of a traceback element"""
        prefix = "file://"
        prefix = ""
        f = tb.tb_frame
        if '__unittest' in f.f_globals:
            # this is the magical flag that prevents unittest internal
            # code from junking up the stacktrace
            return None
        filename = f.f_code.co_filename
        lineno = tb.tb_lineno
        linecache.checkcache(filename)
        function_name = f.f_code.co_name
        line_contents = linecache.getline(filename, lineno, f.f_globals).strip()
        return " %s line %s in %s\n %s" % (
            termstyle.blue(prefix, self._relative_path(filename)),
            termstyle.bold(termstyle.cyan(lineno)),
            termstyle.cyan(function_name),
            line_contents)
    def _fmt_traceback(self, trace):
        """format a traceback"""
        ret = []
        ret.append(termstyle.default(" Traceback (most recent call last):"))
        current_trace = trace
        while current_trace is not None:
            line = self._file_line(current_trace)
            if line is not None:
                ret.append(line)
            current_trace = current_trace.tb_next
        return '\n'.join(ret)
    def _fmt_message(self, exception, color):
        """Colour an exception message; stop colouring at captured stdout."""
        orig_message_lines = to_unicode(exception).splitlines()
        if len(orig_message_lines) == 0:
            return ''
        message_lines = [color(orig_message_lines[0])]
        for line in orig_message_lines[1:]:
            match = re.match('^---.* begin captured stdout.*----$', line)
            if match:
                color = None
                message_lines.append('')
            line = ' ' + line
            message_lines.append(color(line) if color is not None else line)
        return '\n'.join(message_lines)
    def _out(self, msg='', newline=False):
        self.stream.write(msg)
        if newline:
            self.stream.write('\n')
    def _outln(self, msg=''):
        self._out(msg, True)
    def _plural(self, num):
        return '' if num == 1 else 's'
    def _line(self, color=termstyle.reset, char='-'):
        """
        print a line of separator characters (default '-')
        in the given colour (default black)
        """
        self._outln(color(char * line_length))
import traceback
import sys
class FilteringStream(object):
    """
    A wrapper for a stream that will filter
    calls to `write` and `writeln` to ignore calls
    from blacklisted callers
    (implemented as a regex on their filename, according
    to traceback.extract_stack())
    It's super hacky, but there seems to be no other way
    to suppress nose's default output
    """
    def __init__(self, stream, excludes):
        """
        :param stream: underlying stream that accepted writes delegate to
        :param excludes: iterable of regex strings matched against the
            calling file's name; a match suppresses the write
        """
        self.__stream = stream
        self.__excludes = list(map(re.compile, excludes))
    def __should_filter(self):
        """Return True when the caller of write()/writeln() is blacklisted."""
        try:
            # limit=3 yields (oldest-first): [caller, write/writeln, here],
            # so element 0 is the frame that invoked the write.
            stack = traceback.extract_stack(limit=3)[0]
            filename = stack[0]
            pattern_matches_filename = lambda pattern: pattern.search(filename)
            should_filter = any(map(pattern_matches_filename, self.__excludes))
            if REDNOSE_DEBUG:
                # BUG FIX: this was a Python 2 ``print >> stream`` statement,
                # which is broken under ``from __future__ import
                # print_function`` (and on Python 3).
                print(
                    "REDNOSE_DEBUG: got write call from %s, should_filter = %s" % (
                        filename, should_filter),
                    file=sys.stderr)
            return should_filter
        except Exception as e:
            # BUG FIX: ``StandardError`` does not exist on Python 3; catching
            # it raised a NameError that masked the original problem.
            if REDNOSE_DEBUG:
                print("\nError in rednose filtering: %s" % (e,), file=sys.stderr)
                # BUG FIX: print_exc()'s first positional parameter is
                # ``limit``; the destination must be passed as ``file=``.
                traceback.print_exc(file=sys.stderr)
            return False
    def write(self, *a):
        if self.__should_filter():
            return
        return self.__stream.write(*a)
    def writeln(self, *a):
        if self.__should_filter():
            return
        return self.__stream.writeln(*a)
    # pass non-known methods through to self.__stream
    def __getattr__(self, name):
        if REDNOSE_DEBUG:
            print("REDNOSE_DEBUG: getting attr %s" % (name,), file=sys.stderr)
        return getattr(self.__stream, name)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/rednose.py | Python | agpl-3.0 | 11,413 |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
import json
import re
import click
from tabulate import tabulate
from platformio.clients.account import AccountClient
def validate_orgname_teamname(value, teamname_validate=False):
    """Check that *value* looks like ``orgname:teamname``.

    Optionally also validates the team-name half; the original (unstripped)
    value is returned untouched on success.
    """
    if ":" not in value:
        raise click.BadParameter(
            "Please specify organization and team name in the next"
            " format - orgname:teamname. For example, mycompany:DreamTeam"
        )
    if teamname_validate:
        _, team_part = value.strip().split(":", 1)
        validate_teamname(str(team_part))
    return value
def validate_teamname(value):
    """Validate and normalise a team name; falsy values pass through as-is."""
    if not value:
        return value
    value = str(value).strip()
    # One leading alphanumeric, then up to 19 further characters where '-',
    # '_' and ' ' may only appear between alphanumerics (case-insensitive).
    pattern = r"^[a-z\d](?:[a-z\d]|[\-_ ](?=[a-z\d])){0,19}$"
    if re.match(pattern, value, flags=re.I) is None:
        raise click.BadParameter(
            "Invalid team name format. "
            "Team name must only contain alphanumeric characters, "
            "single hyphens, underscores, spaces. It can not "
            "begin or end with a hyphen or a underscore and must"
            " not be longer than 20 characters."
        )
    return value
@click.group("team", short_help="Manage Teams")
def cli():
    # Root of the ``team`` sub-command group; subcommands attach via @cli.command.
    pass
@cli.command("create", short_help="Create a new team")
@click.argument(
    "orgname_teamname",
    metavar="ORGNAME:TEAMNAME",
    callback=lambda _, __, value: validate_orgname_teamname(
        value, teamname_validate=True
    ),
)
@click.option("--description",)
def team_create(orgname_teamname, description):
    """Create a team inside the given organization."""
    org, team = orgname_teamname.split(":", 1)
    AccountClient().create_team(org, team, description)
    return click.secho(
        "The team %s has been successfully created." % team, fg="green",
    )
@cli.command("list", short_help="List teams")
@click.argument("orgname", required=False)
@click.option("--json-output", is_flag=True)
def team_list(orgname, json_output):
    """List teams of one organization, or of every organization you belong to."""
    client = AccountClient()
    if orgname:
        org_names = [orgname]
    else:
        org_names = [org.get("orgname") for org in client.list_orgs()]
    data = {name: client.list_teams(name) for name in org_names}
    if json_output:
        return click.echo(json.dumps(data[orgname] if orgname else data))
    if not any(data.values()):
        return click.secho("You do not have any teams.", fg="yellow")
    for org_name in data:
        for team in data[org_name]:
            click.echo()
            header = "%s:%s" % (org_name, team.get("name"))
            click.secho(header, fg="cyan")
            click.echo("-" * len(header))
            rows = []
            if team.get("description"):
                rows.append(("Description:", team.get("description")))
            members = team.get("members")
            rows.append(
                (
                    "Members:",
                    ", ".join(m.get("username") for m in members)
                    if members
                    else "-",
                )
            )
            click.echo(tabulate(rows, tablefmt="plain"))
    return click.echo()
@cli.command("update", short_help="Update team")
@click.argument(
    "orgname_teamname",
    metavar="ORGNAME:TEAMNAME",
    callback=lambda _, __, value: validate_orgname_teamname(value),
)
@click.option(
    "--name", callback=lambda _, __, value: validate_teamname(value),
)
@click.option("--description",)
def team_update(orgname_teamname, **kwargs):
    """Update team fields, prompting interactively when no option was given."""
    orgname, teamname = orgname_teamname.split(":", 1)
    client = AccountClient()
    current = client.get_team(orgname, teamname)
    # "id" and "members" are server-managed and must not be sent back.
    for readonly in ("id", "members"):
        del current[readonly]
    changes = dict(current)
    provided = {key: value for key, value in kwargs.items() if value}
    if provided:
        changes.update(provided)
    else:
        # Interactive mode: prompt for every editable field.
        for field in current:
            changes[field] = click.prompt(
                field.replace("_", " ").capitalize(), default=current[field]
            )
            if field == "name":
                validate_teamname(changes[field])
    client.update_team(orgname, teamname, changes)
    return click.secho(
        "The team %s has been successfully updated." % teamname, fg="green",
    )
@cli.command("destroy", short_help="Destroy a team")
@click.argument(
    "orgname_teamname",
    metavar="ORGNAME:TEAMNAME",
    callback=lambda _, __, value: validate_orgname_teamname(value),
)
def team_destroy(orgname_teamname):
    """Irreversibly delete a team after interactive confirmation."""
    orgname, teamname = orgname_teamname.split(":", 1)
    prompt = click.style(
        "Are you sure you want to destroy the %s team?" % teamname, fg="yellow"
    )
    click.confirm(prompt, abort=True)
    AccountClient().destroy_team(orgname, teamname)
    return click.secho(
        "The team %s has been successfully destroyed." % teamname, fg="green",
    )
@cli.command("add", short_help="Add a new member to team")
@click.argument(
    "orgname_teamname",
    metavar="ORGNAME:TEAMNAME",
    callback=lambda _, __, value: validate_orgname_teamname(value),
)
@click.argument("username",)
def team_add_member(orgname_teamname, username):
    """Add *username* to the given organization team."""
    orgname, teamname = orgname_teamname.split(":", 1)
    AccountClient().add_team_member(orgname, teamname, username)
    return click.secho(
        "The new member %s has been successfully added to the %s team."
        % (username, teamname),
        fg="green",
    )
@cli.command("remove", short_help="Remove a member from team")
@click.argument(
    "orgname_teamname",
    metavar="ORGNAME:TEAMNAME",
    callback=lambda _, __, value: validate_orgname_teamname(value),
)
@click.argument("username",)
def org_remove_owner(orgname_teamname, username):
    """Remove *username* from the given organization team."""
    orgname, teamname = orgname_teamname.split(":", 1)
    AccountClient().remove_team_member(orgname, teamname, username)
    return click.secho(
        "The %s member has been successfully removed from the %s team."
        % (username, teamname),
        fg="green",
    )
| platformio/platformio | platformio/commands/team.py | Python | apache-2.0 | 6,668 |
"""
Support tool for disabling user accounts.
"""
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.db.models import Q
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import View
from rest_framework.generics import GenericAPIView
from edxmako.shortcuts import render_to_response
from lms.djangoapps.support.decorators import require_support_permission
from openedx.core.djangoapps.user_api.accounts.serializers import AccountUserSerializer
from openedx.core.djangoapps.user_api.accounts.utils import generate_password
from util.json_request import JsonResponse
class ManageUserSupportView(View):
    """
    View for viewing and managing user accounts, used by the
    support team.
    """
    @method_decorator(require_support_permission)
    def get(self, request):
        """Render the manage user support tool view."""
        # BUG FIX: the context keys were wrapped in ugettext (``_``), so any
        # locale that translates 'username'/'user_support_url'/... would
        # rename the template variables and break the page. Context keys are
        # identifiers and must stay literal; only displayed *values* should
        # ever be translated.
        return render_to_response('support/manage_user.html', {
            'username': request.GET.get('user', ''),
            'user_support_url': reverse('support:manage_user'),
            'user_detail_url': reverse('support:manage_user_detail')
        })
class ManageUserDetailView(GenericAPIView):
    """
    Allows viewing and disabling learner accounts by support
    staff.
    """
    # TODO: ARCH-91
    # This view is excluded from Swagger doc generation because it
    # does not specify a serializer class.
    exclude_from_schema = True
    @method_decorator(require_support_permission)
    def get(self, request, username_or_email):
        """
        Returns details for the given user, along with
        information about its username and joining date.
        """
        try:
            # NOTE(review): .get() may also raise MultipleObjectsReturned if
            # one account's username equals another account's email; that
            # case is not handled here and would surface as a 500 — confirm.
            user = get_user_model().objects.get(
                Q(username=username_or_email) | Q(email=username_or_email)
            )
            data = AccountUserSerializer(user, context={'request': request}).data
            # "Usable" means the account can log in with its password; an
            # unusable password is how this tool marks a disabled account.
            data['status'] = _('Usable') if user.has_usable_password() else _('Unusable')
            return JsonResponse(data)
        except get_user_model().DoesNotExist:
            # An empty JSON list signals "no such user" to the frontend.
            return JsonResponse([])
    @method_decorator(require_support_permission)
    def post(self, request, username_or_email):
        """Allows support staff to disable a user's account."""
        # NOTE(review): unlike get(), DoesNotExist is not caught here, so
        # posting an unknown user results in a 500 — confirm intended.
        user = get_user_model().objects.get(
            Q(username=username_or_email) | Q(email=username_or_email)
        )
        # Toggle: a usable password becomes unusable (disable); an unusable
        # one is replaced with a fresh random password (re-enable).
        if user.has_usable_password():
            user.set_unusable_password()
        else:
            user.set_password(generate_password(length=25))
        user.save()
        if user.has_usable_password():
            password_status = _('Usable')
            msg = _('User Enabled Successfully')
        else:
            password_status = _('Unusable')
            msg = _('User Disabled Successfully')
        return JsonResponse({'success_msg': msg, 'status': password_status})
| philanthropy-u/edx-platform | lms/djangoapps/support/views/manage_user.py | Python | agpl-3.0 | 2,973 |
"""Check that there is enough disk space in predefined paths."""
import tempfile
import os.path
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class DiskAvailability(OpenShiftCheck):
    """Check that recommended disk space is available before a first-time install."""
    name = "disk_availability"
    tags = ["preflight"]
    # Values taken from the official installation documentation:
    # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
    recommended_disk_space_bytes = {
        '/var': {
            'oo_masters_to_config': 40 * 10**9,
            'oo_nodes_to_config': 15 * 10**9,
            'oo_etcd_to_config': 20 * 10**9,
        },
        # Used to copy client binaries into,
        # see roles/openshift_cli/library/openshift_container_binary_sync.py.
        '/usr/local/bin': {
            'oo_masters_to_config': 1 * 10**9,
            'oo_nodes_to_config': 1 * 10**9,
            'oo_etcd_to_config': 1 * 10**9,
        },
        # Used as temporary storage in several cases.
        tempfile.gettempdir(): {
            'oo_masters_to_config': 1 * 10**9,
            'oo_nodes_to_config': 1 * 10**9,
            'oo_etcd_to_config': 1 * 10**9,
        },
    }
    # recommended disk space for each location under an upgrade context
    recommended_disk_upgrade_bytes = {
        '/var': {
            'oo_masters_to_config': 10 * 10**9,
            'oo_nodes_to_config': 5 * 10 ** 9,
            'oo_etcd_to_config': 5 * 10 ** 9,
        },
    }
    def is_active(self):
        """Skip hosts that do not have recommended disk space requirements."""
        group_names = self.get_var("group_names", default=[])
        active_groups = set()
        for recommendation in self.recommended_disk_space_bytes.values():
            active_groups.update(recommendation.keys())
        # Only run when the host belongs to at least one group we have a
        # recommendation for.
        has_disk_space_recommendation = bool(active_groups.intersection(group_names))
        return super(DiskAvailability, self).is_active() and has_disk_space_recommendation
    def run(self):
        # Entry point: compare free space under each recommended path with
        # the (possibly user-overridden) threshold; failures are registered
        # rather than raised.
        group_names = self.get_var("group_names")
        user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
        try:
            # For backwards-compatibility, if openshift_check_min_host_disk_gb
            # is a number, then it overrides the required config for '/var'.
            # NOTE(review): a non-numeric *string* here raises ValueError,
            # which is not caught — confirm inputs are numbers or dicts.
            number = float(user_config)
            user_config = {
                '/var': {
                    'oo_masters_to_config': number,
                    'oo_nodes_to_config': number,
                    'oo_etcd_to_config': number,
                },
            }
        except TypeError:
            # If it is not a number, then it should be a nested dict.
            pass
        self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
        if user_config:
            self.register_log("user-configured thresholds", user_config)
        # TODO: as suggested in
        # https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
        # maybe we could support checking disk availability in paths that are
        # not part of the official recommendation but present in the user
        # configuration.
        for path, recommendation in self.recommended_disk_space_bytes.items():
            free_bytes = self.free_bytes(path)
            # Take the strictest recommendation among the host's groups.
            recommended_bytes = max(recommendation.get(name, 0) for name in group_names)
            config = user_config.get(path, {})
            # NOTE: the user config is in GB, but we compare bytes, thus the
            # conversion.
            config_bytes = max(config.get(name, 0) for name in group_names) * 10**9
            recommended_bytes = config_bytes or recommended_bytes
            # if an "upgrade" context is set, update the minimum disk requirement
            # as this signifies an in-place upgrade - the node might have the
            # required total disk space, but some of that space may already be
            # in use by the existing OpenShift deployment.
            context = self.get_var("r_openshift_health_checker_playbook_context", default="")
            if context == "upgrade":
                recommended_upgrade_paths = self.recommended_disk_upgrade_bytes.get(path, {})
                if recommended_upgrade_paths:
                    recommended_bytes = config_bytes or max(recommended_upgrade_paths.get(name, 0)
                                                            for name in group_names)
            if free_bytes < recommended_bytes:
                free_gb = float(free_bytes) / 10**9
                recommended_gb = float(recommended_bytes) / 10**9
                msg = (
                    'Available disk space in "{}" ({:.1f} GB) '
                    'is below minimum recommended ({:.1f} GB)'
                ).format(path, free_gb, recommended_gb)
                # warn if check failed under an "upgrade" context
                # due to limits imposed by the user config
                if config_bytes and context == "upgrade":
                    msg += ('\n\nMake sure to account for decreased disk space during an upgrade\n'
                            'due to an existing OpenShift deployment. Please check the value of\n'
                            '  openshift_check_min_host_disk_gb={}\n'
                            'in your Ansible inventory, and lower the recommended disk space availability\n'
                            'if necessary for this upgrade.').format(config_bytes)
                self.register_failure(msg)
        return {}
    def find_ansible_submounts(self, path):
        """Return a list of ansible_mounts that are below the given path."""
        base = os.path.join(path, "")
        return [
            mount
            for mount in self.get_var("ansible_mounts")
            if mount["mount"].startswith(base)
        ]
    def free_bytes(self, path):
        """Return the size available in path based on ansible_mounts."""
        # Free space on the owning mount plus any mounts nested under path.
        submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path))
        mount = self.find_ansible_mount(path)
        try:
            return mount['size_available'] + submounts
        except KeyError:
            raise OpenShiftCheckException(
                'Unable to retrieve disk availability for "{path}".\n'
                'Ansible facts included a matching mount point for this path:\n'
                '  {mount}\n'
                'however it is missing the size_available field.\n'
                'To investigate, you can inspect the output of `ansible -m setup <host>`'
                ''.format(path=path, mount=mount)
            )
| zhiwliu/openshift-ansible | roles/openshift_health_checker/openshift_checks/disk_availability.py | Python | apache-2.0 | 6,703 |
"""
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import re
import os
import logging
import random
import multiprocessing
import numpy
from ..sources import DerivedSource, VanillaSource, ChunkSource
from . import Supplier
from ..utils import package
from ..utils import count_lines
from ..utils import get_audio_features
from ..utils import Normalize
logger = logging.getLogger(__name__)
###############################################################################
def _init_data_worker():
    # Pool-worker initializer: workers ignore SIGINT so that Ctrl+C is
    # handled only by the parent process, which can then shut the pool
    # down cleanly (see RawUtterance.load_audio).
    import signal
    signal.signal(signal.SIGINT, signal.SIG_IGN)
###############################################################################
def _load_single(args):
    """
    This function is called through an instance of multiprocessing.Pool
    in the RawUtterance source. We do not make this function an instance
    method of RawUtterance in order to avoid unnecessary pickling of
    RawUtterance instances which would fail due to the presence of some
    unpickleable instance variables.
    """
    (feature_type, high_freq, on_error), paths = args
    features = []
    for path in paths:
        feat = get_audio_features(
            path,
            feature_type=feature_type,
            high_freq=high_freq,
            on_error=on_error
        )
        if feat is None:
            logger.error('Failed to load audio file at path: %s', path)
        features.append(feat)
    return features
###############################################################################
def loop_copy(src, dest):
    """ Copies a source array into a destination array, looping
        through the source array until the entire destination array
        is written.

    # Arguments

        src: numpy array. The source array. Must be non-empty.
        dest: numpy array. The destination array.

    # Return value

        The destination array.

    # Raises

        ValueError: if `src` is empty (there is nothing to tile).

    # Notes

        If the source array is longer than the destination array, then
        only the first N entries of the source array are copied into the
        destination, where N is the length of the destination array.
        If the source array is shorter than, or of equal length with, the
        destination array, then it will be copied into the destination array
        and repeated, back-to-back, until the destination is filled up.
    """
    if len(src) == 0:
        # BUG FIX: an empty source previously made the fill loop below spin
        # forever (zero entries copied per iteration, `i` never advances).
        raise ValueError('Cannot loop-copy from an empty source array.')
    if len(src) > len(dest):
        dest[:] = src[:len(dest)]
    else:
        i = 0
        while i < len(dest):
            entries_to_copy = min(len(dest) - i, len(src))
            dest[i:i+entries_to_copy] = src[:entries_to_copy]
            i += len(src)
    return dest
###############################################################################
class UtteranceLength(DerivedSource):
    """ Data source that yields, for each utterance, its length in frames.
    """
    def __init__(self, source):
        super().__init__()
        self.source = source
    def derive(self, inputs):
        # ``inputs`` is a 1-tuple holding the batch of (ragged) utterances.
        utterances, = inputs
        lengths = [[len(utterance)] for utterance in utterances]
        return numpy.array(lengths, dtype='int32')
    def shape(self):
        # One scalar (the length) per sample.
        return (1, )
    def requires(self):
        return (self.source, )
###############################################################################
class Utterance(DerivedSource):
    """ Data source for model-ready audio samples. Unlike `RawUtterance`, this
        pads every batch into a rectangular tensor (no ragged arrays).
    """
    def __init__(self, source, raw, fill=None, bucket=None):
        super().__init__()
        self.source = source
        self.raw = raw
        # Padding strategy: 'zero' pads with zeros, 'loop' tiles the sample.
        self.fill = fill or 'zero'
        assert isinstance(self.fill, str) and self.fill in ('zero', 'loop')
        logger.debug('Utterance source is using fill mode "%s".', self.fill)
        if bucket:
            if not isinstance(bucket, (float, int)) or bucket <= 0:
                raise ValueError('"bucket" must be a positive number.')
            logger.debug('Utterance source is using bucket: %.3f sec', bucket)
            # 10ms audio frames mean there are 100 frames per second.
            # So convert seconds to frames.
            bucket = round(bucket * 100)
            logger.trace('This bucket is equivalent to: %d frames', bucket)
        self.bucket = bucket
    def derive(self, inputs):
        """Pad a batch of ragged utterances into one rectangular tensor."""
        utterances, = inputs
        max_len = max(len(utterance) for utterance in utterances)
        if self.bucket:
            # Round the padded length up to the next multiple of the bucket.
            max_len = (max_len-1) - ((max_len-1) % self.bucket) + self.bucket
        batch_shape = (len(utterances), max_len, self.raw.features)
        if self.fill == 'zero':
            output = numpy.zeros(shape=batch_shape)
            for index, utterance in enumerate(utterances):
                output[index][:len(utterance)] = utterance
        elif self.fill == 'loop':
            output = numpy.empty(shape=batch_shape)
            for index, utterance in enumerate(utterances):
                loop_copy(utterance, output[index])
        else:
            raise ValueError('Unhandled fill type: "{}". This is a bug.'
                .format(self.fill))
        return output
    def shape(self):
        # Variable time dimension, fixed feature dimension.
        return (None, self.raw.features)
    def requires(self):
        return (self.source, )
###############################################################################
class RawUtterance(ChunkSource):
""" Data source for audio samples
"""
DEFAULT_NORMALIZATION_DEPTH = 100
_pool = None
_data_cpus = None
###########################################################################
@property
def pool(self):
return RawUtterance._pool
###########################################################################
@property
def data_cpus(self):
return RawUtterance._data_cpus
###########################################################################
@classmethod
def _init_pool(cls, data_cpus):
assert isinstance(data_cpus, int)
data_cpus = max(1, data_cpus)
if cls._pool is None:
cls._data_cpus = data_cpus
cls._pool = multiprocessing.Pool(data_cpus, _init_data_worker)
else:
if data_cpus != cls._data_cpus:
logger.warning('"data_cpus" has already been set to %d.',
cls._data_cpus)
###########################################################################
@classmethod
def default_chunk_size(cls):
""" Returns the default chunk size for this source.
"""
return ChunkSource.USE_BATCH_SIZE
###########################################################################
def __init__(self, audio_paths, feature_type=None,
normalization=None, max_frequency=None, data_cpus=1, *args, **kwargs):
""" Creates a new raw utterance source.
"""
super().__init__(*args, **kwargs)
self.audio_paths = audio_paths
self.indices = numpy.arange(len(self))
self.feature_type = feature_type
self.features = None
self.max_frequency = max_frequency
self._init_pool(data_cpus)
self._init_normalizer(normalization)
###########################################################################
def _init_normalizer(self, params):
# Parse the specification.
if isinstance(params, str):
params = {'path' : params}
elif params is None:
params = {'path' : None}
elif not isinstance(params, dict):
raise ValueError('Unknown normalization value: {}'.format(params))
# Merge in the defaults.
defaults = {
'path' : None,
'center' : True,
'scale' : True,
'rotate' : True,
'depth' : RawUtterance.DEFAULT_NORMALIZATION_DEPTH
}
defaults.update(params)
params = defaults
# Create the normalizer.
norm = Normalize(
center=params['center'],
scale=params['scale'],
rotate=params['rotate']
)
path = params['path']
if path is None:
logger.warning('No normalization data available. We will use '
'on-the-fly (non-persistent) normalization. In the future, '
'you probably want to give a filename to the "normalization" '
'key in the speech recognition supplier.')
self.train_normalizer(norm, depth=params['depth'])
else:
path = os.path.expanduser(os.path.expandvars(path))
if os.path.exists(path):
if not os.path.isfile(path):
raise ValueError('Normalization data must be a regular '
'file. This is not: {}'.format(path))
logger.debug('Restoring normalization statistics: %s', path)
norm.restore(path)
self.features = norm.get_dimensionality()
else:
logger.info('Training new normalization statistics: %s', path)
self.train_normalizer(norm, depth=params['depth'])
norm.save(path)
# Register the normalizer
self.norm = norm
###########################################################################
def load_audio(self, partial_paths):
""" Loads unnormalized audio data.
"""
# Resolve each path.
paths = [
SpeechRecognitionSupplier.find_audio_path(partial_path)
for partial_path in partial_paths
]
for path, partial_path in zip(paths, partial_paths):
if path is None:
logger.error('Could not find audio file that---ignoring '
'extension---begins with: %s', partial_path)
paths = [path for path in paths if path]
n_paths = len(paths)
n_cpus = min(n_paths, self.data_cpus)
n = n_paths // n_cpus # paths per cpu
args = (self.feature_type, self.max_frequency, 'suppress') # arguments to be passed into worker function
# Split the paths to be processed as evenly as possible
x = [(args, paths[i * n:(i+1) * n]) for i in range(n_cpus - 1)]
# In case n_paths is not evenly divisible by n_cpus, we handle the last
# element outside of the above list comprehension
x.append((args, paths[(n_cpus - 1) * n:]))
# actually load audio via process pool
try:
result = self.pool.map(_load_single, x)
except KeyboardInterrupt:
self.pool.terminate()
self.pool.join()
# flatten the result, which is a list of lists
result = [x for y in result for x in y]
# Clean up bad audio
if any(x is None for x in result):
logger.warning('Recovering from a bad audio uttereance.')
good = None
for candidate in result:
if candidate is not None:
good = candidate
break
if good is None:
raise ValueError(
'Cannot tolerate an entire batch of bad audio.')
for i, x in enumerate(result):
if x is None:
result[i] = good
return result
###########################################################################
    def train_normalizer(self, norm, depth):
        """ Trains the normalizer on the data.

        # Arguments

        norm: Normalize instance. The normalization transform to train.
        depth: int. Maximum number of utterances to sample for training.
        """
        logger.debug('Training normalization transform.')
        # Sample without replacement, capped by the data set size.
        num_entries = min(depth, len(self.audio_paths))
        paths = random.sample(self.audio_paths, num_entries)
        data = self.load_audio(paths)
        # Cache the feature dimensionality for shape().
        self.features = data[0].shape[-1]
        norm.learn(data)
        logger.debug('Finished training normalization transform.')
###########################################################################
    def __iter__(self):
        """ Return an iterator to the data.

        Yields normalized feature batches of at most `chunk_size` utterances,
        honoring the current shuffle order recorded in `self.indices`.
        """
        start = 0
        num_entries = len(self)
        while start < num_entries:
            end = min(num_entries, start + self.chunk_size)
            paths = [self.audio_paths[i] for i in self.indices[start:end]]
            batch = self.load_audio(paths)
            # Apply the (already trained) normalization to each utterance.
            batch = [self.norm.apply(data) for data in batch]
            yield batch
            start = end
###########################################################################
    def __len__(self):
        """ Returns the total number of entries that this source can return, if
            known.
        """
        # One entry per audio file path.
        return len(self.audio_paths)
###########################################################################
    def shape(self):
        """ Return the shape of the tensor (excluding batch size) returned by
            this data source.
        """
        # (time steps, feature dimensionality); time is variable-length.
        # `self.features` is populated when the normalizer is trained/restored.
        return (None, self.features)
###########################################################################
    def can_shuffle(self):
        """ This source can be shuffled.
        """
        # Shuffling is implemented by permuting `self.indices` in shuffle().
        return True
###########################################################################
    def shuffle(self, indices):
        """ Applies a permutation to the data.

        # Arguments

        indices: numpy array. Permutation applied to the leading
            `len(indices)` entries of the current index order.
        """
        if len(indices) > len(self):
            raise ValueError('Shuffleable was asked to apply permutation, but '
                'the permutation is longer than the length of the data set.')
        self.indices[:len(indices)] = self.indices[:len(indices)][indices]
###############################################################################
class TranscriptLength(DerivedSource):
    """ Derived source that reports the length of each transcript.
    """
    def __init__(self, source):
        """ Creates a new transcript-length source tied to `source`. """
        super().__init__()
        self.source = source
    def derive(self, inputs):
        """ Maps each ragged transcript to a single-element length row. """
        (transcript, ) = inputs
        lengths = [[len(entry)] for entry in transcript]
        return numpy.array(lengths, dtype='int32')
    def shape(self):
        """ Each sample is a single integer length. """
        return (1, )
    def requires(self):
        """ Depends only on the upstream transcript source. """
        return (self.source, )
###############################################################################
class Transcript(DerivedSource):
    """ Derived source that packs ragged transcripts into a dense array.
    """
    def __init__(self, source):
        """ Creates a new dense-transcript source tied to `source`. """
        super().__init__()
        self.source = source
    def derive(self, inputs):
        """ Zero-pads every transcript out to the longest one in the batch. """
        (transcript, ) = inputs
        longest = max(len(entry) for entry in transcript)
        padded = numpy.zeros(shape=(len(transcript), longest), dtype='int32')
        for row_index, entry in enumerate(transcript):
            padded[row_index, :len(entry)] = entry
        return padded
    def shape(self):
        """ Variable-length integer sequence per sample. """
        return (None, )
    def requires(self):
        """ Depends only on the upstream transcript source. """
        return (self.source, )
###############################################################################
class RawTranscript(ChunkSource):
    """ Data source for variable-length transcripts.
    """
    ###########################################################################
    def __init__(self, transcripts, vocab=None, unknown=None, *args,
        **kwargs):
        """ Creates a new raw transcript source.

        # Arguments

        transcripts: list. One transcript (string or list of tokens) per
            utterance.
        vocab: None, str, or list/tuple. Vocabulary specification: None to
            infer it from the data, a path to a JSON file containing a list
            of words, or an explicit list of words.
        unknown: str or None. Vocabulary word substituted for
            out-of-vocabulary tokens; must itself be in the vocabulary. If
            None, out-of-vocabulary tokens are silently dropped.
        """
        super().__init__(*args, **kwargs)
        self.transcripts = transcripts
        self.indices = numpy.arange(len(self))
        self.vocab = self.make_vocab(vocab)
        if unknown is None:
            self.unknown = self.unknown_index = None
        else:
            self.unknown_index = self.vocab.get(unknown)
            if self.unknown_index is None:
                raise ValueError('The "unknown" vocabulary word must be '
                    'part of the vocabulary itself.')
            self.unknown = unknown
    ###########################################################################
    @staticmethod
    def make_lower(entry):
        """ Maps strings or lists of strings to lowercase.
        """
        if isinstance(entry, str):
            return entry.lower()
        return [x.lower() for x in entry]
    ###########################################################################
    def make_vocab(self, vocab):
        """ Loads or infers a vocabulary.

        Returns a dict mapping each (lowercased) vocabulary word to its
        integer index.
        """
        if vocab is None:
            logger.warning('Inferring vocabulary from data set.')
            data = set(x for transcript in self.transcripts \
                for x in self.make_lower(transcript))
            # Sort for a deterministic word -> index assignment.
            data = sorted(data)
        elif isinstance(vocab, str):
            logger.debug('Load vocabulary from a JSON file: %s', vocab)
            with open(vocab) as fh:
                json_data = json.load(fh)
            try:
                data = [self.make_lower(x) for x in json_data]
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not intercepted just for logging (they still propagate).
            except Exception:
                logger.exception('Expected the JSON to contain a single list '
                    'of strings. Instead, we got: %s', json_data)
                raise
        elif isinstance(vocab, (tuple, list)):
            logger.debug('Using a hard-coded vocabulary.')
            try:
                data = [self.make_lower(x) for x in vocab]
            except Exception:
                logger.exception('Expected the vocabulary to be a list of '
                    'strings. Instead, we got: %s', vocab)
                raise
        else:
            raise ValueError('Unknown vocabulary format: {}'.format(vocab))
        if len(set(data)) != len(data):
            raise ValueError('The vocabulary must contain unique entries, but '
                'we found duplicates. Make sure that all entries are unique, '
                'ignoring capitalization. That means you should not have both '
                '"x" and "X" in your vocabulary. For reference, this is the '
                'vocabulary we ended up with: {}'.format(data))
        logger.debug('Loaded a %d-word vocabulary.', len(data))
        return {x : i for i, x in enumerate(data)}
    ###########################################################################
    def word_to_integer(self, data):
        """ Maps a character transcript to its integer representation.
        """
        result = [None]*len(data)
        for i, row in enumerate(data):
            result[i] = [
                self.vocab.get(word.lower(), self.unknown_index)
                for word in row
            ]
            # Drop out-of-vocabulary tokens when no "unknown" word was
            # configured (their lookup produced None).
            result[i] = [x for x in result[i] if x is not None]
        return result
    ###########################################################################
    def __iter__(self):
        """ Return an iterator to the data.

        Yields integer-encoded transcript batches of at most `chunk_size`
        entries, honoring the current shuffle order.
        """
        start = 0
        num_entries = len(self)
        while start < num_entries:
            end = min(num_entries, start + self.chunk_size)
            batch = [self.transcripts[i] for i in self.indices[start:end]]
            batch = self.word_to_integer(batch)
            yield batch
            start = end
    ###########################################################################
    def __len__(self):
        """ Returns the total number of entries that this source can return, if
            known.
        """
        return len(self.transcripts)
    ###########################################################################
    def shape(self):
        """ Return the shape of the tensor (excluding batch size) returned by
            this data source.
        """
        # Variable-length sequence of vocabulary indices.
        return (None, )
    ###########################################################################
    def can_shuffle(self):
        """ This source can be shuffled.
        """
        return True
    ###########################################################################
    def shuffle(self, indices):
        """ Applies a permutation to the data.
        """
        if len(indices) > len(self):
            raise ValueError('Shuffleable was asked to apply permutation, but '
                'the permutation is longer than the length of the data set.')
        self.indices[:len(indices)] = self.indices[:len(indices)][indices]
###############################################################################
class SpeechRecognitionSupplier(Supplier):
    """ A supplier which handles parsing of audio + transcript data sets for
        speech recognition purposes.
    """
    DEFAULT_UNPACK = True
    DEFAULT_TYPE = 'spec'
    # Audio file extensions probed by find_audio_path(), in priority order.
    SUPPORTED_TYPES = ('wav', 'mp3', 'flac')
    ###########################################################################
    @staticmethod
    def find_audio_path(partial_path):
        """ Resolves the audio file extension.

        Returns the first existing "<partial_path>.<ext>" for a supported
        extension, or None when no candidate exists on disk.
        """
        for ext in SpeechRecognitionSupplier.SUPPORTED_TYPES:
            candidate = '{}.{}'.format(partial_path, ext)
            if os.path.isfile(candidate):
                return candidate
        return None
    ###########################################################################
    @classmethod
    def get_name(cls):
        """ Returns the name of the supplier.
        """
        return 'speech_recognition'
    ###########################################################################
    def __init__(self, url=None, path=None, checksum=None, unpack=None,
        type=None, normalization=None, min_duration=None, max_duration=None,
        max_frequency=None, vocab=None, samples=None, fill=None, key=None,
        bucket=None, data_cpus=None, unknown=None, *args, **kwargs):
        """ Creates a new speech recognition supplier.

        # Arguments

        url: str or None. Remote location of the data set (forwarded to
            `package.install`).
        path: str or None. Local location of the data set.
        checksum: str or None. Expected checksum of the data package.
        unpack: bool or None. Whether to unpack a packed data set; defaults
            to DEFAULT_UNPACK. (NOTE: `type` intentionally shadows the
            builtin so the parameter name matches the Kurfile key.)
        type: str or None. Audio feature type; defaults to DEFAULT_TYPE.
        normalization: Normalization specification, forwarded to
            RawUtterance.
        min_duration / max_duration: number or None. Duration filters
            (compared against each entry's "duration_s") applied while
            scanning the metadata file.
        max_frequency: Forwarded to RawUtterance.
        vocab: None, str, list, or dict. Vocabulary specification; a dict
            maps transcript keys to per-key vocabularies.
        samples: None, int, or str. Downselection specification (see
            `downselect`).
        fill, bucket: Forwarded to the Utterance source.
        key: str, list, or None. Key(s) of the transcript field(s) in the
            metadata file; defaults to 'text'.
        data_cpus: int or None. Number of worker processes used for audio
            loading; defaults to all-but-one CPU.
        unknown: str, list, dict, or None. Out-of-vocabulary replacement
            word(s), per transcript key.
        """
        super().__init__(*args, **kwargs)
        if unpack is None:
            unpack = SpeechRecognitionSupplier.DEFAULT_UNPACK
        self.load_data(url=url, path=path, checksum=checksum, unpack=unpack,
            min_duration=min_duration, max_duration=max_duration, text_key=key)
        self.downselect(samples)
        logger.trace('Creating sources.')
        # Leave one CPU free by default for the rest of the pipeline.
        data_cpus = max(1, multiprocessing.cpu_count() - 1 if not data_cpus else data_cpus)
        utterance_raw = RawUtterance(
            self.data['audio'],
            feature_type=type or SpeechRecognitionSupplier.DEFAULT_TYPE,
            normalization=normalization,
            max_frequency=max_frequency,
            data_cpus=data_cpus
        )
        self.sources = {
            'utterance_raw' : utterance_raw,
            'utterance_length' : UtteranceLength('utterance_raw'),
            'utterance' : Utterance(
                'utterance_raw',
                utterance_raw,
                fill=fill,
                bucket=bucket
            ),
            'duration' : VanillaSource(numpy.array(self.data['duration'])),
            'audio_source' : VanillaSource(numpy.array(self.data['audio']))
        }
        # Build one set of transcript sources per transcript key. With a
        # single key, the sources keep their unprefixed historical names.
        for i, (k, v) in enumerate(self.data['transcript'].items()):
            logger.trace('Parsing the vocabulary for key: %s', k)
            if len(self.data['transcript']) == 1:
                prefix = ''
            else:
                prefix = '{}_'.format(k)
            if isinstance(vocab, dict):
                if k in vocab:
                    this_vocab = vocab[k]
                else:
                    raise ValueError('If the vocabulary is a dictionary, then '
                        'it must have keys corresponding to the text keys.')
            elif isinstance(vocab, (list, tuple)):
                # A list of lists is a per-key vocabulary; a flat list is a
                # single shared vocabulary. (The generator's `v` has its own
                # scope and does not clobber the outer loop variable.)
                if all(isinstance(v, (list, tuple)) for v in vocab):
                    this_vocab = vocab[i]
                else:
                    this_vocab = vocab
            else:
                this_vocab = vocab
            if isinstance(unknown, dict):
                if k in unknown:
                    this_unknown = unknown[k]
                else:
                    raise ValueError('If "unknown" is a dictionary, then it '
                        'must have keys corresponding to the text keys.')
            elif isinstance(unknown, (list, tuple)):
                this_unknown = unknown[i]
            else:
                this_unknown = unknown
            raw_name = '{}transcript_raw'.format(prefix)
            self.sources.update({
                raw_name : RawTranscript(
                    v,
                    vocab=this_vocab,
                    unknown=this_unknown
                ),
                '{}transcript_length'.format(prefix) : TranscriptLength(
                    raw_name
                ),
                '{}transcript'.format(prefix) : Transcript(raw_name)
            })
    ###########################################################################
    def downselect(self, samples):
        """ Selects a subset of the data.

        # Arguments

        samples: None, int, or str. If None, uses all samples. If an
            integer, uses the first `samples` entries. If a string, follows
            the `Sample Specification` below.

        # Sample Specification

        Forms: Meaning:
        10 Use 10 samples: 0 through 10.
        10- Use all but 10 samples: use the 10th through
        the remainder.
        10% Use the first 10% of samples.
        10-20 Use 10 samples: 10 through 19.
        10-20% Use 10% of samples, from 10% through 20%.
        When combined with a random seed, this lets you
        split a dataset on the fly.
        10-% Use all but 10% of samples: from 10% through
        the remainder.

        # Notes:

        - For percentage ranges of the form "X-%" or "X-Y%", Kur will
        compute the percentage and then add one to the start value.
        This makes it easier for you to use a random seed and then
        make dataset splits like 10%, 10-20%, 20% without worrying
        about having disjoint datasets.
        - As evidenced by the examples above, Kur follows Python in using
        ranges that exclude the upper bound (e.g., "10" means "0 .. 9"
        and "10-20" means "10..19").
        """
        if samples is None:
            logger.trace('Using all available data.')
            return
        elif isinstance(samples, int):
            if samples < 1:
                raise ValueError('"samples" cannot be less than 1.')
            if samples >= self.metadata['entries']:
                return
            logger.debug('Using only %d / %d samples of the available data.',
                samples, self.metadata['entries'])
            start = 0
            end = samples
        elif isinstance(samples, str):
            # Grammar: START [ '-' [END] ] [ '%' ]
            regex = re.compile(
                r'(?P<start>[0-9]+(?:\.[0-9]*)?)'
                r'(?:(?P<range>-)'
                r'(?P<end>[0-9]+(?:\.[0-9]*)?)?'
                r')?'
                r'(?P<unit>%)?'
            )
            match = regex.match(samples)
            if not match:
                raise ValueError('Failed to parse the "samples" '
                    'specification: {}'.format(samples))
            result = match.groupdict()
            start = float(result['start'])
            if result['range']:
                if result['end']:
                    end = float(result['end'])
                elif result['unit']:
                    end = 100
                else:
                    end = self.metadata['entries']
            else:
                # Bare "N" / "N%" means "from the beginning up to N".
                end = start
                start = 0
            if result['unit']:
                # NOTE(review): the docstring above says percentage starts
                # are incremented by one, but no "+1" appears here --
                # confirm which behavior is intended.
                start = int(self.metadata['entries'] * (start / 100))
                end = int(self.metadata['entries'] * (end / 100))
            else:
                start = int(start)
                end = int(end)
            # Clamp the requested window to the data set bounds.
            start = min(max(0, start), self.metadata['entries'])
            end = min(max(0, end), self.metadata['entries'])
            if start == 0 and end == self.metadata['entries']:
                return
            if start >= end:
                raise ValueError('No samples pass this "samples" cut: [{}, {})'
                    .format(start, end))
        else:
            raise TypeError('Invalid/unexpected type for "samples": {}'
                .format(samples))
        # Create the seeded random number generator.
        gen = numpy.random.RandomState(
            seed=self.kurfile.get_seed() if self.kurfile else None
        )
        # Produce a mask (True = keep, False = discard)
        mask = numpy.zeros(self.metadata['entries'], dtype=bool)
        indices = gen.permutation(self.metadata['entries'])[start:end]
        mask[indices] = True
        # Downselect
        for k in self.data:
            if isinstance(self.data[k], dict):
                for inner in self.data[k]:
                    self.data[k][inner] = [
                        x for i, x in enumerate(self.data[k][inner]) if mask[i]
                    ]
            else:
                self.data[k] = [
                    x for i, x in enumerate(self.data[k]) if mask[i]
                ]
        self.metadata['entries'] = int(end - start)
    ###########################################################################
    def load_data(self, url=None, path=None, checksum=None, unpack=None,
        min_duration=None, max_duration=None, text_key=None):
        """ Loads the data for this supplier.

        Installs (downloading if necessary) and optionally unpacks the data
        package, then scans its metadata into `self.metadata`/`self.data`.
        """
        logger.info('Loading input dataset: %s', path if path else url)
        local_path, is_packed = package.install(
            url=url,
            path=path,
            checksum=checksum
        )
        manifest = None
        if is_packed and unpack:
            if os.path.isfile(local_path) \
                and os.path.splitext(local_path)[1].lower() == '.jsonl':
                logger.trace('Data is actually unpacked JSONL.')
                manifest = [local_path]
                local_path = os.path.dirname(local_path)
            else:
                logger.trace('Unpacking input data: %s', local_path)
                manifest = package.unpack(local_path, recursive=True)
            is_packed = False
        elif is_packed and not unpack:
            logger.trace('Using packed input data.')
            raise NotImplementedError
        elif not is_packed and unpack:
            logger.trace('Using already unpacked input data.')
        elif not is_packed and not unpack:
            logger.trace('Ignore "unpack" for input data, since it is already '
                'unpacked.')
        else:
            logger.error('Unhandled data package requirements. This is a bug.')
        self.metadata, self.data = self.get_metadata(
            manifest=manifest,
            root=local_path,
            min_duration=min_duration,
            max_duration=max_duration,
            text_key=text_key
        )
    ###########################################################################
    def get_metadata(self, manifest=None, root=None, min_duration=None,
        max_duration=None, text_key=None):
        """ Scans the package for a metadata file, makes sure everything is in
            order, and returns some information about the data set.

        Returns a `(metadata, data)` tuple: `metadata` describes the data
        set (entry count, metadata filename, audio source directory) and
        `data` holds the per-entry audio paths, transcripts, and durations.
        """
        logger.trace('Looking for metadata file.')
        metadata_file = None
        # Normalize `text_key` to a tuple of transcript keys.
        if not text_key:
            text_key = ('text', )
        elif isinstance(text_key, str):
            text_key = (text_key, )
        elif isinstance(text_key, list):
            text_key = tuple(text_key)
        elif not isinstance(text_key, tuple):
            raise ValueError('"key" must be a string, None, or a list. '
                'Instead, we received: {}'.format(text_key))
        def look_in_list(filenames):
            """ Searches a list of files for a JSONL file.
            """
            for filename in filenames:
                parts = os.path.splitext(filename)
                if parts[1].lower() == '.jsonl' and \
                    not os.path.basename(filename).startswith('.'):
                    return filename
            return None
        if manifest is None:
            if root is None:
                raise ValueError('No root provided and no manifest provided. '
                    'This is a bug.')
            if not os.path.isdir(root):
                raise ValueError('Root is not a directory. This is a bug.')
            for dirpath, _, filenames in os.walk(root):
                metadata_file = look_in_list(filenames)
                if metadata_file is not None:
                    metadata_file = os.path.join(dirpath, metadata_file)
                    break
        else:
            metadata_file = look_in_list(manifest)
        if metadata_file is None:
            raise ValueError('Failed to find a JSONL metadata file.')
        # Audio files are expected in an "audio" directory next to the
        # metadata file.
        source = os.path.join(
            os.path.dirname(metadata_file),
            'audio'
        )
        logger.debug('Found metadata file: %s', metadata_file)
        logger.debug('Inferred source path: %s', source)
        logger.debug('Scanning metadata file.')
        # Pre-allocate by line count; unused tail entries are trimmed below.
        lines = count_lines(metadata_file)
        logger.debug('Entries counted: %d', lines)
        logger.debug('Loading metadata.')
        data = {
            'audio' : [None]*lines,
            'transcript' : {k : [None]*lines for k in text_key},
            'duration' : [None]*lines
        }
        entries = 0
        valid_entries = 0
        required_keys = text_key + ('duration_s', 'uuid')
        with open(metadata_file, 'r') as fh:
            for line_number, line in enumerate(fh, 1):
                # Bail out early if more than half the file is malformed.
                if line_number >= 10 and valid_entries < line_number/2:
                    raise ValueError('Data file has too many bad entries: {}'
                        .format(metadata_file))
                try:
                    entry = json.loads(line)
                except json.decoder.JSONDecodeError:
                    logger.warning('Failed to parse valid JSON on line %d of '
                        'file %s', line_number, metadata_file)
                    continue
                bad = False
                for k in required_keys:
                    if k not in entry:
                        logger.warning('Line %d is missing one of its '
                            'required keys in metadata file %s: %s. Available '
                            'keys are: %s', line_number, metadata_file, k,
                            ', '.join(entry.keys()))
                        bad = True
                        break
                if bad:
                    continue
                valid_entries += 1
                # Duration filters drop entries without counting them as bad.
                duration = entry['duration_s']
                if min_duration and duration < min_duration:
                    continue
                if max_duration and duration > max_duration:
                    continue
                data['duration'][entries] = entry['duration_s']
                for k in text_key:
                    data['transcript'][k][entries] = entry[k]
                data['audio'][entries] = os.path.join(source, entry['uuid'])
                entries += 1
        logger.debug('Entries kept: %d', entries)
        # Trim the pre-allocated lists down to the entries actually kept.
        for k in data:
            if isinstance(data[k], dict):
                for inner in data[k]:
                    data[k][inner] = data[k][inner][:entries]
            else:
                data[k] = data[k][:entries]
        metadata = {
            'entries' : entries,
            'filename' : metadata_file,
            'source' : source
        }
        return metadata, data
    ###########################################################################
    def get_sources(self, sources=None):
        """ Returns all sources from this provider.

        # Arguments

        sources: None, str, or list. Names of the sources to return; None
            returns all of them.
        """
        if sources is None:
            sources = list(self.sources.keys())
        elif not isinstance(sources, (list, tuple)):
            sources = [sources]
        for source in sources:
            if source not in self.sources:
                raise KeyError(
                    'Invalid data key: {}. Valid keys are: {}'.format(
                        source, ', '.join(str(k) for k in self.sources.keys())
                    ))
        return {k : self.sources[k] for k in sources}
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
| deepgram/kur | kur/supplier/speechrec.py | Python | apache-2.0 | 30,208 |
# -*- coding: utf-8 -*-
# srpregister.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import binascii
import json
import logging
import requests
import srp
from PySide import QtCore
from urlparse import urlparse
from leap.bitmask.config.providerconfig import ProviderConfig
from leap.bitmask.util.constants import SIGNUP_TIMEOUT
from leap.bitmask.util.request_helpers import get_content
from leap.common.check import leap_assert, leap_assert_type
logger = logging.getLogger(__name__)
class SRPRegisterImpl:
    """
    Transport-level SRP registration: computes a salted password verifier
    for a username/password pair and POSTs it to the provider's
    registration endpoint over a CA-pinned HTTPS session.
    """
    # Form field names expected by the provider's webapp.
    USER_LOGIN_KEY = 'user[login]'
    USER_VERIFIER_KEY = 'user[password_verifier]'
    USER_SALT_KEY = 'user[password_salt]'
    STATUS_ERROR = -999  # Custom error status
    def __init__(self, provider_config, register_path):
        """
        :param provider_config: loaded provider configuration
        :type provider_config: ProviderConfig
        :param register_path: webapp path for registering users
        :type register_path: str
        """
        leap_assert(provider_config, "Please provide a provider")
        leap_assert_type(provider_config, ProviderConfig)
        self._provider_config = provider_config
        # **************************************************** #
        # Dependency injection helpers, override this for more
        # granular testing
        self._fetcher = requests
        self._srp = srp
        self._hashfun = self._srp.SHA256
        self._ng = self._srp.NG_1024
        # **************************************************** #
        parsed_url = urlparse(provider_config.get_api_uri())
        self._provider = parsed_url.hostname
        self._port = parsed_url.port
        if self._port is None:
            # NOTE(review): default port kept as the *string* "443"; it is
            # only interpolated into a URI so this works, but it differs in
            # type from urlparse's integer port.
            self._port = "443"
        self._register_path = register_path
        # Reuse one HTTP session (connection pooling, cookies).
        self._session = self._fetcher.session()
    def register_user(self, username, password):
        """
        Registers a user with the validator based on the password provider

        :param username: username to register
        :type username: str
        :param password: password for this username
        :type password: str

        :returns: a (success, status_code) pair; status_code is
                  STATUS_ERROR when no HTTP response was received.
        :rtype: tuple of (bool, int)
        """
        # SRP requires byte strings; usernames are case-insensitive.
        username = username.lower().encode('utf-8')
        password = password.encode('utf-8')
        salt, verifier = self._srp.create_salted_verification_key(
            username,
            password,
            self._hashfun,
            self._ng)
        user_data = {
            self.USER_LOGIN_KEY: username,
            self.USER_VERIFIER_KEY: binascii.hexlify(verifier),
            self.USER_SALT_KEY: binascii.hexlify(salt)
        }
        uri = self._get_registration_uri()
        logger.debug('Post to uri: %s' % uri)
        logger.debug("Will try to register user = %s" % (username,))
        ok = False
        req = None
        try:
            # Verify the TLS connection against the provider's own CA cert.
            req = self._session.post(uri,
                                     data=user_data,
                                     timeout=SIGNUP_TIMEOUT,
                                     verify=self._provider_config.
                                     get_ca_cert_path())
        except requests.exceptions.RequestException as exc:
            # NOTE(review): exc.message is Python 2 only; this module
            # targets Python 2 (PySide/urlparse imports above).
            logger.error(exc.message)
        else:
            ok = req.ok
        status_code = self.STATUS_ERROR
        if req is not None:
            status_code = req.status_code
        if not ok:
            # Best-effort extraction of the server's error message; any
            # failure while parsing it is only logged.
            try:
                content, _ = get_content(req)
                json_content = json.loads(content)
                error_msg = json_content.get("errors").get("login")[0]
                if not error_msg.istitle():
                    error_msg = "%s %s" % (username, error_msg)
                logger.error(error_msg)
            except Exception as e:
                logger.error("Unknown error: %r" % (e, ))
        return ok, status_code
    def _get_registration_uri(self):
        """
        Returns the URI where the register request should be made for
        the provider

        :rtype: str
        """
        uri = "https://%s:%s/%s/%s" % (
            self._provider,
            self._port,
            self._provider_config.get_api_version(),
            self._register_path)
        return uri
class SRPRegister(QtCore.QObject):
    """
    Registers a user to a specific provider using SRP
    """
    # HTTP status codes reported by the provider's webapp.
    STATUS_OK = (200, 201)
    STATUS_TAKEN = 422
    def __init__(self, signaler=None,
                 provider_config=None, register_path="users"):
        """
        Constructor.

        :param signaler: Signaler object used to notify the backend of the
                         registration outcome (may be None)
        :type signaler: Signaler
        :param provider_config: properly loaded provider configuration
        :type provider_config: ProviderConfig
        :param register_path: webapp path for registering users
        :type register_path: str
        """
        self._srp_register = SRPRegisterImpl(provider_config, register_path)
        QtCore.QObject.__init__(self)
        self._signaler = signaler
    def register_user(self, username, password):
        """
        Registers a user with the validator based on the password provider

        :param username: username to register
        :type username: str
        :param password: password for this username
        :type password: str

        :returns: whether the registration succeeded
        :rtype: bool
        """
        succeeded, status = self._srp_register.register_user(
            username, password)
        self._emit_result(status)
        return succeeded
    def _emit_result(self, status_code):
        """
        Emit the signal corresponding to *status_code*, if a signaler is
        configured.

        :param status_code: the status code received.
        :type status_code: int or str
        """
        logger.debug("Status code is: {0}".format(status_code))
        signaler = self._signaler
        if signaler is None:
            return
        if status_code in self.STATUS_OK:
            outcome = signaler.srp_registration_finished
        elif status_code == self.STATUS_TAKEN:
            outcome = signaler.srp_registration_taken
        else:
            outcome = signaler.srp_registration_failed
        signaler.signal(outcome)
if __name__ == "__main__":
    # Manual smoke test (Python 2): configure console logging and try to
    # register two test accounts against a locally available provider
    # definition.
    logger = logging.getLogger(name='leap')
    logger.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s '
        '- %(name)s - %(levelname)s - %(message)s')
    console.setFormatter(formatter)
    logger.addHandler(console)
    provider = ProviderConfig()
    if provider.load("leap/providers/bitmask.net/provider.json"):
        register = SRPRegister(provider_config=provider)
        print "Registering user..."
        print register.register_user("test1", "sarasaaaa")
        print register.register_user("test2", "sarasaaaa")
| andrejb/bitmask_client | src/leap/bitmask/crypto/srpregister.py | Python | gpl-3.0 | 7,329 |
class SessionHelper:
    """Drives login/logout flows of the web app through the test fixture."""
    def __init__(self, app):
        self.app = app
    def login(self, user, password):
        """Open the home page and submit the login form."""
        driver = self.app.wd
        self.app.open_home_page()
        self.app.type_text("user", user)
        self.app.type_text("pass", password)
        driver.find_element_by_css_selector("input[value='Login']").click()
    def ensure_login(self, user, password):
        """Make sure we are logged in as `user`, switching accounts if needed."""
        driver = self.app.wd
        if self.is_logged_in():
            if self.is_logged_in_as(user):
                return
            self.logout()
        self.login(user, password)
    def logout(self):
        """Follow the "Logout" link."""
        driver = self.app.wd
        driver.find_element_by_link_text("Logout").click()
    def ensure_logout(self):
        """Log out only when a session is currently active."""
        driver = self.app.wd
        if self.is_logged_in():
            self.logout()
    def is_logged_in(self):
        """Return True when a "Logout" link is present on the page."""
        driver = self.app.wd
        return len(driver.find_elements_by_link_text("Logout")) > 0
    def is_logged_in_as(self, user):
        """Return True when the active session belongs to `user`."""
        driver = self.app.wd
        return self.get_logged_user() == user
    def get_logged_user(self):
        """Read the user name from the logout form, stripping the parentheses."""
        driver = self.app.wd
        return driver.find_element_by_xpath("//form[@name='logout']/b").text[1:-1]
| obutkalyuk/Python_15 | fixture/session.py | Python | apache-2.0 | 1,157 |
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, pi, fabs
from integrate import *
from fitting import *
def f(x):
    """Integrand 1 / (1 + x^2); its integral over [0, 1] equals pi / 4."""
    denominator = 1 + x * x
    return 1 / denominator
# Target error tolerances: 1e-2 down to 1e-12.
targets = [10**(-val) for val in range(2, 13)]
# Only the first `comp_bound` targets are actually computed below.
comp_bound = 7
# Choose n ~ target^(-1/6) sub-intervals per target -- presumably because
# the error here scales like n^-6; TODO confirm against romberg_integrate.
rom_nums = [1 / target ** (1 / 6) for target in targets[0: comp_bound]]
rom_nums = [int(val) for val in rom_nums]
rom_err = []
for n in rom_nums:
    res = romberg_integrate(f, 0, 1, n)
    # Exact value of the integral of 1/(1+x^2) over [0, 1] is pi/4.
    rom_err.append(fabs(res - pi / 4))
    # pass
# evaluate the log of data
log_nums = np.log10(rom_nums)
log_err = np.log10(rom_err)
plt.figure()
plt.scatter(log_nums, log_err, color='r')
# plt.xscale('log')
# plt.yscale('log')
plt.title('Romberg\'s method')
plt.xlabel('number of sub-intervals')
plt.ylabel('error')
plt.grid()
# plt.show()
# Fit a straight line to the log-log data; its slope estimates the
# empirical order of convergence.
fit_bound = 7
fit_res = linear_fit(log_nums[:fit_bound], log_err[:fit_bound])
x = np.linspace(log_nums.min(), log_nums.max())
y = np.array(evaluate_linear_result(x, fit_res))
plt.plot(x, y)
# Render the fitted line as "a + b x" or "a - |b| x" for the annotation.
if fit_res[1] > 0:
    func_name = '$%.2f + %.2f x$' % tuple(fit_res)
else:
    func_name = '$%.2f - %.2f x$' % tuple([fit_res[0], fabs(fit_res[1])])
plt.annotate(func_name,
             xy=(1.1, -15), xycoords='data',
             xytext=(-70, 0), textcoords='offset points',
             arrowprops=dict(facecolor='green', shrink=0.05),
             horizontalalignment='right',
             verticalalignment='bottom')
plt.show()
| Jokiva/Computational-Physics | lecture 10/romberg_err.py | Python | gpl-3.0 | 1,390 |
"""
Copyright (c) 2011, Michael Joseph Walsh.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" | nemonik/Intellect | intellect/examples/bahBahBlackSheep/__init__.py | Python | bsd-3-clause | 1,628 |
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime, time
from collections import namedtuple
from django import forms
from django.utils.dateparse import parse_datetime
from django.utils.encoding import force_str
from django.utils.translation import ugettext_lazy as _
from .utils import handle_timezone
from .widgets import RangeWidget, LookupTypeWidget, CSVWidget, BaseCSVWidget
class RangeField(forms.MultiValueField):
    """Two-input form field whose compressed value is a ``slice``."""
    widget = RangeWidget
    def __init__(self, fields=None, *args, **kwargs):
        # Default to a pair of decimal bounds when no subfields are given.
        if fields is None:
            fields = (forms.DecimalField(), forms.DecimalField())
        super(RangeField, self).__init__(fields, *args, **kwargs)
    def compress(self, data_list):
        """Pack the two cleaned values into ``slice(lower, upper)``."""
        if not data_list:
            return None
        return slice(*data_list)
class DateRangeField(RangeField):
    """Range field over dates, cleaned into an inclusive datetime slice."""
    def __init__(self, *args, **kwargs):
        bounds = (forms.DateField(), forms.DateField())
        super(DateRangeField, self).__init__(bounds, *args, **kwargs)
    def compress(self, data_list):
        """Expand the two dates to day boundaries and wrap them in a slice."""
        if not data_list:
            return None
        lower, upper = data_list
        if lower:
            # Start of the lower day.
            lower = handle_timezone(
                datetime.combine(lower, time.min),
                False
            )
        if upper:
            # End of the upper day, keeping the range inclusive.
            upper = handle_timezone(
                datetime.combine(upper, time.max),
                False
            )
        return slice(lower, upper)
class DateTimeRangeField(RangeField):
    """Range field whose bounds are both full datetimes."""
    def __init__(self, *args, **kwargs):
        bounds = (forms.DateTimeField(), forms.DateTimeField())
        super(DateTimeRangeField, self).__init__(bounds, *args, **kwargs)
class TimeRangeField(RangeField):
    """Range field whose bounds are both times of day."""
    def __init__(self, *args, **kwargs):
        bounds = (forms.TimeField(), forms.TimeField())
        super(TimeRangeField, self).__init__(bounds, *args, **kwargs)
class Lookup(namedtuple('Lookup', ('value', 'lookup_type'))):
    """Value/lookup-type pair that is falsy whenever its value is falsy."""
    # python nature is test __len__ on tuple types for boolean check
    def __len__(self):
        # Report an empty tuple when the wrapped value is falsy so that
        # ``bool(lookup)`` tracks ``bool(lookup.value)``.
        return 2 if self.value else 0
class LookupTypeField(forms.MultiValueField):
    """Pairs a value field with a choice of lookup expression."""
    def __init__(self, field, lookup_choices, *args, **kwargs):
        subfields = (
            field,
            forms.ChoiceField(choices=lookup_choices)
        )
        kwargs['widget'] = LookupTypeWidget(
            widgets=[subfield.widget for subfield in subfields])
        kwargs['help_text'] = field.help_text
        super(LookupTypeField, self).__init__(subfields, *args, **kwargs)
    def compress(self, data_list):
        """Combine (value, lookup) into a Lookup, defaulting to 'exact'."""
        if len(data_list) == 2:
            value, lookup = data_list
            return Lookup(value=value, lookup_type=lookup or 'exact')
        return Lookup(value=None, lookup_type='exact')
class IsoDateTimeField(forms.DateTimeField):
    """
    Supports 'iso-8601' date format too which is out the scope of
    the ``datetime.strptime`` standard library

    # ISO 8601: ``http://www.w3.org/TR/NOTE-datetime``

    Based on Gist example by David Medina https://gist.github.com/copitux/5773821
    """
    # Sentinel "format" name recognized by strptime() below.
    ISO_8601 = 'iso-8601'
    input_formats = [ISO_8601]
    def strptime(self, value, format):
        """Parse *value*; the ISO 8601 format is delegated to Django's
        ``parse_datetime``, other formats to the base class."""
        value = force_str(value)
        if format == self.ISO_8601:
            parsed = parse_datetime(value)
            if parsed is None: # Continue with other formats if doesn't match
                # Raising ValueError tells the base field to try the next
                # entry in input_formats.
                raise ValueError
            return handle_timezone(parsed)
        return super(IsoDateTimeField, self).strptime(value, format)
class BaseCSVField(forms.Field):
    """
    Base field for validating CSV types. Value validation is performed by
    secondary base classes.

    ex::

        class IntegerCSVField(BaseCSVField, filters.IntegerField):
            pass

    """
    # Widget mixin combined with the concrete widget class in
    # _get_widget_class() below.
    base_widget_class = BaseCSVWidget
    def __init__(self, *args, **kwargs):
        # Wrap whatever widget was requested (or the class default) so it
        # understands comma-separated input.
        widget = kwargs.get('widget') or self.widget
        kwargs['widget'] = self._get_widget_class(widget)
        super(BaseCSVField, self).__init__(*args, **kwargs)
    def _get_widget_class(self, widget):
        # passthrough, allows for override
        if isinstance(widget, BaseCSVWidget) or (
                isinstance(widget, type) and
                issubclass(widget, BaseCSVWidget)):
            return widget
        # complain since we are unable to reconstruct widget instances
        assert isinstance(widget, type), \
            "'%s.widget' must be a widget class, not %s." \
            % (self.__class__.__name__, repr(widget))
        # Dynamically derive a "CSV<Widget>" class that mixes CSV handling
        # into the requested widget class.
        bases = (self.base_widget_class, widget, )
        return type(str('CSV%s' % widget.__name__), bases, {})
    def clean(self, value):
        # None (no input at all) passes through; otherwise each CSV item is
        # cleaned individually by the sibling value-validating base class.
        if value is None:
            return None
        return [super(BaseCSVField, self).clean(v) for v in value]
class BaseRangeField(BaseCSVField):
    """CSV field that additionally requires exactly two values (a range).

    A plain text input is forced because a range must always take two
    comma-separated values; a date (or similar) input would only let the
    user submit one value and would always fail validation.
    """
    widget = CSVWidget
    default_error_messages = {
        'invalid_values': _('Range query expects two values.')
    }

    def clean(self, value):
        cleaned = super(BaseRangeField, self).clean(value)
        if cleaned is None or len(cleaned) == 2:
            return cleaned
        raise forms.ValidationError(
            self.error_messages['invalid_values'],
            code='invalid_values')
| steventimberman/masterDebater | venv/lib/python2.7/site-packages/django_filters/fields.py | Python | mit | 5,624 |
def transform_scalars(dataset):
    """Apply a separable 3D Hann (Hanning) window to the dataset.

    The volume is tapered smoothly towards zero at its borders by
    multiplying each axis with a 1D Hann window via numpy broadcasting
    (useful e.g. before FFT-based processing to reduce edge artifacts).
    """
    from tomviz import utils
    import numpy as np

    # Get the current volume as a numpy array.
    array = utils.get_array(dataset)

    # np.hanning returns float64; an in-place multiply of an integer-typed
    # volume by a float window raises a numpy casting error. Promote
    # integer data to float first so the taper below is always valid.
    if not np.issubdtype(array.dtype, np.floating):
        array = array.astype(float)

    # Apply the separable 3D hanning window, one axis at a time.
    for axis, axis_size in enumerate(array.shape):
        # set up shape for numpy broadcasting
        filter_shape = [1, ] * array.ndim
        filter_shape[axis] = axis_size
        window = np.hanning(axis_size).reshape(filter_shape)
        array *= window

    # This is where the transformed data is set, it will display in tomviz.
    utils.set_array(dataset, array)
| Hovden/tomviz | tomviz/python/HannWindow3D.py | Python | bsd-3-clause | 678 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from email.header import decode_header
from email.utils import formataddr
from openerp import SUPERUSER_ID, api
from openerp.osv import osv, orm, fields
from openerp.tools import html_email_clean
from openerp.tools.translate import _
from HTMLParser import HTMLParser
_logger = logging.getLogger(__name__)
""" Some tools for parsing / creating email fields """
def decode(text):
    """Return the unicode() conversion of the given encoded SMTP header text.

    Falsy input (None or empty string) yields None.
    """
    if not text:
        return None
    # Stray carriage returns confuse the RFC 2047 decoder; strip them first.
    parts = decode_header(text.replace('\r', ''))
    return ''.join(tools.ustr(chunk, charset) for chunk, charset in parts)
class MLStripper(HTMLParser):
    """HTML parser that collects only text nodes, discarding all markup."""
    def __init__(self):
        # HTMLParser.reset() performs the needed (re)initialisation here
        # (the Python 2 HTMLParser constructor only calls reset() itself).
        self.reset()
        # Accumulated text fragments, joined by get_data().
        self.fed = []
    def handle_data(self, d):
        # Called by feed() for every text chunk found between tags.
        self.fed.append(d)
    def get_data(self):
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, keeping only the text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
class mail_message(osv.Model):
    """ Messages model: system notification (replacing res.log notifications),
        comments (OpenChatter discussion) and incoming emails. """
    _name = 'mail.message'
    _description = 'Message'
    _inherit = ['ir.needaction_mixin']
    _order = 'id desc'  # newest messages first by default
    _rec_name = 'record_name'

    # Page size used by message_read() when no explicit limit is given.
    _message_read_limit = 30
    # Fields fetched when reading messages for the Chatter widget.
    _message_read_fields = ['id', 'parent_id', 'model', 'res_id', 'body', 'subject', 'date', 'to_read', 'email_from',
        'type', 'vote_user_ids', 'attachment_ids', 'author_id', 'partner_ids', 'record_name']
    # NOTE(review): presumably a display-truncation length for record_name;
    # not used within this chunk — confirm against the rest of the module.
    _message_record_name_length = 18
    # Body length threshold before "read more" shortening applies.
    _message_read_more_limit = 1024
def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
if context and context.get('default_type') and context.get('default_type') not in [
val[0] for val in self._columns['type'].selection]:
context = dict(context, default_type=None)
return super(mail_message, self).default_get(cr, uid, fields, context=context)
def _get_to_read(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user. """
res = dict((id, False) for id in ids)
partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
notif_obj = self.pool.get('mail.notification')
notif_ids = notif_obj.search(cr, uid, [
('partner_id', 'in', [partner_id]),
('message_id', 'in', ids),
('is_read', '=', False),
], context=context)
for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
res[notif.message_id.id] = True
return res
def _search_to_read(self, cr, uid, obj, name, domain, context=None):
""" Search for messages to read by the current user. Condition is
inversed because we search unread message on a is_read column. """
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.is_read', '=', not domain[0][2])]
def _get_starred(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user. """
res = dict((id, False) for id in ids)
partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
notif_obj = self.pool.get('mail.notification')
notif_ids = notif_obj.search(cr, uid, [
('partner_id', 'in', [partner_id]),
('message_id', 'in', ids),
('starred', '=', True),
], context=context)
for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
res[notif.message_id.id] = True
return res
def _search_starred(self, cr, uid, obj, name, domain, context=None):
""" Search for starred messages by the current user."""
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.starred', '=', domain[0][2])]
_columns = {
'type': fields.selection([
('email', 'Email'),
('comment', 'Comment'),
('notification', 'System notification'),
], 'Type', size=12,
help="Message type: email for email message, notification for system "\
"message, comment for other messages such as user replies"),
'email_from': fields.char('From',
help="Email address of the sender. This field is set when no matching partner is found for incoming emails."),
'reply_to': fields.char('Reply-To',
help='Reply email address. Setting the reply_to bypasses the automatic thread creation.'),
'no_auto_thread': fields.boolean('No threading for answers',
help='Answers do not go in the original document\' discussion thread. This has an impact on the generated message-id.'),
'author_id': fields.many2one('res.partner', 'Author', select=1,
ondelete='set null',
help="Author of the message. If not set, email_from may hold an email address that did not match any partner."),
'author_avatar': fields.related('author_id', 'image_small', type="binary", string="Author's Avatar"),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
'notified_partner_ids': fields.many2many('res.partner', 'mail_notification',
'message_id', 'partner_id', 'Notified partners',
help='Partners that have a notification pushing this message in their mailboxes'),
'attachment_ids': fields.many2many('ir.attachment', 'message_attachment_rel',
'message_id', 'attachment_id', 'Attachments'),
'parent_id': fields.many2one('mail.message', 'Parent Message', select=True,
ondelete='set null', help="Initial thread message."),
'child_ids': fields.one2many('mail.message', 'parent_id', 'Child Messages'),
'model': fields.char('Related Document Model', size=128, select=1),
'res_id': fields.integer('Related Document ID', select=1),
'record_name': fields.char('Message Record Name', help="Name get of the related document."),
'notification_ids': fields.one2many('mail.notification', 'message_id',
string='Notifications', auto_join=True,
help='Technical field holding the message notifications. Use notified_partner_ids to access notified partners.'),
'subject': fields.char('Subject'),
'date': fields.datetime('Date'),
'message_id': fields.char('Message-Id', help='Message unique identifier', select=1, readonly=1, copy=False),
'body': fields.html('Contents', help='Automatically sanitized HTML contents'),
'to_read': fields.function(_get_to_read, fnct_search=_search_to_read,
type='boolean', string='To read',
help='Current user has an unread notification linked to this message'),
'starred': fields.function(_get_starred, fnct_search=_search_starred,
type='boolean', string='Starred',
help='Current user has a starred notification linked to this message'),
'subtype_id': fields.many2one('mail.message.subtype', 'Subtype',
ondelete='set null', select=1,),
'vote_user_ids': fields.many2many('res.users', 'mail_vote',
'message_id', 'user_id', string='Votes',
help='Users that voted for this message'),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
}
    def _needaction_domain_get(self, cr, uid, context=None):
        # Messages needing action (ir.needaction_mixin) are the unread ones.
        return [('to_read', '=', True)]
def _get_default_from(self, cr, uid, context=None):
this = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
if this.alias_name and this.alias_domain:
return formataddr((this.name, '%s@%s' % (this.alias_name, this.alias_domain)))
elif this.email:
return formataddr((this.name, this.email))
raise osv.except_osv(_('Invalid Action!'), _("Unable to send email, please configure the sender's email address or alias."))
def _get_default_author(self, cr, uid, context=None):
return self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
_defaults = {
'type': 'email',
'date': fields.datetime.now,
'author_id': lambda self, cr, uid, ctx=None: self._get_default_author(cr, uid, ctx),
'body': '',
'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
}
#------------------------------------------------------
# Vote/Like
#------------------------------------------------------
def vote_toggle(self, cr, uid, ids, context=None):
''' Toggles vote. Performed using read to avoid access rights issues.
Done as SUPERUSER_ID because uid may vote for a message he cannot modify. '''
for message in self.read(cr, uid, ids, ['vote_user_ids'], context=context):
new_has_voted = not (uid in message.get('vote_user_ids'))
if new_has_voted:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(4, uid)]}, context=context)
else:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(3, uid)]}, context=context)
return new_has_voted or False
#------------------------------------------------------
# download an attachment
#------------------------------------------------------
def download_attachment(self, cr, uid, id_message, attachment_id, context=None):
""" Return the content of linked attachments. """
# this will fail if you cannot read the message
message_values = self.read(cr, uid, [id_message], ['attachment_ids'], context=context)[0]
if attachment_id in message_values['attachment_ids']:
attachment = self.pool.get('ir.attachment').browse(cr, SUPERUSER_ID, attachment_id, context=context)
if attachment.datas and attachment.datas_fname:
return {
'base64': attachment.datas,
'filename': attachment.datas_fname,
}
return False
#------------------------------------------------------
# Notification API
#------------------------------------------------------
    @api.cr_uid_ids_context
    def set_message_read(self, cr, uid, msg_ids, read, create_missing=True, context=None):
        """ Set messages as (un)read. Technically, the notifications related
            to uid are set to (un)read. If for some msg_ids there are missing
            notifications (i.e. due to load more or thread parent fetching),
            they are created.

            :param bool read: set notification as (un)read
            :param bool create_missing: create notifications for missing entries
                (i.e. when acting on displayed messages not notified)

            :return number of message mark as read
        """
        notification_obj = self.pool.get('mail.notification')
        user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
        if not create_missing:
            # only touch notifications whose state actually changes
            domain += [('is_read', '=', not read)]
        notif_ids = notification_obj.search(cr, uid, domain, context=context)

        # all message have notifications: already set them as (un)read
        if len(notif_ids) == len(msg_ids) or not create_missing:
            notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
            return len(notif_ids)

        # some messages do not have notifications: find which one, create notification, update read status
        notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
        to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
        for msg_id in to_create_msg_ids:
            # NOTE(review): freshly created notifications are not counted in
            # the returned total — confirm callers only rely on pre-existing ones.
            notification_obj.create(cr, uid, {'partner_id': user_pid, 'is_read': read, 'message_id': msg_id}, context=context)
        notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
        return len(notif_ids)
    @api.cr_uid_ids_context
    def set_message_starred(self, cr, uid, msg_ids, starred, create_missing=True, context=None):
        """ Set messages as (un)starred. Technically, the notifications related
            to uid are set to (un)starred.

            :param bool starred: set notification as (un)starred
            :param bool create_missing: create notifications for missing entries
                (i.e. when acting on displayed messages not notified)
        """
        notification_obj = self.pool.get('mail.notification')
        user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
        if not create_missing:
            # only touch notifications whose starred flag actually changes
            domain += [('starred', '=', not starred)]
        values = {
            'starred': starred
        }
        if starred:
            # starring a message also marks it as unread
            values['is_read'] = False

        notif_ids = notification_obj.search(cr, uid, domain, context=context)

        # all message have notifications: already set them as (un)starred
        if len(notif_ids) == len(msg_ids) or not create_missing:
            notification_obj.write(cr, uid, notif_ids, values, context=context)
            return starred

        # some messages do not have notifications: find which one, create notification, update starred status
        notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
        to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
        for msg_id in to_create_msg_ids:
            notification_obj.create(cr, uid, dict(values, partner_id=user_pid, message_id=msg_id), context=context)
        notification_obj.write(cr, uid, notif_ids, values, context=context)
        return starred
#------------------------------------------------------
# Message loading for web interface
#------------------------------------------------------
    def _message_read_dict_postprocess(self, cr, uid, messages, message_tree, context=None):
        """ Post-processing on values given by message_read. This method will
            handle partners in batch to avoid doing numerous queries.

            :param list messages: list of message, as get_dict result
            :param dict message_tree: {[msg.id]: msg browse record}
        """
        res_partner_obj = self.pool.get('res.partner')
        ir_attachment_obj = self.pool.get('ir.attachment')
        pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id

        # 1. Aggregate partners (author_id and partner_ids) and attachments
        partner_ids = set()
        attachment_ids = set()
        for key, message in message_tree.iteritems():
            if message.author_id:
                partner_ids |= set([message.author_id.id])
            if message.subtype_id and message.notified_partner_ids:  # take notified people of message with a subtype
                partner_ids |= set([partner.id for partner in message.notified_partner_ids])
            elif not message.subtype_id and message.partner_ids:  # take specified people of message without a subtype (log)
                partner_ids |= set([partner.id for partner in message.partner_ids])
            if message.attachment_ids:
                attachment_ids |= set([attachment.id for attachment in message.attachment_ids])
        # Read partners as SUPERUSER -> display the names like classic m2o even if no access
        partners = res_partner_obj.name_get(cr, SUPERUSER_ID, list(partner_ids), context=context)
        partner_tree = dict((partner[0], partner) for partner in partners)

        # 2. Attachments as SUPERUSER, because could receive msg and attachments for doc uid cannot see
        attachments = ir_attachment_obj.read(cr, SUPERUSER_ID, list(attachment_ids), ['id', 'datas_fname', 'name', 'file_type_icon'], context=context)
        attachments_tree = dict((attachment['id'], {
            'id': attachment['id'],
            'filename': attachment['datas_fname'],
            'name': attachment['name'],
            'file_type_icon': attachment['file_type_icon'],
        }) for attachment in attachments)

        # 3. Update message dictionaries
        for message_dict in messages:
            message_id = message_dict.get('id')
            message = message_tree[message_id]
            if message.author_id:
                author = partner_tree[message.author_id.id]
            else:
                # no matching partner: fall back on the raw sender address
                author = (0, message.email_from)
            partner_ids = []
            if message.subtype_id:
                partner_ids = [partner_tree[partner.id] for partner in message.notified_partner_ids
                                if partner.id in partner_tree]
            else:
                partner_ids = [partner_tree[partner.id] for partner in message.partner_ids
                                if partner.id in partner_tree]
            attachment_ids = []
            for attachment in message.attachment_ids:
                if attachment.id in attachments_tree:
                    attachment_ids.append(attachments_tree[attachment.id])
            message_dict.update({
                'is_author': pid == author[0],
                'author_id': author,
                'partner_ids': partner_ids,
                'attachment_ids': attachment_ids,
                'user_pid': pid
                })
        return True
def _message_read_dict(self, cr, uid, message, parent_id=False, context=None):
""" Return a dict representation of the message. This representation is
used in the JS client code, to display the messages. Partners and
attachments related stuff will be done in post-processing in batch.
:param dict message: mail.message browse record
"""
# private message: no model, no res_id
is_private = False
if not message.model or not message.res_id:
is_private = True
# votes and favorites: res.users ids, no prefetching should be done
vote_nb = len(message.vote_user_ids)
has_voted = uid in [user.id for user in message.vote_user_ids]
try:
if parent_id:
max_length = 300
else:
max_length = 100
body_short = html_email_clean(message.body, remove=False, shorten=True, max_length=max_length)
except Exception:
body_short = '<p><b>Encoding Error : </b><br/>Unable to convert this message (id: %s).</p>' % message.id
_logger.exception(Exception)
return {'id': message.id,
'type': message.type,
'subtype': message.subtype_id.name if message.subtype_id else False,
'body': message.body,
'body_short': body_short,
'model': message.model,
'res_id': message.res_id,
'record_name': message.record_name,
'subject': message.subject,
'date': message.date,
'to_read': message.to_read,
'parent_id': parent_id,
'is_private': is_private,
'author_id': False,
'author_avatar': message.author_avatar,
'is_author': False,
'partner_ids': [],
'vote_nb': vote_nb,
'has_voted': has_voted,
'is_favorite': message.starred,
'attachment_ids': [],
}
    def _message_read_add_expandables(self, cr, uid, messages, message_tree, parent_tree,
            message_unload_ids=[], thread_level=0, domain=[], parent_id=False, context=None):
        """ Create expandables for message_read, to load new messages.
            1. get the expandable for new threads
                if display is flat (thread_level == 0):
                    fetch message_ids < min(already displayed ids), because we
                    want a flat display, ordered by id
                else:
                    fetch message_ids that are not childs of already displayed
                    messages
            2. get the expandables for new messages inside threads if display
               is not flat
                for each thread header, search for its childs
                for each hole in the child list based on message displayed,
                create an expandable

            :param list messages: list of message structure for the Chatter
                widget to which expandables are added
            :param dict message_tree: dict [id]: browse record of this message
            :param dict parent_tree: dict [parent_id]: [child_ids]
            :param list message_unload_ids: list of message_ids we do not want
                to load
            :return bool: True
        """
        def _get_expandable(domain, message_nb, parent_id, max_limit):
            # minimal dict the Chatter widget understands as a 'load more' entry
            return {
                'domain': domain,
                'nb_messages': message_nb,
                'type': 'expandable',
                'parent_id': parent_id,
                'max_limit': max_limit,
            }

        if not messages:
            return True
        message_ids = sorted(message_tree.keys())

        # 1. get the expandable for new threads
        if thread_level == 0:
            exp_domain = domain + [('id', '<', min(message_unload_ids + message_ids))]
        else:
            exp_domain = domain + ['!', ('id', 'child_of', message_unload_ids + parent_tree.keys())]
        more_count = self.search_count(cr, uid, exp_domain, context=context)
        if more_count:
            # inside a thread: prepend
            if parent_id:
                messages.insert(0, _get_expandable(exp_domain, -1, parent_id, True))
            # new threads: append
            else:
                messages.append(_get_expandable(exp_domain, -1, parent_id, True))

        # 2. get the expandables for new messages inside threads if display is not flat
        if thread_level == 0:
            return True
        for message_id in message_ids:
            message = message_tree[message_id]

            # generate only for thread header messages (TDE note: parent_id may be False is uid cannot see parent_id, seems ok)
            if message.parent_id:
                continue

            # check there are message for expandable
            child_ids = set([child.id for child in message.child_ids]) - set(message_unload_ids)
            child_ids = sorted(list(child_ids), reverse=True)
            if not child_ids:
                continue

            # make groups of unread messages
            id_min, id_max, nb = max(child_ids), 0, 0
            for child_id in child_ids:
                if not child_id in message_ids:
                    # undisplayed child: widen the current hole
                    nb += 1
                    if id_min > child_id:
                        id_min = child_id
                    if id_max < child_id:
                        id_max = child_id
                elif nb > 0:
                    # a displayed child closes the hole: emit its expandable
                    exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
                    idx = [msg.get('id') for msg in messages].index(child_id) + 1
                    # messages.append(_get_expandable(exp_domain, nb, message_id, False))
                    messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
                    id_min, id_max, nb = max(child_ids), 0, 0
                else:
                    id_min, id_max, nb = max(child_ids), 0, 0
            # trailing hole after the oldest displayed child, attached to the header
            if nb > 0:
                exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
                idx = [msg.get('id') for msg in messages].index(message_id) + 1
                # messages.append(_get_expandable(exp_domain, nb, message_id, id_min))
                messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))

        return True
    @api.cr_uid_context
    def message_read(self, cr, uid, ids=None, domain=None, message_unload_ids=None,
                     thread_level=0, context=None, parent_id=False, limit=None):
        """ Read messages from mail.message, and get back a list of structured
            messages to be displayed as discussion threads. If IDs is set,
            fetch these records. Otherwise use the domain to fetch messages.
            After having fetch messages, their ancestors will be added to obtain
            well formed threads, if uid has access to them.

            After reading the messages, expandable messages are added in the
            message list (see ``_message_read_add_expandables``). It consists
            in messages holding the 'read more' data: number of messages to
            read, domain to apply.

            :param list ids: optional IDs to fetch
            :param list domain: optional domain for searching ids if ids not set
            :param list message_unload_ids: optional ids we do not want to fetch,
                because i.e. they are already displayed somewhere
            :param int parent_id: context of parent_id
                - if parent_id reached when adding ancestors, stop going further
                  in the ancestor search
                - if set in flat mode, ancestor_id is set to parent_id
            :param int limit: number of messages to fetch, before adding the
                ancestors and expandables
            :return list: list of message structure for the Chatter widget
        """
        assert thread_level in [0, 1], 'message_read() thread_level should be 0 (flat) or 1 (1 level of thread); given %s.' % thread_level
        domain = domain if domain is not None else []
        message_unload_ids = message_unload_ids if message_unload_ids is not None else []
        if message_unload_ids:
            domain += [('id', 'not in', message_unload_ids)]
        limit = limit or self._message_read_limit
        message_tree = {}
        message_list = []
        parent_tree = {}

        # no specific IDS given: fetch messages according to the domain, add their parents if uid has access to
        if ids is None:
            ids = self.search(cr, uid, domain, context=context, limit=limit)

        # fetch parent if threaded, sort messages
        for message in self.browse(cr, uid, ids, context=context):
            message_id = message.id
            if message_id in message_tree:
                continue
            message_tree[message_id] = message

            # find parent_id
            if thread_level == 0:
                tree_parent_id = parent_id
            else:
                tree_parent_id = message_id
                parent = message
                # climb up to the thread root (or stop at the given parent_id)
                while parent.parent_id and parent.parent_id.id != parent_id:
                    parent = parent.parent_id
                    tree_parent_id = parent.id
                if not parent.id in message_tree:
                    message_tree[parent.id] = parent
            # newest messages first
            parent_tree.setdefault(tree_parent_id, [])
            if tree_parent_id != message_id:
                parent_tree[tree_parent_id].append(self._message_read_dict(cr, uid, message_tree[message_id], parent_id=tree_parent_id, context=context))

        if thread_level:
            # sort children chronologically and prepend the thread header dict
            for key, message_id_list in parent_tree.iteritems():
                message_id_list.sort(key=lambda item: item['id'])
                message_id_list.insert(0, self._message_read_dict(cr, uid, message_tree[key], context=context))

        # create final ordered message_list based on parent_tree
        parent_list = parent_tree.items()
        parent_list = sorted(parent_list, key=lambda item: max([msg.get('id') for msg in item[1]]) if item[1] else item[0], reverse=True)
        message_list = [message for (key, msg_list) in parent_list for message in msg_list]

        # get the child expandable messages for the tree
        self._message_read_dict_postprocess(cr, uid, message_list, message_tree, context=context)
        self._message_read_add_expandables(cr, uid, message_list, message_tree, parent_tree,
            thread_level=thread_level, message_unload_ids=message_unload_ids, domain=domain, parent_id=parent_id, context=context)
        return message_list
#------------------------------------------------------
# mail_message internals
#------------------------------------------------------
    def init(self, cr):
        # Ensure a composite (model, res_id) index exists: per-document
        # message lookups are frequent and the ORM does not create it.
        cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'mail_message_model_res_id_idx'""")
        if not cr.fetchone():
            cr.execute("""CREATE INDEX mail_message_model_res_id_idx ON mail_message (model, res_id)""")
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
doc_ids = doc_dict.keys()
allowed_doc_ids = self.pool[doc_model].search(cr, uid, [('id', 'in', doc_ids)], context=context)
return set([message_id for allowed_doc_id in allowed_doc_ids for message_id in doc_dict[allowed_doc_id]])
    def _find_allowed_doc_ids(self, cr, uid, model_ids, context=None):
        # Aggregate, over all related models, the message ids whose document
        # the user may read; models without read access are skipped entirely.
        model_access_obj = self.pool.get('ir.model.access')
        allowed_ids = set()
        for doc_model, doc_dict in model_ids.iteritems():
            # raise_exception=False: silently skip unreadable models
            if not model_access_obj.check(cr, uid, doc_model, 'read', False):
                continue
            allowed_ids |= self._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
        return allowed_ids
    def _search(self, cr, uid, args, offset=0, limit=None, order=None,
                context=None, count=False, access_rights_uid=None):
        """ Override that adds specific access rights of mail.message, to remove
            ids uid could not see according to our custom rules. Please refer
            to check_access_rule for more details about those rules.

            After having received ids of a classic search, keep only:
            - if author_id == pid, uid is the author, OR
            - a notification (id, pid) exists, uid has been notified, OR
            - uid have read access to the related document is model, res_id
            - otherwise: remove the id
        """
        # Rules do not apply to administrator
        if uid == SUPERUSER_ID:
            return super(mail_message, self)._search(
                cr, uid, args, offset=offset, limit=limit, order=order,
                context=context, count=count, access_rights_uid=access_rights_uid)
        # Perform a super with count as False, to have the ids, not a counter
        ids = super(mail_message, self)._search(
            cr, uid, args, offset=offset, limit=limit, order=order,
            context=context, count=False, access_rights_uid=access_rights_uid)
        if not ids and count:
            return 0
        elif not ids:
            return ids

        pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
        author_ids, partner_ids, allowed_ids = set([]), set([]), set([])
        model_ids = {}

        # check read access rights before checking the actual rules on the given ids
        super(mail_message, self).check_access_rights(cr, access_rights_uid or uid, 'read')

        # Classify every candidate id as authored / notified / document-linked
        # in a single SQL pass (LEFT JOIN on the user's notifications).
        cr.execute("""SELECT DISTINCT m.id, m.model, m.res_id, m.author_id, n.partner_id
            FROM "%s" m LEFT JOIN "mail_notification" n
            ON n.message_id=m.id AND n.partner_id = (%%s)
            WHERE m.id = ANY (%%s)""" % self._table, (pid, ids,))
        for id, rmod, rid, author_id, partner_id in cr.fetchall():
            if author_id == pid:
                author_ids.add(id)
            elif partner_id == pid:
                partner_ids.add(id)
            elif rmod and rid:
                model_ids.setdefault(rmod, {}).setdefault(rid, set()).add(id)

        # document-linked ids are kept only when uid can read the document
        allowed_ids = self._find_allowed_doc_ids(cr, uid, model_ids, context=context)
        final_ids = author_ids | partner_ids | allowed_ids

        if count:
            return len(final_ids)
        else:
            # re-construct a list based on ids, because set did not keep the original order
            id_list = [id for id in ids if id in final_ids]
            return id_list
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """ Access rules of mail.message:
            - read: if
                - author_id == pid, uid is the author, OR
                - mail_notification (id, pid) exists, uid has been notified, OR
                - uid have read access to the related document if model, res_id
                - otherwise: raise
            - create: if
                - no model, no res_id, I create a private message OR
                - pid in message_follower_ids if model, res_id OR
                - mail_notification (parent_id.id, pid) exists, uid has been notified of the parent, OR
                - uid have write or create access on the related document if model, res_id, OR
                - otherwise: raise
            - write: if
                - author_id == pid, uid is the author, OR
                - uid has write or create access on the related document if model, res_id
                - otherwise: raise
            - unlink: if
                - uid has write or create access on the related document if model, res_id
                - otherwise: raise
        """
        def _generate_model_record_ids(msg_val, msg_ids):
            """ Group message ids by their related document.

            :param msg_val: {msg_id: {'model': .., 'res_id': .., 'author_id': ..}}
            :param msg_ids: iterable of message ids to group
            :return: {'model': set(res_id, ..)}
            """
            model_record_ids = {}
            for id in msg_ids:
                vals = msg_val.get(id, {})
                if vals.get('model') and vals.get('res_id'):
                    model_record_ids.setdefault(vals['model'], set()).add(vals['res_id'])
            return model_record_ids

        # the superuser bypasses all access rules
        if uid == SUPERUSER_ID:
            return
        if isinstance(ids, (int, long)):
            ids = [ids]
        not_obj = self.pool.get('mail.notification')
        fol_obj = self.pool.get('mail.followers')
        partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=None).partner_id.id

        # Read mail_message.ids to have their values
        # NOTE(review): dict.fromkeys(ids, {}) makes ids missing from the DB share
        # the *same* empty dict; safe here because entries are only replaced below,
        # never mutated in place.
        message_values = dict.fromkeys(ids, {})
        cr.execute('SELECT DISTINCT id, model, res_id, author_id, parent_id FROM "%s" WHERE id = ANY (%%s)' % self._table, (ids,))
        for id, rmod, rid, author_id, parent_id in cr.fetchall():
            message_values[id] = {'model': rmod, 'res_id': rid, 'author_id': author_id, 'parent_id': parent_id}

        # Author condition (READ, WRITE, CREATE (private)) -> could become an ir.rule ?
        author_ids = []
        if operation == 'read' or operation == 'write':
            author_ids = [mid for mid, message in message_values.iteritems()
                if message.get('author_id') and message.get('author_id') == partner_id]
        elif operation == 'create':
            # messages without a related document are private notes by the author
            author_ids = [mid for mid, message in message_values.iteritems()
                if not message.get('model') and not message.get('res_id')]

        # Parent condition, for create (check for received notifications for the created message parent)
        notified_ids = []
        if operation == 'create':
            parent_ids = [message.get('parent_id') for mid, message in message_values.iteritems()
                if message.get('parent_id')]
            not_ids = not_obj.search(cr, SUPERUSER_ID, [('message_id.id', 'in', parent_ids), ('partner_id', '=', partner_id)], context=context)
            not_parent_ids = [notif.message_id.id for notif in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
            notified_ids += [mid for mid, message in message_values.iteritems()
                if message.get('parent_id') in not_parent_ids]

        # Notification condition, for read (check for received notifications and create (in message_follower_ids)) -> could become an ir.rule, but not till we do not have a many2one variable field
        other_ids = set(ids).difference(set(author_ids), set(notified_ids))
        model_record_ids = _generate_model_record_ids(message_values, other_ids)
        if operation == 'read':
            not_ids = not_obj.search(cr, SUPERUSER_ID, [
                ('partner_id', '=', partner_id),
                ('message_id', 'in', ids),
            ], context=context)
            notified_ids = [notification.message_id.id for notification in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
        elif operation == 'create':
            # the creating user's partner must follow the related document
            for doc_model, doc_ids in model_record_ids.items():
                fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
                    ('res_model', '=', doc_model),
                    ('res_id', 'in', list(doc_ids)),
                    ('partner_id', '=', partner_id),
                ], context=context)
                fol_mids = [follower.res_id for follower in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)]
                notified_ids += [mid for mid, message in message_values.iteritems()
                    if message.get('model') == doc_model and message.get('res_id') in fol_mids]

        # CRUD: Access rights related to the document
        other_ids = other_ids.difference(set(notified_ids))
        model_record_ids = _generate_model_record_ids(message_values, other_ids)
        document_related_ids = []
        for model, doc_ids in model_record_ids.items():
            model_obj = self.pool[model]
            mids = model_obj.exists(cr, uid, list(doc_ids))
            # the document model may implement its own mail access policy;
            # fall back on the generic mail.thread implementation otherwise
            if hasattr(model_obj, 'check_mail_message_access'):
                model_obj.check_mail_message_access(cr, uid, mids, operation, context=context)
            else:
                self.pool['mail.thread'].check_mail_message_access(cr, uid, mids, operation, model_obj=model_obj, context=context)
            document_related_ids += [mid for mid, message in message_values.iteritems()
                if message.get('model') == model and message.get('res_id') in mids]

        # Calculate remaining ids: if not void, raise an error
        other_ids = other_ids.difference(set(document_related_ids))
        if not other_ids:
            return
        raise orm.except_orm(_('Access Denied'),
                            _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') %
                            (self._description, operation))
def _get_record_name(self, cr, uid, values, context=None):
""" Return the related document name, using name_get. It is done using
SUPERUSER_ID, to be sure to have the record name correctly stored. """
if not values.get('model') or not values.get('res_id') or values['model'] not in self.pool:
return False
return self.pool[values['model']].name_get(cr, SUPERUSER_ID, [values['res_id']], context=context)[0][1]
def _get_reply_to(self, cr, uid, values, context=None):
""" Return a specific reply_to: alias of the document through message_get_reply_to
or take the email_from
"""
model, res_id, email_from = values.get('model'), values.get('res_id'), values.get('email_from')
ctx = dict(context, thread_model=model)
return self.pool['mail.thread'].message_get_reply_to(cr, uid, [res_id], default=email_from, context=ctx)[res_id]
def _get_message_id(self, cr, uid, values, context=None):
if values.get('no_auto_thread', False) is True:
message_id = tools.generate_tracking_message_id('reply_to')
elif values.get('res_id') and values.get('model'):
message_id = tools.generate_tracking_message_id('%(res_id)s-%(model)s' % values)
else:
message_id = tools.generate_tracking_message_id('private')
return message_id
    def create(self, cr, uid, values, context=None):
        """Create a mail.message, filling in missing defaults (email_from,
        message_id, reply_to, record_name) before insertion, then notify the
        followers / recipients of the new message."""
        context = dict(context or {})
        default_starred = context.pop('default_starred', False)

        if 'email_from' not in values:  # needed to compute reply_to
            values['email_from'] = self._get_default_from(cr, uid, context=context)
        if not values.get('message_id'):
            values['message_id'] = self._get_message_id(cr, uid, values, context=context)
        if 'reply_to' not in values:
            values['reply_to'] = self._get_reply_to(cr, uid, values, context=context)
        if 'record_name' not in values and 'default_record_name' not in context:
            values['record_name'] = self._get_record_name(cr, uid, values, context=context)

        newid = super(mail_message, self).create(cr, uid, values, context)

        # send notifications right away unless the caller opted out via context
        self._notify(cr, uid, newid, context=context,
                     force_send=context.get('mail_notify_force_send', True),
                     user_signature=context.get('mail_notify_user_signature', True))
        # TDE FIXME: handle default_starred. Why not setting an inv on starred ?
        # Because starred will call set_message_starred, that looks for notifications.
        # When creating a new mail_message, it will create a notification to a message
        # that does not exist, leading to an error (key not existing). Also this
        # this means unread notifications will be created, yet we can not assure
        # this is what we want.
        if default_starred:
            self.set_message_starred(cr, uid, [newid], True, context=context)
        return newid
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
""" Override to explicitely call check_access_rule, that is not called
by the ORM. It instead directly fetches ir.rules and apply them. """
self.check_access_rule(cr, uid, ids, 'read', context=context)
res = super(mail_message, self).read(cr, uid, ids, fields=fields, context=context, load=load)
return res
def unlink(self, cr, uid, ids, context=None):
# cascade-delete attachments that are directly attached to the message (should only happen
# for mail.messages that act as parent for a standalone mail.mail record).
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
attachments_to_delete = []
for message in self.browse(cr, uid, ids, context=context):
for attach in message.attachment_ids:
if attach.res_model == self._name and (attach.res_id == message.id or attach.res_id == 0):
attachments_to_delete.append(attach.id)
if attachments_to_delete:
self.pool.get('ir.attachment').unlink(cr, uid, attachments_to_delete, context=context)
return super(mail_message, self).unlink(cr, uid, ids, context=context)
#------------------------------------------------------
# Messaging API
#------------------------------------------------------
    def _notify(self, cr, uid, newid, context=None, force_send=False, user_signature=True):
        """ Add the related record followers to the destination partner_ids if is not a private message.
            Call mail_notification.notify to manage the email sending
        """
        notification_obj = self.pool.get('mail.notification')
        message = self.browse(cr, uid, newid, context=context)
        partners_to_notify = set([])

        # all followers of the mail.message document have to be added as partners and notified if a subtype is defined (otherwise: log message)
        if message.subtype_id and message.model and message.res_id:
            fol_obj = self.pool.get("mail.followers")
            # browse as SUPERUSER because rules could restrict the search results
            fol_ids = fol_obj.search(
                cr, SUPERUSER_ID, [
                    ('res_model', '=', message.model),
                    ('res_id', '=', message.res_id),
                ], context=context)
            # only followers subscribed to this message's subtype are notified
            partners_to_notify |= set(
                fo.partner_id.id for fo in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)
                if message.subtype_id.id in [st.id for st in fo.subtype_ids]
            )
        # remove me from notified partners, unless the message is written on my own wall
        if message.subtype_id and message.author_id and message.model == "res.partner" and message.res_id == message.author_id.id:
            partners_to_notify |= set([message.author_id.id])
        elif message.author_id:
            partners_to_notify -= set([message.author_id.id])

        # all partner_ids of the mail.message have to be notified regardless of the above (even the author if explicitly added!)
        if message.partner_ids:
            partners_to_notify |= set([p.id for p in message.partner_ids])

        # notify
        notification_obj._notify(
            cr, uid, newid, partners_to_notify=list(partners_to_notify), context=context,
            force_send=force_send, user_signature=user_signature
        )
        # refresh so notified_partner_ids below reflects the notifications just created
        message.refresh()

        # An error appear when a user receive a notification without notifying
        # the parent message -> add a read notification for the parent
        if message.parent_id:
            # all notified_partner_ids of the mail.message have to be notified for the parented messages
            partners_to_parent_notify = set(message.notified_partner_ids).difference(message.parent_id.notified_partner_ids)
            for partner in partners_to_parent_notify:
                notification_obj.create(cr, uid, {
                    'message_id': message.parent_id.id,
                    'partner_id': partner.id,
                    'is_read': True,
                }, context=context)
| jjscarafia/odoo | addons/mail/mail_message.py | Python | agpl-3.0 | 47,270 |
# -*- coding: utf-8
# pylint: disable=line-too-long
"""Implements the collections class (the file name has an extra 'c' to avoid
masking the standard collections library).
If the user have analyzed their metagenome using a metagenome binning software
and identified draft genomes in their data (or by any other means binned their
contigs based on any criterion), this information can be stored in the
contigs database's collections_* tables. The class implemented here collects
this information from the database, and presents it as an intuitive data structure
for the client.
"""
import copy
import anvio
import anvio.db as db
import anvio.tables as t
import anvio.utils as utils
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
from anvio.errors import ConfigError
from anvio.tables.collections import TablesForCollections
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "a.murat.eren@gmail.com"
__status__ = "Development"
# Module-level terminal helpers; used as default arguments (e.g. by
# Collections.__init__) so library users get sensible console output.
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
class Collections:
    """In-memory accessor for the collections stored in an anvi'o database.

    Typical usage: instantiate, call `populate_collections_dict(db_path)` to
    read the collections tables, then use the getter methods below to work
    with individual collections and bins.
    """

    def __init__(self, r=run, p=progress):
        self.collections_dict = {}
        self.run = r
        self.progress = p

        # set by populate_collections_dict:
        self.db_type = None
        self.db_path = None

    def populate_collections_dict(self, db_path):
        """Read the collections info table from the database at `db_path`."""
        filesnpaths.is_file_exists(db_path)

        self.db_path = db_path
        database = db.DB(db_path, utils.get_required_version_for_db(db_path))
        self.db_type = database.get_meta_value('db_type')
        collections_info_table = database.get_table_as_dict(t.collections_info_table_name)
        database.disconnect()

        # collections info must be read only if it is coming from the contigs database.
        if self.db_type == 'contigs':
            read_only = True
        elif self.db_type in ('profile', 'pan'):
            # FIX: the previous chain contained an `elif self.db_type:` catch-all
            # before the 'pan' test, which made the 'pan' branch unreachable and
            # silently accepted any unknown database type. Unknown non-empty
            # types now raise, as the `else` clause always intended.
            read_only = False
        else:
            raise ConfigError('Collections class does not know about this "%s" database type :/' % self.db_type)

        for collection_name in collections_info_table:
            self.collections_dict[collection_name] = collections_info_table[collection_name]
            self.collections_dict[collection_name]['read_only'] = read_only
            self.collections_dict[collection_name]['source_db_path'] = db_path
            self.collections_dict[collection_name]['source_db_version'] = utils.get_required_version_for_db(db_path)

    def sanity_check(self, collection_name):
        """Raise a ConfigError if `collection_name` is not a known collection."""
        if collection_name not in self.collections_dict:
            raise ConfigError('There is no "%s" I know of. Probably something is spelled wrong somewhere? In case you are '
                              'a programmer and accessing to the collections from your program, here is a reminder for you: '
                              'are you sure `populate_collections_dict` was called for whatever database you are trying to '
                              'get collections from? If you are a user, you can always try to use the `--list-collections` '
                              'flag and hope for the best.' % collection_name)

    def get_trimmed_dicts(self, collection_name, split_names=set([])):
        """Returns collection_dict, bins_info_dict for splits matching split_names, and
        split names that are in the db, but not binned in the collection.

        Any bin that does not have any splits left after removal simply is removed
        from the dictionary."""
        self.progress.new('Recovering collection information for "%s" ...' % collection_name)

        collection_dict = self.get_collection_dict(collection_name)
        bins_info_dict = self.get_bins_info_dict(collection_name)

        self.progress.update('Identifying split names that are in the profile db but do not appear in any bin ...')
        split_names_in_db_but_missing_in_collection = copy.deepcopy(split_names)
        for bin_id in collection_dict:
            split_names_in_db_but_missing_in_collection -= set(collection_dict[bin_id])

        self.progress.update('Identifying bin names that do not have any splits that appear in the profile database ...')
        bins_with_zero_splits_in_profile_db = []
        bin_ids_in_collection = list(collection_dict.keys())
        for bin_id in bin_ids_in_collection:
            # good split names are the ones that appear in `split_names` user sent. so here we will replace
            # the content of each bin with only split names that are 'good' in that sense. in practice, this
            # will ensure that the collection dict will not contain any split name that does not appear in
            # the profile database (i.e., a relevant need can be seen in the `load_collection_mode` function
            # in the interactive.py)
            good_split_names = set([split_name for split_name in collection_dict[bin_id] if split_name in split_names])
            if not len(good_split_names):
                bins_with_zero_splits_in_profile_db.append(bin_id)
                collection_dict.pop(bin_id)
                bins_info_dict.pop(bin_id)
            else:
                collection_dict[bin_id] = good_split_names

        self.progress.end()

        if len(bins_with_zero_splits_in_profile_db):
            # FIX: the warning text was missing the word 'splits'
            self.run.warning('Some of the bins in this collection (precisely %d of %d total) did not contain any splits '
                             'that appeared in the profile database. There are multiple reasons for why this can '
                             'happen. But one of the common scenario could be this: You imported an external '
                             'collection, and some of the bins you have in that collection contain a small number '
                             'of contigs that were too short to make it into the merged profile. Well, if you would '
                             'like to figure out what might be the scenario for your experiment, here is the list of '
                             'bin names that did not go through: %s.' \
                                    % (len(bins_with_zero_splits_in_profile_db), len(collection_dict), ", ".join(bins_with_zero_splits_in_profile_db)))

        return (collection_dict, bins_info_dict, split_names_in_db_but_missing_in_collection)

    def get_collection_dict(self, collection_name):
        """Return {bin_name: [split names]} for `collection_name`."""
        self.sanity_check(collection_name)

        c = self.collections_dict[collection_name]

        database = db.DB(c['source_db_path'], c['source_db_version'])
        collection_dict_from_db = database.get_some_rows_from_table_as_dict(t.collections_splits_table_name, 'collection_name="%s"' % collection_name)
        database.disconnect()

        collection_dict_to_return = {}
        for entry in list(collection_dict_from_db.values()):
            collection_name = entry['collection_name']
            bin_name = entry['bin_name']
            split = entry['split']

            if bin_name in collection_dict_to_return:
                collection_dict_to_return[bin_name].append(split)
            else:
                collection_dict_to_return[bin_name] = [split]

        return collection_dict_to_return

    def get_bins_info_dict(self, collection_name):
        """Return {bin_name: {'html_color': .., 'source': ..}} for `collection_name`."""
        self.sanity_check(collection_name)

        c = self.collections_dict[collection_name]

        database = db.DB(c['source_db_path'], c['source_db_version'])
        collections_bins_info_table = database.get_table_as_dict(t.collections_bins_info_table_name)
        database.disconnect()

        # FIXME: this could be resolved with a WHERE clause in the SQL query:
        collections_bins_info_table_filtered = utils.get_filtered_dict(collections_bins_info_table, 'collection_name', set([collection_name]))

        bins_info_dict = {}
        for v in list(collections_bins_info_table_filtered.values()):
            bins_info_dict[v['bin_name']] = {'html_color': v['html_color'], 'source': v['source']}

        return bins_info_dict

    def is_bin_in_collection(self, collection_name, bin_name):
        """Raise unless `bin_name` is a member of `collection_name`; return True otherwise."""
        self.sanity_check(collection_name)
        bins_info_dict = self.get_bins_info_dict(collection_name)

        if bin_name not in bins_info_dict:
            raise ConfigError("The bin '%s' does not seem to be a member of the collection '%s'. If you want to see all bins in "
                              "this collection you can try to add `--list-bins` to your arguments." % (bin_name, collection_name))

        return True

    def list_collections(self):
        """Print a one-line summary for every known collection."""
        self.run.warning('', 'COLLECTIONS FOUND', lc='yellow')
        for collection_name in self.collections_dict:
            c = self.collections_dict[collection_name]
            output = '%s (%d bins, representing %d items).' % (collection_name, c['num_bins'], c['num_splits'])
            self.run.info_single(output)

    def list_bins_in_collection(self, collection_name):
        """Print every bin name in `collection_name`, sorted alphabetically."""
        if collection_name not in self.collections_dict:
            raise ConfigError("The collection name '%s' is not know to anyone here :/ You have to go back, Kate." % collection_name)

        self.run.warning('', 'BINS IN COLLECTION "%s"' % collection_name, lc='yellow')
        bins_info = self.get_bins_info_dict(collection_name)
        for bin_name in sorted(bins_info.keys()):
            output = '%s.' % (bin_name)
            self.run.info_single(output)

    def merge_bins(self, collection_name, new_bin_name, bin_names_list):
        """Merges a given list of bins in a collection"""
        self.sanity_check(collection_name)

        if not self.db_path:
            raise ConfigError("Something is off. The class does not know which database it is supposed to "
                              "be working with.")

        if not isinstance(bin_names_list, list):
            # FIX: the error message used to claim "of thpe `set`" although a
            # list is what is actually checked for here
            raise ConfigError("The `bin_names_list` must be of type `list` :/")

        bins_info_dict = self.get_bins_info_dict(collection_name)
        collection_dict = self.get_collection_dict(collection_name)

        invalid_bin_names = [b for b in bin_names_list if not b in collection_dict]
        if invalid_bin_names:
            raise ConfigError("Some of the bin names you want to merge is not in the collection %s :/ Here "
                              "is a list of them: %s" % (collection_name, ', '.join(invalid_bin_names)))

        items_in_new_bin = []
        for bin_name in bin_names_list:
            items_in_new_bin.extend(collection_dict[bin_name])
            info_for_new_bin = copy.deepcopy(bins_info_dict[bin_name])

        # the merged bin inherits the metadata (html_color) of the *last* bin
        # in the list; only the source is rewritten
        info_for_new_bin['source'] = 'anvi-merge-bins'

        # time to remove the ones that are merged
        for bin_name in bin_names_list:
            bins_info_dict.pop(bin_name)
            collection_dict.pop(bin_name)

        # add the merged stuff
        bins_info_dict[new_bin_name] = info_for_new_bin
        collection_dict[new_bin_name] = items_in_new_bin

        tables_for_collections = TablesForCollections(self.db_path, run=terminal.Run(verbose=False))
        tables_for_collections.append(collection_name, collection_dict, bins_info_dict)

        self.run.info_single("You did it. Your bins are now merged.. Onward!", nl_before=1, nl_after=1)

    def export_collection(self, collection_name, output_file_prefix=None, include_unbinned=False):
        """Write the collection to '<prefix>.txt' (item -> bin) and, when bin
        metadata exists, '<prefix>-info.txt' (bin, source, color)."""
        self.sanity_check(collection_name)

        if not output_file_prefix:
            output_file_prefix = 'collection-%s' % (collection_name.strip().replace(' ', '-'))

        info_file_path = output_file_prefix + '-info.txt'
        items_file_path = output_file_prefix + '.txt'

        self.run.info('Report unbinned items if there are any', include_unbinned)
        self.run.info('Items file path', items_file_path)
        filesnpaths.is_output_file_writable(items_file_path)

        bins_info = self.get_bins_info_dict(collection_name)
        collection = self.get_collection_dict(collection_name)

        if len(bins_info):
            self.run.info('Bins info file path', info_file_path)

            if include_unbinned:
                bins_info['UNBINNED_ITEMS_BIN'] = {'html_color': '#000000', 'source': 'anvi-export-collections'}

            with open(info_file_path, 'w') as info_file:
                for bin_name in bins_info:
                    info_file.write('%s\t%s\t%s\n' % (bin_name, bins_info[bin_name]['source'], bins_info[bin_name]['html_color']))

        binned_items = set([])

        with open(items_file_path, 'w') as items_file:
            for bin_name in collection:
                for item_name in collection[bin_name]:
                    items_file.write('%s\t%s\n' % (item_name, bin_name))
                    binned_items.add(item_name)

            if include_unbinned:
                all_items = utils.get_all_item_names_from_the_database(self.db_path)

                unbinned_items = all_items.difference(binned_items)

                for item_name in unbinned_items:
                    items_file.write('%s\tUNBINNED_ITEMS_BIN\n' % (item_name))

                self.run.warning("As per your request, %d items that were not in any of the bins in the collection '%s' are stored "
                                 "in the output file under the bin name 'UNBINNED_ITEMS_BIN'." % (len(unbinned_items), collection_name))
class GetSplitNamesInBins:
    """Resolve the split names contained in the requested bins of a collection
    stored in a profile database, given standard anvi'o args."""

    def __init__(self, args):
        # the resolved split names accumulate here
        self.split_names_of_interest = set([])
        self.bins = None

        get_arg = lambda name: args.__dict__[name] if name in args.__dict__ else None
        self.bin_ids_file_path = get_arg('bin_ids_file')
        self.bin_ids_list = get_arg('bin_ids_list')
        self.bin_id = get_arg('bin_id')
        self.collection_name = get_arg('collection_name')
        self.contigs_db_path = get_arg('contigs_db')
        self.profile_db_path = get_arg('profile_db')
        self.debug = anvio.DEBUG

        if not self.profile_db_path:
            raise ConfigError("You didn't provide a profile database path. When you clearly should have :/ "
                              "This is GetSplitNamesInBins speaking. Has her eyes on you.")

        if self.bin_ids_file_path and self.bin_id:
            raise ConfigError('Either use a file to list all the bin ids (-B), or declare a single bin (-b) '
                              'you would like to focus. Not both :/')

        if not self.collection_name:
            raise ConfigError('This will not work without a collection ID for your bins :/')

        # an explicit bin request may come from a file or from a single -b flag
        if self.bin_ids_file_path:
            filesnpaths.is_file_exists(self.bin_ids_file_path)
            self.bins = set(line.strip() for line in open(self.bin_ids_file_path).readlines())
        elif self.bin_id:
            self.bins = set([self.bin_id])

        self.collections = Collections()
        self.collections.populate_collections_dict(self.profile_db_path)

        if self.collection_name not in self.collections.collections_dict:
            progress.reset()
            raise ConfigError('The collection id "%s" does not seem to be in the profile database. These are the '
                              'collections that are available through this profile database: "%s".'\
                                    % (self.collection_name, ', '.join(self.collections.collections_dict)))

        self.collection_dict = self.collections.get_collection_dict(self.collection_name)

        bins_in_collection = list(self.collection_dict.keys())

        if not self.bins:
            # no explicit request: work with every bin in the collection
            self.bins = bins_in_collection
        else:
            missing_bins = [b for b in self.bins if b not in bins_in_collection]
            if len(missing_bins):
                example_bins = bins_in_collection if len(bins_in_collection) < 30 else bins_in_collection[:30]
                raise ConfigError('Some of the bins you requested do not appear to have been described in the collection '
                                  '"%s". Here is a list of bins that are missing: "%s". Here is a list of some bins in '
                                  'your collection: "%s"' % (self.collection_name,
                                                             ', '.join(missing_bins),
                                                             ', '.join(example_bins)))

        if not len(self.bins):
            raise ConfigError('There is no bin to work with :/')

    def get_split_names_only(self):
        """Return the union of split names across the requested bins (a set)."""
        names = set()
        for bin_name in self.bins:
            names.update(self.collection_dict[bin_name])
        self.split_names_of_interest = names
        return self.split_names_of_interest

    def get_dict(self):
        """Return {bin_name: set(split names)} for the requested bins."""
        return dict((bin_name, set(self.collection_dict[bin_name])) for bin_name in self.bins)
class GetSequentialBlocksOfSplits:
    """A simple class to identify the longest stretches of consecutive
    integers in a list.

    >>> sequentials = GetSequentialBlocksOfSplits([1, 2, 3, 5, 6, 9])
    >>> sequentials.process()
    [[1, 2, 3], [5, 6], [9]]
    """

    def __init__(self, l):
        # duplicates are irrelevant for consecutive runs; sort once for the scan
        self.l = sorted(set(l))
        self.blocks = []
        self.current_block = []

    def finalize_block(self):
        """Close the block under construction and start a fresh one."""
        self.blocks.append(self.current_block)
        self.current_block = []

    def process(self):
        """Partition the input into maximal runs of consecutive integers.

        Returns the list of blocks. FIX: an empty input now yields [] (the
        previous implementation unconditionally finalized the last block and
        returned [[]] for empty input).
        """
        for value in self.l:
            # start a new block whenever the run of consecutive values breaks
            if self.current_block and value != self.current_block[-1] + 1:
                self.finalize_block()
            self.current_block.append(value)

        if self.current_block:
            self.finalize_block()

        # the original implementation consumed self.l while processing; keep
        # that post-condition for any caller that relies on it
        self.l = []

        return self.blocks
| meren/anvio | anvio/ccollections.py | Python | gpl-3.0 | 18,061 |
# -*- coding: UTF-8 -*-
"""
Some global constants and a settings object, that stores the template and
it's context along with the filenames for the diff…
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from pathlib import Path
from pkg_resources import resource_string
from appdirs import user_data_dir
__all__ = [
'Settings',
]
# External diff command; expected to be available on the user's PATH.
CMD_WDIFF = 'wdiff'

# `wdiff` option `-n` (--avoid-wraps): keep the delete/insert markup from
# spanning line breaks, so the HTML stays well-formed per line.
OPTIONS_LINEBREAK = [
    '-n',
]

# Wrap deletions and insertions in HTML tags so the raw `wdiff` output can be
# dropped straight into the HTML template.
OPTIONS_OUTPUT = [
    '--start-delete', '<del>',
    '--end-delete', '</del>',
    '--start-insert', '<ins>',
    '--end-insert', '</ins>',
]

# Per-user directory that may hold overrides for the bundled resources
# (see `load_from_resource`).
USER_DIR = user_data_dir('wdiffhtml')
def load_from_resource(name):
    """
    Returns the contents of a file resource.

    A file of the same name in the user's data directory takes precedence
    over the resource bundled with the package.
    """
    user_override = Path(USER_DIR) / name
    if not user_override.exists():
        return resource_string('wdiffhtml', 'data/' + name).decode('utf-8')
    with user_override.open() as fh:
        return fh.read()
class Settings(object):
    """
    The class holds the path to the files that should be compared as well as
    the template used for the output along with its context.

    Context Variables
    -----------------

    `org_filename`
      Display version of the name of the original file.

    `new_filename`
      Display version of the name of the changed file.

    `content`
      Will contain the (HTMLified) output from `wdiff` (just a placeholder).

    `css`
      CSS for the document.

    `js`
      JS for the document.

    `js2`
      Secondary JS for the document (loaded before the first, for frameworks…).

    `timestamp`
      :cls:`datetime.datetime` of the diff (optional).

    `version`
      revision or version of the diff (optional).

    """
    # Loaded once at class definition time; files in the user's data
    # directory override the bundled defaults (see `load_from_resource`).
    template = load_from_resource('template.jinja')

    _context = {
        'content': "",
        'css': load_from_resource('styles.css'),
        'js': load_from_resource('main.js'),
        'js2': load_from_resource('secondary.js'),
    }

    def __init__(self, org_file, new_file, template=None, **context):
        self.org_file = org_file
        self.new_file = new_file
        if template:
            # an instance attribute shadows the class-level default template
            self.template = template
        # copy the shared defaults so per-instance updates do not leak back
        self.context = self._context.copy()
        self.context['org_filename'] = Path(org_file).name
        self.context['new_filename'] = Path(new_file).name
        self.context.update(context)
| brutus/wdiffhtml | wdiffhtml/settings.py | Python | gpl-3.0 | 2,302 |
#!/usr/bin/env python3
import argparse
from mygrations.mygrate import mygrate
# argument parsing
# build the command line interface
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
    'command',
    nargs='?',
    default='version',
    choices=['version', 'apply', 'check', 'import', 'plan', 'plan_export'],
    help='Action to execute (default: version)'
)
arg_parser.add_argument(
    '--config', default='mygrate.conf', help='Location of mygrate configuration file (default: mygrate.conf)'
)
arg_parser.add_argument('-f', dest='force', action='store_true', help='Ignore errors/warnings and execute command anyway')
arg_parser.add_argument('-v', dest='version', action='store_true', help='Display version')
cli_args = arg_parser.parse_args()

# hand the parsed options to a mygrate instance and run the requested command
migration = mygrate(cli_args.command, vars(cli_args))
migration.execute()
| cmancone/mygrations | mygrate.py | Python | mit | 795 |
from enum import Enum
class ExpressionType(Enum):
    """The kinds of PDDL expressions the parser distinguishes; each value is
    the keyword as it appears in the PDDL source ("" for a plain predicate
    application)."""
    CUSTOM = ""
    AND = "and"
    OR = "or"
    NOT = "not"
    WHEN = "when"
    FORALL = "forall"
class Expression:
    """One node of a PDDL logical expression tree.

    Depending on `expression_type`:
      * CUSTOM -- `predicate` applied to `objects`, e.g. ``(on a b)``
      * FORALL -- quantified `objects` plus subexpressions
      * AND/OR/NOT/WHEN -- only `subexpressions` are meaningful

    FIX: the mutable class-level default attributes (``objects = []``,
    ``subexpressions = []`` etc.) were removed; every instance assigns its own
    values in __init__ so they were never read, and shared mutable state on a
    class is an aliasing hazard.
    """

    def __init__(self, expression_type, predicate, objects, subexpressions):
        self.expression_type = expression_type
        self.predicate = predicate
        self.objects = objects
        self.subexpressions = subexpressions

    def __repr__(self):
        """Render the expression back into PDDL-style text; every closing
        parenthesis is followed by a newline, as the original format emitted."""
        ret = "("
        if self.expression_type == ExpressionType(""):
            # plain predicate application: (pred obj1 obj2 ...)
            ret += self.predicate
            for obj in self.objects:
                ret += " "
                ret += obj
        elif self.expression_type == ExpressionType("forall"):
            # quantifier: (forall ( vars ) subexpr ...)
            ret += "forall ("
            for obj in self.objects:
                ret += " "
                ret += obj
            ret += ")"
            for subexpr in self.subexpressions:
                ret += " "
                ret += repr(subexpr)
        else:
            # connective: (and/or/not/when subexpr ...)
            ret += self.expression_type.value
            for subexpr in self.subexpressions:
                ret += " "
                ret += repr(subexpr)
        ret += ")\n"
        return ret
class Problem:
    # Class-level defaults; __init__ re-assigns every field per instance, so
    # these mainly document the attributes a Problem carries.
    problem_name = ""
    domain_name = ""
    objects = []
    init_expressions = []
    goal_expression = None
    _file = None

    def __init__(self):
        # fields are filled in by the _parse_* methods afterwards
        self.problem_name = ""
        self.domain_name = ""
        self.objects = []
        self.init_expressions = []
        self.goal_expression = None
def __repr__(self):
ret = "(define (problem "
ret += self.problem_name
ret += ")\n(:domain "
ret += self.domain_name
ret += ")\n(:objects "
for obj in self.objects:
ret += " "
ret += obj
ret += ")\n(:init\n"
for expr in self.init_expressions:
ret += repr(expr)
ret += ")\n(:goal\n"
ret += repr(self.goal_expression)
ret += ")\n)"
return ret
    def _get_token(self):
        """Return the next lower-cased token from self._file, or None on EOF.

        Tokens are runs of non-whitespace characters; '(', ')', '?' and ':'
        are always single-character tokens. self._file is read one byte at a
        time and decoded as ASCII, so it is expected to be opened in binary
        mode; a delimiter that ends a longer token is pushed back with seek().
        """
        token = ""
        # Consume space and the first char of token.
        while True:
            c = self._file.read(1).decode("ascii")
            if not c:
                return None
            elif c.isspace():
                continue
            else:
                token += c
                break
        # If the token is any of ()?: then we return it.
        if token == "(" or token == ")" or token == "?" or token == ":":
            return token.lower()
        # Consume rest of the token's chars.
        while True:
            c = self._file.read(1).decode("ascii")
            if not c:
                return token.lower()
            elif c == "(" or c == ")" or c == "?" or c == ":":
                self._file.seek(-1, 1)  # Seek one byte back: push the delimiter back for the next call.
                return token.lower()
            elif not c.isspace():
                token += c
                continue
            elif c.isspace():
                return token.lower()
def _parse_start(self):
token1 = self._get_token()
token2 = self._get_token()
if token1 != "(" or token2 != "define":
return -1
return 0
def _parse_problem_name(self):
token1 = self._get_token()
token2 = self._get_token()
token3 = self._get_token()
token4 = self._get_token()
if token1 != "(" or token2 != "problem" or token4 != ")":
return -1
self.problem_name = token3
return 0
def _parse_domain_name(self):
token1 = self._get_token()
token2 = self._get_token()
token3 = self._get_token()
token4 = self._get_token()
token5 = self._get_token()
if token1 != "(" or token2 != ":" or token3 != "domain" or token5 != ")":
return -1
self.domain_name = token4
return 0
def _parse_object_list(self):
objects = []
while True:
token = self._get_token()
if token == ")":
return objects
if not token:
return None
objects.append(token)
def _parse_objects(self):
token1 = self._get_token()
token2 = self._get_token()
token3 = self._get_token()
if token1 != "(" or token2 != ":" or token3 != "objects":
return -1
objects = self._parse_object_list()
if not objects:
return -1
self.objects = objects
return 0
    def _parse_expression(self):
        """Recursively parse one expression starting at "(".

        Returns an Expression, or None when the next token is not "(" —
        which also serves as the terminator when a caller reads a list of
        expressions and hits the enclosing ")".
        """
        token1 = self._get_token()
        if token1 != "(":
            return None
        token2 = self._get_token()
        predicate = ""
        subexpressions = []
        objects = []
        try:
            # ExpressionType() raises ValueError for anything that is not a
            # known connective; that case is handled below as an atom.
            expression_type = ExpressionType(token2)
            if expression_type == ExpressionType("and") or expression_type == ExpressionType("or"):
                # n-ary connective: parse children until one fails, which
                # also consumes the closing ")".
                while True:
                    subexpression = self._parse_expression()
                    if not subexpression:
                        break
                    subexpressions.append(subexpression)
            elif expression_type == ExpressionType("not"):
                # Unary: exactly one child, then the closing ")".
                subexpression = self._parse_expression()
                if not subexpression:
                    return None
                subexpressions.append(subexpression)
                token3 = self._get_token()
                if token3 != ")":
                    return None
            elif expression_type == ExpressionType("when"):
                # Conditional effect: condition and effect, then ")".
                subexpression1 = self._parse_expression()
                subexpression2 = self._parse_expression()
                if not subexpression1 or not subexpression2:
                    return None
                subexpressions.append(subexpression1)
                subexpressions.append(subexpression2)
                token3 = self._get_token()
                if token3 != ")":
                    return None
            elif expression_type == ExpressionType("forall"):
                # Quantifier: "(<vars>)" then the quantified body, then ")".
                token3 = self._get_token()
                if token3 != "(":
                    return None
                objects = self._parse_objects()
                subexpression = self._parse_expression()
                subexpressions.append(subexpression)
                token4 = self._get_token()
                if token4 != ")":
                    return None
            return Expression(expression_type, predicate, objects, subexpressions)
        except ValueError:  # It's not any of and, or, not, forall, when.
            # Atomic expression: token2 is the predicate and the remaining
            # tokens up to ")" are its arguments.
            expression_type = ExpressionType("")
            predicate = token2
            objects = self._parse_object_list()
            return Expression(expression_type, predicate, objects, subexpressions)
def _parse_init(self):
token1 = self._get_token()
token2 = self._get_token()
token3 = self._get_token()
if token1 != "(" or token2 != ":" or token3 != "init":
return -1
while True:
expression = self._parse_expression()
if not expression:
return 0
self.init_expressions.append(expression)
def _parse_goal(self):
token1 = self._get_token()
token2 = self._get_token()
token3 = self._get_token()
if token1 != "(" or token2 != ":" or token3 != "goal":
return -1
expression = self._parse_expression()
if not expression:
return -1
self.goal_expression = expression
return 0
def parse(self, filename):
self._file = open(filename, "rb")
if self._parse_start() != 0:
print("Beginning is incorrect.")
return
if self._parse_problem_name() != 0:
print("Problem name given incorrectly.")
return
if self._parse_domain_name() != 0:
print("Domain name given incorrectly.")
if self._parse_objects() != 0:
print("Objects given incorrectly.")
if self._parse_init() != 0:
print("Init given incorrectly.")
if self._parse_goal() != 0:
print("Goal given incorrectly.")
| CinnamonHAB/cinnamonHAB | pddl_parser/pddl_problem.py | Python | gpl-3.0 | 8,213 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import mock
from nova.api.openstack import extensions as api_extensions
from nova.openstack.common import jsonutils
from nova.tests.integrated.v3 import api_sample_base
class ExtensionInfoSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the v3 extension-info resource."""

    sample_dir = "extension-info"

    def test_list_extensions(self):
        # GET the full extension list and verify it against the stored
        # sample template.
        resp = self._do_get('extensions')
        self._verify_response('extensions-list-resp', self._get_regexes(),
                              resp, 200)

    def test_get_extensions(self):
        # GET a single extension (flavors) and verify it against its sample.
        resp = self._do_get('extensions/flavors')
        self._verify_response('extensions-get-resp', self._get_regexes(),
                              resp, 200)
class ExtensionInfoFormatTest(api_sample_base.ApiSampleTestBaseV3):
    """Checks that every extension's name and alias follow the conventions."""

    # NOTE: To check all extension formats, here makes authorize() return True
    # always instead of fake_policy.py because most extensions are not set as
    # "discoverable" in fake_policy.py.
    all_extensions = True

    def _test_list_extensions(self, key, pattern):
        # Stub out the policy check so every extension is listed.
        def fake_soft_extension_authorizer(api_name, extension_name):
            def authorize(context, action=None):
                return True
            return authorize

        with mock.patch.object(api_extensions,
                               'soft_extension_authorizer') as api_mock:
            api_mock.side_effect = fake_soft_extension_authorizer
            body = jsonutils.loads(self._do_get('extensions').read())
            regex = re.compile(pattern)
            for ext in body['extensions']:
                self.assertIsNotNone(regex.match(ext[key]),
                                     '%s does not match with %s' % (ext[key],
                                                                    pattern))

    def test_list_extensions_name_format(self):
        # name should be CamelCase.
        self._test_list_extensions('name', '^[A-Z]{1}[a-z]{1}[a-zA-Z]*$')

    def test_list_extensions_alias_format(self):
        # alias should contain lowercase chars and '-' only.
        self._test_list_extensions('alias', '^[a-z-]+$')
| srajag/nova | nova/tests/integrated/v3/test_extension_info.py | Python | apache-2.0 | 2,868 |
#!/usr/bin/python
## Printing troubleshooter
## Copyright (C) 2008 Red Hat, Inc.
## Copyright (C) 2008 Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import cups
from timedops import TimedOperation
from base import *
class CheckLocalServerPublishing(Question):
    """Troubleshooter step: detect a CUPS server that has shared queues
    but is not actually exporting them to the network."""

    def __init__ (self, troubleshooter):
        Question.__init__ (self, troubleshooter, "Is local server publishing?")
        vbox = self.initial_vbox (_("Server Not Exporting Printers"),
                                  _("Although one or more printers are marked "
                                    "as being shared, this print server is "
                                    "not exporting shared printers to the "
                                    "network.") + '\n\n' +
                                  _("Enable the 'Publish shared printers "
                                    "connected to this system' option in "
                                    "the server settings using the printing "
                                    "administration tool.") + ' ' +
                                  _(TEXT_start_print_admin_tool))
        troubleshooter.new_page (vbox, self)

    def display (self):
        """Return True (show this page) when a shared printer exists but
        the server reports it is not sharing printers."""
        self.answers = {}
        cups.setServer ('')
        parent = self.troubleshooter.get_window ()
        try:
            c = self.timedop (cups.Connection, parent=parent).run ()
            printers = self.timedop (c.getPrinters, parent=parent).run ()
            if len (printers) == 0:
                return False

            # Find the first printer marked as shared (if any).
            for name, printer in printers.iteritems ():
                if printer.get ('printer-is-shared', False):
                    break

            # BUG FIX: the original fetched the attributes inside the loop,
            # before the break, so 'attr' was unbound (NameError outside
            # this try block) whenever the first printer examined was the
            # shared one, and otherwise held a non-shared printer's
            # attributes.  Fetch them once for the selected printer.
            attr = self.timedop (c.getPrinterAttributes,
                                 args=(name,),
                                 parent=parent).run ()
        except RuntimeError:
            return False
        except cups.IPPError:
            return False

        if not printer.get ('printer-is-shared', False):
            return False

        if attr.get ('server-is-sharing-printers', True):
            # server-is-sharing-printers is in CUPS 1.4
            return False

        return True

    def collect_answer (self):
        # Only record an answer when this page was actually shown.
        if self.displayed:
            return { 'local_server_exporting_printers': False }
        return {}

    def cancel_operation (self):
        self.op.cancel ()

    def timedop (self, *args, **kwargs):
        # Remember the operation so cancel_operation() can abort it.
        self.op = TimedOperation (*args, **kwargs)
        return self.op
| hamonikr-root/system-config-printer-gnome | troubleshoot/CheckLocalServerPublishing.py | Python | gpl-2.0 | 3,179 |
import os
import h5py
import numpy as np
from pyspawn.traj import traj
class hessian(traj):
    """Trajectory subclass that builds a numerical Hessian by central
    finite differences of the analytic gradient, checkpointing each row
    to hessian.hdf5 so an interrupted run can resume."""

    def build_hessian_hdf5_semianalytical(self, dr):
        """Compute the Hessian with step size dr and store it in
        hessian.hdf5.

        Rows still holding the -1000 sentinel are (re)computed; a file
        whose rows are all computed is left untouched.
        """
        ndims = self.get_numdims()
        self.set_timestep(1.0)
        self.compute_elec_struct(False)
        filename = "hessian.hdf5"
        if not os.path.isfile(filename):
            # Fresh run: store the reference geometry and fill the Hessian
            # with the -1000 sentinel marking "not yet computed" rows.
            h5f = h5py.File(filename, "a")
            dset = h5f.create_dataset("geometry", (1, ndims))
            dset[:, :] = self.get_positions().reshape(1, ndims)
            dset = h5f.create_dataset("hessian", (ndims, ndims))
            dset[:, :] = -1000.0 * np.ones((ndims, ndims))
            mindim = 0
        else:
            # Restart: restore the stored geometry and resume at the first
            # row that still holds the sentinel value.
            h5f = h5py.File(filename, "a")
            self.set_positions(h5f.get("geometry")[:, :].flatten())
            dset = h5f.get("hessian")
            mindim = -1
            for idim in range(ndims):
                if mindim < 0:
                    tmp = dset[idim, 0]
                    if tmp < -999.0 and tmp > -1001.0:
                        mindim = idim
            if mindim < 0:
                # BUG FIX: all rows are already computed.  The original
                # left mindim at -1, so range(-1, ndims) first perturbed
                # coordinate -1 (overwriting the last Hessian row) and
                # then needlessly recomputed the entire Hessian.
                mindim = ndims
        h5f.close()
        for idim in range(mindim, ndims):
            pos = self.get_positions()
            # Gradient at r + dr along coordinate idim.
            pos[idim] += dr
            self.set_positions(pos)
            self.compute_elec_struct(False)
            gp = -1.0 * self.get_forces_i()
            # Gradient at r - dr.
            pos[idim] -= 2.0 * dr
            self.set_positions(pos)
            self.compute_elec_struct(False)
            gm = -1.0 * self.get_forces_i()
            # Restore the original position.
            pos[idim] += dr
            self.set_positions(pos)
            # Central-difference second-derivative row.
            de2dr2 = (gp - gm) / (2.0 * dr)
            # Write the finished row immediately so a crash loses at most
            # one row of work.
            h5f = h5py.File(filename, "a")
            h5f.get("hessian")[idim, :] = de2dr2
            h5f.close()
        print("Done building hessian.hdf5!")
| blevine37/pySpawn17 | pyspawn/hessian.py | Python | mit | 2,477 |
import logging
import logging.handlers
import os
import subprocess

import splunk.Intersplunk as si
from splunk.appserver.mrsparkle.lib.util import make_splunkhome_path
# SSH credentials used to reach the monitored endpoint.
USER = 'ubuntu'
SSH_KEY = make_splunkhome_path(['etc', 'apps', 'bsides-austin-2015-app','default','bsides_demo.pem'])
#makes a local path to store logs to be ingested in inputs.conf
BASE_DIR = make_splunkhome_path(["etc","apps","bsides-austin-2015-app"])
#adjusted for windows path
EVIDENCE_LOG_PATH = os.path.join(BASE_DIR,'log','evidence.log')
def setup_logger():
    """
    Set up and return the rotating-file logger for the bsides app.
    """
    logger = logging.getLogger('bsides')
    # Prevent the log messages from being duplicated in the python.log
    # AuthorizationFailed
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    # BUG FIX: RotatingFileHandler lives in the logging.handlers submodule,
    # which "import logging" alone does not guarantee is loaded; the module
    # now imports logging.handlers explicitly at the top of the file.
    file_handler = logging.handlers.RotatingFileHandler(
        make_splunkhome_path(['etc', 'apps', 'bsides-austin-2015-app', 'logs',
                              'bsides.log']),
        maxBytes=25000000, backupCount=5)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
# Module-level logger shared by the command functions below.
logger = setup_logger()
def sysdigstart(process_name, endpoint):
    """Start a sysdig capture on a remote endpoint over SSH, filtering on
    processes whose name contains process_name.

    Returns the list of captured stdout lines, or None when no process
    name was supplied.
    """
    if not process_name:
        return
    ssh = subprocess.Popen(
        ['ssh', '-o StrictHostKeyChecking=no', '-i{0}'.format(SSH_KEY),
         '{0}@{1}'.format(USER, endpoint),
         'sysdig -v proc.name contains', process_name],
        shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    captured = ssh.stdout.readlines()
    if captured:
        logger.debug(captured)
    else:
        # Nothing captured: log whatever the remote side wrote to stderr.
        logger.error(ssh.stderr.readlines())
    return captured
if __name__ == '__main__':
    try:
        results, dummyresults, settings = si.getOrganizedResults()
        keywords, options = si.getKeywordsAndOptions()
        # Unbuffered handle so evidence reaches disk as soon as written.
        bufsize = 0
        f = open(EVIDENCE_LOG_PATH, 'w', bufsize)
        try:
            for entry in results:
                # Per-event fields override the search-command options.
                if "endpoint" in entry:
                    endpoint = entry["endpoint"]
                else:
                    endpoint = options.get('endpoint', None)
                if "process_name" in entry:
                    process_name = entry["process_name"]
                else:
                    process_name = options.get('process_name', None)
                # BUG FIX: the original called the undefined name
                # prockill() (NameError at runtime); this command starts a
                # sysdig capture, so call sysdigstart().
                result = sysdigstart(process_name, endpoint)
                logger.warn('sent sysdig collection to endpoint {0} with a process name that contains {1} )'.format(endpoint,process_name))
                # BUG FIX: sysdigstart() returns a list of lines (or None),
                # which f.write() cannot take; write the lines instead.
                f.writelines(result or [])
                # Should not get here; should use the stop command.
                break
        finally:
            # BUG FIX: close the handle even when an iteration raises.
            f.close()
    except Exception as e:
        logger.error("There was an issue establishing arguments for the " +
                     "sysdig search command!")
        logger.exception(str(e))
| divious1/bsides-austin-2015 | bsides-austin-2015-app/bin/sysdigstart.py | Python | apache-2.0 | 3,007 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import sys
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
paddle.enable_static()
# Use sparse (row-wise) gradient updates for all embedding tables below.
IS_SPARSE = True
# Passed to main() by the __main__ guard at the bottom of the file.
USE_GPU = False
BATCH_SIZE = 256
def get_usr_combined_features():
    """Build the user-side feature tower.

    Embeds user id, gender, age bucket and job id, passes each through a
    small fc layer, and fuses them into one 200-d tanh feature vector.
    Each layers.* call appends ops to the default main program, so the
    statement order here is significant.
    """
    # FIXME(dzh) : old API integer_value(10) may has range check.
    # currently we don't have user configurated check.
    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
    uid = layers.data(name='user_id', shape=[1], dtype='int64')
    usr_emb = layers.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)
    usr_fc = layers.fc(input=usr_emb, size=32)
    # Binary gender feature.
    USR_GENDER_DICT_SIZE = 2
    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
    usr_gender_emb = layers.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)
    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
    # Age is bucketed via the movielens age table.
    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
    usr_age_emb = layers.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')
    usr_age_fc = layers.fc(input=usr_age_emb, size=16)
    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
    usr_job_emb = layers.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)
    usr_job_fc = layers.fc(input=usr_job_emb, size=16)
    # Fuse the four sub-features into a single 200-d vector.
    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
    return usr_combined_features
def get_mov_combined_features():
    """Build the movie-side feature tower.

    Embeds movie id, pools the category embeddings, runs a text conv over
    the title, and fuses everything into one 200-d tanh feature vector.
    Each layers.* call appends ops to the default main program, so the
    statement order here is significant.
    """
    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
    mov_emb = layers.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)
    mov_fc = layers.fc(input=mov_emb, size=32)
    # Variable-length category list, sum-pooled into one vector.
    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
    category_id = layers.data(
        name='category_id', shape=[1], dtype='int64', lod_level=1)
    mov_categories_emb = layers.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")
    # Title words go through a conv + sum-pool text encoder.
    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
    mov_title_id = layers.data(
        name='movie_title', shape=[1], dtype='int64', lod_level=1)
    mov_title_emb = layers.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")
    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
    # FIXME(dzh) : need tanh operator
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
    return mov_combined_features
def model():
    """Assemble the rating-prediction network.

    Returns (scale_infer, avg_cost): the 0-5 scaled prediction op and the
    mean squared-error loss against the 'score' input.
    """
    user_features = get_usr_combined_features()
    movie_features = get_mov_combined_features()
    # Cosine similarity of the two 200-d towers, scaled to the rating range.
    similarity = layers.cos_sim(X=user_features, Y=movie_features)
    scale_infer = layers.scale(x=similarity, scale=5.0)
    label = layers.data(name='score', shape=[1], dtype='float32')
    square_cost = layers.square_error_cost(input=scale_infer, label=label)
    avg_cost = layers.mean(square_cost)
    return scale_infer, avg_cost
def train(use_cuda, save_dirname, is_local=True):
    """Train the recommender until test loss < 6.0, then save the
    inference model to save_dirname.

    When is_local is False, runs in distributed mode using the
    PADDLE_* environment variables to decide the pserver/trainer role.
    """
    scale_infer, avg_cost = model()
    # test program
    test_program = fluid.default_main_program().clone(for_test=True)
    sgd_optimizer = SGDOptimizer(learning_rate=0.2)
    sgd_optimizer.minimize(avg_cost)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = Executor(place)
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.movielens.train(), buf_size=8192),
        batch_size=BATCH_SIZE)
    test_reader = paddle.batch(
        paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
    # Must match the layers.data names declared in the feature towers.
    feed_order = [
        'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
        'movie_title', 'score'
    ]
    def train_loop(main_program):
        exe.run(framework.default_startup_program())
        feed_list = [
            main_program.global_block().var(var_name) for var_name in feed_order
        ]
        feeder = fluid.DataFeeder(feed_list, place)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for batch_id, data in enumerate(train_reader()):
                # train a mini-batch
                outs = exe.run(program=main_program,
                               feed=feeder.feed(data),
                               fetch_list=[avg_cost])
                out = np.array(outs[0])
                # Evaluate on the test set every 10 batches.
                if (batch_id + 1) % 10 == 0:
                    avg_cost_set = []
                    for test_data in test_reader():
                        avg_cost_np = exe.run(program=test_program,
                                              feed=feeder.feed(test_data),
                                              fetch_list=[avg_cost])
                        avg_cost_set.append(avg_cost_np[0])
                        break  # test only 1 segment for speeding up CI
                    # get test avg_cost
                    test_avg_cost = np.array(avg_cost_set).mean()
                    if test_avg_cost < 6.0:
                        # if avg_cost less than 6.0, we think our code is good.
                        if save_dirname is not None:
                            fluid.io.save_inference_model(save_dirname, [
                                "user_id", "gender_id", "age_id", "job_id",
                                "movie_id", "category_id", "movie_title"
                            ], [scale_infer], exe)
                        return
                if math.isnan(float(out[0])):
                    sys.exit("got NaN loss, training failed.")
    if is_local:
        train_loop(fluid.default_main_program())
    else:
        # Distributed mode: build pserver/trainer programs from env config.
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())
def infer(use_cuda, save_dirname=None):
    """Load the saved inference model and score one hand-built sample.

    Does nothing when save_dirname is None (i.e. training never saved).
    """
    if save_dirname is None:
        return
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
        # Use the first data from paddle.dataset.movielens.test() as input
        assert feed_target_names[0] == "user_id"
        # Use create_lod_tensor(data, recursive_sequence_lengths, place) API
        # to generate LoD Tensor where `data` is a list of sequences of index
        # numbers, `recursive_sequence_lengths` is the length-based level of detail
        # (lod) info associated with `data`.
        # For example, data = [[10, 2, 3], [2, 3]] means that it contains
        # two sequences of indexes, of length 3 and 2, respectively.
        # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
        # level of detail info, indicating that `data` consists of two sequences
        # of length 3 and 2, respectively.
        user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[np.int64(0)]], [[1]], place)
        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[np.int64(10)]], [[1]], place)
        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[np.int64(783)]], [[1]], place)
        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor(
            [np.array(
                [10, 8, 9], dtype='int64')], [[3]], place)
        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor(
            [np.array(
                [1069, 4140, 2923, 710, 988], dtype='int64')], [[5]],
            place)
        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: user_id,
                              feed_target_names[1]: gender_id,
                              feed_target_names[2]: age_id,
                              feed_target_names[3]: job_id,
                              feed_target_names[4]: movie_id,
                              feed_target_names[5]: category_id,
                              feed_target_names[6]: movie_title
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print("inferred score: ", np.array(results[0]))
def main(use_cuda):
    """Train the model and then run inference on the saved artifacts.

    Silently skipped when CUDA is requested but this build lacks it.
    """
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    # Directory for saving the inference model.
    model_dir = "recommender_system.inference.model"
    train(use_cuda, model_dir)
    infer(use_cuda, model_dir)
if __name__ == '__main__':
    # USE_GPU is a module-level constant defined near the top of the file.
    main(USE_GPU)
| PaddlePaddle/Paddle | python/paddle/fluid/tests/book/test_recommender_system.py | Python | apache-2.0 | 12,089 |
import csv
import numpy as np
from apiclient.discovery import build
import json
# Load Dataset
with open("./Youtube History Data.csv") as f:
reader = csv.reader(f)
raw_data = [list(x) for x in reader]
data = np.array(raw_data[1:])
# Youtube Data Collection
with open('youtube_config.json') as f:
DEVELOPER_KEY = json.loads(f.read())['api_key']
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
def get_video_tags(id_):
    """Return the tag list for a YouTube video id, or None when the video
    has no tags or the API returned no items (which is also printed)."""
    response = youtube.videos().list(
        id=id_,
        part="snippet",
    ).execute()
    try:
        snippet = response['items'][0]['snippet']
    except IndexError:
        # No items came back for this id; dump the raw response.
        print(response)
        return
    return snippet.get('tags')
# Lazily build one record per history row: column 2 is the video id,
# column 1 the watch date.
video_tags = ({'id': x[2], 'date': x[1], 'tags': get_video_tags(x[2])}
              for x in data)
# Write one JSON object per line (JSON Lines format).
with open('video_data.json', 'w+') as f:
    for tag_dict in video_tags:
        f.write(json.dumps(tag_dict))
        f.write('\n')
| bcongdon/Data-Science-Projects | youtube-history/tag_scraper.py | Python | gpl-3.0 | 1,084 |
# -*- coding: utf-8 -*-
#
# lld documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lld'
# Copyright year tracks the build date automatically.
copyright = u'2011-%d, LLVM Project' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short version.
version = '10'
# The full version, including alpha/beta/rc tags.
release = '10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'llvm-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# The llvm-theme above is looked up in this docs directory itself.
html_theme_path = ["."]
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# If given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers use
# this as icon for tabs, windows and bookmarks. It should be a Windows-style
# icon file (.ico), which is 16x16 or 32x32 pixels large. Default: None.  The
# image file will be copied to the _static directory of the output HTML, but
# only if the file does not already exist there.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'llddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('contents', 'lld.tex', u'lld Documentation',
   u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('contents', 'lld', u'lld Documentation',
     [u'LLVM project'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('contents', 'lld', u'lld Documentation',
   u'LLVM project', 'lld', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# FIXME: Define intersphinx configration.
intersphinx_mapping = {}
# -- Options for extensions ----------------------------------------------------
# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True
| llvm-mirror/lld | docs/conf.py | Python | apache-2.0 | 8,299 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest2
from st2auth_flat_file_backend.flat_file import FlatFileAuthenticationBackend
# Absolute directory of this test module; fixture paths below are resolved relative to it.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class FlatFileAuthenticationBackendTestCase(unittest2.TestCase):
    """Tests for FlatFileAuthenticationBackend against bundled htpasswd fixtures."""

    def _assert_standard_auth_behavior(self, backend):
        """Run the assertions shared by the with/without-comments fixtures."""
        # Inexistent user
        self.assertFalse(backend.authenticate(username='doesntexist', password='bar'))
        # Invalid password
        self.assertFalse(backend.authenticate(username='test1', password='bar'))
        # Valid password (md5 hash)
        self.assertTrue(backend.authenticate(username='test1', password='testpassword'))
        # Valid password (sha hash - insecure)
        self.assertTrue(backend.authenticate(username='test3', password='testpassword'))
        # Valid password (crypt - insecure)
        self.assertTrue(backend.authenticate(username='test4', password='testpassword'))

    def test_authenticate_httpasswd_file_without_comments(self):
        file_path = os.path.join(BASE_DIR, '../fixtures/htpasswd_test')
        backend = FlatFileAuthenticationBackend(file_path=file_path)
        self._assert_standard_auth_behavior(backend)

    def test_authenticate_httpasswd_file_with_comments(self):
        file_path = os.path.join(BASE_DIR, '../fixtures/htpasswd_test_with_comments')
        backend = FlatFileAuthenticationBackend(file_path=file_path)
        self._assert_standard_auth_behavior(backend)

    def test_authenticate_httpasswd_file_doesnt_exist(self):
        # A missing htpasswd file should surface as an IOError at authenticate time.
        file_path = os.path.join(BASE_DIR, '../fixtures/htpasswd_doesnt_exist')
        backend = FlatFileAuthenticationBackend(file_path=file_path)
        self.assertRaises(IOError, backend.authenticate, username='doesntexist', password='bar')
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    sys.exit(unittest2.main())
| StackStorm/st2-auth-backend-flat-file | tests/unit/test_flat_file_backend.py | Python | apache-2.0 | 3,052 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity/support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_task
short_description: Run, start or stop a task in ecs
description:
- Creates or deletes instances of task definitions.
version_added: "2.0"
author: Mark Chance (@Java1Guy)
requirements: [ json, botocore, boto3 ]
options:
operation:
description:
- Which task operation to execute.
required: True
choices: ['run', 'start', 'stop']
type: str
cluster:
description:
- The name of the cluster to run the task on.
required: False
type: str
task_definition:
description:
- The task definition to start or run.
required: False
type: str
overrides:
description:
- A dictionary of values to pass to the new instances.
required: False
type: dict
count:
description:
- How many new instances to start.
required: False
type: int
task:
description:
- The task to stop.
required: False
type: str
container_instances:
description:
- The list of container instances on which to deploy the task.
required: False
type: list
elements: str
started_by:
description:
- A value showing who or what started the task (for informational purposes).
required: False
type: str
network_configuration:
description:
- Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc).
type: dict
suboptions:
subnets:
description: A list of subnet IDs to which the task is attached.
type: list
elements: str
security_groups:
description: A list of group names or group IDs for the task.
type: list
elements: str
version_added: 2.6
launch_type:
description:
- The launch type on which to run your service.
required: false
version_added: 2.8
choices: ["EC2", "FARGATE"]
type: str
tags:
type: dict
description:
- Tags that will be added to ecs tasks on start and run
required: false
version_added: "2.10"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of run task
- name: Run task
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
count: 1
started_by: ansible_user
register: task_output
# Simple example of start task
- name: Start a task
ecs_task:
operation: start
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
tags:
resourceName: a_task_for_ansible_to_run
type: long_running_task
network: internal
version: 1.4
container_instances:
- arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
started_by: ansible_user
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: RUN a task on Fargate
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
started_by: ansible_user
launch_type: FARGATE
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: Stop a task
ecs_task:
operation: stop
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
'''
RETURN = '''
task:
description: details about the task that was started
returned: success
type: complex
contains:
taskArn:
description: The Amazon Resource Name (ARN) that identifies the task.
returned: always
type: str
clusterArn:
            description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
returned: only when details is true
type: str
taskDefinitionArn:
description: The Amazon Resource Name (ARN) of the task definition.
returned: only when details is true
type: str
containerInstanceArn:
description: The Amazon Resource Name (ARN) of the container running the task.
returned: only when details is true
type: str
overrides:
description: The container overrides set for this task.
returned: only when details is true
type: list
elements: dict
lastStatus:
description: The last recorded status of the task.
returned: only when details is true
type: str
desiredStatus:
description: The desired status of the task.
returned: only when details is true
type: str
containers:
description: The container details.
returned: only when details is true
type: list
elements: dict
startedBy:
            description: The user who started the task.
returned: only when details is true
type: str
stoppedReason:
description: The reason why the task was stopped.
returned: only when details is true
type: str
createdAt:
description: The timestamp of when the task was created.
returned: only when details is true
type: str
startedAt:
description: The timestamp of when the task was started.
returned: only when details is true
type: str
stoppedAt:
description: The timestamp of when the task was stopped.
returned: only when details is true
type: str
launchType:
description: The launch type on which to run your task.
returned: always
type: str
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
class EcsExecManager:
    """Thin wrapper around the boto3 ECS/EC2 clients used by this module.

    All AWS API failures are reported through ``module.fail_json_aws`` so
    every public method either returns normally or exits the module.
    """

    def __init__(self, module):
        self.module = module
        self.ecs = module.client('ecs')
        self.ec2 = module.client('ec2')

    def format_network_configuration(self, network_config):
        """Translate the module's ``network_configuration`` option into the
        ``awsvpcConfiguration`` structure expected by the ECS API.

        Security group *names* are resolved to ids within the VPC of the
        first subnet; ids (``sg-...``) are passed through unchanged.
        """
        result = dict()
        if 'subnets' in network_config:
            result['subnets'] = network_config['subnets']
        else:
            self.module.fail_json(msg="Network configuration must include subnets")
        if 'security_groups' in network_config:
            groups = network_config['security_groups']
            if any(not sg.startswith('sg-') for sg in groups):
                try:
                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
                    groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't look up security groups")
            result['securityGroups'] = groups
        return dict(awsvpcConfiguration=result)

    def list_tasks(self, cluster_name, service_name, status):
        """Return the ARN of a task of family ``service_name`` in the given
        desired status, or ``None`` when no matching task exists."""
        try:
            response = self.ecs.list_tasks(
                cluster=cluster_name,
                family=service_name,
                desiredStatus=status
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # Consistent with run/start/stop: surface API errors via fail_json_aws
            # instead of crashing with a raw traceback.
            self.module.fail_json_aws(e, msg="Couldn't list tasks")
        for task_arn in response['taskArns']:
            if task_arn.endswith(service_name):
                return task_arn
        return None

    def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags):
        """Run ``count`` new instances of ``task_definition``; returns the task list."""
        if overrides is None:
            overrides = dict()
        params = dict(cluster=cluster, taskDefinition=task_definition,
                      overrides=overrides, count=count, startedBy=startedBy)
        if self.module.params['network_configuration']:
            params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
        if launch_type:
            params['launchType'] = launch_type
        if tags:
            params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
        # TODO: need to check if long arn format enabled.
        try:
            response = self.ecs.run_task(**params)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't run task")
        # include tasks and failures
        return response['tasks']

    def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags):
        """Start a task on specific container instances; returns the task list."""
        args = dict()
        if cluster:
            args['cluster'] = cluster
        if task_definition:
            args['taskDefinition'] = task_definition
        if overrides:
            args['overrides'] = overrides
        if container_instances:
            args['containerInstances'] = container_instances
        if startedBy:
            args['startedBy'] = startedBy
        if self.module.params['network_configuration']:
            args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
        if tags:
            args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
        try:
            response = self.ecs.start_task(**args)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't start task")
        # include tasks and failures
        return response['tasks']

    def stop_task(self, cluster, task):
        """Stop a running task; returns the stopped task description."""
        try:
            response = self.ecs.stop_task(cluster=cluster, task=task)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # Previously unhandled; made consistent with run_task/start_task.
            self.module.fail_json_aws(e, msg="Couldn't stop task")
        return response['task']

    def _botocore_at_least(self, version):
        """True when the installed botocore is at least ``version``.

        There doesn't seem to be a nice way to inspect botocore for supported
        request parameters (networkConfiguration, launchType, tags are plain
        keyword arguments), so feature support is gated on the library version.
        """
        from distutils.version import LooseVersion
        return LooseVersion(botocore.__version__) >= LooseVersion(version)

    def ecs_api_handles_launch_type(self):
        """launchType requires botocore >= 1.8.4."""
        return self._botocore_at_least('1.8.4')

    def ecs_task_long_format_enabled(self):
        """True when the account emits long-format task ARNs (needed for tagging)."""
        account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True)
        return account_support['settings'][0]['value'] == 'enabled'

    def ecs_api_handles_tags(self):
        """Task tags require botocore >= 1.12.46."""
        return self._botocore_at_least('1.12.46')

    def ecs_api_handles_network_configuration(self):
        """networkConfiguration requires botocore >= 1.7.44."""
        return self._botocore_at_least('1.7.44')
def main():
    """Entry point: validate parameters, then run, start or stop an ECS task."""
    argument_spec = dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),  # R S P
        task_definition=dict(required=False, type='str'),  # R* S*
        overrides=dict(required=False, type='dict'),  # R S
        count=dict(required=False, type='int'),  # R
        task=dict(required=False, type='str'),  # P*
        container_instances=dict(required=False, type='list'),  # S*
        started_by=dict(required=False, type='str'),  # R S
        network_configuration=dict(required=False, type='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        tags=dict(required=False, type='dict')
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE', ['network_configuration'])])

    # Validate inputs.  Every key declared in argument_spec is always present
    # in module.params, so the original guards of the form
    # `'x' not in module.params and module.params['x'] is None` were always
    # False and these checks never fired; testing for None alone is correct.
    if module.params['operation'] == 'run':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
            module.fail_json(msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        if module.params['task'] is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
            module.fail_json(msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)

    # Feature gates: fail early when the installed botocore is too old for the
    # requested options.
    if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
        module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')

    if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
        module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')

    if module.params['tags']:
        if not service_mgr.ecs_api_handles_tags():
            module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags"))
        if not service_mgr.ecs_task_long_format_enabled():
            module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")

    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'],
                    module.params['launch_type'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # it exists, so we should delete it and mark changed.
                # return info about the cluster deleted
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'],
                    module.params['task']
                )
            results['changed'] = True

    module.exit_json(**results)


if __name__ == '__main__':
    main()
| roadmapper/ansible | lib/ansible/modules/cloud/amazon/ecs_task.py | Python | gpl-3.0 | 17,388 |
from sympy import (
adjoint, conjugate, DiracDelta, Heaviside, nan, pi, sign, sqrt,
symbols, transpose, Symbol, Piecewise, I, S, Eq, oo,
SingularityFunction, signsimp
)
from sympy.utilities.pytest import raises
from sympy.core.function import ArgumentIndexError
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import filldedent
# Symbols shared by the tests below; i/j/k carry assumptions that make
# DiracDelta evaluate to 0 (nonzero, positive, negative respectively).
x, y = symbols('x y')
# NOTE(review): `i` is bound to a symbol *printed* as 't' — looks like a typo,
# but renaming it would change expression identity, so it is left as-is.
i = symbols('t', nonzero=True)
j = symbols('j', positive=True)
k = symbols('k', negative=True)
def test_DiracDelta():
    """Evaluation, symmetry, expansion and error behavior of DiracDelta."""
    # Automatic evaluation: nonzero (or assumed-nonzero) arguments give 0.
    assert DiracDelta(1) == 0
    assert DiracDelta(5.1) == 0
    assert DiracDelta(-pi) == 0
    assert DiracDelta(5, 7) == 0
    assert DiracDelta(i) == 0
    assert DiracDelta(j) == 0
    assert DiracDelta(k) == 0
    assert DiracDelta(nan) == nan
    assert DiracDelta(0).func is DiracDelta
    assert DiracDelta(x).func is DiracDelta
    # FIXME: this is generally undefined @ x=0
    # But then limit(Delta(c)*Heaviside(x),x,-oo)
    # needs to be implemented.
    #assert 0*DiracDelta(x) == 0
    # DiracDelta is real-valued, so adjoint/conjugate/transpose are identity.
    assert adjoint(DiracDelta(x)) == DiracDelta(x)
    assert adjoint(DiracDelta(x - y)) == DiracDelta(x - y)
    assert conjugate(DiracDelta(x)) == DiracDelta(x)
    assert conjugate(DiracDelta(x - y)) == DiracDelta(x - y)
    assert transpose(DiracDelta(x)) == DiracDelta(x)
    assert transpose(DiracDelta(x - y)) == DiracDelta(x - y)
    assert DiracDelta(x).diff(x) == DiracDelta(x, 1)
    assert DiracDelta(x, 1).diff(x) == DiracDelta(x, 2)
    assert DiracDelta(x).is_simple(x) is True
    assert DiracDelta(3*x).is_simple(x) is True
    assert DiracDelta(x**2).is_simple(x) is False
    assert DiracDelta(sqrt(x)).is_simple(x) is False
    assert DiracDelta(x).is_simple(y) is False
    # expand(diracdelta=True, wrt=...) applies the scaling/composition rules.
    assert DiracDelta(x*y).expand(diracdelta=True, wrt=x) == DiracDelta(x)/abs(y)
    assert DiracDelta(x*y).expand(diracdelta=True, wrt=y) == DiracDelta(y)/abs(x)
    assert DiracDelta(x**2*y).expand(diracdelta=True, wrt=x) == DiracDelta(x**2*y)
    assert DiracDelta(y).expand(diracdelta=True, wrt=x) == DiracDelta(y)
    assert DiracDelta((x - 1)*(x - 2)*(x - 3)).expand(diracdelta=True, wrt=x) == (
        DiracDelta(x - 3)/2 + DiracDelta(x - 2) + DiracDelta(x - 1)/2)
    assert DiracDelta(2*x) != DiracDelta(x) # scaling property
    assert DiracDelta(x) == DiracDelta(-x) # even function
    assert DiracDelta(-x, 2) == DiracDelta(x, 2)
    assert DiracDelta(-x, 1) == -DiracDelta(x, 1) # odd deriv is odd
    assert DiracDelta(-oo*x) == DiracDelta(oo*x)
    assert DiracDelta(x - y) != DiracDelta(y - x)
    assert signsimp(DiracDelta(x - y) - DiracDelta(y - x)) == 0
    # simplify(x) is deprecated in favor of expand(diracdelta=True, wrt=x);
    # the whole block must emit a SymPyDeprecationWarning.
    with raises(SymPyDeprecationWarning):
        assert DiracDelta(x*y).simplify(x) == DiracDelta(x)/abs(y)
        assert DiracDelta(x*y).simplify(y) == DiracDelta(y)/abs(x)
        assert DiracDelta(x**2*y).simplify(x) == DiracDelta(x**2*y)
        assert DiracDelta(y).simplify(x) == DiracDelta(y)
        assert DiracDelta((x - 1)*(x - 2)*(x - 3)).simplify(x) == (
            DiracDelta(x - 3)/2 + DiracDelta(x - 2) + DiracDelta(x - 1)/2)
    # Invalid arguments: bad derivative index, negative order, complex args.
    raises(ArgumentIndexError, lambda: DiracDelta(x).fdiff(2))
    raises(ValueError, lambda: DiracDelta(x, -1))
    raises(ValueError, lambda: DiracDelta(I))
    raises(ValueError, lambda: DiracDelta(2 + 3*I))
def test_heaviside():
    """Evaluation, second-argument handling and derivatives of Heaviside."""
    assert Heaviside(0).func == Heaviside
    assert Heaviside(-5) == 0
    assert Heaviside(1) == 1
    assert Heaviside(nan) == nan
    # Second argument gives the value at 0.
    assert Heaviside(0, x) == x
    assert Heaviside(0, nan) == nan
    assert Heaviside(x, None) == Heaviside(x)
    assert Heaviside(0, None) == Heaviside(0)
    # we do not want None in the args:
    assert None not in Heaviside(x, None).args
    # Real-valued, so adjoint/conjugate/transpose are identity.
    assert adjoint(Heaviside(x)) == Heaviside(x)
    assert adjoint(Heaviside(x - y)) == Heaviside(x - y)
    assert conjugate(Heaviside(x)) == Heaviside(x)
    assert conjugate(Heaviside(x - y)) == Heaviside(x - y)
    assert transpose(Heaviside(x)) == Heaviside(x)
    assert transpose(Heaviside(x - y)) == Heaviside(x - y)
    assert Heaviside(x).diff(x) == DiracDelta(x)
    assert Heaviside(x + I).is_Function is True
    assert Heaviside(I*x).is_Function is True
    # Invalid arguments: bad derivative index and purely complex input.
    raises(ArgumentIndexError, lambda: Heaviside(x).fdiff(2))
    raises(ValueError, lambda: Heaviside(I))
    raises(ValueError, lambda: Heaviside(2 + 3*I))
def test_rewrite():
    """Rewrites of Heaviside/DiracDelta as Piecewise, sign and SingularityFunction."""
    # Local symbols shadow the module-level ones: this x is assumed real.
    x, y = Symbol('x', real=True), Symbol('y')
    assert Heaviside(x).rewrite(Piecewise) == (
        Piecewise((0, x < 0), (Heaviside(0), Eq(x, 0)), (1, x > 0)))
    assert Heaviside(y).rewrite(Piecewise) == (
        Piecewise((0, y < 0), (Heaviside(0), Eq(y, 0)), (1, y > 0)))
    assert Heaviside(x, y).rewrite(Piecewise) == (
        Piecewise((0, x < 0), (y, Eq(x, 0)), (1, x > 0)))
    assert Heaviside(x, 0).rewrite(Piecewise) == (
        Piecewise((0, x <= 0), (1, x > 0)))
    assert Heaviside(x, 1).rewrite(Piecewise) == (
        Piecewise((0, x < 0), (1, x >= 0)))
    # sign rewrite only applies for real arguments (and H(0) = 1/2).
    assert Heaviside(x).rewrite(sign) == (sign(x)+1)/2
    assert Heaviside(y).rewrite(sign) == Heaviside(y)
    assert Heaviside(x, S.Half).rewrite(sign) == (sign(x)+1)/2
    assert Heaviside(x, y).rewrite(sign) == Heaviside(x, y)
    assert DiracDelta(y).rewrite(Piecewise) == Piecewise((DiracDelta(0), Eq(y, 0)), (0, True))
    assert DiracDelta(y, 1).rewrite(Piecewise) == DiracDelta(y, 1)
    assert DiracDelta(x - 5).rewrite(Piecewise) == (
        Piecewise((DiracDelta(0), Eq(x - 5, 0)), (0, True)))
    # SingularityFunction: DiracDelta(x - a, n) maps to <x - a>**(-1 - n).
    assert (x*DiracDelta(x - 10)).rewrite(SingularityFunction) == x*SingularityFunction(x, 10, -1)
    assert 5*x*y*DiracDelta(y, 1).rewrite(SingularityFunction) == 5*x*y*SingularityFunction(y, 0, -2)
    assert DiracDelta(0).rewrite(SingularityFunction) == SingularityFunction(0, 0, -1)
    assert DiracDelta(0, 1).rewrite(SingularityFunction) == SingularityFunction(0, 0, -2)
    assert Heaviside(x).rewrite(SingularityFunction) == SingularityFunction(x, 0, 0)
    assert 5*x*y*Heaviside(y + 1).rewrite(SingularityFunction) == 5*x*y*SingularityFunction(y, -1, 0)
    assert ((x - 3)**3*Heaviside(x - 3)).rewrite(SingularityFunction) == (x - 3)**3*SingularityFunction(x, 3, 0)
    assert Heaviside(0).rewrite(SingularityFunction) == SingularityFunction(0, 0, 0)
| wxgeo/geophar | wxgeometrie/sympy/functions/special/tests/test_delta_functions.py | Python | gpl-2.0 | 6,190 |
import sys
from com.l2scoria.gameserver.datatables import SkillTable
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
# Internal quest name; also the key under which a player's QuestState is stored.
qn = "125_IntheNameofEvilPart1"
# NPCs (dialogue partners)
MUSHIKA = 32114
KARAKAWEI = 32117
ULU_KAIMU = 32119
BALU_KAIMU = 32120
CHUTA_KAIMU = 32121
# ITEMS (quest items; removed automatically when the quest is aborted)
GAZKH_FRAGMENT = 8782
ORNITHOMIMUS_CLAW = 8779
DEINONYCHUS_BONE = 8780
EPITAPH_OF_WISDOM = 8781
# MOBS (npc ids of the two dinosaur families hunted in cond 3)
ORNITHOMIMUS = [ 22200,22201,22202,22219,22224,22742,22744 ]
DEINONYCHUS = [ 16067,22203,22204,22205,22220,22225,22743,22745 ]
# DROP
DROP_CHANCE = 30  # percent chance per qualifying kill to award a quest item
class Quest (JQuest) :
    """State machine for quest 125, 'In the Name of Evil - Part 1'.

    Dialogue events drive the quest condition ("cond") from 1 to 8; the three
    Kaimu pillars each run a small counting mini-game tracked in pilar1..pilar3.
    """

    def __init__(self, id, name, descr):
        JQuest.__init__(self, id, name, descr)
        # Quest items removed automatically if the player aborts the quest.
        self.questItemIds = [GAZKH_FRAGMENT, ORNITHOMIMUS_CLAW, DEINONYCHUS_BONE, EPITAPH_OF_WISDOM]

    def onAdvEvent(self, event, npc, player):
        htmltext = event
        st = player.getQuestState(qn)
        if not st:
            return
        cond = st.getInt("cond")
        if event == "32114-05.htm":
            # Quest accepted at Mushika.
            st.setState(STARTED)
            st.set("cond", "1")
            st.playSound("ItemSound.quest_accept")
        elif event == "32114-09.htm" and cond == 1:
            st.set("cond", "2")
            st.giveItems(GAZKH_FRAGMENT, 1)
            st.playSound("ItemSound.quest_middle")
        elif event == "32117-08.htm" and cond == 2:
            st.set("cond", "3")
            st.playSound("ItemSound.quest_middle")
        elif event == "32117-14.htm" and cond == 4:
            st.set("cond", "5")
            st.playSound("ItemSound.quest_middle")
        elif event == "32119-02.htm":
            # First pillar mini-game: reset the correct-answer counter.
            st.set("pilar1", "0")
        elif cond == 5 and event.isdigit():
            st.set("pilar1", str(st.getInt("pilar1") + 1))
            htmltext = "32119-0" + str(int(event) + 2) + ".htm"
        elif event == "32119-06.htm" and cond == 5:
            # Fewer than 4 correct answers: restart the mini-game.
            if st.getInt("pilar1") < 4:
                htmltext = "32119-00.htm"
                st.unset("pilar1")
        elif event == "32119-14.htm" and cond == 5:
            st.set("cond", "6")
            st.playSound("ItemSound.quest_middle")
        elif event == "32120-02.htm":
            # Second pillar mini-game.
            st.set("pilar2", "0")
        elif cond == 6 and event.isdigit():
            st.set("pilar2", str(st.getInt("pilar2") + 1))
            htmltext = "32120-0" + str(int(event) + 2) + ".htm"
        elif event == "32120-06.htm" and cond == 6:
            if st.getInt("pilar2") < 4:
                htmltext = "32120-00.htm"
                st.unset("pilar2")
        elif event == "32120-15.htm" and cond == 6:
            st.set("cond", "7")
            st.playSound("ItemSound.quest_middle")
        elif event == "32121-02.htm":
            # Third pillar mini-game.
            st.set("pilar3", "0")
        elif cond == 7 and event.isdigit():
            st.set("pilar3", str(st.getInt("pilar3") + 1))
            htmltext = "32121-0" + str(int(event) + 2) + ".htm"
        elif event == "32121-06.htm" and cond == 7:
            if st.getInt("pilar3") < 4:
                htmltext = "32121-00.htm"
                st.unset("pilar3")
        elif event == "32121-16.htm" and cond == 7:
            # All pillars solved: trade the fragment for the epitaph.
            st.set("cond", "8")
            st.takeItems(GAZKH_FRAGMENT, -1)
            st.giveItems(EPITAPH_OF_WISDOM, 1)
            st.playSound("ItemSound.quest_middle")
        return htmltext

    def onTalk(self, npc, player):
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        st = player.getQuestState(qn)
        if not st:
            return htmltext
        cond = st.getInt("cond")
        npcId = npc.getNpcId()
        if npcId == MUSHIKA:
            # Start requires quest 124 completed and level >= 76.
            first = player.getQuestState("124_MeetingTheElroki")
            if st.getState() == COMPLETED:
                htmltext = "<html><body>This quest has already been completed.</body></html>"
            elif first and first.getState().getName() == 'Completed' and st.getState() == CREATED and player.getLevel() >= 76:
                htmltext = "32114-01.htm"
            elif cond == 0:
                htmltext = "32114-00.htm"
            elif cond == 1:
                htmltext = "32114-07.htm"
            elif cond == 2:
                htmltext = "32114-10.htm"
            elif cond >= 3 and cond < 8:
                htmltext = "32114-11.htm"
            elif cond == 8:
                # Final reward and cleanup of all quest variables.
                st.addExpAndSp(859195, 86603)
                st.unset("cond")
                st.unset("pilar1")
                st.unset("pilar2")
                st.unset("pilar3")
                st.setState(COMPLETED)
                st.exitQuest(False)
                st.playSound("ItemSound.quest_finish")
                htmltext = "32114-12.htm"
        elif npcId == KARAKAWEI:
            if cond == 2:
                htmltext = "32117-01.htm"
            elif cond == 3:
                htmltext = "32117-09.htm"
            elif cond == 4:
                # Hunt complete: collect the claws and bones.
                st.takeItems(ORNITHOMIMUS_CLAW, -1)
                st.takeItems(DEINONYCHUS_BONE, -1)
                st.playSound("ItemSound.quest_middle")
                htmltext = "32117-10.htm"
            elif cond == 5:
                htmltext = "32117-15.htm"
            elif cond == 6 or cond == 7:
                htmltext = "32117-16.htm"
            elif cond == 8:
                htmltext = "32117-17.htm"
        elif npcId == ULU_KAIMU:
            if cond == 5:
                npc.doCast(SkillTable.getInstance().getInfo(5089, 1))
                htmltext = "32119-01.htm"
            elif cond == 6:
                htmltext = "32119-14.htm"
        elif npcId == BALU_KAIMU:
            if cond == 6:
                npc.doCast(SkillTable.getInstance().getInfo(5089, 1))
                htmltext = "32120-01.htm"
            elif cond == 7:
                htmltext = "32120-16.htm"
        elif npcId == CHUTA_KAIMU:
            if cond == 7:
                npc.doCast(SkillTable.getInstance().getInfo(5089, 1))
                htmltext = "32121-01.htm"
            elif cond == 8:
                htmltext = "32121-17.htm"
        return htmltext

    def onKill(self, npc, player, isPet):
        st = player.getQuestState(qn)
        if not st:
            return
        if st.getInt("cond") == 3:
            npcId = npc.getNpcId()
            if npcId in ORNITHOMIMUS:
                # Cap at 2 claws; DROP_CHANCE percent chance per kill.
                if st.getQuestItemsCount(ORNITHOMIMUS_CLAW) < 2 and st.getRandom(100) < DROP_CHANCE:
                    st.giveItems(ORNITHOMIMUS_CLAW, 1)
                    st.playSound("ItemSound.quest_itemget")
            elif npcId in DEINONYCHUS:
                if st.getQuestItemsCount(DEINONYCHUS_BONE) < 2 and st.getRandom(100) < DROP_CHANCE:
                    st.giveItems(DEINONYCHUS_BONE, 1)
                    st.playSound("ItemSound.quest_itemget")
            # Advance once both item sets are complete.
            if st.getQuestItemsCount(ORNITHOMIMUS_CLAW) == 2 and st.getQuestItemsCount(DEINONYCHUS_BONE) == 2:
                st.set("cond", "4")
                st.playSound("ItemSound.quest_middle")
        return
# Instantiate the quest, declare its states and register all NPC/mob hooks.
QUEST = Quest(125, qn, "The Name of Evil - 1")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
# Dialogue hooks: Mushika starts the quest; all five NPCs can be talked to.
QUEST.addStartNpc(MUSHIKA)
for npc_id in (MUSHIKA, KARAKAWEI, ULU_KAIMU, BALU_KAIMU, CHUTA_KAIMU):
    QUEST.addTalkId(npc_id)
# Kill hooks for both dinosaur families hunted during cond 3.
for npc_id in ORNITHOMIMUS + DEINONYCHUS:
    QUEST.addKillId(npc_id)
# Webhooks for external integrations.
from __future__ import absolute_import
from zerver.models import get_client
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
import ujson
@api_key_only_webhook_view
@has_request_variables
def api_travis_webhook(request, user_profile, stream=REQ(default='travis'), topic=REQ(default=None)):
    """Forward a Travis CI build notification into a Zulip stream."""
    payload = ujson.loads(request.POST['payload'])

    status = payload['status_message']
    # Map build outcome to an emoji; unknown statuses get a visible note
    # instead of silently rendering nothing.
    if status in ('Passed', 'Fixed'):
        emoji = ':thumbsup:'
    elif status in ('Failed', 'Broken', 'Still Failing'):
        emoji = ':thumbsdown:'
    else:
        emoji = "(No emoji specified for status '%s'.)" % (status,)

    body = (
        u'Author: %s\n'
        u'Build status: %s %s\n'
        u'Details: [changes](%s), [build log](%s)'
    ) % (payload['author_name'], status, emoji,
         payload['compare_url'], payload['build_url'])

    check_send_message(user_profile, get_client('ZulipTravisWebhook'),
                       'stream', [stream], topic, body)
    return json_success()
| ryansnowboarder/zulip | zerver/views/webhooks/travis.py | Python | apache-2.0 | 1,326 |
"""
Adds support for Nest thermostats.
"""
import logging
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD, TEMP_CELCIUS)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the nest thermostat. """
    logger = logging.getLogger(__name__)

    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    if username is None or password is None:
        logger.error("Missing required configuration items %s or %s",
                     CONF_USERNAME, CONF_PASSWORD)
        return

    # python-nest is an optional dependency; bail out with a hint if absent.
    try:
        import nest
    except ImportError:
        logger.exception(
            "Error while importing dependency nest. "
            "Did you maybe not install the python-nest dependency?")
        return

    napi = nest.Nest(username, password)
    # One Home Assistant entity per physical device, across all structures.
    thermostats = []
    for structure in napi.structures:
        for device in structure.devices:
            thermostats.append(NestThermostat(structure, device))
    add_devices(thermostats)
class NestThermostat(ThermostatDevice):
    """ Represents a Nest thermostat within Home Assistant. """

    def __init__(self, structure, device):
        # `structure` is the home (holds away state); `device` is the thermostat.
        self.structure = structure
        self.device = device

    @property
    def name(self):
        """ Name of the thermostat, as configured in the Nest app. """
        return self.device.name

    @property
    def unit_of_measurement(self):
        """ Unit used for all temperatures reported here. """
        return TEMP_CELCIUS

    @property
    def device_state_attributes(self):
        """ Extra, Nest-specific state attributes. """
        # Move these to Thermostat Device and make them global
        device = self.device
        return {
            "humidity": device.humidity,
            "target_humidity": device.target_humidity,
            "fan": device.fan,
            "mode": device.mode,
        }

    @property
    def current_temperature(self):
        """ Measured temperature, rounded to one decimal. """
        return round(self.device.temperature, 1)

    @property
    def target_temperature(self):
        """ Temperature the thermostat is trying to reach, one decimal. """
        return round(self.device.target, 1)

    @property
    def is_away_mode_on(self):
        """ Whether the whole structure is set to away. """
        return self.structure.away

    def set_temperature(self, temperature):
        """ Set a new target temperature on the device. """
        self.device.target = temperature

    def turn_away_mode_on(self):
        """ Switch the structure into away mode. """
        self.structure.away = True

    def turn_away_mode_off(self):
        """ Switch the structure out of away mode. """
        self.structure.away = False

    def update(self):
        """ Python-nest has its own mechanism for staying up to date. """
        pass
| Jaidan/jaidan-hab-home-assistant | homeassistant/components/thermostat/nest.py | Python | mit | 2,770 |
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
# Utility functions to handle indexing/slicing into an expression.
import numpy as np
def validate_key(key, shape):
    """Check that ``key`` is a valid index into an expression of ``shape``.

    Args:
        key: The key used to index/slice.
        shape: The shape of the expression.

    Returns:
        The key as a tuple of slices.

    Raises:
        IndexError: Index/slice out of bounds.
    """
    n_rows, n_cols = shape.size
    # Promote a single index on a row/column vector into a (row, col) pair.
    if not isinstance(key, tuple):
        if n_rows == 1:
            key = (slice(0, 1, None), key)
        elif n_cols == 1:
            key = (key, slice(0, 1, None))
        else:
            raise IndexError("Invalid index/slice.")
    # Normalize each part into a slice with an explicit start and step.
    return tuple(
        format_slice(part, dim) for part, dim in zip(key, shape.size)
    )
def format_slice(key_val, dim):
    """Convert one part of a key into a slice with a start and step.

    Uses the same syntax as numpy.

    Args:
        key_val: The value to convert into a slice.
        dim: The length of the dimension being sliced.

    Returns:
        A slice with a start and step.
    """
    # Slices pass through unchanged; bare indices are wrapped and checked.
    if isinstance(key_val, slice):
        return key_val
    idx = wrap_neg_index(key_val, dim)
    if not 0 <= idx < dim:
        raise IndexError("Index/slice out of bounds.")
    return slice(idx, idx + 1, 1)
def wrap_neg_index(index, dim):
    """Map a negative index onto the equivalent non-negative index.

    Args:
        index: The index to convert. Can be None.
        dim: The length of the dimension being indexed.
    """
    # None and non-negative indices are returned untouched.
    if index is None or index >= 0:
        return index
    return index % dim
def index_to_slice(idx):
    """Return a slice selecting exactly the single index ``idx``.

    Args:
        idx: int
            The index.

    Returns:
        slice
            A slice equivalent to the index.
    """
    stop = idx + 1
    return slice(idx, stop, None)
def slice_to_str(slc):
    """Render a slice as a subscript string (e.g. ``"1:4"`` or ``"2"``)."""
    # A single-index slice prints as a bare index.
    if is_single_index(slc):
        return str(slc.start)
    start_str = none_to_empty(slc.start)
    stop_str = none_to_empty(slc.stop)
    if slc.step == 1:
        return "%s:%s" % (start_str, stop_str)
    return "%s:%s:%s" % (start_str, stop_str, slc.step)
def none_to_empty(val):
    """Return the empty string in place of None; pass anything else through."""
    return '' if val is None else val
def is_single_index(slc):
    """Is the slice equivalent to a single index?

    A missing step is treated as 1 and a missing start as 0 (numpy
    semantics for a positive step).  Previously a slice such as
    ``slice(None, 1, None)`` (i.e. ``:1``) raised TypeError because
    ``None + step`` is invalid; it is now handled correctly.

    Args:
        slc: The slice to inspect.

    Returns:
        bool: True when the slice selects exactly one index.
    """
    step = 1 if slc.step is None else slc.step
    start = 0 if slc.start is None else slc.start
    return slc.stop is not None and start + step >= slc.stop
def size(key, shape):
    """Find the dimensions of a sliced expression.

    Args:
        key: The key used to index/slice (a pair of slices).
        shape: The shape of the expression.

    Returns:
        The dimensions of the expression as (rows, cols).
    """
    dims = []
    # Apply each slice to a dummy index range and count what survives.
    for slc, dim in zip(key, shape.size):
        selected = np.arange(dim)[slc]
        dims.append(np.size(selected))
    return tuple(dims)
def to_str(key):
    """Convert a key (i.e. two slices) into a pair of strings."""
    row_str = slice_to_str(key[0])
    col_str = slice_to_str(key[1])
    return (row_str, col_str)
| riadnassiffe/Simulator | src/tools/ecos/cvxpy/cvxpy/utilities/key_utils.py | Python | mit | 3,912 |
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
print "Starting"
# Locate the CLOUDY output files for the three grid segments in the
# current directory: "*.grd" holds the grid parameters, "*.txt" the
# emission-line data.  (Note: 'file' shadows the Python 2 builtin.)
numFiles = 3
gridfile = [None]*numFiles
Elines = [None]*numFiles
for i in range(3):
    for file in os.listdir('.'):
        if file.endswith("padova_inst_{:d}.grd".format(i+1)):
            gridfile[i] = file
            print file
        if file.endswith("padova_inst_{:d}.txt".format(i+1)):
            Elines[i] = file
            print file
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
# Each .grd file is tab-separated; rows are accumulated as lists of
# strings and then converted to a numpy array.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile[0], 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile[1], 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile[2], 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
# The first row of each .txt file is the column headers (line names);
# csvReader.next() is Python 2 syntax.
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines[0], 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers = csvReader.next()
    for row in csvReader:
        dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines[1], 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers2 = csvReader.next()
    for row in csvReader:
        dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines[2], 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers3 = csvReader.next()
    for row in csvReader:
        dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
# Columns 6/7 hold phi and hdens respectively; segment 2 stores only
# hdens (column 6) and uses a fixed phi of 17.0 for every row.
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# Drop the leading index column of each emission-line table, then stack.
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
headers = headers[1:]
# ---------------------------------------------------
# ---------------------------------------------------
#To fix when hdens > 10
# Keep only grid points with hydrogen density below 10.1, rebuilding all
# three arrays in lock-step so they stay aligned row-for-row.
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
for i in range(len(hdens_values)):
    if float(hdens_values[i]) < 10.100 :
        hdens_values_2 = append(hdens_values_2, hdens_values[i])
        phi_values_2 = append(phi_values_2, phi_values[i])
        Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
# concatenated_data will hold log-scaled line ratios; max_values holds
# (peak value, argmax row, hdens at peak, phi at peak) for each line.
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
# NOTE(review): 'incident' reads from concatenated_data, which is still all
# zeros at this point (it is only filled in the loop below), and the value
# is never used afterwards.  Presumably Emissionlines[:,57] was intended --
# left as-is because it has no effect on the output.
incident = concatenated_data[:,57]

#take the ratio of incident and all the lines and put it all in an array concatenated_data
# Each line is normalized by the H-beta (4860 A) column (index 57) and
# stored as log10; non-positive logs are clamped to zero.
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            # BUG FIX: this line used '==' (a no-op comparison statement)
            # instead of '='.  The array is pre-filled with zeros so the
            # result is unchanged, but the assignment now matches intent.
            concatenated_data[i,j] = 0

# for 1215 (alternative normalization, kept for reference)
#for i in range(len(Emissionlines)):
#    for j in range(len(Emissionlines[0])):
#        if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
#            concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
#        else:
#            concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "peaks pulled"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
# Persist the per-line peak table as a tab-separated file.
savetxt('peaks', max_values, delimiter='\t')
print "peaks saved"
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from torch import nn
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
from fairseq.models.text_to_speech.tacotron2 import Postnet
from fairseq.modules import (
FairseqDropout,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
)
logger = logging.getLogger(__name__)
def model_init(m):
    """Xavier-initialize Conv1d weights; intended for ``nn.Module.apply``.

    Modules other than ``nn.Conv1d`` are left untouched.
    """
    if not isinstance(m, nn.Conv1d):
        return
    gain = torch.nn.init.calculate_gain("relu")
    nn.init.xavier_uniform_(m.weight, gain)
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
    """Build an ``nn.Embedding`` with weights drawn from N(0, d**-0.5)."""
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    std = embedding_dim**-0.5
    nn.init.normal_(emb.weight, mean=0, std=std)
    return emb
class PositionwiseFeedForward(nn.Module):
    """Two-layer 1D-convolutional feed-forward block with residual + LayerNorm.

    Operates on (B, T, C) inputs; the convolutions run along the time axis.
    """

    def __init__(self, in_dim, hidden_dim, kernel_size, dropout):
        super().__init__()
        # Conv1d expects (B, C, T); forward() transposes around this stack.
        # Same-padding keeps the time dimension unchanged.
        self.ffn = nn.Sequential(
            nn.Conv1d(
                in_dim,
                hidden_dim,
                kernel_size=kernel_size,
                padding=(kernel_size - 1) // 2,
            ),
            nn.ReLU(),
            nn.Conv1d(
                hidden_dim,
                in_dim,
                kernel_size=kernel_size,
                padding=(kernel_size - 1) // 2,
            ),
        )
        self.layer_norm = LayerNorm(in_dim)
        # Both attribute names alias the same dropout module.
        self.dropout = self.dropout_module = FairseqDropout(
            p=dropout, module_name=self.__class__.__name__
        )

    def forward(self, x):
        # B x T x C
        residual = x
        x = self.ffn(x.transpose(1, 2)).transpose(1, 2)
        x = self.dropout(x)
        return self.layer_norm(x + residual)
class FFTLayer(torch.nn.Module):
    """Feed-Forward Transformer (FFT) block: self-attention + conv FFN.

    Input and output are B x T x C.
    """

    def __init__(
        self, embed_dim, n_heads, hidden_dim, kernel_size, dropout, attention_dropout
    ):
        super().__init__()
        self.self_attn = MultiheadAttention(
            embed_dim, n_heads, dropout=attention_dropout, self_attention=True
        )
        self.layer_norm = LayerNorm(embed_dim)
        self.ffn = PositionwiseFeedForward(
            embed_dim, hidden_dim, kernel_size, dropout=dropout
        )

    def forward(self, x, padding_mask=None):
        # B x T x C
        residual = x
        # MultiheadAttention expects T x B x C; transpose in and back out.
        x = x.transpose(0, 1)
        x, _ = self.self_attn(
            query=x, key=x, value=x, key_padding_mask=padding_mask, need_weights=False
        )
        x = x.transpose(0, 1)
        x = self.layer_norm(x + residual)
        return self.ffn(x)
class LengthRegulator(nn.Module):
    """Expand encoder states along time according to per-token durations.

    Each position ``t`` of batch element ``b`` is repeated
    ``durations[b, t]`` times; shorter outputs are zero-padded up to the
    longest expanded sequence in the batch.
    """

    def forward(self, x, durations):
        """
        Args:
            x: input states, B x T x C.
            durations: integer repeat counts, B x T.

        Returns:
            Tuple of (expanded states, B x T_out x C; output lengths, B).
        """
        out_lens = durations.sum(dim=1)
        max_len = int(out_lens.max())
        bsz, _, dim = x.size()
        out = x.new_zeros((bsz, max_len, dim))
        for b in range(bsz):
            # repeat_interleave replaces the original per-timestep Python
            # loop that built an explicit index list; the result is
            # identical but avoids quadratic Python-level work.
            expanded = torch.repeat_interleave(x[b], durations[b], dim=0)
            out[b, : expanded.size(0)] = expanded
        return out, out_lens
class VariancePredictor(nn.Module):
    """Small conv stack predicting one scalar per time step.

    Used for duration, pitch and energy prediction in FastSpeech 2.
    """

    def __init__(self, args):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv1d(
                args.encoder_embed_dim,
                args.var_pred_hidden_dim,
                kernel_size=args.var_pred_kernel_size,
                padding=(args.var_pred_kernel_size - 1) // 2,
            ),
            nn.ReLU(),
        )
        self.ln1 = nn.LayerNorm(args.var_pred_hidden_dim)
        self.dropout_module = FairseqDropout(
            p=args.var_pred_dropout, module_name=self.__class__.__name__
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(
                args.var_pred_hidden_dim,
                args.var_pred_hidden_dim,
                kernel_size=args.var_pred_kernel_size,
                # NOTE(review): padding is hard-coded to 1 here, unlike the
                # (kernel_size - 1) // 2 used in conv1.  These agree only
                # when var_pred_kernel_size == 3 (the default) -- confirm
                # before using other kernel sizes.
                padding=1,
            ),
            nn.ReLU(),
        )
        self.ln2 = nn.LayerNorm(args.var_pred_hidden_dim)
        self.proj = nn.Linear(args.var_pred_hidden_dim, 1)

    def forward(self, x):
        # Input: B x T x C; Output: B x T
        x = self.conv1(x.transpose(1, 2)).transpose(1, 2)
        x = self.dropout_module(self.ln1(x))
        x = self.conv2(x.transpose(1, 2)).transpose(1, 2)
        x = self.dropout_module(self.ln2(x))
        return self.proj(x).squeeze(dim=2)
class VarianceAdaptor(nn.Module):
    """Predict duration, pitch and energy, then expand states accordingly.

    Pitch/energy predictions are quantized into ``var_pred_n_bins`` buckets,
    re-embedded, and added back to the encoder states before the length
    regulator expands them along the time axis.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.length_regulator = LengthRegulator()
        self.duration_predictor = VariancePredictor(args)
        self.pitch_predictor = VariancePredictor(args)
        self.energy_predictor = VariancePredictor(args)

        # n_bins embeddings, but only n_bins - 1 boundaries for bucketize.
        n_bins, steps = self.args.var_pred_n_bins, self.args.var_pred_n_bins - 1
        self.pitch_bins = torch.linspace(args.pitch_min, args.pitch_max, steps)
        self.embed_pitch = Embedding(n_bins, args.encoder_embed_dim)
        self.energy_bins = torch.linspace(args.energy_min, args.energy_max, steps)
        self.embed_energy = Embedding(n_bins, args.encoder_embed_dim)

    def get_pitch_emb(self, x, tgt=None, factor=1.0):
        # Teacher-forced (tgt given) during training; predicted otherwise.
        out = self.pitch_predictor(x)
        bins = self.pitch_bins.to(x.device)
        if tgt is None:
            out = out * factor
            emb = self.embed_pitch(torch.bucketize(out, bins))
        else:
            emb = self.embed_pitch(torch.bucketize(tgt, bins))
        return out, emb

    def get_energy_emb(self, x, tgt=None, factor=1.0):
        # Same teacher-forcing scheme as get_pitch_emb.
        out = self.energy_predictor(x)
        bins = self.energy_bins.to(x.device)
        if tgt is None:
            out = out * factor
            emb = self.embed_energy(torch.bucketize(out, bins))
        else:
            emb = self.embed_energy(torch.bucketize(tgt, bins))
        return out, emb

    def forward(
        self,
        x,
        padding_mask,
        durations=None,
        pitches=None,
        energies=None,
        d_factor=1.0,
        p_factor=1.0,
        e_factor=1.0,
    ):
        # x: B x T x C
        # Durations are predicted in log space; invert, scale and clamp.
        log_dur_out = self.duration_predictor(x)
        dur_out = torch.clamp(
            torch.round((torch.exp(log_dur_out) - 1) * d_factor).long(), min=0
        )
        # Padded positions must not be expanded.
        dur_out.masked_fill_(padding_mask, 0)

        pitch_out, pitch_emb = self.get_pitch_emb(x, pitches, p_factor)
        x = x + pitch_emb
        energy_out, energy_emb = self.get_energy_emb(x, energies, e_factor)
        x = x + energy_emb

        # Ground-truth durations take precedence when provided.
        x, out_lens = self.length_regulator(
            x, dur_out if durations is None else durations
        )

        return x, out_lens, log_dur_out, pitch_out, energy_out
class FastSpeech2Encoder(FairseqEncoder):
    """Full FastSpeech 2 network (encoder FFT stack, variance adaptor,
    decoder FFT stack and spectrogram projection), packaged as a fairseq
    encoder since generation is non-autoregressive.
    """

    def __init__(self, args, src_dict, embed_speaker):
        super().__init__(src_dict)
        self.args = args
        self.padding_idx = src_dict.pad()
        self.n_frames_per_step = args.n_frames_per_step
        self.out_dim = args.output_frame_dim * args.n_frames_per_step

        # Optional speaker conditioning: concatenate a speaker embedding to
        # every time step and project back down to the model dimension.
        self.embed_speaker = embed_speaker
        self.spk_emb_proj = None
        if embed_speaker is not None:
            self.spk_emb_proj = nn.Linear(
                args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
            )

        self.dropout_module = FairseqDropout(
            p=args.dropout, module_name=self.__class__.__name__
        )
        self.embed_tokens = Embedding(
            len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx
        )

        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, args.encoder_embed_dim, self.padding_idx
        )
        # Learned scalar gains on the (shared) positional embeddings for the
        # encoder and decoder sides respectively.
        self.pos_emb_alpha = nn.Parameter(torch.ones(1))
        self.dec_pos_emb_alpha = nn.Parameter(torch.ones(1))

        self.encoder_fft_layers = nn.ModuleList(
            FFTLayer(
                args.encoder_embed_dim,
                args.encoder_attention_heads,
                args.fft_hidden_dim,
                args.fft_kernel_size,
                dropout=args.dropout,
                attention_dropout=args.attention_dropout,
            )
            for _ in range(args.encoder_layers)
        )

        self.var_adaptor = VarianceAdaptor(args)

        self.decoder_fft_layers = nn.ModuleList(
            FFTLayer(
                args.decoder_embed_dim,
                args.decoder_attention_heads,
                args.fft_hidden_dim,
                args.fft_kernel_size,
                dropout=args.dropout,
                attention_dropout=args.attention_dropout,
            )
            for _ in range(args.decoder_layers)
        )

        self.out_proj = nn.Linear(args.decoder_embed_dim, self.out_dim)

        # Optional refinement postnet applied residually to the output.
        self.postnet = None
        if args.add_postnet:
            self.postnet = Postnet(
                self.out_dim,
                args.postnet_conv_dim,
                args.postnet_conv_kernel_size,
                args.postnet_layers,
                args.postnet_dropout,
            )

        self.apply(model_init)

    def forward(
        self,
        src_tokens,
        src_lengths=None,
        speaker=None,
        durations=None,
        pitches=None,
        energies=None,
        **kwargs,
    ):
        x = self.embed_tokens(src_tokens)

        enc_padding_mask = src_tokens.eq(self.padding_idx)
        x += self.pos_emb_alpha * self.embed_positions(enc_padding_mask)
        x = self.dropout_module(x)

        for layer in self.encoder_fft_layers:
            x = layer(x, enc_padding_mask)

        if self.embed_speaker is not None:
            bsz, seq_len, _ = x.size()
            # Broadcast the per-utterance speaker embedding over time.
            emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1)
            x = self.spk_emb_proj(torch.cat([x, emb], dim=2))

        x, out_lens, log_dur_out, pitch_out, energy_out = self.var_adaptor(
            x, enc_padding_mask, durations, pitches, energies
        )

        # Time axis has been expanded; build a fresh padding mask for it.
        dec_padding_mask = lengths_to_padding_mask(out_lens)
        x += self.dec_pos_emb_alpha * self.embed_positions(dec_padding_mask)
        for layer in self.decoder_fft_layers:
            x = layer(x, dec_padding_mask)

        x = self.out_proj(x)
        x_post = None
        if self.postnet is not None:
            x_post = x + self.postnet(x)
        return x, x_post, out_lens, log_dur_out, pitch_out, energy_out
@register_model("fastspeech2")
class FastSpeech2Model(FairseqEncoderModel):
    """
    Implementation for https://arxiv.org/abs/2006.04558
    """

    # All output frames are produced in a single forward pass.
    NON_AUTOREGRESSIVE = True

    @classmethod
    def hub_models(cls):
        """Map hub model ids to their downloadable checkpoint archives."""
        base_url = "http://dl.fbaipublicfiles.com/fairseq/s2"
        model_ids = [
            "fastspeech2-en-ljspeech",
            "fastspeech2-en-200_speaker-cv4",
        ]
        return {i: f"{base_url}/{i}.tar.gz" for i in model_ids}

    @classmethod
    def from_pretrained(
        cls,
        model_name_or_path,
        checkpoint_file="model.pt",
        data_name_or_path=".",
        config_yaml="config.yaml",
        vocoder: str = "griffin_lim",
        fp16: bool = False,
        **kwargs,
    ):
        """Load a pretrained checkpoint and wrap it in a TTS hub interface."""
        from fairseq import hub_utils

        x = hub_utils.from_pretrained(
            model_name_or_path,
            checkpoint_file,
            data_name_or_path,
            archive_map=cls.hub_models(),
            config_yaml=config_yaml,
            vocoder=vocoder,
            fp16=fp16,
            **kwargs,
        )
        return TTSHubInterface(x["args"], x["task"], x["models"][0])

    @staticmethod
    def add_args(parser):
        """Register FastSpeech 2 command-line hyper-parameters."""
        parser.add_argument("--dropout", type=float)
        parser.add_argument("--output-frame-dim", type=int)
        parser.add_argument("--speaker-embed-dim", type=int)
        # FFT blocks
        parser.add_argument("--fft-hidden-dim", type=int)
        parser.add_argument("--fft-kernel-size", type=int)
        parser.add_argument("--attention-dropout", type=float)
        parser.add_argument("--encoder-layers", type=int)
        parser.add_argument("--encoder-embed-dim", type=int)
        parser.add_argument("--encoder-attention-heads", type=int)
        parser.add_argument("--decoder-layers", type=int)
        parser.add_argument("--decoder-embed-dim", type=int)
        parser.add_argument("--decoder-attention-heads", type=int)
        # variance predictor
        parser.add_argument("--var-pred-n-bins", type=int)
        parser.add_argument("--var-pred-hidden-dim", type=int)
        parser.add_argument("--var-pred-kernel-size", type=int)
        parser.add_argument("--var-pred-dropout", type=float)
        # postnet
        parser.add_argument("--add-postnet", action="store_true")
        parser.add_argument("--postnet-dropout", type=float)
        parser.add_argument("--postnet-layers", type=int)
        parser.add_argument("--postnet-conv-dim", type=int)
        parser.add_argument("--postnet-conv-kernel-size", type=int)

    def __init__(self, encoder, args, src_dict):
        super().__init__(encoder)
        self._num_updates = 0

        out_dim = args.output_frame_dim * args.n_frames_per_step
        # Optional CTC head, built only when an auxiliary CTC loss is used.
        self.ctc_proj = None
        if getattr(args, "ctc_weight", 0.0) > 0.0:
            self.ctc_proj = nn.Linear(out_dim, len(src_dict))

    @classmethod
    def build_model(cls, args, task):
        embed_speaker = task.get_speaker_embeddings(args)
        encoder = FastSpeech2Encoder(args, task.src_dict, embed_speaker)
        return cls(encoder, args, task.src_dict)

    def set_num_updates(self, num_updates):
        super().set_num_updates(num_updates)
        self._num_updates = num_updates

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        # NOTE(review): assumes self.ctc_proj is not None (i.e. ctc_weight
        # was > 0 at construction); otherwise this raises TypeError --
        # confirm that callers only use this path with the CTC loss.
        logits = self.ctc_proj(net_output[0])
        if log_probs:
            return utils.log_softmax(logits.float(), dim=-1)
        else:
            return utils.softmax(logits.float(), dim=-1)
@register_model_architecture("fastspeech2", "fastspeech2")
def base_architecture(args):
    """Fill in any unset FastSpeech 2 hyper-parameters with their defaults."""
    defaults = {
        "dropout": 0.2,
        "output_frame_dim": 80,
        "speaker_embed_dim": 64,
        # FFT blocks
        "fft_hidden_dim": 1024,
        "fft_kernel_size": 9,
        "attention_dropout": 0.0,
        "encoder_layers": 4,
        "encoder_embed_dim": 256,
        "encoder_attention_heads": 2,
        "decoder_layers": 4,
        "decoder_embed_dim": 256,
        "decoder_attention_heads": 2,
        # variance predictor
        "var_pred_n_bins": 256,
        "var_pred_hidden_dim": 256,
        "var_pred_kernel_size": 3,
        "var_pred_dropout": 0.5,
        # postnet
        "add_postnet": False,
        "postnet_dropout": 0.5,
        "postnet_layers": 5,
        "postnet_conv_dim": 512,
        "postnet_conv_kernel_size": 5,
    }
    # Equivalent to args.<name> = getattr(args, <name>, <default>) per field.
    for name, default in defaults.items():
        setattr(args, name, getattr(args, name, default))
| pytorch/fairseq | fairseq/models/text_to_speech/fastspeech2.py | Python | mit | 15,718 |
def myint(string):
    """Convert a decimal string to an int one digit at a time.

    Accepts an optional leading '-' (as before) and, as a backward
    compatible generalization, an optional leading '+'.

    >>> myint('-123')
    -123
    """
    neg = False
    if string and string[0] in '+-':
        neg = string[0] == '-'
        string = string[1:]
    num = 0
    for digit in string:
        num = num * 10 + int(digit)
    return -num if neg else num
# Parenthesized single-argument print produces identical output under
# Python 2 (expression in parentheses) and Python 3 (function call),
# making this demo runnable on both.
print(myint('123'))
print(myint('-123'))
print(myint('1232731723'))
| amitsaha/learning | python/strings/str2int.py | Python | unlicense | 345 |
#! /usr/bin/env python
"""
Bit error rate tester (BERT) simulator, written in Python.
Original Author: David Banas <capn.freako@gmail.com>
Original Date: 17 June 2014
Testing by: Mark Marlett <mark.marlett@gmail.com>
This Python script provides a GUI interface to a BERT simulator, which
can be used to explore the concepts of serial communication link design.
The application source is divided among several files, as follows:
pybert.py - This file. It contains:
- independent variable declarations
- default initialization
- the definitions of those dependent variables, which are handled
automatically by the Traits/UI machinery.
pybert_view.py - Contains the main window layout definition, as
well as the definitions of user invoked actions
(i.e.- buttons).
pybert_cntrl.py - Contains the definitions for those dependent
variables, which are updated not automatically by
the Traits/UI machinery, but rather by explicit
user action (i.e. - button clicks).
pybert_util.py - Contains general purpose utility functionality.
dfe.py - Contains the decision feedback equalizer model.
cdr.py - Contains the clock data recovery unit model.
Copyright (c) 2014 by David Banas; All rights reserved World wide.
"""
from traits.api import HasTraits, Array, Range, Float, Int, Property, String, cached_property, Instance, HTML, List, Bool
from chaco.api import Plot, ArrayPlotData, VPlotContainer, GridPlotContainer, ColorMapper, Legend, OverlayPlotContainer, PlotAxis
from chaco.tools.api import PanTool, ZoomTool, LegendTool, TraitsTool, DragZoom
from numpy import array, linspace, zeros, histogram, mean, diff, log10, transpose, shape
from numpy.fft import fft
from numpy.random import randint
from scipy.signal import lfilter, iirfilter
from pybert_view import *
from pybert_cntrl import *
from pybert_util import *
# Module-wide debug flag (its consumers are outside this chunk).
debug = False

# Default model parameters - Modify these to customize the default simulation.
# - Simulation Control
gUI = 100 # (ps)
gNbits = 8000 # number of bits to run
gPatLen = 127 # repeating bit pattern length
gNspb = 32 # samples per bit
# - Channel Control
# - parameters for Howard Johnson's "Metallic Transmission Model"
# - (See "High Speed Signal Propagation", Sec. 3.1.)
# - ToDo: These are the values for 24 guage twisted copper pair; need to add other options.
gRdc = 0.1876 # Ohms/m
gw0 = 10.e6 # 10 MHz is recommended in Ch. 8 of his second book, in which UTP is described in detail.
gR0 = 1.452 # skin-effect resistance (Ohms/m)
gTheta0 = .02 # loss tangent
gZ0 = 100. # characteristic impedance in LC region (Ohms)
gv0 = 0.67 # relative propagation velocity (c)
gl_ch = 1.0 # cable length (m)
gRn = 0.01 # standard deviation of Gaussian random noise (V) (Applied at end of channel, so as to appear white to Rx.)
# - Tx
gVod = 1.0 # output drive strength (Vp)
gRs = 100 # differential source impedance (Ohms)
gCout = 0.50 # parasitic output capacitance (pF) (Assumed to exist at both 'P' and 'N' nodes.)
gPnMag = 0.1 # magnitude of periodic noise (V)
gPnFreq = 0.437 # frequency of periodic noise (MHz)
# - Rx
gRin = 100 # differential input resistance
gCin = 0.50 # parasitic input capacitance (pF) (Assumed to exist at both 'P' and 'N' nodes.)
gCac = 1. # a.c. coupling capacitance (uF) (Assumed to exist at both 'P' and 'N' nodes.)
gBW = 12. # Rx signal path bandwidth, assuming no CTLE action. (GHz)
gUseCtle = True # Include CTLE when running simulation.
gUseDfe = True # Include DFE when running simulation.
gDfeIdeal = True # DFE ideal summing node selector
gPeakFreq = 5. # CTLE peaking frequency (GHz)
gPeakMag = 10. # CTLE peaking magnitude (dB)
# - DFE
gDecisionScaler = 0.5
gNtaps = 5
gGain = 0.1
gNave = 100
gDfeBW = 12. # DFE summing node bandwidth (GHz)
# - CDR
gDeltaT = 0.1 # (ps)
gAlpha = 0.01
gNLockAve = 500 # number of UI used to average CDR locked status.
gRelLockTol = .1 # relative lock tolerance of CDR.
gLockSustain = 500
# - Analysis
gThresh = 6 # threshold for identifying periodic jitter spectral elements (sigma)
class PyBERT(HasTraits):
"""
A serial communication link bit error rate tester (BERT) simulator with a GUI interface.
Useful for exploring the concepts of serial communication link design.
"""
# Independent variables
# - Simulation Control
ui = Float(gUI) # (ps)
nbits = Int(gNbits)
pattern_len = Int(gPatLen)
nspb = Int(gNspb)
eye_bits = Int(gNbits // 5)
mod_type = List([0])
# - Channel Control
Rdc = Float(gRdc)
w0 = Float(gw0)
R0 = Float(gR0)
Theta0 = Float(gTheta0)
Z0 = Float(gZ0)
v0 = Float(gv0)
l_ch = Float(gl_ch)
# - Tx
vod = Float(gVod) # (V)
rs = Float(gRs) # (Ohms)
cout = Float(gCout) # (pF)
pn_mag = Float(gPnMag) # (ps)
pn_freq = Float(gPnFreq) # (MHz)
rn = Float(gRn) # (V)
pretap = Float(-0.05)
posttap = Float(-0.10)
# - Rx
rin = Float(gRin) # (Ohmin)
cin = Float(gCin) # (pF)
cac = Float(gCac) # (uF)
rx_bw = Float(gBW) # (GHz)
use_dfe = Bool(gUseDfe)
sum_ideal = Bool(gDfeIdeal)
peak_freq = Float(gPeakFreq) # CTLE peaking frequency (GHz)
peak_mag = Float(gPeakMag) # CTLE peaking magnitude (dB)
# - DFE
decision_scaler = Float(gDecisionScaler)
gain = Float(gGain)
n_ave = Float(gNave)
n_taps = Int(gNtaps)
sum_bw = Float(gDfeBW) # (GHz)
# - CDR
delta_t = Float(gDeltaT) # (ps)
alpha = Float(gAlpha)
n_lock_ave = Int(gNLockAve)
rel_lock_tol = Float(gRelLockTol)
lock_sustain = Int(gLockSustain)
# - Analysis
thresh = Int(gThresh)
# - Plots (plot containers, actually)
plotdata = ArrayPlotData()
plots_h = Instance(GridPlotContainer)
plots_s = Instance(GridPlotContainer)
plots_H = Instance(GridPlotContainer)
plots_dfe = Instance(GridPlotContainer)
plots_eye = Instance(GridPlotContainer)
plots_jitter_dist = Instance(GridPlotContainer)
plots_jitter_spec = Instance(GridPlotContainer)
plots_bathtub = Instance(GridPlotContainer)
# - Status
status = String("Ready.")
jitter_perf = Float(0.)
total_perf = Float(0.)
# - About
ident = String('PyBERT v1.2 - a serial communication link design tool, written in Python\n\n \
David Banas\n \
February 10, 2015\n\n \
Copyright (c) 2014 David Banas;\n \
All rights reserved World wide.')
# - Help
instructions = Property(HTML)
# Dependent variables
# - Handled by the Traits/UI machinery. (Should only contain "low overhead" variables, which don't freeze the GUI noticeably.)
jitter_info = Property(HTML, depends_on=['jitter_perf'])
perf_info = Property(HTML, depends_on=['total_perf'])
status_str = Property(String, depends_on=['status'])
# - Handled by pybert_cntrl.py, upon user button clicks. (May contain "large overhead" variables.)
# - These are dependencies. So, they must be Array()s.
# - These are not.
# Note: Everything has been moved to pybert_cntrl.py.
# I was beginning to suspect flaky initialization behavior,
# due to the way in which I was splitting up the initialization.
# Also, this guarantees no GUI freeze-up.
# Default initialization
    def __init__(self):
        """Run the initial simulation and build all GUI plot containers.

        Calls `my_run_simulation` once to populate `self.plotdata`, then
        constructs one 2x2 GridPlotContainer per GUI tab (DFE, impulse/
        step/frequency responses, outputs, eye diagrams, jitter
        distributions/spectra, bathtub curves) and finally renders the
        eye diagrams via `update_eyes`.
        """
        super(PyBERT, self).__init__()
        plotdata = self.plotdata
        # Running the simulation will fill in the 'plotdata' structure.
        my_run_simulation(self, initial_run=True)
        # Now, create all the various plots we need for our GUI.
        # - DFE tab
        plot1 = Plot(plotdata)
        plot1.plot(("t_ns", "dfe_out"), type="line", color="blue")
        plot1.plot(("t_ns", "clocks"), type="line", color="green")
        plot1.plot(("t_ns", "lockeds"), type="line", color="red")
        plot1.title = "DFE Output, Recovered Clocks, & Locked"
        plot1.index_axis.title = "Time (ns)"
        plot1.tools.append(PanTool(plot1, constrain=True, constrain_key=None, constrain_direction='x'))
        zoom1 = ZoomTool(plot1, tool_mode="range", axis='index', always_on=False)
        plot1.overlays.append(zoom1)
        plot2 = Plot(plotdata)
        plot2.plot(("t_ns", "ui_ests"), type="line", color="blue")
        plot2.title = "CDR Adaptation"
        plot2.index_axis.title = "Time (ns)"
        plot2.value_axis.title = "UI (ps)"
        plot2.index_range = plot1.index_range # Zoom x-axes in tandem.
        plot3 = Plot(plotdata)
        plot3.plot(('f_MHz_dfe', 'jitter_rejection_ratio'), type="line", color="blue")
        plot3.title = "CDR/DFE Jitter Rejection Ratio"
        plot3.index_axis.title = "Frequency (MHz)"
        plot3.value_axis.title = "Ratio (dB)"
        zoom3 = ZoomTool(plot3, tool_mode="range", axis='index', always_on=False)
        plot3.overlays.append(zoom3)
        plot4 = Plot(plotdata)
        # NOTE(review): ('auto_corr') is just the plain string 'auto_corr'
        # (parentheses without a comma do not make a tuple).
        plot4.plot(('auto_corr'), type="line", color="blue")
        plot4.title = "Received to Transmitted Bits Correlation"
        plot4.index_axis.title = "Offset (bits)"
        plot4.value_axis.title = "Correlation"
        plot4.value_range.high_setting = 1
        plot4.value_range.low_setting = 0
        zoom4 = ZoomTool(plot4, tool_mode="range", axis='index', always_on=False)
        plot4.overlays.append(zoom4)
        plot9 = Plot(plotdata, auto_colors=['red', 'orange', 'yellow', 'green', 'blue', 'purple'])
        # One trace per DFE tap; tap data series are named "tap1_weights" ... "tapN_weights".
        for i in range(gNtaps):
            plot9.plot(("tap_weight_index", "tap%d_weights" % (i + 1)), type="line", color="auto", name="tap%d"%(i+1))
        plot9.title = "DFE Adaptation"
        plot9.tools.append(PanTool(plot9, constrain=True, constrain_key=None, constrain_direction='x'))
        zoom9 = ZoomTool(plot9, tool_mode="range", axis='index', always_on=False)
        plot9.overlays.append(zoom9)
        plot9.legend.visible = True
        plot9.legend.align = 'ul'
        container_dfe = GridPlotContainer(shape=(2,2))
        container_dfe.add(plot2)
        container_dfe.add(plot9)
        container_dfe.add(plot1)
        container_dfe.add(plot3)
        self.plots_dfe = container_dfe
        # - Impulse Responses tab
        plot_h_chnl = Plot(plotdata)
        plot_h_chnl.plot(("t_ns_chnl", "chnl_h"), type="line", color="blue")
        plot_h_chnl.title = "Channel"
        plot_h_chnl.index_axis.title = "Time (ns)"
        plot_h_chnl.y_axis.title = "Impulse Response (V/ns)"
        plot_h_tx = Plot(plotdata)
        plot_h_tx.plot(("t_ns_chnl", "tx_out_h"), type="line", color="red", name="Cumulative")
        plot_h_tx.title = "Channel + Tx Preemphasis"
        plot_h_tx.index_axis.title = "Time (ns)"
        plot_h_tx.y_axis.title = "Impulse Response (V/ns)"
        plot_h_ctle = Plot(plotdata)
        plot_h_ctle.plot(("t_ns_chnl", "ctle_out_h"), type="line", color="red", name="Cumulative")
        plot_h_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_h_ctle.index_axis.title = "Time (ns)"
        plot_h_ctle.y_axis.title = "Impulse Response (V/ns)"
        plot_h_dfe = Plot(plotdata)
        plot_h_dfe.plot(("t_ns_chnl", "dfe_out_h"), type="line", color="red", name="Cumulative")
        plot_h_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_h_dfe.index_axis.title = "Time (ns)"
        plot_h_dfe.y_axis.title = "Impulse Response (V/ns)"
        container_h = GridPlotContainer(shape=(2,2))
        container_h.add(plot_h_chnl)
        container_h.add(plot_h_tx)
        container_h.add(plot_h_ctle)
        container_h.add(plot_h_dfe)
        self.plots_h = container_h
        # - Step Responses tab
        plot_s_chnl = Plot(plotdata)
        plot_s_chnl.plot(("t_ns_chnl", "chnl_s"), type="line", color="blue")
        plot_s_chnl.title = "Channel"
        plot_s_chnl.index_axis.title = "Time (ns)"
        plot_s_chnl.y_axis.title = "Step Response (V)"
        plot_s_tx = Plot(plotdata)
        plot_s_tx.plot(("t_ns_chnl", "tx_s"), type="line", color="blue", name="Incremental")
        plot_s_tx.plot(("t_ns_chnl", "tx_out_s"), type="line", color="red", name="Cumulative")
        plot_s_tx.title = "Channel + Tx Preemphasis"
        plot_s_tx.index_axis.title = "Time (ns)"
        plot_s_tx.y_axis.title = "Step Response (V)"
        plot_s_tx.legend.visible = True
        plot_s_tx.legend.align = 'lr'
        plot_s_ctle = Plot(plotdata)
        plot_s_ctle.plot(("t_ns_chnl", "ctle_s"), type="line", color="blue", name="Incremental")
        plot_s_ctle.plot(("t_ns_chnl", "ctle_out_s"), type="line", color="red", name="Cumulative")
        plot_s_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_s_ctle.index_axis.title = "Time (ns)"
        plot_s_ctle.y_axis.title = "Step Response (V)"
        plot_s_ctle.legend.visible = True
        plot_s_ctle.legend.align = 'lr'
        plot_s_dfe = Plot(plotdata)
        plot_s_dfe.plot(("t_ns_chnl", "dfe_s"), type="line", color="blue", name="Incremental")
        plot_s_dfe.plot(("t_ns_chnl", "dfe_out_s"), type="line", color="red", name="Cumulative")
        plot_s_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_s_dfe.index_axis.title = "Time (ns)"
        plot_s_dfe.y_axis.title = "Step Response (V)"
        plot_s_dfe.legend.visible = True
        plot_s_dfe.legend.align = 'lr'
        container_s = GridPlotContainer(shape=(2,2))
        container_s.add(plot_s_chnl)
        container_s.add(plot_s_tx)
        container_s.add(plot_s_ctle)
        container_s.add(plot_s_dfe)
        self.plots_s = container_s
        # - Frequency Responses tab
        plot_H_chnl = Plot(plotdata)
        plot_H_chnl.plot(("f_GHz", "chnl_H"), type="line", color="blue", index_scale='log')
        plot_H_chnl.title = "Channel"
        plot_H_chnl.index_axis.title = "Frequency (GHz)"
        plot_H_chnl.y_axis.title = "Frequency Response (dB)"
        plot_H_chnl.index_range.low_setting = 0.1
        plot_H_chnl.index_range.high_setting = 40.
        plot_H_chnl.value_range.low_setting = -40.
        plot_H_tx = Plot(plotdata)
        plot_H_tx.plot(("f_GHz", "tx_H"), type="line", color="blue", name="Incremental", index_scale='log')
        plot_H_tx.plot(("f_GHz", "tx_out_H"), type="line", color="red", name="Cumulative", index_scale='log')
        plot_H_tx.title = "Channel + Tx Preemphasis"
        plot_H_tx.index_axis.title = "Frequency (GHz)"
        plot_H_tx.y_axis.title = "Frequency Response (dB)"
        plot_H_tx.index_range.low_setting = 0.1
        plot_H_tx.index_range.high_setting = 40.
        plot_H_tx.value_range.low_setting = -40.
        plot_H_tx.legend.visible = True
        plot_H_tx.legend.align = 'll'
        plot_H_ctle = Plot(plotdata)
        plot_H_ctle.plot(("f_GHz", "ctle_H"), type="line", color="blue", name="Incremental", index_scale='log')
        plot_H_ctle.plot(("f_GHz", "ctle_out_H"), type="line", color="red", name="Cumulative", index_scale='log')
        plot_H_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_H_ctle.index_axis.title = "Frequency (GHz)"
        plot_H_ctle.y_axis.title = "Frequency Response (dB)"
        plot_H_ctle.index_range.low_setting = 0.1
        plot_H_ctle.index_range.high_setting = 40.
        plot_H_ctle.value_range.low_setting = -40.
        plot_H_ctle.legend.visible = True
        plot_H_ctle.legend.align = 'll'
        plot_H_dfe = Plot(plotdata)
        plot_H_dfe.plot(("f_GHz", "dfe_H"), type="line", color="blue", name="Incremental", index_scale='log')
        plot_H_dfe.plot(("f_GHz", "dfe_out_H"), type="line", color="red", name="Cumulative", index_scale='log')
        plot_H_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_H_dfe.index_axis.title = "Frequency (GHz)"
        plot_H_dfe.y_axis.title = "Frequency Response (dB)"
        plot_H_dfe.index_range.low_setting = 0.1
        plot_H_dfe.index_range.high_setting = 40.
        plot_H_dfe.value_range.low_setting = -40.
        plot_H_dfe.legend.visible = True
        plot_H_dfe.legend.align = 'll'
        container_H = GridPlotContainer(shape=(2,2))
        container_H.add(plot_H_chnl)
        container_H.add(plot_H_tx)
        container_H.add(plot_H_ctle)
        container_H.add(plot_H_dfe)
        self.plots_H = container_H
        # - Outputs tab
        plot_out_chnl = Plot(plotdata)
        plot_out_chnl.plot(("t_ns", "ideal_signal"), type="line", color="lightgrey")
        plot_out_chnl.plot(("t_ns", "chnl_out"), type="line", color="blue")
        plot_out_chnl.title = "Channel"
        plot_out_chnl.index_axis.title = "Time (ns)"
        plot_out_chnl.y_axis.title = "Output (V)"
        zoom_out_chnl = ZoomTool(plot_out_chnl, tool_mode="range", axis='index', always_on=False)
        plot_out_chnl.overlays.append(zoom_out_chnl)
        plot_out_tx = Plot(plotdata)
        plot_out_tx.plot(("t_ns", "tx_out"), type="line", color="blue")
        plot_out_tx.title = "Channel + Tx Preemphasis (Noise added here.)"
        plot_out_tx.index_axis.title = "Time (ns)"
        plot_out_tx.y_axis.title = "Output (V)"
        plot_out_tx.index_range = plot_out_chnl.index_range # Zoom x-axes in tandem.
        plot_out_ctle = Plot(plotdata)
        plot_out_ctle.plot(("t_ns", "ctle_out"), type="line", color="blue")
        plot_out_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_out_ctle.index_axis.title = "Time (ns)"
        plot_out_ctle.y_axis.title = "Output (V)"
        plot_out_ctle.index_range = plot_out_chnl.index_range # Zoom x-axes in tandem.
        plot_out_dfe = Plot(plotdata)
        plot_out_dfe.plot(("t_ns", "dfe_out"), type="line", color="blue")
        plot_out_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_out_dfe.index_axis.title = "Time (ns)"
        plot_out_dfe.y_axis.title = "Output (V)"
        plot_out_dfe.index_range = plot_out_chnl.index_range # Zoom x-axes in tandem.
        container_out = GridPlotContainer(shape=(2,2))
        container_out.add(plot_out_chnl)
        container_out.add(plot_out_tx)
        container_out.add(plot_out_ctle)
        container_out.add(plot_out_dfe)
        # NOTE(review): 'plots_out' has no matching trait declaration above
        # (unlike plots_h/plots_s/etc.); it appears to be added dynamically
        # here. Confirm this is intentional.
        self.plots_out = container_out
        # - Eye Diagrams tab
        # Segment data for the custom colormap used by the eye-diagram heat
        # maps: black background ramping through blue/cyan/green/yellow/
        # orange/red/pink to white at maximum hit density.
        seg_map = dict(
            red = [
                (0.00, 0.00, 0.00), # black
                (0.00001, 0.00, 0.00), # blue
                (0.15, 0.00, 0.00), # cyan
                (0.30, 0.00, 0.00), # green
                (0.45, 1.00, 1.00), # yellow
                (0.60, 1.00, 1.00), # orange
                (0.75, 1.00, 1.00), # red
                (0.90, 1.00, 1.00), # pink
                (1.00, 1.00, 1.00) # white
            ],
            green = [
                (0.00, 0.00, 0.00), # black
                (0.00001, 0.00, 0.00), # blue
                (0.15, 0.50, 0.50), # cyan
                (0.30, 0.50, 0.50), # green
                (0.45, 1.00, 1.00), # yellow
                (0.60, 0.50, 0.50), # orange
                (0.75, 0.00, 0.00), # red
                (0.90, 0.50, 0.50), # pink
                (1.00, 1.00, 1.00) # white
            ],
            blue = [
                (0.00, 0.00, 0.00), # black
                (1e-18, 0.50, 0.50), # blue
                (0.15, 0.50, 0.50), # cyan
                (0.30, 0.00, 0.00), # green
                (0.45, 0.00, 0.00), # yellow
                (0.60, 0.00, 0.00), # orange
                (0.75, 0.00, 0.00), # red
                (0.90, 0.50, 0.50), # pink
                (1.00, 1.00, 1.00) # white
            ]
        )
        clr_map = ColorMapper.from_segment_map(seg_map)
        self.clr_map = clr_map
        plot_eye_chnl = Plot(plotdata)
        plot_eye_chnl.img_plot("eye_chnl", colormap=clr_map,)
        plot_eye_chnl.y_direction = 'normal'
        plot_eye_chnl.components[0].y_direction = 'normal'
        plot_eye_chnl.title = "Channel"
        plot_eye_chnl.x_axis.title = "Time (ps)"
        plot_eye_chnl.x_axis.orientation = "bottom"
        plot_eye_chnl.y_axis.title = "Signal Level (V)"
        plot_eye_chnl.x_grid.visible = True
        plot_eye_chnl.y_grid.visible = True
        plot_eye_chnl.x_grid.line_color = 'gray'
        plot_eye_chnl.y_grid.line_color = 'gray'
        plot_eye_tx = Plot(plotdata)
        plot_eye_tx.img_plot("eye_tx", colormap=clr_map,)
        plot_eye_tx.y_direction = 'normal'
        plot_eye_tx.components[0].y_direction = 'normal'
        plot_eye_tx.title = "Channel + Tx Preemphasis (Noise added here.)"
        plot_eye_tx.x_axis.title = "Time (ps)"
        plot_eye_tx.x_axis.orientation = "bottom"
        plot_eye_tx.y_axis.title = "Signal Level (V)"
        plot_eye_tx.x_grid.visible = True
        plot_eye_tx.y_grid.visible = True
        plot_eye_tx.x_grid.line_color = 'gray'
        plot_eye_tx.y_grid.line_color = 'gray'
        plot_eye_ctle = Plot(plotdata)
        plot_eye_ctle.img_plot("eye_ctle", colormap=clr_map,)
        plot_eye_ctle.y_direction = 'normal'
        plot_eye_ctle.components[0].y_direction = 'normal'
        plot_eye_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_eye_ctle.x_axis.title = "Time (ps)"
        plot_eye_ctle.x_axis.orientation = "bottom"
        plot_eye_ctle.y_axis.title = "Signal Level (V)"
        plot_eye_ctle.x_grid.visible = True
        plot_eye_ctle.y_grid.visible = True
        plot_eye_ctle.x_grid.line_color = 'gray'
        plot_eye_ctle.y_grid.line_color = 'gray'
        plot_eye_dfe = Plot(plotdata)
        plot_eye_dfe.img_plot("eye_dfe", colormap=clr_map,)
        plot_eye_dfe.y_direction = 'normal'
        plot_eye_dfe.components[0].y_direction = 'normal'
        plot_eye_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_eye_dfe.x_axis.title = "Time (ps)"
        plot_eye_dfe.x_axis.orientation = "bottom"
        plot_eye_dfe.y_axis.title = "Signal Level (V)"
        plot_eye_dfe.x_grid.visible = True
        plot_eye_dfe.y_grid.visible = True
        plot_eye_dfe.x_grid.line_color = 'gray'
        plot_eye_dfe.y_grid.line_color = 'gray'
        container_eye = GridPlotContainer(shape=(2,2))
        container_eye.add(plot_eye_chnl)
        container_eye.add(plot_eye_tx)
        container_eye.add(plot_eye_ctle)
        container_eye.add(plot_eye_dfe)
        self.plots_eye = container_eye
        # - Jitter Distributions tab
        plot_jitter_dist_chnl = Plot(plotdata)
        plot_jitter_dist_chnl.plot(('jitter_bins', 'jitter_chnl'), type="line", color="blue", name="Measured")
        plot_jitter_dist_chnl.plot(('jitter_bins', 'jitter_ext_chnl'), type="line", color="red", name="Extrapolated")
        plot_jitter_dist_chnl.title = "Channel"
        plot_jitter_dist_chnl.index_axis.title = "Time (ps)"
        plot_jitter_dist_chnl.value_axis.title = "Count"
        plot_jitter_dist_chnl.legend.visible = True
        plot_jitter_dist_chnl.legend.align = 'ur'
        plot_jitter_dist_tx = Plot(plotdata)
        plot_jitter_dist_tx.plot(('jitter_bins', 'jitter_tx'), type="line", color="blue", name="Measured")
        plot_jitter_dist_tx.plot(('jitter_bins', 'jitter_ext_tx'), type="line", color="red", name="Extrapolated")
        plot_jitter_dist_tx.title = "Channel + Tx Preemphasis (Noise added here.)"
        plot_jitter_dist_tx.index_axis.title = "Time (ps)"
        plot_jitter_dist_tx.value_axis.title = "Count"
        plot_jitter_dist_tx.legend.visible = True
        plot_jitter_dist_tx.legend.align = 'ur'
        plot_jitter_dist_ctle = Plot(plotdata)
        plot_jitter_dist_ctle.plot(('jitter_bins', 'jitter_ctle'), type="line", color="blue", name="Measured")
        plot_jitter_dist_ctle.plot(('jitter_bins', 'jitter_ext_ctle'), type="line", color="red", name="Extrapolated")
        plot_jitter_dist_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_jitter_dist_ctle.index_axis.title = "Time (ps)"
        plot_jitter_dist_ctle.value_axis.title = "Count"
        plot_jitter_dist_ctle.legend.visible = True
        plot_jitter_dist_ctle.legend.align = 'ur'
        plot_jitter_dist_dfe = Plot(plotdata)
        plot_jitter_dist_dfe.plot(('jitter_bins', 'jitter_dfe'), type="line", color="blue", name="Measured")
        plot_jitter_dist_dfe.plot(('jitter_bins', 'jitter_ext_dfe'), type="line", color="red", name="Extrapolated")
        plot_jitter_dist_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_jitter_dist_dfe.index_axis.title = "Time (ps)"
        plot_jitter_dist_dfe.value_axis.title = "Count"
        plot_jitter_dist_dfe.legend.visible = True
        plot_jitter_dist_dfe.legend.align = 'ur'
        container_jitter_dist = GridPlotContainer(shape=(2,2))
        container_jitter_dist.add(plot_jitter_dist_chnl)
        container_jitter_dist.add(plot_jitter_dist_tx)
        container_jitter_dist.add(plot_jitter_dist_ctle)
        container_jitter_dist.add(plot_jitter_dist_dfe)
        self.plots_jitter_dist = container_jitter_dist
        # - Jitter Spectrums tab
        plot_jitter_spec_chnl = Plot(plotdata)
        plot_jitter_spec_chnl.plot(('f_MHz', 'jitter_spectrum_chnl'), type="line", color="blue", name="Total")
        plot_jitter_spec_chnl.plot(('f_MHz', 'jitter_ind_spectrum_chnl'), type="line", color="red", name="Data Independent")
        plot_jitter_spec_chnl.plot(('f_MHz', 'thresh_chnl'), type="line", color="magenta", name="Pj Threshold")
        plot_jitter_spec_chnl.title = "Channel"
        plot_jitter_spec_chnl.index_axis.title = "Frequency (MHz)"
        plot_jitter_spec_chnl.value_axis.title = "|FFT(TIE)| (dBui)"
        plot_jitter_spec_chnl.tools.append(PanTool(plot_jitter_spec_chnl, constrain=True, constrain_key=None, constrain_direction='x'))
        zoom_jitter_spec_chnl = ZoomTool(plot_jitter_spec_chnl, tool_mode="range", axis='index', always_on=False)
        plot_jitter_spec_chnl.overlays.append(zoom_jitter_spec_chnl)
        plot_jitter_spec_chnl.legend.visible = True
        plot_jitter_spec_chnl.legend.align = 'lr'
        plot_jitter_spec_tx = Plot(plotdata)
        plot_jitter_spec_tx.plot(('f_MHz', 'jitter_spectrum_tx'), type="line", color="blue", name="Total")
        plot_jitter_spec_tx.plot(('f_MHz', 'jitter_ind_spectrum_tx'), type="line", color="red", name="Data Independent")
        plot_jitter_spec_tx.plot(('f_MHz', 'thresh_tx'), type="line", color="magenta", name="Pj Threshold")
        plot_jitter_spec_tx.title = "Channel + Tx Preemphasis (Noise added here.)"
        plot_jitter_spec_tx.index_axis.title = "Frequency (MHz)"
        plot_jitter_spec_tx.value_axis.title = "|FFT(TIE)| (dBui)"
        plot_jitter_spec_tx.value_range.low_setting = -40.
        plot_jitter_spec_tx.tools.append(PanTool(plot_jitter_spec_tx, constrain=True, constrain_key=None, constrain_direction='x'))
        zoom_jitter_spec_tx = ZoomTool(plot_jitter_spec_tx, tool_mode="range", axis='index', always_on=False)
        plot_jitter_spec_tx.overlays.append(zoom_jitter_spec_tx)
        plot_jitter_spec_tx.legend.visible = True
        plot_jitter_spec_tx.legend.align = 'lr'
        # All four spectrum plots share the Tx plot's value range.
        plot_jitter_spec_chnl.value_range = plot_jitter_spec_tx.value_range
        plot_jitter_spec_ctle = Plot(plotdata)
        plot_jitter_spec_ctle.plot(('f_MHz', 'jitter_spectrum_ctle'), type="line", color="blue", name="Total")
        plot_jitter_spec_ctle.plot(('f_MHz', 'jitter_ind_spectrum_ctle'), type="line", color="red", name="Data Independent")
        plot_jitter_spec_ctle.plot(('f_MHz', 'thresh_ctle'), type="line", color="magenta", name="Pj Threshold")
        plot_jitter_spec_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_jitter_spec_ctle.index_axis.title = "Frequency (MHz)"
        plot_jitter_spec_ctle.value_axis.title = "|FFT(TIE)| (dBui)"
        plot_jitter_spec_ctle.tools.append(PanTool(plot_jitter_spec_ctle, constrain=True, constrain_key=None, constrain_direction='x'))
        zoom_jitter_spec_ctle = ZoomTool(plot_jitter_spec_ctle, tool_mode="range", axis='index', always_on=False)
        plot_jitter_spec_ctle.overlays.append(zoom_jitter_spec_ctle)
        plot_jitter_spec_ctle.legend.visible = True
        plot_jitter_spec_ctle.legend.align = 'lr'
        plot_jitter_spec_ctle.value_range = plot_jitter_spec_tx.value_range
        plot_jitter_spec_dfe = Plot(plotdata)
        plot_jitter_spec_dfe.plot(('f_MHz_dfe', 'jitter_spectrum_dfe'), type="line", color="blue", name="Total")
        plot_jitter_spec_dfe.plot(('f_MHz_dfe', 'jitter_ind_spectrum_dfe'), type="line", color="red", name="Data Independent")
        plot_jitter_spec_dfe.plot(('f_MHz_dfe', 'thresh_dfe'), type="line", color="magenta", name="Pj Threshold")
        plot_jitter_spec_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_jitter_spec_dfe.index_axis.title = "Frequency (MHz)"
        plot_jitter_spec_dfe.value_axis.title = "|FFT(TIE)| (dBui)"
        plot_jitter_spec_dfe.tools.append(PanTool(plot_jitter_spec_dfe, constrain=True, constrain_key=None, constrain_direction='x'))
        zoom_jitter_spec_dfe = ZoomTool(plot_jitter_spec_dfe, tool_mode="range", axis='index', always_on=False)
        plot_jitter_spec_dfe.overlays.append(zoom_jitter_spec_dfe)
        plot_jitter_spec_dfe.legend.visible = True
        plot_jitter_spec_dfe.legend.align = 'lr'
        plot_jitter_spec_dfe.value_range = plot_jitter_spec_tx.value_range
        container_jitter_spec = GridPlotContainer(shape=(2,2))
        container_jitter_spec.add(plot_jitter_spec_chnl)
        container_jitter_spec.add(plot_jitter_spec_tx)
        container_jitter_spec.add(plot_jitter_spec_ctle)
        container_jitter_spec.add(plot_jitter_spec_dfe)
        self.plots_jitter_spec = container_jitter_spec
        # - Bathtub Curves tab
        plot_bathtub_chnl = Plot(plotdata)
        plot_bathtub_chnl.plot(("jitter_bins", "bathtub_chnl"), type="line", color="blue")
        plot_bathtub_chnl.value_range.high_setting = 0
        plot_bathtub_chnl.value_range.low_setting = -18
        plot_bathtub_chnl.value_axis.tick_interval = 3
        plot_bathtub_chnl.title = "Channel"
        plot_bathtub_chnl.index_axis.title = "Time (ps)"
        plot_bathtub_chnl.value_axis.title = "Log10(P(Transition occurs inside.))"
        plot_bathtub_tx = Plot(plotdata)
        plot_bathtub_tx.plot(("jitter_bins", "bathtub_tx"), type="line", color="blue")
        plot_bathtub_tx.value_range.high_setting = 0
        plot_bathtub_tx.value_range.low_setting = -18
        plot_bathtub_tx.value_axis.tick_interval = 3
        plot_bathtub_tx.title = "Channel + Tx Preemphasis (Noise added here.)"
        plot_bathtub_tx.index_axis.title = "Time (ps)"
        plot_bathtub_tx.value_axis.title = "Log10(P(Transition occurs inside.))"
        plot_bathtub_ctle = Plot(plotdata)
        plot_bathtub_ctle.plot(("jitter_bins", "bathtub_ctle"), type="line", color="blue")
        plot_bathtub_ctle.value_range.high_setting = 0
        plot_bathtub_ctle.value_range.low_setting = -18
        plot_bathtub_ctle.value_axis.tick_interval = 3
        plot_bathtub_ctle.title = "Channel + Tx Preemphasis + CTLE"
        plot_bathtub_ctle.index_axis.title = "Time (ps)"
        plot_bathtub_ctle.value_axis.title = "Log10(P(Transition occurs inside.))"
        plot_bathtub_dfe = Plot(plotdata)
        plot_bathtub_dfe.plot(("jitter_bins", "bathtub_dfe"), type="line", color="blue")
        plot_bathtub_dfe.value_range.high_setting = 0
        plot_bathtub_dfe.value_range.low_setting = -18
        plot_bathtub_dfe.value_axis.tick_interval = 3
        plot_bathtub_dfe.title = "Channel + Tx Preemphasis + CTLE + DFE"
        plot_bathtub_dfe.index_axis.title = "Time (ps)"
        plot_bathtub_dfe.value_axis.title = "Log10(P(Transition occurs inside.))"
        container_bathtub = GridPlotContainer(shape=(2,2))
        container_bathtub.add(plot_bathtub_chnl)
        container_bathtub.add(plot_bathtub_tx)
        container_bathtub.add(plot_bathtub_ctle)
        container_bathtub.add(plot_bathtub_dfe)
        self.plots_bathtub = container_bathtub
        # These various plot customizing functions are left, for future reference.
        # plot19.index_range = plot5.index_range # Zoom x-axes in tandem.
        # Render the eye diagrams from the freshly-computed simulation results.
        update_eyes(self)
# Dependent variable definitions
@cached_property
def _get_jitter_info(self):
isi_chnl = self.isi_chnl * 1.e12
dcd_chnl = self.dcd_chnl * 1.e12
pj_chnl = self.pj_chnl * 1.e12
rj_chnl = self.rj_chnl * 1.e12
isi_tx = self.isi_tx * 1.e12
dcd_tx = self.dcd_tx * 1.e12
pj_tx = self.pj_tx * 1.e12
rj_tx = self.rj_tx * 1.e12
isi_ctle = self.isi_ctle * 1.e12
dcd_ctle = self.dcd_ctle * 1.e12
pj_ctle = self.pj_ctle * 1.e12
rj_ctle = self.rj_ctle * 1.e12
isi_dfe = self.isi_dfe * 1.e12
dcd_dfe = self.dcd_dfe * 1.e12
pj_dfe = self.pj_dfe * 1.e12
rj_dfe = self.rj_dfe * 1.e12
isi_rej_tx = 1.e20
dcd_rej_tx = 1.e20
pj_rej_tx = 1.e20
rj_rej_tx = 1.e20
isi_rej_ctle = 1.e20
dcd_rej_ctle = 1.e20
pj_rej_ctle = 1.e20
rj_rej_ctle = 1.e20
isi_rej_dfe = 1.e20
dcd_rej_dfe = 1.e20
pj_rej_dfe = 1.e20
rj_rej_dfe = 1.e20
isi_rej_total = 1.e20
dcd_rej_total = 1.e20
pj_rej_total = 1.e20
rj_rej_total = 1.e20
if(isi_tx):
isi_rej_tx = isi_chnl / isi_tx
if(dcd_tx):
dcd_rej_tx = dcd_chnl / dcd_tx
if(pj_tx):
pj_rej_tx = pj_chnl / pj_tx
if(rj_tx):
rj_rej_tx = rj_chnl / rj_tx
if(isi_ctle):
isi_rej_ctle = isi_tx / isi_ctle
if(dcd_ctle):
dcd_rej_ctle = dcd_tx / dcd_ctle
if(pj_ctle):
pj_rej_ctle = pj_tx / pj_ctle
if(rj_ctle):
rj_rej_ctle = rj_tx / rj_ctle
if(isi_dfe):
isi_rej_dfe = isi_ctle / isi_dfe
if(dcd_dfe):
dcd_rej_dfe = dcd_ctle / dcd_dfe
if(pj_dfe):
pj_rej_dfe = pj_ctle / pj_dfe
if(rj_dfe):
rj_rej_dfe = rj_ctle / rj_dfe
if(isi_dfe):
isi_rej_total = isi_chnl / isi_dfe
if(dcd_dfe):
dcd_rej_total = dcd_chnl / dcd_dfe
if(pj_dfe):
pj_rej_total = pj_tx / pj_dfe
if(rj_dfe):
rj_rej_total = rj_tx / rj_dfe
info_str = '<H1>Jitter Rejection by Equalization Component</H1>\n'
info_str += '<H2>Tx Preemphasis</H2>\n'
info_str += '<TABLE border="1">\n'
info_str += '<TR align="center">\n'
info_str += "<TH>Jitter Component</TH><TH>Input (ps)</TH><TH>Output (ps)</TH><TH>Rejection (dB)</TH>\n"
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">ISI</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(isi_chnl, isi_tx, 10. * log10(isi_rej_tx))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">DCD</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(dcd_chnl, dcd_tx, 10. * log10(dcd_rej_tx))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Pj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>n/a</TD>\n' % \
(pj_chnl, pj_tx)
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Rj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>n/a</TD>\n' % \
(rj_chnl, rj_tx)
info_str += "</TR>\n"
info_str += "</TABLE>\n"
info_str += '<H2>CTLE</H2>\n'
info_str += '<TABLE border="1">\n'
info_str += '<TR align="center">\n'
info_str += "<TH>Jitter Component</TH><TH>Input (ps)</TH><TH>Output (ps)</TH><TH>Rejection (dB)</TH>\n"
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">ISI</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(isi_tx, isi_ctle, 10. * log10(isi_rej_ctle))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">DCD</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(dcd_tx, dcd_ctle, 10. * log10(dcd_rej_ctle))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Pj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(pj_tx, pj_ctle, 10. * log10(pj_rej_ctle))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Rj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(rj_tx, rj_ctle, 10. * log10(rj_rej_ctle))
info_str += "</TR>\n"
info_str += "</TABLE>\n"
info_str += '<H2>DFE</H2>\n'
info_str += '<TABLE border="1">\n'
info_str += '<TR align="center">\n'
info_str += "<TH>Jitter Component</TH><TH>Input (ps)</TH><TH>Output (ps)</TH><TH>Rejection (dB)</TH>\n"
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">ISI</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(isi_ctle, isi_dfe, 10. * log10(isi_rej_dfe))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">DCD</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(dcd_ctle, dcd_dfe, 10. * log10(dcd_rej_dfe))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Pj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(pj_ctle, pj_dfe, 10. * log10(pj_rej_dfe))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Rj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(rj_ctle, rj_dfe, 10. * log10(rj_rej_dfe))
info_str += "</TR>\n"
info_str += "</TABLE>\n"
info_str += '<H2>TOTAL</H2>\n'
info_str += '<TABLE border="1">\n'
info_str += '<TR align="center">\n'
info_str += "<TH>Jitter Component</TH><TH>Input (ps)</TH><TH>Output (ps)</TH><TH>Rejection (dB)</TH>\n"
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">ISI</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(isi_chnl, isi_dfe, 10. * log10(isi_rej_total))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">DCD</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(dcd_chnl, dcd_dfe, 10. * log10(dcd_rej_total))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Pj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(pj_tx, pj_dfe, 10. * log10(pj_rej_total))
info_str += "</TR>\n"
info_str += '<TR align="right">\n'
info_str += '<TD align="center">Rj</TD><TD>%6.3f</TD><TD>%6.3f</TD><TD>%4.1f</TD>\n' % \
(rj_tx, rj_dfe, 10. * log10(rj_rej_total))
info_str += "</TR>\n"
info_str += "</TABLE>\n"
return info_str
@cached_property
def _get_perf_info(self):
info_str = '<H2>Performance by Component</H2>\n'
info_str += ' <TABLE border="1">\n'
info_str += ' <TR align="center">\n'
info_str += ' <TH>Component</TH><TH>Performance (Msmpls./min.)</TH>\n'
info_str += ' </TR>\n'
info_str += ' <TR align="right">\n'
info_str += ' <TD align="center">Channel</TD><TD>%6.3f</TD>\n' % (self.channel_perf * 60.e-6)
info_str += ' </TR>\n'
info_str += ' <TR align="right">\n'
info_str += ' <TD align="center">Tx Preemphasis</TD><TD>%6.3f</TD>\n' % (self.tx_perf * 60.e-6)
info_str += ' </TR>\n'
info_str += ' <TR align="right">\n'
info_str += ' <TD align="center">CTLE</TD><TD>%6.3f</TD>\n' % (self.ctle_perf * 60.e-6)
info_str += ' </TR>\n'
info_str += ' <TR align="right">\n'
info_str += ' <TD align="center">DFE</TD><TD>%6.3f</TD>\n' % (self.dfe_perf * 60.e-6)
info_str += ' </TR>\n'
info_str += ' <TR align="right">\n'
info_str += ' <TD align="center">Jitter Analysis</TD><TD>%6.3f</TD>\n' % (self.jitter_perf * 60.e-6)
info_str += ' </TR>\n'
info_str += ' <TR align="right">\n'
info_str += ' <TD align="center">Plotting</TD><TD>%6.3f</TD>\n' % (self.plotting_perf * 60.e-6)
info_str += ' </TR>\n'
info_str += ' <TR align="right">\n'
info_str += ' <TD align="center">TOTAL</TD><TD>%6.3f</TD>\n' % (self.total_perf * 60.e-6)
info_str += ' </TR>\n'
info_str += ' </TABLE>\n'
return info_str
@cached_property
def _get_status_str(self):
perf_str = "%-20s | Perf. (Msmpls/min.): %4.1f" % (self.status, self.total_perf * 60.e-6)
jit_str = " | Jitter (ps): ISI=%6.3f DCD=%6.3f Pj=%6.3f Rj=%6.3f" % \
(self.isi_dfe * 1.e12, self.dcd_dfe * 1.e12, self.pj_dfe * 1.e12, self.rj_dfe * 1.e12)
dly_str = " | Channel Delay (ns): %5.3f" % (self.chnl_dly * 1.e9)
err_str = " | Bit errors detected: %d" % self.bit_errs
return perf_str + dly_str + jit_str + err_str
@cached_property
def _get_instructions(self):
help_str = "<H2>PyBERT User's Guide</H2>\n"
help_str += " <H3>Note to developers</H3>\n"
help_str += " This is NOT for you. Instead, open 'pybert/doc/build/html/index.html' in a browser.\n"
help_str += " <H3>PyBERT User Help Options</H3>\n"
help_str += " <UL>\n"
help_str += " <LI>Hover over any user-settable value in the <em>Config.</em> tab, for help message.</LI>\n"
help_str += ' <LI>Visit the PyBERT FAQ at: https://github.com/capn-freako/PyBERT/wiki/pybert_faq.</LI>\n'
help_str += ' <LI>Send e-mail to David Banas at capn.freako@gmail.com.</LI>\n'
help_str += " </UL>\n"
return help_str
if __name__ == '__main__':
    # Launch the PyBERT GUI. (`traits_view` is the top-level Traits/UI view,
    # presumably imported from a companion module above this chunk — confirm.)
    PyBERT().configure_traits(view=traits_view)
| MarkMarlett/PyBERT | pybert/pybert.py | Python | bsd-2-clause | 45,021 |
# -*- coding: utf-8 -*-
#
# weatherservice documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 30 18:33:43 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# PEP 8: one import per line.
import os
import sys

# Make the documented package importable by autodoc.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../weatherservice/'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# 'sphinx.ext.autodoc' pulls API docs from the docstrings of the package
# added to sys.path above.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'weatherservice'
copyright = u'2013, acbart'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'weatherservicedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'weatherservice.tex', u'weatherservice Documentation',
u'acbart', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'weatherservice', u'weatherservice Documentation',
[u'acbart'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'weatherservice', u'weatherservice Documentation',
u'acbart', 'weatherservice', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| RealTimeWeb/weather | python/docs/conf.py | Python | mit | 8,090 |
## -*- encoding: utf-8 -*-
import os
import sys
from setuptools import setup
from codecs import open # To open the README file with proper encoding
from setuptools.command.test import test as TestCommand # for tests
# Get information from separate files (README, VERSION)
def readfile(filename):
    """Return the entire contents of *filename* decoded as UTF-8."""
    with open(filename, encoding='utf-8') as handle:
        contents = handle.read()
    return contents
# For the tests
class SageTest(TestCommand):
    """``setup.py test`` command that delegates to the Sage doctest runner."""
    def run_tests(self):
        # ``sage -t`` runs the doctests in the ``carlin`` package;
        # ``--force-lib`` treats it as a library even if not installed.
        errno = os.system("sage -t --force-lib carlin")
        if errno != 0:
            # Propagate a failing exit status so the build is marked failed.
            sys.exit(1)
# Package metadata; README.rst and VERSION are read at build time so the
# same version string and description are shared with the documentation.
setup(
    name = "carlin",
    version = readfile("VERSION"), # the VERSION file is shared with the documentation
    description='Carleman linearization of ordinary differential equations',
    long_description = readfile("README.rst"), # get the long description from the README
    url='https://github.com/mforets/carlin',
    author='Marcelo Forets',
    author_email='marcelo.forets-irurtia@univ-grenoble-alpes.fr', # choose a main contact email
    license='GPLv3', # This should be consistent with the LICENCE file
    classifiers=[
      # How mature is this project? Common values are
      #   3 - Alpha
      #   4 - Beta
      #   5 - Production/Stable
      'Development Status :: 3 - Alpha',
      'Intended Audience :: Science/Research',
      'Topic :: Scientific/Engineering :: Mathematics',
      'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
      'Programming Language :: Python :: 2.7',
    ], # classifiers list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    keywords = "sage differential-equations polynomials",
    packages = ['carlin'],
    cmdclass = {'test': SageTest} # adding a special setup command for tests
)
| mforets/carlin | setup.py | Python | gpl-3.0 | 1,724 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 19:54:08 2010
@author: sat kumar tomer (http://civil.iisc.ernet.in/~satkumar/)
Functions:
utm2deg: Calculate Lat, Lon from UTM co-ordinates
deg2utm : Calculate UTM co-ordinates from Lat, Lon
kabini:
berambadi:
great_circle_distance:
"""
# load needed python modules
from pyproj import Proj
import os
from subprocess import call
import sys
import numpy as np
import matplotlib.pyplot as plt
def utm2image(GT, utm):
    """Convert UTM coordinates to integer image pixel indices.

    GT is a GDAL-style 6-element geotransform and utm an (N, 2) array of
    (easting, northing) pairs; returns (column, row) index arrays.
    """
    east_off = utm[:, 0] - GT[0]
    north_off = utm[:, 1] - GT[3]
    det = GT[1] * GT[5] - GT[4] * GT[2]
    cols = (east_off * GT[5] - north_off * GT[2]) / det
    rows = (north_off * GT[1] - east_off * GT[4]) / det
    return cols.astype('int'), rows.astype('int')
def Geo2Pixel(Xgeo, Ygeo, GT):
    """Invert a GDAL geotransform: map georeferenced (Xgeo, Ygeo) to
    fractional (pixel, line) image coordinates via Cramer's rule."""
    dx = Xgeo - GT[0]
    dy = Ygeo - GT[3]
    det = GT[1] * GT[5] - GT[4] * GT[2]
    Xpixel = (GT[5] * dx - GT[2] * dy) / det
    Yline = (GT[1] * dy - GT[4] * dx) / det
    return Xpixel, Yline
def Pixel2Geo(Xpixel, Yline, GT):
    """Apply a GDAL geotransform: map (pixel, line) image coordinates to
    georeferenced (Xgeo, Ygeo) coordinates."""
    Xgeo = GT[0] + GT[1] * Xpixel + GT[2] * Yline
    Ygeo = GT[3] + GT[4] * Xpixel + GT[5] * Yline
    return Xgeo, Ygeo
def SetProjectionBerambadi():
    # WKT definition of WGS 84 / UTM zone 43N (EPSG:32643), the projected
    # CRS used for the Berambadi watershed; suitable for GDAL SetProjection().
    return 'PROJCS["WGS 84 / UTM zone 43N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",75],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32643"]]'
def utm2deg(x, y, utmzone=43):
    """Convert UTM easting/northing (metres) to geographic degrees.

    Input:
        x, y: easting and northing in metres
        utmzone: UTM zone number (WGS84 ellipsoid)
    Output:
        (Lon, Lat) in degrees
    Bug fix: the zone was previously hard-coded to 43, so the ``utmzone``
    argument was silently ignored for any other zone.
    """
    p = Proj(proj='utm', zone=utmzone, ellps='WGS84')
    Lon, Lat = p(x, y, inverse=True)
    return Lon, Lat
def deg2utm(Lon,Lat,utmzone=43):
    # Forward UTM projection (WGS84 ellipsoid): geographic degrees ->
    # (easting, northing) in metres for the requested zone.
    p = Proj(proj='utm',zone=utmzone,ellps='WGS84')
    x,y = p(Lon, Lat)
    return x,y
def cut_xy(Ifile, Ofile, xmin, xmax, ymin, ymax):
    """
    Cut the raster to the bounding box (xmin, xmax, ymin, ymax)
    using the gdal_translate command line tool.
    If the Ofile already exists, it is deleted first.
    Returns the gdal_translate exit status (0 on success).
    """
    if os.path.exists(Ofile):
        os.remove(Ofile)
    # gdal_translate expects the corners as: ulx uly lrx lry
    Ber = '%s %s %s %s'%(xmin, ymax, xmax, ymin)
    SC = 'gdal_translate -a_ullr %s -projwin %s %s %s'%(Ber, Ber, Ifile, Ofile)
    returncode = call(SC, shell=True)
    return returncode
def kabini(Ifile,Ofile):
# define the file names file
Temp1 = '/home/tomer/MODISdata/temp/temp1.tif'
Temp2 = '/home/tomer/MODISdata/temp/temp2.tif'
# convert from geographical co-ordinates to UTM zone 43
cmd= 'gdalwarp -r bilinear -t_srs \'+proj=utm +zone=43 +datum=WGS84\' ' + Ifile + ' ' +Temp1
try:
returncode = call(cmd, shell=True)
if returncode:
print 'Failure with returncode', returncode; sys.exit(1)
except OSError, message:
print 'Execution failed!\n', message; sys.exit(1)
# cut the area around Kabini Basin (output image will be of size 129 X 102)
kab='582000 1372000 711000 1270000'
cmd = 'gdal_translate -a_ullr ' + kab + ' -projwin ' + kab + ' ' + Temp1+ ' ' + Temp2
try:
returncode = call(cmd, shell=True)
if returncode:
print 'Failure with returncode', returncode; sys.exit(1)
except OSError, message:
print 'Execution failed!\n', message; sys.exit(1)
# changing the resolution
cmd = 'gdalwarp -r bilinear -tr 1000 1000 ' + Temp2 + ' ' +Ofile
try:
returncode = call(cmd, shell=True)
if returncode:
print 'Failure with returncode', returncode; sys.exit(1)
except OSError, message:
print 'Execution failed!\n', message; sys.exit(1)
# remove the temporary file
try:
os.remove(Temp)
os.remove(Temp2)
except:
print 'temp file not created'
def berambadi(Ifile,Ofile):
    # Reproject Ifile to UTM zone 43 (WGS84), crop to the Berambadi
    # watershed extent, resample to 1 km and write the result to Ofile.
    # Requires the GDAL command line tools; exits on any GDAL failure.
    # define the temporary file names
    Temp1 = '/home/tomer/MODISdata/temp/temp1.tif'
    Temp2 = '/home/tomer/MODISdata/temp/temp2.tif'
    # convert from geographical co-ordinates to UTM zone 43
    cmd= 'gdalwarp -r bilinear -t_srs \'+proj=utm +zone=43 +datum=WGS84\' ' + Ifile + ' ' +Temp1
    try:
        returncode = call(cmd, shell=True)
        if returncode:
            print 'Failure with returncode', returncode; sys.exit(1)
    except OSError, message:
        print 'Execution failed!\n', message; sys.exit(1)
    # cut the area around the Berambadi watershed (bounds in UTM metres)
    bmd='664000 1309000 685000 1294000'
    cmd = 'gdal_translate -a_ullr ' + bmd + ' -projwin ' + bmd + ' ' + Temp1+ ' ' + Temp2
    try:
        returncode = call(cmd, shell=True)
        if returncode:
            print 'Failure with returncode', returncode; sys.exit(1)
    except OSError, message:
        print 'Execution failed!\n', message; sys.exit(1)
    # changing the resolution to 1000 m
    cmd = 'gdalwarp -r bilinear -tr 1000 1000 ' + Temp2 + ' ' +Ofile
    try:
        returncode = call(cmd, shell=True)
        if returncode:
            print 'Failure with returncode', returncode; sys.exit(1)
    except OSError, message:
        print 'Execution failed!\n', message; sys.exit(1)
    # remove the temporary files
    try:
        os.remove(Temp1)
        os.remove(Temp2)
    except:
        print 'temp file not created'
def geodetic_area(lon_cen, size_cell):
    """Area (square metres) of a square cell of ``size_cell`` degrees
    centred at ``lon_cen``, on a sphere of mean Earth radius.

    NOTE(review): despite the name ``lon_cen``, the sine terms use it as
    the latitude-like coordinate of the cell centre -- confirm with callers.
    """
    earth_radius = 6371229.0  # mean Earth radius in metres
    half = size_cell / 2.0
    deg2rad = np.pi / 180.0
    band = np.abs(np.sin((lon_cen - half) * deg2rad) -
                  np.sin((lon_cen + half) * deg2rad))
    return earth_radius ** 2 * np.abs(size_cell) * deg2rad * band
def latitude_length(longitude):
    """Length (metres) of one degree of latitude on the WGS84 ellipsoid.

    NOTE(review): the parameter is named ``longitude`` but it enters the
    meridian-arc formula where the latitude belongs; the name is kept for
    backward compatibility.
    """
    semi_major = 6378137.0
    semi_minor = 6356752.3142
    ecc = np.sqrt((semi_major ** 2 - semi_minor ** 2) / semi_major ** 2)
    sin_sq = np.sin(longitude * np.pi / 180.0) ** 2
    return np.pi * semi_major * (1 - ecc ** 2) / (180 * (1 - ecc ** 2 * sin_sq) ** 1.5)
def longitude_length(longitude):
    """Length (metres) of one degree of longitude on the WGS84 ellipsoid
    at the given latitude (parameter name kept for backward compatibility)."""
    semi_major = 6378137.0
    semi_minor = 6356752.3142
    ecc = np.sqrt((semi_major ** 2 - semi_minor ** 2) / semi_major ** 2)
    lat_rad = longitude * np.pi / 180.0
    denom = 180 * (1 - ecc ** 2 * np.sin(lat_rad) ** 2) ** 0.5
    return np.pi * semi_major * np.cos(lat_rad) / denom
def great_circle_distance(lat_s, lon_s, lat_f, lon_f):
    """Great-circle distance (km) between two points given in degrees.

    Uses the Vincenty arctan formulation (numerically stable for both
    nearby and antipodal points) with a mean Earth radius of 6372.8 km.

    Input:
        lat_s, lon_s : latitude/longitude (degree) of the standpoint
        lat_f, lon_f : latitude/longitude (degree) of the forepoint
    Output:
        distance in kilometres
    """
    radius_km = 6372.8
    phi1 = lat_s * np.pi / 180.0
    phi2 = lat_f * np.pi / 180.0
    dlon = np.abs(lon_f - lon_s) * np.pi / 180.0
    numerator = np.sqrt(
        (np.cos(phi2) * np.sin(dlon)) ** 2 +
        (np.cos(phi1) * np.sin(phi2) -
         np.sin(phi1) * np.cos(phi2) * np.cos(dlon)) ** 2)
    denominator = (np.sin(phi1) * np.sin(phi2) +
                   np.cos(phi1) * np.cos(phi2) * np.cos(dlon))
    return radius_km * np.arctan2(numerator, denominator)
def read_ascii_grid(fname, dtype='float'):
    """
    Read an ESRI ASCII grid file.
    Input:
        fname: input file name
        dtype: 'int' or 'float'; with 'float' the NODATA cells are
               replaced by NaN
    Output:
        data: 2D numpy array of the grid values
        header: dict with nrows, ncols, xllcorner, yllcorner, cellsize,
                NODATA_value (NaN if the file has no NODATA_value line)
    NOTE: the exec() calls assign header values into the local namespace,
    which only works under Python 2.
    """
    n_headers = 5 # number of headers in the file
    # open file for reading
    f = open(fname, 'r')
    # read the header information
    var = ['nrows', 'ncols', 'xllcorner', 'yllcorner', 'cellsize',
           'NODATA_value']
    for i in range(n_headers):
        foo = f.readline().split()
        exec("%s = %s"%(foo[0],foo[1]))
    # check if NODATA_value exist in the file (optional sixth header line)
    foo = f.readline().split()
    if 'NODATA_value' in foo[0]:
        exec("%s = %s"%(foo[0],foo[1]))
        n_headers = n_headers+1
    else:
        NODATA_value = np.nan
    # check if all the variables are read
    # if not then issue an error
    header = {}
    for v in var:
        try:
            exec("header['%s'] = %s"%(v,v))
        except NameError:
            print "The variable %s could not be find in the file"%v
    f.close()
    # the data body starts after the header lines
    data = np.genfromtxt(fname, skip_header=n_headers, dtype = dtype)
    if dtype == 'float':
        data[data==NODATA_value] = np.nan
    elif dtype == 'int':
        pass
    else:
        raise Exception('invalid value in dtype')
    return data, header
def write_ascii_grid(fname, data, header, dtype='float'):
    """
    Write a 2D array as an ESRI ASCII grid file.
    Input:
        fname: output file name
        data: 2D numpy array; NaN cells are written as
              header['NODATA_value'] (the caller's array is not modified)
        header: dict with nrows, ncols, xllcorner, yllcorner, cellsize,
                NODATA_value
        dtype: numpy dtype the values are cast to before writing
    Returns 0 on success.
    """
    var = ['nrows', 'ncols', 'xllcorner', 'yllcorner', 'cellsize',
           'NODATA_value']
    # report any missing header entry up front; the old check used
    # exec() with "except NameError", which could never fire because a
    # missing key raises KeyError, not NameError
    for v in var:
        if v not in header:
            print("The variable %s could not be find in the file" % v)
    f = open(fname, 'w')
    try:
        # write the header information
        for v in var:
            f.write('%s \t %s \n' % (v, header[v]))
        # replace NaNs with NODATA_value on a copy so the caller's array
        # is left untouched, then cast to the requested data type
        data = data.copy()
        data[np.isnan(data)] = header['NODATA_value']
        data = data.astype(dtype)
        # write the data, one grid row per line
        for i in range(header['nrows']):
            for j in range(header['ncols']):
                f.write('%s ' % data[i, j])
            # bug fix: was f.write('\n'%data[i,j]), which raises
            # TypeError ('\n' has no conversion specifier)
            f.write('\n')
    finally:
        f.close()
    return 0
if __name__ == '__main__':
    # Ad-hoc manual tests; the earlier experiments are kept commented out.
    # from utm to degree
    #x = 60000
    #y = 1200000
    #lat,lon = utm2deg(x,y,utmzone=43)
    #print(lat,lon)
    ## from degree to utm
    #x,y = deg2utm(lat,lon,utmzone=43)
    #
    #print(x,y)
    # test the sample
    #fname = '/home/tomer/svn/ambhas/examples/sample_ascii_grid.grd'
    #data, header = read_ascii_grid(fname)
    #print data
    #print header
    #plt.matshow(data)
    #plt.show()
    # write the sample
    #fname = '/home/tomer/svn/ambhas/examples/sample_ascii_grid_out.grd'
    #write_ascii_grid(fname, data, header)
    #
    #generate the area of the square grid globally with 1 degree resolution
    #lon_cen = np.linspace(-90,90)
    #size = 1
    #area = geodetic_area(lon_cen, size)
    #area1 = geodetic_area(lon_cen, size/1.0)
    #plt.plot(lon_cen, area1/1e6)
    #plt.xlabel('Longitude (degree)')
    #plt.ylabel('Area of a degree square (Sq. km)')
    #plt.show()
    # length of latitude and longitude
    #longitude = np.linspace(0,90)
    #len_lat = latitude_length(longitude)
    #len_lon = longitude_length(longitude)
    #ax1 = plt.subplot(111)
    #plt.plot(latitude, len_lat/1000, 'b-', label='Latitude')
    #ax1.set_xlabel('Longitude (degree)')
    #ax1.set_ylabel('Length of a degree Latitude (km)', color='b')
    #for tl in ax1.get_yticklabels():
    #    tl.set_color('b')
    #ax2 = plt.twinx()
    #ax2.plot(latitude, len_lon/1000, 'r--')
    #for tl in ax2.get_yticklabels():
    #    tl.set_color('r')
    #ax2.set_ylabel('Length of a degree Longitude (km)', color='r')
    #plt.show()
    ## great circle distance example
    #lat_s, lon_s = 36.12, -86.67
    #lat_f, lon_f = 33.94, -118.40
    #dis = great_circle_distance(lat_s,lon_s,lat_f,lon_f)
    #Ifile = '/home/tomer/mdb/input/DEM/15-R.tif'
    #Ofile = '/home/tomer/temp/foo.txt'
    #xmin, xmax, ymin, ymax = 138.0, 153.0, -38, -24.0
    #xmin, xmax, ymin, ymax = 138.5, 152.5, -37.5, -24.5
    #cut_xy(Ifile, Ofile, xmin, xmax, ymin, ymax)
    # current smoke test: read an integer soil map and print its shape/max
    file_soil = '/home/tomers/Projects/aicha/soil_map/soil.grd'
    data, header = read_ascii_grid(file_soil, dtype='int')
    print data.shape, data.max()
| tectronics/ambhas | ambhas/gis.py | Python | lgpl-2.1 | 12,300 |
#!/usr/bin/env python
#
# Copyright 2014 Dell Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for cleaning up environment after Tempest run
Runtime Arguments
-----------------
**--init-saved-state**: Before you can execute cleanup you must initialize
the saved state by running it with the **--init-saved-state** flag
(creating ./saved_state.json), which protects your deployment from
cleanup deleting objects you want to keep. Typically you would run
cleanup with **--init-saved-state** prior to a tempest run. If this is not
the case saved_state.json must be edited, removing objects you want
cleanup to delete.
**--dry-run**: Creates a report (dry_run.json) of the tenants that will be
cleaned up (in the "_tenants_to_clean" array), and the global objects
that will be removed (tenants, users, flavors and images). Once
cleanup is executed in normal mode, running it again with **--dry-run**
should yield an empty report.
**NOTE**: The _tenants_to_clean array in dry-run.json lists the
tenants that cleanup will loop through and delete child objects, not
delete the tenant itself. This may differ from the tenants array as you
can clean the tempest and alternate tempest tenants but by default,
cleanup deletes the objects in the tempest and alternate tempest tenants
but does not delete those tenants unless the **--delete-tempest-conf-objects**
flag is used to force their deletion.
**Normal mode**: running with no arguments, will query your deployment and
build a list of objects to delete after filtering out the objects found in
saved_state.json and based on the **--delete-tempest-conf-objects** flag.
By default the tempest and alternate tempest users and tenants are not
deleted and the admin user specified in tempest.conf is never deleted.
Please run with **--help** to see full list of options.
"""
import sys
import traceback
from cliff import command
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from tempest import clients
from tempest.cmd import cleanup_service
from tempest.common import credentials_factory as credentials
from tempest.common import identity
from tempest import config
SAVED_STATE_JSON = "saved_state.json"
DRY_RUN_JSON = "dry_run.json"
LOG = logging.getLogger(__name__)
CONF = config.CONF
class TempestCleanup(command.Command):
    """Cliff command that deletes objects left behind by a tempest run.

    Objects recorded in saved_state.json (written by --init-saved-state)
    are preserved; --dry-run only reports what would be deleted.
    """
    def __init__(self, app, cmd):
        super(TempestCleanup, self).__init__(app, cmd)
    def take_action(self, parsed_args):
        # Entry point invoked by cliff; with --init-saved-state only the
        # state snapshot is written and no cleanup is performed.
        try:
            self.init(parsed_args)
            if not parsed_args.init_saved_state:
                self._cleanup()
        except Exception:
            LOG.exception("Failure during cleanup")
            traceback.print_exc()
            raise
        return 0
    def init(self, parsed_args):
        # Prepare admin credentials, cached ids and the service lists,
        # then either snapshot state or load the existing snapshot.
        cleanup_service.init_conf()
        self.options = parsed_args
        self.admin_mgr = credentials.AdminManager()
        self.dry_run_data = {}
        self.json_data = {}
        self.admin_id = ""
        self.admin_role_id = ""
        self.admin_tenant_id = ""
        self._init_admin_ids()
        self.admin_role_added = []
        # available services
        self.tenant_services = cleanup_service.get_tenant_cleanup_services()
        self.global_services = cleanup_service.get_global_cleanup_services()
        if parsed_args.init_saved_state:
            self._init_state()
            return
        self._load_json()
    def _cleanup(self):
        # Clean every tenant's child objects first, then the global
        # objects (tenants, users, flavors, images).
        print ("Begin cleanup")
        is_dry_run = self.options.dry_run
        is_preserve = not self.options.delete_tempest_conf_objects
        is_save_state = False
        if is_dry_run:
            self.dry_run_data["_tenants_to_clean"] = {}
        admin_mgr = self.admin_mgr
        # Always cleanup tempest and alt tempest tenants unless
        # they are in saved state json. Therefore is_preserve is False
        kwargs = {'data': self.dry_run_data,
                  'is_dry_run': is_dry_run,
                  'saved_state_json': self.json_data,
                  'is_preserve': False,
                  'is_save_state': is_save_state}
        tenant_service = cleanup_service.TenantService(admin_mgr, **kwargs)
        tenants = tenant_service.list()
        print ("Process %s tenants" % len(tenants))
        # Loop through list of tenants and clean them up.
        for tenant in tenants:
            self._add_admin(tenant['id'])
            self._clean_tenant(tenant)
        kwargs = {'data': self.dry_run_data,
                  'is_dry_run': is_dry_run,
                  'saved_state_json': self.json_data,
                  'is_preserve': is_preserve,
                  'is_save_state': is_save_state}
        for service in self.global_services:
            svc = service(admin_mgr, **kwargs)
            svc.run()
        if is_dry_run:
            with open(DRY_RUN_JSON, 'w+') as f:
                f.write(json.dumps(self.dry_run_data, sort_keys=True,
                        indent=2, separators=(',', ': ')))
        self._remove_admin_user_roles()
    def _remove_admin_user_roles(self):
        # Drop the temporary admin role from every tenant it was added to.
        tenant_ids = self.admin_role_added
        LOG.debug("Removing admin user roles where needed for tenants: %s"
                  % tenant_ids)
        for tenant_id in tenant_ids:
            self._remove_admin_role(tenant_id)
    def _clean_tenant(self, tenant):
        # Run every tenant-scoped cleanup service against this tenant,
        # authenticated as admin within the tenant.
        print ("Cleaning tenant: %s " % tenant['name'])
        is_dry_run = self.options.dry_run
        dry_run_data = self.dry_run_data
        is_preserve = not self.options.delete_tempest_conf_objects
        tenant_id = tenant['id']
        tenant_name = tenant['name']
        tenant_data = None
        if is_dry_run:
            tenant_data = dry_run_data["_tenants_to_clean"][tenant_id] = {}
            tenant_data['name'] = tenant_name
        kwargs = {"username": CONF.auth.admin_username,
                  "password": CONF.auth.admin_password,
                  "tenant_name": tenant['name']}
        mgr = clients.Manager(credentials=credentials.get_credentials(
            **kwargs))
        kwargs = {'data': tenant_data,
                  'is_dry_run': is_dry_run,
                  'saved_state_json': None,
                  'is_preserve': is_preserve,
                  'is_save_state': False,
                  'tenant_id': tenant_id}
        for service in self.tenant_services:
            svc = service(mgr, **kwargs)
            svc.run()
    def _init_admin_ids(self):
        # Cache the admin user/tenant/role ids used when granting and
        # revoking the temporary admin role per tenant.
        tn_cl = self.admin_mgr.tenants_client
        rl_cl = self.admin_mgr.roles_client
        tenant = identity.get_tenant_by_name(tn_cl,
                                             CONF.auth.admin_tenant_name)
        self.admin_tenant_id = tenant['id']
        user = identity.get_user_by_username(tn_cl, self.admin_tenant_id,
                                             CONF.auth.admin_username)
        self.admin_id = user['id']
        roles = rl_cl.list_roles()['roles']
        for role in roles:
            if role['name'] == CONF.identity.admin_role:
                self.admin_role_id = role['id']
                break
    def get_parser(self, prog_name):
        # Declare the command-line options (see module docstring).
        parser = super(TempestCleanup, self).get_parser(prog_name)
        parser.add_argument('--init-saved-state', action="store_true",
                            dest='init_saved_state', default=False,
                            help="Creates JSON file: " + SAVED_STATE_JSON +
                            ", representing the current state of your "
                            "deployment, specifically object types "
                            "tempest creates and destroys during a run. "
                            "You must run with this flag prior to "
                            "executing cleanup in normal mode, which is with "
                            "no arguments.")
        parser.add_argument('--delete-tempest-conf-objects',
                            action="store_true",
                            dest='delete_tempest_conf_objects',
                            default=False,
                            help="Force deletion of the tempest and "
                            "alternate tempest users and tenants.")
        parser.add_argument('--dry-run', action="store_true",
                            dest='dry_run', default=False,
                            help="Generate JSON file:" + DRY_RUN_JSON +
                            ", that reports the objects that would have "
                            "been deleted had a full cleanup been run.")
        return parser
    def get_description(self):
        return 'Cleanup after tempest run'
    def _add_admin(self, tenant_id):
        # Grant the admin user the admin role in the tenant (if not
        # already present) so tenant-scoped deletes are authorized.
        rl_cl = self.admin_mgr.roles_client
        needs_role = True
        roles = rl_cl.list_user_roles(tenant_id, self.admin_id)['roles']
        for role in roles:
            if role['id'] == self.admin_role_id:
                needs_role = False
                LOG.debug("User already had admin privilege for this tenant")
        if needs_role:
            LOG.debug("Adding admin privilege for : %s" % tenant_id)
            rl_cl.assign_user_role(tenant_id, self.admin_id,
                                   self.admin_role_id)
            self.admin_role_added.append(tenant_id)
    def _remove_admin_role(self, tenant_id):
        LOG.debug("Remove admin user role for tenant: %s" % tenant_id)
        # Must initialize AdminManager for each user role
        # Otherwise authentication exception is thrown, weird
        id_cl = credentials.AdminManager().identity_client
        if (self._tenant_exists(tenant_id)):
            try:
                id_cl.delete_user_role(tenant_id, self.admin_id,
                                       self.admin_role_id)
            except Exception as ex:
                LOG.exception("Failed removing role from tenant which still"
                              "exists, exception: %s" % ex)
    def _tenant_exists(self, tenant_id):
        # The tenant may already have been deleted during cleanup.
        tn_cl = self.admin_mgr.tenants_client
        try:
            t = tn_cl.show_tenant(tenant_id)
            LOG.debug("Tenant is: %s" % str(t))
            return True
        except Exception as ex:
            LOG.debug("Tenant no longer exists? %s" % ex)
            return False
    def _init_state(self):
        # Snapshot the current deployment into saved_state.json; those
        # objects will be preserved by later cleanup runs.
        print ("Initializing saved state.")
        data = {}
        admin_mgr = self.admin_mgr
        kwargs = {'data': data,
                  'is_dry_run': False,
                  'saved_state_json': data,
                  'is_preserve': False,
                  'is_save_state': True}
        for service in self.global_services:
            svc = service(admin_mgr, **kwargs)
            svc.run()
        with open(SAVED_STATE_JSON, 'w+') as f:
            f.write(json.dumps(data,
                    sort_keys=True, indent=2, separators=(',', ': ')))
    def _load_json(self):
        # Load the saved state written by --init-saved-state; abort with
        # a helpful message if it is missing or malformed.
        try:
            with open(SAVED_STATE_JSON) as json_file:
                self.json_data = json.load(json_file)
        except IOError as ex:
            LOG.exception("Failed loading saved state, please be sure you"
                          " have first run cleanup with --init-saved-state "
                          "flag prior to running tempest. Exception: %s" % ex)
            sys.exit(ex)
        except Exception as ex:
            LOG.exception("Exception parsing saved state json : %s" % ex)
            sys.exit(ex)
| zsoltdudas/lis-tempest | tempest/cmd/cleanup.py | Python | apache-2.0 | 11,901 |
# Example: build a spacepy Ticktock from Julian Dates ('JD') and print the
# equivalent ISO 8601 timestamps.
import spacepy.time as spt
x = spt.Ticktock([2452331.0142361112, 2452332.0142361112], 'JD')
print(x.ISO)
| fsbr/se3-path-planner | cuspStudy/scratch/tickexample.py | Python | mit | 106 |
import select
import socket
try:
from eventlet.patcher import is_monkey_patched as is_eventlet
except ImportError:
is_eventlet = lambda module: False # noqa
POLL_READ = 0x001
POLL_ERR = 0x008 | 0x010 | 0x2000
class _epoll(object):
    """Poller backed by ``select.epoll`` (Linux), exposing the minimal
    register/unregister/poll interface shared by this module's pollers."""
    def __init__(self):
        self._epoll = select.epoll()
    def register(self, fd, events):
        self._epoll.register(fd, events)
    def unregister(self, fd):
        try:
            self._epoll.unregister(fd)
        except socket.error:
            pass
    def poll(self, timeout):
        # Callers pass milliseconds; epoll.poll takes seconds and blocks
        # indefinitely on -1 (a falsy timeout maps to blocking).
        return self._epoll.poll(timeout and timeout / 1000.0 or -1)
class _kqueue(object):
    """Poller backed by ``select.kqueue`` (BSD / macOS), exposing the same
    minimal register/unregister/poll interface as ``_epoll``."""
    def __init__(self):
        self._kqueue = select.kqueue()
        self._active = {}
    def register(self, fd, events):
        self._control(fd, events, select.KQ_EV_ADD)
        self._active[fd] = events
    def unregister(self, fd):
        events = self._active.pop(fd)
        try:
            self._control(fd, events, select.KQ_EV_DELETE)
        except socket.error:
            pass
    def _control(self, fd, events, flags):
        self._kqueue.control([select.kevent(fd, filter=select.KQ_FILTER_READ,
                                            flags=flags)], 0)
    def poll(self, timeout):
        # Callers pass milliseconds; kqueue.control takes seconds.
        kevents = self._kqueue.control(None, 1000,
                                       timeout and timeout / 1000.0 or timeout)
        events = {}
        for kevent in kevents:
            fd = kevent.ident
            if kevent.filter == select.KQ_FILTER_READ:
                events[fd] = events.get(fd, 0) | POLL_READ
            # Bug fix: error conditions are reported through ``kevent.flags``
            # (KQ_EV_ERROR), not ``kevent.filter``; the previous test
            # ``kevent.filter == select.KQ_EV_ERROR`` could never match
            # because filter values are small negative constants.
            if kevent.flags & select.KQ_EV_ERROR:
                events[fd] = events.get(fd, 0) | POLL_ERR
        return events.items()
class _select(object):
    """Portable fallback poller built on ``select.select``; tracks read
    and error interest in two fd sets."""
    def __init__(self):
        self._all = self._rfd, self._efd = set(), set()
    def register(self, fd, events):
        wants_err = bool(events & POLL_ERR)
        if wants_err:
            self._efd.add(fd)
        if wants_err or events & POLL_READ:
            self._rfd.add(fd)
    def unregister(self, fd):
        for registry in self._all:
            registry.discard(fd)
    def poll(self, timeout):
        readable, _writable, failed = select.select(
            self._rfd, [], self._efd, timeout)
        events = {}
        for sock in readable:
            fileno = sock.fileno()
            events[fileno] = events.get(fileno, 0) | POLL_READ
        for sock in failed:
            fileno = sock.fileno()
            events[fileno] = events.get(fileno, 0) | POLL_ERR
        return events.items()
# Pick the best available poller implementation for this platform.
if is_eventlet(select):
    # use Eventlet's non-blocking version of select.select
    poll = _select
elif hasattr(select, "epoll"):
    # Py2.6+ Linux
    poll = _epoll
elif hasattr(select, "kqueue"):
    # Py2.6+ on BSD / Darwin
    poll = _kqueue
else:
    # portable fallback based on select.select
    poll = _select
| pantheon-systems/kombu | kombu/utils/eventio.py | Python | bsd-3-clause | 2,757 |
# -*- coding: utf-8 -*-
import csv
from django.core.management.base import BaseCommand
from intranet.apps.eighth.models import EighthActivity
from intranet.apps.groups.models import Group
from intranet.apps.users.models import User
class Command(BaseCommand):
    help = "Transfer attendance data"
    def handle(self, **options):
        """Import per-activity user permissions from an exported
        "eighth_activity_permissions" CSV (columns: activity id, user id):
        for each activity, users are collected into a "<activity> --
        Permissions" group which is then added to the activity's
        groups_allowed."""
        # Map activity id -> list of User objects allowed to sign up.
        perm_map = {}
        with open('eighth_activity_permissions.csv', 'r') as absperms:
            perms = csv.reader(absperms)
            for row in perms:
                aid, uid = row
                try:
                    usr = User.objects.get(id=uid)
                except User.DoesNotExist:
                    self.stdout.write("User {} doesn't exist, aid {}".format(uid, aid))
                else:
                    if aid in perm_map:
                        perm_map[aid].append(usr)
                    else:
                        perm_map[aid] = [usr]
        for aid in perm_map:
            try:
                act = EighthActivity.objects.get(id=aid)
            except EighthActivity.DoesNotExist:
                self.stdout.write("Activity {} doesn't exist".format(aid))
            else:
                self.stdout.write("{}: {}".format(aid, EighthActivity.objects.get(id=aid)))
                # Group names are capped at 64 chars; truncate the activity
                # string to 55 to leave room for the " -- Permissions" suffix.
                grp, _ = Group.objects.get_or_create(name="{} -- Permissions".format("{}".format(act)[:55]))
                users = perm_map[aid]
                for u in users:
                    u.groups.add(grp)
                    u.save()
                act.groups_allowed.add(grp)
                act.save()
        self.stdout.write("Done.")
| jacobajit/ion | intranet/apps/eighth/management/commands/import_permissions.py | Python | gpl-2.0 | 1,682 |
# -*- coding=utf -*-
import unittest
from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column
from cubes import *
from cubes.errors import *
from ..common import CubesTestCaseBase
from json import dumps
def printable(obj):
    """Render *obj* as human-readable, 4-space-indented JSON."""
    pretty = dumps(obj, indent=4)
    return pretty
class AggregatesTestCase(CubesTestCaseBase):
    """Tests for cube aggregate definitions (explicit aggregates,
    post-calculated measures, unknown aggregate functions) against an
    in-memory SQLite fact table."""
    sql_engine = "sqlite:///"
    def setUp(self):
        super(AggregatesTestCase, self).setUp()
        # Fact table: 4 years x 4 rows; amounts 1,2,4,8 per year
        # (total amount per year = 15, grand total = 60, 16 facts).
        self.facts = Table("facts", self.metadata,
                           Column("id", Integer),
                           Column("year", Integer),
                           Column("amount", Integer),
                           Column("price", Integer),
                           Column("discount", Integer)
                           )
        self.metadata.create_all()
        data = [
            ( 1, 2010, 1, 100, 0),
            ( 2, 2010, 2, 200, 10),
            ( 3, 2010, 4, 300, 0),
            ( 4, 2010, 8, 400, 20),
            ( 5, 2011, 1, 500, 0),
            ( 6, 2011, 2, 600, 40),
            ( 7, 2011, 4, 700, 0),
            ( 8, 2011, 8, 800, 80),
            ( 9, 2012, 1, 100, 0),
            (10, 2012, 2, 200, 0),
            (11, 2012, 4, 300, 0),
            (12, 2012, 8, 400, 10),
            (13, 2013, 1, 500, 0),
            (14, 2013, 2, 600, 0),
            (15, 2013, 4, 700, 0),
            (16, 2013, 8, 800, 20),
        ]
        self.load_data(self.facts, data)
        self.workspace = self.create_workspace(model="aggregates.json")
    def test_unknown_function(self):
        # Aggregating a cube whose model declares an unknown aggregate
        # function must raise ArgumentError.
        browser = self.workspace.browser("unknown_function")
        with self.assertRaisesRegex(ArgumentError, "Unknown.*function"):
            browser.aggregate()
    def test_explicit(self):
        browser = self.workspace.browser("default")
        result = browser.aggregate()
        summary = result.summary
        self.assertEqual(60, summary["amount_sum"])
        self.assertEqual(16, summary["count"])
    def test_post_calculation(self):
        # Drilling down must expose the post-calculated aggregate
        # (amount_sma) alongside the regular ones in each cell.
        browser = self.workspace.browser("postcalc_in_measure")
        result = browser.aggregate(drilldown=["year"])
        cells = list(result.cells)
        aggregates = sorted(cells[0].keys())
        self.assertSequenceEqual(['amount_sma', 'amount_sum', 'count', 'year'],
                                 aggregates)
| ubreddy/cubes | tests/sql/test_aggregates.py | Python | mit | 2,324 |
'''
urlresolver Kodi plugin
Copyright (C) 2018
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import json
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class FembedResolver(UrlResolver):
    """Resolver for fembed-style hosts: POSTs to the host's JSON source API
    and returns the chosen mp4 stream URL with request headers appended."""
    name = "fembed"
    domains = ["fembed.com", "24hd.club", "vcdn.io"]
    pattern = '(?://|\.)((?:fembed\.com|24hd\.club|vcdn\.io))/v/([a-zA-Z0-9]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Resolve a playable stream URL for ``media_id`` hosted on ``host``.

        Raises ResolverError when the API returns nothing, reports failure,
        or the response cannot be parsed.
        """
        page_url = self.get_url(host, media_id)
        request_headers = {'Referer': page_url, 'User-Agent': common.RAND_UA}
        source_api = 'https://www.%s/api/source/%s' % (host, media_id)
        response = self.net.http_POST(source_api, form_data={'r': ''},
                                      headers=request_headers).content
        if not response:
            raise ResolverError('Video not found')
        try:
            payload = json.loads(response)
            if not payload.get('success'):
                # API signalled failure; surface its message via the
                # generic error path below.
                raise Exception(payload.get('data'))
            # Keep only mp4 entries as (label, url) pairs.
            streams = [(entry.get('label'), entry.get('file'))
                       for entry in payload.get('data')
                       if entry.get('type') == 'mp4']
            common.logger.log(streams)
            streams = helpers.sort_sources_list(streams)
            return helpers.pick_source(streams) + helpers.append_headers(request_headers)
        except Exception as e:
            raise ResolverError('Error getting video: %s' % e)

    def get_url(self, host, media_id):
        """Build the canonical embed page URL for this host/media pair."""
        return self._default_get_url(host, media_id, 'https://www.{host}/v/{media_id}')
| koditr/xbmc-tr-team-turkish-addons | script.module.urlresolver/lib/urlresolver/plugins/fembed.py | Python | gpl-2.0 | 2,251 |
#!/usr/bin/env python3
#
# Copyright (C) 2007-2013 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
r"""
This command is the wrapper for all administrative actions on frePPLe.
"""
import os
import sys
if __name__ == "__main__":
    # Initialize Python virtual environments.
    # When running inside a virtualenv, execute its activate_this.py so that
    # sys.path points at the environment's site-packages before Django loads.
    if "VIRTUAL_ENV" in os.environ:
        activate_script = os.path.join(
            os.environ["VIRTUAL_ENV"],
            # Windows virtualenvs keep scripts in "Scripts", POSIX in "bin".
            "Scripts" if os.name == "nt" else "bin",
            "activate_this.py",
        )
        with open(activate_script) as f:
            exec(f.read(), {"__file__": activate_script})

    # Initialize django.
    # The settings module must be set *before* importing django; the import
    # is therefore deliberately placed after the environment setup.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freppledb.settings")
    import django

    django.setup()

    # Synchronize the scenario table with the settings.
    # Imported here because models can only load after django.setup().
    from freppledb.common.models import Scenario

    Scenario.syncWithSettings()

    # Run the command: delegate to Django's standard management entry point.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| frePPLe/frePPLe | frepplectl.py | Python | agpl-3.0 | 1,613 |
#!/usr/bin/env python
from distutils.core import setup, Extension
import numpy
from Cython.Build import cythonize
# Build configuration: two hand-written C extensions plus a Cython module,
# all compiled with speed-oriented flags. NumPy headers are required because
# the extensions operate on NumPy buffers.
setup(
    name='PievaCore',
    version='1.0.0',
    description='Low level pixel to led mapping provider',
    author='Albertas Mickenas',
    author_email='mic@wemakethings.net',
    packages=['core'],
    ext_modules=[
        # Plain C extensions, optimized for size with unrolled loops and
        # fast (non-IEEE-strict) math.
        Extension('core.PixelMapper', ['core/pixelMapper.c'],
                  extra_compile_args=['-Os', '-funroll-loops', '-ffast-math'],
                  ),
        Extension('core.NoiseGenerator', ['core/noiseGenerator.c'],
                  extra_compile_args=['-Os', '-funroll-loops', '-ffast-math'],
                  ),
    # cythonize() returns a list of Extension objects, appended to the
    # hand-written ones above.
    ] + cythonize("c_ripples.pyx"),
    include_dirs=[numpy.get_include()],
)
| stavka/pieva2 | setup.py | Python | gpl-2.0 | 669 |
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Create your views here.
| reactormonk/eve-wspace | evewspace/Recruitment/views.py | Python | gpl-3.0 | 855 |
#!/usr/bin/env python
""" Provide support for assignment formats.
The convention is that when stored in files, column index starts
with 1, even though python convention starts with 0.
"""
# Version 1
from listR import isFloat
def assignmentExprStream2Dict(data, assignmentSymbol="=", commentSymbol="#"):
    """ Assume that each line in the "data" stream is an assignment
        expression (e.g. "x=1", "x:1", etc), return a dictionary representing
        such assignments. The keys are the stripped LHS of the assignments
        and the values are the RHS assignments, converted into numerics
        if possible (otherwise left as strings).
        Any string beyond the commentSymbol is discarded *before* the line
        is parsed, so fully commented-out assignments are ignored and
        trailing comments never leak into the parsed values.
        Any line without the assignmentSymbol will be ignored.
    """
    return_dict = {}
    for aLine in data:
        # Fix: the original computed the comment-free line but never used it,
        # so "# x = 1" produced a bogus "# x" key and "x = 1 # note" kept the
        # comment in the value. Strip the comment first, then parse.
        commentSymbolLocation = aLine.find(commentSymbol)
        if commentSymbolLocation != -1:
            aLine = aLine[:commentSymbolLocation]
        assignmentSymbolLocation = aLine.find(assignmentSymbol)
        if assignmentSymbolLocation == -1:
            continue  # no assignmentSymbol found in the line
        LHS = aLine[:assignmentSymbolLocation].strip()
        RHS = aLine[assignmentSymbolLocation + 1:].strip()
        if isFloat(RHS):
            RHS = float(RHS)
        return_dict[LHS] = RHS
    return return_dict
def assignmentExprStream2IndexDict(data, assignmentSymbol="=", commentSymbol="#", autoAdjustShift=True):
    """ Once the assignment dictionary is generated by assignmentExprStream2Dict,
        convert the "values" to integer indices. By default if the lowest index
        in the dictionary is 1 then all indices (values) are shifted by -1.
        An empty input stream yields an empty dictionary.
    """
    converted_dict = assignmentExprStream2Dict(data, assignmentSymbol, commentSymbol)
    values = [int(converted_dict[key]) for key in converted_dict]
    shift_in_index = 0
    # Fix: guard the min() call -- the original evaluated min(values)
    # unconditionally, which raises ValueError on an empty stream and wasted
    # work when autoAdjustShift was disabled.
    if autoAdjustShift and values:
        if min(values) == 1:
            shift_in_index = 1
    return {key: int(converted_dict[key]) - shift_in_index
            for key in converted_dict.keys()}
def dict2AssignmentExprList(aDict, assignmentSymbol="="):
    """ Inverse of assignmentExprStream2Dict: render a dictionary as a list
        of assignment-expression strings of the form (by default) "x=1".
    """
    expressions = []
    for key in aDict.keys():
        expressions.append(key + assignmentSymbol + str(aDict[key]))
    return expressions
| palmerjh/iEBE | PlayGround/job-2/binUtilities/assignmentFormat.py | Python | gpl-3.0 | 2,468 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base classes for objects and collections
"""
import collections
from itertools import ifilter
import operator
from oslo.serialization import jsonutils
import six
from sqlalchemy import and_, not_
from sqlalchemy.orm import joinedload
from nailgun.objects.serializers.base import BasicSerializer
from nailgun.db import db
from nailgun.db import NoCacheQuery
from nailgun.errors import errors
from nailgun.expression import Expression
from nailgun.openstack.common.db import api as db_api
# Database-backend indirection: map the logical backend name to the module
# that implements it, then build the DB API facade from that mapping.
_BACKEND_MAPPING = {'sqlalchemy': 'nailgun.db.sqlalchemy.api'}
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
class RestrictionMixin(object):
    """Mixin which extend nailgun objects with restriction
    processing functionality
    """

    @classmethod
    def check_restrictions(cls, models, restrictions, action=None):
        """Check if attribute satisfied restrictions

        :param models: objects which represent models in restrictions
        :type models: dict
        :param restrictions: list of restrictions to check
        :type restrictions: list
        :param action: filtering restrictions by action key
        :type action: string
        :returns: dict -- object with 'result' as bool and 'message' as string
        """
        satisfied = []
        if restrictions:
            # Narrow down to the requested action first (no action given
            # means every restriction is considered).
            if action:
                candidates = [item for item in restrictions
                              if item.get('action') == action]
            else:
                candidates = list(restrictions)
            # A restriction is "satisfied" when its condition evaluates to
            # true against the supplied models.
            satisfied = [item for item in candidates
                         if Expression(item.get('condition'),
                                       models).evaluate()]
        return {
            'result': bool(satisfied),
            'message': '. '.join(item.get('message') for item in satisfied
                                 if item.get('message'))
        }

    @staticmethod
    def _expand_restriction(restriction):
        """Get restriction in different formats like string, short
        or long dict formats and return in one canonical format

        :param restriction: restriction object
        :type restriction: string|dict
        :returns: dict -- restriction object in canonical format:
                 {
                    'action': 'enable|disable|hide|none'
                    'condition': 'value1 == value2',
                    'message': 'value1 shouldn't equal value2'
                 }
        """
        expanded = {
            'action': 'disable'
        }
        if isinstance(restriction, six.string_types):
            # Bare string: the string *is* the condition.
            expanded['condition'] = restriction
        elif isinstance(restriction, dict):
            if 'condition' in restriction:
                # Long form: already canonical, take it as-is.
                expanded.update(restriction)
            else:
                # Short form: single {condition: message} pair.
                expanded['condition'] = list(restriction)[0]
                expanded['message'] = list(restriction.values())[0]
        else:
            raise errors.InvalidData('Invalid restriction format')
        return expanded
class NailgunObject(object):
    """Base class for objects

    Subclasses bind a SQLAlchemy model, a serializer and a JSON schema;
    the classmethods below implement the shared CRUD/serialization logic.
    """

    #: Serializer class for object
    serializer = BasicSerializer

    #: SQLAlchemy model for object
    model = None

    #: JSON schema for object
    schema = {
        "properties": {}
    }

    @classmethod
    def check_field(cls, field):
        """Check if field is described in object's JSON schema

        :param field: name of the field as string
        :returns: None
        :raises: errors.InvalidField
        """
        if field not in cls.schema["properties"]:
            raise errors.InvalidField(
                u"Invalid field '{0}' for object '{1}'".format(
                    field,
                    cls.__name__
                )
            )

    @classmethod
    def get_by_uid(cls, uid, fail_if_not_found=False, lock_for_update=False):
        """Get instance by it's uid (PK in case of SQLAlchemy)

        :param uid: uid of object
        :param fail_if_not_found: raise an exception if object is not found
        :param lock_for_update: lock returned object for update (DB mutex)
        :returns: instance of an object (model)
        """
        q = db().query(cls.model)
        if lock_for_update:
            # SELECT ... FOR UPDATE: holds a row lock until the transaction
            # ends, serializing concurrent updaters.
            q = q.with_lockmode('update')
        res = q.get(uid)

        if not res and fail_if_not_found:
            raise errors.ObjectNotFound(
                "Object '{0}' with UID={1} is not found in DB".format(
                    cls.__name__,
                    uid
                )
            )
        return res

    @classmethod
    def create(cls, data):
        """Create object instance with specified parameters in DB

        :param data: dictionary of key-value pairs as object fields
        :returns: instance of an object (model)
        """
        new_obj = cls.model()
        for key, value in data.iteritems():
            setattr(new_obj, key, value)
        db().add(new_obj)
        # flush (not commit): sends the INSERT so server defaults and the
        # primary key get populated, but leaves commit to the caller.
        db().flush()
        return new_obj

    @classmethod
    def update(cls, instance, data):
        """Update existing instance with specified parameters

        :param instance: object (model) instance
        :param data: dictionary of key-value pairs as object fields
        :returns: instance of an object (model)
        """
        instance.update(data)
        db().add(instance)
        db().flush()
        return instance

    @classmethod
    def delete(cls, instance):
        """Delete object (model) instance

        :param instance: object (model) instance
        :returns: None
        """
        db().delete(instance)
        db().flush()

    @classmethod
    def save(cls, instance=None):
        """Save current changes for instance in DB.
        Current transaction will be commited
        (in case of SQLAlchemy).

        :param instance: object (model) instance
        :returns: None
        """
        # Commits even when no instance is passed -- callers use this to
        # persist changes already pending on the session.
        if instance:
            db().add(instance)
        db().commit()

    @classmethod
    def to_dict(cls, instance, fields=None):
        """Serialize instance to Python dict

        :param instance: object (model) instance
        :param fields: exact fields to serialize
        :returns: serialized object (model) as dictionary
        """
        return cls.serializer.serialize(instance, fields=fields)

    @classmethod
    def to_json(cls, instance, fields=None):
        """Serialize instance to JSON

        :param instance: object (model) instance
        :param fields: exact fields to serialize
        :returns: serialized object (model) as JSON string
        """
        return jsonutils.dumps(
            cls.to_dict(instance, fields=fields)
        )
class NailgunCollection(object):
    """Base class for object collections

    Provides query/iterable-agnostic filtering, ordering and serialization
    helpers over ``single`` (the per-object class).
    """

    #: Single object class
    single = NailgunObject

    @classmethod
    def _is_iterable(cls, obj):
        return isinstance(
            obj,
            collections.Iterable
        )

    @classmethod
    def _is_query(cls, obj):
        return isinstance(
            obj,
            NoCacheQuery
        )

    @classmethod
    def _iterable_or_all(cls, iterable):
        """Return `iterable`, or a query over all objects when it is None.

        Fix: uses an explicit ``is not None`` test instead of the previous
        ``iterable or cls.all()`` idiom, which treated an *empty* iterable
        as "not given" and silently widened the operation to every object
        in the DB. This also makes all methods consistent with the check
        filter_by already performed.
        """
        if iterable is not None:
            return iterable
        return cls.all()

    @classmethod
    def all(cls):
        """Get all instances of this object (model)

        :returns: iterable (SQLAlchemy query)
        """
        return db().query(
            cls.single.model
        )

    @classmethod
    def _query_order_by(cls, query, order_by):
        """Adds order by clause into SQLAlchemy query

        :param query: SQLAlchemy query
        :param order_by: tuple of model fields names for ORDER BY criterion
        to SQLAlchemy query. If name starts with '-' desc ordering applies,
        else asc.
        """
        for field_name in order_by:
            if field_name.startswith('-'):
                field_name = field_name.lstrip('-')
                ordering = 'desc'
            else:
                ordering = 'asc'
            field = getattr(cls.single.model, field_name)
            o_func = getattr(field, ordering)
            query = query.order_by(o_func())
        return query

    @classmethod
    def _iterable_order_by(cls, iterable, order_by):
        """Sort iterable by field names in order_by

        :param iterable: model objects collection
        :param order_by: tuple of model fields names for sorting.
        If name starts with '-' desc ordering applies, else asc.
        """
        # Applied last-key-first; each sort is stable, so the combined
        # effect matches a multi-key ORDER BY.
        for field_name in order_by:
            if field_name.startswith('-'):
                field_name = field_name.lstrip('-')
                reverse = True
            else:
                reverse = False
            iterable = sorted(
                iterable,
                key=lambda x: getattr(x, field_name),
                reverse=reverse
            )
        return iterable

    @classmethod
    def order_by(cls, iterable, order_by):
        """Order given iterable by specified order_by.

        :param order_by: tuple of model fields names or single field name for
        ORDER BY criterion to SQLAlchemy query. If name starts with '-'
        desc ordering applies, else asc.
        :type order_by: tuple of strings or string
        """
        if iterable is None or not order_by:
            return iterable
        if not isinstance(order_by, (list, tuple)):
            order_by = (order_by,)
        if cls._is_query(iterable):
            return cls._query_order_by(iterable, order_by)
        else:
            return cls._iterable_order_by(iterable, order_by)

    @classmethod
    def filter_by(cls, iterable, **kwargs):
        """Filter given iterable by specified kwargs.
        In case if iterable=None filters all object instances

        :param iterable: iterable (SQLAlchemy query)
        :returns: filtered iterable (SQLAlchemy query)
        """
        # Validate every filter key against the object's JSON schema first.
        map(cls.single.check_field, kwargs.iterkeys())
        use_iterable = cls._iterable_or_all(iterable)
        if cls._is_query(use_iterable):
            return use_iterable.filter_by(**kwargs)
        elif cls._is_iterable(use_iterable):
            return ifilter(
                lambda i: all(
                    (getattr(i, k) == v for k, v in kwargs.iteritems())
                ),
                use_iterable
            )
        else:
            raise TypeError("First argument should be iterable")

    @classmethod
    def filter_by_not(cls, iterable, **kwargs):
        """Filter given iterable by specified kwargs with negation.
        In case of `iterable` is `None` filters all object instances.

        :param iterable: iterable (SQLAlchemy query)
        :returns: filtered iterable (SQLAlchemy query)
        """
        map(cls.single.check_field, kwargs.iterkeys())
        use_iterable = cls._iterable_or_all(iterable)
        if cls._is_query(use_iterable):
            # NOT (a AND b AND ...) -- excludes only objects matching
            # *every* given kwarg, mirroring the iterable branch below.
            conditions = []
            for key, value in kwargs.iteritems():
                conditions.append(
                    getattr(cls.single.model, key) == value
                )
            return use_iterable.filter(not_(and_(*conditions)))
        elif cls._is_iterable(use_iterable):
            return ifilter(
                lambda i: not all(
                    (getattr(i, k) == v for k, v in kwargs.iteritems())
                ),
                use_iterable
            )
        else:
            raise TypeError("First argument should be iterable")

    @classmethod
    def lock_for_update(cls, iterable):
        """Use SELECT FOR UPDATE on a given iterable (query).
        In case if iterable=None returns all object instances

        :param iterable: iterable (SQLAlchemy query)
        :returns: filtered iterable (SQLAlchemy query)
        """
        use_iterable = cls._iterable_or_all(iterable)
        if cls._is_query(use_iterable):
            return use_iterable.with_lockmode('update')
        elif cls._is_iterable(use_iterable):
            # we can't lock abstract iterable, so returning as is
            # for compatibility
            return use_iterable
        else:
            raise TypeError("First argument should be iterable")

    @classmethod
    def filter_by_list(cls, iterable, field_name, list_of_values,
                       order_by=()):
        """Filter given iterable by list of list_of_values.
        In case if iterable=None filters all object instances

        :param iterable: iterable (SQLAlchemy query)
        :param field_name: filtering field name
        :param list_of_values: list of values for objects filtration
        :returns: filtered iterable (SQLAlchemy query)
        """
        field_getter = operator.attrgetter(field_name)
        use_iterable = cls._iterable_or_all(iterable)
        if cls._is_query(use_iterable):
            result = use_iterable.filter(
                field_getter(cls.single.model).in_(list_of_values)
            )
            result = cls.order_by(result, order_by)
            return result
        elif cls._is_iterable(use_iterable):
            return ifilter(
                lambda i: field_getter(i) in list_of_values,
                use_iterable
            )
        else:
            raise TypeError("First argument should be iterable")

    @classmethod
    def filter_by_id_list(cls, iterable, uid_list):
        """Filter given iterable by list of uids.
        In case if iterable=None filters all object instances

        :param iterable: iterable (SQLAlchemy query)
        :param uid_list: list of uids for objects
        :returns: filtered iterable (SQLAlchemy query)
        """
        return cls.filter_by_list(
            iterable,
            'id',
            uid_list,
        )

    @classmethod
    def eager_base(cls, iterable, options):
        """Eager load linked object instances (SQLAlchemy FKs).
        In case if iterable=None applies to all object instances

        :param iterable: iterable (SQLAlchemy query)
        :param options: list of sqlalchemy eagerload types
        :returns: iterable (SQLAlchemy query)
        """
        use_iterable = cls._iterable_or_all(iterable)
        if options:
            return use_iterable.options(*options)
        return use_iterable

    @classmethod
    def eager(cls, iterable, fields):
        """Eager load linked object instances (SQLAlchemy FKs).
        By default joinedload will be applied to every field.
        If you want to use custom eagerload method - use eager_base
        In case if iterable=None applies to all object instances

        :param iterable: iterable (SQLAlchemy query)
        :param fields: list of links (model FKs) to eagerload
        :returns: iterable (SQLAlchemy query)
        """
        options = [joinedload(field) for field in fields]
        return cls.eager_base(iterable, options)

    @classmethod
    def count(cls, iterable=None):
        """Count objects in the given iterable (all objects when None)."""
        use_iterable = cls._iterable_or_all(iterable)
        if cls._is_query(use_iterable):
            return use_iterable.count()
        elif cls._is_iterable(use_iterable):
            # Fix: the original measured `iterable` here instead of
            # `use_iterable`, crashing with TypeError on iterable=None.
            return len(list(use_iterable))
        else:
            raise TypeError("First argument should be iterable")

    @classmethod
    def to_list(cls, iterable=None, fields=None):
        """Serialize iterable to list of dicts
        In case if iterable=None serializes all object instances

        :param iterable: iterable (SQLAlchemy query)
        :param fields: exact fields to serialize
        :returns: collection of objects as a list of dicts
        """
        use_iterable = cls._iterable_or_all(iterable)
        return map(
            lambda o: cls.single.to_dict(o, fields=fields),
            use_iterable
        )

    @classmethod
    def to_json(cls, iterable=None, fields=None):
        """Serialize iterable to JSON
        In case if iterable=None serializes all object instances

        :param iterable: iterable (SQLAlchemy query)
        :param fields: exact fields to serialize
        :returns: collection of objects as a JSON string
        """
        return jsonutils.dumps(
            cls.to_list(
                fields=fields,
                iterable=iterable
            )
        )

    @classmethod
    def create(cls, data):
        """Create object instance with specified parameters in DB

        :param data: dictionary of key-value pairs as object fields
        :returns: instance of an object (model)
        """
        return cls.single.create(data)
| zhaochao/fuel-web | nailgun/nailgun/objects/base.py | Python | apache-2.0 | 17,245 |
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from . import test_addon_hash
from . import test_module
from . import test_module_deprecated
from . import test_module_upgrade_deprecated
| ovnicraft/server-tools | module_auto_update/tests/__init__.py | Python | agpl-3.0 | 257 |
# Copyright (c) 2015 The Phtevencoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Dummy Socks5 server for testing.
'''
from __future__ import print_function, division, unicode_literals
import socket, threading, Queue
import traceback, sys
### Protocol constants
class Command:
    """SOCKS5 request command codes (RFC 1928, section 4)."""
    CONNECT = 0x01
class AddressType:
    """SOCKS5 address-type codes used in connect requests (RFC 1928)."""
    IPV4 = 0x01
    DOMAINNAME = 0x03
    IPV6 = 0x04
### Utility functions
def recvall(s, n):
    '''Receive n bytes from a socket, or fail'''
    buf = bytearray()
    remaining = n
    while remaining > 0:
        # recv may return fewer bytes than requested; keep reading until
        # the full count arrives or the peer closes the stream.
        chunk = s.recv(remaining)
        if not chunk:
            raise IOError('Unexpected end of stream')
        buf.extend(chunk)
        remaining -= len(chunk)
    return buf
### Implementation classes
class Socks5Configuration(object):
    '''Proxy configuration'''
    def __init__(self):
        # NOTE(review): presumably a (host, port) tuple suitable for
        # socket.bind -- confirm against callers.
        self.addr = None # Bind address (must be set)
        self.af = socket.AF_INET # Bind address family
        self.unauth = False # Support unauthenticated
        self.auth = False # Support authentication
class Socks5Command(object):
    '''Information about an incoming socks5 command'''
    def __init__(self, cmd, atyp, addr, port, username, password):
        """Store the parsed fields of a client request verbatim."""
        self.cmd = cmd            # command code (one of Command.*)
        self.atyp = atyp          # address type (one of AddressType.*)
        self.addr = addr          # target address
        self.port = port          # target port
        self.username = username  # optional auth username
        self.password = password  # optional auth password

    def __repr__(self):
        fields = (self.cmd, self.atyp, self.addr, self.port,
                  self.username, self.password)
        return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % fields
class Socks5Connection(object):
    """One accepted client connection; parses a single SOCKS5 request,
    reports it to the owning server's queue, then disconnects."""
    def __init__(self, serv, conn, peer):
        self.serv = serv   # owning Socks5Server (provides conf and queue)
        self.conn = conn   # connected socket for this client
        self.peer = peer   # peer address as returned by accept()

    def handle(self):
        '''
        Handle socks5 request according to RFC1928
        '''
        try:
            # Verify socks version
            ver = recvall(self.conn, 1)[0]
            if ver != 0x05:
                raise IOError('Invalid socks version %i' % ver)
            # Choose authentication method
            # Prefer username/password when the server is configured for it,
            # otherwise fall back to "no authentication" if allowed.
            nmethods = recvall(self.conn, 1)[0]
            methods = bytearray(recvall(self.conn, nmethods))
            method = None
            if 0x02 in methods and self.serv.conf.auth:
                method = 0x02 # username/password
            elif 0x00 in methods and self.serv.conf.unauth:
                method = 0x00 # unauthenticated
            if method is None:
                raise IOError('No supported authentication method was offered')
            # Send response
            self.conn.sendall(bytearray([0x05, method]))
            # Read authentication (optional)
            # RFC 1929 username/password sub-negotiation.
            username = None
            password = None
            if method == 0x02:
                ver = recvall(self.conn, 1)[0]
                if ver != 0x01:
                    raise IOError('Invalid auth packet version %i' % ver)
                ulen = recvall(self.conn, 1)[0]
                username = str(recvall(self.conn, ulen))
                plen = recvall(self.conn, 1)[0]
                password = str(recvall(self.conn, plen))
                # Send authentication response
                self.conn.sendall(bytearray([0x01, 0x00]))
            # Read connect request
            (ver,cmd,rsv,atyp) = recvall(self.conn, 4)
            if ver != 0x05:
                raise IOError('Invalid socks version %i in connect request' % ver)
            if cmd != Command.CONNECT:
                raise IOError('Unhandled command %i in connect request' % cmd)
            # Address encoding depends on the address type byte.
            if atyp == AddressType.IPV4:
                addr = recvall(self.conn, 4)
            elif atyp == AddressType.DOMAINNAME:
                n = recvall(self.conn, 1)[0]
                addr = str(recvall(self.conn, n))
            elif atyp == AddressType.IPV6:
                addr = recvall(self.conn, 16)
            else:
                raise IOError('Unknown address type %i' % atyp)
            # Port is a 16-bit big-endian integer.
            port_hi,port_lo = recvall(self.conn, 2)
            port = (port_hi << 8) | port_lo
            # Send dummy response
            # This test server never actually connects anywhere; it replies
            # "succeeded" with a zeroed bind address and records the request.
            self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
            cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
            self.serv.queue.put(cmdin)
            print('Proxy: ', cmdin)
            # Fall through to disconnect
        except Exception,e:
            # Report the failure to the test client via the same queue.
            traceback.print_exc(file=sys.stderr)
            self.serv.queue.put(e)
        finally:
            self.conn.close()
class Socks5Server(object):
    """Threaded dummy SOCKS5 server: accepts connections and hands each one
    to a Socks5Connection running in its own daemon thread."""
    def __init__(self, conf):
        self.conf = conf
        self.s = socket.socket(conf.af)
        # Allow quick rebinding of the test port between runs.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind(conf.addr)
        self.s.listen(5)
        self.running = False
        self.thread = None  # acceptor thread, created in start()
        self.queue = Queue.Queue() # report connections and exceptions to client

    def run(self):
        """Accept loop; runs on the acceptor thread until stop() is called."""
        while self.running:
            (sockconn, peer) = self.s.accept()
            # stop() wakes us with a dummy connection; skip handling it.
            if self.running:
                conn = Socks5Connection(self, sockconn, peer)
                thread = threading.Thread(None, conn.handle)
                thread.daemon = True
                thread.start()

    def start(self):
        """Start the acceptor thread (may only be called once)."""
        assert(not self.running)
        self.running = True
        self.thread = threading.Thread(None, self.run)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop the acceptor thread and wait for it to finish."""
        self.running = False
        # connect to self to end run loop
        s = socket.socket(self.conf.af)
        s.connect(self.conf.addr)
        s.close()
        self.thread.join()
| ravenbyron/phtevencoin | qa/rpc-tests/test_framework/socks5.py | Python | mit | 5,705 |
# -*- test-case-name: twisted.words.test.test_jabbercomponent -*-
#
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from zope.interface import implements
from twisted.words.xish import domish, xpath, utility
from twisted.words.protocols.jabber import jstrports, xmlstream
def componentFactory(componentid, password):
    """Build an XmlStreamFactory wired with a connect-side component
    authenticator for the given component JID and shared secret."""
    authenticator = ConnectComponentAuthenticator(componentid, password)
    factory = xmlstream.XmlStreamFactory(authenticator)
    return factory
class ConnectComponentAuthenticator(xmlstream.ConnectAuthenticator):
    """ Authenticator to permit an XmlStream to authenticate against a Jabber
    Server as a Component (where the Authenticator is initiating the stream).

    This implements the basic component authentication. Unfortunately this
    protocol is not formally described anywhere. Fortunately, all the Jabber
    servers I know of use this mechanism in exactly the same way.
    """
    namespace = 'jabber:component:accept'

    def __init__(self, componentjid, password):
        """
        @type componentjid: L{str}
        @param componentjid: Jabber ID that this component wishes to bind to.

        @type password: L{str}
        @param password: Password/secret this component uses to authenticate.
        """
        # Note that we are sending 'to' our desired component JID.
        xmlstream.ConnectAuthenticator.__init__(self, componentjid)
        self.password = password

    def streamStarted(self):
        # Create handshake: the content is a hash of the stream session id
        # and the shared secret (see xmlstream.hashPassword).
        hs = domish.Element(("jabber:component:accept", "handshake"))
        hs.addContent(xmlstream.hashPassword(self.xmlstream.sid, self.password))

        # Setup observer to watch for handshake result -- registered
        # *before* sending so the reply cannot be missed.
        self.xmlstream.addOnetimeObserver("/handshake", self._handshakeEvent)
        self.xmlstream.send(hs)

    def associateWithStream(self, xs):
        # Component streams are pre-XMPP-1.0: no version negotiation, no TLS.
        xs.version = (0, 0)
        xs.useTls = 0
        xmlstream.ConnectAuthenticator.associateWithStream(self, xs)

    def _handshakeEvent(self, elem):
        # we have successfully shaken hands and can now consider this
        # entity to represent the component JID.
        self.xmlstream.thisHost = self.xmlstream.otherHost
        self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
class ListenComponentAuthenticator(xmlstream.Authenticator):
    """ Placeholder for listening components """
    # NOTE(review): intentionally unimplemented -- accepting incoming
    # (listen-side) component connections is not supported here yet.
    pass
from twisted.application import service
from twisted.python import components
class IService(components.Interface):
    """Interface for child services that want connection-lifecycle
    notifications from their parent ServiceManager."""
    def componentConnected(self, xmlstream):
        """ Parent component has established a connection
        """

    def componentDisconnected(self):
        """ Parent component has lost a connection to the Jabber system
        """

    def transportConnected(self, xmlstream):
        """ Parent component has established a connection over the underlying transport
        """
class Service(service.Service):
    """Convenience base class: a twisted Service providing no-op
    implementations of IService plus a send() that delegates to the
    parent ServiceManager."""
    implements(IService)

    def componentConnected(self, xmlstream):
        pass

    def componentDisconnected(self):
        pass

    def transportConnected(self, xmlstream):
        pass

    def send(self, obj):
        # Delegate to the parent ServiceManager, which buffers packets
        # while the stream is down.
        self.parent.send(obj)
class ServiceManager(service.MultiService):
    """ Business logic representing a managed component connection to a Jabber router

    This Service maintains a single connection to a Jabber router and
    provides facilities for packet routing and transmission. Business
    logic modules can be written as Service subclasses and added as
    sub-services.
    """
    def __init__(self, jid, password):
        service.MultiService.__init__(self)

        # Setup defaults
        self.jabberId = jid
        self.xmlstream = None  # set while a stream is connected, else None

        # Internal buffer of packets queued while no stream is available
        self._packetQueue = []

        # Setup the xmlstream factory
        self._xsFactory = componentFactory(self.jabberId, password)

        # Register some lambda functions to keep the self.xmlstream var up to date
        self._xsFactory.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self._connected)
        self._xsFactory.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self._authd)
        self._xsFactory.addBootstrap(xmlstream.STREAM_END_EVENT, self._disconnected)

        # Map addBootstrap and removeBootstrap to the underlying factory -- is this
        # right? I have no clue...but it'll work for now, until i can think about it
        # more.
        self.addBootstrap = self._xsFactory.addBootstrap
        self.removeBootstrap = self._xsFactory.removeBootstrap

    def getFactory(self):
        return self._xsFactory

    def _connected(self, xs):
        # Transport is up (not yet authenticated); remember the stream and
        # notify interested children.
        self.xmlstream = xs
        for c in self:
            if IService.providedBy(c):
                c.transportConnected(xs)

    def _authd(self, xs):
        # Flush all pending packets buffered while we were disconnected
        for p in self._packetQueue:
            self.xmlstream.send(p)
        self._packetQueue = []

        # Notify all child services which implement
        # the IService interface
        for c in self:
            if IService.providedBy(c):
                c.componentConnected(xs)

    def _disconnected(self, _):
        self.xmlstream = None

        # Notify all child services which implement
        # the IService interface
        for c in self:
            if IService.providedBy(c):
                c.componentDisconnected()

    def send(self, obj):
        # Send immediately when connected; otherwise buffer until the next
        # successful authentication (_authd) flushes the queue.
        if self.xmlstream != None:
            self.xmlstream.send(obj)
        else:
            self._packetQueue.append(obj)
def buildServiceManager(jid, password, strport):
    """ Constructs a pre-built L{ServiceManager}, using the specified strport
    string.
    """
    manager = ServiceManager(jid, password)
    # Attach a TCP/strport client service driving the manager's factory.
    connector = jstrports.client(strport, manager.getFactory())
    connector.setServiceParent(manager)
    return manager
| kenorb/BitTorrent | twisted/words/protocols/jabber/component.py | Python | gpl-3.0 | 5,773 |
import time
import sys
import thread
import server_pool
import db_transfer
import shell
import daemon
#def test():
# thread.start_new_thread(DbTransfer.thread_db, ())
# Api.web_server()
def main():
    """Entry point: daemonize per config, then run the DB-sync thread."""
    shell.check_python()
    # False: load the server-side (multi-user) configuration.
    config = shell.get_config(False)
    daemon.daemon_exec(config)
    # Drop privileges to the configured user, if any.
    daemon.set_user(config.get('user', None))
    thread.start_new_thread(db_transfer.DbTransfer.thread_db, ())
    # Keep the main thread alive; all work happens on the thread above.
    while True:
        time.sleep(99999)

if __name__ == '__main__':
    main()
| ilikecola/Shadowsocks-combine-manyuser | shadowsocks/server.py | Python | apache-2.0 | 495 |
import re
import codecs
from toolz.itertoolz import groupby
from itertools import count
def __flatten(ls):
    """
    Flatten one level of nesting: lists are expanded element by element,
    tuples pass through unchanged, anything else (e.g. None) is dropped.
    :param ls: original sequence
    :return: generator over flattened (k, v) elements
    """
    for element in ls:
        kind = type(element)
        if kind is list:
            for item in element:
                yield item
        elif kind is tuple:
            yield element
def __input_file(path):
    """
    Aux generator function to read text files lazily
    :param path: path of the file to read
    :return: generator that gives us a line each time we call __next__
    """
    # A context manager guarantees the file is closed even if the consumer
    # abandons the generator early (GeneratorExit propagates through the
    # `with`); the old explicit close() only ran on full consumption.
    with codecs.open(path, encoding='utf-8') as f:
        for line in f:
            yield line.strip()
def input_file(path):
    """
    Read common text file as a stream of (k, v) pairs where k is line number
    and v is line text
    :param path: path to the file to read
    :return: lazy seq of pairs
    """
    # enumerate() is the idiomatic, equally lazy equivalent of
    # zip(count(), iterable).
    return enumerate(__input_file(path))
def input_kv_file(path, sep="\t"):
    """
    Read common text file as a stream of pairs (k, v) where k is the first
    sequence of characters in the line until sep and v contains the rest of
    the characters after removing that first sep.
    :param path: path to the file to read
    :param sep: optional separator to use during k, v pair resolution
    :return: lazy seq of pairs
    """
    return (re.split(sep, line, maxsplit=1, flags=re.UNICODE)
            for line in __input_file(path))
def process_mapper(in_seq, func):
    """
    Simulates mapper function application
    :param in_seq: (k, v) pairs to operate on
    :param func: mapper function to apply f(k, v)
    :return: sequence of transformed (k, v)
    """
    mapped = (func(key, value) for key, value in in_seq)
    return __flatten(mapped)
def process_shuffle_sort(in_seq):
    """
    Simulates shuffle-sort phase
    :param in_seq: (k, v) pairs from mapper application
    :return: shuffle-sorted (k, [v, v, v...]) pairs to be used for reduce
    """
    def hashable_key(pair):
        # Lists cannot be dict keys, so promote list keys to tuples.
        key = pair[0]
        return tuple(key) if type(key) is list else key

    grouped = groupby(hashable_key, in_seq)
    for key, pairs in grouped.items():
        yield (key, [value for _, value in pairs])
def process_reducer(in_seq, func):
    """
    Simulates reducer function application
    :param in_seq: (k, [v, v, v, ...]) pairs from shuffle-sort
    :param func: reducer function to apply f(k, vs)
    :return: sequence of transformed (k, v)
    """
    reduced = (func(key, values) for key, values in in_seq)
    return __flatten(reduced)
def identity_mapper(k, v):
    """
    Identity mapper: forwards every (k, v) pair to the next phase untouched.
    :param k: key
    :param v: value
    :return: the (k, v) pair
    """
    return (k, v)
def identity_reducer(k, vs):
    """
    Identity reducer: unrolls the grouped values and recreates one (k, v)
    pair per value.
    :param k: key
    :param vs: list of values
    :return: list of (k, v) pairs
    """
    return [(k, value) for value in vs]
def map_red(in_seq, mapper=identity_mapper, reducer=identity_reducer):
    """
    Full map_red process definition
    :param in_seq: input (k, v) sequence
    :param mapper: mapper function to apply
    :param reducer: reducer function to apply
    :return: (k, v) resulting sequence
    """
    mapped = process_mapper(in_seq, mapper)
    shuffled = process_shuffle_sort(mapped)
    return process_reducer(shuffled, reducer)
def run(mp_proc, sep="\t"):
    """
    Lazily executes the mapred process and outputs into the console
    :param mp_proc: (k, v) result sequence to print
    :param sep: optional k and v separator
    :return: None
    """
    for key, value in mp_proc:
        print('%s%s%s' % (key, sep, value))
| ramonpin/mredu | mredu/simul.py | Python | apache-2.0 | 3,677 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import functools
import inspect
import os
import re
import codegen
import six
from google.protobuf.message import Message as ProtoMessage
# A regular expression capturing a python identifier.
IDENTIFIER_RE = '[a-zA-Z_][a-zA-Z0-9_]*'
# Log of all reported errors
all_errors = []


def log_error(s):
  """Echo an error message to stdout and record it in `all_errors`."""
  print('ERROR:', s)
  all_errors.append(s)
def documentation_path(full_name):
  """Returns the file path for the documentation for the given API symbol.

  The fully qualified name `a.b.c` maps to `a/b/c.md` (platform path
  separator), mirroring the python module/class structure, relative to a
  base directory.

  Args:
    full_name: Fully qualified name of a library symbol.

  Returns:
    The file path to which to write the documentation for `full_name`.
  """
  parts = full_name.split('.')
  return os.path.join(*parts) + '.md'
def _get_raw_docstring(py_object):
  """Get the docs for a given python object.

  Args:
    py_object: A python object to retrieve the docs for (class,
      function/method, or module).

  Returns:
    The docstring, or the empty string if no docstring was found.
  """
  # inspect.getdoc falls back to the *type's* docstring for plain instances,
  # which is not what we want, so only query objects of documentable kinds.
  documentable = (inspect.isclass(py_object) or inspect.ismethod(py_object) or
                  inspect.isfunction(py_object) or
                  inspect.ismodule(py_object) or
                  isinstance(py_object, property))
  if not documentable:
    return ''
  return inspect.getdoc(py_object) or ''
# A regular expression for capturing a @{symbol} reference.
# Captures the (non-empty) brace contents, e.g. '@{tf.foo}' -> 'tf.foo'.
SYMBOL_REFERENCE_RE = re.compile(r'@\{([^}]+)\}')
class ReferenceResolver(object):
  """Class for replacing @{...} references with Markdown links.

  Args:
    duplicate_of: A map from duplicate names to preferred names of API
      symbols.
    doc_index: A `dict` mapping symbol name strings to objects with `url`
      and `title` fields. Used to resolve @{$doc} references in docstrings.
    index: A map from all full names to python objects.
    py_module_names: A list of string names of Python modules.
  """

  def __init__(self, duplicate_of, doc_index, index, py_module_names):
    self._duplicate_of = duplicate_of
    self._doc_index = doc_index
    self._index = index
    self._py_module_names = py_module_names

  def replace_references(self, string, relative_path_to_root):
    """Replace "@{symbol}" references with links to symbol's documentation page.

    This function finds all occurrences of "@{symbol}" in `string`
    and replaces them with markdown links to the documentation page
    for "symbol".

    `relative_path_to_root` is the relative path from the document
    that contains the "@{symbol}" reference to the root of the API
    documentation that is linked to. If the containing page is part of
    the same API docset, `relative_path_to_root` can be set to
    `os.path.dirname(documentation_path(name))`, where `name` is the
    python name of the object whose documentation page the reference
    lives on.

    Args:
      string: A string in which "@{symbol}" references should be replaced.
      relative_path_to_root: The relative path from the containing document to
        the root of the API documentation that is being linked to.

    Returns:
      `string`, with "@{symbol}" references replaced by Markdown links.
    """
    return re.sub(SYMBOL_REFERENCE_RE,
                  lambda match: self._one_ref(match.group(1),  # pylint: disable=g-long-lambda
                                              relative_path_to_root),
                  string)

  def python_link(self, link_text, ref_full_name, relative_path_to_root,
                  code_ref=True):
    """Resolve a "@{python symbol}" reference to a Markdown link.

    This will pick the canonical location for duplicate symbols.  The
    input to this function should already be stripped of the '@' and
    '{}'.  This function returns a Markdown link. If `code_ref` is
    true, it is assumed that this is a code reference, so the link
    text will be rendered as code (using backticks).

    `link_text` should refer to a library symbol, starting with 'tf.'.

    Args:
      link_text: The text of the Markdown link.
      ref_full_name: The fully qualified name of the symbol to link to.
      relative_path_to_root: The relative path from the location of the current
        document to the root of the API documentation.
      code_ref: If true (the default), put `link_text` in `...`.

    Returns:
      A markdown link to the documentation page of `ref_full_name`.
    """
    link = self.reference_to_url(ref_full_name, relative_path_to_root)
    if code_ref:
      return '[`%s`](%s)' % (link_text, link)
    else:
      return '[%s](%s)' % (link_text, link)

  def py_master_name(self, full_name):
    """Return the master (preferred) name for a Python symbol name."""
    return self._duplicate_of.get(full_name, full_name)

  def py_name_to_object(self, full_name):
    """Return the Python object for a Python symbol name."""
    return self._index[full_name]

  def reference_to_url(self, ref_full_name, relative_path_to_root):
    """Resolve a "@{python symbol}" reference to a relative path.

    The input to this function should already be stripped of the '@'
    and '{}', and its output is only the link, not the full Markdown.

    If `ref_full_name` is the name of a class member, method, or property, the
    link will point to the page of the containing class, and it will include the
    method name as an anchor. For example, `tf.module.MyClass.my_method` will be
    translated into a link to
    `os.join.path(relative_path_to_root, 'tf/module/MyClass.md#my_method')`.

    Args:
      ref_full_name: The fully qualified name of the symbol to link to.
      relative_path_to_root: The relative path from the location of the current
        document to the root of the API documentation.

    Returns:
      A relative path that links from the documentation page of `from_full_name`
      to the documentation page of `ref_full_name`.

    Raises:
      RuntimeError: If `ref_full_name` is not in `self._index`.
    """
    master_name = self._duplicate_of.get(ref_full_name, ref_full_name)
    # Check whether this link exists
    if master_name not in self._index:
      # TODO(josh11b): Make error reporting more uniform.
      print('ERROR: Cannot make link to %s (original: %s): Not in index.' %
            (master_name, ref_full_name))
      return 'BROKEN_LINK'
    # If this is a member of a class, link to the class page with an anchor.
    ref_path = None
    py_object = self._index[master_name]
    if not (inspect.isclass(py_object) or inspect.ismodule(py_object)):
      idents = master_name.split('.')
      if len(idents) > 1:
        class_name = '.'.join(idents[:-1])
        assert class_name in self._index
        if inspect.isclass(self._index[class_name]):
          ref_path = documentation_path(class_name) + '#%s' % idents[-1]
    if not ref_path:
      ref_path = documentation_path(master_name)
    return os.path.join(relative_path_to_root, ref_path)

  def _one_ref(self, string, relative_path_to_root):
    """Return a link for a single "@{symbol}" reference."""
    # Look for link text after $.
    dollar = string.rfind('$')
    if dollar > 0:  # Ignore $ in first character
      link_text = string[dollar + 1:]
      string = string[:dollar]
      manual_link_text = True
    else:
      link_text = string
      manual_link_text = False
    # Handle different types of references.
    if string.startswith('$'):  # Doc reference
      return self._doc_link(
          string, link_text, manual_link_text, relative_path_to_root)
    elif string.startswith('tensorflow::'):
      # C++ symbol
      return self._cc_link(
          string, link_text, manual_link_text, relative_path_to_root)
    else:
      is_python = False
      for py_module_name in self._py_module_names:
        if string == py_module_name or string.startswith(py_module_name + '.'):
          is_python = True
          break
      if is_python:  # Python symbol
        return self.python_link(link_text, string, relative_path_to_root,
                                code_ref=not manual_link_text)
    # Error!
    log_error('Did not understand "@{%s}"' % string)
    return 'ERROR:%s' % string

  def _doc_link(self, string, link_text, manual_link_text,
                relative_path_to_root):
    """Generate a link for a @{$...} reference."""
    string = string[1:]  # remove leading $
    # If string has a #, split that part into `hash_tag`
    hash_pos = string.find('#')
    if hash_pos > -1:
      hash_tag = string[hash_pos:]
      string = string[:hash_pos]
    else:
      hash_tag = ''
    if string in self._doc_index:
      if not manual_link_text: link_text = self._doc_index[string].title
      # Doc urls are relative to the doc root, two levels above the API root.
      url = os.path.normpath(os.path.join(
          relative_path_to_root, '../..', self._doc_index[string].url))
      return '[%s](%s%s)' % (link_text, url, hash_tag)
    return self._doc_missing(string, hash_tag, link_text, manual_link_text,
                             relative_path_to_root)

  def _doc_missing(self, string, unused_hash_tag, link_text,
                   unused_manual_link_text, unused_relative_path_to_root):
    """Generate an error for unrecognized @{$...} references."""
    log_error('Handle doc reference "@{$%s}"' % string)
    return link_text

  def _cc_link(self, string, link_text, unused_manual_link_text,
               relative_path_to_root):
    """Generate a link for a @{tensorflow::...} reference."""
    # TODO(josh11b): Fix this hard-coding of paths.
    if string == 'tensorflow::ClientSession':
      ret = 'class/tensorflow/client-session.md'
    elif string == 'tensorflow::Scope':
      ret = 'class/tensorflow/scope.md'
    elif string == 'tensorflow::Status':
      ret = 'class/tensorflow/status.md'
    elif string == 'tensorflow::Tensor':
      ret = 'class/tensorflow/tensor.md'
    elif string == 'tensorflow::ops::Const':
      ret = 'namespace/tensorflow/ops.md#const'
    else:
      log_error('Handle C++ reference "@{%s}"' % string)
      return 'TODO_C++:%s' % string
    # relative_path_to_root gets you to api_docs/python, we go from there
    # to api_docs/cc, and then add ret.
    cc_relative_path = os.path.normpath(os.path.join(
        relative_path_to_root, '../cc', ret))
    return '[`%s`](%s)' % (link_text, cc_relative_path)
# TODO(aselle): Collect these into a big list for all modules and functions
# and make a rosetta stone page.
def _handle_compatibility(doc):
  """Parse and remove compatibility blocks from the main docstring.

  Args:
    doc: The docstring that contains compatibility notes.

  Returns:
    A tuple of the modified doc string and a dict that maps from
    compatibility note type to the text of the note.
  """
  pattern = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n'
                       r'((?:[^@\n]*\n)+)'
                       r'\s*@end_compatibility')
  notes = {match.group(1): match.group(2)
           for match in pattern.finditer(doc)}
  stripped = pattern.sub('', doc)
  return stripped, notes
def _gen_pairs(items):
  """Given a list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...].

  Args:
    items: A list of items (length must be even)

  Yields:
    The original items, in pairs
  """
  assert len(items) % 2 == 0
  items = iter(items)
  # zip() stops cleanly when the iterator is exhausted. The previous
  # `while True: yield next(items), next(items)` pattern let StopIteration
  # escape the generator body, which PEP 479 (Python 3.7+) converts into a
  # RuntimeError.
  for first, second in zip(items, items):
    yield first, second
class _FunctionDetail(
    collections.namedtuple('_FunctionDetail', ['keyword', 'header', 'items'])):
  """A simple class to contain function details.

  Composed of a "keyword", a possibly empty "header" string, and a possibly
  empty list of key-value pair "items".
  """
  __slots__ = []

  def __str__(self):
    """Return the original string that represents the function detail."""
    body = ''.join('  ' + key + ':' + value for key, value in self.items)
    return self.keyword + ':\n' + self.header + body
def _parse_function_details(docstring):
  r"""Given a docstring, split off the header and parse the function details.

  For example the docstring of tf.nn.relu:

  '''Computes rectified linear: `max(features, 0)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
      `half`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  '''

  This is parsed, and returned as a (header, function_details) pair where
  header is the free-form leading text and function_details is a list of
  `_FunctionDetail` objects, one per recognized section ("Args", "Returns",
  ...), each holding its section header text and its (name, description)
  items.

  Args:
    docstring: The docstring to parse

  Returns:
    A (header, function_details) pair, where header is a string and
    function_details is a (possibly empty) list of `_FunctionDetail` objects.
  """
  keywords = ('Args', 'Arguments', 'Fields', 'Returns', 'Yields', 'Raises',
              'Attributes')
  # TODO(markdaoust): Check if the leading blank line is used consistently.
  section_re = re.compile('(?<=\n\n)(' + '|'.join(keywords) + '):\n',
                          re.MULTILINE)
  chunks = section_re.split(docstring)
  # chunks[0] is the free-form header; the rest alternates keyword/content.
  header_text = chunks[0]
  item_re = re.compile(r'^  (\w+):', re.MULTILINE)
  details = []
  for keyword, content in _gen_pairs(list(chunks[1:])):
    fields = item_re.split(content)
    details.append(_FunctionDetail(keyword, fields[0],
                                   list(_gen_pairs(fields[1:]))))
  return header_text, details
# Parsed docstring bundle: first line, full text, parsed sections, and any
# @compatibility notes keyed by note type.
_DocstringInfo = collections.namedtuple('_DocstringInfo', [
    'brief', 'docstring', 'function_details', 'compatibility'
])
def _parse_md_docstring(py_object, relative_path_to_root, reference_resolver):
  """Parse the object's docstring and return a `_DocstringInfo`.

  This function clears @@'s from the docstring, and replaces @{} references
  with markdown links.

  For links within the same set of docs, the `relative_path_to_root` for a
  docstring on the page for `full_name` can be set to:

  ```python
  relative_path_to_root = os.path.relpath(
      path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
  ```

  Args:
    py_object: A python object to retrieve the docs for (class,
      function/method, or module).
    relative_path_to_root: The relative path from the location of the current
      document to the root of the Python API documentation. This is used to
      compute links for "@{symbol}" references.
    reference_resolver: An instance of ReferenceResolver.

  Returns:
    A _DocstringInfo object, all fields will be empty if no docstring was
    found.
  """
  # TODO(wicke): If this is a partial, use the .func docstring and add a note.
  raw = reference_resolver.replace_references(
      _get_raw_docstring(py_object), relative_path_to_root)
  # Drop lines that consist solely of an @@symbol marker.
  atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
  kept = [line for line in raw.split('\n') if not atat_re.match(line)]
  docstring, compatibility = _handle_compatibility('\n'.join(kept))
  docstring, function_details = _parse_function_details(docstring)
  brief = docstring.split('\n')[0]
  return _DocstringInfo(brief, docstring, function_details, compatibility)
def _get_arg_spec(func):
  """Extracts signature information from a function or functools.partial object.

  For functions, uses `inspect.getargspec`. For `functools.partial` objects,
  corrects the signature of the underlying function to take into account the
  removed arguments.

  Args:
    func: A function whose signature to extract.

  Returns:
    An `ArgSpec` namedtuple `(args, varargs, keywords, defaults)`, as returned
    by `inspect.getargspec`.
  """
  # getargspec does not work for functools.partial objects directly.
  if isinstance(func, functools.partial):
    argspec = inspect.getargspec(func.func)
    # Remove the args from the original function that have been used up.
    # `first_default_arg` is the index (into argspec.args) of the first
    # argument that has a default value.
    first_default_arg = (
        len(argspec.args or []) - len(argspec.defaults or []))
    partial_args = len(func.args)
    argspec_args = []
    # Positional args bound by the partial consume the leading parameters.
    if argspec.args:
      argspec_args = list(argspec.args[partial_args:])
    argspec_defaults = list(argspec.defaults or ())
    # If the partial's positional args reach into the defaulted parameters,
    # the corresponding leading defaults are consumed too.
    if argspec.defaults and partial_args > first_default_arg:
      argspec_defaults = list(argspec.defaults[partial_args-first_default_arg:])
    # Re-base the first-default index onto the shortened argument list.
    first_default_arg = max(0, first_default_arg - partial_args)
    # Keyword args bound by the partial remove their parameter (and its
    # default, if any) wherever it sits in the remaining list.
    for kwarg in (func.keywords or []):
      if kwarg in (argspec.args or []):
        i = argspec_args.index(kwarg)
        argspec_args.pop(i)
        if i >= first_default_arg:
          argspec_defaults.pop(i-first_default_arg)
        else:
          first_default_arg -= 1
    return inspect.ArgSpec(args=argspec_args,
                           varargs=argspec.varargs,
                           keywords=argspec.keywords,
                           defaults=tuple(argspec_defaults))
  else:  # Regular function or method, getargspec will work fine.
    return inspect.getargspec(func)
def _remove_first_line_indent(string):
  """Strip the first line's leading-whitespace width from every line."""
  width = len(re.match(r'^\s*', string).group(0))
  return '\n'.join(line[width:] for line in string.split('\n'))
def _generate_signature(func, reverse_index):
  """Given a function, returns a list of strings representing its args.

  This function produces a list of strings representing the arguments to a
  python function. It uses inspect.getargspec, which
  does not generalize well to Python 3.x, which is more flexible in how *args
  and **kwargs are handled. This is not a problem in TF, since we have to
  remain compatible to Python 2.7 anyway.

  This function uses `__name__` for callables if it is available. This can
  lead to poor results for functools.partial and other callable objects.

  The returned string is Python code, so if it is included in a Markdown
  document, it should be typeset as code (using backticks), or escaped.

  Args:
    func: A function, method, or functools.partial to extract the signature
      for.
    reverse_index: A map from object ids to canonical full names to use.

  Returns:
    A list of strings representing the argument signature of `func` as python
    code.
  """
  # This produces poor signatures for decorated functions.
  # TODO(wicke): We need to use something like the decorator module to fix it.

  args_list = []

  argspec = _get_arg_spec(func)
  first_arg_with_default = (
      len(argspec.args or []) - len(argspec.defaults or []))

  # Python documentation skips `self` when printing method signatures.
  # Note we cannot test for ismethod here since unbound methods do not register
  # as methods (in Python 3).
  first_arg = 1 if 'self' in argspec.args[:1] else 0

  # Add all args without defaults.
  for arg in argspec.args[first_arg:first_arg_with_default]:
    args_list.append(arg)

  # Add all args with defaults.
  if argspec.defaults:
    try:
      # Render defaults from the source AST so names (e.g. constants) are
      # preserved instead of their repr.
      source = _remove_first_line_indent(inspect.getsource(func))
      func_ast = ast.parse(source)
      ast_defaults = func_ast.body[0].args.defaults
    except IOError:  # If this is a builtin, getsource fails with IOError
      # If we cannot get the source, assume the AST would be equal to the repr
      # of the defaults.
      ast_defaults = [None] * len(argspec.defaults)

    for arg, default, ast_default in zip(
        argspec.args[first_arg_with_default:], argspec.defaults, ast_defaults):
      if id(default) in reverse_index:
        # Prefer the canonical public name for known default objects.
        default_text = reverse_index[id(default)]
      elif ast_default is not None:
        default_text = codegen.to_source(ast_default)
        if default_text != repr(default):
          # This may be an internal name. If so, handle the ones we know about.
          # TODO(wicke): This should be replaced with a lookup in the index.
          # TODO(wicke): (replace first ident with tf., check if in index)
          internal_names = {
              'ops.GraphKeys': 'tf.GraphKeys',
              '_ops.GraphKeys': 'tf.GraphKeys',
              'init_ops.zeros_initializer': 'tf.zeros_initializer',
              'init_ops.ones_initializer': 'tf.ones_initializer',
              'saver_pb2.SaverDef': 'tf.train.SaverDef',
          }
          full_name_re = '^%s(.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE)
          match = re.match(full_name_re, default_text)
          if match:
            lookup_text = default_text
            for internal_name, public_name in six.iteritems(internal_names):
              if match.group(0).startswith(internal_name):
                lookup_text = public_name + default_text[len(internal_name):]
                break
            if default_text is lookup_text:
              print('WARNING: Using default arg, failed lookup: %s, repr: %r' %
                    (default_text, default))
            else:
              default_text = lookup_text
      else:
        default_text = repr(default)

      args_list.append('%s=%s' % (arg, default_text))

  # Add *args and *kwargs.
  if argspec.varargs:
    args_list.append('*' + argspec.varargs)
  if argspec.keywords:
    args_list.append('**' + argspec.keywords)

  return args_list
def _get_guides_markdown(duplicate_names, guide_index, relative_path):
  """Build a 'See the guide(s): ...' markdown line for a symbol's aliases."""
  guides = []
  for name in duplicate_names:
    guides.extend(guide_index.get(name, []))
  if not guides:
    return ''
  prefix = '../' * (relative_path.count('/') + 3)
  links = sorted({ref.make_md_link(prefix) for ref in guides})
  plural = 's' if len(links) > 1 else ''
  return 'See the guide%s: %s\n\n' % (plural, ', '.join(links))
def _get_defining_class(py_class, name):
  """Return the class in py_class's MRO whose own __dict__ defines `name`,
  or None if no class in the MRO defines it."""
  for klass in inspect.getmro(py_class):
    if name in vars(klass):
      return klass
  return None
class _LinkInfo(
    collections.namedtuple(
        '_LinkInfo', ['short_name', 'full_name', 'obj', 'doc', 'url'])):
  """A documented member that has its own doc page, hence a `url` to link to."""

  __slots__ = []

  def is_link(self):
    # Counterpart of _OtherMemberInfo.is_link, which returns False.
    return True
class _OtherMemberInfo(
    collections.namedtuple('_OtherMemberInfo',
                           ['short_name', 'full_name', 'obj', 'doc'])):
  """A documented member without its own doc page (rendered inline, no url)."""

  __slots__ = []

  def is_link(self):
    return False
# A documented class property.
_PropertyInfo = collections.namedtuple(
    '_PropertyInfo', ['short_name', 'full_name', 'obj', 'doc'])

# A documented method: like _PropertyInfo, plus the generated signature.
_MethodInfo = collections.namedtuple(
    '_MethodInfo', ['short_name', 'full_name', 'obj', 'doc', 'signature'])
class _FunctionPageInfo(object):
  """Collects docs For a function Page.

  All setters below are write-once (guarded by asserts) so a page cannot be
  populated twice.
  """

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None

    self._signature = None

  def for_function(self):
    return True

  def for_class(self):
    return False

  def for_module(self):
    return False

  @property
  def full_name(self):
    return self._full_name

  @property
  def short_name(self):
    # Last component of the dotted name.
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    return self._defined_in

  def set_defined_in(self, defined_in):
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    return self._aliases

  def set_aliases(self, aliases):
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    return self._doc

  def set_doc(self, doc):
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    return self._guides

  def set_guides(self, guides):
    assert self.guides is None
    self._guides = guides

  @property
  def signature(self):
    return self._signature

  def set_signature(self, function, reverse_index):
    """Attach the function's signature.

    Args:
      function: The python function being documented.
      reverse_index: A map from object ids in the index to full names.
    """

    assert self.signature is None
    self._signature = _generate_signature(function, reverse_index)
class _ClassPageInfo(object):
  """Collects docs for a class page.

  Members are partitioned into properties, methods, nested classes and
  "other members" (plain attributes) by collect_docs_for_class().
  """

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None

    self._properties = []
    self._methods = []
    self._classes = []
    self._other_members = []

  def for_function(self):
    return False

  def for_class(self):
    return True

  def for_module(self):
    return False

  @property
  def full_name(self):
    return self._full_name

  @property
  def short_name(self):
    # Last component of the dotted name.
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    return self._defined_in

  def set_defined_in(self, defined_in):
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    return self._aliases

  def set_aliases(self, aliases):
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    return self._doc

  def set_doc(self, doc):
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    return self._guides

  def set_guides(self, guides):
    assert self.guides is None
    self._guides = guides

  @property
  def properties(self):
    return self._properties

  def _add_property(self, short_name, full_name, obj, doc):
    property_info = _PropertyInfo(short_name, full_name, obj, doc)
    self._properties.append(property_info)

  @property
  def methods(self):
    return self._methods

  def _add_method(self, short_name, full_name, obj, doc, signature):
    method_info = _MethodInfo(short_name, full_name, obj, doc, signature)
    self._methods.append(method_info)

  @property
  def classes(self):
    return self._classes

  def _add_class(self, short_name, full_name, obj, doc, url):
    page_info = _LinkInfo(short_name, full_name, obj, doc, url)
    self._classes.append(page_info)

  @property
  def other_members(self):
    return self._other_members

  def _add_other_member(self, short_name, full_name, obj, doc):
    other_member_info = _OtherMemberInfo(short_name, full_name, obj, doc)
    self._other_members.append(other_member_info)

  def collect_docs_for_class(self, py_class,
                             reference_resolver, tree, reverse_index):
    """Collect information necessary specifically for a class's doc page.

    Mainly, this is details about information about the class's members.

    Args:
      py_class: The class object to collect docs for.
      reference_resolver: An instance of ReferenceResolver.
      tree: A map from full names to the names of all documentable child
        objects.
      reverse_index: A map from object ids in the index to full names.
    """
    doc_path = documentation_path(self.full_name)
    relative_path = os.path.relpath(
        path='.', start=os.path.dirname(doc_path) or '.')

    for short_name in tree[self.full_name]:
      # Remove builtin members that we never want to document.
      if short_name in ['__class__', '__base__', '__weakref__', '__doc__',
                        '__module__', '__dict__', '__abstractmethods__',
                        '__slots__', '__getnewargs__']:
        continue

      child_name = '.'.join([self.full_name, short_name])
      child = reference_resolver.py_name_to_object(child_name)

      # Don't document anything that is defined in object or by protobuf.
      defining_class = _get_defining_class(py_class, short_name)
      if (defining_class is object or
          defining_class is type or defining_class is tuple or
          defining_class is BaseException or defining_class is Exception or
          # The following condition excludes most protobuf-defined symbols.
          defining_class and defining_class.__name__ in ['CMessage', 'Message',
                                                         'MessageMeta']):
        continue
      # TODO(markdaoust): Add a note in child docs showing the defining class.

      child_doc = _parse_md_docstring(child, relative_path, reference_resolver)

      if isinstance(child, property):
        self._add_property(short_name, child_name, child, child_doc)

      elif inspect.isclass(child):
        if defining_class is None:
          continue
        url = reference_resolver.reference_to_url(
            child_name, relative_path)
        self._add_class(short_name, child_name, child, child_doc, url)

      elif (inspect.ismethod(child) or inspect.isfunction(child) or
            inspect.isroutine(child)):
        if defining_class is None:
          continue

        # Omit methods defined by namedtuple.
        original_method = defining_class.__dict__[short_name]
        if (hasattr(original_method, '__module__') and
            (original_method.__module__ or '').startswith('namedtuple')):
          continue

        # Some methods are often overridden without documentation. Because it's
        # obvious what they do, don't include them in the docs if there's no
        # docstring.
        if not child_doc.brief.strip() and short_name in [
            '__str__', '__repr__', '__hash__', '__del__', '__copy__']:
          print('Skipping %s, defined in %s, no docstring.' % (child_name,
                                                               defining_class))
          continue

        try:
          child_signature = _generate_signature(child, reverse_index)
        except TypeError:
          # If this is a (dynamically created) slot wrapper, inspect will
          # raise typeerror when trying to get to the code. Ignore such
          # functions.
          continue

        self._add_method(short_name, child_name, child, child_doc,
                         child_signature)
      else:
        # Exclude members defined by protobuf that are useless
        if issubclass(py_class, ProtoMessage):
          if (short_name.endswith('_FIELD_NUMBER') or
              short_name in ['__slots__', 'DESCRIPTOR']):
            continue

        # TODO(wicke): We may want to also remember the object itself.
        self._add_other_member(short_name, child_name, child, child_doc)
class _ModulePageInfo(object):
  """Collects docs for a module page.

  Module members are stored as a flat list; members with their own doc page
  become _LinkInfo entries, the rest _OtherMemberInfo (see _add_member).
  """

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None

    self._members = []

  def for_function(self):
    return False

  def for_class(self):
    return False

  def for_module(self):
    return True

  @property
  def full_name(self):
    return self._full_name

  @property
  def short_name(self):
    # Last component of the dotted name.
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    return self._defined_in

  def set_defined_in(self, defined_in):
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    return self._aliases

  def set_aliases(self, aliases):
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    return self._doc

  def set_doc(self, doc):
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    return self._guides

  def set_guides(self, guides):
    assert self.guides is None
    self._guides = guides

  @property
  def members(self):
    return self._members

  def _add_member(self, short_name, full_name, obj, doc, url=None):
    # With a url the member links to its own page; without, it is inline-only.
    if url is None:
      member = _OtherMemberInfo(short_name, full_name, obj, doc)
    else:
      member = _LinkInfo(short_name, full_name, obj, doc, url)
    self._members.append(member)

  def collect_docs_for_module(self, reference_resolver, tree):
    """Collect information necessary specifically for a module's doc page.

    Mainly this is information about the members of the module.

    Args:
      reference_resolver: An instance of ReferenceResolver.
      tree: A map from full names to the names of all documentable child
        objects.
    """
    relative_path = os.path.relpath(
        path='.',
        start=os.path.dirname(documentation_path(self.full_name)) or '.')

    member_names = tree.get(self.full_name, [])
    for name in member_names:

      if name in ['__builtins__', '__doc__', '__file__', '__name__', '__path__',
                  '__package__']:
        continue

      member_full_name = self.full_name + '.' + name if self.full_name else name
      member = reference_resolver.py_name_to_object(member_full_name)

      member_doc = _parse_md_docstring(member, relative_path,
                                       reference_resolver)

      # Only classes, functions and modules get their own page (and a link).
      if (not inspect.isclass(member) and not inspect.isfunction(member) and
          not inspect.ismodule(member)):
        self._add_member(name, member_full_name, member, member_doc)
        continue

      url = reference_resolver.reference_to_url(
          member_full_name, relative_path)

      self._add_member(name, member_full_name, member, member_doc, url)
class ParserConfig(object):
  """Shared configuration passed to every docs_for_object() call."""

  def __init__(self, reference_resolver, duplicates, tree, reverse_index,
               guide_index, base_dir):
    """Object with the common config for docs_for_object() calls.

    Args:
      reference_resolver: An instance of ReferenceResolver.
      duplicates: A `dict` mapping fully qualified names to a set of all
        aliases of this name. This is used to automatically generate a list
        of all aliases for each name.
      tree: A `dict` mapping a fully qualified name to the names of all its
        members. Used to populate the members section of a class or module
        page.
      reverse_index: A `dict` mapping objects in the index to full names.
      guide_index: A `dict` mapping symbol name strings to objects with a
        `make_md_link()` method.
      base_dir: A base path that is stripped from file locations written to
        the docs.
    """
    # Inputs describing the API that is being documented.
    self.reference_resolver = reference_resolver
    self.duplicates = duplicates
    self.tree = tree
    self.reverse_index = reverse_index
    self.guide_index = guide_index
    self.base_dir = base_dir
    # Where source files live, and how "Defined in" links are built.
    self.defined_in_prefix = 'tensorflow/'
    self.code_url_prefix = (
        'https://www.tensorflow.org/code/tensorflow/')  # pylint: disable=line-too-long
def docs_for_object(full_name, py_object, parser_config):
  """Return a PageInfo object describing a given object from the TF API.

  This function uses _parse_md_docstring to parse the docs pertaining to
  `object`.

  This function resolves '@{symbol}' references in the docstrings into links
  to the appropriate location. It also adds a list of alternative names for
  the symbol automatically.

  It assumes that the docs for each object live in a file given by
  `documentation_path`, and that relative links to files within the
  documentation are resolvable.

  Args:
    full_name: The fully qualified name of the symbol to be documented.
    py_object: The Python object to be documented. Its documentation is
      sourced from `py_object`'s docstring.
    parser_config: A ParserConfig object.

  Returns:
    Either a `_FunctionPageInfo`, `_ClassPageInfo`, or a `_ModulePageInfo`
    depending on the type of the python object being documented.

  Raises:
    RuntimeError: If an object is encountered for which we don't know how
      to make docs.
  """
  # All aliases of the object share a single "master" docs page.
  master_name = parser_config.reference_resolver.py_master_name(full_name)
  duplicate_names = parser_config.duplicates.get(master_name, [full_name])

  # TODO(wicke): Once other pieces are ready, enable this also for partials.
  is_function_like = (
      inspect.ismethod(py_object) or inspect.isfunction(py_object) or
      # Some methods in classes from extensions come in as routines.
      inspect.isroutine(py_object))

  if is_function_like:
    page_info = _FunctionPageInfo(master_name)
    page_info.set_signature(py_object, parser_config.reverse_index)
  elif inspect.isclass(py_object):
    page_info = _ClassPageInfo(master_name)
    page_info.collect_docs_for_class(py_object,
                                     parser_config.reference_resolver,
                                     parser_config.tree,
                                     parser_config.reverse_index)
  elif inspect.ismodule(py_object):
    page_info = _ModulePageInfo(master_name)
    page_info.collect_docs_for_module(parser_config.reference_resolver,
                                      parser_config.tree)
  else:
    raise RuntimeError('Cannot make docs for object %s: %r' % (full_name,
                                                               py_object))

  # Steps shared by all page kinds: parsed docstring, aliases, guides and
  # the "Defined in" note.
  relative_path = os.path.relpath(
      path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
  page_info.set_doc(_parse_md_docstring(
      py_object, relative_path, parser_config.reference_resolver))
  page_info.set_aliases(duplicate_names)
  page_info.set_guides(_get_guides_markdown(
      duplicate_names, parser_config.guide_index, relative_path))
  page_info.set_defined_in(_get_defined_in(py_object, parser_config))
  return page_info
class _PythonBuiltin(object):
"""This class indicated that the object in question is a python builtin.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def is_builtin(self):
return True
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'This is an alias for a Python built-in.\n\n'
class _PythonFile(object):
"""This class indicates that the object is defined in a regular python file.
This can be used for the `defined_in` slot of the `PageInfo` obejcts.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return True
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _ProtoFile(object):
"""This class indicates that the object is defined in a .proto file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _GeneratedFile(object):
"""This class indicates that the object is defined in a generated python file.
Generated files should not be linked to directly.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return True
def __str__(self):
return 'Defined in `%s%s`.\n\n' % (self.path_prefix, self.path)
def _get_defined_in(py_object, parser_config):
  """Returns a description of where the passed in python object was defined.

  Arguments:
    py_object: The Python object.
    parser_config: A ParserConfig object.

  Returns:
    One of `_PythonBuiltin`, `_PythonFile`, `_ProtoFile` or `_GeneratedFile`,
    or `None` when the defining file lies outside the code base.
  """
  # Every page gets a note about where this object is defined
  # TODO(wicke): If py_object is decorated, get the decorated object instead.
  # TODO(wicke): Only use decorators that support this in TF.
  try:
    path = os.path.relpath(path=inspect.getfile(py_object),
                           start=parser_config.base_dir)
  except TypeError:  # getfile throws TypeError if py_object is a builtin.
    return _PythonBuiltin()

  # TODO(wicke): If this is a generated file, link to the source instead.
  # TODO(wicke): Move all generated files to a generated/ directory.
  # TODO(wicke): And make their source file predictable from the file name.

  # In case this is compiled, point to the original
  if path.endswith('.pyc'):
    path = path[:-1]

  # Never include links outside this code base.
  if path.startswith('..'):
    return None

  if re.match(r'.*/gen_[^/]*\.py$', path):
    return _GeneratedFile(path, parser_config)
  elif re.match(r'.*_pb2\.py$', path):
    # The _pb2.py files all appear right next to their defining .proto file.
    return _ProtoFile(path[:-7] + '.proto', parser_config)
  else:
    return _PythonFile(path, parser_config)
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
  """Given a dict of full names to python objects, generate an index page.

  The index page generated contains a list of links for all symbols in `index`
  that have their own documentation page.

  Args:
    library_name: The name for the documented library to use in the title.
    index: A dict mapping full names to python objects.
    reference_resolver: An instance of ReferenceResolver.

  Returns:
    A string containing an index page as Markdown.
  """
  symbol_links = []
  # Plain dict iteration works on both Python 2 and 3; the six.iteritems()
  # indirection used previously was unnecessary.
  for full_name, py_object in index.items():
    if (inspect.ismodule(py_object) or inspect.isfunction(py_object) or
        inspect.isclass(py_object)):
      # In Python 3, unbound methods are functions, so eliminate those.
      if inspect.isfunction(py_object):
        if full_name.count('.') == 0:
          parent_name = ''
        else:
          parent_name = full_name[:full_name.rfind('.')]
        if parent_name in index and inspect.isclass(index[parent_name]):
          # Skip methods (=functions with class parents).
          continue
      symbol_links.append((
          full_name, reference_resolver.python_link(full_name, full_name, '.')))

  lines = ['# All symbols in %s' % library_name, '']
  for _, link in sorted(symbol_links, key=lambda x: x[0]):
    lines.append('* %s' % link)

  # TODO(deannarubin): Make this list into a table and add the brief docstring.
  return '\n'.join(lines)
| strint/tensorflow | tensorflow/tools/docs/parser.py | Python | apache-2.0 | 43,815 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.redis.operators.redis_publish`."""
import warnings
from airflow.providers.redis.operators.redis_publish import RedisPublishOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.redis.operators.redis_publish`.",
DeprecationWarning,
stacklevel=2,
)
| apache/incubator-airflow | airflow/contrib/operators/redis_publish_operator.py | Python | apache-2.0 | 1,150 |
"""Support for IQVIA sensors."""
from __future__ import annotations
from statistics import mean
from typing import NamedTuple
import numpy as np
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_STATE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import IQVIAEntity
from .const import (
DATA_COORDINATOR,
DOMAIN,
TYPE_ALLERGY_FORECAST,
TYPE_ALLERGY_INDEX,
TYPE_ALLERGY_OUTLOOK,
TYPE_ALLERGY_TODAY,
TYPE_ALLERGY_TOMORROW,
TYPE_ASTHMA_FORECAST,
TYPE_ASTHMA_INDEX,
TYPE_ASTHMA_TODAY,
TYPE_ASTHMA_TOMORROW,
TYPE_DISEASE_FORECAST,
TYPE_DISEASE_INDEX,
TYPE_DISEASE_TODAY,
)
# Extra state attribute keys exposed by the sensors below.
ATTR_ALLERGEN_AMOUNT = "allergen_amount"
ATTR_ALLERGEN_GENUS = "allergen_genus"
ATTR_ALLERGEN_NAME = "allergen_name"
ATTR_ALLERGEN_TYPE = "allergen_type"
ATTR_CITY = "city"
ATTR_OUTLOOK = "outlook"
ATTR_RATING = "rating"
ATTR_SEASON = "season"
ATTR_TREND = "trend"
ATTR_ZIP_CODE = "zip_code"

# Maps a sensor type to the API category (coordinator key) that feeds it.
# BUG FIX: the original literal listed the TYPE_ALLERGY_TOMORROW key twice;
# duplicate dict keys silently collapse, so the redundant entry is removed.
API_CATEGORY_MAPPING = {
    TYPE_ALLERGY_TODAY: TYPE_ALLERGY_INDEX,
    TYPE_ALLERGY_TOMORROW: TYPE_ALLERGY_INDEX,
    TYPE_ASTHMA_TODAY: TYPE_ASTHMA_INDEX,
    TYPE_ASTHMA_TOMORROW: TYPE_ASTHMA_INDEX,
    TYPE_DISEASE_TODAY: TYPE_DISEASE_INDEX,
}
class Rating(NamedTuple):
    """Assign label to value range (both bounds inclusive)."""

    # Human-readable rating label, e.g. "Low".
    label: str
    # Inclusive lower bound of the index range.
    minimum: float
    # Inclusive upper bound of the index range.
    maximum: float


# Ordered index ranges (0-12 scale) and their labels; lookups use
# `minimum <= value <= maximum`.
# NOTE(review): values falling in the 0.1-wide gaps between buckets
# (e.g. 2.45) would match no rating and break the single-element unpacking
# in the sensors below — callers appear to pass one-decimal values; confirm.
RATING_MAPPING: list[Rating] = [
    Rating(label="Low", minimum=0.0, maximum=2.4),
    Rating(label="Low/Medium", minimum=2.5, maximum=4.8),
    Rating(label="Medium", minimum=4.9, maximum=7.2),
    Rating(label="Medium/High", minimum=7.3, maximum=9.6),
    Rating(label="High", minimum=9.7, maximum=12),
]
# Trend labels derived from the slope of a linear fit over forecast indices
# (see calculate_trend below).
TREND_FLAT = "Flat"
TREND_INCREASING = "Increasing"
TREND_SUBSIDING = "Subsiding"

# Sensors showing the forecasted average of an index (ForecastSensor).
FORECAST_SENSOR_DESCRIPTIONS = (
    SensorEntityDescription(
        key=TYPE_ALLERGY_FORECAST,
        name="Allergy Index: Forecasted Average",
        icon="mdi:flower",
    ),
    SensorEntityDescription(
        key=TYPE_ASTHMA_FORECAST,
        name="Asthma Index: Forecasted Average",
        icon="mdi:flower",
    ),
    SensorEntityDescription(
        key=TYPE_DISEASE_FORECAST,
        name="Cold & Flu: Forecasted Average",
        icon="mdi:snowflake",
    ),
)

# Sensors showing a single day's index value (IndexSensor). Only "today"
# values carry the measurement state class.
INDEX_SENSOR_DESCRIPTIONS = (
    SensorEntityDescription(
        key=TYPE_ALLERGY_TODAY,
        name="Allergy Index: Today",
        icon="mdi:flower",
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_ALLERGY_TOMORROW,
        name="Allergy Index: Tomorrow",
        icon="mdi:flower",
    ),
    SensorEntityDescription(
        key=TYPE_ASTHMA_TODAY,
        name="Asthma Index: Today",
        icon="mdi:flower",
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    SensorEntityDescription(
        key=TYPE_ASTHMA_TOMORROW,
        name="Asthma Index: Tomorrow",
        icon="mdi:flower",
    ),
    SensorEntityDescription(
        key=TYPE_DISEASE_TODAY,
        name="Cold & Flu Index: Today",
        icon="mdi:pill",
        state_class=STATE_CLASS_MEASUREMENT,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up IQVIA sensors based on a config entry."""
    coordinators = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id]

    def _coordinator_for(description: SensorEntityDescription):
        # "Today"/"tomorrow" sensors share the coordinator of their
        # underlying API category (e.g. the allergy index feed).
        return coordinators[
            API_CATEGORY_MAPPING.get(description.key, description.key)
        ]

    sensors: list[ForecastSensor | IndexSensor] = [
        ForecastSensor(_coordinator_for(description), entry, description)
        for description in FORECAST_SENSOR_DESCRIPTIONS
    ]
    sensors += [
        IndexSensor(_coordinator_for(description), entry, description)
        for description in INDEX_SENSOR_DESCRIPTIONS
    ]

    async_add_entities(sensors)
@callback
def calculate_trend(indices: list[float]) -> str:
    """Calculate the "moving average" of a set of indices."""
    positions = np.arange(0, len(indices))
    values = np.array(indices)
    # Slope of a degree-1 least-squares fit, rounded to two decimals.
    slope = round(np.polyfit(positions, values, 1)[0], 2)  # type: ignore

    if slope > 0:
        return TREND_INCREASING
    return TREND_SUBSIDING if slope < 0 else TREND_FLAT
class ForecastSensor(IQVIAEntity, SensorEntity):
    """Define sensor related to forecast data."""

    @callback
    def update_from_latest_data(self) -> None:
        """Update the sensor."""
        if not self.available:
            return

        data = self.coordinator.data.get("Location", {})
        if not data.get("periods"):
            return

        indices = [p["Index"] for p in data["periods"]]
        average = round(mean(indices), 1)
        # Exactly one rating bucket is expected to contain the average
        # (bounds inclusive), so unpack the single-element list directly.
        [rating] = [
            i.label for i in RATING_MAPPING if i.minimum <= average <= i.maximum
        ]

        self._attr_native_value = average
        self._attr_extra_state_attributes.update(
            {
                ATTR_CITY: data["City"].title(),
                ATTR_RATING: rating,
                ATTR_STATE: data["State"],
                ATTR_TREND: calculate_trend(indices),
                ATTR_ZIP_CODE: data["ZIP"],
            }
        )

        if self.entity_description.key == TYPE_ALLERGY_FORECAST:
            # Allergy forecasts additionally expose outlook/season data that
            # comes from a separate coordinator.
            outlook_coordinator = self.hass.data[DOMAIN][DATA_COORDINATOR][
                self._entry.entry_id
            ][TYPE_ALLERGY_OUTLOOK]
            if not outlook_coordinator.last_update_success:
                return

            self._attr_extra_state_attributes[
                ATTR_OUTLOOK
            ] = outlook_coordinator.data.get("Outlook")
            self._attr_extra_state_attributes[
                ATTR_SEASON
            ] = outlook_coordinator.data.get("Season")
class IndexSensor(IQVIAEntity, SensorEntity):
    """Define sensor related to indices."""

    @callback
    def update_from_latest_data(self) -> None:
        """Update the sensor."""
        if not self.coordinator.last_update_success:
            return

        try:
            # NOTE(review): all three branches fetch the same "Location" key,
            # and dict.get() never raises KeyError, so the except clause looks
            # unreachable unless coordinator.data is a custom mapping —
            # candidate for simplification once confirmed.
            if self.entity_description.key in (
                TYPE_ALLERGY_TODAY,
                TYPE_ALLERGY_TOMORROW,
            ):
                data = self.coordinator.data.get("Location")
            elif self.entity_description.key in (
                TYPE_ASTHMA_TODAY,
                TYPE_ASTHMA_TOMORROW,
            ):
                data = self.coordinator.data.get("Location")
            elif self.entity_description.key == TYPE_DISEASE_TODAY:
                data = self.coordinator.data.get("Location")
        except KeyError:
            return

        # Period type as it appears in the API payload: "Today" or "Tomorrow".
        key = self.entity_description.key.split("_")[-1].title()

        try:
            # ValueError from the unpacking means zero or multiple matching
            # periods — nothing sensible to update.
            [period] = [p for p in data["periods"] if p["Type"] == key]
        except ValueError:
            return

        # Exactly one rating bucket is expected to contain the index value.
        [rating] = [
            i.label for i in RATING_MAPPING if i.minimum <= period["Index"] <= i.maximum
        ]

        self._attr_extra_state_attributes.update(
            {
                ATTR_CITY: data["City"].title(),
                ATTR_RATING: rating,
                ATTR_STATE: data["State"],
                ATTR_ZIP_CODE: data["ZIP"],
            }
        )

        if self.entity_description.key in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW):
            # One numbered attribute set per allergen trigger (1-based).
            for idx, attrs in enumerate(period["Triggers"]):
                index = idx + 1
                self._attr_extra_state_attributes.update(
                    {
                        f"{ATTR_ALLERGEN_GENUS}_{index}": attrs["Genus"],
                        f"{ATTR_ALLERGEN_NAME}_{index}": attrs["Name"],
                        f"{ATTR_ALLERGEN_TYPE}_{index}": attrs["PlantType"],
                    }
                )
        elif self.entity_description.key in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW):
            for idx, attrs in enumerate(period["Triggers"]):
                index = idx + 1
                self._attr_extra_state_attributes.update(
                    {
                        f"{ATTR_ALLERGEN_NAME}_{index}": attrs["Name"],
                        f"{ATTR_ALLERGEN_AMOUNT}_{index}": attrs["PPM"],
                    }
                )
        elif self.entity_description.key == TYPE_DISEASE_TODAY:
            # One "<disease>_index" attribute per reported disease trigger.
            for attrs in period["Triggers"]:
                self._attr_extra_state_attributes[
                    f"{attrs['Name'].lower()}_index"
                ] = attrs["Index"]

        self._attr_native_value = period["Index"]
| lukas-hetzenecker/home-assistant | homeassistant/components/iqvia/sensor.py | Python | apache-2.0 | 8,845 |
# Add per-vertex normals to sphere.obj.
#
# For a unit sphere centred at the origin every vertex position is also its
# own outward normal, so each "v x y z" line is duplicated as "vn x y z" and
# every face index is rewritten as "index//index" (vertex//normal).
#
# BUG FIX: under Python 3, filter() returns a one-shot iterator, so the
# original code exhausted `vertecies` while building `normals` and then
# failed on `vertecies + normals + newFaces` (can't concatenate a filter
# object). List comprehensions give reusable lists on both Python 2 and 3.
with open('sphere.obj') as f:
    lines = f.readlines()

vertices = [line for line in lines if line.startswith('v ')]
faces = [line for line in lines if line.startswith('f ')]

# A unit-sphere vertex doubles as its own normal: "v ..." -> "vn ...".
normals = ['vn' + line[1:] for line in vertices]

# Rewrite "f a b c" as "f a//a b//b c//c" (vertex index == normal index).
new_faces = [
    'f ' +
    ' '.join('{0}//{0}'.format(index) for index in line.split()[1:]) +
    '\n'
    for line in faces
]

with open('sphereWithNormals.obj', 'w') as f:
    f.write(''.join(vertices + normals + new_faces))
| DomNomNom/anisotropic | assets/makeNormalSphere.py | Python | gpl-3.0 | 540 |
from modularodm import Q
from rest_framework import generics
from rest_framework import exceptions
from rest_framework.response import Response
from rest_framework.exceptions import NotFound
from rest_framework.status import HTTP_204_NO_CONTENT
from rest_framework import permissions as drf_permissions
from website.models import Node
from framework.auth.oauth_scopes import CoreScopes
from api.base.views import JSONAPIBaseView
from api.base.filters import ODMFilterMixin
from api.base.utils import get_object_or_error
from api.base import permissions as base_permissions
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.parsers import JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON
from api.preprint_providers.serializers import PreprintProviderSerializer
from api.preprints.parsers import PreprintsJSONAPIParser, PreprintsJSONAPIParserForRegularJSON
from api.preprints.serializers import PreprintSerializer, PreprintPreprintProvidersRelationshipSerializer
from api.nodes.views import NodeMixin, WaterButlerMixin, NodeContributorsList, NodeContributorsSerializer
class PreprintMixin(NodeMixin):
    """Mixin that resolves the `node_id` URL kwarg to a preprint node."""

    serializer_class = PreprintSerializer
    node_lookup_url_kwarg = 'node_id'

    def get_node(self):
        """Return the Node for this request, raising 404 when appropriate.

        Non-preprint nodes 404 except on POST, because creating a preprint
        starts from a regular node (the POST is what makes it a preprint).
        """
        node = get_object_or_error(
            Node,
            self.kwargs[self.node_lookup_url_kwarg],
            display_name='preprint'
        )
        if not node.is_preprint and self.request.method != 'POST':
            raise NotFound

        return node
class PreprintList(JSONAPIBaseView, generics.ListCreateAPIView, ODMFilterMixin):
    """Preprints that represent a special kind of preprint node. *Writeable*.

    ##Note
    **This API endpoint is under active development, and is subject to change in the future.**

    Paginated list of preprints ordered by their `date_created`. Each resource contains a representation of the
    preprint.

    ##Preprint Attributes

    Many of these preprint attributes are the same as node, with a few special fields added in.

    OSF Preprint entities have the "preprint" `type`.

        name                            type                                description
        ====================================================================================
        title                           string                              title of preprint, same as its project or component
        abstract                        string                              description of the preprint
        date_created                    iso8601 timestamp                   timestamp that the preprint was created
        date_modified                   iso8601 timestamp                   timestamp when the preprint was last updated
        tags                            array of strings                    list of tags that describe the node
        subjects                        array of dictionaries               list ids of Subject in the PLOS taxonomy. Dictionary, containing the subject text and subject ID
        doi                             string                              bare DOI for the manuscript, as entered by the user

    ##Relationships

    ###Primary File
    The file that is designated as the preprint's primary file, or the manuscript of the preprint.

    ###Files
    Link to list of files associated with this node/preprint

    ###Contributors
    Link to list of contributors that are affiliated with this preprint.

    ###Provider
    Link to preprint_provider detail for this preprint

    ##Links

    - `self` -- Preprint detail page for the current preprint
    - `html` -- Project on the OSF corresponding to the current preprint
    - `doi` -- URL representation of the DOI entered by the user for the preprint manuscript

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    Preprints may be filtered by their `id`, `title`, `public`, `tags`, `date_created`, `date_modified`, and `subjects`
    Most are string fields and will be filtered using simple substring matching.

    ###Creating New Preprints

    Create a new preprint by posting to the guid of the existing **node**, including the file_id for the
    file you'd like to make the primary preprint file. Note that the **node id** will not be accessible via the
    preprints detail view until after the preprint has been created.

        Method:        POST
        URL:           /preprints/
        Query Params:  <none>
        Body (JSON):   {
                        "data": {
                            "id": node_id,
                            "attributes": {
                                "subjects":      [{subject_id}, ...]  # required
                                "description":   {description},       # optional
                                "tags":          [{tag1}, ...],       # optional
                                "provider":      {provider}           # optional
                            },
                            "relationships": {
                                "primary_file": {                     # required
                                    "data": {
                                        "type": "primary",
                                        "id": {file_id}
                                    }
                                }
                            }
                        }
                    }
        Success:       201 CREATED + preprint representation

    New preprints are created by issuing a POST request to this endpoint, along with the guid for the node to create a preprint from.
    Provider defaults to osf.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    parser_classes = (PreprintsJSONAPIParser, PreprintsJSONAPIParserForRegularJSON,)

    required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
    required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]

    serializer_class = PreprintSerializer

    # BUG FIX: without the trailing comma this was a plain (parenthesized)
    # string, not the intended one-element tuple; sequence-based consumers
    # of `ordering` would iterate it character by character.
    ordering = ('-date_created',)
    view_category = 'preprints'
    view_name = 'preprint-list'

    # overrides ODMFilterMixin
    def get_default_odm_query(self):
        # Baseline filter: public, non-deleted nodes that have a preprint file.
        return (
            Q('preprint_file', 'ne', None) &
            Q('is_deleted', 'ne', True) &
            Q('is_public', 'eq', True)
        )

    # overrides ListAPIView
    def get_queryset(self):
        nodes = Node.find(self.get_query_from_request())
        # Lazily narrow to true preprints (generator, evaluated on iteration).
        return (node for node in nodes if node.is_preprint)
class PreprintDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, PreprintMixin, WaterButlerMixin):
    """Preprint Detail  *Writeable*.

    ##Note
    **This API endpoint is under active development, and is subject to change in the future.**

    ##Preprint Attributes

    Many of these preprint attributes are the same as node, with a few special fields added in.

    OSF Preprint entities have the "preprint" `type`.

        name                            type                                description
        ====================================================================================
        title                           string                              title of preprint, same as its project or component
        abstract                        string                              description of the preprint
        date_created                    iso8601 timestamp                   timestamp that the preprint was created
        date_modified                   iso8601 timestamp                   timestamp when the preprint was last updated
        tags                            array of strings                    list of tags that describe the node
        subjects                        array of dictionaries               list ids of Subject in the PLOS taxonomy. Dictionary, containing the subject text and subject ID
        provider                        string                              original source of the preprint
        doi                             string                              bare DOI for the manuscript, as entered by the user

    ###Updating Preprints

    Update a preprint by sending a patch request to the guid of the existing preprint node that you'd like to update.

        Method:        PATCH
        URL:           /preprints/{node_id}/
        Query Params:  <none>
        Body (JSON):   {
                        "data": {
                            "id": node_id,
                            "attributes": {
                                "subjects":      [{subject_id}, ...]  # optional
                                "description":   {description},       # optional
                                "tags":          [{tag}, ...],        # optional
                                "provider":      {provider}           # optional
                            },
                            "relationships": {
                                "primary_file": {                     # optional
                                    "data": {
                                        "type": "primary",
                                        "id": {file_id}
                                    }
                                }
                            }
                        }
                    }
        Success:       200 OK + preprint representation

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    parser_classes = (PreprintsJSONAPIParser, PreprintsJSONAPIParserForRegularJSON,)

    required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
    required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]

    serializer_class = PreprintSerializer

    view_category = 'preprints'
    view_name = 'preprint-detail'

    def get_object(self):
        # The "object" for this detail view is the preprint node itself;
        # PreprintMixin.get_node() raises NotFound for non-preprint nodes.
        return self.get_node()
class PreprintContributorsList(NodeContributorsList, PreprintMixin):
    """Contributors affiliated with a preprint (see NodeContributorsList)."""

    required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
    required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]

    # NOTE(review): other preprint views use view_category = 'preprints'
    # (plural). Confirm whether the singular here is intentional before
    # changing it, since URL names are derived from these values.
    view_category = 'preprint'
    view_name = 'preprint-contributors'

    serializer_class = NodeContributorsSerializer
class PreprintPreprintProvidersList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin, NodeMixin):
    """ Detail of the preprint providers a preprint has, if any. Returns [] if the preprint has no
    preprint providers.

    ##Note
    **This API endpoint is under active development, and is subject to change in the future**

    ##Attributes

    OSF Preprint Providers have the "preprint_providers" `type`.

        name           type               description
        =========================================================================
        name           string             name of the preprint provider
        logo_path      string             a path to the preprint provider's static logo
        banner_path    string             a path to the preprint provider's banner
        description    string             description of the preprint provider

    ##Links

        self: the canonical api endpoint of this preprint provider
        preprints: link to the provider's preprints

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
    required_write_scopes = [CoreScopes.NULL]

    serializer_class = PreprintProviderSerializer

    view_category = 'preprints'
    view_name = 'preprint-preprint_providers'

    def get_queryset(self):
        # The providers list lives directly on the node; no extra filtering.
        node = self.get_node()
        return node.preprint_providers
class PreprintToPreprintProviderRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView, PreprintMixin):
    """ Relationship Endpoint for Preprint -> PreprintProvider

    Used to set preprint_provider of a preprint to a PreprintProvider

    ##Note
    **This API endpoint is under active development, and is subject to change in the future.**

    ##Actions

    ###Get

        Method:        GET
        URL:           /links/self
        Query Params:  <none>
        Success:       200

    ###Create

        Method:        POST
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [
                           {
                             "type": "preprint_providers",   # required
                             "id": <provider_id>             # required
                           }
                         ]
                       }
        Success:       201

    ###Update

        Method:        PUT || PATCH
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "preprint_providers",   # required
                           "id": <provider_id>             # required
                         }]
                       }
        Success:       200

    This will delete all preprint_providers not listed, meaning a data: [] payload
    does the same as a DELETE with all the preprint_providers.

    ###Destroy

        Method:        DELETE
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "preprint_providers",   # required
                           "id": <provider_id>             # required
                         }]
                       }
        Success:       204

    All of these methods require admin permissions in the preprint.
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
    required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]

    serializer_class = PreprintPreprintProvidersRelationshipSerializer
    parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )

    view_category = 'preprints'
    view_name = 'preprint-relationships-preprint_providers'

    def get_object(self):
        # The serializer expects both the current provider list and the
        # preprint node itself.
        preprint = self.get_node()
        obj = {
            'data': preprint.preprint_providers,
            'self': preprint
        }
        return obj

    def perform_destroy(self, instance):
        data = self.request.data['data']
        user = self.request.user
        current_providers = {provider._id: provider for provider in instance['data']}

        node = instance['self']
        if not node.has_permission(user, 'admin'):
            raise exceptions.PermissionDenied(
                detail='User must be an admin to delete the PreprintProvider relationship.'
            )

        # Only remove providers that are actually attached; ids not present
        # on the preprint are silently ignored.
        for val in data:
            if val['id'] in current_providers:
                node.remove_preprint_provider(preprint_provider=current_providers[val['id']], user=user)
        node.save()

    def create(self, *args, **kwargs):
        # A POST that adds nothing new yields 204 instead of an error.
        try:
            ret = super(PreprintToPreprintProviderRelationship, self).create(*args, **kwargs)
        except RelationshipPostMakesNoChanges:
            return Response(status=HTTP_204_NO_CONTENT)
        return ret
| samchrisinger/osf.io | api/preprints/views.py | Python | apache-2.0 | 15,291 |
import mock
import pytest
import tests
import verzamelend
class RegisterCallbacksTestCase(tests.BaseTestCase):

    @mock.patch('verzamelend.collectd')
    def test(self, mock_collectd):
        """
        Test verzamelend.register_callbacks().

        BUG FIX: the original used `called_once_with(...)` (a no-op mock
        attribute access that always succeeds) and in most cases *called*
        the register_* mocks instead of asserting on them, so the test
        could never fail. Use the real assertion helpers instead.
        """
        plugin = verzamelend.Plugin('test')

        verzamelend.register_callbacks(plugin)

        mock_collectd.register_config.assert_called_once_with(plugin.configCallback)
        mock_collectd.register_flush.assert_called_once_with(plugin.flushCallback)
        mock_collectd.register_init.assert_called_once_with(plugin.initCallback)
        mock_collectd.register_log.assert_called_once_with(plugin.logCallback)
        mock_collectd.register_notification.assert_called_once_with(plugin.notificationCallback)
        mock_collectd.register_read.assert_called_once_with(plugin.readCallback)
        mock_collectd.register_shutdown.assert_called_once_with(plugin.shutdownCallback)
        mock_collectd.register_write.assert_called_once_with(plugin.writeCallback)

    def test_none_plugin(self):
        """
        Test verzamelend.register_callbacks() when a None argument is passed.
        """
        with pytest.raises(ValueError):
            verzamelend.register_callbacks(None)
| collectdbit/verzamelend | tests/module_test.py | Python | apache-2.0 | 1,082 |
#!/usr/bin/env python
# Note: this file is part of some nnet3 config-creation tools that are now deprecated.
from __future__ import print_function
import os
import argparse
import sys
import warnings
import copy
from operator import itemgetter
def GetSumDescriptor(inputs):
    """Builds a nested Sum() descriptor that adds all the given descriptors.

    Descriptors are paired up starting from the *end* of the list (matching
    the original pop() order), repeatedly, until one expression remains.
    Empty-string entries are ignored.

    Args:
        inputs: list of descriptor strings.

    Returns:
        A single-element list, e.g. ['Sum(a, Sum(c, b))'] (kept as a list
        for compatibility with existing callers that index with [0]).

    Raises:
        ValueError: if no non-empty descriptor is supplied.  (The previous
        implementation looped forever on empty input, and also destructively
        emptied the caller's list via pop(); both defects are fixed.)
    """
    descriptors = [d for d in inputs if d.strip() != '']
    if not descriptors:
        raise ValueError('GetSumDescriptor needs at least one non-empty input')

    while len(descriptors) != 1:
        merged = []
        pair = []
        # Walk from the end of the list to reproduce the original pop() order.
        for value in reversed(descriptors):
            pair.append(value)
            if len(pair) == 2:
                merged.append("Sum({0}, {1})".format(pair[0], pair[1]))
                pair = []
        if pair:
            merged.append(pair[0])
        descriptors = merged

    return descriptors
# adds the input nodes and returns the descriptor
def AddInputLayer(config_lines, feat_dim, splice_indexes=[0], ivector_dim=0):
    """Adds the input node(s) and returns the spliced input descriptor.

    Returns a dict with keys 'descriptor' (the Append/Offset expression over
    the input, plus the i-vector when ivector_dim > 0) and 'dimension'.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']  # unused; kept for parity

    components.append('input-node name=input dim=' + str(feat_dim))
    descriptors = []
    for offset in splice_indexes:
        if offset == 0:
            descriptors.append('input')
        else:
            descriptors.append('Offset(input, {0})'.format(offset))
    output_dim = feat_dim * len(splice_indexes)

    if ivector_dim > 0:
        components.append('input-node name=ivector dim=' + str(ivector_dim))
        # The i-vector is constant over time, hence the t -> 0 index rewrite.
        descriptors.append('ReplaceIndex(ivector, t, 0)')
        output_dim += ivector_dim

    if len(descriptors) > 1:
        splice_descriptor = 'Append({0})'.format(', '.join(descriptors))
    else:
        splice_descriptor = descriptors[0]
    print(splice_descriptor)
    return {'descriptor': splice_descriptor,
            'dimension': output_dim}
def AddNoOpLayer(config_lines, name, input):
    """Append a pass-through NoOpComponent named '<name>_noop'.

    Returns a dict with the new node's descriptor and the unchanged
    input dimension.
    """
    dim = input['dimension']
    node_name = '{0}_noop'.format(name)
    config_lines['components'].append(
        'component name={0}_noop type=NoOpComponent dim={1}'.format(name, dim))
    config_lines['component-nodes'].append(
        'component-node name={0}_noop component={0}_noop input={1}'.format(
            name, input['descriptor']))
    return {'descriptor': node_name, 'dimension': dim}
def AddLdaLayer(config_lines, name, input, lda_file):
    # An LDA layer is just a fixed (non-trainable) affine transform whose
    # matrix is read from lda_file; delegate to AddFixedAffineLayer.
    return AddFixedAffineLayer(config_lines, name, input, lda_file)
def AddFixedAffineLayer(config_lines, name, input, matrix_file):
    """Append a non-trainable FixedAffineComponent whose parameters are
    read from matrix_file.

    NOTE: the returned dimension is simply the input dimension — callers
    assume the fixed transform preserves it (TODO confirm for
    dimension-reducing matrices).
    """
    node_name = '{0}_fixaffine'.format(name)
    config_lines['components'].append(
        'component name={0}_fixaffine type=FixedAffineComponent matrix={1}'.format(
            name, matrix_file))
    config_lines['component-nodes'].append(
        'component-node name={0}_fixaffine component={0}_fixaffine input={1}'.format(
            name, input['descriptor']))
    return {'descriptor': node_name, 'dimension': input['dimension']}
def AddBlockAffineLayer(config_lines, name, input, output_dim, num_blocks):
    """Append a BlockAffineComponent (block-diagonal affine transform).

    Both the input and output dimensions must divide evenly into
    num_blocks.  Returns the new node's descriptor and output_dim.
    """
    in_dim = input['dimension']
    assert((in_dim % num_blocks == 0) and
           (output_dim % num_blocks == 0))
    node_name = '{0}_block_affine'.format(name)
    config_lines['components'].append(
        'component name={0}_block_affine type=BlockAffineComponent input-dim={1} output-dim={2} num-blocks={3}'.format(
            name, in_dim, output_dim, num_blocks))
    config_lines['component-nodes'].append(
        'component-node name={0}_block_affine component={0}_block_affine input={1}'.format(
            name, input['descriptor']))
    return {'descriptor': node_name, 'dimension': output_dim}
def AddPermuteLayer(config_lines, name, input, column_map):
    """Append a PermuteComponent that reorders the input dimensions.

    column_map is a sequence of integer column indexes; it is rendered
    as a comma-separated list.  The dimension is unchanged.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # Generator expression instead of the old map(lambda x: str(x), ...).
    permute_indexes = ",".join(str(x) for x in column_map)
    components.append('component name={0}_permute type=PermuteComponent column-map={1}'.format(name, permute_indexes))
    component_nodes.append('component-node name={0}_permute component={0}_permute input={1}'.format(name, input['descriptor']))
    return {'descriptor': '{0}_permute'.format(name),
            'dimension': input['dimension']}
def AddAffineLayer(config_lines, name, input, output_dim, ng_affine_options = "", max_change_per_component = 0.75):
    """Append a trainable NaturalGradientAffineComponent.

    ng_affine_options is spliced verbatim into the component line;
    max_change_per_component (when not None) adds a per-component
    max-change option.  Returns the new descriptor and output_dim.
    """
    if max_change_per_component is None:
        max_change_options = ''
    else:
        max_change_options = "max-change={0:.2f}".format(max_change_per_component)
    config_lines['components'].append(
        "component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(
            name, input['dimension'], output_dim, ng_affine_options, max_change_options))
    config_lines['component-nodes'].append(
        "component-node name={0}_affine component={0}_affine input={1}".format(
            name, input['descriptor']))
    return {'descriptor': '{0}_affine'.format(name),
            'dimension': output_dim}
def AddAffRelNormLayer(config_lines, name, input, output_dim, ng_affine_options = " bias-stddev=0 ", norm_target_rms = 1.0, self_repair_scale = None, max_change_per_component = 0.75):
    """Add an affine -> ReLU -> renormalize stack of components.

    Creates '<name>_affine', '<name>_relu' and '<name>_renorm' (a
    NormalizeComponent targeting norm_target_rms) chained in that order.
    Returns {'descriptor': '<name>_renorm', 'dimension': output_dim}.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # self_repair_scale is a constant scaling the self-repair vector computed in RectifiedLinearComponent
    self_repair_string = "self-repair-scale={0:.10f}".format(self_repair_scale) if self_repair_scale is not None else ''
    # Per-component max-change option
    max_change_options = "max-change={0:.2f}".format(max_change_per_component) if max_change_per_component is not None else ''
    components.append("component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input['dimension'], output_dim, ng_affine_options, max_change_options))
    components.append("component name={0}_relu type=RectifiedLinearComponent dim={1} {2}".format(name, output_dim, self_repair_string))
    components.append("component name={0}_renorm type=NormalizeComponent dim={1} target-rms={2}".format(name, output_dim, norm_target_rms))
    component_nodes.append("component-node name={0}_affine component={0}_affine input={1}".format(name, input['descriptor']))
    component_nodes.append("component-node name={0}_relu component={0}_relu input={0}_affine".format(name))
    component_nodes.append("component-node name={0}_renorm component={0}_renorm input={0}_relu".format(name))
    return {'descriptor': '{0}_renorm'.format(name),
            'dimension': output_dim}
def AddAffPnormLayer(config_lines, name, input, pnorm_input_dim, pnorm_output_dim, ng_affine_options = " bias-stddev=0 ", norm_target_rms = 1.0):
    """Add an affine -> p-norm -> renormalize stack of components.

    The affine layer maps to pnorm_input_dim; the PnormComponent reduces
    that to pnorm_output_dim; a NormalizeComponent (target-rms =
    norm_target_rms) follows.  Unlike AddAffRelNormLayer, no max-change
    or self-repair options are applied here.
    Returns {'descriptor': '<name>_renorm', 'dimension': pnorm_output_dim}.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    components.append("component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3}".format(name, input['dimension'], pnorm_input_dim, ng_affine_options))
    components.append("component name={0}_pnorm type=PnormComponent input-dim={1} output-dim={2}".format(name, pnorm_input_dim, pnorm_output_dim))
    components.append("component name={0}_renorm type=NormalizeComponent dim={1} target-rms={2}".format(name, pnorm_output_dim, norm_target_rms))
    component_nodes.append("component-node name={0}_affine component={0}_affine input={1}".format(name, input['descriptor']))
    component_nodes.append("component-node name={0}_pnorm component={0}_pnorm input={0}_affine".format(name))
    component_nodes.append("component-node name={0}_renorm component={0}_renorm input={0}_pnorm".format(name))
    return {'descriptor': '{0}_renorm'.format(name),
            'dimension': pnorm_output_dim}
def AddConvolutionLayer(config_lines, name, input,
                        input_x_dim, input_y_dim, input_z_dim,
                        filt_x_dim, filt_y_dim,
                        filt_x_step, filt_y_step,
                        num_filters, input_vectorization,
                        param_stddev = None, bias_stddev = None,
                        filter_bias_file = None,
                        is_updatable = True):
    """Add a ConvolutionComponent over a 3-d (x, y, z) input volume.

    The input descriptor's dimension must equal
    input_x_dim * input_y_dim * input_z_dim.  Filter parameters are
    either read from filter_bias_file (when given) or initialized with
    num_filters filters.  param_stddev, bias_stddev and is_updatable are
    currently unused — kept for interface compatibility.

    Returns the output descriptor, its flat dimension, its 3-d shape
    [num_x_steps, num_y_steps, num_filters] and the output
    vectorization order ('zyx').
    """
    assert(input['dimension'] == input_x_dim * input_y_dim * input_z_dim)
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    conv_init_string = ("component name={name}_conv type=ConvolutionComponent "
                       "input-x-dim={input_x_dim} input-y-dim={input_y_dim} input-z-dim={input_z_dim} "
                       "filt-x-dim={filt_x_dim} filt-y-dim={filt_y_dim} "
                       "filt-x-step={filt_x_step} filt-y-step={filt_y_step} "
                       "input-vectorization-order={vector_order}".format(name = name,
                       input_x_dim = input_x_dim, input_y_dim = input_y_dim, input_z_dim = input_z_dim,
                       filt_x_dim = filt_x_dim, filt_y_dim = filt_y_dim,
                       filt_x_step = filt_x_step, filt_y_step = filt_y_step,
                       vector_order = input_vectorization))
    if filter_bias_file is not None:
        conv_init_string += " matrix={0}".format(filter_bias_file)
    else:
        conv_init_string += " num-filters={0}".format(num_filters)
    components.append(conv_init_string)
    component_nodes.append("component-node name={0}_conv_t component={0}_conv input={1}".format(name, input['descriptor']))
    # Floor division keeps the step counts integral under both Python 2
    # and Python 3 (plain '/' would yield floats on Python 3).
    num_x_steps = 1 + (input_x_dim - filt_x_dim) // filt_x_step
    num_y_steps = 1 + (input_y_dim - filt_y_dim) // filt_y_step
    output_dim = num_x_steps * num_y_steps * num_filters
    return {'descriptor': '{0}_conv_t'.format(name),
            'dimension': output_dim,
            '3d-dim': [num_x_steps, num_y_steps, num_filters],
            'vectorization': 'zyx'}
# The Maxpooling component assumes input vectorizations of type zyx
def AddMaxpoolingLayer(config_lines, name, input,
                       input_x_dim, input_y_dim, input_z_dim,
                       pool_x_size, pool_y_size, pool_z_size,
                       pool_x_step, pool_y_step, pool_z_step):
    """Add a MaxpoolingComponent over a 3-d (x, y, z) input volume.

    Validates that pool sizes fit inside the input volume and that pool
    steps do not exceed pool sizes; raises Exception otherwise.  The
    input descriptor's dimension must equal
    input_x_dim * input_y_dim * input_z_dim.

    Returns the output descriptor, its flat dimension, its 3-d shape
    and the output vectorization order ('zyx').
    """
    if input_x_dim < 1 or input_y_dim < 1 or input_z_dim < 1:
        raise Exception("non-positive maxpooling input size ({0}, {1}, {2})".
                        format(input_x_dim, input_y_dim, input_z_dim))
    if pool_x_size > input_x_dim or pool_y_size > input_y_dim or pool_z_size > input_z_dim:
        raise Exception("invalid maxpooling pool size vs. input size")
    if pool_x_step > pool_x_size or pool_y_step > pool_y_size or pool_z_step > pool_z_size:
        raise Exception("invalid maxpooling pool step vs. pool size")
    assert(input['dimension'] == input_x_dim * input_y_dim * input_z_dim)
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    components.append('component name={name}_maxp type=MaxpoolingComponent '
                      'input-x-dim={input_x_dim} input-y-dim={input_y_dim} input-z-dim={input_z_dim} '
                      'pool-x-size={pool_x_size} pool-y-size={pool_y_size} pool-z-size={pool_z_size} '
                      'pool-x-step={pool_x_step} pool-y-step={pool_y_step} pool-z-step={pool_z_step} '.
                      format(name = name,
                      input_x_dim = input_x_dim, input_y_dim = input_y_dim, input_z_dim = input_z_dim,
                      pool_x_size = pool_x_size, pool_y_size = pool_y_size, pool_z_size = pool_z_size,
                      pool_x_step = pool_x_step, pool_y_step = pool_y_step, pool_z_step = pool_z_step))
    component_nodes.append('component-node name={0}_maxp_t component={0}_maxp input={1}'.format(name, input['descriptor']))
    # Floor division keeps the pool counts integral under both Python 2
    # and Python 3 (plain '/' would yield floats on Python 3).
    num_pools_x = 1 + (input_x_dim - pool_x_size) // pool_x_step
    num_pools_y = 1 + (input_y_dim - pool_y_size) // pool_y_step
    num_pools_z = 1 + (input_z_dim - pool_z_size) // pool_z_step
    output_dim = num_pools_x * num_pools_y * num_pools_z
    return {'descriptor': '{0}_maxp_t'.format(name),
            'dimension': output_dim,
            '3d-dim': [num_pools_x, num_pools_y, num_pools_z],
            'vectorization': 'zyx'}
def AddSoftmaxLayer(config_lines, name, input):
    """Append a LogSoftmaxComponent named '<name>_log_softmax'.

    The dimension is unchanged.
    """
    dim = input['dimension']
    node_name = '{0}_log_softmax'.format(name)
    config_lines['components'].append(
        "component name={0}_log_softmax type=LogSoftmaxComponent dim={1}".format(name, dim))
    config_lines['component-nodes'].append(
        "component-node name={0}_log_softmax component={0}_log_softmax input={1}".format(
            name, input['descriptor']))
    return {'descriptor': node_name, 'dimension': dim}
def AddSigmoidLayer(config_lines, name, input, self_repair_scale = None):
    """Append a SigmoidComponent named '<name>_sigmoid'.

    The dimension is unchanged.  When self_repair_scale is given it is
    emitted as a self-repair-scale option on the component line.

    Bug fix: the format template previously lacked the '{2}' placeholder,
    so self_repair_string was computed but silently dropped (compare the
    ReLU line in AddAffRelNormLayer, which does include it).
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # self_repair_scale is a constant scaling the self-repair vector computed in SigmoidComponent
    self_repair_string = "self-repair-scale={0:.10f}".format(self_repair_scale) if self_repair_scale is not None else ''
    components.append("component name={0}_sigmoid type=SigmoidComponent dim={1} {2}".format(name, input['dimension'], self_repair_string))
    component_nodes.append("component-node name={0}_sigmoid component={0}_sigmoid input={1}".format(name, input['descriptor']))
    return {'descriptor': '{0}_sigmoid'.format(name),
            'dimension': input['dimension']}
def AddOutputLayer(config_lines, input, label_delay = None, suffix=None, objective_type = "linear"):
    """Attach an output-node reading from the given input descriptor.

    When suffix is given the node is named 'output-<suffix>'; when
    label_delay is given the input is wrapped in Offset(..., delay).
    """
    components = config_lines['components']  # looked up for parity with sibling helpers (unused)
    node_name = 'output' if suffix is None else 'output-{0}'.format(suffix)
    if label_delay is None:
        source = input['descriptor']
    else:
        source = 'Offset({0},{1})'.format(input['descriptor'], label_delay)
    config_lines['component-nodes'].append(
        'output-node name={0} input={1} objective={2}'.format(node_name, source, objective_type))
def AddFinalLayer(config_lines, input, output_dim,
                  ng_affine_options = " param-stddev=0 bias-stddev=0 ",
                  max_change_per_component = 1.5,
                  label_delay=None,
                  use_presoftmax_prior_scale = False,
                  prior_scale_file = None,
                  include_log_softmax = True,
                  add_final_sigmoid = False,
                  name_affix = None,
                  objective_type = "linear"):
    """Add the final affine layer plus the output node.

    Between the affine layer and the output node this optionally inserts:
      - a FixedScaleComponent applying presoftmax prior scales from
        prior_scale_file (only when include_log_softmax and
        use_presoftmax_prior_scale are both true),
      - a LogSoftmaxComponent (include_log_softmax=True), or
      - a SigmoidComponent (add_final_sigmoid=True), for probability-like
        outputs, usually with objective_type="quadratic".

    name_affix is used as a *prefix* for the affine/scale components
    ('Final-<affix>') but as a *suffix* for the output node
    ('output-<affix>').  label_delay time-shifts the output node's input.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    if name_affix is not None:
        final_node_prefix = 'Final-' + str(name_affix)
    else:
        final_node_prefix = 'Final'
    prev_layer_output = AddAffineLayer(config_lines,
                                       final_node_prefix , input, output_dim,
                                       ng_affine_options, max_change_per_component)
    if include_log_softmax:
        if use_presoftmax_prior_scale :
            components.append('component name={0}-fixed-scale type=FixedScaleComponent scales={1}'.format(final_node_prefix, prior_scale_file))
            component_nodes.append('component-node name={0}-fixed-scale component={0}-fixed-scale input={1}'.format(final_node_prefix,
                prev_layer_output['descriptor']))
            # Re-point the chain at the fixed-scale node before the softmax.
            prev_layer_output['descriptor'] = "{0}-fixed-scale".format(final_node_prefix)
        prev_layer_output = AddSoftmaxLayer(config_lines, final_node_prefix, prev_layer_output)
    elif add_final_sigmoid:
        # Useful when you need the final outputs to be probabilities
        # between 0 and 1.
        # Usually used with an objective-type such as "quadratic"
        prev_layer_output = AddSigmoidLayer(config_lines, final_node_prefix, prev_layer_output)
    # we use the same name_affix as a prefix in for affine/scale nodes but as a
    # suffix for output node
    AddOutputLayer(config_lines, prev_layer_output, label_delay, suffix = name_affix, objective_type = objective_type)
def AddLstmLayer(config_lines,
                 name, input, cell_dim,
                 recurrent_projection_dim = 0,
                 non_recurrent_projection_dim = 0,
                 clipping_threshold = 30.0,
                 zeroing_threshold = 15.0,
                 zeroing_interval = 20,
                 ng_per_element_scale_options = "",
                 ng_affine_options = "",
                 lstm_delay = -1,
                 self_repair_scale_nonlinearity = None,
                 max_change_per_component = 0.75):
    """Add a unidirectional (projected) LSTM layer.

    Emits the component and component-node lines for a standard LSTMP
    cell: input/forget/output gates with peephole (diagonal) connections,
    cell and output nonlinearities, and BackpropTruncationComponents on
    the recurrent paths (configured by clipping_threshold,
    zeroing_threshold, zeroing_interval and the recurrence interval
    |lstm_delay|).

    lstm_delay is the time offset of the recurrence (negative for a
    forward layer, positive for a backward layer).  When
    recurrent_projection_dim > 0 a projection matrix is added and the
    recurrence runs over the projected output 'r_t'; otherwise it runs
    over 'm_t' with dimension cell_dim.  When
    non_recurrent_projection_dim > 0 as well, the projection produces an
    extra non-recurrent part 'p_t' that is appended to the output.

    Returns {'descriptor': ..., 'dimension': ...} for the layer output.
    """
    assert(recurrent_projection_dim >= 0 and non_recurrent_projection_dim >= 0)
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    input_descriptor = input['descriptor']
    input_dim = input['dimension']
    name = name.strip()
    if (recurrent_projection_dim == 0):
        add_recurrent_projection = False
        recurrent_projection_dim = cell_dim
        recurrent_connection = "m_t"
    else:
        add_recurrent_projection = True
        recurrent_connection = "r_t"
    if (non_recurrent_projection_dim == 0):
        add_non_recurrent_projection = False
    else:
        add_non_recurrent_projection = True
    # self_repair_scale_nonlinearity is a constant scaling the self-repair vector computed in derived classes of NonlinearComponent,
    # i.e., SigmoidComponent, TanhComponent and RectifiedLinearComponent
    self_repair_nonlinearity_string = "self-repair-scale={0:.10f}".format(self_repair_scale_nonlinearity) if self_repair_scale_nonlinearity is not None else ''
    # Natural gradient per element scale parameters
    ng_per_element_scale_options += " param-mean=0.0 param-stddev=1.0 "
    # Per-component max-change option
    max_change_options = "max-change={0:.2f}".format(max_change_per_component) if max_change_per_component is not None else ''
    # Parameter Definitions W*(* replaced by - to have valid names)
    components.append("# Input gate control : W_i* matrices")
    components.append("component name={0}_W_i-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))
    components.append("# note : the cell outputs pass through a diagonal matrix")
    components.append("component name={0}_w_ic type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}".format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append("# Forget gate control : W_f* matrices")
    components.append("component name={0}_W_f-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))
    components.append("# note : the cell outputs pass through a diagonal matrix")
    components.append("component name={0}_w_fc type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}".format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append("# Output gate control : W_o* matrices")
    components.append("component name={0}_W_o-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))
    components.append("# note : the cell outputs pass through a diagonal matrix")
    components.append("component name={0}_w_oc type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}".format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append("# Cell input matrices : W_c* matrices")
    components.append("component name={0}_W_c-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))
    components.append("# Defining the non-linearities")
    components.append("component name={0}_i type=SigmoidComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_f type=SigmoidComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_o type=SigmoidComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_g type=TanhComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_h type=TanhComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("# Defining the cell computations")
    components.append("component name={0}_c1 type=ElementwiseProductComponent input-dim={1} output-dim={2}".format(name, 2 * cell_dim, cell_dim))
    components.append("component name={0}_c2 type=ElementwiseProductComponent input-dim={1} output-dim={2}".format(name, 2 * cell_dim, cell_dim))
    components.append("component name={0}_m type=ElementwiseProductComponent input-dim={1} output-dim={2}".format(name, 2 * cell_dim, cell_dim))
    components.append("component name={0}_c type=BackpropTruncationComponent dim={1} "
                      "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                      "recurrence-interval={5}".format(name, cell_dim, clipping_threshold, zeroing_threshold,
                                                      zeroing_interval, abs(lstm_delay)))
    # c1_t and c2_t defined below
    component_nodes.append("component-node name={0}_c_t component={0}_c input=Sum({0}_c1_t, {0}_c2_t)".format(name))
    c_tminus1_descriptor = "IfDefined(Offset({0}_c_t, {1}))".format(name, lstm_delay)
    component_nodes.append("# i_t")
    component_nodes.append("component-node name={0}_i1 component={0}_W_i-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_i2 component={0}_w_ic input={1}".format(name, c_tminus1_descriptor))
    component_nodes.append("component-node name={0}_i_t component={0}_i input=Sum({0}_i1, {0}_i2)".format(name))
    component_nodes.append("# f_t")
    component_nodes.append("component-node name={0}_f1 component={0}_W_f-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_f2 component={0}_w_fc input={1}".format(name, c_tminus1_descriptor))
    component_nodes.append("component-node name={0}_f_t component={0}_f input=Sum({0}_f1,{0}_f2)".format(name))
    component_nodes.append("# o_t")
    component_nodes.append("component-node name={0}_o1 component={0}_W_o-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_o2 component={0}_w_oc input={0}_c_t".format(name))
    component_nodes.append("component-node name={0}_o_t component={0}_o input=Sum({0}_o1, {0}_o2)".format(name))
    component_nodes.append("# h_t")
    component_nodes.append("component-node name={0}_h_t component={0}_h input={0}_c_t".format(name))
    component_nodes.append("# g_t")
    component_nodes.append("component-node name={0}_g1 component={0}_W_c-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_g_t component={0}_g input={0}_g1".format(name))
    component_nodes.append("# parts of c_t")
    component_nodes.append("component-node name={0}_c1_t component={0}_c1 input=Append({0}_f_t, {1})".format(name, c_tminus1_descriptor))
    component_nodes.append("component-node name={0}_c2_t component={0}_c2 input=Append({0}_i_t, {0}_g_t)".format(name))
    component_nodes.append("# m_t")
    component_nodes.append("component-node name={0}_m_t component={0}_m input=Append({0}_o_t, {0}_h_t)".format(name))
    # add the recurrent connections
    if (add_recurrent_projection and add_non_recurrent_projection):
        components.append("# projection matrices : Wrm and Wpm")
        components.append("component name={0}_W-m type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, cell_dim, recurrent_projection_dim + non_recurrent_projection_dim, ng_affine_options, max_change_options))
        components.append("component name={0}_r type=BackpropTruncationComponent dim={1} "
                          "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                          "recurrence-interval={5}".format(name, recurrent_projection_dim, clipping_threshold,
                                                          zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append("# r_t and p_t")
        component_nodes.append("component-node name={0}_rp_t component={0}_W-m input={0}_m_t".format(name))
        component_nodes.append("dim-range-node name={0}_r_t_preclip input-node={0}_rp_t dim-offset=0 dim={1}".format(name, recurrent_projection_dim))
        component_nodes.append("component-node name={0}_r_t component={0}_r input={0}_r_t_preclip".format(name))
        output_descriptor = '{0}_rp_t'.format(name)
        output_dim = recurrent_projection_dim + non_recurrent_projection_dim
    elif add_recurrent_projection:
        components.append("# projection matrices : Wrm")
        components.append("component name={0}_Wrm type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(
            name, cell_dim, recurrent_projection_dim, ng_affine_options, max_change_options))
        components.append("component name={0}_r type=BackpropTruncationComponent dim={1} "
                          "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                          "recurrence-interval={5}".format(name, recurrent_projection_dim, clipping_threshold,
                                                          zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append("# r_t")
        component_nodes.append("component-node name={0}_r_t_preclip component={0}_Wrm input={0}_m_t".format(name))
        component_nodes.append("component-node name={0}_r_t component={0}_r input={0}_r_t_preclip".format(name))
        output_descriptor = '{0}_r_t'.format(name)
        output_dim = recurrent_projection_dim
    else:
        components.append("component name={0}_r type=BackpropTruncationComponent dim={1} "
                          "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                          "recurrence-interval={5}".format(name, cell_dim, clipping_threshold,
                                                          zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append("component-node name={0}_r_t component={0}_r input={0}_m_t".format(name))
        output_descriptor = '{0}_r_t'.format(name)
        output_dim = cell_dim
    return {
        'descriptor': output_descriptor,
        'dimension':output_dim
    }
def AddBLstmLayer(config_lines,
                  name, input, cell_dim,
                  recurrent_projection_dim = 0,
                  non_recurrent_projection_dim = 0,
                  clipping_threshold = 1.0,
                  zeroing_threshold = 3.0,
                  zeroing_interval = 20,
                  ng_per_element_scale_options = "",
                  ng_affine_options = "",
                  lstm_delay = [-1,1],
                  self_repair_scale_nonlinearity = None,
                  max_change_per_component = 0.75):
    """Add a bidirectional LSTM layer.

    Builds one forward and one backward unidirectional LSTM layer
    ('<name>_forward' / '<name>_backward') over the same input and
    returns the Append(...) of their outputs; lstm_delay must be a
    [negative, positive] pair giving the two recurrence delays.
    """
    assert(len(lstm_delay) == 2 and lstm_delay[0] < 0 and lstm_delay[1] > 0)
    # All arguments except the name and the delay are shared by both
    # directions; collect them once so the two calls cannot diverge.
    shared_args = dict(config_lines = config_lines,
                       input = input,
                       cell_dim = cell_dim,
                       recurrent_projection_dim = recurrent_projection_dim,
                       non_recurrent_projection_dim = non_recurrent_projection_dim,
                       clipping_threshold = clipping_threshold,
                       zeroing_threshold = zeroing_threshold,
                       zeroing_interval = zeroing_interval,
                       ng_per_element_scale_options = ng_per_element_scale_options,
                       ng_affine_options = ng_affine_options,
                       self_repair_scale_nonlinearity = self_repair_scale_nonlinearity,
                       max_change_per_component = max_change_per_component)
    forward = AddLstmLayer(name = "{0}_forward".format(name),
                           lstm_delay = lstm_delay[0], **shared_args)
    backward = AddLstmLayer(name = "{0}_backward".format(name),
                            lstm_delay = lstm_delay[1], **shared_args)
    return {
        'descriptor': 'Append({0}, {1})'.format(forward['descriptor'], backward['descriptor']),
        'dimension': forward['dimension'] + backward['dimension']
    }
| michellemorales/OpenMM | kaldi/egs/wsj/s5/steps/nnet3/components.py | Python | gpl-2.0 | 29,765 |
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Some certmonger functions, mostly around updating the request file.
# This is used so we can add tracking to the Apache and 389-ds
# server certificates created during the IPA server installation.
import os
import sys
import re
import time
from ipapython import ipautil
from ipapython import dogtag
REQUEST_DIR='/var/lib/certmonger/requests/'
CA_DIR='/var/lib/certmonger/cas/'
# Normalizer types for critera in get_request_id()
NPATH = 1
def find_request_value(filename, directive):
    """
    Return a value from a certmonger request file for the requested directive.

    It tries to do this a number of times because sometimes there is a delay
    when ipa-getcert returns and the file is fully updated, particularly
    when doing a request. Generating a CSR is fast but not instantaneous.

    :param filename: path of the certmonger request file to parse
    :param directive: key to look up, e.g. 'id' or 'cert_nickname'
    :returns: the value string (it may keep a trailing newline, and may
        span several source lines), or None if the directive was never
        found after all retries.
    """
    tries = 1
    value = None
    found = False
    # Up to 5 attempts; note the 1-second sleep happens before *every*
    # read — including the first — to give certmonger time to finish
    # writing the file.
    while value is None and tries <= 5:
        tries=tries + 1
        time.sleep(1)
        fp = open(filename, 'r')
        lines = fp.readlines()
        fp.close()
        for line in lines:
            if found:
                # A value can span multiple lines. If it does then it has a
                # leading space.
                if not line.startswith(' '):
                    # We hit the next directive, return now
                    return value
                else:
                    # Continuation line: append it minus the leading space.
                    value = value + line[1:]
            else:
                if line.startswith(directive + '='):
                    found = True
                    value = line[len(directive)+1:]
    # Reached when the directive was the last entry in the file (value
    # fully accumulated) or when all retries were exhausted (None).
    return value
def get_request_value(request_id, directive):
    """
    Return the value of *directive* from the certmonger request whose id
    is *request_id*, or None if no request matches.

    There is no guarantee that the request_id matches the filename in the
    certmonger requests directory, so each file is opened and its 'id'
    directive compared.
    """
    for entry in os.listdir(REQUEST_DIR):
        path = '%s/%s' % (REQUEST_DIR, entry)
        req_id = find_request_value(path, 'id')
        if req_id is not None and req_id.rstrip() == request_id:
            return find_request_value(path, directive)
    return None
def get_request_id(criteria):
    """
    If you don't know the certmonger request_id then try to find it by looking
    through all the request files. An alternative would be to parse the
    ipa-getcert list output but this seems cleaner.

    criteria is a tuple of (key, value, normalizer-type) tuples to search
    for; every entry must match a request file for it to be considered a
    hit. The more specific the better. A RuntimeError is raised if
    multiple request_ids match the same criteria.

    None is returned if none of the criteria match.
    """
    assert type(criteria) is tuple
    reqid=None
    fileList=os.listdir(REQUEST_DIR)
    for file in fileList:
        match = True
        for (key, value, valtype) in criteria:
            rv = find_request_value('%s/%s' % (REQUEST_DIR, file), key)
            # NPATH-typed criteria compare canonicalized absolute paths.
            if rv and valtype == NPATH:
                rv = os.path.abspath(rv)
            if rv is None or rv.rstrip() != value:
                match = False
                break
        if match and reqid is not None:
            # A second request file also satisfied the criteria, so the
            # caller's criteria were ambiguous.
            raise RuntimeError('multiple certmonger requests match the criteria')
        if match:
            reqid = find_request_value('%s/%s' % (REQUEST_DIR, file), 'id').rstrip()
    return reqid
def get_requests_for_dir(dir):
    """
    Return a list of the certmonger request ids whose
    cert_storage_location matches the given NSS database directory.
    """
    request_ids = []
    for entry in os.listdir(REQUEST_DIR):
        path = os.path.join(REQUEST_DIR, entry)
        location = find_request_value(path, 'cert_storage_location')
        if location is None:
            continue
        if os.path.abspath(location).rstrip() != dir:
            continue
        req_id = find_request_value(path, 'id')
        if req_id is not None:
            request_ids.append(req_id.rstrip())
    return request_ids
def add_request_value(request_id, directive, value):
    """
    Add a new directive to a certmonger request file.

    The directive is only appended if it is not already present; an
    existing value is never overwritten.  Files whose 'id' does not
    match request_id are left untouched.

    The certmonger service MUST be stopped in order for this to work.
    """
    fileList=os.listdir(REQUEST_DIR)
    for file in fileList:
        id = find_request_value('%s/%s' % (REQUEST_DIR, file), 'id')
        if id is not None and id.rstrip() == request_id:
            current_value = find_request_value('%s/%s' % (REQUEST_DIR, file), directive)
            if not current_value:
                # Append-only update; certmonger re-reads the file when
                # it is next started.
                fp = open('%s/%s' % (REQUEST_DIR, file), 'a')
                fp.write('%s=%s\n' % (directive, value))
                fp.close()
            return
def add_principal(request_id, principal):
    """
    In order for a certmonger request to be renewable it needs a principal.

    When an existing certificate is added via start-tracking it won't have
    a principal, so set template_principal in the request file.
    """
    return add_request_value(request_id, 'template_principal', principal)
def add_subject(request_id, subject):
    """
    In order for a certmonger request to be renewable it needs the subject
    set in the request file.

    When an existing certificate is added via start-tracking it won't have
    a subject_template set, so set template_subject in the request file.
    """
    return add_request_value(request_id, 'template_subject', subject)
def request_cert(nssdb, nickname, subject, principal, passwd_fname=None):
    """
    Execute certmonger to request a server certificate.

    :param nssdb: NSS database directory the certificate is stored in
    :param nickname: nickname to store the certificate under
    :param subject: certificate subject to request
    :param principal: Kerberos principal for the certificate
    :param passwd_fname: optional NSS database password file
    :returns: the certmonger request id as a string
    :raises RuntimeError: if the ipa-getcert output cannot be parsed
        (previously this crashed with an unhelpful AttributeError on
        ``None.group``)
    """
    args = ['/usr/bin/ipa-getcert',
            'request',
            '-d', nssdb,
            '-n', nickname,
            '-N', subject,
            '-K', principal,
           ]
    if passwd_fname:
        args.append('-p')
        args.append(os.path.abspath(passwd_fname))
    (stdout, stderr, returncode) = ipautil.run(args)
    # Raw string so \d is a regex escape, not a (deprecated) string escape.
    m = re.match(r'New signing request "(\d+)" added', stdout)
    if m is None:
        raise RuntimeError('certmonger request failed: %s' % stderr)
    return m.group(1)
def cert_exists(nickname, secdir):
    """
    See if a nickname exists in an NSS database.

    Returns True/False.

    This isn't very sophisticated in that it doesn't differentiate between
    a database that doesn't exist and a nickname that doesn't exist within
    the database.
    """
    args = ["/usr/bin/certutil", "-L",
            "-d", os.path.abspath(secdir),
            "-n", nickname
           ]
    (stdout, stderr, rc) = ipautil.run(args, raiseonerr=False)
    # certutil exits 0 iff the nickname was found in the database.
    return rc == 0
def start_tracking(nickname, secdir, password_file=None, command=None):
    """
    Tell certmonger to track the given certificate nickname in NSS
    database in secdir protected by optional password file password_file.

    command is an optional parameter which specifies a command for
    certmonger to run when it renews a certificate. This command must
    reside in /usr/lib/ipa/certmonger to work with SELinux.

    Returns the stdout, stderr and returncode from running ipa-getcert.

    :raises RuntimeError: if the nickname does not exist in the database

    This assumes that certmonger is already running.
    """
    # Fail early with a clear message rather than letting ipa-getcert
    # fail on a nonexistent nickname.
    if not cert_exists(nickname, os.path.abspath(secdir)):
        raise RuntimeError('Nickname "%s" doesn\'t exist in NSS database "%s"' % (nickname, secdir))
    args = ["/usr/bin/ipa-getcert", "start-tracking",
            "-d", os.path.abspath(secdir),
            "-n", nickname]
    if password_file:
        args.append("-p")
        args.append(os.path.abspath(password_file))
    if command:
        args.append("-C")
        args.append(command)
    (stdout, stderr, returncode) = ipautil.run(args)
    return (stdout, stderr, returncode)
def stop_tracking(secdir, request_id=None, nickname=None):
    """
    Stop tracking the current request using either the request_id or nickname.
    This assumes that the certmonger service is running.

    :param secdir: NSS database directory holding the certificate.
    :param request_id: certmonger request id, if known.
    :param nickname: certificate nickname, used to look up the request id
        when request_id is not given.
    :return: (stdout, stderr, returncode) tuple from running ipa-getcert.
    :raise RuntimeError: if neither request_id nor nickname was supplied.
    """
    if request_id is None and nickname is None:
        raise RuntimeError('Both request_id and nickname are missing.')
    if nickname:
        # Using the nickname find the certmonger request_id
        criteria = (('cert_storage_location', os.path.abspath(secdir), NPATH),('cert_nickname', nickname, None))
        try:
            request_id = get_request_id(criteria)
            if request_id is None:
                # Nothing is tracked for this nickname; nothing to stop.
                return ('', '', 0)
        except RuntimeError:
            # This means that multiple requests matched, skip it for now
            # Fall back to trying to stop tracking using nickname
            pass
    args = ['/usr/bin/ipa-getcert',
            'stop-tracking',
    ]
    # Prefer the request id when available; otherwise identify the request
    # by nickname and database directory.
    if request_id:
        args.append('-i')
        args.append(request_id)
    else:
        args.append('-n')
        args.append(nickname)
        args.append('-d')
        args.append(os.path.abspath(secdir))
    (stdout, stderr, returncode) = ipautil.run(args)
    return (stdout, stderr, returncode)
def _find_IPA_ca():
    """
    Look through all the certmonger CA files to find the one that
    has id=IPA

    We can use find_request_value because the ca files have the
    same file format.

    :return: full path of the IPA CA file, or None when not found.
    """
    # 'entry' instead of 'file' so the builtin is not shadowed.
    for entry in os.listdir(CA_DIR):
        path = '%s/%s' % (CA_DIR, entry)
        value = find_request_value(path, 'id')
        if value is not None and value.strip() == 'IPA':
            return path
    return None
def add_principal_to_cas(principal):
    """
    If the hostname we were passed to use in ipa-client-install doesn't
    match the value of gethostname() then we need to append
    -k host/HOSTNAME@REALM to the ca helper defined for
    /usr/libexec/certmonger/ipa-submit.

    We also need to restore this on uninstall.

    The certmonger service MUST be stopped in order for this to work.
    """
    cafile = _find_IPA_ca()
    if cafile is None:
        return
    # Use 'with' so the file handles are closed even on errors.
    with open(cafile, 'r') as fp:
        lines = fp.readlines()
    update = False
    for i, line in enumerate(lines):
        # Only patch the helper line, and only when no -k option is set yet.
        if line.startswith('ca_external_helper') and line.find('-k') == -1:
            lines[i] = '%s -k %s\n' % (line.strip(), principal)
            update = True
    if update:
        with open(cafile, 'w') as fp:
            fp.writelines(lines)
def remove_principal_from_cas():
    """
    Remove any -k principal options from the ipa_submit helper.

    The certmonger service MUST be stopped in order for this to work.
    """
    cafile = _find_IPA_ca()
    if cafile is None:
        return
    # Use 'with' so the file handles are closed even on errors.
    with open(cafile, 'r') as fp:
        lines = fp.readlines()
    update = False
    for i, line in enumerate(lines):
        # Keep only the helper path, dropping the -k option and its value.
        if line.startswith('ca_external_helper') and line.find('-k') > 0:
            lines[i] = line.strip().split(' ')[0] + '\n'
            update = True
    if update:
        with open(cafile, 'w') as fp:
            fp.writelines(lines)
# Routines specific to renewing dogtag CA certificates
def get_pin(token, dogtag_constants=None):
    """
    Dogtag stores its NSS pin in a file formatted as token:PIN.

    The caller is expected to handle any exceptions raised.
    """
    if dogtag_constants is None:
        dogtag_constants = dogtag.configured_constants()
    conf_path = dogtag_constants.PASSWORD_CONF_PATH
    with open(conf_path, 'r') as conf_file:
        for entry in conf_file:
            # Malformed lines (no '=') raise ValueError, as documented.
            (name, value) = entry.split('=', 1)
            if name == token:
                return value.strip()
    return None
def dogtag_start_tracking(ca, nickname, pin, pinfile, secdir, command):
    """
    Tell certmonger to start tracking a dogtag CA certificate. These
    are handled differently because their renewal must be done directly
    and not through IPA.

    This uses the generic certmonger command getcert so we can specify
    a different helper.

    command is the script to execute.

    Returns the stdout, stderr and returncode from running ipa-getcert

    This assumes that certmonger is already running.
    """
    if not cert_exists(nickname, os.path.abspath(secdir)):
        raise RuntimeError('Nickname "%s" doesn\'t exist in NSS database "%s"' % (nickname, secdir))
    if command is not None and not os.path.isabs(command):
        # Helpers must live under the arch-appropriate libexec-style dir
        # to satisfy SELinux policy.
        if sys.maxsize > 2**32:
            libpath = 'lib64'
        else:
            libpath = 'lib'
        command = '/usr/%s/ipa/certmonger/%s' % (libpath, command)
    args = ["/usr/bin/getcert", "start-tracking",
            "-d", os.path.abspath(secdir),
            "-n", nickname,
            "-c", ca,
            "-C", command,
           ]
    if pinfile:
        args.append("-p")
        args.append(pinfile)
    else:
        args.append("-P")
        args.append(pin)
    if ca == 'dogtag-ipa-retrieve-agent-submit':
        # We cheat and pass in the nickname as the profile when
        # renewing on a clone. The submit otherwise doesn't pass in the
        # nickname and we need some way to find the right entry in LDAP.
        args.append("-T")
        args.append(nickname)
    (stdout, stderr, returncode) = ipautil.run(args, nolog=[pin])
    # Bug fix: the docstring promises this tuple but it was never returned.
    return (stdout, stderr, returncode)
def check_state(dirs):
    """
    Given a set of directories and nicknames verify that we are no longer
    tracking certificates.

    dirs is a list of directories to test for. We will return a tuple
    of nicknames for any tracked certificates found.

    This can only check for NSS-based certificates.
    """
    reqids = []
    # 'directory' rather than 'dir' so the builtin is not shadowed.
    for directory in dirs:
        reqids.extend(get_requests_for_dir(directory))
    return reqids
if __name__ == '__main__':
request_id = request_cert("/etc/httpd/alias", "Test", "cn=tiger.example.com,O=IPA", "HTTP/tiger.example.com@EXAMPLE.COM")
csr = get_request_value(request_id, 'csr')
print csr
stop_tracking(request_id)
| hatchetation/freeipa | ipapython/certmonger.py | Python | gpl-3.0 | 14,272 |
# proxy module
from traitsui.wx.image_panel import *
| enthought/etsproxy | enthought/traits/ui/wx/image_panel.py | Python | bsd-3-clause | 53 |
import math
def convert_infinity_to_string(number):
    """Map IEEE infinities to their string names; pass other values through.

    :param number: a numeric value
    :return: "Infinity" / "-Infinity" for +/- infinity, otherwise the value
        unchanged.
    """
    if not math.isinf(number):
        return number
    return "-Infinity" if number < 0 else "Infinity"
| CitrineInformatics/python-citrination-client | citrination_client/util/maths.py | Python | apache-2.0 | 205 |
from scattering import scatterer
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
def test_ones():
    """Check Rayleigh backscatter cross-sections for two water drops."""
    drops = scatterer(.1, 0.0, 'water', diameters=np.array([0.04, 0.05]))
    drops.set_scattering_model('rayleigh')
    expected = np.array( [0.01170936, 0.04466766], np.float32 )
    assert_array_almost_equal(drops.sigma_b, expected)
# Run this module's test suite when executed directly.
if __name__ == "__main__":
    run_module_suite()
| dopplershift/Scattering | test/test_scatterer.py | Python | bsd-2-clause | 434 |
"""Functions that ease the use of Django."""
# pylint: disable=protected-access
from __future__ import absolute_import
from django.utils.safestring import mark_safe
def get_fields(model_object, ignore_fields=()):
    """Extract the fields of the model.

    Args:
        model_object (django.models.Model): model instance.
        ignore_fields (list): list of field names to ignore.

    Returns:
        dict. dictionary contain the models's fields.
    """
    skipped = set(ignore_fields)
    result = {}
    # Plain (and foreign-key) fields; parent-link pointers are excluded.
    for field in model_object._meta.fields:
        name = field.name
        if name in skipped or name.endswith("_ptr"):
            continue
        result[name] = getattr(model_object, name)
    # Many-to-many fields are materialized into lists.
    for field in model_object._meta.many_to_many:
        name = field.name
        if name not in skipped:
            result[name] = list(getattr(model_object, name).all())
    return result
def get_sub_model(model_object):
    """Return the model inherited sub class instance.

    Used as a workaround for Django subclasses issues

    Args:
        model_object (django.models.Model): model instance.

    Returns:
        object: sub model instance. None if there is no sub model.
    """
    for subclass in type(model_object).__subclasses__():
        # Django exposes the child row via an attribute named after the
        # subclass, lower-cased.
        attr_name = subclass.__name__.lower()
        if not hasattr(model_object, attr_name):
            continue
        child = getattr(model_object, attr_name)
        # Recurse to find the most derived instance in the chain.
        grandchild = get_sub_model(child)
        return grandchild if grandchild is not None else child
    return None
def linked_unicode(item):
    """Return a unicode string with a HTML link to given item's page.

    Args:
        item (django.db.models.Model): a link to this item will be returned

    Returns:
        str. the result unicode text
    """
    link = "<a href='/admin/%s/%s/%d/'>%s</a>" % (item._meta.app_label,
                                                  type(item).__name__.lower(),
                                                  item.id, item)
    return mark_safe(link)
| gregoil/rotest | src/rotest/common/django_utils/common.py | Python | mit | 2,036 |
#!/usr/bin/python
# Import the required modules
import cv2, os
import numpy as np
from PIL import Image
# For face detection we will use the Haar Cascade provided by OpenCV.
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
def get_images_and_labels(path):
    """Load every image under *path* and return detected faces plus labels.

    The label of each image is the numeric part of its "subjectNN" prefix.
    Returns a (face_images, labels) pair of parallel lists.
    """
    file_paths = [os.path.join(path, name) for name in os.listdir(path)]
    faces = []
    labels = []
    for file_path in file_paths:
        # Load as 8-bit grayscale, the format the Haar detector expects.
        gray = np.array(Image.open(file_path).convert('L'), 'uint8')
        label = int(os.path.split(file_path)[1].split(".")[0].replace("subject", ""))
        # One entry per detected face; images without faces contribute none.
        for (x, y, w, h) in faceCascade.detectMultiScale(gray):
            faces.append(gray[y: y + h, x: x + w])
            labels.append(label)
    return faces, labels
def train_recognizer(path):
    """Trains a face recognizer on a dataset based on a path to a folder containing images"""
    # LBPH (Local Binary Patterns Histograms) is the algorithm used here.
    recognizer = cv2.face.createLBPHFaceRecognizer()
    faces, labels = get_images_and_labels(path)
    cv2.destroyAllWindows()
    recognizer.train(faces, np.array(labels))
    return recognizer
def recognize_face(recognizer, img):
    """Predict the subject shown in *img* with the given recognizer.

    Returns a (label, confidence) pair; (-1, -1) when no face is detected.
    When several faces are found, the prediction for the last one wins.
    """
    frame = np.array(img)
    detections = faceCascade.detectMultiScale(frame)
    label, confidence = -1, -1
    for (x, y, w, h) in detections:
        label, confidence = recognizer.predict(frame[y: y + h, x: x + w])
    return label, confidence
#print(recognize_face(train_recognizer("./yalefaces"), "./yalefaces/subject01.sad"))
| The-J-Person/Barfacecor | face_recognizer.py | Python | mit | 2,704 |
#
#
# Copyright (C) 2006, 2007, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for the LUXI protocol
This module implements the local unix socket protocol. You only need
this module and the opcodes module in the client program in order to
communicate with the master.
The module is also used by the master daemon.
"""
from ganeti import constants
from ganeti import pathutils
from ganeti import objects
import ganeti.rpc.client as cl
from ganeti.rpc.errors import RequestError
from ganeti.rpc.transport import Transport
__all__ = [
# classes:
"Client"
]
REQ_SUBMIT_JOB = constants.LUXI_REQ_SUBMIT_JOB
REQ_SUBMIT_JOB_TO_DRAINED_QUEUE = constants.LUXI_REQ_SUBMIT_JOB_TO_DRAINED_QUEUE
REQ_SUBMIT_MANY_JOBS = constants.LUXI_REQ_SUBMIT_MANY_JOBS
REQ_PICKUP_JOB = constants.LUXI_REQ_PICKUP_JOB
REQ_WAIT_FOR_JOB_CHANGE = constants.LUXI_REQ_WAIT_FOR_JOB_CHANGE
REQ_CANCEL_JOB = constants.LUXI_REQ_CANCEL_JOB
REQ_ARCHIVE_JOB = constants.LUXI_REQ_ARCHIVE_JOB
REQ_CHANGE_JOB_PRIORITY = constants.LUXI_REQ_CHANGE_JOB_PRIORITY
REQ_AUTO_ARCHIVE_JOBS = constants.LUXI_REQ_AUTO_ARCHIVE_JOBS
REQ_QUERY = constants.LUXI_REQ_QUERY
REQ_QUERY_FIELDS = constants.LUXI_REQ_QUERY_FIELDS
REQ_QUERY_JOBS = constants.LUXI_REQ_QUERY_JOBS
REQ_QUERY_FILTERS = constants.LUXI_REQ_QUERY_FILTERS
REQ_REPLACE_FILTER = constants.LUXI_REQ_REPLACE_FILTER
REQ_DELETE_FILTER = constants.LUXI_REQ_DELETE_FILTER
REQ_QUERY_INSTANCES = constants.LUXI_REQ_QUERY_INSTANCES
REQ_QUERY_NODES = constants.LUXI_REQ_QUERY_NODES
REQ_QUERY_GROUPS = constants.LUXI_REQ_QUERY_GROUPS
REQ_QUERY_NETWORKS = constants.LUXI_REQ_QUERY_NETWORKS
REQ_QUERY_EXPORTS = constants.LUXI_REQ_QUERY_EXPORTS
REQ_QUERY_CONFIG_VALUES = constants.LUXI_REQ_QUERY_CONFIG_VALUES
REQ_QUERY_CLUSTER_INFO = constants.LUXI_REQ_QUERY_CLUSTER_INFO
REQ_QUERY_TAGS = constants.LUXI_REQ_QUERY_TAGS
REQ_SET_DRAIN_FLAG = constants.LUXI_REQ_SET_DRAIN_FLAG
REQ_SET_WATCHER_PAUSE = constants.LUXI_REQ_SET_WATCHER_PAUSE
REQ_ALL = constants.LUXI_REQ_ALL
DEF_RWTO = constants.LUXI_DEF_RWTO
WFJC_TIMEOUT = constants.LUXI_WFJC_TIMEOUT
class Client(cl.AbstractClient):
  """High-level client implementation.
  This uses a backing Transport-like class on top of which it
  implements data serialization/deserialization.
  """
  def __init__(self, address=None, timeouts=None, transport=Transport):
    """Constructor for the Client class.
    Arguments are the same as for L{AbstractClient}.
    """
    super(Client, self).__init__(timeouts, transport)
    # Override the version of the protocol:
    self.version = constants.LUXI_VERSION
    # Store the socket address
    if address is None:
      address = pathutils.QUERY_SOCKET
    self.address = address
    self._InitTransport()
  def _GetAddress(self):
    """Return the unix socket path this client talks to."""
    return self.address
  def SetQueueDrainFlag(self, drain_flag):
    """Set or clear the job queue drain flag."""
    return self.CallMethod(REQ_SET_DRAIN_FLAG, (drain_flag, ))
  def SetWatcherPause(self, until):
    """Pause the cluster watcher until the given time."""
    return self.CallMethod(REQ_SET_WATCHER_PAUSE, (until, ))
  def PickupJob(self, job):
    """Ask the master daemon to pick up an existing job."""
    return self.CallMethod(REQ_PICKUP_JOB, (job,))
  def SubmitJob(self, ops):
    """Submit one job built from the given opcodes.
    ConfigObject opcodes are serialized including their private fields;
    other opcodes are serialized via their pickle state.
    """
    ops_state = [op.__getstate__()
                 if not isinstance(op, objects.ConfigObject)
                 else op.ToDict(_with_private=True)
                 for op in ops]
    return self.CallMethod(REQ_SUBMIT_JOB, (ops_state, ))
  def SubmitJobToDrainedQueue(self, ops):
    """Submit a job even when the job queue is drained."""
    ops_state = [op.__getstate__() for op in ops]
    return self.CallMethod(REQ_SUBMIT_JOB_TO_DRAINED_QUEUE, (ops_state, ))
  def SubmitManyJobs(self, jobs):
    """Submit several jobs (each a list of opcodes) in one request."""
    jobs_state = []
    for ops in jobs:
      jobs_state.append([op.__getstate__() for op in ops])
    return self.CallMethod(REQ_SUBMIT_MANY_JOBS, (jobs_state, ))
  @staticmethod
  def _PrepareJobId(request_name, job_id):
    """Convert a job id to int, raising RequestError when invalid."""
    try:
      return int(job_id)
    except ValueError:
      raise RequestError("Invalid parameter passed to %s as job id: "
                         " expected integer, got value %s" %
                         (request_name, job_id))
  def CancelJob(self, job_id, kill=False):
    """Cancel a job; with kill=True the job is killed outright."""
    job_id = Client._PrepareJobId(REQ_CANCEL_JOB, job_id)
    return self.CallMethod(REQ_CANCEL_JOB, (job_id, kill))
  def ArchiveJob(self, job_id):
    """Archive a finished job."""
    job_id = Client._PrepareJobId(REQ_ARCHIVE_JOB, job_id)
    return self.CallMethod(REQ_ARCHIVE_JOB, (job_id, ))
  def ChangeJobPriority(self, job_id, priority):
    """Change the priority of a queued job."""
    job_id = Client._PrepareJobId(REQ_CHANGE_JOB_PRIORITY, job_id)
    return self.CallMethod(REQ_CHANGE_JOB_PRIORITY, (job_id, priority))
  def AutoArchiveJobs(self, age):
    """Archive all finished jobs older than the given age (seconds)."""
    # Leave the server some headroom below the read/write timeout.
    timeout = (DEF_RWTO - 1) // 2
    return self.CallMethod(REQ_AUTO_ARCHIVE_JOBS, (age, timeout))
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial,
                           timeout=WFJC_TIMEOUT):
    """Waits for changes on a job.
    @param job_id: Job ID
    @type fields: list
    @param fields: List of field names to be observed
    @type prev_job_info: None or list
    @param prev_job_info: Previously received job information
    @type prev_log_serial: None or int/long
    @param prev_log_serial: Highest log serial number previously received
    @type timeout: int/float
    @param timeout: Timeout in seconds (values larger than L{WFJC_TIMEOUT} will
      be capped to that value)
    """
    assert timeout >= 0, "Timeout can not be negative"
    return self.CallMethod(REQ_WAIT_FOR_JOB_CHANGE,
                           (job_id, fields, prev_job_info,
                            prev_log_serial,
                            min(WFJC_TIMEOUT, timeout)))
  def WaitForJobChange(self, job_id, fields, prev_job_info, prev_log_serial):
    """Block until the given job changes, retrying on server timeouts."""
    job_id = Client._PrepareJobId(REQ_WAIT_FOR_JOB_CHANGE, job_id)
    while True:
      result = self.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial)
      if result != constants.JOB_NOTCHANGED:
        break
    return result
  def Query(self, what, fields, qfilter):
    """Query for resources/items.
    @param what: One of L{constants.QR_VIA_LUXI}
    @type fields: List of strings
    @param fields: List of requested fields
    @type qfilter: None or list
    @param qfilter: Query filter
    @rtype: L{objects.QueryResponse}
    """
    result = self.CallMethod(REQ_QUERY, (what, fields, qfilter))
    return objects.QueryResponse.FromDict(result)
  def QueryFields(self, what, fields):
    """Query for available fields.
    @param what: One of L{constants.QR_VIA_LUXI}
    @type fields: None or list of strings
    @param fields: List of requested fields
    @rtype: L{objects.QueryFieldsResponse}
    """
    result = self.CallMethod(REQ_QUERY_FIELDS, (what, fields))
    return objects.QueryFieldsResponse.FromDict(result)
  def QueryJobs(self, job_ids, fields):
    """Query the given fields of the given jobs."""
    return self.CallMethod(REQ_QUERY_JOBS, (job_ids, fields))
  def QueryFilters(self, uuids, fields):
    """Query the given fields of the filters with the given uuids."""
    return self.CallMethod(REQ_QUERY_FILTERS, (uuids, fields))
  def ReplaceFilter(self, uuid, priority, predicates, action, reason):
    """Replace (or add) a job filter rule."""
    return self.CallMethod(REQ_REPLACE_FILTER,
                           (uuid, priority, predicates, action, reason))
  def DeleteFilter(self, uuid):
    """Delete the job filter rule with the given uuid."""
    return self.CallMethod(REQ_DELETE_FILTER, (uuid, ))
  def QueryInstances(self, names, fields, use_locking):
    """Query the given fields of the named instances."""
    return self.CallMethod(REQ_QUERY_INSTANCES, (names, fields, use_locking))
  def QueryNodes(self, names, fields, use_locking):
    """Query the given fields of the named nodes."""
    return self.CallMethod(REQ_QUERY_NODES, (names, fields, use_locking))
  def QueryGroups(self, names, fields, use_locking):
    """Query the given fields of the named node groups."""
    return self.CallMethod(REQ_QUERY_GROUPS, (names, fields, use_locking))
  def QueryNetworks(self, names, fields, use_locking):
    """Query the given fields of the named networks."""
    return self.CallMethod(REQ_QUERY_NETWORKS, (names, fields, use_locking))
  def QueryExports(self, nodes, use_locking):
    """Query the exports present on the given nodes."""
    return self.CallMethod(REQ_QUERY_EXPORTS, (nodes, use_locking))
  def QueryClusterInfo(self):
    """Return general information about the cluster."""
    return self.CallMethod(REQ_QUERY_CLUSTER_INFO, ())
  def QueryConfigValues(self, fields):
    """Query the given configuration values."""
    return self.CallMethod(REQ_QUERY_CONFIG_VALUES, (fields, ))
  def QueryTags(self, kind, name):
    """Query the tags of the object identified by kind and name."""
    return self.CallMethod(REQ_QUERY_TAGS, (kind, name))
| ganeti/ganeti | lib/luxi.py | Python | bsd-2-clause | 9,450 |
"""
Interfaces to the QEMU monitor.
:copyright: 2008-2010 Red Hat Inc.
"""
import socket
import time
import threading
import logging
import select
import re
import os
import utils_misc
import passfd_setup
from autotest.client.shared import utils
try:
import json
except ImportError:
logging.warning("Could not import json module. "
"QMP monitor functionality disabled.")
class MonitorError(Exception):
    """Base class for all monitor-related errors."""
    pass
class MonitorConnectError(MonitorError):
    """Raised when connecting to a monitor socket fails."""

    def __init__(self, monitor_name):
        super(MonitorConnectError, self).__init__()
        self.monitor_name = monitor_name

    def __str__(self):
        return "Could not connect to monitor '{0}'".format(self.monitor_name)
class MonitorSocketError(MonitorError):
    """Raised when a socket operation on the monitor connection fails."""

    def __init__(self, msg, e):
        super(MonitorSocketError, self).__init__(msg, e)
        self.msg = msg
        self.e = e

    def __str__(self):
        return "{0} ({1})".format(self.msg, self.e)
class MonitorLockError(MonitorError):
    """Raised when the monitor lock cannot be acquired in time."""
    pass
class MonitorProtocolError(MonitorError):
    """Raised when the monitor's responses violate the expected protocol."""
    pass
class MonitorNotSupportedError(MonitorError):
    """Raised when an operation is not supported by a given monitor."""
    pass
class MonitorNotSupportedCmdError(MonitorNotSupportedError):
    """Raised when a specific command is not supported by a monitor."""

    def __init__(self, monitor, cmd):
        super(MonitorNotSupportedCmdError, self).__init__()
        self.monitor = monitor
        self.cmd = cmd

    def __str__(self):
        return "Not supported cmd '{0}' in monitor '{1}'".format(self.cmd,
                                                                 self.monitor)
class QMPCmdError(MonitorError):
    """Raised when a QMP command returns an error response."""

    def __init__(self, cmd, qmp_args, data):
        super(QMPCmdError, self).__init__(cmd, qmp_args, data)
        self.cmd = cmd
        self.qmp_args = qmp_args
        self.data = data

    def __str__(self):
        return ("QMP command {0!r} failed (arguments: {1!r}, "
                "error message: {2!r})".format(self.cmd, self.qmp_args,
                                               self.data))
def get_monitor_filename(vm, monitor_name):
    """
    Return the filename corresponding to a given monitor name.

    :param vm: The VM object which has the monitor.
    :param monitor_name: The monitor name.
    :return: The string of socket file name for qemu monitor.
    """
    return "/tmp/monitor-{0}-{1}".format(monitor_name, vm.instance)
def get_monitor_filenames(vm):
    """
    Return a list of all monitor filenames (as specified in the VM's
    params).

    :param vm: The VM object which has the monitors.
    """
    filenames = []
    for monitor_name in vm.params.objects("monitors"):
        filenames.append(get_monitor_filename(vm, monitor_name))
    return filenames
def create_monitor(vm, monitor_name, monitor_params):
    """
    Create monitor object and connect to the monitor socket.

    :param vm: The VM object which has the monitor.
    :param monitor_name: The name of this monitor object.
    :param monitor_params: The dict for creating this monitor object.
    :return: The connected, verified monitor object.
    """
    if monitor_params.get("monitor_type") == "qmp":
        if utils_misc.qemu_has_option("qmp", vm.qemu_binary):
            monitor_class = QMPMonitor
        else:
            # Add a "human" monitor on non-qmp version of qemu.
            logging.warn("QMP monitor is unsupported by this version of qemu,"
                         " creating human monitor instead.")
            monitor_class = HumanMonitor
    else:
        monitor_class = HumanMonitor
    logging.info("Connecting to monitor '%s'", monitor_name)
    monitor = monitor_class(vm, monitor_name,
                            get_monitor_filename(vm, monitor_name))
    monitor.verify_responsive()
    return monitor
def wait_for_create_monitor(vm, monitor_name, monitor_params, timeout):
    """
    Wait for the progress of creating monitor object. This function will
    retry to create the Monitor object until timeout.

    :param vm: The VM object which has the monitor.
    :param monitor_name: The name of this monitor object.
    :param monitor_params: The dict for creating this monitor object.
    :param timeout: Time to wait for creating this monitor object.
    :return: The connected monitor object.
    :raise MonitorConnectError: if the monitor could not be created in time.
    """
    # Wait for monitor connection to succeed
    end_time = time.time() + timeout
    while time.time() < end_time:
        try:
            return create_monitor(vm, monitor_name, monitor_params)
        except MonitorError, e:
            logging.warn(e)
            time.sleep(1)
    else:
        # while/else: only reached when the deadline expires without the
        # loop returning a monitor.
        raise MonitorConnectError(monitor_name)
class Monitor:
    """
    Common code for monitor classes.
    """

    # Seconds to wait for the monitor lock before giving up.
    ACQUIRE_LOCK_TIMEOUT = 20
    # Default select() timeout when polling the socket for data.
    DATA_AVAILABLE_TIMEOUT = 0
    # Socket timeout used while connecting to the monitor.
    CONNECT_TIMEOUT = 30

    def __init__(self, vm, name, filename):
        """
        Initialize the instance.
        :param vm: The VM which this monitor belongs to.
        :param name: Monitor identifier (a string)
        :param filename: Monitor socket filename
        :raise MonitorConnectError: Raised if the connection fails
        """
        self.vm = vm
        self.name = name
        self.filename = filename
        # RLock so the owning thread may re-enter while holding the lock.
        self._lock = threading.RLock()
        self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self._socket.settimeout(self.CONNECT_TIMEOUT)
        self._passfd = None
        # Names of commands this monitor supports; filled in by subclasses.
        self._supported_cmds = []
        self.debug_log = False
        self.log_file = os.path.basename(self.filename + ".log")
        try:
            self._socket.connect(filename)
        except socket.error, details:
            raise MonitorConnectError("Could not connect to monitor socket: %s"
                                      % details)

    def __del__(self):
        # Automatically close the connection when the instance is garbage
        # collected
        self._close_sock()
        utils_misc.close_log_file(self.log_file)

    # The following two functions are defined to make sure the state is set
    # exclusively by the constructor call as specified in __getinitargs__().
    def __getstate__(self):
        pass

    def __setstate__(self, state):
        pass

    def __getinitargs__(self):
        # Save some information when pickling -- will be passed to the
        # constructor upon unpickling
        return self.vm, self.name, self.filename, True

    def _close_sock(self):
        """Shut down and close the monitor socket, ignoring socket errors."""
        try:
            self._socket.shutdown(socket.SHUT_RDWR)
        except socket.error:
            pass
        self._socket.close()

    def _acquire_lock(self, timeout=ACQUIRE_LOCK_TIMEOUT):
        """
        Try to acquire the monitor lock for up to timeout seconds.

        :return: True if the lock was acquired, False on timeout.
        """
        end_time = time.time() + timeout
        while time.time() < end_time:
            if self._lock.acquire(False):
                return True
            time.sleep(0.05)
        return False

    def _data_available(self, timeout=DATA_AVAILABLE_TIMEOUT):
        """
        Return True if data can be read from the socket within timeout secs.

        :raise MonitorSocketError: Raised if select() fails on the socket.
        """
        timeout = max(0, timeout)
        try:
            return bool(select.select([self._socket], [], [], timeout)[0])
        except socket.error, e:
            raise MonitorSocketError("Verifying data on monitor socket", e)

    def _recvall(self):
        """Read and return all data currently available on the socket."""
        s = ""
        while self._data_available():
            try:
                data = self._socket.recv(1024)
            except socket.error, e:
                raise MonitorSocketError("Could not receive data from monitor",
                                         e)
            if not data:
                break
            s += data
        return s

    def _has_command(self, cmd):
        """
        Check whether kvm monitor supports 'cmd'.
        :param cmd: command string which will be checked.
        :return: True if cmd is supported, False if not supported.
        """
        if cmd and cmd in self._supported_cmds:
            return True
        return False

    def _log_command(self, cmd, debug=True, extra_str=""):
        """
        Print log message beening sent.
        :param cmd: Command string.
        :param debug: Whether to print the commands.
        :param extra_str: Extra string would be printed in log.
        """
        if self.debug_log or debug:
            logging.debug("(monitor %s) Sending command '%s' %s",
                          self.name, cmd, extra_str)

    def _log_lines(self, log_str):
        """
        Record monitor cmd/output in log file.
        """
        # Best effort only: logging failures must never break the monitor.
        try:
            for l in log_str.splitlines():
                utils_misc.log_line(self.log_file, l)
        except Exception:
            pass

    def correct(self, cmd):
        """
        Automatic conversion "-" and "_" in commands if the translate command
        is supported commands;
        """
        def translate(cmd):
            # Normalize both separators to "-" for comparison purposes.
            return "-".join(re.split("[_-]", cmd))
        if not self._has_command(cmd):
            for _cmd in self._supported_cmds:
                if translate(_cmd) == translate(cmd):
                    logging.info("Convert command %s -> %s", cmd, _cmd)
                    return _cmd
        return cmd

    def is_responsive(self):
        """
        Return True iff the monitor is responsive.
        """
        try:
            self.verify_responsive()
            return True
        except MonitorError:
            return False

    def verify_supported_cmd(self, cmd):
        """
        Verify whether cmd is supported by monitor. If not, raise a
        MonitorNotSupportedCmdError Exception.

        :param cmd: The cmd string need to verify.
        """
        if not self._has_command(cmd):
            raise MonitorNotSupportedCmdError(self.name, cmd)

    # Methods that may be implemented by subclasses:
    def human_monitor_cmd(self, cmd="", timeout=None,
                          debug=True, fd=None):
        """
        Send HMP command
        This method allows code to send HMP commands without the need to check
        if the monitor is QMPMonitor or HumanMonitor.

        :param cmd: human monitor command.
        :param timeout: Time duration to wait for response
        :param debug: Whether to print the commands being sent and responses
        :param fd: file object or file descriptor to pass

        :return: The response to the command
        """
        raise NotImplementedError

    # Methods that should work on both classes, as long as human_monitor_cmd()
    # works:
    # Patterns for parsing 'info numa' output (multiline mode).
    re_numa_nodes = re.compile(r"^([0-9]+) nodes$", re.M)
    re_numa_node_info = re.compile(r"^node ([0-9]+) (cpus|size): (.*)$", re.M)

    @classmethod
    def parse_info_numa(cls, r):
        """
        Parse 'info numa' output

        See info_numa() for information about the return value.
        """
        nodes = cls.re_numa_nodes.search(r)
        if nodes is None:
            raise Exception(
                "Couldn't get number of nodes from 'info numa' output")
        nodes = int(nodes.group(1))
        data = [[0, set()] for i in range(nodes)]
        for nodenr, field, value in cls.re_numa_node_info.findall(r):
            nodenr = int(nodenr)
            if nodenr > nodes:
                raise Exception(
                    "Invalid node number on 'info numa' output: %d", nodenr)
            if field == 'size':
                if not value.endswith(' MB'):
                    raise Exception("Unexpected size value: %s", value)
                megabytes = int(value[:-3])
                data[nodenr][0] = megabytes
            elif field == 'cpus':
                cpus = set([int(v) for v in value.split()])
                data[nodenr][1] = cpus
        data = [tuple(i) for i in data]
        return data

    def info_numa(self):
        """
        Run 'info numa' command and parse returned information

        :return: An array of (ram, cpus) tuples, where ram is the RAM size in
                 MB and cpus is a set of CPU numbers
        """
        r = self.human_monitor_cmd("info numa")
        r = "\n".join(r.splitlines())
        return self.parse_info_numa(r)

    def info(self, what, debug=True):
        """
        Request info about something and return the response.
        """
        raise NotImplementedError

    def info_block(self, debug=True):
        """
        Request info about blocks and return dict of parsed results
        :return: Dict of disk parameters
        """
        info = self.info('block', debug)
        if isinstance(info, str):
            # HMP output: format differs between qemu versions; try the old
            # format first and fall back to the >= 1.5.0 one.
            try:
                return self._parse_info_block_old(info)
            except ValueError:
                return self._parse_info_block_1_5(info)
        else:
            # QMP returns a list of dicts.
            return self._parse_info_block_qmp(info)

    @staticmethod
    def _parse_info_block_old(info):
        """
        Parse output of "info block" into dict of disk params (qemu < 1.5.0)
        """
        blocks = {}
        info = info.split('\n')
        for line in info:
            if not line.strip():
                continue
            line = line.split(':', 1)
            name = line[0].strip()
            blocks[name] = {}
            if line[1].endswith('[not inserted]'):
                blocks[name]['not-inserted'] = 1
                # Strip the trailing " [not inserted]" marker (14 chars).
                line[1] = line[1][:-14]
            for _ in line[1].strip().split(' '):
                (prop, value) = _.split('=', 1)
                if value.isdigit():
                    value = int(value)
                blocks[name][prop] = value
        return blocks

    @staticmethod
    def _parse_info_block_1_5(info):
        """
        Parse output of "info block" into dict of disk params (qemu >= 1.5.0)
        """
        blocks = {}
        info = info.split('\n')
        for line in info:
            if not line.strip():
                continue
            if not line.startswith(' '):    # new block device
                line = line.split(':', 1)
                name = line[0].strip()
                line = line[1][1:]
                blocks[name] = {}
                if line == "[not inserted]":
                    blocks[name]['not-inserted'] = 1
                    continue
                line = line.rsplit(' (', 1)
                if len(line) == 1:  # disk_name
                    # NOTE(review): this stores the one-element list rather
                    # than line[0]; looks like a latent bug -- confirm.
                    blocks[name]['file'] = line
                else:  # disk_name (options)
                    blocks[name]['file'] = line[0]
                    options = (_.strip() for _ in line[1][:-1].split(','))
                    _ = False
                    for option in options:
                        if not _:  # First argument is driver (qcow2, raw, ..)
                            blocks[name]['drv'] = option
                            _ = True
                        elif option == 'read-only':
                            blocks[name]['ro'] = 1
                        elif option == 'encrypted':
                            blocks[name]['encrypted'] = 1
                        else:
                            err = ("_parse_info_block_1_5 got option '%s' "
                                   "which is not yet mapped in autotest. "
                                   "Please contact developers on github.com/"
                                   "autotest." % option)
                            raise NotImplementedError(err)
            else:
                # Indented continuation lines describe the current device.
                try:
                    option, line = line.split(':', 1)
                    option, line = option.strip(), line.strip()
                    if option == "Backing file":
                        line = line.rsplit(' (chain depth: ')
                        blocks[name]['backing_file'] = line[0]
                        blocks[name]['backing_file_depth'] = int(line[1][:-1])
                    elif option == "Removable device":
                        blocks[name]['removable'] = 1
                        if 'not locked' not in line:
                            blocks[name]['locked'] = 1
                        if 'try open' in line:
                            blocks[name]['try-open'] = 1
                except ValueError:
                    continue
        return blocks

    @staticmethod
    def _parse_info_block_qmp(info):
        """
        Parse output of "query block" into dict of disk params
        """
        blocks = {}
        for item in info:
            if not item.get('device'):
                raise ValueError("Incorrect QMP respone, device not set in"
                                 "info block: %s" % info)
            name = item.pop('device')
            blocks[name] = {}
            if 'inserted' not in item:
                blocks[name]['not-inserted'] = True
            else:
                # Flatten the 'inserted' sub-dict into the device entry.
                for key, value in item.pop('inserted', {}).iteritems():
                    blocks[name][key] = value
            for key, value in item.iteritems():
                blocks[name][key] = value
        return blocks

    def close(self):
        """
        Close the connection to the monitor and its log file.
        """
        self._close_sock()
        utils_misc.close_log_file(self.log_file)
class HumanMonitor(Monitor):
"""
Wraps "human monitor" commands.
"""
PROMPT_TIMEOUT = 60
CMD_TIMEOUT = 120
    def __init__(self, vm, name, filename, suppress_exceptions=False):
        """
        Connect to the monitor socket and find the (qemu) prompt.

        :param vm: The VM which this monitor belongs to.
        :param name: Monitor identifier (a string)
        :param filename: Monitor socket filename
        :param suppress_exceptions: If True, connection failures are logged
                                    instead of raised
        :raise MonitorConnectError: Raised if the connection fails and
                suppress_exceptions is False
        :raise MonitorProtocolError: Raised if the initial (qemu) prompt isn't
                found and suppress_exceptions is False
        :note: Other exceptions may be raised.  See cmd()'s
                docstring.
        """
        try:
            Monitor.__init__(self, vm, name, filename)
            self.protocol = "human"
            # Find the initial (qemu) prompt
            s, o = self._read_up_to_qemu_prompt()
            if not s:
                raise MonitorProtocolError("Could not find (qemu) prompt "
                                           "after connecting to monitor. "
                                           "Output so far: %r" % o)
            # Cache the command list so later capability checks are local
            self._get_supported_cmds()
        except MonitorError, e:
            # Never leave a half-open socket behind on a failed handshake
            self._close_sock()
            if suppress_exceptions:
                logging.warn(e)
            else:
                raise
# Private methods
def _read_up_to_qemu_prompt(self, timeout=PROMPT_TIMEOUT):
s = ""
end_time = time.time() + timeout
while self._data_available(end_time - time.time()):
data = self._recvall()
if not data:
break
s += data
try:
lines = s.splitlines()
# Sometimes the qemu monitor lacks a line break before the
# qemu prompt, so we have to be less exigent:
if lines[-1].split()[-1].endswith("(qemu)"):
self._log_lines("\n".join(lines[1:]))
return True, "\n".join(lines[:-1])
except IndexError:
continue
if s:
try:
self._log_lines(s.splitlines()[1:])
except IndexError:
pass
return False, "\n".join(s.splitlines())
    def _send(self, cmd):
        """
        Send a command without waiting for output.

        :param cmd: Command to send
        :raise MonitorLockError: Raised if the lock cannot be acquired
        :raise MonitorSocketError: Raised if a socket error occurs
        """
        if not self._acquire_lock():
            raise MonitorLockError("Could not acquire exclusive lock to send "
                                   "monitor command '%s'" % cmd)
        try:
            try:
                self._socket.sendall(cmd + "\n")
                self._log_lines(cmd)
            except socket.error, e:
                raise MonitorSocketError("Could not send monitor command %r" %
                                         cmd, e)
        finally:
            # Always release, even on socket failure, or every later command
            # would fail to take the lock
            self._lock.release()
def _get_supported_cmds(self):
"""
Get supported human monitor cmds list.
"""
cmds = self.cmd("help", debug=False)
if cmds:
cmd_list = re.findall("^(.*?) ", cmds, re.M)
self._supported_cmds = [c for c in cmd_list if c]
if not self._supported_cmds:
logging.warn("Could not get supported monitor cmds list")
def _log_response(self, cmd, resp, debug=True):
"""
Print log message for monitor cmd's response.
:param cmd: Command string.
:param resp: Response from monitor command.
:param debug: Whether to print the commands.
"""
if self.debug_log or debug:
logging.debug("(monitor %s) Response to '%s'", self.name, cmd)
for l in resp.splitlines():
logging.debug("(monitor %s) %s", self.name, l)
# Public methods
    def cmd(self, cmd, timeout=CMD_TIMEOUT, debug=True, fd=None):
        """
        Send command to the monitor.

        :param cmd: Command to send to the monitor
        :param timeout: Time duration to wait for the (qemu) prompt to return
        :param debug: Whether to print the commands being sent and responses
        :param fd: file object or file descriptor to pass along with cmd
        :return: Output received from the monitor
        :raise MonitorLockError: Raised if the lock cannot be acquired
        :raise MonitorSocketError: Raised if a socket error occurs
        :raise MonitorProtocolError: Raised if the (qemu) prompt cannot be
                found after sending the command
        """
        self._log_command(cmd, debug)
        if not self._acquire_lock():
            raise MonitorLockError("Could not acquire exclusive lock to send "
                                   "monitor command '%s'" % cmd)
        try:
            # Read any data that might be available
            # (discard stale output so the next read starts at our command)
            self._recvall()
            if fd is not None:
                if self._passfd is None:
                    self._passfd = passfd_setup.import_passfd()
                # If command includes a file descriptor, use passfd module
                self._passfd.sendfd(self._socket, fd, "%s\n" % cmd)
            else:
                # Send command
                # NOTE(review): _send() re-acquires self._lock while we hold
                # it -- assumes the lock is reentrant; TODO confirm
                if debug:
                    logging.debug("Send command: %s" % cmd)
                self._send(cmd)
            # Read output
            s, o = self._read_up_to_qemu_prompt(timeout)
            # Remove command echo from output
            o = "\n".join(o.splitlines()[1:])
            # Report success/failure
            if s:
                if o:
                    self._log_response(cmd, o, debug)
                return o
            else:
                msg = ("Could not find (qemu) prompt after command '%s'. "
                       "Output so far: %r" % (cmd, o))
                raise MonitorProtocolError(msg)
        finally:
            self._lock.release()
def human_monitor_cmd(self, cmd="", timeout=CMD_TIMEOUT,
debug=True, fd=None):
"""
Send human monitor command directly
:param cmd: human monitor command.
:param timeout: Time duration to wait for response
:param debug: Whether to print the commands being sent and responses
:param fd: file object or file descriptor to pass
:return: The response to the command
"""
return self.cmd(cmd, timeout, debug, fd)
def verify_responsive(self):
"""
Make sure the monitor is responsive by sending a command.
"""
self.cmd("info status", debug=False)
def get_status(self):
return self.cmd("info status", debug=False)
def verify_status(self, status):
"""
Verify VM status
:param status: Optional VM status, 'running' or 'paused'
:return: return True if VM status is same as we expected
"""
return (status in self.get_status())
# Command wrappers
# Notes:
# - All of the following commands raise exceptions in a similar manner to
# cmd().
# - A command wrapper should use self._has_command if it requires
# information about the monitor's capabilities.
    def send_args_cmd(self, cmdlines, timeout=CMD_TIMEOUT, convert=True):
        """
        Send a command with/without parameters and return its output.
        Have same effect with cmd function.
        Implemented under the same name for both the human and QMP monitors.
        Command with parameters should in following format e.g.:
        'memsave val=0 size=10240 filename=memsave'
        Command without parameter: 'sendkey ctrl-alt-f1'

        :param cmdlines: Commands send to qemu which is separated by ";". For
                         command with parameters command should send in a string
                         with this format:
                         $command $arg_name=$arg_value $arg_name=$arg_value
        :param timeout: Time duration to wait for (qemu) prompt after command
        :param convert: If command need to convert. For commands such as:
                        $command $arg_value
        :return: The output of the command
        :raise MonitorLockError: Raised if the lock cannot be acquired
        :raise MonitorSendError: Raised if the command cannot be sent
        :raise MonitorProtocolError: Raised if the (qemu) prompt cannot be
                found after sending the command
        """
        cmd_output = []
        for cmdline in cmdlines.split(";"):
            if not convert:
                # NOTE(review): with convert=False only the FIRST cmdline is
                # executed -- the return exits the loop.  Presumably callers
                # pass a single command in that mode; verify before changing.
                return self.cmd(cmdline, timeout)
            if "=" in cmdline:
                # Strip "name=" prefixes; the human monitor takes bare values
                command = cmdline.split()[0]
                cmdargs = " ".join(cmdline.split()[1:]).split(",")
                for arg in cmdargs:
                    value = "=".join(arg.split("=")[1:])
                    if arg.split("=")[0] == "cert-subject":
                        # '/' is used as a ',' placeholder in cert subjects
                        value = value.replace('/', ',')
                    command += " " + value
            else:
                command = cmdline
            cmd_output.append(self.cmd(command, timeout))
        if len(cmd_output) == 1:
            return cmd_output[0]
        return cmd_output
def quit(self):
"""
Send "quit" without waiting for output.
"""
self._send("quit")
def info(self, what, debug=True):
"""
Request info about something and return the output.
:param debug: Whether to print the commands being sent and responses
"""
return self.cmd("info %s" % what, debug=debug)
    def query(self, what):
        """
        Alias for info.
        """
        # Kept for API parity with QMPMonitor.query()
        return self.info(what)
def screendump(self, filename, debug=True):
"""
Request a screendump.
:param filename: Location for the screendump
:return: The command's output
"""
return self.cmd(cmd="screendump %s" % filename, debug=debug)
def set_link(self, name, up):
"""
Set link up/down.
:param name: Link name
:param up: Bool value, True=set up this link, False=Set down this link
:return: The response to the command
"""
set_link_cmd = "set_link"
# set_link in RHEL5 host use "up|down" instead of "on|off" which is
# used in RHEL6 host and Fedora host. So here find out the string
# this monitor accept.
o = self.cmd("help %s" % set_link_cmd)
try:
on_str, off_str = re.findall("(\w+)\|(\w+)", o)[0]
except IndexError:
# take a default value if can't get on/off string from monitor.
on_str, off_str = "on", "off"
status = off_str
if up:
status = on_str
return self.cmd("%s %s %s" % (set_link_cmd, name, status))
def live_snapshot(self, device, snapshot_file, snapshot_format="qcow2"):
"""
Take a live disk snapshot.
:param device: device id of base image
:param snapshot_file: image file name of snapshot
:param snapshot_format: image format of snapshot
:return: The response to the command
"""
cmd = ("snapshot_blkdev %s %s %s" %
(device, snapshot_file, snapshot_format))
return self.cmd(cmd)
def block_stream(self, device, speed=None, base=None,
cmd="block_stream", correct=True):
"""
Start block-stream job;
:param device: device ID
:param speed: int type, lmited speed(B/s)
:param base: base file
:param correct: auto correct command, correct by default
:return: The command's output
"""
if correct:
cmd = self.correct(cmd)
self.verify_supported_cmd(cmd)
cmd += " %s" % device
if speed is not None:
cmd += " %sB" % speed
if base:
cmd += " %s" % base
return self.cmd(cmd)
def set_block_job_speed(self, device, speed=0,
cmd="block_job_set_speed", correct=True):
"""
Set limited speed for runnig job on the device
:param device: device ID
:param speed: int type, limited speed(B/s)
:param correct: auto correct command, correct by default
:return: The command's output
"""
if correct:
cmd = self.correct(cmd)
self.verify_supported_cmd(cmd)
cmd += " %s %sB" % (device, speed)
return self.cmd(cmd)
def cancel_block_job(self, device, cmd="block_job_cancel", correct=True):
"""
Cancel running block stream/mirror job on the device
:param device: device ID
:param correct: auto correct command, correct by default
:return: The command's output
"""
if correct:
cmd = self.correct(cmd)
self.verify_supported_cmd(cmd)
cmd += " %s" % device
return self.send_args_cmd(cmd)
    def query_block_job(self, device):
        """
        Get block job status on the device.

        :param device: device ID
        :return: dict about job info, return empty dict if no active job
        """
        job = dict()
        output = str(self.info("block-jobs"))
        for line in output.split("\n"):
            # NOTE(review): the matches below run on `output` (the whole
            # text), not `line` -- presumably only one job is ever active so
            # the first word / last three numbers belong to it; verify
            # before reusing this with multiple concurrent jobs.
            if "No" in re.match("\w+", output).group(0):
                continue
            if device in line:
                if "Streaming" in re.match("\w+", output).group(0):
                    job["type"] = "stream"
                else:
                    job["type"] = "mirror"
                job["device"] = device
                # The job line ends with "... offset LEN ... speed" numbers
                job["offset"] = int(re.findall("\d+", output)[-3])
                job["len"] = int(re.findall("\d+", output)[-2])
                job["speed"] = int(re.findall("\d+", output)[-1])
                break
        return job
def get_backingfile(self, device):
"""
Return "backing_file" path of the device
:param device: device ID
:return: string, backing_file path
"""
backing_file = None
block_info = self.query("block")
try:
pattern = "%s:.*backing_file=([^\s]*)" % device
backing_file = re.search(pattern, block_info, re.M).group(1)
except Exception:
pass
return backing_file
    def block_mirror(self, device, target, speed, sync, format, mode,
                     cmd="drive_mirror", correct=True):
        """
        Start mirror type block device copy job.

        :param device: device ID
        :param target: target image
        :param speed: limited speed, unit is B/s
        :param sync: full copy to target image (unsupported in human monitor)
        :param format: target image format
        :param mode: target image create mode, 'absolute-paths' or 'existing'
        :param cmd: block mirror command
        :param correct: auto correct command, correct by default
        :return: The command's output
        """
        if correct:
            cmd = self.correct(cmd)
        self.verify_supported_cmd(cmd)
        # args intentionally starts with a space; the flag prefixes below
        # prepend "-n "/"-f " in front of it
        args = " %s %s %s" % (device, target, format)
        info = str(self.cmd("help %s" % cmd))
        # Only add optional flags this QEMU's help text advertises
        if (mode == "existing") and "-n" in info:
            args = "-n %s" % args
        if (sync == "full") and "-f" in info:
            args = "-f %s" % args
        if (speed is not None) and ("speed" in info):
            args = "%s %s" % (args, speed)
        cmd = "%s %s" % (cmd, args)
        return self.cmd(cmd)
def block_reopen(self, device, new_image_file, image_format,
cmd="block_job_complete", correct=True):
"""
Reopen new target image
:param device: device ID
:param new_image_file: new image file name
:param image_format: new image file format
:param cmd: image reopen command
:param correct: auto correct command, correct by default
:return: The command's output
"""
if correct:
cmd = self.correct(cmd)
self.verify_supported_cmd(cmd)
args = "%s" % device
info = str(self.cmd("help %s" % cmd))
if "format" in info:
args += " %s %s" % (new_image_file, image_format)
cmd = "%s %s" % (cmd, args)
return self.cmd(cmd)
def migrate(self, uri, full_copy=False, incremental_copy=False, wait=False):
"""
Migrate.
:param uri: destination URI
:param full_copy: If true, migrate with full disk copy
:param incremental_copy: If true, migrate with incremental disk copy
:param wait: If true, wait for completion
:return: The command's output
"""
cmd = "migrate"
if not wait:
cmd += " -d"
if full_copy:
cmd += " -b"
if incremental_copy:
cmd += " -i"
cmd += " %s" % uri
return self.cmd(cmd)
def migrate_set_speed(self, value):
"""
Set maximum speed (in bytes/sec) for migrations.
:param value: Speed in bytes/sec
:return: The command's output
"""
return self.cmd("migrate_set_speed %s" % value)
def migrate_set_downtime(self, value):
"""
Set maximum tolerated downtime (in seconds) for migration.
:param value: maximum downtime (in seconds)
:return: The command's output
"""
return self.cmd("migrate_set_downtime %s" % value)
def sendkey(self, keystr, hold_time=1):
"""
Send key combination to VM.
:param keystr: Key combination string
:param hold_time: Hold time in ms (should normally stay 1 ms)
:return: The command's output
"""
return self.cmd("sendkey %s %s" % (keystr, hold_time))
def mouse_move(self, dx, dy):
"""
Move mouse.
:param dx: X amount
:param dy: Y amount
:return: The command's output
"""
return self.cmd("mouse_move %s %s" % (dx, dy))
def mouse_button(self, state):
"""
Set mouse button state.
:param state: Button state (1=L, 2=M, 4=R)
:return: The command's output
"""
return self.cmd("mouse_button %s" % state)
def getfd(self, fd, name):
"""
Receives a file descriptor
:param fd: File descriptor to pass to QEMU
:param name: File descriptor name (internal to QEMU)
:return: The command's output
"""
return self.cmd("getfd %s" % name, fd=fd)
def system_wakeup(self):
"""
Wakeup suspended guest.
"""
cmd = "system_wakeup"
self.verify_supported_cmd(cmd)
return self.cmd(cmd)
def nmi(self):
"""
Inject a NMI on all guest's CPUs.
"""
return self.cmd("nmi")
def block_resize(self, device, size):
"""
Resize the block device size
:param device: Block device name
:param size: Block device size need to set to. To keep the same with
qmp monitor will use bytes as unit for the block size
:return: Command output
"""
size = int(size) / 1024 / 1024
cmd = "block_resize device=%s,size=%s" % (device, size)
return self.send_args_cmd(cmd)
def eject_cdrom(self, device, force=False):
"""
Eject media of cdrom and open cdrom door;
"""
cmd = "eject"
self.verify_supported_cmd(cmd)
if force:
cmd += " -f "
cmd += " %s" % device
return self.cmd(cmd)
def change_media(self, device, target):
"""
Change media of cdrom of drive;
"""
cmd = "change"
self.verify_supported_cmd(cmd)
cmd += " %s %s" % (device, target)
return self.cmd(cmd)
class QMPMonitor(Monitor):
"""
Wraps QMP monitor commands.
"""
READ_OBJECTS_TIMEOUT = 5
CMD_TIMEOUT = 120
RESPONSE_TIMEOUT = 120
PROMPT_TIMEOUT = 60
    def __init__(self, vm, name, filename, suppress_exceptions=False):
        """
        Connect to the monitor socket, read the greeting message and issue the
        qmp_capabilities command. Also make sure the json module is available.

        :param vm: The VM which this monitor belongs to.
        :param name: Monitor identifier (a string)
        :param filename: Monitor socket filename
        :param suppress_exceptions: If True, connection failures are logged
                                    instead of raised
        :raise MonitorConnectError: Raised if the connection fails and
                suppress_exceptions is False
        :raise MonitorProtocolError: Raised if the no QMP greeting message is
                received and suppress_exceptions is False
        :raise MonitorNotSupportedError: Raised if json isn't available and
                suppress_exceptions is False
        :note: Other exceptions may be raised if the qmp_capabilities command
                fails.  See cmd()'s docstring.
        """
        try:
            Monitor.__init__(self, vm, name, filename)
            self.protocol = "qmp"
            self._greeting = None
            self._events = []
            self._supported_hmp_cmds = []
            # Make sure json is available
            try:
                json
            except NameError:
                raise MonitorNotSupportedError("QMP requires the json module "
                                               "(Python 2.6 and up)")
            # Read greeting message
            # (poll for up to 20s; the server sends it right after connect)
            end_time = time.time() + 20
            output_str = ""
            while time.time() < end_time:
                for obj in self._read_objects():
                    output_str += str(obj)
                    if "QMP" in obj:
                        self._greeting = obj
                        break
                if self._greeting:
                    break
                time.sleep(0.1)
            else:
                # while/else: reached only when the deadline expired without
                # a greeting (i.e. no break was executed)
                raise MonitorProtocolError("No QMP greeting message received."
                                           " Output so far: %s" % output_str)
            # Issue qmp_capabilities
            self.cmd("qmp_capabilities")
            self._get_supported_cmds()
            self._get_supported_hmp_cmds()
        except MonitorError, e:
            # Never leave a half-open socket behind on a failed handshake
            self._close_sock()
            if suppress_exceptions:
                logging.warn(e)
            else:
                raise
# Private methods
def _build_cmd(self, cmd, args=None, q_id=None):
obj = {"execute": cmd}
if args is not None:
obj["arguments"] = args
if q_id is not None:
obj["id"] = q_id
return obj
    def _read_objects(self, timeout=READ_OBJECTS_TIMEOUT):
        """
        Read lines from the monitor and try to decode them.
        Stop when all available lines have been successfully decoded, or when
        timeout expires. If any decoded objects are asynchronous events, store
        them in self._events. Return all decoded objects.

        :param timeout: Time to wait for all lines to decode successfully
        :return: A list of objects
        """
        if not self._data_available():
            return []
        s = ""
        end_time = time.time() + timeout
        while self._data_available(end_time - time.time()):
            s += self._recvall()
            # Make sure all lines are decodable
            for line in s.splitlines():
                if line:
                    try:
                        json.loads(line)
                    except Exception:
                        # Found an incomplete or broken line -- keep reading
                        break
            else:
                # All lines are OK -- stop reading
                # (for/else: only reached when no break fired above)
                break
        # Decode all decodable lines
        objs = []
        for line in s.splitlines():
            try:
                objs += [json.loads(line)]
                self._log_lines(line)
            except Exception:
                # Partial trailing line -- silently dropped; the next call
                # will not see it again, so senders must write whole lines
                pass
        # Keep track of asynchronous events
        self._events += [obj for obj in objs if "event" in obj]
        return objs
    def _send(self, data):
        """
        Send raw data without waiting for response.

        :param data: Data to send
        :raise MonitorSocketError: Raised if a socket error occurs
        """
        # Unlike HumanMonitor._send, no locking here: the callers
        # (cmd()/cmd_raw()) acquire self._lock before calling us
        try:
            self._socket.sendall(data)
            self._log_lines(str(data))
        except socket.error, e:
            raise MonitorSocketError("Could not send data: %r" % data, e)
def _get_response(self, q_id=None, timeout=RESPONSE_TIMEOUT):
"""
Read a response from the QMP monitor.
:param id: If not None, look for a response with this id
:param timeout: Time duration to wait for response
:return: The response dict, or None if none was found
"""
end_time = time.time() + timeout
while self._data_available(end_time - time.time()):
for obj in self._read_objects():
if isinstance(obj, dict):
if q_id is not None and obj.get("id") != q_id:
continue
if "return" in obj or "error" in obj:
return obj
def _get_supported_cmds(self):
"""
Get supported qmp cmds list.
"""
cmds = self.cmd("query-commands", debug=False)
if cmds:
self._supported_cmds = [n["name"] for n in cmds if
n.has_key("name")]
if not self._supported_cmds:
logging.warn("Could not get supported monitor cmds list")
def _get_supported_hmp_cmds(self):
"""
Get supported human monitor cmds list.
"""
cmds = self.human_monitor_cmd("help", debug=False)
if cmds:
cmd_list = re.findall(
r"(?:^\w+\|(\w+)\s)|(?:^(\w+?)\s)", cmds, re.M)
self._supported_hmp_cmds = [(i + j) for i, j in cmd_list if i or j]
if not self._supported_cmds:
logging.warn("Could not get supported monitor cmds list")
def _has_hmp_command(self, cmd):
"""
Check wheter monitor support hmp 'cmd'.
:param cmd: command string which will be checked.
:return: True if cmd is supported, False if not supported.
"""
if cmd and cmd in self._supported_hmp_cmds:
return True
return False
def verify_supported_hmp_cmd(self, cmd):
"""
Verify whether cmd is supported by hmp monitor.
If not, raise a MonitorNotSupportedCmdError Exception.
:param cmd: The cmd string need to verify.
"""
if not self._has_hmp_command(cmd):
raise MonitorNotSupportedCmdError(self.name, cmd)
    def _log_response(self, cmd, resp, debug=True):
        """
        Print log message for monitor cmd's response.

        :param cmd: Command string.
        :param resp: Response from monitor command.
        :param debug: Whether to print the commands.
        """
        def _log_output(o, indent=0):
            # Single log line, prefixed with the monitor name
            logging.debug("(monitor %s) %s%s",
                          self.name, " " * indent, o)
        def _dump_list(li, indent=0):
            for l in li:
                if isinstance(l, dict):
                    _dump_dict(l, indent + 2)
                else:
                    _log_output(str(l), indent)
        def _dump_dict(di, indent=0):
            # Mutually recursive with _dump_list: pretty-prints nested QMP
            # structures, two extra spaces per nesting level
            for k, v in di.iteritems():
                o = "%s%s: " % (" " * indent, k)
                if isinstance(v, dict):
                    _log_output(o, indent)
                    _dump_dict(v, indent + 2)
                elif isinstance(v, list):
                    _log_output(o, indent)
                    _dump_list(v, indent + 2)
                else:
                    o += str(v)
                    _log_output(o, indent)
        if self.debug_log or debug:
            logging.debug("(monitor %s) Response to '%s' "
                          "(re-formated)", self.name, cmd)
            if isinstance(resp, dict):
                _dump_dict(resp)
            elif isinstance(resp, list):
                _dump_list(resp)
            else:
                for l in str(resp).splitlines():
                    _log_output(l)
# Public methods
    def cmd(self, cmd, args=None, timeout=CMD_TIMEOUT, debug=True, fd=None):
        """
        Send a QMP monitor command and return the response.

        Note: an id is automatically assigned to the command and the response
        is checked for the presence of the same id.

        :param cmd: Command to send
        :param args: A dict containing command arguments, or None
        :param timeout: Time duration to wait for response
        :param debug: Whether to print the commands being sent and responses
        :param fd: file object or file descriptor to pass

        :return: The response received (the "return" payload); None when the
                 response carries an empty "return"
        :raise MonitorLockError: Raised if the lock cannot be acquired
        :raise MonitorSocketError: Raised if a socket error occurs
        :raise MonitorProtocolError: Raised if no response is received
        :raise QMPCmdError: Raised if the response is an error message
                            (the exception's args are (cmd, args, data)
                            where data is the error data)
        """
        self._log_command(cmd, debug)
        if not self._acquire_lock():
            raise MonitorLockError("Could not acquire exclusive lock to send "
                                   "QMP command '%s'" % cmd)
        try:
            # Read any data that might be available
            # (flush stale objects so our matching starts clean)
            self._read_objects()
            # Send command
            # A random id lets us find our response among unrelated objects
            q_id = utils_misc.generate_random_string(8)
            cmdobj = self._build_cmd(cmd, args, q_id)
            if debug:
                logging.debug("Send command: %s" % cmdobj)
            if fd is not None:
                if self._passfd is None:
                    self._passfd = passfd_setup.import_passfd()
                # If command includes a file descriptor, use passfd module
                self._passfd.sendfd(
                    self._socket, fd, json.dumps(cmdobj) + "\n")
            else:
                self._send(json.dumps(cmdobj) + "\n")
            # Read response
            r = self._get_response(q_id, timeout)
            if r is None:
                raise MonitorProtocolError("Received no response to QMP "
                                           "command '%s', or received a "
                                           "response with an incorrect id"
                                           % cmd)
            if "return" in r:
                ret = r["return"]
                if ret:
                    self._log_response(cmd, ret, debug)
                return ret
            if "error" in r:
                raise QMPCmdError(cmd, args, r["error"])
            # NOTE(review): a response with neither "return" nor "error"
            # falls through here and returns None
        finally:
            self._lock.release()
def cmd_raw(self, data, timeout=CMD_TIMEOUT):
"""
Send a raw string to the QMP monitor and return the response.
Unlike cmd(), return the raw response dict without performing any
checks on it.
:param data: The data to send
:param timeout: Time duration to wait for response
:return: The response received
:raise MonitorLockError: Raised if the lock cannot be acquired
:raise MonitorSocketError: Raised if a socket error occurs
:raise MonitorProtocolError: Raised if no response is received
"""
if not self._acquire_lock():
raise MonitorLockError("Could not acquire exclusive lock to send "
"data: %r" % data)
try:
self._read_objects()
self._send(data)
r = self._get_response(None, timeout)
if r is None:
raise MonitorProtocolError("Received no response to data: %r" %
data)
return r
finally:
self._lock.release()
def cmd_obj(self, obj, timeout=CMD_TIMEOUT):
"""
Transform a Python object to JSON, send the resulting string to the QMP
monitor, and return the response.
Unlike cmd(), return the raw response dict without performing any
checks on it.
:param obj: The object to send
:param timeout: Time duration to wait for response
:return: The response received
:raise MonitorLockError: Raised if the lock cannot be acquired
:raise MonitorSocketError: Raised if a socket error occurs
:raise MonitorProtocolError: Raised if no response is received
"""
return self.cmd_raw(json.dumps(obj) + "\n", timeout)
def cmd_qmp(self, cmd, args=None, q_id=None, timeout=CMD_TIMEOUT):
"""
Build a QMP command from the passed arguments, send it to the monitor
and return the response.
Unlike cmd(), return the raw response dict without performing any
checks on it.
:param cmd: Command to send
:param args: A dict containing command arguments, or None
:param id: An id for the command, or None
:param timeout: Time duration to wait for response
:return: The response received
:raise MonitorLockError: Raised if the lock cannot be acquired
:raise MonitorSocketError: Raised if a socket error occurs
:raise MonitorProtocolError: Raised if no response is received
"""
return self.cmd_obj(self._build_cmd(cmd, args, q_id), timeout)
def verify_responsive(self):
"""
Make sure the monitor is responsive by sending a command.
"""
self.cmd(cmd="query-status", debug=False)
def get_status(self):
"""
Get VM status.
:return: return VM status
"""
return self.cmd(cmd="query-status", debug=False)
def verify_status(self, status):
"""
Verify VM status
:param status: Optional VM status, 'running' or 'paused'
:return: return True if VM status is same as we expected
"""
o = dict(self.cmd(cmd="query-status", debug=False))
if status == 'paused':
return (o['running'] is False)
if status == 'running':
return (o['running'] is True)
if o['status'] == status:
return True
return False
def get_events(self):
"""
Return a list of the asynchronous events received since the last
clear_events() call.
:return: A list of events (the objects returned have an "event" key)
:raise MonitorLockError: Raised if the lock cannot be acquired
"""
if not self._acquire_lock():
raise MonitorLockError("Could not acquire exclusive lock to read "
"QMP events")
try:
self._read_objects()
return self._events[:]
finally:
self._lock.release()
def get_event(self, name):
"""
Look for an event with the given name in the list of events.
:param name: The name of the event to look for (e.g. 'RESET')
:return: An event object or None if none is found
"""
for e in self.get_events():
if e.get("event") == name:
return e
def human_monitor_cmd(self, cmd="", timeout=CMD_TIMEOUT,
debug=True, fd=None):
"""
Run human monitor command in QMP through human-monitor-command
:param cmd: human monitor command.
:param timeout: Time duration to wait for response
:param debug: Whether to print the commands being sent and responses
:param fd: file object or file descriptor to pass
:return: The response to the command
"""
self._log_command(cmd, extra_str="(via Human Monitor)")
args = {"command-line": cmd}
ret = self.cmd("human-monitor-command", args, timeout, False, fd)
if ret:
self._log_response(cmd, ret, debug)
return ret
def clear_events(self):
"""
Clear the list of asynchronous events.
:raise MonitorLockError: Raised if the lock cannot be acquired
"""
if not self._acquire_lock():
raise MonitorLockError("Could not acquire exclusive lock to clear "
"QMP event list")
self._events = []
self._lock.release()
def clear_event(self, name):
"""
Clear a kinds of events in events list only.
:raise MonitorLockError: Raised if the lock cannot be acquired
"""
if not self._acquire_lock():
raise MonitorLockError("Could not acquire exclusive lock to clear "
"QMP event list")
while True:
event = self.get_event(name)
if event:
self._events.remove(event)
else:
break
self._lock.release()
    def get_greeting(self):
        """
        Return QMP greeting message.
        """
        # Greeting dict captured from the server during __init__
        return self._greeting
# Command wrappers
# Note: all of the following functions raise exceptions in a similar manner
# to cmd().
def send_args_cmd(self, cmdlines, timeout=CMD_TIMEOUT, convert=True):
"""
Send a command with/without parameters and return its output.
Have same effect with cmd function.
Implemented under the same name for both the human and QMP monitors.
Command with parameters should in following format e.g.:
'memsave val=0 size=10240 filename=memsave'
Command without parameter: 'query-vnc'
:param cmdlines: Commands send to qemu which is separated by ";". For
command with parameters command should send in a string
with this format:
$command $arg_name=$arg_value $arg_name=$arg_value
:param timeout: Time duration to wait for (qemu) prompt after command
:param convert: If command need to convert. For commands not in standard
format such as: $command $arg_value
:return: The response to the command
:raise MonitorLockError: Raised if the lock cannot be acquired
:raise MonitorSendError: Raised if the command cannot be sent
:raise MonitorProtocolError: Raised if no response is received
"""
cmd_output = []
for cmdline in cmdlines.split(";"):
command = cmdline.split()[0]
if not self._has_command(command):
if "=" in cmdline:
command = cmdline.split()[0]
self.verify_supported_hmp_cmd(command)
cmdargs = " ".join(cmdline.split()[1:]).split(",")
for arg in cmdargs:
value = "=".join(arg.split("=")[1:])
if arg.split("=")[0] == "cert-subject":
value = value.replace('/', ',')
command += " " + value
else:
command = cmdline
cmd_output.append(self.human_monitor_cmd(command))
else:
cmdargs = " ".join(cmdline.split()[1:]).split(",")
args = {}
for arg in cmdargs:
opt = arg.split('=')
value = "=".join(opt[1:])
try:
if re.match("^[0-9]+$", value):
value = int(value)
elif re.match("^[0-9]+\.[0-9]*$", value):
value = float(value)
elif re.findall("true", value, re.I):
value = True
elif re.findall("false", value, re.I):
value = False
else:
value = value.strip()
if opt[0] == "cert-subject":
value = value.replace('/', ',')
if opt[0]:
args[opt[0].strip()] = value
except:
logging.debug("Fail to create args, please check cmd")
cmd_output.append(self.cmd(command, args, timeout=timeout))
if len(cmd_output) == 1:
return cmd_output[0]
return cmd_output
def quit(self):
"""
Send "quit" and return the response.
"""
return self.cmd("quit")
def info(self, what, debug=True):
"""
Request info about something and return the response.
"""
cmd = "query-%s" % what
if not self._has_command(cmd):
cmd = "info %s" % what
return self.human_monitor_cmd(cmd, debug=debug)
return self.cmd(cmd, debug=debug)
    def query(self, what, debug=True):
        """
        Alias for info.
        """
        # Kept for backward compatibility with callers using query()
        return self.info(what, debug)
def screendump(self, filename, debug=True):
"""
Request a screendump.
:param filename: Location for the screendump
:param debug: Whether to print the commands being sent and responses
:return: The response to the command
"""
cmd = "screendump"
if not self._has_command(cmd):
self.verify_supported_hmp_cmd(cmd)
cmdline = "%s %s" % (cmd, filename)
return self.human_monitor_cmd(cmdline, debug=debug)
args = {"filename": filename}
return self.cmd(cmd=cmd, args=args, debug=debug)
def sendkey(self, keystr, hold_time=1):
"""
Send key combination to VM.
:param keystr: Key combination string
:param hold_time: Hold time in ms (should normally stay 1 ms)
:return: The response to the command
"""
return self.human_monitor_cmd("sendkey %s %s" % (keystr, hold_time))
    def migrate(self, uri, full_copy=False, incremental_copy=False, wait=False):
        """
        Migrate.

        :param uri: destination URI
        :param full_copy: If true, migrate with full disk copy
        :param incremental_copy: If true, migrate with incremental disk copy
        :param wait: If true, wait for completion (NOTE: not referenced by
                     this QMP implementation)
        :return: The response to the command, or None when the "connection
                 still initializing" error class was swallowed
        """
        args = {"uri": uri,
                "blk": full_copy,
                "inc": incremental_copy}
        # Strip stray double quotes from the URI -- presumably guards
        # against quoted values coming from parameter files; verify
        args['uri'] = re.sub('"', "", args['uri'])
        try:
            return self.cmd("migrate", args)
        except QMPCmdError, e:
            if e.data['class'] in ['SockConnectInprogress', 'GenericError']:
                # Non-fatal: the destination socket isn't accepting yet
                logging.debug(
                    "Migrate socket connection still initializing...")
            else:
                raise e
def migrate_set_speed(self, value):
"""
Set maximum speed (in bytes/sec) for migrations.
:param value: Speed in bytes/sec
:return: The response to the command
"""
value = utils.convert_data_size(value, "M")
args = {"value": value}
return self.cmd("migrate_set_speed", args)
def set_link(self, name, up):
"""
Set link up/down.
:param name: Link name
:param up: Bool value, True=set up this link, False=Set down this link
:return: The response to the command
"""
return self.send_args_cmd("set_link name=%s,up=%s" % (name, str(up)))
def migrate_set_downtime(self, value):
"""
Set maximum tolerated downtime (in seconds) for migration.
:param value: maximum downtime (in seconds)
:return: The command's output
"""
val = value * 10 ** 9
args = {"value": val}
return self.cmd("migrate_set_downtime", args)
    def live_snapshot(self, device, snapshot_file, snapshot_format="qcow2"):
        """
        Take a live disk snapshot.

        :param device: device id of base image
        :param snapshot_file: image file name of snapshot
        :param snapshot_format: image format of snapshot
        :return: The response to the command
        """
        # Maps directly onto the QMP "blockdev-snapshot-sync" command.
        args = {"device": device,
                "snapshot-file": snapshot_file,
                "format": snapshot_format}
        return self.cmd("blockdev-snapshot-sync", args)
    def block_stream(self, device, speed=None, base=None,
                     cmd="block-stream", correct=True):
        """
        Start block-stream job;

        :param device: device ID
        :param speed: int type, limited speed(B/s); omitted when None
        :param base: base file; omitted when falsy
        :param cmd: command name to issue (may be vendor-prefixed)
        :param correct: auto correct command, correct by default
        :return: The command's output
        """
        if correct:
            # Rewrites cmd to the spelling this qemu actually supports.
            cmd = self.correct(cmd)
        self.verify_supported_cmd(cmd)
        args = {"device": device}
        if speed is not None:
            args["speed"] = speed
        if base:
            args["base"] = base
        return self.cmd(cmd, args)
    def set_block_job_speed(self, device, speed=0,
                            cmd="block-job-set-speed", correct=True):
        """
        Set limited speed for runnig job on the device

        :param device: device ID
        :param speed: int type, limited speed(B/s); 0 means unlimited
        :param cmd: command name to issue (may be vendor-prefixed)
        :param correct: auto correct command, correct by default
        :return: The command's output
        """
        if correct:
            cmd = self.correct(cmd)
        self.verify_supported_cmd(cmd)
        args = {"device": device,
                "speed": speed}
        return self.cmd(cmd, args)
    def cancel_block_job(self, device, cmd="block-job-cancel", correct=True):
        """
        Cancel running block stream/mirror job on the device

        :param device: device ID
        :param cmd: command name to issue (may be vendor-prefixed)
        :param correct: auto correct command, correct by default
        :return: The command's output
        """
        if correct:
            cmd = self.correct(cmd)
        self.verify_supported_cmd(cmd)
        args = {"device": device}
        return self.cmd(cmd, args)
    def query_block_job(self, device):
        """
        Get block job status on the device

        :param device: device ID
        :return: dict about job info, return empty dict if no active job
        """
        job = dict()
        output = str(self.info("block-jobs"))
        try:
            # NOTE(review): the reply is parsed with eval(); tolerable only
            # because it comes from the local qemu monitor, never user input.
            job = filter(lambda x: x.get("device") == device,
                         eval(output))[0]
        except Exception:
            # No matching job or unparsable reply: fall through to {}.
            pass
        return job
    def get_backingfile(self, device):
        """
        Return "backing_file" path of the device

        :param device: device ID
        :return: string, backing_file path; None when the device is missing
            or has no backing file
        """
        backing_file = None
        block_info = self.query("block")
        try:
            image_info = filter(lambda x: x["device"] == device, block_info)[0]
            backing_file = image_info["inserted"].get("backing_file")
        except Exception:
            # Device not present / nothing inserted: keep None.
            pass
        return backing_file
    def block_mirror(self, device, target, speed, sync, format, mode,
                     cmd="drive-mirror", correct=True):
        """
        Start mirror type block device copy job

        :param device: device ID
        :param target: target image
        :param speed: limited speed, unit is B/s; omitted when falsy
        :param sync: what parts of the disk image should be copied to the
                     destination;
        :param format: target image format; omitted when falsy
        :param mode: 'absolute-paths' or 'existing'; omitted when falsy
        :param cmd: block mirror command
        :param correct: auto correct command, correct by default
        :return: The command's output
        """
        if correct:
            cmd = self.correct(cmd)
        self.verify_supported_cmd(cmd)
        args = {"device": device,
                "target": target}
        # RHEL's downstream "__com.redhat_*" variant spells the sync
        # argument "full" instead of "sync".
        if cmd.startswith("__com.redhat"):
            args["full"] = sync
        else:
            args["sync"] = sync
        if mode:
            args["mode"] = mode
        if format:
            args["format"] = format
        if speed:
            args["speed"] = speed
        return self.cmd(cmd, args)
    def block_reopen(self, device, new_image_file, image_format,
                     cmd="block-job-complete", correct=True):
        """
        Reopen new target image;

        :param device: device ID
        :param new_image_file: new image file name (only sent for the
            downstream "__*" command variants)
        :param image_format: new image file format (ditto)
        :param cmd: image reopen command
        :param correct: auto correct command, correct by default
        :return: the command's output
        """
        if correct:
            cmd = self.correct(cmd)
        self.verify_supported_cmd(cmd)
        args = {"device": device}
        # Upstream "block-job-complete" takes only the device; the vendor
        # prefixed variants additionally need the new image parameters.
        if cmd.startswith("__"):
            args["new-image-file"] = new_image_file
            args["format"] = image_format
        return self.cmd(cmd, args)
    def getfd(self, fd, name):
        """
        Receives a file descriptor

        :param fd: File descriptor to pass to QEMU (sent over the monitor
            socket as ancillary data)
        :param name: File descriptor name (internal to QEMU)
        :return: The response to the command
        """
        args = {"fdname": name}
        return self.cmd("getfd", args, fd=fd)
    def system_wakeup(self):
        """
        Wakeup suspended guest.

        :return: The response to the command
        """
        cmd = "system_wakeup"
        self.verify_supported_cmd(cmd)
        return self.cmd(cmd)
    def nmi(self):
        """
        Inject a NMI on all guest's CPUs.

        :return: The response to the command
        """
        return self.cmd("inject-nmi")
    def block_resize(self, device, size):
        """
        Resize the block device size

        :param device: Block device name
        :param size: Block device size need to set to. Unit is bytes.
        :return: Command output
        """
        # Uses the args-style command parser rather than a raw QMP dict.
        cmd = "block_resize device=%s,size=%s" % (device, size)
        return self.send_args_cmd(cmd)
    def eject_cdrom(self, device, force=False):
        """
        Eject media of cdrom and open cdrom door;

        :param device: device ID of the cdrom drive
        :param force: force the eject even if the medium is locked
        :return: The response to the command
        """
        cmd = "eject"
        self.verify_supported_cmd(cmd)
        args = {"device": device, "force": force}
        return self.cmd(cmd, args)
    def change_media(self, device, target):
        """
        Change media of cdrom of drive;

        :param device: device ID of the drive
        :param target: path of the new medium to insert
        :return: The response to the command
        """
        cmd = "change"
        self.verify_supported_cmd(cmd)
        args = {"device": device, "target": target}
        return self.cmd(cmd, args)
| rbian/virt-test | virttest/qemu_monitor.py | Python | gpl-2.0 | 67,086 |
# coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import qualities
class DumpertIE(InfoExtractor):
    """Extractor for video pages hosted on dumpert.nl."""
    _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/mediabase/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
    _TEST = {
        'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/',
        'md5': '1b9318d7d5054e7dcb9dc7654f21d643',
        'info_dict': {
            'id': '6646981/951bc60f',
            'ext': 'mp4',
            'title': 'Ik heb nieuws voor je',
            'description': 'Niet schrikken hoor',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The site hides NSFW items unless these opt-in cookies are sent.
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'nsfw=1; cpc=10')
        webpage = self._download_webpage(request, video_id)
        # Media variants are delivered as a base64-encoded JSON blob.
        encoded = self._search_regex(
            r'data-files="([^"]+)"', webpage, 'data files')
        decoded = base64.b64decode(encoded.encode('utf-8')).decode('utf-8')
        media = self._parse_json(decoded, video_id)
        rank = qualities(['flv', 'mobile', 'tablet', '720p'])
        formats = []
        for fmt_id, fmt_url in media.items():
            if fmt_id == 'still':
                # "still" is the thumbnail, not a playable format.
                continue
            formats.append({
                'url': fmt_url,
                'format_id': fmt_id,
                'quality': rank(fmt_id),
            })
        self._sort_formats(formats)
        title = self._html_search_meta(
            'title', webpage) or self._og_search_title(webpage)
        description = self._html_search_meta(
            'description', webpage) or self._og_search_description(webpage)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': media.get('still') or self._og_search_thumbnail(webpage),
            'formats': formats
        }
| apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/dumpert.py | Python | unlicense | 1,957 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ver 16 - 28 July 2019 -
from urllib.request import *
import json
import time
import mysql.connector
from mysql.connector import errorcode
import string
import sys
import datetime
from db import *
import threading
#from threading import Thread
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
from apcaccess import status as apc
# Queue-server connection settings ('' host = local machine).
HOST = ''
# Ports of the shared-queue servers this process may connect to; only
# PORT2 (the SQL queue) is used below.
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
PORT3 = 5014
PORT4 = 5015
PORT5 = 5016
# Shared secret for the multiprocessing SyncManager connections.
AUTHKEY = str("123456").encode("utf-8")
# Prefix prepended to every SQL string pushed onto the queue; presumably
# a routing marker interpreted by the queue consumer -- TODO confirm.
CMDPREFIXOUT = "9"
def output(o, x):
    """Print a tagged, timestamped message and flush stdout immediately."""
    timestamp = str(datetime.datetime.now().time())[:8]
    print("%s %s %s" % (o, timestamp, x))
    sys.stdout.flush()
def printDBError(x, e):
    """Log a database error tagged with its origin.

    NOTE(review): this name is re-defined later in this file; the later
    definition wins at import time.
    """
    output(x, "Error: " + str(e)) # errno, sqlstate, msg values
# -- DB Connection ---------------------------
def QueueServerClient(HOST, PORT, AUTHKEY):
    """Connect to a remote SyncManager queue server and return the manager.

    :param HOST: queue-server host ('' = localhost)
    :param PORT: queue-server TCP port
    :param AUTHKEY: shared-secret bytes for the manager handshake
    :return: a connected QueueManager exposing get_queue()/get_name()/
        get_description() proxies
    """
    class QueueManager(SyncManager):
        pass
    # Register the proxies the server side exposes.
    QueueManager.register('get_queue')
    QueueManager.register('get_name')
    QueueManager.register('get_description')
    manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
    manager.connect() # This starts the connected client
    return manager
# -- DB Connection ---------------------------
# Module-level bootstrap: open the DB and load runtime parameters.
# NOTE(review): if connect() fails, `db` stays unbound and the cursor
# creation below raises NameError -- the error branches only log.
try:
    db = mysql.connector.connect(**config)
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        output("DB", "Something is wrong with your user name or password")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        output("DB", "Database does not exists")
    else:
        output("DB", err)
else:
    output("METEO", "Start procedure")
    db.commit()
# -- END DB Connection ---------------------------
curM = db.cursor()
curF = db.cursor()
curM2 = db.cursor()
curF2 = db.cursor()
curU = db.cursor()
cnt = 10
# -- get meteo parameters ------------------------#
sql = "SELECT api, location, url FROM tbapi WHERE tbapi.service = 'weather'"
curM.execute(sql)
row = curM.fetchone()
if row:
    apiM=row[0]
    locationM=row[1]
    urlM=row[2]
# -- get forecast parameters ------------------------#
sql = "SELECT api, location, url FROM tbapi WHERE tbapi.service = 'forecast'"
curF.execute(sql)
row = curF.fetchone()
if row:
    apiF=row[0]
    locationF=row[1]
    urlF=row[2]
# -- get nodes to send meteo and forecast ------------#
sql = "SELECT id FROM tbnode WHERE sendmeteo = 1"
curF.execute(sql)
result = curF.fetchall()
rc = len(result)
if ( rc > 0):
    nodeList = [[] for i in range(rc)]
    cnt = 0
    for id in result:
        nodeList[cnt] = id[0]
        cnt = cnt + 1
elif ( rc == 0):
    # Sentinel: 9999 means "no nodes"; the exec* functions skip it.
    nodeList = [[] for i in range(1)]
    nodeList[0] = 9999
def getTemp():
    """Fetch the current weather from OpenWeatherMap.

    On success populates the module globals temp_c, relative_humidity,
    pressure and meteo (weather condition id).

    :return: "Ok" on success, "error" on any failure (already printed).
    """
    global temp_c
    global relative_humidity
    global pressure
    global meteo
    try:
        f = urlopen(urlM + "&id=" + locationM + "&appid=" + apiM)
        try:
            # Close the response even when JSON parsing fails; the
            # original only closed it on the success path (leak).
            dataJSON = json.loads(f.read().decode())
        finally:
            f.close()
        temp_c = float(dataJSON['main']['temp'])
        relative_humidity = int(dataJSON['main']['humidity'])
        meteo = dataJSON['weather'][0]['id']
        pressure = dataJSON['main']['pressure']
        return "Ok"
    except Exception as e:
        print("Meteo " + str(e))
        return "error"
def getForecast():
    """Fetch the 4-day forecast from OpenWeatherMap's 3-hourly endpoint.

    On success populates the module globals forecast_day_1..forecast_day_4
    (weather condition ids) and t1min..t4max (temperatures). Negative
    temperatures are encoded as abs(t) + 9000 so they can travel over the
    integer-only node protocol.

    :return: "Ok" on success, "error" on any failure (already printed).
    """
    global forecast_day_1
    global forecast_day_2
    global forecast_day_3
    global forecast_day_4
    global t1min, t1max, t2min, t2max, t3min, t3max, t4min, t4max

    def encode(temp):
        # Node protocol carries unsigned ints only: e.g. -5 -> 9005.
        return abs(temp) + 9000 if temp < 0 else temp

    try:
        f = urlopen(urlF + "&id=" + locationF + "&appid=" + apiF)
        try:
            dataJSON = json.loads(f.read().decode())
        finally:
            f.close()
        # Offset of the next 3-hourly slot: hour 0-2 -> 7, 3-5 -> 6, ...,
        # 21-23 -> 0 (equivalent to the lookup table in the original).
        base = 7 - datetime.datetime.now().hour // 3
        slots = dataJSON['list']
        forecast_day_1 = slots[base + 4]['weather'][0]['id']
        forecast_day_2 = slots[base + 12]['weather'][0]['id']
        forecast_day_3 = slots[base + 20]['weather'][0]['id']
        forecast_day_4 = slots[base + 28]['weather'][0]['id']
        # BUGFIX: days 2-4 previously re-encoded t1min/t1max (copy-paste),
        # producing garbage whenever their own readings were negative.
        # Each day now encodes its own reading.
        t1min = encode(int(slots[base + 2]['main']['temp']))
        t1max = encode(int(slots[base + 5]['main']['temp']))
        t2min = encode(int(slots[base + 10]['main']['temp_min']))
        t2max = encode(int(slots[base + 13]['main']['temp_max']))
        t3min = encode(int(slots[base + 18]['main']['temp_min']))
        t3max = encode(int(slots[base + 21]['main']['temp_max']))
        t4min = encode(int(slots[base + 26]['main']['temp_min']))
        t4max = encode(int(slots[base + 28]['main']['temp_max']))
        return "Ok"
    except Exception as e:
        print("Forecast " + str(e))
        return "error"
def printDBError(x, e):
    """Log a database error tagged with its origin.

    NOTE(review): this duplicates an earlier definition of the same name;
    being later in the module, this is the one actually bound at runtime.
    """
    # The original body ended with a bare ``exit`` expression: it merely
    # evaluated the builtin without calling it (dead code) and was removed.
    # If aborting on DB errors is desired, call sys.exit() explicitly.
    output(x, "Error: " + str(e))  # errno, sqlstate, msg values
def parseint(string):
    """Return the integer formed by concatenating every digit of *string*.

    Raises ValueError when *string* contains no digits at all.
    """
    digits = filter(str.isdigit, string)
    return int("".join(digits))
def execForecast():
    """Fetch the forecast and push per-node INSERT statements onto the
    SQL queue (type 8 = condition icons, type 9 = min/max temperatures).

    NOTE(review): if a tbmeteo lookup returns no rows the corresponding
    frcstNicon name stays unbound and the NameError is NOT caught by the
    mysql-only except clause below -- confirm tbmeteo covers all ids.
    """
    global qSql
    output ("Meteo Forecast", "Exec Forecast")
    ret = getForecast()
    try:
        if (ret == "Ok"):
            # Map each OpenWeatherMap condition id onto a local icon id.
            sql = "SELECT DISTINCT id FROM tbmeteo WHERE tbmeteo.condition = '" + str(forecast_day_1) + "'"
            #print(sql)
            curF.execute(sql)
            for (id) in curF:
                frcst1icon = id[0]
            sql = "SELECT DISTINCT id FROM tbmeteo WHERE tbmeteo.condition = '" + str(forecast_day_2) + "'"
            curF.execute(sql)
            for (id) in curF:
                frcst2icon = id[0]
            sql = "SELECT DISTINCT id FROM tbmeteo WHERE tbmeteo.condition = '" + str(forecast_day_3) + "'"
            curF.execute(sql)
            for (id) in curF:
                frcst3icon = id[0]
            sql = "SELECT DISTINCT id FROM tbmeteo WHERE tbmeteo.condition = '" + str(forecast_day_4) + "'"
            curF.execute(sql)
            for (id) in curF:
                frcst4icon = id[0]
            for id in nodeList:
                if (id != 9999):
                    # Icons first, then (after a pause) the temperatures.
                    sql = CMDPREFIXOUT + "INSERT INTO tbdataout (timekey,type,v0,v1,v2,v3,v4) VALUES (millis(),8,%s,%s,%s,%s,%s)" % (id, frcst1icon, frcst2icon, frcst3icon, frcst4icon)
                    #print(sql)
                    qSql.put(sql)
                    time.sleep(5)
                    sql = CMDPREFIXOUT + "INSERT INTO tbdataout (timekey,type,v0,v1,v2,v3,v4,V5,V6,V7,V8) VALUES (millis(),9,%s,%s,%s,%s,%s,%s,%s,%s,%s)" % (id, t1min, t1max, t2min, t2max, t3min, t3max, t4min, t4max)
                    #print(sql)
                    qSql.put(sql)
            output ("Meteo Forecast","Forecast sent")
    except mysql.connector.Error as e:
        printDBError("MeteoForecast", e)
def execMeteo():
    """Fetch current weather, store raw readings (tbdatain) and broadcast
    an encoded summary (tbdataout, type 7) to every configured node.
    """
    output ("Meteo","Exec Meteo")
    global temp_c
    global relative_humidity
    global pressure
    global meteo
    global qSql
    ret = getTemp()
    try:
        if (ret == "Ok"):
            sql = CMDPREFIXOUT + "REPLACE INTO tbdatain (idx,timekey,type,v0,v1,v2) values (1,millis(),1,%s,%s,%s)" % ('2','1000',temp_c)
            sql = sql + ",(2,millis(),1,%s,%s,%s)" % ('2','1001',relative_humidity)
            sql = sql + ",(3,millis(),1,%s,%s,%s)" % ('2','1002',pressure)
            qSql.put(sql)
            #print(sql)
            output ("Meteo","Temperatura corrente "+str(temp_c)+" Celsius")
            #get meteo icon
            sql = "SELECT DISTINCT id FROM tbmeteo WHERE tbmeteo.condition = '" + str(meteo) + "'"
            curM.execute(sql)
            for (id) in curM:
                meteoid = id[0]
            # Wire encoding: negatives become abs(t)+9000, positives are
            # scaled by 100. NOTE(review): the asymmetry (negatives are NOT
            # scaled) looks intentional but should be confirmed against the
            # node firmware.
            if int(temp_c) < 0:
                temp_c_fixed = abs(int(temp_c))+9000
            else:
                temp_c_fixed = int(temp_c)*100
            for id in nodeList:
                if (id != 9999):
                    sql = CMDPREFIXOUT + "INSERT INTO tbdataout (timekey,type,v0,v1,v2,v3,v4) VALUES (millis(),7,%s,%s,%s,%s,%s)" % (id, temp_c_fixed, relative_humidity, pressure, meteoid)
                    #print(sql)
                    qSql.put(sql)
            output ("Meteo","Meteo inviato")
    except mysql.connector.Error as e:
        printDBError("Meteo", e)
def execUps():
    """Poll apcupsd via apcaccess and push the UPS readings onto the SQL
    queue as a single multi-row REPLACE into tbdatain.

    Status encoding: 1 = on line/charging, 0 = on battery,
    99 = communication lost, 999 = unknown status.
    """
    global qSql
    # `strip_units=True` leaves plain numeric strings in the dict.
    readings = apc.parse(apc.get(), strip_units=True)
    status = readings['STATUS']
    if status in ('ONLINE', 'CHARGING', 'ONBATT'):
        # These three states report the same metrics; only the flag differs
        # (the original duplicated this whole body per state).
        vstatus = 0 if status == 'ONBATT' else 1
        bcharge = int(float(readings['BCHARGE']))
        battv = float(readings['BATTV'])
        timeleft = float(readings['TIMELEFT'])
        linev = int(float(readings['LINEV']))
    else:
        # No usable metrics: zero everything, encode why in vstatus.
        vstatus = 99 if status == 'COMMLOST' else 999
        bcharge = 0
        battv = 0.0
        timeleft = 0.0
        linev = 0
    sql = CMDPREFIXOUT + "REPLACE INTO tbdatain (idx,timekey,type,v0,v1,v2) values (4,millis(),1,%s,%s,%s)" % ('8','32',bcharge)
    sql = sql + ",(5,millis(),1,%s,%s,%s)" % ('8','33',vstatus)
    sql = sql + ",(6,millis(),1,%s,%s,%s)" % ('8','34',battv)
    sql = sql + ",(7,millis(),1,%s,%s,%s)" % ('8','30',timeleft)
    sql = sql + ",(8,millis(),1,%s,%s,%s)" % ('8','31',linev)
    qSql.put(sql)
    output ("UPS","Aggiornamento UPS fatto")
def run():
    """Periodic worker: refresh current weather and forecast, then re-arm
    a 5-minute timer that calls this function again.
    """
    execMeteo()
    time.sleep(2.0)
    execForecast()
    time.sleep(2.0)
    #execUps()
    # Timer.start() returns None, so the assignment keeps no usable handle;
    # the timer thread itself keeps the schedule alive.
    timerMeteo = threading.Timer(300.0, run).start()
#------- Main section ----------------------------#
#------- Run once --------------------------------#
# Connect to the shared SQL queue server and grab its queue proxy.
qmSql = QueueServerClient(HOST, PORT2, AUTHKEY)
qSql = qmSql.get_queue()
# Kick off the periodic worker almost immediately; it re-arms itself.
timerMeteo = threading.Timer(1.0, run).start()
#------- End run once -------------------------#
# Keep the main thread alive for the background timers WITHOUT spinning:
# the previous loop body (`a = 0`) busy-waited and pegged a CPU core.
while True:
    time.sleep(60)
| theflorianmaas/dh | Python/dhproc dev/getDirectUpdate.py | Python | mit | 10,081 |
# -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
import bandit
from bandit.core import constants
from bandit.core import issue
class IssueTests(testtools.TestCase):
    """Unit tests for bandit.core.issue.Issue."""

    def test_issue_create(self):
        new_issue = _get_issue_instance()
        self.assertIsInstance(new_issue, issue.Issue)

    def test_issue_str(self):
        test_issue = _get_issue_instance()
        self.assertEqual(
            ("Issue: 'Test issue' from B999:bandit_plugin: Severity: MEDIUM "
             "Confidence: MEDIUM at code.py:1"),
            str(test_issue)
        )

    def test_issue_as_dict(self):
        test_issue = _get_issue_instance()
        test_issue_dict = test_issue.as_dict(with_code=False)
        self.assertIsInstance(test_issue_dict, dict)
        self.assertEqual('code.py', test_issue_dict['filename'])
        self.assertEqual('bandit_plugin', test_issue_dict['test_name'])
        self.assertEqual('B999', test_issue_dict['test_id'])
        self.assertEqual('MEDIUM', test_issue_dict['issue_severity'])
        self.assertEqual('MEDIUM', test_issue_dict['issue_confidence'])
        self.assertEqual('Test issue', test_issue_dict['issue_text'])
        self.assertEqual(1, test_issue_dict['line_number'])
        self.assertEqual([], test_issue_dict['line_range'])

    def test_issue_filter_severity(self):
        levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
        issues = [_get_issue_instance(l, bandit.HIGH) for l in levels]
        for level in levels:
            rank = constants.RANKING.index(level)
            for i in issues:
                test = constants.RANKING.index(i.severity)
                result = i.filter(level, bandit.UNDEFINED)
                self.assertTrue((test >= rank) == result)

    def test_issue_filter_confidence(self):
        levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
        issues = [_get_issue_instance(bandit.HIGH, l) for l in levels]
        for level in levels:
            rank = constants.RANKING.index(level)
            for i in issues:
                test = constants.RANKING.index(i.confidence)
                result = i.filter(bandit.UNDEFINED, level)
                self.assertTrue((test >= rank) == result)

    def test_matches_issue(self):
        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance(severity=bandit.HIGH)
        issue_c = _get_issue_instance(confidence=bandit.LOW)
        issue_d = _get_issue_instance()
        issue_d.text = 'ABCD'
        issue_e = _get_issue_instance()
        issue_e.fname = 'file1.py'
        issue_f = issue_a
        issue_g = _get_issue_instance()
        issue_g.test = 'ZZZZ'
        # BUGFIX: issue_h used to alias issue_a, so mutating its lineno also
        # mutated issue_a and the final assertion compared an object with
        # itself (vacuous).  An independent instance actually exercises the
        # claim that equality ignores the line number.
        issue_h = _get_issue_instance()
        issue_h.lineno = 12345
        # positive tests
        self.assertEqual(issue_a, issue_a)
        self.assertEqual(issue_a, issue_f)  # issue_f aliases issue_a: identity
        self.assertEqual(issue_f, issue_a)
        # severity doesn't match
        self.assertNotEqual(issue_a, issue_b)
        # confidence doesn't match
        self.assertNotEqual(issue_a, issue_c)
        # text doesn't match
        self.assertNotEqual(issue_a, issue_d)
        # filename doesn't match
        self.assertNotEqual(issue_a, issue_e)
        # plugin name doesn't match
        self.assertNotEqual(issue_a, issue_g)
        # line number doesn't match but should pass because we don't test that
        self.assertEqual(issue_a, issue_h)

    @mock.patch('linecache.getline')
    def test_get_code(self, getline):
        # Non-UTF8 bytes must be decoded defensively by get_code().
        getline.return_value = b'\x08\x30'
        new_issue = issue.Issue(bandit.MEDIUM, lineno=1)
        try:
            new_issue.get_code()
        except UnicodeDecodeError:
            self.fail('Bytes not properly decoded in issue.get_code()')
def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM):
    """Build a fully-populated Issue fixture for the tests in this module."""
    inst = issue.Issue(severity, confidence, 'Test issue')
    inst.fname = 'code.py'
    inst.test = 'bandit_plugin'
    inst.test_id = 'B999'
    inst.lineno = 1
    return inst
| pombredanne/bandit | tests/unit/core/test_issue.py | Python | apache-2.0 | 4,613 |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009-2010 Benny Malengier <benny.malengier@gramps-project.org>
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Tim Lyons
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Report output generator for html documents, based on Html and HtmlBackend
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
import os
import shutil
import logging
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
from gprime.utils.image import resize_to_jpeg
from gprime.const import DATA_DIR, IMAGE_DIR, PROGRAM_NAME, URL_HOMEPAGE
from gprime.errors import ReportError
from gprime.version import VERSION
from gprime.plug.docgen import BaseDoc, TextDoc, URL_PATTERN
from gprime.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces
from gprime.plugins.lib.libhtml import Html
from gprime.const import LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
LOG = logging.getLogger(".htmldoc")
# File names (relative to the backend data dir) of the two stylesheets
# referenced from the generated page header.
_TEXTDOCSCREEN = 'grampstextdoc.css'
_HTMLSCREEN = 'grampshtml.css'
#------------------------------------------------------------------------
#
# Set up to make links clickable
#
#------------------------------------------------------------------------
# Replacement template applied to URL_PATTERN matches (\1 = matched URL).
_CLICKABLE = r'''<a href="\1">\1</a>'''
#------------------------------------------------------------------------
#
# HtmlDoc
#
#------------------------------------------------------------------------
class HtmlDoc(BaseDoc, TextDoc):
"""Implementation of the BaseDoc and TextDoc gen.plug.docgen api for the
creation of Html files. This is achieved by writing on a HtmlBackend
object
div id's defined here:
id="grampstextdoc" : the entire text report
id="grampsheading" : a small defined heading, but not h1 to h6 !
id="grampsstylednote" : start of part with a styled note, divided in
paragraphs
id="grampsnote" : start of part with a note. This id is normally not
used
The styles as defined in the stylesheed of the textdoc, will be converted
to css class. Color is removed to avoid conflicts with the css. Also
Fontface is removed. Size, italic, bold, margins, borders are retained
"""
    def __init__(self, styles, paper_style):
        """Initialize the HTML document writer (paper_style is ignored)."""
        BaseDoc.__init__(self, styles, None)
        self.style_declaration = ''  # inline css built from the stylesheet
        self.htmllist = []  # stack of currently open Html elements
        self._backend = None  # HtmlBackend, created in open()
        self.css_filename = ''
        self.warn_dir = True  # warn only once about suspicious destinations
        self._col = 0  # current table column index
        self._tbl = None  # active table style
        self._empty = 1  # 1 while the current paragraph has no content yet
        self.title = ''
        self.__title_written = -1  # -1 = not written, 0 = writing, 1 = written
        self.__link_attrs = {}  # additional link attrs, eg {"style": "...", "class": "..."}
        self.use_table_headers = False  # th, td
        self.first_row = True
def set_css_filename(self, css_filename):
"""
Set the css file to use. The path must be included.
Note: DocReportDialog sets this for html doc
"""
if css_filename and os.path.basename(css_filename):
self.css_filename = css_filename
else:
self.css_filename = ''
    def open(self, filename):
        """
        Overwrite base method: create the backend, open the output file and
        start the enclosing report div.

        :param filename: target path of the generated HTML file
        """
        self._backend = HtmlBackend(filename)
        self._backend.open()
        # The document body becomes the root of the open-element stack.
        self.htmllist += [self._backend.html_body]
        #start a gramps report
        self.htmllist += [Html('div', id="grampstextdoc")]
        self.build_header()
    def build_header(self):
        """
        Build up the header of the html file over the defaults of Html():
        generator meta tag, favicon and stylesheet links.
        """
        # add additional meta tags and stylesheet links to head section
        # create additional meta tags
        _meta1 = 'name="generator" content="%s %s %s"' % (
            PROGRAM_NAME, VERSION, URL_HOMEPAGE)
        meta = Html('meta', attr=_meta1)
        #set styles of the report as inline css
        self.build_style_declaration()
        # Gramps favicon en css
        fname1 = '/'.join([self._backend.datadir(), 'favicon.ico'])
        fname2 = '/'.join([self._backend.datadir(), _TEXTDOCSCREEN])
        fname3 = '/'.join([self._backend.datadir(), _HTMLSCREEN])
        # links for GRAMPS favicon and stylesheets
        links = Html('link', rel='shortcut icon', href=fname1,
                     type='image/x-icon') + (
                         Html('link', rel='stylesheet', href=fname2,
                              type='text/css', media='screen', indent=False),)
        # The user-selected screen stylesheet is optional.
        if self.css_filename:
            links += (Html('link', rel='stylesheet', href=fname3,
                           type='text/css', media='screen', indent=False),)
        self._backend.html_header += (meta, links)
    def build_style_declaration(self, id="grampstextdoc"):
        """
        Convert the styles of the report into inline css for the html doc.

        :param id: css id of the enclosing div the rules are scoped to.
            NOTE(review): the parameter name shadows the builtin ``id`` but
            cannot be renamed without breaking keyword callers.
        """
        styles = self.get_style_sheet()
        text = []
        # Cell styles: padding and per-side borders only.
        for sname in sorted(styles.get_cell_style_names()):
            style = styles.get_cell_style(sname)
            pad = "%.3fcm" % style.get_padding()
            top = bottom = left = right = 'none'
            if style.get_top_border():
                top = 'thin solid #000000'
            if style.get_bottom_border():
                bottom = 'thin solid #000000'
            if style.get_left_border():
                left = 'thin solid #000000'
            if style.get_right_border():
                right = 'thin solid #000000'
            text.append('#%s .%s {\n'
                        '\tpadding: %s %s %s %s;\n'
                        '\tborder-top:%s; border-bottom:%s;\n'
                        '\tborder-left:%s; border-right:%s;\n}'
                        % (id, sname, pad, pad, pad, pad, top, bottom,
                           left, right))
        # Paragraph styles: font size, alignment, margins and borders.
        for style_name in sorted(styles.get_paragraph_style_names()):
            style = styles.get_paragraph_style(style_name)
            font = style.get_font()
            font_size = font.get_size()
            #font_color = '#%02x%02x%02x' % font.get_color()
            align = style.get_alignment_text()
            text_indent = "%.2f" % style.get_first_indent()
            right_margin = "%.2f" % style.get_right_margin()
            left_margin = "%.2f" % style.get_left_margin()
            top_margin = "%.2f" % style.get_top_margin()
            bottom_margin = "%.2f" % style.get_bottom_margin()
            top = bottom = left = right = 'none'
            if style.get_top_border():
                top = 'thin solid #000000'
            if style.get_bottom_border():
                bottom = 'thin solid #000000'
            if style.get_left_border():
                left = 'thin solid #000000'
            if style.get_right_border():
                right = 'thin solid #000000'
            italic = bold = ''
            if font.get_italic():
                italic = 'font-style:italic; '
            if font.get_bold():
                bold = 'font-weight:bold; '
            #if font.get_type_face() == FONT_SANS_SERIF:
            #    family = '"Helvetica","Arial","sans-serif"'
            #else:
            #    family = '"Times New Roman","Times","serif"'
            # do not allow color, set in base css !
            # so no : 'color: %s' % font_color
            # so no : 'font-family:%s;' % family
            text.append('#%s .%s {\n'
                        '\tfont-size: %dpt;\n'
                        '\ttext-align: %s; text-indent: %scm;\n'
                        '\tmargin-right: %scm; margin-left: %scm;\n'
                        '\tmargin-top: %scm; margin-bottom: %scm;\n'
                        '\tborder-top:%s; border-bottom:%s;\n'
                        '\tborder-left:%s; border-right:%s;\n'
                        '\t%s%s\n}'
                        % (id, style_name, font_size,
                           align, text_indent,
                           right_margin, left_margin,
                           top_margin, bottom_margin,
                           top, bottom, left, right,
                           italic, bold))
        self.style_declaration = '\n'.join(text)
    def close(self):
        """
        Overwrite base method: close all still-open elements, write the
        file and copy the support assets (css, favicon).
        """
        while len(self.htmllist) > 1:
            self.__reduce_list()
        #now write the actual file
        self._backend.close()
        self.write_support_files()
def copy_file(self, from_fname, to_fname, to_dir=''):
"""
Copy a file from a source to a (report) destination. If to_dir is not
present, then the destination directory will be created.
Normally 'to_fname' will be just a filename, without directory path.
'to_dir' is the relative path name in the destination root. It will
be prepended before 'to_fname'.
"""
#build absolute path
dest = os.path.join(self._backend.datadirfull(), to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
shutil.copyfile(from_fname, dest)
elif self.warn_dir:
raise ReportError(
_("Possible destination error"),
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
    def write_support_files(self):
        """
        Copy support files to the datadir that needs to hold them:
        the generated report css, the optional user css and the favicon.
        """
        #css of textdoc styles
        with open(os.path.join(self._backend.datadirfull(),
                               _TEXTDOCSCREEN), 'w') as tdfile:
            tdfile.write(self.style_declaration)
        #css file
        if self.css_filename:
            #we do an extra check in case file does not exist, eg cli call
            fullpath = os.path.join(DATA_DIR, self.css_filename)
            if os.path.exists(fullpath):
                self.copy_file(fullpath, _HTMLSCREEN)
        #favicon
        self.copy_file(os.path.join(IMAGE_DIR, 'favicon.ico'),
                       'favicon.ico')
def __reduce_list(self):
"""
Takes the internal list of html objects, and adds the last to the
previous. This closes the upper tag
"""
self.htmllist[-2] += self.htmllist[-1]
self.htmllist.pop()
    def __write_text(self, text, mark=None, markup=False, links=False):
        """
        Append text to the innermost open element.

        @param text: text to write.
        @param mark:  IndexMark to use for indexing (not supported)
        @param markup: True if text already contains markup info.
                       Then text will no longer be escaped
        @param links:  make URLs clickable if True
        """
        if not markup:
            text = self._backend.ESCAPE_FUNC()(text)
        if self.__title_written == 0:
            # Title h1 is currently open: accumulate its text for <title>.
            self.title += text
        if links is True:
            import re  # local import: only needed when links are requested
            text = re.sub(URL_PATTERN, _CLICKABLE, text)
        self.htmllist[-1] += text
    def __empty_char(self):
        """
        Output a non breaking whitespace so as to have browser behave ok on
        empty content
        """
        # markup=True so the entity is not escaped into &amp;nbsp;
        self.__write_text(' ', markup=True)
    def write_text(self, text, mark=None, links=False):
        """
        Overwrite base method: append text to the open element and mark the
        current paragraph as non-empty when the text is non-empty.
        """
        if text != "":
            self._empty = 0
        self.__write_text(text, mark, links=links)
    def write_title(self):
        """
        Add title field to header, using the text accumulated while the
        title heading was being written.
        """
        self._backend.html_header += Html('title', self.title,
                                          inline=True)
    def start_table(self, name, style):
        """
        Overwrite base method: open a <table> element using the named
        table style for its width and column layout.
        """
        self.first_row = True
        styles = self.get_style_sheet()
        self._tbl = styles.get_table_style(style)
        self.htmllist += [Html('table', width=str(self._tbl.get_width())+'%',
                               cellspacing='0')]
    def end_table(self):
        """
        Overwrite base method: close the current <table> element.
        """
        self.__reduce_list()
    def start_row(self):
        """
        Overwrite base method: open a <tr> element and reset the column
        counter.
        """
        self.htmllist += [Html('tr')]
        self._col = 0
    def end_row(self):
        """
        Overwrite base method: close the current <tr> element; subsequent
        rows are no longer the (potential) header row.
        """
        self.first_row = False
        self.__reduce_list()
    def start_cell(self, style_name, span=1):
        """
        Overwrite base method: open a <td> (or <th> for the first row when
        use_table_headers is set) with the given css class.
        """
        if self.use_table_headers and self.first_row:
            tag = "th"
        else:
            tag = "td"
        self._empty = 1
        if span > 1:
            # Spanned cells carry no explicit width attribute.
            self.htmllist += (Html(tag, colspan=str(span), class_=style_name),)
            self._col += span
        else:
            self.htmllist += (Html(tag, colspan=str(span),
                                   width=str(self._tbl.get_column_width(
                                             self._col))+ '%',
                                   class_=style_name),)
            self._col += 1
    def end_cell(self):
        """
        Overwrite base method: close the current table cell element.
        """
        self.__reduce_list()
    def start_paragraph(self, style_name, leader=None):
        """
        Overwrite base method: open a paragraph-level element. The header
        level of the style decides the tag: 0 -> <p>, 1 -> <h1> (the first
        TITLE-styled h1 also starts the page header and <title> capture),
        2..5 -> <h3>..<h6>, anything else -> a small heading div.
        """
        style_sheet = self.get_style_sheet()
        style = style_sheet.get_paragraph_style(style_name)
        level = style.get_header_level()
        if level == 0:
            #a normal paragraph
            self.htmllist += (Html('p', class_=style_name, inline=True),)
        elif level == 1:
            if self.__title_written == -1 and \
               style_name.upper().find('TITLE') != -1:
                # First title heading: begin capturing text for <title>.
                self.__title_written = 0
                self.htmllist += (Html('div', id="header"),)
                self.htmllist += (Html('h1', class_=style_name, id='SiteTitle',
                                       inline=True),)
            else:
                self.htmllist += (Html('h1', class_=style_name, inline=True),)
        elif 2 <= level <= 5:
            tag = 'h'+str(level+1)
            self.htmllist += (Html(tag, class_=style_name, inline=True),)
        else:
            # a low level header
            self.htmllist += (Html('div', id='grampsheading',
                                   class_=style_name),)
        if leader is not None:
            self.write_text(leader+' ')
def end_paragraph(self):
"""
Overwrite base method
"""
if self._empty == 1:
self.__empty_char()
self._empty = 0
self.__reduce_list()
if self.__title_written == 0:
self.__title_written = 1
#close div statement
self.__reduce_list()
self.write_title()
def start_bold(self):
"""
Overwrite base method
"""
self.htmllist += [Html('strong')]
def end_bold(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_superscript(self):
"""
Overwrite base method
"""
self.htmllist += [Html('sup')]
def end_superscript(self):
"""
Overwrite base method
"""
self.__reduce_list()
    def write_styled_note(self, styledtext, format, style_name,
                          contains_html=False, links=False):
        """
        Convenience function to write a styledtext to the html doc.
        styledtext : assumed a StyledText object to write
        format : = 0 : Flowed, = 1 : Preformatted
        style_name : name of the style to use for default presentation
        contains_html: bool, the backend should not check if html is present.
            If contains_html=True, then the textdoc is free to handle that in
            some way. Eg, a textdoc could remove all tags, or could make sure
            a link is clickable. HtmlDoc will show the html as pure text, so
            no escaping will happen.
        links: bool, make URLs clickable if True
        """
        text = str(styledtext)
        self.htmllist += [Html('div', id='grampsstylednote')]
        if contains_html:
            # Just dump the note out as it is. Adding markup would be
            # dangerous as it could destroy the embedded html.
            self.start_paragraph(style_name)
            self.__write_text(text, markup=True, links=links)
            self.end_paragraph()
        else:
            s_tags = styledtext.get_tags()
            markuptext = self._backend.add_markup_from_styled(text, s_tags,
                                                              split='\n')
            self.start_paragraph(style_name)
            inpara = True
            self._empty = 1 # para is empty
            # we explicitly set _empty because start and end para do not seem
            # to do a very good job at setting them
            linenb = 1
            # The code is tricky here, because we don't want to start a new para
            # at the end of the last line if there is no newline there.
            # Instead, we want to just end the current para.
            for line in markuptext.split('\n'):
                # process_spaces presumably returns the transformed line and
                # the count of significant characters; sigcount == 0 marks a
                # blank line -- TODO confirm against its definition.
                [line, sigcount] = process_spaces(line, format)
                if sigcount == 0:
                    if inpara is False:
                        # needed for runs of three or more newlines
                        self.start_paragraph(style_name)
                        inpara = True
                        self._empty = 1 # para is empty
                    self.end_paragraph()
                    inpara = False
                    linenb = 1
                else:
                    if inpara is False:
                        self.start_paragraph(style_name)
                        inpara = True
                        self._empty = 1 # para is empty
                    if linenb > 1:
                        # continuation line within the same paragraph
                        self.htmllist[-1] += Html('br')
                    self.__write_text(line, markup=True, links=links)
                    self._empty = 0 # para is not empty
                    linenb += 1
            if inpara is True:
                self.end_paragraph()
            if sigcount == 0:
                # if the last line was blank, then as well as outputting the
                # previous para, which we have just done, we also output a new
                # blank para
                self.start_paragraph(style_name)
                self._empty = 1 # para is empty
                self.end_paragraph()
        #end div element
        self.__reduce_list()
def add_media(self, name, pos, w_cm, h_cm, alt='', style_name=None,
crop=None):
"""
Overwrite base method
"""
self._empty = 0
size = int(max(w_cm, h_cm) * float(150.0/2.54))
refname = "is%s" % os.path.basename(name)
imdir = self._backend.datadirfull()
try:
resize_to_jpeg(name, imdir + os.sep + refname, size, size,
crop=crop)
except:
LOG.warning(_("Could not create jpeg version of image %(name)s"),
name)
return
if len(alt):
alt = '<br />'.join(alt)
if pos not in ["right", "left"]:
if len(alt):
self.htmllist[-1] += Html('div') + (
Html('img', src=imdir + os.sep + refname,
border='0', alt=alt),
Html('p', class_="DDR-Caption") + alt
)
else:
self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
border='0', alt=alt)
else:
if len(alt):
self.htmllist[-1] += Html(
'div', style_="float: %s; padding: 5px; margin: 0;" % pos
) + (Html('img', src=imdir + os.sep + refname,
border='0', alt=alt),
Html('p', class_="DDR-Caption") + alt)
else:
self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
border='0', alt=alt, align=pos)
def page_break(self):
"""
overwrite base method so page break has no effect
"""
pass
def start_link(self, link):
"""
Starts a section to add a link. Link is a URI.
"""
self.htmllist += [Html('a', href=link, **self.__link_attrs)]
def stop_link(self):
"""
Stop a section of a link.
"""
self.__reduce_list()
def start_underline(self):
"""
Starts a section of underlining.
"""
self.htmllist += [Html('u')]
def stop_underline(self):
"""
Stop underlining.
"""
self.__reduce_list()
def set_link_attrs(self, attrs):
"""
Set some a attributes/values. attrs is a dictionary, eg
{"style": "...", "class": "..."}
"""
self.__link_attrs = attrs
| sam-m888/gprime | gprime/plugins/docgen/htmldoc.py | Python | gpl-2.0 | 22,518 |
# -*- coding: utf-8 -*-
#
# dbsync documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 14 17:15:11 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# PEP 8: one import per line.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../source/'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dbsync'
copyright = u'2013, Reinaldo Gil Lima de Carvalho'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'dbsyncdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'dbsync.tex', u'dbsync Documentation',
   u'Reinaldo Gil Lima de Carvalho', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| reinaldoc/dbsync | docs/conf.py | Python | gpl-2.0 | 6,413 |
import os
from setuptools import setup

# Long description shown on PyPI comes from the bundled README.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='tgbotplug',
    version='0.1.14',
    packages=['tgbot'],
    include_package_data=True,
    license='MIT License',
    description='Telegram plugin-based bot',
    long_description=README,
    url='https://github.com/fopina/tgbotplug',
    author='Filipe Pina',
    author_email='fopina@skmobi.com',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        # Fix: 'Programming Language :: Python' was listed twice;
        # keep a single copy of the classifier.
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    install_requires=[
        'requests==2.7.0',
        'twx.botapi==1.0.2',
        'peewee==2.6.3',
    ]
)
| pmpfl/tgbotplug | setup.py | Python | mit | 1,080 |
#!/usr/bin/env python3
import json
import random
def main():
    """Run an endless vocabulary quiz from the local dictionary file.

    Shows a random word, waits for the user to press Enter, then reveals
    its meaning. Terminate with Ctrl-C.
    """
    dictionary_file_path = 'dictionary.json'
    # JSON is UTF-8 by specification; be explicit so the script also works
    # on platforms whose default locale encoding is not UTF-8.
    with open(dictionary_file_path, "r", encoding="utf-8") as dictionary_file:
        dictionary = json.load(dictionary_file)
    while True:
        # Select a random word-meaning pair
        word, meaning = random.choice(list(dictionary.items()))
        print(word)
        input('??')
        print(meaning)
        print('\n' + '-' * 30 + '\n')


if __name__ == '__main__':
    main()
| Anmol-Singh-Jaggi/Dictionary | quiz.py | Python | gpl-3.0 | 501 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for state domain objects and methods defined on them."""
from __future__ import annotations
import contextlib
import copy
import logging
import os
import re
from core import feconf
from core import schema_utils
from core import utils
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import html_validation_service
from core.domain import interaction_registry
from core.domain import rules_registry
from core.domain import state_domain
from core.domain import translatable_object_registry
from core.tests import test_utils
class StateDomainUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
    def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self):
        """Test the method for extracting all the HTML from a state having
        DragAndDropSortInput interaction.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['State1'])
        state = exploration.states['State1']
        # Content shown for the state.
        state_content_dict = {
            'content_id': 'content',
            'html': '<p>state content html</p>'
        }
        # Four sortable choices for the DragAndDropSortInput interaction.
        state_customization_args_dict = {
            'choices': {
                'value': [
                    {
                        'content_id': 'ca_choices_0',
                        'html': '<p>state customization arg html 1</p>'
                    }, {
                        'content_id': 'ca_choices_1',
                        'html': '<p>state customization arg html 2</p>'
                    }, {
                        'content_id': 'ca_choices_2',
                        'html': '<p>state customization arg html 3</p>'
                    }, {
                        'content_id': 'ca_choices_3',
                        'html': '<p>state customization arg html 4</p>'
                    }
                ]
            },
            'allowMultipleItemsInSamePosition': {
                'value': False
            }
        }
        # One answer group exercising every DragAndDropSortInput rule type
        # that carries HTML in its inputs.
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                'Introduction', state_domain.SubtitledHtml(
                    'feedback_1', '<p>State Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'IsEqualToOrdering',
                    {
                        'x': [['<p>IsEqualToOrdering rule_spec htmls</p>']]
                    }),
                state_domain.RuleSpec(
                    'HasElementXAtPositionY',
                    {
                        'x': '<p>HasElementXAtPositionY rule_spec '
                             'html</p>',
                        'y': 2
                    }),
                state_domain.RuleSpec(
                    'HasElementXBeforeElementY',
                    {
                        'x': '<p>x input for HasElementXAtPositionY '
                             'rule_spec </p>',
                        'y': '<p>y input for HasElementXAtPositionY '
                             'rule_spec </p>'
                    }),
                state_domain.RuleSpec(
                    'IsEqualToOrderingWithOneItemAtIncorrectPosition',
                    {
                        'x': [[(
                            '<p>IsEqualToOrderingWithOneItemAtIncorrectPos'
                            'ition rule_spec htmls</p>')
                              ]]
                    })
            ],
            [],
            None
        )
        state_solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': [
                '<p>state customization arg html 1</p>',
                '<p>state customization arg html 2</p>',
                '<p>state customization arg html 3</p>',
                '<p>state customization arg html 4</p>'
            ],
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is solution for state1</p>'
            }
        }
        # Written translations (en/hi) for every content id in the state.
        state_written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'en': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation content-en</p>',
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation content-hi</p>',
                        'needs_update': False
                    }
                },
                'ca_choices_0': {
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            (
                                '<p>state written_translation ca_choices_0-hi'
                                '</p>'
                            ),
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            (
                                '<p>state written_translation ca_choices_0'
                                '-en</p>'
                            ),
                        'needs_update': False
                    }
                },
                'ca_choices_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            (
                                '<p>state written_translation ca_choices_1-hi'
                                '</p>'
                            ),
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            (
                                '<p>state written_translation ca_choices_1-en'
                                '</p>'
                            ),
                        'needs_update': False
                    }
                },
                'ca_choices_2': {
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            (
                                '<p>state written_translation ca_choices_2-hi'
                                '</p>'
                            ),
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            (
                                '<p>state written_translation ca_choices_2-en'
                                '</p>'
                            ),
                        'needs_update': False
                    }
                },
                'ca_choices_3': {
                    'hi': {
                        'data_format': 'html',
                        'translation': (
                            '<p>state written_translation ca_choices_3-hi'
                            '</p>'
                        ),
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            (
                                '<p>state written_translation ca_choices_3-en'
                                '</p>'
                            ),
                        'needs_update': False
                    }
                },
                'default_outcome': {
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation outcome-hi</p>',
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation outcome-en</p>',
                        'needs_update': False
                    }
                },
                'feedback_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation feedback-hi</p>',
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation feedback-en</p>',
                        'needs_update': False
                    }
                },
                'hint_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation hint_1-hi</p>',
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation hint_1-en</p>',
                        'needs_update': False
                    }
                },
                'solution': {
                    'hi': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation solution-hi</p>',
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation':
                            '<p>state written_translation solution-en</p>',
                        'needs_update': False
                    }
                }
            }
        }
        state_hint_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for hint 1</p>'
                )
            )
        ]
        # NOTE: this re-assignment deliberately overwrites the earlier
        # state_solution_dict; the mocked ListOfSetsOfHtmlStrings answer
        # type below expects a list of lists as the correct answer.
        state_solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': [
                ['<p>state customization arg html 1</p>'],
                ['<p>state customization arg html 2</p>'],
                ['<p>state customization arg html 3</p>'],
                ['<p>state customization arg html 4</p>']
            ],
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is solution for state1</p>'
            }
        }
        state.update_content(
            state_domain.SubtitledHtml.from_dict(state_content_dict))
        state.update_interaction_id('DragAndDropSortInput')
        state.update_interaction_customization_args(
            state_customization_args_dict)
        state.update_next_content_id_index(4)
        state.update_interaction_hints(state_hint_list)
        solution = state_domain.Solution.from_dict(
            state.interaction.id, state_solution_dict)
        state.update_interaction_solution(solution)
        state.update_interaction_answer_groups(
            [state_answer_group])
        state.update_written_translations(
            state_domain.WrittenTranslations.from_dict(
                state_written_translations_dict))
        exp_services.save_new_exploration('owner_id', exploration)
        # Patch the registries so the state is treated as having the
        # ListOfSetsOfHtmlStrings answer type when HTML is collected.
        mock_html_field_types_to_rule_specs_dict = copy.deepcopy(
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))
        def mock_get_html_field_types_to_rule_specs(unused_cls):
            return mock_html_field_types_to_rule_specs_dict
        def mock_get_interaction_by_id(cls, interaction_id):
            interaction = copy.deepcopy(cls._interactions[interaction_id]) # pylint: disable=protected-access
            interaction.answer_type = 'ListOfSetsOfHtmlStrings'
            return interaction
        rules_registry_swap = self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs))
        interaction_registry_swap = self.swap(
            interaction_registry.Registry, 'get_interaction_by_id',
            classmethod(mock_get_interaction_by_id))
        with rules_registry_swap, interaction_registry_swap:
            html_list = state.get_all_html_content_strings()
        # Every piece of HTML -- content, customization args, rule inputs,
        # hints, solution and written translations -- must be returned.
        self.assertItemsEqual(
            html_list,
            [
                '<p>state written_translation solution-hi</p>',
                '<p>state written_translation solution-en</p>',
                '<p>state written_translation content-hi</p>',
                '<p>state written_translation content-en</p>',
                '<p>state written_translation feedback-hi</p>',
                '<p>state written_translation feedback-en</p>',
                '<p>state written_translation hint_1-hi</p>',
                '<p>state written_translation hint_1-en</p>',
                '<p>state written_translation outcome-hi</p>',
                '<p>state written_translation outcome-en</p>',
                '<p>state written_translation ca_choices_0-hi</p>',
                '<p>state written_translation ca_choices_0-en</p>',
                '<p>state written_translation ca_choices_1-hi</p>',
                '<p>state written_translation ca_choices_1-en</p>',
                '<p>state written_translation ca_choices_2-hi</p>',
                '<p>state written_translation ca_choices_2-en</p>',
                '<p>state written_translation ca_choices_3-hi</p>',
                '<p>state written_translation ca_choices_3-en</p>',
                '<p>State Feedback</p>',
                '<p>IsEqualToOrdering rule_spec htmls</p>',
                '<p>HasElementXAtPositionY rule_spec html</p>',
                '<p>y input for HasElementXAtPositionY rule_spec </p>',
                '<p>x input for HasElementXAtPositionY rule_spec </p>',
                (
                    '<p>IsEqualToOrderingWithOneItemAtIncorrectPosition rule_s'
                    'pec htmls</p>'),
                '',
                '<p>Hello, this is html1 for hint 1</p>',
                '<p>This is solution for state1</p>',
                '<p>state customization arg html 1</p>',
                '<p>state customization arg html 2</p>',
                '<p>state customization arg html 3</p>',
                '<p>state customization arg html 4</p>',
                '<p>state customization arg html 1</p>',
                '<p>state customization arg html 2</p>',
                '<p>state customization arg html 3</p>',
                '<p>state customization arg html 4</p>',
                '<p>state content html</p>'])
    def test_get_all_html_in_exploration_with_text_input_interaction(self):
        """Test the method for extracting all the HTML from a state having
        TextInput interaction.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['State1'])
        state = exploration.states['State1']
        state_content_dict = {
            'content_id': 'content',
            'html': '<p>state content html</p>'
        }
        # A single answer group with one Equals rule on the text input.
        state_answer_group = [state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>state outcome html</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Equals', {
                        'x': {
                            'contentId': 'rule_input_Equals',
                            'normalizedStrSet': ['Test']
                        }})
            ],
            [],
            None
        )]
        state_default_outcome = state_domain.Outcome(
            'State1', state_domain.SubtitledHtml(
                'default_outcome', '<p>Default outcome for State1</p>'),
            False, [], None, None
        )
        state_hint_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for state1</p>'
                )
            ),
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_2', '<p>Hello, this is html2 for state1</p>'
                )
            ),
        ]
        state_solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': 'Answer1',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is solution for state1</p>'
            }
        }
        state_interaction_cust_args = {
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder_0',
                    'unicode_str': ''
                }
            },
            'rows': {'value': 1}
        }
        state.update_next_content_id_index(3)
        state.update_content(
            state_domain.SubtitledHtml.from_dict(state_content_dict))
        state.update_interaction_id('TextInput')
        state.update_interaction_customization_args(state_interaction_cust_args)
        state.update_interaction_answer_groups(
            state_answer_group)
        state.update_interaction_default_outcome(state_default_outcome)
        state.update_interaction_hints(state_hint_list)
        solution = state_domain.Solution.from_dict(
            state.interaction.id, state_solution_dict)
        state.update_interaction_solution(solution)
        exp_services.save_new_exploration('owner_id', exploration)
        html_list = state.get_all_html_content_strings()
        # TextInput rule inputs are plain strings, so only outcomes, hints,
        # solution explanation and content contribute HTML.
        self.assertEqual(
            html_list,
            [
                '<p>state outcome html</p>',
                '<p>Default outcome for State1</p>',
                '<p>Hello, this is html1 for state1</p>',
                '<p>Hello, this is html2 for state1</p>',
                '<p>This is solution for state1</p>',
                '<p>state content html</p>'])
    def test_get_all_html_in_exploration_with_item_selection_interaction(self):
        """Test the method for extracting all the HTML from a state having
        ItemSelectionInput interaction.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['State1'])
        state = exploration.states['State1']
        state_content_dict = {
            'content_id': 'content',
            'html': '<p>state content html</p>'
        }
        # Four selectable choices for the ItemSelectionInput interaction.
        state_customization_args_dict = {
            'maxAllowableSelectionCount': {
                'value': 1
            },
            'minAllowableSelectionCount': {
                'value': 1
            },
            'choices': {
                'value': [
                    {
                        'content_id': 'ca_choices_0',
                        'html': '<p>init_state customization arg html 1</p>'
                    }, {
                        'content_id': 'ca_choices_1',
                        'html': '<p>init_state customization arg html 2</p>'
                    }, {
                        'content_id': 'ca_choices_2',
                        'html': '<p>init_state customization arg html 3</p>'
                    }, {
                        'content_id': 'ca_choices_3',
                        'html': '<p>init_state customization arg html 4</p>'
                    },
                ]
            }
        }
        # One answer group exercising every ItemSelectionInput rule type.
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback', '<p>state outcome html</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Equals',
                    {
                        'x': ['<p>Equals rule_spec html</p>']
                    }),
                state_domain.RuleSpec(
                    'ContainsAtLeastOneOf',
                    {
                        'x': ['<p>ContainsAtLeastOneOf rule_spec html</p>']
                    }),
                state_domain.RuleSpec(
                    'IsProperSubsetOf',
                    {
                        'x': ['<p>IsProperSubsetOf rule_spec html</p>']
                    }),
                state_domain.RuleSpec(
                    'DoesNotContainAtLeastOneOf',
                    {
                        'x': ['<p>DoesNotContainAtLeastOneOf rule_'
                              'spec html</p>']
                    })
            ],
            [],
            None
        )
        state_solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': [
                '<p>state customization arg html 1</p>',
                '<p>state customization arg html 2</p>',
                '<p>state customization arg html 3</p>',
                '<p>state customization arg html 4</p>'
            ],
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is solution for state1</p>'
            }
        }
        state_hint_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for hint 1</p>'
                )
            )
        ]
        state.update_content(
            state_domain.SubtitledHtml.from_dict(state_content_dict))
        state.update_interaction_id('ItemSelectionInput')
        state.update_interaction_answer_groups([state_answer_group])
        state.update_interaction_customization_args(
            state_customization_args_dict)
        state.update_next_content_id_index(4)
        state.update_interaction_hints(state_hint_list)
        solution = state_domain.Solution.from_dict(
            state.interaction.id, state_solution_dict)
        state.update_interaction_solution(solution)
        exp_services.save_new_exploration('owner_id', exploration)
        # Patch the registries so the interaction reports the
        # SetOfHtmlString answer type and supports solutions.
        mock_html_field_types_to_rule_specs_dict = (
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))
        def mock_get_html_field_types_to_rule_specs(unused_cls):
            return mock_html_field_types_to_rule_specs_dict
        def mock_get_interaction_by_id(cls, interaction_id):
            interaction = copy.deepcopy(cls._interactions[interaction_id]) # pylint: disable=protected-access
            interaction.answer_type = 'SetOfHtmlString'
            interaction.can_have_solution = True
            return interaction
        rules_registry_swap = self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs))
        interaction_registry_swap = self.swap(
            interaction_registry.Registry, 'get_interaction_by_id',
            classmethod(mock_get_interaction_by_id))
        with rules_registry_swap, interaction_registry_swap:
            html_list = state.get_all_html_content_strings()
        self.assertEqual(
            html_list,
            [
                '<p>state outcome html</p>',
                '<p>Equals rule_spec html</p>',
                '<p>ContainsAtLeastOneOf rule_spec html</p>',
                '<p>IsProperSubsetOf rule_spec html</p>',
                '<p>DoesNotContainAtLeastOneOf rule_spec html</p>', '',
                '<p>Hello, this is html1 for hint 1</p>',
                '<p>This is solution for state1</p>',
                '<p>state customization arg html 1</p>',
                '<p>state customization arg html 2</p>',
                '<p>state customization arg html 3</p>',
                '<p>state customization arg html 4</p>',
                '<p>init_state customization arg html 1</p>',
                '<p>init_state customization arg html 2</p>',
                '<p>init_state customization arg html 3</p>',
                '<p>init_state customization arg html 4</p>',
                '<p>state content html</p>'])
    def test_rule_spec_with_invalid_html_format(self):
        """Test the method for extracting all the HTML from a state
        when the rule_spec has invalid html format.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['State1'])
        state = exploration.states['State1']
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback', '<p>state outcome html</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Equals',
                    {
                        'x': ['<p>Equals rule_spec html</p>']
                    }),
                state_domain.RuleSpec(
                    'ContainsAtLeastOneOf',
                    {
                        'x': ['<p>ContainsAtLeastOneOf rule_spec html</p>']
                    }),
                state_domain.RuleSpec(
                    'IsProperSubsetOf',
                    {
                        'x': ['<p>IsProperSubsetOf rule_spec html</p>']
                    }),
                state_domain.RuleSpec(
                    'DoesNotContainAtLeastOneOf',
                    {
                        'x': ['<p>DoesNotContainAtLeastOneOf rule_'
                              'spec html</p>']
                    })
            ],
            [],
            None
        )
        state.update_interaction_id('ItemSelectionInput')
        state.update_interaction_answer_groups([state_answer_group])
        # Corrupt every rule spec's declared html format so that HTML
        # extraction must reject it.
        mock_html_field_types_to_rule_specs_dict = copy.deepcopy(
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))
        for html_type_dict in (
                mock_html_field_types_to_rule_specs_dict.values()):
            html_type_dict['format'] = 'invalid format'
        def mock_get_html_field_types_to_rule_specs(unused_cls):
            return mock_html_field_types_to_rule_specs_dict
        with self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs)
        ):
            with self.assertRaisesRegexp(
                Exception,
                'The rule spec does not belong to a valid format.'):
                state.get_all_html_content_strings()
    def test_update_customization_args_with_invalid_content_id(self):
        """Test the method for updating interaction customization arguments
        when a content_id is invalid (set to None).
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['State1'])
        state = exploration.states['State1']
        # The first choice's content_id is deliberately None to trigger
        # validation.
        state_customization_args_dict = {
            'maxAllowableSelectionCount': {
                'value': 1
            },
            'minAllowableSelectionCount': {
                'value': 1
            },
            'choices': {
                'value': [
                    {
                        'content_id': None,
                        'html': '<p>init_state customization arg html 1</p>'
                    }, {
                        'content_id': 'ca_choices_1',
                        'html': '<p>init_state customization arg html 2</p>'
                    }
                ]
            }
        }
        state.update_interaction_id('ItemSelectionInput')
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected content id to be a string, received None'
        ):
            state.update_interaction_customization_args(
                state_customization_args_dict)
    def test_rule_spec_with_html_having_invalid_input_variable(self):
        """Test the method for extracting all the HTML from a state
        when the rule_spec has html but the input variable is invalid.

        The registry maps each HTML-bearing rule type to the names of its
        HTML input variables; here that mapping is patched to reference a
        variable ('y') that the rule spec below does not define, so
        get_all_html_content_strings() must raise.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['State1'])
        state = exploration.states['State1']
        # Answer group whose single 'Equals' rule spec carries HTML in its
        # 'x' input variable.
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback', '<p>state outcome html</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Equals',
                    {
                        'x': ['<p>init_state customization arg html 1</p>']
                    })
            ],
            [],
            None
        )
        state_customization_args_dict = {
            'maxAllowableSelectionCount': {
                'value': 1
            },
            'minAllowableSelectionCount': {
                'value': 1
            },
            'choices': {
                'value': [
                    {
                        'content_id': 'ca_choices_0',
                        'html': '<p>init_state customization arg html 1</p>'
                    }, {
                        'content_id': 'ca_choices_1',
                        'html': '<p>init_state customization arg html 2</p>'
                    }, {
                        'content_id': 'ca_choices_2',
                        'html': '<p>init_state customization arg html 3</p>'
                    }, {
                        'content_id': 'ca_choices_3',
                        'html': '<p>init_state customization arg html 4</p>'
                    }
                ]
            }
        }
        state.update_interaction_id('ItemSelectionInput')
        state.update_interaction_customization_args(
            state_customization_args_dict)
        state.update_interaction_answer_groups([state_answer_group])
        # Deep-copy the registry mapping so the patch below cannot leak into
        # other tests.
        mock_html_field_types_to_rule_specs_dict = copy.deepcopy(
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))
        # Point the ItemSelectionInput 'Equals' rule at an input variable
        # ('y') that the rule spec above does not provide.
        for html_type_dict in (
                mock_html_field_types_to_rule_specs_dict.values()):
            if html_type_dict['interactionId'] == 'ItemSelectionInput':
                html_type_dict['ruleTypes']['Equals']['htmlInputVariables'] = (
                    ['y'])

        def mock_get_html_field_types_to_rule_specs(unused_cls):
            # Stands in for the registry lookup inside the swap below.
            return mock_html_field_types_to_rule_specs_dict

        # With the patched mapping installed, HTML extraction must fail
        # because no valid HTML-bearing input variable can be found.
        with self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs)
        ):
            with self.assertRaisesRegexp(
                Exception,
                'Rule spec should have at least one valid input variable with '
                'Html in it.'):
                state.get_all_html_content_strings()
    def test_get_all_html_when_solution_has_invalid_answer_type(self):
        """Test the method for extracting all the HTML from a state
        when the interaction has a solution but the answer_type for the
        correct_answer is invalid.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['State1'])
        state = exploration.states['State1']
        state_content_dict = {
            'content_id': 'content',
            'html': '<p>state content html</p>'
        }
        state_customization_args_dict = {
            'choices': {
                'value': [
                    {
                        'content_id': 'ca_choices_0',
                        'html': '<p>state customization arg html 1</p>'
                    }, {
                        'content_id': 'ca_choices_1',
                        'html': '<p>state customization arg html 2</p>'
                    }, {
                        'content_id': 'ca_choices_2',
                        'html': '<p>state customization arg html 3</p>'
                    }, {
                        'content_id': 'ca_choices_3',
                        'html': '<p>state customization arg html 4</p>'
                    }
                ]
            },
            'allowMultipleItemsInSamePosition': {
                'value': False
            }
        }
        state_hint_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for hint 1</p>'
                )
            )
        ]
        # The correct_answer entries are HTML strings; validity of the
        # answer *type* is controlled via the interaction registry below.
        state_solution_dict = {
            'interaction_id': '',
            'answer_is_exclusive': True,
            'correct_answer': [
                ['<p>state customization arg html 1</p>'],
                ['<p>state customization arg html 2</p>'],
                ['<p>state customization arg html 3</p>'],
                ['<p>state customization arg html 4</p>']
            ],
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is solution for state1</p>'
            }
        }
        state.update_content(
            state_domain.SubtitledHtml.from_dict(state_content_dict))
        state.update_interaction_id('DragAndDropSortInput')
        state.update_interaction_customization_args(
            state_customization_args_dict)
        state.update_next_content_id_index(4)
        state.update_interaction_hints(state_hint_list)
        solution = state_domain.Solution.from_dict(
            state.interaction.id, state_solution_dict)
        state.update_interaction_solution(solution)
        exp_services.save_new_exploration('owner_id', exploration)
        interaction = (
            interaction_registry.Registry.get_interaction_by_id(
                'DragAndDropSortInput'))
        # Overwrite the registered answer type with one the HTML-extraction
        # logic does not recognise, which should trigger the error below.
        interaction.answer_type = 'DragAndDropHtmlString'
        mock_html_field_types_to_rule_specs_dict = copy.deepcopy(
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))

        def mock_get_html_field_types_to_rule_specs(unused_cls):
            # Stands in for the registry lookup inside the swap below.
            return mock_html_field_types_to_rule_specs_dict

        with self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs)):
            with self.assertRaisesRegexp(
                Exception,
                'The solution does not have a valid '
                'correct_answer type.'):
                state.get_all_html_content_strings()
def test_get_all_html_when_interaction_is_none(self):
"""Test the method for extracting all the HTML from a state
when the state has no interaction.
"""
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exploration.add_states(['State1'])
state = exploration.states['State1']
state_content_dict = {
'content_id': 'content',
'html': '<p>state content html</p>'
}
state.update_content(
state_domain.SubtitledHtml.from_dict(state_content_dict))
exp_services.save_new_exploration('owner_id', exploration)
html_list = state.get_all_html_content_strings()
self.assertEqual(html_list, ['', '<p>state content html</p>'])
    def test_export_state_to_dict(self):
        """Test exporting a state to a dict."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['New state'])
        state_dict = exploration.states['New state'].to_dict()
        # A freshly added state serializes with empty content, no interaction
        # id, and a default outcome whose destination is the state itself.
        expected_dict = {
            'classifier_model_id': None,
            'content': {
                'content_id': 'content',
                'html': ''
            },
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': 'New state',
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': ''
                    },
                    'labelled_as_correct': False,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'id': None,
                'solution': None,
            },
            'linked_skill_id': None,
            'next_content_id_index': 0,
            'param_changes': [],
            'recorded_voiceovers': {
                'voiceovers_mapping': {
                    'content': {},
                    'default_outcome': {}
                }
            },
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'written_translations': {
                'translations_mapping': {
                    'content': {},
                    'default_outcome': {}
                }
            }
        }
        self.assertEqual(expected_dict, state_dict)
def test_can_undergo_classification(self):
"""Test the can_undergo_classification() function."""
exploration_id = 'eid'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exploration_id)
state_with_training_data = exploration.states['Home']
state_without_training_data = exploration.states['End']
# A state with 786 training examples.
self.assertTrue(
state_with_training_data.can_undergo_classification())
# A state with no training examples.
self.assertFalse(
state_without_training_data.can_undergo_classification())
def test_get_training_data(self):
"""Test retrieval of training data."""
exploration_id = 'eid'
test_exp_filepath = os.path.join(
feconf.SAMPLE_EXPLORATIONS_DIR, 'classifier_demo_exploration.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exploration_id)
state = exploration.states['text']
expected_training_data = [{
'answer_group_index': 1,
'answers': [u'cheerful', u'merry', u'ecstatic', u'glad',
u'overjoyed', u'pleased', u'thrilled', u'smile']}]
observed_training_data = state.get_training_data()
self.assertEqual(observed_training_data, expected_training_data)
def test_get_content_html_with_correct_state_name_returns_html(self):
exploration = exp_domain.Exploration.create_default_exploration('0')
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('TextInput')
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
)
]
init_state.update_interaction_hints(hints_list)
self.assertEqual(
init_state.get_content_html('hint_1'), '<p>hint one</p>')
hints_list[0].hint_content.html = '<p>Changed hint one</p>'
init_state.update_interaction_hints(hints_list)
self.assertEqual(
init_state.get_content_html('hint_1'), '<p>Changed hint one</p>')
    def test_rte_content_validation_for_android(self):
        """Checks is_rte_content_supported_on_android() across every HTML
        field of a state: solution explanation, hints, default outcome,
        answer-group feedback and card content. Based on the assertions
        below, collapsible and tabs components are rejected while image,
        link and skillreview components are accepted.
        """
        exploration = exp_domain.Exploration.create_default_exploration('0')
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('TextInput')
        # A collapsible component in the solution explanation is not
        # supported on Android.
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'helloworld!',
            'explanation': {
                'content_id': 'solution',
                'html': (
                    '<oppia-noninteractive-collapsible content-with-value='
                    '"&quot;&lt;p&gt;Hello&lt;/p&gt;&'
                    'quot;" heading-with-value="&quot;SubCollapsible&'
                    'quot;"></oppia-noninteractive-collapsible><p> </p>')
            },
        }
        solution = state_domain.Solution.from_dict(
            init_state.interaction.id, solution_dict
        )
        init_state.update_interaction_solution(solution)
        self.assertFalse(init_state.is_rte_content_supported_on_android())
        # Clearing the offending HTML makes the state supported again.
        solution_dict['explanation']['html'] = ''
        init_state.update_interaction_solution(state_domain.Solution.from_dict(
            init_state.interaction.id, solution_dict))
        self.assertTrue(init_state.is_rte_content_supported_on_android())
        # Same check for hint content.
        hints_list = []
        hints_list.append(
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1',
                    '<oppia-noninteractive-collapsible content-with-value='
                    '"&quot;&lt;p&gt;Hello&lt;/p&gt;&'
                    'quot;" heading-with-value="&quot;SubCollapsible&'
                    'quot;"></oppia-noninteractive-collapsible><p> </p>'
                )
            )
        )
        init_state.update_interaction_hints(hints_list)
        self.assertFalse(init_state.is_rte_content_supported_on_android())
        hints_list[0].hint_content.html = ''
        init_state.update_interaction_hints(hints_list)
        self.assertTrue(init_state.is_rte_content_supported_on_android())
        # Same check for the default outcome's feedback.
        default_outcome = state_domain.Outcome(
            'Introduction', state_domain.SubtitledHtml(
                'default_outcome', (
                    '<oppia-noninteractive-collapsible content-with-value='
                    '"&quot;&lt;p&gt;Hello&lt;/p&gt;&'
                    'quot;" heading-with-value="&quot;Sub&quot;">'
                    '</oppia-noninteractive-collapsible><p> </p>')),
            False, [], None, None
        )
        init_state.update_interaction_default_outcome(default_outcome)
        self.assertFalse(init_state.is_rte_content_supported_on_android())
        default_outcome.feedback.html = ''
        init_state.update_interaction_default_outcome(default_outcome)
        self.assertTrue(init_state.is_rte_content_supported_on_android())
        # A tabs component in answer-group feedback is not supported.
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', (
                        '<oppia-noninteractive-tabs tab_contents-with-value'
                        '=\"[{&quot;content&quot;:&quot;&lt;p'
                        '&gt;&lt;i&gt;lorem ipsum&lt;/i&'
                        'gt;&lt;/p&gt;&quot;,&quot;title&'
                        'quot;:&quot;hello&quot;}]\">'
                        '</oppia-noninteractive-tabs>')),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Equals',
                            'normalizedStrSet': ['Test']
                        }
                    })
            ],
            [],
            None
        )
        init_state.update_interaction_answer_groups(
            [state_answer_group])
        self.assertFalse(init_state.is_rte_content_supported_on_android())
        # An image component in the same feedback, by contrast, is fine.
        state_answer_group.outcome.feedback.html = (
            '<p><oppia-noninteractive-image caption-with-value="&quot;'
            '&quot;" filepath-with-value="&quot;startBlue.png&'
            'quot;" alt-with-value="&quot;&quot;">'
            '</oppia-noninteractive-image></p>')
        init_state.update_interaction_answer_groups(
            [state_answer_group])
        self.assertTrue(init_state.is_rte_content_supported_on_android())
        # Tabs in the card content are not supported either...
        init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'content',
                'html': (
                    '<oppia-noninteractive-tabs tab_contents-with-value'
                    '=\"[{&quot;content&quot;:&quot;&lt;p'
                    '&gt;&lt;i&gt;lorem ipsum&lt;/i&'
                    'gt;&lt;/p&gt;&quot;,&quot;title&'
                    'quot;:&quot;hello&quot;}]\">'
                    '</oppia-noninteractive-tabs>')
            }))
        self.assertFalse(init_state.is_rte_content_supported_on_android())
        # ...but link components are.
        init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'content',
                'html': (
                    '<p><oppia-noninteractive-link text-with-value="'
                    '&quot;What is a link?&quot;" url-with-'
                    'value="&quot;htt://link.com&'
                    ';quot;"></oppia-noninteractive-link></p>')
            }))
        self.assertTrue(init_state.is_rte_content_supported_on_android())
        # Skill-review components are supported as well.
        init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'content',
                'html': (
                    '<p><oppia-noninteractive-skillreview text-with-value="'
                    '&quot;&quot;" skill_id-with-value="&quot;'
                    '&quot;"></oppia-noninteractive-skillreview></p>')
            }))
        self.assertTrue(init_state.is_rte_content_supported_on_android())
def test_interaction_validation_for_android(self):
_checked_interaction_ids = set()
def _create_init_state_for_interaction_verification():
"""Creates an init state for interaction verification."""
exploration = (
exp_domain.Exploration.create_default_exploration('0'))
return exploration.states[exploration.init_state_name]
def _verify_interaction_supports_android(self, interaction_id):
"""Checks that the provided interaction is supported on Android."""
init_state = _create_init_state_for_interaction_verification()
init_state.update_interaction_id(interaction_id)
self.assertTrue(
init_state.interaction.is_supported_on_android_app())
_checked_interaction_ids.add(interaction_id)
def _verify_interaction_does_not_support_android(self, interaction_id):
"""Checks that the provided interaction is not supported on
Android.
"""
init_state = _create_init_state_for_interaction_verification()
init_state.update_interaction_id(interaction_id)
self.assertFalse(
init_state.interaction.is_supported_on_android_app())
_checked_interaction_ids.add(interaction_id)
def _verify_all_interaction_ids_checked(self):
"""Verifies that all the interaction ids are checked."""
all_interaction_ids = set(
interaction_registry.Registry.get_all_interaction_ids())
missing_interaction_ids = (
all_interaction_ids - _checked_interaction_ids)
self.assertFalse(missing_interaction_ids)
_verify_interaction_supports_android(self, 'AlgebraicExpressionInput')
_verify_interaction_supports_android(self, 'Continue')
_verify_interaction_supports_android(self, 'DragAndDropSortInput')
_verify_interaction_supports_android(self, 'EndExploration')
_verify_interaction_supports_android(self, 'FractionInput')
_verify_interaction_supports_android(self, 'ImageClickInput')
_verify_interaction_supports_android(self, 'ItemSelectionInput')
_verify_interaction_supports_android(self, 'MathEquationInput')
_verify_interaction_supports_android(self, 'MultipleChoiceInput')
_verify_interaction_supports_android(self, 'NumberWithUnits')
_verify_interaction_supports_android(self, 'NumericInput')
_verify_interaction_supports_android(self, 'TextInput')
_verify_interaction_supports_android(self, 'NumericExpressionInput')
_verify_interaction_supports_android(self, 'RatioExpressionInput')
_verify_interaction_supports_android(self, None)
_verify_interaction_does_not_support_android(self, 'CodeRepl')
_verify_interaction_does_not_support_android(self, 'GraphInput')
_verify_interaction_does_not_support_android(self, 'InteractiveMap')
_verify_interaction_does_not_support_android(self, 'MusicNotesInput')
_verify_interaction_does_not_support_android(self, 'PencilCodeEditor')
_verify_interaction_does_not_support_android(self, 'SetInput')
_verify_all_interaction_ids_checked(self)
def test_get_content_html_with_invalid_content_id_raise_error(self):
exploration = exp_domain.Exploration.create_default_exploration('0')
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('TextInput')
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
)
]
init_state.update_interaction_hints(hints_list)
self.assertEqual(
init_state.get_content_html('hint_1'), '<p>hint one</p>')
with self.assertRaisesRegexp(
ValueError, 'Content ID Invalid id does not exist'):
init_state.get_content_html('Invalid id')
    def test_get_content_id_mapping_needing_translations_with_existing_translations(self): # pylint: disable=line-too-long
        """Content ids that already have a translation in the target
        language should be excluded from the needing-translations mapping.
        """
        exploration = exp_domain.Exploration.create_default_exploration('0')
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'content',
                'html': '<p>This is content</p>'
            }))
        init_state.update_interaction_id('TextInput')
        default_outcome = state_domain.Outcome(
            'Introduction', state_domain.SubtitledHtml(
                'default_outcome', '<p>The default outcome.</p>'),
            False, [], None, None
        )
        init_state.update_interaction_default_outcome(default_outcome)
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'), False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Equals',
                            'normalizedStrSet': ['Test']
                        }
                    })
            ],
            [],
            None
        )
        init_state.update_interaction_answer_groups(
            [state_answer_group])
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
            )
        ]
        init_state.update_interaction_hints(hints_list)
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'helloworld!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            },
        }
        solution = state_domain.Solution.from_dict(
            init_state.interaction.id, solution_dict)
        init_state.update_interaction_solution(solution)
        # 'content' already carries a Hindi translation, so it must not
        # appear in the mapping computed below; all other ids are empty and
        # therefore still need translating.
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>hello!</p>',
                        'needs_update': False
                    }
                },
                'hint_1': {},
                'default_outcome': {},
                'solution': {},
                'feedback_1': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        init_state.update_written_translations(written_translations)
        content_id_mapping_needing_translations = (
            init_state.get_content_id_mapping_needing_translations('hi'))
        self.assertEqual(
            content_id_mapping_needing_translations[
                'hint_1'
            ].content,
            '<p>hint one</p>'
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'solution'
            ].content,
            '<p>hello_world is a string</p>'
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'feedback_1'
            ].content,
            '<p>Feedback</p>',
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'default_outcome'
            ].content,
            '<p>The default outcome.</p>'
        )
    def test_get_content_id_mapping_needing_translations_with_interaction_translations(self): # pylint: disable=line-too-long
        """Interaction customization args (the TextInput placeholder) and
        rule inputs should be included in the needing-translations mapping
        alongside content, hints, feedback and the solution.
        """
        exploration = exp_domain.Exploration.create_default_exploration('0')
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'content',
                'html': '<p>This is content</p>'
            }))
        init_state.update_interaction_id('TextInput')
        state_interaction_cust_args = {
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder_0',
                    'unicode_str': 'Placeholder'
                }
            },
            'rows': {'value': 1}
        }
        init_state.update_interaction_customization_args(
            state_interaction_cust_args)
        default_outcome = state_domain.Outcome(
            'Introduction', state_domain.SubtitledHtml(
                'default_outcome', '<p>The default outcome.</p>'),
            False, [], None, None
        )
        init_state.update_interaction_default_outcome(default_outcome)
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_4',
                            'normalizedStrSet': ['Input1', 'Input2']
                        }
                    })
            ],
            [],
            None
        )
        init_state.update_interaction_answer_groups(
            [state_answer_group])
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
            )
        ]
        init_state.update_interaction_hints(hints_list)
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'helloworld!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            },
        }
        solution = state_domain.Solution.from_dict(
            init_state.interaction.id, solution_dict)
        init_state.update_interaction_solution(solution)
        # No translations exist for any id, so every id should be returned.
        written_translations_dict = {
            'translations_mapping': {
                'content': {},
                'hint_1': {},
                'default_outcome': {},
                'solution': {},
                'feedback_1': {},
                'ca_placeholder_0': {},
                'rule_input_4': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        init_state.update_written_translations(written_translations)
        content_id_mapping_needing_translations = (
            init_state.get_content_id_mapping_needing_translations('hi'))
        self.assertEqual(
            content_id_mapping_needing_translations[
                'hint_1'
            ].content,
            '<p>hint one</p>'
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'solution'
            ].content,
            '<p>hello_world is a string</p>'
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'feedback_1'
            ].content,
            '<p>Feedback</p>'
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'default_outcome'
            ].content,
            '<p>The default outcome.</p>'
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'content'
            ].content,
            '<p>This is content</p>',
        )
        self.assertEqual(
            content_id_mapping_needing_translations[
                'ca_placeholder_0'
            ].content,
            'Placeholder'
        )
        # Rule inputs come back as a richer translatable item that also
        # records the interaction id and the rule type.
        rule_translatable_item = content_id_mapping_needing_translations[
            'rule_input_4'
        ]
        self.assertEqual(rule_translatable_item.content, ['Input1', 'Input2'])
        self.assertEqual(rule_translatable_item.interaction_id, 'TextInput')
        self.assertEqual(rule_translatable_item.rule_type, 'Contains')
def test_get_content_id_mapping_needing_translations_for_set_input_rule(self): # pylint: disable=line-too-long
exploration = exp_domain.Exploration.create_default_exploration('0')
init_state = exploration.states[exploration.init_state_name]
init_state.update_content(
state_domain.SubtitledHtml.from_dict({
'content_id': 'content',
'html': '<p>This is content</p>'
}))
init_state.update_interaction_id('SetInput')
state_answer_group = state_domain.AnswerGroup(
state_domain.Outcome(
exploration.init_state_name, state_domain.SubtitledHtml(
'feedback_1', '<p>Feedback</p>'),
False, [], None, None),
[
state_domain.RuleSpec(
'Equals',
{
'x': {
'contentId': 'rule_input_4',
'unicodeStrSet': ['Input1', 'Input2']
}
})
],
[],
None
)
init_state.update_interaction_answer_groups(
[state_answer_group])
written_translations_dict = {
'translations_mapping': {
'content': {},
'feedback_1': {},
'rule_input_4': {}
}
}
written_translations = state_domain.WrittenTranslations.from_dict(
written_translations_dict)
init_state.update_written_translations(written_translations)
content_id_mapping_needing_translations = (
init_state.get_content_id_mapping_needing_translations('hi'))
rule_translatable_item = content_id_mapping_needing_translations[
'rule_input_4'
]
self.assertEqual(rule_translatable_item.content, ['Input1', 'Input2'])
self.assertEqual(rule_translatable_item.interaction_id, 'SetInput')
self.assertEqual(rule_translatable_item.rule_type, 'Equals')
    def test_get_content_id_mapping_needing_translations_does_not_return_numeric_content(self): # pylint: disable=line-too-long
        """Choices whose text is numeric ('100') do not need translation and
        must be omitted from the mapping, while '1,000' (which contains a
        separator) is still returned.
        """
        exploration = exp_domain.Exploration.create_default_exploration('0')
        init_state = exploration.states[exploration.init_state_name]
        # Set the content.
        init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'content',
                'html': '<p>This is content</p>'
            }))
        # Set the multiple choice interaction.
        init_state.update_interaction_id('MultipleChoiceInput')
        state_interaction_cust_args = {
            'showChoicesInShuffledOrder': {
                'value': True
            },
            'choices': {
                'value': [
                    {
                        'content_id': 'ca_choices_0',
                        'html': '\u003cp\u003eoption 1\u003c/p\u003e'
                    },
                    {
                        'content_id': 'ca_choices_1',
                        'html': '1,000'
                    },
                    {
                        'content_id': 'ca_choices_2',
                        'html': '100'
                    }
                ]
            }
        }
        init_state.update_interaction_customization_args(
            state_interaction_cust_args)
        # Set the default outcome.
        default_outcome = state_domain.Outcome(
            'Introduction', state_domain.SubtitledHtml(
                'default_outcome', '<p>The default outcome.</p>'),
            False, [], None, None
        )
        init_state.update_interaction_default_outcome(default_outcome)
        # Set the translations (none exist yet for any content id).
        written_translations_dict = {
            'translations_mapping': {
                'content': {},
                'default_outcome': {},
                'ca_choices_0': {},
                'ca_choices_1': {},
                'ca_choices_2': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        init_state.update_written_translations(written_translations)
        # Choice 2 should not be returned as its value is numeric.
        content_id_mapping_needing_translations = (
            init_state.get_content_id_mapping_needing_translations('hi'))
        self.assertEqual(
            content_id_mapping_needing_translations[
                'content'
            ].content, '<p>This is content</p>')
        self.assertEqual(
            content_id_mapping_needing_translations[
                'default_outcome'
            ].content, '<p>The default outcome.</p>')
        self.assertEqual(
            content_id_mapping_needing_translations[
                'ca_choices_0'
            ].content, '\u003cp\u003eoption 1\u003c/p\u003e')
        self.assertEqual(
            content_id_mapping_needing_translations[
                'ca_choices_1'
            ].content, '1,000')
        self.assertFalse(
            'ca_choices_2' in content_id_mapping_needing_translations)
def test_content_id_existance_checks_work_correctly(self):
exploration = exp_domain.Exploration.create_default_exploration('0')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.has_content_id('content'), True)
with self.assertRaisesRegexp(
ValueError, 'Content ID content0 does not exist'):
init_state.get_content_html('content0')
self.assertEqual(init_state.has_content_id('content0'), False)
def test_add_translation_works_correctly(self):
exploration = exp_domain.Exploration.create_default_exploration('0')
init_state = exploration.states[exploration.init_state_name]
init_state.update_content(
state_domain.SubtitledHtml.from_dict({
'content_id': 'content',
'html': '<p>This is content</p>'
}))
self.assertEqual(init_state.get_translation_counts(), {})
init_state.add_translation('content', 'hi', '<p>Translated text</p>')
self.assertEqual(init_state.get_translation_counts(), {'hi': 1})
def test_get_translation_counts_returns_correct_value(self):
state = state_domain.State.create_default_state(None)
state.update_content(
state_domain.SubtitledHtml.from_dict({
'content_id': 'content',
'html': '<p>This is content</p>'
}))
self.set_interaction_for_state(state, 'TextInput')
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>hint one</p>'))]
state.update_interaction_hints(hints_list)
solution_dict = {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': '<p>hello_world is a string</p>'
},
}
solution = state_domain.Solution.from_dict(
state.interaction.id, solution_dict)
state.update_interaction_solution(solution)
state.validate({}, True)
state.add_translation('hint_1', 'hi', 'Some translation')
state.add_translation('content', 'hi', 'Some translation')
self.assertEqual(state.get_translation_counts(), {'hi': 2})
# Adding interaction placeholder translation won't be reflected in
# get_translation_counts method.
state.add_translation('ca_placeholder_0', 'hi', 'Some translation')
self.assertEqual(state.get_translation_counts(), {'hi': 2})
    def test_state_operations(self):
        """Test adding, updating and checking existence of states.

        Also exercises renaming to and from the legacy 'END' state name to
        verify no END-specific behavior remains, and state deletion.
        """
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        self.assertNotIn('invalid_state_name', exploration.states)
        self.assertEqual(len(exploration.states), 1)
        # Rename the auto-created initial state; the init_state_name pointer
        # must follow.
        default_state_name = exploration.init_state_name
        exploration.rename_state(default_state_name, 'Renamed state')
        self.assertEqual(len(exploration.states), 1)
        self.assertEqual(exploration.init_state_name, 'Renamed state')
        # Add a new state.
        exploration.add_states(['State 2'])
        self.assertEqual(len(exploration.states), 2)
        # It is OK to rename a state to the same name.
        exploration.rename_state('State 2', 'State 2')
        # But it is not OK to add or rename a state using a name that already
        # exists.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.add_states(['State 2'])
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'Renamed state')
        # And it is OK to rename a state to 'END' (old terminal pseudostate). It
        # is tested throughout this test because a lot of old behavior used to
        # be specific to states named 'END'. These tests validate that is no
        # longer the situation.
        exploration.rename_state('State 2', 'END')
        # Should successfully be able to name it back.
        exploration.rename_state('END', 'State 2')
        # The exploration now has exactly two states.
        self.assertNotIn(default_state_name, exploration.states)
        self.assertIn('Renamed state', exploration.states)
        self.assertIn('State 2', exploration.states)
        # Can successfully add 'END' state.
        exploration.add_states(['END'])
        # Should fail to rename like any other state.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'END')
        # Ensure the other states are connected to END.
        exploration.states[
            'Renamed state'].interaction.default_outcome.dest = 'State 2'
        exploration.states['State 2'].interaction.default_outcome.dest = 'END'
        # Ensure the other states have interactions.
        self.set_interaction_for_state(
            exploration.states['Renamed state'], 'TextInput')
        self.set_interaction_for_state(
            exploration.states['State 2'], 'TextInput')
        # Other miscellaneous requirements for validation.
        exploration.title = 'Title'
        exploration.category = 'Category'
        exploration.objective = 'Objective'
        # The exploration should NOT be terminable even though it has a state
        # called 'END' and everything else is connected to it.
        with self.assertRaisesRegexp(
            Exception,
            'This state does not have any interaction specified.'):
            exploration.validate(strict=True)
        # Renaming the node to something other than 'END' and giving it an
        # EndExploration is enough to validate it, though it cannot have a
        # default outcome or answer groups.
        exploration.rename_state('END', 'AnotherEnd')
        another_end_state = exploration.states['AnotherEnd']
        self.set_interaction_for_state(another_end_state, 'EndExploration')
        another_end_state.update_interaction_default_outcome(None)
        exploration.validate(strict=True)
        # Name it back for final tests.
        exploration.rename_state('AnotherEnd', 'END')
        # Should be able to successfully delete it.
        exploration.delete_state('END')
        self.assertNotIn('END', exploration.states)
def test_update_solicit_answer_details(self):
"""Test updating solicit_answer_details."""
state = state_domain.State.create_default_state('state_1')
self.assertEqual(state.solicit_answer_details, False)
state.update_solicit_answer_details(True)
self.assertEqual(state.solicit_answer_details, True)
def test_update_solicit_answer_details_with_non_bool_fails(self):
"""Test updating solicit_answer_details with non bool value."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.solicit_answer_details, False)
with self.assertRaisesRegexp(Exception, (
'Expected solicit_answer_details to be a boolean, received')):
init_state.update_solicit_answer_details('abc')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.solicit_answer_details, False)
def test_update_linked_skill_id(self):
"""Test updating linked_skill_id."""
state = state_domain.State.create_default_state('state_1')
self.assertEqual(state.linked_skill_id, None)
state.update_linked_skill_id('string_2')
self.assertEqual(state.linked_skill_id, 'string_2')
def test_update_card_is_checkpoint(self):
"""Test update card_is_checkpoint."""
state = state_domain.State.create_default_state('state_1')
self.assertEqual(state.card_is_checkpoint, False)
state.update_card_is_checkpoint(True)
self.assertEqual(state.card_is_checkpoint, True)
def test_update_card_is_checkpoint_with_non_bool_fails(self):
"""Test updating card_is_checkpoint with non bool value."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.card_is_checkpoint, True)
with self.assertRaisesRegexp(Exception, (
'Expected card_is_checkpoint to be a boolean, received')):
init_state.update_card_is_checkpoint('abc')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.card_is_checkpoint, True)
    def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self):
        """Test the method for converting all the HTML in a state having
        DragAndDropSortInput interaction.

        Builds a full state dict whose content, written translations,
        answer-group rule inputs, customization args, hints and solution all
        embed math RTE markup in the old schema, and asserts that
        convert_html_fields_in_state rewrites every occurrence to the new
        schema while leaving all other fields untouched.
        """
        # Math RTE markup in the old schema (raw_latex-with-value attribute).
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # The same markup after migration to the new schema (a math_content
        # dict holding raw_latex and an empty svg_filename).
        html_with_new_math_schema = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        # Written translations fixture containing the old-schema markup; the
        # non-HTML translations ('Hey!', 'hello!') must pass through unchanged.
        written_translations_dict_with_old_math_schema = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'data_format': 'html',
                        'translation': html_with_old_math_schema,
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Hey!',
                        'needs_update': False
                    }
                },
                'feedback_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation': html_with_old_math_schema,
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations_dict_with_new_math_schema = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'data_format': 'html',
                        'translation': html_with_new_math_schema,
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Hey!',
                        'needs_update': False
                    }
                },
                'feedback_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation': html_with_new_math_schema,
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        # Answer group exercising every DragAndDropSortInput rule type that
        # can carry HTML in its inputs.
        answer_group_dict_with_old_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': [[html_with_old_math_schema]]
                },
                'rule_type': 'IsEqualToOrdering'
            }, {
                'rule_type': 'HasElementXAtPositionY',
                'inputs': {
                    'x': html_with_old_math_schema,
                    'y': 2
                }
            }, {
                'rule_type': 'IsEqualToOrdering',
                'inputs': {
                    'x': [[html_with_old_math_schema]]
                }
            }, {
                'rule_type': 'HasElementXBeforeElementY',
                'inputs': {
                    'x': html_with_old_math_schema,
                    'y': html_with_old_math_schema
                }
            }, {
                'rule_type': 'IsEqualToOrderingWithOneItemAtIncorrectPosition',
                'inputs': {
                    'x': [[html_with_old_math_schema]]
                }
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        answer_group_dict_with_new_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': [[html_with_new_math_schema]]
                },
                'rule_type': 'IsEqualToOrdering'
            }, {
                'rule_type': 'HasElementXAtPositionY',
                'inputs': {
                    'x': html_with_new_math_schema,
                    'y': 2
                }
            }, {
                'rule_type': 'IsEqualToOrdering',
                'inputs': {
                    'x': [[html_with_new_math_schema]]
                }
            }, {
                'rule_type': 'HasElementXBeforeElementY',
                'inputs': {
                    'x': html_with_new_math_schema,
                    'y': html_with_new_math_schema
                }
            }, {
                'rule_type': 'IsEqualToOrderingWithOneItemAtIncorrectPosition',
                'inputs': {
                    'x': [[html_with_new_math_schema]]
                }
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        # Full input state: old-schema markup is planted in the choices,
        # hints, solution and answer groups above.
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'answer_groups': [answer_group_dict_with_old_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_0',
                            'html': html_with_old_math_schema
                        }, {
                            'content_id': 'ca_choices_1',
                            'html': '<p>2</p>'
                        }, {
                            'content_id': 'ca_choices_2',
                            'html': '<p>3</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>4</p>'
                        }]
                    },
                    'allowMultipleItemsInSamePosition': {'value': True}
                },
                'confirmed_unclassified_answers': [],
                'id': 'DragAndDropSortInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_old_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_old_math_schema
                        }
                    }
                ],
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': [
                        [html_with_old_math_schema],
                        ['<p>2</p>'],
                        ['<p>3</p>'],
                        ['<p>4</p>']
                    ],
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is solution for state1</p>'
                    }
                }
            },
            'written_translations': (
                written_translations_dict_with_old_math_schema)
        }
        # Expected output: identical structure with every old-schema snippet
        # replaced by its new-schema equivalent.
        state_dict_with_new_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'answer_groups': [answer_group_dict_with_new_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_0',
                            'html': html_with_new_math_schema
                        }, {
                            'content_id': 'ca_choices_1',
                            'html': '<p>2</p>'
                        }, {
                            'content_id': 'ca_choices_2',
                            'html': '<p>3</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>4</p>'
                        }]
                    },
                    'allowMultipleItemsInSamePosition': {'value': True}
                },
                'confirmed_unclassified_answers': [],
                'id': 'DragAndDropSortInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_new_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_new_math_schema
                        }
                    }
                ],
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': [
                        [html_with_new_math_schema],
                        ['<p>2</p>'],
                        ['<p>3</p>'],
                        ['<p>4</p>']
                    ],
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is solution for state1</p>'
                    }
                }
            },
            'written_translations': (
                written_translations_dict_with_new_math_schema)
        }
        # state_uses_old_rule_template_schema=True is required because the
        # rule inputs above carry HTML in the pre-migration rule format.
        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict_with_old_math_schema,
                html_validation_service.
                add_math_content_to_math_rte_components,
                state_uses_old_rule_template_schema=True),
            state_dict_with_new_math_schema)
    def test_convert_html_fields_in_state_with_item_selection_interaction(self):
        """Test the method for converting all the HTML in a state having
        ItemSelection interaction.

        Plants old-schema math RTE markup in the answer-group rule inputs,
        outcome feedback, customization-arg choices and the solution of an
        ItemSelectionInput state, and checks that every occurrence is
        rewritten to the new schema.
        """
        # Math RTE markup in the old schema (raw_latex-with-value attribute).
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # The same markup after migration to the new schema (math_content
        # dict with raw_latex and an empty svg_filename).
        html_with_new_math_schema = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        # Answer group covering every ItemSelectionInput rule type that
        # carries HTML in its inputs.
        answer_group_with_old_math_schema = [{
            'rule_specs': [{
                'rule_type': 'Equals',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }, {
                'rule_type': 'ContainsAtLeastOneOf',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }, {
                'rule_type': 'IsProperSubsetOf',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }, {
                'rule_type': 'DoesNotContainAtLeastOneOf',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }],
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback',
                    'html': html_with_old_math_schema
                },
                'param_changes': [],
                'labelled_as_correct': False,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        answer_group_with_new_math_schema = [{
            'rule_specs': [{
                'rule_type': 'Equals',
                'inputs': {
                    'x': [html_with_new_math_schema]
                }
            }, {
                'rule_type': 'ContainsAtLeastOneOf',
                'inputs': {
                    'x': [html_with_new_math_schema]
                }
            }, {
                'rule_type': 'IsProperSubsetOf',
                'inputs': {
                    'x': [html_with_new_math_schema]
                }
            }, {
                'rule_type': 'DoesNotContainAtLeastOneOf',
                'inputs': {
                    'x': [html_with_new_math_schema]
                }
            }],
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback',
                    'html': html_with_new_math_schema
                },
                'param_changes': [],
                'labelled_as_correct': False,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        # Input state with old-schema markup in the solution and choices.
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': [
                        html_with_old_math_schema,
                        '<p>state customization arg html 2</p>',
                        '<p>state customization arg html 3</p>',
                        '<p>state customization arg html 4</p>'
                    ],
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is solution for state1</p>'
                    }
                },
                'answer_groups': answer_group_with_old_math_schema,
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'maxAllowableSelectionCount': {
                        'value': 1
                    },
                    'minAllowableSelectionCount': {
                        'value': 1
                    },
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_0',
                            'html': '<p>init_state customization arg html 1</p>'
                        }, {
                            'content_id': 'ca_choices_1',
                            'html': html_with_old_math_schema
                        }, {
                            'content_id': 'ca_choices_2',
                            'html': '<p>init_state customization arg html 3</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>init_state customization arg html 4</p>'
                        }]
                    }
                },
                'confirmed_unclassified_answers': [],
                'id': 'ItemSelectionInput',
                'hints': []
            }
        }
        # Expected output: same structure with every snippet migrated.
        state_dict_with_new_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': [
                        html_with_new_math_schema,
                        '<p>state customization arg html 2</p>',
                        '<p>state customization arg html 3</p>',
                        '<p>state customization arg html 4</p>'
                    ],
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is solution for state1</p>'
                    }
                },
                'answer_groups': answer_group_with_new_math_schema,
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'maxAllowableSelectionCount': {
                        'value': 1
                    },
                    'minAllowableSelectionCount': {
                        'value': 1
                    },
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_0',
                            'html': '<p>init_state customization arg html 1</p>'
                        }, {
                            'content_id': 'ca_choices_1',
                            'html': html_with_new_math_schema
                        }, {
                            'content_id': 'ca_choices_2',
                            'html': '<p>init_state customization arg html 3</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>init_state customization arg html 4</p>'
                        }]
                    }
                },
                'confirmed_unclassified_answers': [],
                'id': 'ItemSelectionInput',
                'hints': []
            }
        }
        # NOTE(review): this mutates the cached interaction spec dict for
        # state schema version 41 in place so that the solution field is
        # converted; the change may persist across tests in the same run —
        # confirm this side effect is intended.
        interaction_registry.Registry.get_all_specs_for_state_schema_version(
            41)['ItemSelectionInput']['can_have_solution'] = True

        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict_with_old_math_schema,
                html_validation_service.
                add_math_content_to_math_rte_components,
                state_uses_old_rule_template_schema=True),
            state_dict_with_new_math_schema)
    def test_convert_html_fields_in_state_with_text_input_interaction(self):
        """Test the method for converting all the HTML in a state having
        TextInput interaction.

        TextInput rule inputs are plain strings, so only the content,
        feedback, solution explanation and hints should be rewritten from
        the old math schema to the new one; the rule specs stay unchanged.
        """
        # Math RTE markup in the old schema (raw_latex-with-value attribute).
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # The same markup after migration to the new schema.
        html_with_new_math_schema = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        answer_group_with_old_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': html_with_old_math_schema
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        answer_group_with_new_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': html_with_new_math_schema
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        # Input state with old-schema markup in content, feedback, solution
        # explanation and hints.
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': html_with_old_math_schema
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': 'Answer1',
                    'explanation': {
                        'content_id': 'solution',
                        'html': html_with_old_math_schema
                    }
                },
                'answer_groups': [answer_group_with_old_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_with_old_math_schema
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'rows': {
                        'value': 1
                    },
                    'placeholder': {
                        'value': {
                            'content_id': 'ca_placeholder_0',
                            'unicode_str': ''
                        }
                    }
                },
                'confirmed_unclassified_answers': [],
                'id': 'TextInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_old_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_old_math_schema
                        }
                    }]
            }
        }
        # Expected output: same structure with every snippet migrated.
        state_dict_with_new_math_schema = {
            'content': {
                'content_id': 'content', 'html': html_with_new_math_schema
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': 'Answer1',
                    'explanation': {
                        'content_id': 'solution',
                        'html': html_with_new_math_schema
                    }
                },
                'answer_groups': [answer_group_with_new_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_with_new_math_schema
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'rows': {
                        'value': 1
                    },
                    'placeholder': {
                        'value': {
                            'content_id': 'ca_placeholder_0',
                            'unicode_str': ''
                        }
                    }
                },
                'confirmed_unclassified_answers': [],
                'id': 'TextInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_new_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_new_math_schema
                        }
                    }]
            }
        }
        # No old-rule-template flag needed: TextInput rule inputs carry no
        # HTML, so the default conversion path suffices.
        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict_with_old_math_schema,
                html_validation_service.
                add_math_content_to_math_rte_components),
            state_dict_with_new_math_schema)
    def test_convert_html_fields_in_state_with_math_expression_input(self):
        """Test the method for converting all the HTML in a state having
        MathExpressionInput interaction.

        The interaction has no HTML-bearing customization args or rule
        inputs, so only content, feedback, solution explanation and hints
        should be migrated from the old math schema to the new one.
        """
        # Math RTE markup in the old schema (raw_latex-with-value attribute).
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # The same markup after migration to the new schema.
        html_with_new_math_schema = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        answer_group_with_old_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': html_with_old_math_schema
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        answer_group_with_new_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': html_with_new_math_schema
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        # Input state with old-schema markup in content, feedback, solution
        # explanation and hints.
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': html_with_old_math_schema
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': '42',
                    'explanation': {
                        'content_id': 'solution',
                        'html': html_with_old_math_schema
                    }
                },
                'answer_groups': [answer_group_with_old_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_with_old_math_schema
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {},
                'confirmed_unclassified_answers': [],
                'id': 'MathExpressionInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_old_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_old_math_schema
                        }
                    }]
            }
        }
        # Expected output: same structure with every snippet migrated.
        state_dict_with_new_math_schema = {
            'content': {
                'content_id': 'content', 'html': html_with_new_math_schema
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': '42',
                    'explanation': {
                        'content_id': 'solution',
                        'html': html_with_new_math_schema
                    }
                },
                'answer_groups': [answer_group_with_new_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_with_new_math_schema
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {},
                'confirmed_unclassified_answers': [],
                'id': 'MathExpressionInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_new_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_new_math_schema
                        }
                    }]
            }
        }
        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict_with_old_math_schema,
                html_validation_service.
                add_math_content_to_math_rte_components),
            state_dict_with_new_math_schema)
    def test_convert_html_fields_in_state_with_old_written_translations(self):
        """Test the method for converting all the HTML in a state having
        written_translations in the old format. This is needed for converting
        older snapshots (prior to state schema version 35) properly.

        The old written-translations format stores an 'html' key per
        translation instead of 'data_format'/'translation'; the converter
        must migrate the math markup inside that key while preserving the
        old field layout.

        TODO(#11950): Remove this test once old schema migration functions are
        deleted.
        """
        # Math RTE markup in the old schema (raw_latex-with-value attribute).
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # The same markup after migration to the new schema.
        html_with_new_math_schema = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        # Old-format written translations: note the 'html' key in place of
        # the newer 'data_format'/'translation' pair.
        written_translations_dict_with_old_math_schema_and_old_format = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'html': html_with_old_math_schema,
                        'needs_update': True
                    },
                    'hi': {
                        'html': 'Hey!',
                        'needs_update': False
                    }
                },
                'feedback_1': {
                    'hi': {
                        'html': html_with_old_math_schema,
                        'needs_update': False
                    },
                    'en': {
                        'html': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations_dict_with_new_math_schema_and_old_format = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'html': html_with_new_math_schema,
                        'needs_update': True
                    },
                    'hi': {
                        'html': 'Hey!',
                        'needs_update': False
                    }
                },
                'feedback_1': {
                    'hi': {
                        'html': html_with_new_math_schema,
                        'needs_update': False
                    },
                    'en': {
                        'html': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        answer_group_dict_with_old_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': [[html_with_old_math_schema]]
                },
                'rule_type': 'IsEqualToOrdering'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        answer_group_dict_with_new_math_schema = {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': False,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': [[html_with_new_math_schema]]
                },
                'rule_type': 'IsEqualToOrdering'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        # Input state: a DragAndDropSortInput state carrying the old-format
        # written translations alongside old-schema markup everywhere else.
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'answer_groups': [answer_group_dict_with_old_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_0',
                            'html': html_with_old_math_schema
                        }, {
                            'content_id': 'ca_choices_1',
                            'html': '<p>2</p>'
                        }, {
                            'content_id': 'ca_choices_2',
                            'html': '<p>3</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>4</p>'
                        }]
                    },
                    'allowMultipleItemsInSamePosition': {'value': True}
                },
                'confirmed_unclassified_answers': [],
                'id': 'DragAndDropSortInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_old_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_old_math_schema
                        }
                    }
                ],
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': [
                        [html_with_old_math_schema],
                        ['<p>2</p>'],
                        ['<p>3</p>'],
                        ['<p>4</p>']
                    ],
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is solution for state1</p>'
                    }
                }
            },
            'written_translations': (
                written_translations_dict_with_old_math_schema_and_old_format)
        }
        # Expected output: markup migrated, old translation format retained.
        state_dict_with_new_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'answer_groups': [answer_group_dict_with_new_math_schema],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_0',
                            'html': html_with_new_math_schema
                        }, {
                            'content_id': 'ca_choices_1',
                            'html': '<p>2</p>'
                        }, {
                            'content_id': 'ca_choices_2',
                            'html': '<p>3</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>4</p>'
                        }]
                    },
                    'allowMultipleItemsInSamePosition': {'value': True}
                },
                'confirmed_unclassified_answers': [],
                'id': 'DragAndDropSortInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_new_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_new_math_schema
                        }
                    }
                ],
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': [
                        [html_with_new_math_schema],
                        ['<p>2</p>'],
                        ['<p>3</p>'],
                        ['<p>4</p>']
                    ],
                    'explanation': {
                        'content_id': 'solution',
                        'html': '<p>This is solution for state1</p>'
                    }
                }
            },
            'written_translations': (
                written_translations_dict_with_new_math_schema_and_old_format)
        }
        # The old rule template schema flag is required because the rule
        # inputs above carry HTML in the pre-migration rule format.
        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict_with_old_math_schema,
                html_validation_service.
                add_math_content_to_math_rte_components,
                state_uses_old_rule_template_schema=True),
            state_dict_with_new_math_schema)
    def test_convert_html_fields_in_state_having_rule_spec_with_invalid_format(
            self):
        """Test the method for converting the HTML in a state
        when the rule_spec has invalid html format.

        The rules registry is swapped with a mocked copy whose rule-spec
        'format' entries are all set to an invalid value, so the converter
        must raise instead of silently producing wrong output.
        """
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        answer_group_with_old_math_schema = [{
            'rule_specs': [{
                'rule_type': 'Equals',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }, {
                'rule_type': 'ContainsAtLeastOneOf',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }],
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback',
                    'html': html_with_old_math_schema
                },
                'param_changes': [],
                'labelled_as_correct': False,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': answer_group_with_old_math_schema,
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'maxAllowableSelectionCount': {
                        'value': 1
                    },
                    'minAllowableSelectionCount': {
                        'value': 1
                    },
                    'choices': {
                        'value': [
                            '<p>init_state customization arg html 1</p>',
                            html_with_old_math_schema,
                            '<p>init_state customization arg html 3</p>',
                            '<p>init_state customization arg html 4</p>'
                        ]
                    }
                },
                'confirmed_unclassified_answers': [],
                'id': 'ItemSelectionInput',
                'hints': []
            }
        }
        # Deep-copy the real registry data, then corrupt every rule-spec
        # format so the converter's validation path is exercised.
        mock_html_field_types_to_rule_specs_dict = copy.deepcopy(
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))
        for html_type_dict in (
                mock_html_field_types_to_rule_specs_dict.values()):
            html_type_dict['format'] = 'invalid format'

        # Stand-in for the registry classmethod; returns the corrupted copy.
        def mock_get_html_field_types_to_rule_specs(
                unused_cls, state_schema_version=None): # pylint: disable=unused-argument
            return mock_html_field_types_to_rule_specs_dict

        with self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs)):
            with self.assertRaisesRegexp(
                Exception,
                'The rule spec does not belong to a valid format.'):
                state_domain.State.convert_html_fields_in_state(
                    state_dict_with_old_math_schema,
                    html_validation_service.
                    add_math_content_to_math_rte_components,
                    state_uses_old_rule_template_schema=True)
    def test_convert_html_fields_in_rule_spec_with_invalid_input_variable(self):
        """Test the method for converting the HTML in a state
        when the rule_spec has invalid input variable.

        The mocked registry declares 'y' as the HTML input variable for the
        ItemSelectionInput Equals rule, but the rule specs only provide 'x',
        so the converter must raise.
        """
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        answer_group_with_old_math_schema = [{
            'rule_specs': [{
                'rule_type': 'Equals',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }, {
                'rule_type': 'ContainsAtLeastOneOf',
                'inputs': {
                    'x': [html_with_old_math_schema]
                }
            }],
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback',
                    'html': html_with_old_math_schema
                },
                'param_changes': [],
                'labelled_as_correct': False,
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': 'Hello!'
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': answer_group_with_old_math_schema,
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': (
                            '<p><oppia-noninteractive-image filepath'
                            '-with-value="&amp;quot;random.png&amp;'
                            'quot;"></oppia-noninteractive-image>'
                            'Hello this is test case to check '
                            'image tag inside p tag</p>'
                        )
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'maxAllowableSelectionCount': {
                        'value': 1
                    },
                    'minAllowableSelectionCount': {
                        'value': 1
                    },
                    'choices': {
                        'value': [
                            '<p>init_state customization arg html 1</p>',
                            html_with_old_math_schema,
                            '<p>init_state customization arg html 3</p>',
                            '<p>init_state customization arg html 4</p>'
                        ]
                    }
                },
                'confirmed_unclassified_answers': [],
                'id': 'ItemSelectionInput',
                'hints': []
            }
        }
        # Deep-copy the real registry data and point the Equals rule's HTML
        # input variable at 'y', which the rule specs above do not supply.
        mock_html_field_types_to_rule_specs_dict = copy.deepcopy(
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))
        for html_type_dict in (
                mock_html_field_types_to_rule_specs_dict.values()):
            if html_type_dict['interactionId'] == 'ItemSelectionInput':
                html_type_dict['ruleTypes']['Equals']['htmlInputVariables'] = (
                    ['y'])

        # Stand-in for the registry classmethod; returns the altered copy.
        def mock_get_html_field_types_to_rule_specs(unused_cls):
            return mock_html_field_types_to_rule_specs_dict

        with self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs)
        ):
            with self.assertRaisesRegexp(
                Exception,
                'Rule spec should have at least one valid input variable with '
                'Html in it.'):
                state_domain.State.convert_html_fields_in_state(
                    state_dict_with_old_math_schema,
                    html_validation_service.
                    add_math_content_to_math_rte_components)
    def test_convert_html_fields_in_rule_spec_with_invalid_correct_answer(self):
        """Test the method for converting the HTML in a state when the
        interaction solution has invalid answer type.
        """
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        # A TextInput state whose solution and other fields still use the
        # pre-migration math-tag schema (raw_latex-with-value).
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': html_with_old_math_schema
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': {
                    'interaction_id': '',
                    'answer_is_exclusive': True,
                    'correct_answer': 'Answer1',
                    'explanation': {
                        'content_id': 'solution',
                        'html': html_with_old_math_schema
                    }
                },
                'answer_groups': [],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_with_old_math_schema
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': {
                    'rows': {
                        'value': 1
                    },
                    'placeholder': {
                        'value': ''
                    }
                },
                'confirmed_unclassified_answers': [],
                'id': 'TextInput',
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_old_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_old_math_schema
                        }
                    }
                ]
            }
        }
        # Remap the 'SetOfHtmlString' rule-spec entry under 'NormalizedString'
        # so that the TextInput solution's string correct_answer no longer
        # corresponds to a recognized answer type during conversion.
        mock_html_field_types_to_rule_specs_dict = copy.deepcopy(
            rules_registry.Registry.get_html_field_types_to_rule_specs(
                state_schema_version=41))
        mock_html_field_types_to_rule_specs_dict['NormalizedString'] = (
            mock_html_field_types_to_rule_specs_dict.pop('SetOfHtmlString'))
        def mock_get_html_field_types_to_rule_specs(unused_cls):
            """Returns the doctored rule-specs mapping built above."""
            return mock_html_field_types_to_rule_specs_dict
        with self.swap(
            rules_registry.Registry, 'get_html_field_types_to_rule_specs',
            classmethod(mock_get_html_field_types_to_rule_specs)
        ):
            # Conversion must fail because the solution's correct_answer
            # cannot be matched to a valid answer type.
            with self.assertRaisesRegexp(
                Exception,
                'The solution does not have a valid '
                'correct_answer type.'):
                state_domain.State.convert_html_fields_in_state(
                    state_dict_with_old_math_schema,
                    html_validation_service.
                    add_math_content_to_math_rte_components)
    def test_convert_html_fields_in_state_when_interaction_is_none(self):
        """Test the method for converting all the HTML in a state having
        no interaction.
        """
        html_with_old_math_schema = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        html_with_new_math_schema = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        # State with no interaction (interaction id is None) whose content,
        # default-outcome feedback and hints all use the old math schema.
        state_dict_with_old_math_schema = {
            'content': {
                'content_id': 'content', 'html': html_with_old_math_schema
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': [],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_with_old_math_schema
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': None,
                'confirmed_unclassified_answers': [],
                'id': None,
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_old_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_old_math_schema
                        }
                    }]
            }
        }
        # Expected output: structurally identical, with every HTML field
        # migrated to the new math_content schema.
        state_dict_with_new_math_schema = {
            'content': {
                'content_id': 'content', 'html': html_with_new_math_schema
            },
            'param_changes': [],
            'content_ids_to_audio_translations': {'content': {}},
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None,
            'interaction': {
                'solution': None,
                'answer_groups': [],
                'default_outcome': {
                    'param_changes': [],
                    'feedback': {
                        'content_id': 'default_outcome',
                        'html': html_with_new_math_schema
                    },
                    'dest': 'Introduction',
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None,
                    'labelled_as_correct': False
                },
                'customization_args': None,
                'confirmed_unclassified_answers': [],
                'id': None,
                'hints': [
                    {
                        'hint_content': {
                            'content_id': 'hint_1',
                            'html': html_with_new_math_schema
                        }
                    },
                    {
                        'hint_content': {
                            'content_id': 'hint_2',
                            'html': html_with_new_math_schema
                        }
                    }]
            }
        }
        solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': 'Answer1',
            'explanation': {
                'content_id': 'solution',
                'html': html_with_old_math_schema
            }
        }
        self.assertEqual(
            state_domain.State.convert_html_fields_in_state(
                state_dict_with_old_math_schema,
                html_validation_service.
                add_math_content_to_math_rte_components),
            state_dict_with_new_math_schema)
        # Assert that no action is performed on a solution dict when the
        # interaction ID is None.
        self.assertEqual(
            state_domain.Solution.convert_html_in_solution(
                None, solution_dict,
                html_validation_service.
                add_math_content_to_math_rte_components,
                rules_registry.Registry.get_html_field_types_to_rule_specs(),
                {}
            ), solution_dict)
def test_subtitled_html_validation_with_invalid_html_type(self):
"""Test validation of subtitled HTML with invalid html type."""
subtitled_html = state_domain.SubtitledHtml(
'content_id', '<p>some html</p>')
subtitled_html.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid content HTML'
):
with self.swap(subtitled_html, 'html', 20):
subtitled_html.validate()
def test_subtitled_html_validation_with_invalid_content(self):
"""Test validation of subtitled HTML with invalid content."""
subtitled_html = state_domain.SubtitledHtml(
'content_id', '<p>some html</p>')
subtitled_html.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected content id to be a string, ' +
'received 20'):
with self.swap(subtitled_html, 'content_id', 20):
subtitled_html.validate()
def test_subtitled_unicode_validation_with_invalid_html_type(self):
"""Test validation of subtitled unicode with invalid unicode type."""
subtitled_unicode = state_domain.SubtitledUnicode(
'content_id', 'some string')
subtitled_unicode.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid content unicode'
):
with self.swap(subtitled_unicode, 'unicode_str', 20):
subtitled_unicode.validate()
def test_subtitled_unicode_validation_with_invalid_content(self):
"""Test validation of subtitled unicode with invalid content."""
subtitled_unicode = state_domain.SubtitledUnicode(
'content_id', 'some html string')
subtitled_unicode.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected content id to be a string, ' +
'received 20'):
with self.swap(subtitled_unicode, 'content_id', 20):
subtitled_unicode.validate()
def test_voiceover_validation(self):
"""Test validation of voiceover."""
audio_voiceover = state_domain.Voiceover('a.mp3', 20, True, 24.5)
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected audio filename to be a string'
):
with self.swap(audio_voiceover, 'filename', 20):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_voiceover, 'filename', '.invalidext'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_voiceover, 'filename', 'justanextension'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_voiceover, 'filename', 'a.invalidext'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected file size to be an int'
):
with self.swap(audio_voiceover, 'file_size_bytes', 'abc'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid file size'
):
with self.swap(audio_voiceover, 'file_size_bytes', -3):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected needs_update to be a bool'
):
with self.swap(audio_voiceover, 'needs_update', 'hello'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected duration_secs to be a float'
):
with self.swap(audio_voiceover, 'duration_secs', 'test'):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected duration_secs to be a float'
):
with self.swap(audio_voiceover, 'duration_secs', 10):
audio_voiceover.validate()
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected duration_secs to be positive number, '
'or zero if not yet specified'
):
with self.swap(audio_voiceover, 'duration_secs', -3.45):
audio_voiceover.validate()
def test_written_translation_validation(self):
"""Test validation of translation script."""
written_translation = state_domain.WrittenTranslation(
'html', 'Test.', True)
written_translation.validate()
with self.assertRaisesRegexp(
AssertionError, 'Expected unicode HTML string, received 30'):
with self.swap(written_translation, 'translation', 30):
written_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected needs_update to be a bool'
):
with self.swap(written_translation, 'needs_update', 20):
written_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid data_format'
):
with self.swap(written_translation, 'data_format', 'int'):
written_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid data_format'
):
with self.swap(written_translation, 'data_format', 2):
written_translation.validate()
    def test_hints_validation(self):
        """Test validation of state hints."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        self.set_interaction_for_state(init_state, 'TextInput')
        exploration.validate()
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>hint one</p>')
            )
        ]
        init_state.update_interaction_hints(hints_list)
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'helloworld!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            },
        }
        solution = state_domain.Solution.from_dict(
            init_state.interaction.id, solution_dict
        )
        init_state.update_interaction_solution(solution)
        exploration.validate()
        # Appending a second hint and re-applying the list must keep the
        # exploration valid and preserve hint order.
        hints_list.append(
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_2', '<p>new hint</p>')
            )
        )
        init_state.update_interaction_hints(hints_list)
        self.assertEqual(
            init_state.interaction.hints[1].hint_content.html,
            '<p>new hint</p>')
        hints_list.append(
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_3', '<p>hint three</p>')
            )
        )
        init_state.update_interaction_hints(hints_list)
        # Removing the middle hint leaves hint_1 and hint_3; bump the next
        # content id index past the ids used so validation passes.
        del hints_list[1]
        init_state.update_interaction_hints(hints_list)
        init_state.update_next_content_id_index(4)
        self.assertEqual(len(init_state.interaction.hints), 2)
        exploration.validate()
def test_update_customization_args_with_non_unique_content_ids(self):
"""Test that update customization args throws an error when passed
customization args with non-unique content ids.
"""
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
self.set_interaction_for_state(init_state, 'MultipleChoiceInput')
with self.assertRaisesRegexp(
Exception,
'All customization argument content_ids should be unique.'
):
init_state.update_interaction_customization_args({
'choices': {
'value': [{
'content_id': 'non-unique-content-id',
'html': '1'
}, {
'content_id': 'non-unique-content-id',
'html': '2'
}]
},
'showChoicesInShuffledOrder': {'value': True}
})
    def test_solution_validation(self):
        """Test validation of state solution."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        self.set_interaction_for_state(init_state, 'TextInput')
        exploration.validate()
        # Solution should be set to None as default.
        self.assertEqual(init_state.interaction.solution, None)
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '')
            )
        ]
        init_state.update_interaction_hints(hints_list)
        # A list correct_answer is invalid for TextInput, which expects a
        # string answer.
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': [0, 0],
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        # Object type of answer must match that of correct_answer.
        with self.assertRaisesRegexp(
            AssertionError,
            re.escape('Expected unicode string, received [0, 0]')
        ):
            init_state.interaction.solution = (
                state_domain.Solution.from_dict(
                    init_state.interaction.id, solution_dict))
        # With a string correct_answer the solution is accepted and the
        # exploration validates.
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        init_state.update_interaction_solution(
            state_domain.Solution.from_dict(
                init_state.interaction.id, solution_dict))
        exploration.validate()
def test_validate_state_unique_content_ids(self):
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('MultipleChoiceInput')
init_state.update_interaction_customization_args({
'choices': {
'value': [{
'content_id': '',
'html': 'one'
}]
},
'showChoicesInShuffledOrder': {'value': True}
})
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected all content_ids to be unique, received'
):
with self.swap(
init_state.interaction.customization_args['choices'].value[0],
'content_id',
'content'
):
exploration.validate()
def test_validate_state_content_id_indexes(self):
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('MultipleChoiceInput')
init_state.update_interaction_customization_args({
'choices': {
'value': [{
'content_id': 'ca_choices_10',
'html': 'one'
}]
},
'showChoicesInShuffledOrder': {'value': True}
})
init_state.update_next_content_id_index(9)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected all content id indexes to be less than the "next '
'content id index"'
):
exploration.validate()
    def test_validate_state_solicit_answer_details(self):
        """Test validation of solicit_answer_details."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        init_state = exploration.states[exploration.init_state_name]
        # solicit_answer_details defaults to False.
        self.assertEqual(init_state.solicit_answer_details, False)
        # A non-boolean value must be rejected.
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected solicit_answer_details to be ' +
            'a boolean, received'):
            with self.swap(init_state, 'solicit_answer_details', 'abc'):
                exploration.validate()
        self.assertEqual(init_state.solicit_answer_details, False)
        # The Continue interaction does not allow soliciting answer details.
        self.set_interaction_for_state(init_state, 'Continue')
        self.assertEqual(init_state.interaction.id, 'Continue')
        exploration.validate()
        with self.assertRaisesRegexp(
            utils.ValidationError, 'The Continue interaction does not ' +
            'support soliciting answer details from learners.'):
            with self.swap(init_state, 'solicit_answer_details', True):
                exploration.validate()
        # TextInput does allow it, so setting the flag to True is valid.
        self.set_interaction_for_state(init_state, 'TextInput')
        self.assertEqual(init_state.interaction.id, 'TextInput')
        self.assertEqual(init_state.solicit_answer_details, False)
        exploration.validate()
        init_state.solicit_answer_details = True
        self.assertEqual(init_state.solicit_answer_details, True)
        exploration.validate()
        init_state = exploration.states[exploration.init_state_name]
        self.assertEqual(init_state.solicit_answer_details, True)
def test_validate_state_linked_skill_id(self):
"""Test validation of linked_skill_id."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.linked_skill_id, None)
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected linked_skill_id to be ' +
'a str, received 12.'):
with self.swap(init_state, 'linked_skill_id', 12):
exploration.validate()
self.assertEqual(init_state.linked_skill_id, None)
def test_validate_state_card_is_checkpoint(self):
"""Test validation of card_is_checkpoint."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
init_state = exploration.states[exploration.init_state_name]
self.assertEqual(init_state.card_is_checkpoint, True)
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected card_is_checkpoint to be ' +
'a boolean, received'):
with self.swap(init_state, 'card_is_checkpoint', 'abc'):
exploration.validate()
self.assertEqual(init_state.card_is_checkpoint, True)
    def test_validate_solution_answer_is_exclusive(self):
        """Validation rejects a solution whose answer_is_exclusive is not a
        bool.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        # Solution should be set to None as default.
        self.assertEqual(exploration.init_state.interaction.solution, None)
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '')
            )
        ]
        solution = state_domain.Solution.from_dict(
            exploration.init_state.interaction.id, solution_dict)
        exploration.init_state.update_interaction_hints(hints_list)
        exploration.init_state.update_interaction_solution(solution)
        exploration.validate()
        # An int in answer_is_exclusive must make validation fail.
        solution_dict = {
            'answer_is_exclusive': 1,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        solution = state_domain.Solution.from_dict(
            exploration.init_state.interaction.id, solution_dict)
        exploration.init_state.update_interaction_solution(solution)
        with self.assertRaisesRegexp(
            Exception, 'Expected answer_is_exclusive to be bool, received 1'):
            exploration.validate()
def test_validate_non_list_param_changes(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
exploration.init_state.param_changes = 0
with self.assertRaisesRegexp(
Exception, 'Expected state param_changes to be a list, received 0'):
exploration.init_state.validate(None, True)
    def test_validate_duplicate_content_id_with_answer_group_feedback(self):
        """State validation fails when the state content reuses an answer
        group feedback content id.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Contains',
                            'normalizedStrSet': ['Test']
                        }
                    })
            ],
            [],
            None
        )
        exploration.init_state.update_interaction_answer_groups(
            [state_answer_group])
        # Reuse 'feedback_1' as the state content's id to trigger the
        # duplicate-content-id check.
        exploration.init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'feedback_1',
                'html': '<p>Feedback</p>'
            }))
        with self.assertRaisesRegexp(
            Exception, 'Found a duplicate content id feedback_1'):
            exploration.init_state.validate(None, True)
    def test_validate_duplicate_content_id_with_answer_group_rules(self):
        """State validation fails when two rule specs in an answer group
        share the same rule-input content id.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        # Both rule specs below deliberately reuse 'rule_input_Contains'.
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Contains',
                            'normalizedStrSet': ['Test']
                        }
                    }),
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Contains',
                            'normalizedStrSet': ['Test1']
                        }
                    })
            ],
            [],
            None
        )
        exploration.init_state.update_interaction_answer_groups(
            [state_answer_group])
        with self.assertRaisesRegexp(
            Exception, 'Found a duplicate content id rule_input_Contains'):
            exploration.init_state.validate(None, True)
def test_validate_duplicate_content_id_with_default_outcome(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
default_outcome = state_domain.Outcome(
'Introduction', state_domain.SubtitledHtml('default_outcome', ''),
False, [], None, None
)
exploration.init_state.update_interaction_default_outcome(
default_outcome
)
exploration.init_state.update_content(
state_domain.SubtitledHtml.from_dict({
'content_id': 'default_outcome',
'html': ''
}))
with self.assertRaisesRegexp(
Exception, 'Found a duplicate content id default_outcome'):
exploration.init_state.validate(None, True)
def test_validate_duplicate_content_id_with_hints(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>some html</p>')
)
]
exploration.init_state.update_interaction_hints(hints_list)
exploration.init_state.update_content(
state_domain.SubtitledHtml.from_dict({
'content_id': 'hint_1',
'html': ''
}))
with self.assertRaisesRegexp(
Exception, 'Found a duplicate content id hint_1'):
exploration.init_state.validate(None, True)
    def test_validate_duplicate_content_id_with_solution(self):
        """State validation fails when the state content reuses the
        solution's content id.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        subtitled_html = state_domain.SubtitledHtml('content_id', 'some html')
        hints_list = [state_domain.Hint(subtitled_html)]
        exploration.init_state.interaction.hints = hints_list
        solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': 'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>hello_world is a string</p>'
            }
        }
        solution = state_domain.Solution.from_dict(
            exploration.init_state.interaction.id, solution_dict)
        exploration.init_state.update_interaction_solution(solution)
        # Reuse 'solution' as the state content's id to trigger the
        # duplicate-content-id check.
        exploration.init_state.update_content(
            state_domain.SubtitledHtml.from_dict({
                'content_id': 'solution',
                'html': ''
            }))
        with self.assertRaisesRegexp(
            Exception, 'Found a duplicate content id solution'):
            exploration.init_state.validate(None, True)
def test_cannot_convert_state_dict_to_yaml_with_invalid_state_dict(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with contextlib.ExitStack() as stack:
captured_logs = stack.enter_context(
self.capture_logging(min_level=logging.ERROR))
stack.enter_context(
self.assertRaisesRegexp(
Exception, 'string indices must be integers')
)
exploration.init_state.convert_state_dict_to_yaml(
'invalid_state_dict', 10)
self.assertEqual(len(captured_logs), 1)
self.assertIn('Bad state dict: invalid_state_dict', captured_logs[0])
def test_cannot_update_hints_with_content_id_not_in_written_translations(
self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
old_hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml(
'hint_1', '<p>Hello, this is html1 for state2</p>')
)
]
new_hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml(
'hint_2', '<p>Hello, this is html2 for state2</p>')
)
]
exploration.init_state.update_interaction_hints(old_hints_list)
written_translations_dict = {
'translations_mapping': {
'content': {
'hi': {
'data_format': 'html',
'translation': '<p>Test!</p>',
'needs_update': True
}
},
'default_outcome': {}
}
}
written_translations = (
state_domain.WrittenTranslations.from_dict(
written_translations_dict))
exploration.init_state.update_written_translations(written_translations)
with self.assertRaisesRegexp(
Exception,
'The content_id hint_1 does not exist in written_translations'):
exploration.init_state.update_interaction_hints(new_hints_list)
    def test_cannot_update_hints_with_content_id_not_in_recorded_voiceovers(
            self):
        """Replacing a hint whose old content_id has no entry in
        recorded_voiceovers must fail.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        old_hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for state2</p>')
            )
        ]
        new_hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_2', '<p>Hello, this is html2 for state2</p>')
            )
        ]
        exploration.init_state.update_interaction_hints(old_hints_list)
        # Voiceovers cover 'content' only — not 'hint_1' — so removing
        # hint_1 during the update below must be rejected.
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'en': {
                        'filename': 'filename3.mp3',
                        'file_size_bytes': 3000,
                        'needs_update': False,
                        'duration_secs': 8.1
                    }
                },
                'default_outcome': {}
            }
        }
        recorded_voiceovers = (
            state_domain.RecordedVoiceovers.from_dict(recorded_voiceovers_dict))
        exploration.init_state.update_recorded_voiceovers(recorded_voiceovers)
        with self.assertRaisesRegexp(
            Exception,
            'The content_id hint_1 does not exist in recorded_voiceovers'):
            exploration.init_state.update_interaction_hints(new_hints_list)
def test_cannot_update_hints_with_new_content_id_in_written_translations(
self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
old_hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml(
'hint_1', '<p>Hello, this is html1 for state2</p>')
)
]
new_hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml(
'hint_2', '<p>Hello, this is html2 for state2</p>')
)
]
exploration.init_state.update_interaction_hints(old_hints_list)
written_translations_dict = {
'translations_mapping': {
'hint_2': {
'hi': {
'data_format': 'html',
'translation': '<p>Test!</p>',
'needs_update': True
}
},
'hint_1': {
'hi': {
'data_format': 'html',
'translation': '<p>Test1!</p>',
'needs_update': True
}
},
'default_outcome': {}
}
}
written_translations = (
state_domain.WrittenTranslations.from_dict(
written_translations_dict))
exploration.init_state.update_written_translations(written_translations)
with self.assertRaisesRegexp(
Exception,
'The content_id hint_2 already exists in written_translations'):
exploration.init_state.update_interaction_hints(new_hints_list)
    def test_cannot_update_hints_with_new_content_id_in_recorded_voiceovers(
            self):
        """Introducing a hint content_id already present in
        recorded_voiceovers must fail.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        old_hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for state2</p>')
            )
        ]
        new_hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_2', '<p>Hello, this is html2 for state2</p>')
            )
        ]
        exploration.init_state.update_interaction_hints(old_hints_list)
        # Voiceovers already contain an entry for 'hint_2', which the hint
        # update below tries to introduce as a brand-new content id.
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'hint_1': {
                    'en': {
                        'filename': 'filename3.mp3',
                        'file_size_bytes': 3000,
                        'needs_update': False,
                        'duration_secs': 6.1
                    }
                },
                'hint_2': {
                    'en': {
                        'filename': 'filename4.mp3',
                        'file_size_bytes': 3000,
                        'needs_update': False,
                        'duration_secs': 7.5
                    }
                },
                'default_outcome': {}
            }
        }
        recorded_voiceovers = (
            state_domain.RecordedVoiceovers.from_dict(recorded_voiceovers_dict))
        exploration.init_state.update_recorded_voiceovers(recorded_voiceovers)
        with self.assertRaisesRegexp(
            Exception,
            'The content_id hint_2 already exists in recorded_voiceovers'):
            exploration.init_state.update_interaction_hints(new_hints_list)
    def test_cannot_update_interaction_solution_with_non_dict_solution(self):
        """update_interaction_solution rejects a value that is not a
        Solution object.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml(
                    'hint_1', '<p>Hello, this is html1 for state2</p>')
            )
        ]
        solution_dict = {
            'answer_is_exclusive': True,
            'correct_answer': u'hello_world!',
            'explanation': {
                'content_id': 'solution',
                'html': u'<p>hello_world is a string</p>'
            }
        }
        solution = state_domain.Solution.from_dict(
            exploration.init_state.interaction.id, solution_dict)
        exploration.init_state.update_interaction_hints(hints_list)
        exploration.init_state.update_interaction_solution(solution)
        self.assertEqual(
            exploration.init_state.interaction.solution.to_dict(),
            solution_dict)
        # NOTE(review): the expected message concatenates to
        # 'object,received' without a space — presumably mirroring the
        # production error string; verify against state_domain.
        with self.assertRaisesRegexp(
            Exception, 'Expected solution to be a Solution object,'
            'received test string'):
            exploration.init_state.update_interaction_solution('test string')
def test_update_interaction_solution_with_no_solution(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml(
'hint_1', '<p>Hello, this is html1 for state2</p>'
)
)
]
exploration.init_state.update_interaction_hints(hints_list)
exploration.init_state.update_interaction_solution(None)
self.assertIsNone(exploration.init_state.interaction.solution)
def test_cannot_update_interaction_hints_with_non_list_hints(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with self.assertRaisesRegexp(
Exception, 'Expected hints_list to be a list'):
exploration.init_state.update_interaction_hints({})
def test_cannot_update_non_list_interaction_confirmed_unclassified_answers(
self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with self.assertRaisesRegexp(
Exception, 'Expected confirmed_unclassified_answers to be a list'):
(
exploration.init_state
.update_interaction_confirmed_unclassified_answers({}))
    def test_update_interaction_confirmed_unclassified_answers(self):
        """update_interaction_confirmed_unclassified_answers stores the
        given list verbatim.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': 'Test'
                    })
            ],
            [],
            None
        )
        # Defaults to an empty list before any update.
        self.assertEqual(
            exploration.init_state.interaction.confirmed_unclassified_answers,
            [])
        (
            exploration.init_state
            .update_interaction_confirmed_unclassified_answers(
                [state_answer_group])
        )
        self.assertEqual(
            exploration.init_state.interaction.confirmed_unclassified_answers,
            [state_answer_group])
def test_cannot_update_non_list_interaction_answer_groups(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
with self.assertRaisesRegexp(
Exception, 'Expected interaction_answer_groups to be a list'):
exploration.init_state.update_interaction_answer_groups(
'invalid_answer_groups')
    def test_cannot_update_answer_groups_with_non_dict_rule_inputs(self):
        """Answer groups whose rule inputs are not a dict are rejected."""
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        # The rule spec below is given a list instead of an inputs dict.
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains', []
                )
            ],
            [],
            None
        )
        with self.assertRaisesRegexp(
            Exception,
            re.escape('Expected rule_inputs to be a dict, received []')
        ):
            exploration.init_state.update_interaction_answer_groups(
                [state_answer_group])
def test_cannot_update_answer_groups_with_non_list_rule_specs(self):
exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
state_answer_group = state_domain.AnswerGroup(
state_domain.Outcome(
exploration.init_state_name, state_domain.SubtitledHtml(
'feedback_1', '<p>Feedback</p>'), False, [], None, None
), {}, [], None
)
state_answer_group.rule_specs = {}
with self.assertRaisesRegexp(
Exception, 'Expected answer group rule specs to be a list'):
exploration.init_state.update_interaction_answer_groups(
[state_answer_group])
    def test_cannot_update_answer_groups_with_invalid_rule_input_value(self):
        """Answer groups with a rule-input value of the wrong type are
        rejected.
        """
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        # normalizedStrSet should hold strings; a nested empty list is
        # invalid for a TranslatableSetOfNormalizedString.
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Equals',
                            'normalizedStrSet': [[]]
                        }
                    })
            ],
            [],
            None
        )
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Value has the wrong type. It should be a TranslatableSetOf'
                'NormalizedString. The value is'
            )
        ):
            exploration.init_state.update_interaction_answer_groups(
                [state_answer_group])
    def test_validate_rule_spec(self):
        """Validating a rule spec against an empty parameter list logs a
        warning about the unrecognized input and raises KeyError.
        """
        observed_log_messages = []
        def _mock_logging_function(msg, *args):
            """Mocks logging.warning()."""
            observed_log_messages.append(msg % args)
        logging_swap = self.swap(logging, 'warning', _mock_logging_function)
        exploration = self.save_new_valid_exploration('exp_id', 'owner_id')
        state_answer_group = state_domain.AnswerGroup(
            state_domain.Outcome(
                exploration.init_state_name, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Feedback</p>'),
                False, [], None, None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Equals',
                            'normalizedStrSet': ['Test']
                        }
                    })
            ],
            [],
            None
        )
        exploration.init_state.update_interaction_answer_groups(
            [state_answer_group])
        # Validating with no recognized parameter names: the 'x' input is
        # first reported via logging.warning, then raises KeyError.
        with logging_swap, self.assertRaisesRegexp(KeyError, '\'x\''):
            (
                exploration.init_state.interaction.answer_groups[0]
                .rule_specs[0].validate([], {})
            )
        self.assertEqual(
            observed_log_messages,
            [
                'RuleSpec \'Contains\' has inputs which are not recognized '
                'parameter names: {\'x\'}'
            ]
        )
class InteractionCustomizationArgDomainTests(test_utils.GenericTestBase):
    """Test methods for InteractionCustomizationArg domain object."""

    def test_traverse_by_schema_and_convert(self):
        """The conversion function is applied to every SubtitledHtml value
        found in the customization argument value.
        """
        html = []

        def extract_html(value, unused_schema_obj_type):
            """Extracts html from SubtitledHtml values.

            Args:
                value: SubtitledHtml|SubtitledUnicode. The value in the
                    customization argument value to be converted.
                unused_schema_obj_type: str. The schema obj_type for the
                    customization argument value, which is one of
                    'SubtitledUnicode' or 'SubtitledHtml'.

            Returns:
                list(str). The accumulator list of html strings collected
                so far. (The traversal writes this return value back into
                the customization argument value, but this test only
                inspects the accumulated html.)
            """
            html.append(value.html)
            return html

        schema = {
            'type': 'dict',
            'properties': [{
                'name': 'content',
                'schema': {
                    'type': 'custom',
                    'obj_type': 'SubtitledHtml',
                }
            }]
        }
        value = {
            'content': state_domain.SubtitledHtml('id', '<p>testing</p>')
        }
        state_domain.InteractionCustomizationArg.traverse_by_schema_and_convert(
            schema, value, extract_html)

        self.assertEqual(html, ['<p>testing</p>'])

    def test_traverse_by_schema_and_get(self):
        """Traversal collects the values matching the requested schema
        obj_types.
        """
        schema = {
            'type': 'dict',
            'properties': [{
                'name': 'content',
                'schema': {
                    'type': 'custom',
                    'obj_type': 'SubtitledHtml',
                }
            }]
        }
        value = {
            'content': state_domain.SubtitledHtml('id', '<p>testing</p>')
        }
        # (A dead 'html = []' initializer that was immediately overwritten
        # has been removed here.)
        html = (
            state_domain.InteractionCustomizationArg.traverse_by_schema_and_get(
                schema,
                value,
                [schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML],
                lambda x: x.html)
        )
        self.assertEqual(html, ['<p>testing</p>'])
class SubtitledUnicodeDomainUnitTests(test_utils.GenericTestBase):
    """Test SubtitledUnicode domain object methods."""

    def test_from_and_to_dict(self):
        """A dict round-trips unchanged through from_dict()/to_dict()."""
        source_dict = {
            'content_id': 'id',
            'unicode_str': ''
        }
        round_tripped = state_domain.SubtitledUnicode.from_dict(
            source_dict).to_dict()
        self.assertEqual(round_tripped, source_dict)

    def test_create_default(self):
        """create_default_subtitled_unicode() yields an empty unicode_str
        with the given content id.
        """
        default_dict = (
            state_domain.SubtitledUnicode.create_default_subtitled_unicode(
                'id').to_dict()
        )
        self.assertEqual(default_dict, {
            'content_id': 'id',
            'unicode_str': ''
        })
class WrittenTranslationsDomainUnitTests(test_utils.GenericTestBase):
    """Test methods operating on written translations
    (the WrittenTranslations domain object).
    """
    def test_data_formats_are_correct_and_complete(self):
        # Every data format declared on WrittenTranslation must correspond
        # to a registered translatable object class, with no gaps or extras.
        translatable_class_names_in_data_formats = sorted(
            state_domain.WrittenTranslation.
            DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE.values())
        self.assertEqual(
            translatable_class_names_in_data_formats,
            translatable_object_registry.Registry.get_all_class_names())
    def test_from_and_to_dict_works_correctly(self):
        # A mapping covering several content ids, languages and data formats
        # should round-trip unchanged through from_dict()/to_dict().
        written_translations_dict = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'data_format': 'html',
                        'translation': 'hello',
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Hey!',
                        'needs_update': False
                    },
                    'fr': {
                        'data_format': 'set_of_normalized_string',
                        'translation': ['test1', 'test2'],
                        'needs_update': False
                    },
                },
                'feedback_1': {
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Testing!',
                        'needs_update': False
                    },
                    'en': {
                        'data_format': 'html',
                        'translation': 'hello!',
                        'needs_update': False
                    },
                    'fr': {
                        'data_format': 'set_of_normalized_string',
                        'translation': ['test1', 'test2'],
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        self.assertEqual(
            written_translations.to_dict(), written_translations_dict)
    def test_get_content_ids_for_text_translation_return_correct_list_of_content_id(self): # pylint: disable=line-too-long
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        self.assertEqual(
            written_translations.get_content_ids_for_text_translation(), [])
        written_translations.add_content_id_for_translation('feedback_1')
        written_translations.add_content_id_for_translation('feedback_2')
        # Order is not guaranteed, hence assertItemsEqual.
        self.assertItemsEqual(
            written_translations.get_content_ids_for_text_translation(), [
                'feedback_2', 'feedback_1'])
    def test_get_translated_content_in_non_existing_language_raise_error(self):
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {
                'content': {
                    'en': {
                        'data_format': 'html',
                        'translation': '<p> In English.</p>',
                        'needs_update': False
                    }
                }
            }
        })
        # Sanity check: the existing translation is retrievable.
        translated_content = written_translations.get_translated_content(
            'content', 'en')
        self.assertEqual(translated_content, '<p> In English.</p>')
        # Requesting a language with no translation for this content id
        # should raise.
        with self.assertRaisesRegexp(
            Exception, 'Translation for the given content_id content does not '
            'exist in hi language code'):
            written_translations.get_translated_content('content', 'hi')
    def test_get_translated_content_for_invalid_content_id_raise_error(self):
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {
                'content': {
                    'en': {
                        'data_format': 'html',
                        'translation': '<p> In English.</p>',
                        'needs_update': False
                    }
                }
            }
        })
        translated_content = written_translations.get_translated_content(
            'content', 'en')
        self.assertEqual(translated_content, '<p> In English.</p>')
        # An unknown content id should raise regardless of language.
        with self.assertRaisesRegexp(
            Exception, 'Invalid content_id: invalid_id'):
            written_translations.get_translated_content('invalid_id', 'hi')
    def test_add_content_id_for_translations_adds_content_id(self):
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 0)
        new_content_id = 'content_id'
        written_translations.add_content_id_for_translation(new_content_id)
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 1)
        self.assertEqual(
            written_translations.get_content_ids_for_text_translation(),
            ['content_id'])
    def test_add_content_id_for_translation_with_invalid_content_id_raise_error(
            self):
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        # content ids must be strings; an int is rejected.
        invalid_content_id = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, received 123'):
            written_translations.add_content_id_for_translation(
                invalid_content_id)
    def test_add_content_id_for_translation_with_existing_content_id_raise_error( # pylint: disable=line-too-long
            self):
        written_translations_dict = {
            'translations_mapping': {
                'feedback_1': {
                    'en': {
                        'data_format': 'html',
                        'translation': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        existing_content_id = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 already exist.'):
            written_translations.add_content_id_for_translation(
                existing_content_id)
    def test_delete_content_id_for_translations_deletes_content_id(self):
        old_written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'en': {
                        'data_format': 'html',
                        'translation': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            old_written_translations_dict)
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 1)
        written_translations.delete_content_id_for_translation('content')
        self.assertEqual(
            len(written_translations.get_content_ids_for_text_translation()), 0)
    def test_delete_content_id_for_translation_with_nonexisting_content_id_raise_error(self): # pylint: disable=line-too-long
        written_translations_dict = {
            'translations_mapping': {
                'content': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        nonexisting_content_id_to_delete = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 does not exist.'):
            written_translations.delete_content_id_for_translation(
                nonexisting_content_id_to_delete)
    def test_delete_content_id_for_translation_with_invalid_content_id_raise_error(self): # pylint: disable=line-too-long
        written_translations = state_domain.WrittenTranslations.from_dict({
            'translations_mapping': {}
        })
        invalid_content_id_to_delete = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            written_translations.delete_content_id_for_translation(
                invalid_content_id_to_delete)
    def test_validation_with_invalid_content_id_raise_error(self):
        # Non-string content id keys are rejected by validate().
        written_translations_dict = {
            'translations_mapping': {
                123: {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            written_translations.validate([123])
    def test_validate_non_dict_language_code_to_written_translation(self):
        # Each content id must map to a dict of language codes; a list
        # should fail validation.
        written_translations = state_domain.WrittenTranslations({
            'en': []
        })
        with self.assertRaisesRegexp(
            Exception,
            re.escape('Expected content_id value to be a dict, received []')):
            written_translations.validate(None)
    def test_validation_with_invalid_type_language_code_raise_error(self):
        # Language codes must be strings; an int key is rejected.
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    123: {
                        'data_format': 'html',
                        'translation': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected language_code to be a string, '):
            written_translations.validate(['content'])
    def test_validation_with_unknown_language_code_raise_error(self):
        # 'ed' is not a supported language code.
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'ed': {
                        'data_format': 'html',
                        'translation': 'hello!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(Exception, 'Invalid language_code: ed'):
            written_translations.validate(['content'])
    def test_validation_with_invalid_content_id_list(self):
        # validate() checks the mapping's content ids against the list of
        # expected content ids supplied by the caller.
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'en': {
                        'data_format': 'html',
                        'translation': '<p>hello!</p>',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Expected state written_translations to match the listed '
                'content ids [\'invalid_content\']')):
            written_translations.validate(['invalid_content'])
    def test_get_content_ids_that_are_correctly_translated(self):
        # With no translations at all, nothing is correctly translated.
        written_translations_dict = {
            'translations_mapping': {
                'content': {},
                'hint_1': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        self.assertEqual(
            written_translations.get_content_ids_that_are_correctly_translated(
                'hi'), [])
    def test_get_content_ids_that_are_correctly_translated_with_some_existing_translations(self): # pylint: disable=line-too-long
        # A translation that exists and is up to date counts as correct.
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>hello!</p>',
                        'needs_update': False
                    }
                },
                'hint_1': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        self.assertEqual(
            written_translations.get_content_ids_that_are_correctly_translated(
                'hi'), ['content'])
    def test_get_content_ids_that_are_correctly_translated_with_some_existing_translations_needs_update(self): # pylint: disable=line-too-long
        # A translation flagged needs_update does not count as correctly
        # translated.
        written_translations_dict = {
            'translations_mapping': {
                'content': {
                    'hi': {
                        'data_format': 'html',
                        'translation': '<p>hello!</p>',
                        'needs_update': True
                    }
                },
                'hint_1': {}
            }
        }
        written_translations = state_domain.WrittenTranslations.from_dict(
            written_translations_dict)
        self.assertEqual(
            written_translations.get_content_ids_that_are_correctly_translated(
                'hi'), [])
class RecordedVoiceoversDomainUnitTests(test_utils.GenericTestBase):
    """Test methods operating on recorded voiceovers."""
    def test_from_and_to_dict_wroks_correctly(self):
        # NOTE: 'wroks' is a typo in the method name (kept to avoid renaming
        # the test). A voiceovers mapping covering several content ids and
        # languages should round-trip unchanged through from_dict()/to_dict().
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content1': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': True,
                        'duration_secs': 1.1
                    },
                    'hi': {
                        'filename': 'abc.mp3',
                        'file_size_bytes': 1234,
                        'needs_update': False,
                        'duration_secs': 1.3
                    }
                },
                'feedback_1': {
                    'hi': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False,
                        'duration_secs': 1.1
                    },
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False,
                        'duration_secs': 1.3
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        self.assertEqual(
            recorded_voiceovers.to_dict(), recorded_voiceovers_dict)
    def test_get_content_ids_for_voiceovers_return_correct_list_of_content_id(self): # pylint: disable=line-too-long
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        self.assertEqual(
            recorded_voiceovers.get_content_ids_for_voiceovers(), [])
        recorded_voiceovers.add_content_id_for_voiceover('feedback_1')
        recorded_voiceovers.add_content_id_for_voiceover('feedback_2')
        # Order is not guaranteed, hence assertItemsEqual.
        self.assertItemsEqual(
            recorded_voiceovers.get_content_ids_for_voiceovers(),
            ['feedback_2', 'feedback_1'])
    def test_add_content_id_for_voiceovers_adds_content_id(self):
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 0)
        new_content_id = 'content_id'
        recorded_voiceovers.add_content_id_for_voiceover(new_content_id)
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 1)
        self.assertEqual(
            recorded_voiceovers.get_content_ids_for_voiceovers(),
            ['content_id'])
    def test_add_content_id_for_voiceover_with_invalid_content_id_raise_error(
            self):
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        # content ids must be strings; an int is rejected.
        invalid_content_id = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, received 123'):
            recorded_voiceovers.add_content_id_for_voiceover(
                invalid_content_id)
    def test_add_content_id_for_voiceover_with_existing_content_id_raise_error( # pylint: disable=line-too-long
            self):
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'feedback_1': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False,
                        'duration_secs': 1.1
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        existing_content_id = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 already exist.'):
            recorded_voiceovers.add_content_id_for_voiceover(
                existing_content_id)
    def test_delete_content_id_for_voiceovers_deletes_content_id(self):
        old_recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False,
                        'duration_secs': 1.1
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            old_recorded_voiceovers_dict)
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 1)
        recorded_voiceovers.delete_content_id_for_voiceover('content')
        self.assertEqual(
            len(recorded_voiceovers.get_content_ids_for_voiceovers()), 0)
    def test_delete_content_id_for_voiceover_with_nonexisting_content_id_raise_error(self): # pylint: disable=line-too-long
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {}
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        nonexisting_content_id_to_delete = 'feedback_1'
        with self.assertRaisesRegexp(
            Exception, 'The content_id feedback_1 does not exist.'):
            recorded_voiceovers.delete_content_id_for_voiceover(
                nonexisting_content_id_to_delete)
    def test_delete_content_id_for_voiceover_with_invalid_content_id_raise_error(self): # pylint: disable=line-too-long
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({
            'voiceovers_mapping': {}
        })
        invalid_content_id_to_delete = 123
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            recorded_voiceovers.delete_content_id_for_voiceover(
                invalid_content_id_to_delete)
    def test_validation_with_invalid_content_id_raise_error(self):
        # Non-string content id keys are rejected by validate().
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                123: {}
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected content_id to be a string, '):
            recorded_voiceovers.validate([123])
    def test_validate_non_dict_language_code_to_voiceover(self):
        # Each content id must map to a dict of language codes; a list
        # should fail validation.
        recorded_voiceovers = state_domain.RecordedVoiceovers({
            'en': []
        })
        with self.assertRaisesRegexp(
            Exception,
            re.escape('Expected content_id value to be a dict, received []')):
            recorded_voiceovers.validate(None)
    def test_validation_with_invalid_type_language_code_raise_error(self):
        # Language codes must be strings; an int key is rejected.
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    123: {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False,
                        'duration_secs': 1.1
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(
            Exception, 'Expected language_code to be a string, '):
            recorded_voiceovers.validate(['content'])
    def test_validation_with_unknown_language_code_raise_error(self):
        # 'ed' is not a supported language code.
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'ed': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False,
                        'duration_secs': 1.1
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(Exception, 'Invalid language_code: ed'):
            recorded_voiceovers.validate(['content'])
    def test_validation_with_invalid_content_id_list(self):
        # validate() checks the mapping's content ids against the list of
        # expected content ids supplied by the caller.
        recorded_voiceovers_dict = {
            'voiceovers_mapping': {
                'content': {
                    'en': {
                        'filename': 'xyz.mp3',
                        'file_size_bytes': 123,
                        'needs_update': False,
                        'duration_secs': 1.1
                    }
                }
            }
        }
        recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict(
            recorded_voiceovers_dict)
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Expected state recorded_voiceovers to match the listed '
                'content ids [\'invalid_content\']')):
            recorded_voiceovers.validate(['invalid_content'])
class VoiceoverDomainTests(test_utils.GenericTestBase):
    """Tests validation of the Voiceover domain object."""
    def setUp(self):
        super(VoiceoverDomainTests, self).setUp()
        # A valid voiceover; each test below corrupts one attribute and then
        # expects validate() to raise.
        self.voiceover = state_domain.Voiceover('filename.mp3', 10, False, 15.0)
    def test_validate_non_str_filename(self):
        self.voiceover.validate()
        self.voiceover.filename = 0
        with self.assertRaisesRegexp(
            Exception, 'Expected audio filename to be a string'):
            self.voiceover.validate()
    def test_validate_filename(self):
        # A filename without an extension is invalid.
        self.voiceover.validate()
        self.voiceover.filename = 'invalid_filename'
        with self.assertRaisesRegexp(Exception, 'Invalid audio filename'):
            self.voiceover.validate()
    def test_validate_audio_extension(self):
        # Only accepted audio extensions are allowed; .png is not one.
        self.voiceover.validate()
        self.voiceover.filename = 'filename.png'
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Invalid audio filename: it should have one of the following '
                'extensions: %s'
                % list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()))):
            self.voiceover.validate()
    def test_validate_non_int_file_size_bytes(self):
        self.voiceover.validate()
        self.voiceover.file_size_bytes = 'file_size_bytes'
        with self.assertRaisesRegexp(
            Exception, 'Expected file size to be an int'):
            self.voiceover.validate()
    def test_validate_negative_file_size_bytes(self):
        self.voiceover.validate()
        self.voiceover.file_size_bytes = -1
        with self.assertRaisesRegexp(Exception, 'Invalid file size'):
            self.voiceover.validate()
    def test_validate_non_bool_needs_update(self):
        self.voiceover.validate()
        self.voiceover.needs_update = 'needs_update'
        with self.assertRaisesRegexp(
            Exception, 'Expected needs_update to be a bool'):
            self.voiceover.validate()
    def test_validate_float_duration_secs(self):
        self.voiceover.validate()
        self.voiceover.duration_secs = 'duration_secs'
        with self.assertRaisesRegexp(
            Exception, 'Expected duration_secs to be a float'):
            self.voiceover.validate()
    def test_validate_int_duration_secs(self):
        # An int is also rejected: duration_secs must be a float.
        self.voiceover.validate()
        self.voiceover.duration_secs = 10
        with self.assertRaisesRegexp(
            Exception, 'Expected duration_secs to be a float'):
            self.voiceover.validate()
    def test_validate_negative_duration_seconds(self):
        self.voiceover.validate()
        self.voiceover.duration_secs = -1.45
        with self.assertRaisesRegexp(
            Exception, 'Expected duration_secs to be positive number, '
            'or zero if not yet specified'):
            self.voiceover.validate()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.