# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from textwrap import dedent
from typing import Iterable, Set, cast
import pytest
from pants.base.exceptions import ResolveError
from pants.base.specs import (
AddressLiteralSpec,
AddressSpec,
AddressSpecs,
AscendantAddresses,
DescendantAddresses,
SiblingAddresses,
)
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.addresses import Address, Addresses, AddressInput, BuildFileAddress
from pants.engine.fs import DigestContents, FileContent, PathGlobs
from pants.engine.internals.build_files import (
AddressFamilyDir,
evaluate_preludes,
parse_address_family,
)
from pants.engine.internals.parser import BuildFilePreludeSymbols, Parser
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.target_adaptor import TargetAdaptor
from pants.engine.target import Dependencies, Sources, Tags, Target
from pants.option.global_options import GlobalOptions
from pants.testutil.option_util import create_subsystem
from pants.testutil.rule_runner import MockGet, QueryRule, RuleRunner, run_rule_with_mocks
from pants.util.frozendict import FrozenDict
def test_parse_address_family_empty() -> None:
"""Test that parsing an empty BUILD file results in an empty AddressFamily."""
af = run_rule_with_mocks(
parse_address_family,
rule_args=[
Parser(target_type_aliases=[], object_aliases=BuildFileAliases()),
create_subsystem(GlobalOptions, build_patterns=["BUILD"], build_ignore=[]),
BuildFilePreludeSymbols(FrozenDict()),
AddressFamilyDir("/dev/null"),
],
mock_gets=[
MockGet(
output_type=DigestContents,
input_type=PathGlobs,
mock=lambda _: DigestContents([FileContent(path="/dev/null/BUILD", content=b"")]),
),
],
)
assert len(af.name_to_target_adaptors) == 0
def run_prelude_parsing_rule(prelude_content: str) -> BuildFilePreludeSymbols:
symbols = run_rule_with_mocks(
evaluate_preludes,
rule_args=[create_subsystem(GlobalOptions, build_file_prelude_globs=["prelude"])],
mock_gets=[
MockGet(
output_type=DigestContents,
input_type=PathGlobs,
mock=lambda _: DigestContents(
[FileContent(path="/dev/null/prelude", content=prelude_content.encode())]
),
),
],
)
return cast(BuildFilePreludeSymbols, symbols)
def test_prelude_parsing_good() -> None:
result = run_prelude_parsing_rule("def foo(): return 1")
assert result.symbols["foo"]() == 1
def test_prelude_parsing_syntax_error() -> None:
with pytest.raises(
Exception, match="Error parsing prelude file /dev/null/prelude: name 'blah' is not defined"
):
run_prelude_parsing_rule("blah")
def test_prelude_parsing_illegal_import() -> None:
prelude_content = dedent(
"""\
import os
def make_target():
python_library()
"""
)
with pytest.raises(
Exception,
match="Import used in /dev/null/prelude at line 1\\. Import statements are banned",
):
run_prelude_parsing_rule(prelude_content)
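# Illustrative sketch, not part of the original suite: prelude symbols behave like
# ordinary Python callables, so a prelude can define small macro-like helpers whose
# return values are then used in BUILD files. The helper name below is an assumption
# for illustration only.
def test_prelude_symbol_usable_as_macro() -> None:
    symbols = run_prelude_parsing_rule(
        dedent(
            """\
            def default_tags():
                return ["demo", "example"]
            """
        )
    )
    assert symbols.symbols["default_tags"]() == ["demo", "example"]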
class MockTgt(Target):
alias = "mock_tgt"
core_fields = (Dependencies, Sources, Tags)
def test_resolve_address() -> None:
rule_runner = RuleRunner(rules=[QueryRule(Address, (AddressInput,))])
def assert_is_expected(address_input: AddressInput, expected: Address) -> None:
assert rule_runner.request(Address, [address_input]) == expected
rule_runner.create_file("a/b/c.txt")
assert_is_expected(
AddressInput("a/b/c.txt"), Address("a/b", target_name=None, relative_file_path="c.txt")
)
assert_is_expected(
AddressInput("a/b"), Address("a/b", target_name=None, relative_file_path=None)
)
assert_is_expected(AddressInput("a/b", target_component="c"), Address("a/b", target_name="c"))
assert_is_expected(
AddressInput("a/b/c.txt", target_component="c"),
Address("a/b", relative_file_path="c.txt", target_name="c"),
)
# Top-level addresses will not have a path_component, unless they are a file address.
rule_runner.create_file("f.txt")
assert_is_expected(
AddressInput("f.txt", target_component="original"),
Address("", relative_file_path="f.txt", target_name="original"),
)
assert_is_expected(AddressInput("", target_component="t"), Address("", target_name="t"))
with pytest.raises(ExecutionError) as exc:
rule_runner.request(Address, [AddressInput("a/b/fake")])
assert "'a/b/fake' does not exist on disk" in str(exc.value)
@pytest.fixture
def target_adaptor_rule_runner() -> RuleRunner:
return RuleRunner(rules=[QueryRule(TargetAdaptor, (Address,))], target_types=[MockTgt])
def test_target_adaptor_parsed_correctly(target_adaptor_rule_runner: RuleRunner) -> None:
target_adaptor_rule_runner.add_to_build_file(
"helloworld",
dedent(
"""\
mock_tgt(
fake_field=42,
dependencies=[
# Because we don't follow dependencies or even parse dependencies, this
# self-cycle should be fine.
"helloworld",
":sibling",
"helloworld/util",
"helloworld/util:tests",
],
)
"""
),
)
addr = Address("helloworld")
target_adaptor = target_adaptor_rule_runner.request(TargetAdaptor, [addr])
assert target_adaptor.name == "helloworld"
assert target_adaptor.type_alias == "mock_tgt"
assert target_adaptor.kwargs["dependencies"] == [
"helloworld",
":sibling",
"helloworld/util",
"helloworld/util:tests",
]
# NB: TargetAdaptors do not validate what fields are valid. The Target API should error
# when encountering this, but it's fine at this stage.
assert target_adaptor.kwargs["fake_field"] == 42
def test_target_adaptor_not_found(target_adaptor_rule_runner: RuleRunner) -> None:
with pytest.raises(ExecutionError) as exc:
target_adaptor_rule_runner.request(TargetAdaptor, [Address("helloworld")])
assert "Directory \\'helloworld\\' does not contain any BUILD files" in str(exc)
target_adaptor_rule_runner.add_to_build_file("helloworld", "mock_tgt(name='other_tgt')")
expected_rx_str = re.escape(
"'helloworld' was not found in namespace 'helloworld'. Did you mean one of:\n :other_tgt"
)
with pytest.raises(ExecutionError, match=expected_rx_str):
target_adaptor_rule_runner.request(TargetAdaptor, [Address("helloworld")])
def test_build_file_address() -> None:
rule_runner = RuleRunner(
rules=[QueryRule(BuildFileAddress, (Address,))], target_types=[MockTgt]
)
rule_runner.create_file("helloworld/BUILD.ext", "mock_tgt()")
def assert_bfa_resolved(address: Address) -> None:
expected_bfa = BuildFileAddress(address, "helloworld/BUILD.ext")
bfa = rule_runner.request(BuildFileAddress, [address])
assert bfa == expected_bfa
assert_bfa_resolved(Address("helloworld"))
# File addresses should use their BUILD target to find the BUILD file.
assert_bfa_resolved(Address("helloworld", relative_file_path="f.txt"))
@pytest.fixture
def address_specs_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[QueryRule(Addresses, (AddressSpecs,))],
target_types=[MockTgt],
)
def resolve_address_specs(
rule_runner: RuleRunner,
specs: Iterable[AddressSpec],
) -> Set[Address]:
result = rule_runner.request(Addresses, [AddressSpecs(specs, filter_by_global_options=True)])
return set(result)
def test_address_specs_deduplication(address_specs_rule_runner: RuleRunner) -> None:
"""When multiple specs cover the same address, we should deduplicate to one single Address."""
address_specs_rule_runner.create_file("demo/f.txt")
address_specs_rule_runner.add_to_build_file("demo", "mock_tgt(sources=['f.txt'])")
# We also include a file address to ensure that that is included in the result.
specs = [
AddressLiteralSpec("demo", "demo"),
AddressLiteralSpec("demo/f.txt", "demo"),
SiblingAddresses("demo"),
DescendantAddresses("demo"),
AscendantAddresses("demo"),
]
assert resolve_address_specs(address_specs_rule_runner, specs) == {
Address("demo"),
Address("demo", relative_file_path="f.txt"),
}
def test_address_specs_filter_by_tag(address_specs_rule_runner: RuleRunner) -> None:
address_specs_rule_runner.set_options(["--tag=+integration"])
address_specs_rule_runner.create_file("demo/f.txt")
address_specs_rule_runner.add_to_build_file(
"demo",
dedent(
"""\
mock_tgt(name="a", sources=["f.txt"])
mock_tgt(name="b", sources=["f.txt"], tags=["integration"])
mock_tgt(name="c", sources=["f.txt"], tags=["ignore"])
"""
),
)
assert resolve_address_specs(address_specs_rule_runner, [SiblingAddresses("demo")]) == {
Address("demo", target_name="b")
}
# The same filtering should work when given literal addresses, including file addresses.
# For file addresses, we look up the `tags` field of the original BUILD target.
literals_result = resolve_address_specs(
address_specs_rule_runner,
[
AddressLiteralSpec("demo", "a"),
AddressLiteralSpec("demo", "b"),
AddressLiteralSpec("demo", "c"),
AddressLiteralSpec("demo/f.txt", "a"),
AddressLiteralSpec("demo/f.txt", "b"),
AddressLiteralSpec("demo/f.txt", "c"),
],
)
assert literals_result == {
Address("demo", relative_file_path="f.txt", target_name="b"),
Address("demo", target_name="b"),
}
def test_address_specs_filter_by_exclude_pattern(address_specs_rule_runner: RuleRunner) -> None:
address_specs_rule_runner.set_options(["--exclude-target-regexp=exclude_me.*"])
address_specs_rule_runner.create_file("demo/f.txt")
address_specs_rule_runner.add_to_build_file(
"demo",
dedent(
"""\
mock_tgt(name="exclude_me", sources=["f.txt"])
mock_tgt(name="not_me", sources=["f.txt"])
"""
),
)
assert resolve_address_specs(address_specs_rule_runner, [SiblingAddresses("demo")]) == {
Address("demo", target_name="not_me")
}
# The same filtering should work when given literal addresses, including file addresses.
# The filtering will operate against the normalized Address.spec.
literals_result = resolve_address_specs(
address_specs_rule_runner,
[
AddressLiteralSpec("demo", "exclude_me"),
AddressLiteralSpec("demo", "not_me"),
AddressLiteralSpec("demo/f.txt", "exclude_me"),
AddressLiteralSpec("demo/f.txt", "not_me"),
],
)
assert literals_result == {
Address("demo", relative_file_path="f.txt", target_name="not_me"),
Address("demo", target_name="not_me"),
}
def test_address_specs_do_not_exist(address_specs_rule_runner: RuleRunner) -> None:
address_specs_rule_runner.create_file("real/f.txt")
address_specs_rule_runner.add_to_build_file("real", "mock_tgt(sources=['f.txt'])")
address_specs_rule_runner.add_to_build_file("empty", "# empty")
def assert_resolve_error(specs: Iterable[AddressSpec], *, expected: str) -> None:
with pytest.raises(ExecutionError) as exc:
resolve_address_specs(address_specs_rule_runner, specs)
assert expected in str(exc.value)
# Literal addresses require both a BUILD file to exist and for a target to be resolved.
assert_resolve_error(
[AddressLiteralSpec("fake", "tgt")], expected="'fake' does not exist on disk"
)
assert_resolve_error(
[AddressLiteralSpec("fake/f.txt", "tgt")],
expected="'fake/f.txt' does not exist on disk",
)
did_you_mean = ResolveError.did_you_mean(
bad_name="fake_tgt", known_names=["real"], namespace="real"
)
assert_resolve_error([AddressLiteralSpec("real", "fake_tgt")], expected=str(did_you_mean))
assert_resolve_error([AddressLiteralSpec("real/f.txt", "fake_tgt")], expected=str(did_you_mean))
# SiblingAddresses require the BUILD file to exist, but are okay if no targets are resolved.
assert_resolve_error(
[SiblingAddresses("fake")],
expected=(
"'fake' does not contain any BUILD files, but 'fake:' expected matching targets "
"there."
),
)
assert not resolve_address_specs(address_specs_rule_runner, [SiblingAddresses("empty")])
# DescendantAddresses requires at least one match, even if BUILD files exist.
assert_resolve_error(
[DescendantAddresses("fake"), DescendantAddresses("empty")],
expected="Address spec 'fake::' does not match any targets",
)
# AscendantAddresses does not require any matches or BUILD files.
assert not resolve_address_specs(
address_specs_rule_runner, [AscendantAddresses("fake"), AscendantAddresses("empty")]
)
def test_address_specs_file_does_not_belong_to_target(
address_specs_rule_runner: RuleRunner,
) -> None:
"""Even if a file's address file exists and target exist, we should validate that the file
actually belongs to that target."""
address_specs_rule_runner.create_file("demo/f.txt")
address_specs_rule_runner.add_to_build_file(
"demo",
dedent(
"""\
mock_tgt(name='owner', sources=['f.txt'])
mock_tgt(name='not_owner')
"""
),
)
with pytest.raises(ExecutionError) as exc:
resolve_address_specs(
address_specs_rule_runner, [AddressLiteralSpec("demo/f.txt", "not_owner")]
)
assert "does not match a file demo/f.txt" in str(exc.value)
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from uuid import uuid4
from oslo_config import cfg
from sqlalchemy import and_
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
from tuskar.db.sqlalchemy.api import get_session
from tuskar.db.sqlalchemy.models import StoredFile
from tuskar.storage.drivers.base import BaseDriver
from tuskar.storage.exceptions import NameAlreadyUsed
from tuskar.storage.exceptions import UnknownName
from tuskar.storage.exceptions import UnknownUUID
from tuskar.storage.exceptions import UnknownVersion
from tuskar.storage.models import StoredFile as StorageModel
sql_opts = [
cfg.StrOpt('mysql_engine',
default='InnoDB',
help='MySQL engine')
]
cfg.CONF.register_opts(sql_opts)
class SQLAlchemyDriver(BaseDriver):
def _generate_uuid(self):
return str(uuid4())
def _to_storage_model(self, store, result):
"""Convert a result from SQLAlchemy into an instance of the common
model used in the tuskar.storage.
:param store: Instance of the storage store
:type store: tuskat.storage.stores._BaseStore
:param result: Instance of the SQLAlchemy model as returned by a query.
:type result: tuskar.db.sqlalchemy.models.StoredFile
:return: Instance of the StoredFile class.
:rtype: tuskar.storage.models.StoredFile
"""
file_dict = result.as_dict()
file_dict.pop('object_type')
file_dict['store'] = store
return StorageModel(**file_dict)
def _upsert(self, store, stored_file):
session = get_session()
session.begin()
try:
session.add(stored_file)
session.commit()
return self._to_storage_model(store, stored_file)
finally:
session.close()
def _get_latest_version(self, store, name):
session = get_session()
try:
return session.query(
func.max(StoredFile.version)
).filter_by(
object_type=store.object_type, name=name
).scalar()
finally:
session.close()
def _create(self, store, name, contents, version, relative_path='',
registry_path=''):
stored_file = StoredFile(
uuid=self._generate_uuid(),
contents=contents,
object_type=store.object_type,
name=name,
version=version,
relative_path=relative_path,
registry_path=registry_path
)
return self._upsert(store, stored_file)
def create(self, store, name, contents, relative_path='',
registry_path=''):
"""Given the store, name and contents create a new file and return a
`StoredFile` instance representing it. The optional relative_path
is appended to the generated template directory structure.
Some of the stored items such as environment files do not have names.
When working with these, name must be passed explicitly as None. This
is why the name has a type of "str or None" below.
:param store: The store class, used for routing the storage.
:type store: tuskar.storage.stores._BaseStore
:param name: name of the object to store (optional)
:type name: str or None
:param contents: String containing the file contents
:type contents: str
:param relative_path: String relative path to place the template under
:type relative_path: str
:param registry_path: String path with which a Role will appear in
the resource registry.
:type registry_path: str
:return: StoredFile instance containing the file metadata and contents
:rtype: tuskar.storage.models.StoredFile
"""
if store.versioned:
version = 1
else:
version = None
if name is not None:
try:
self.retrieve_by_name(store, name)
msg = "A {0} with the name '{1}' already exists".format(
store.object_type,
name
)
raise NameAlreadyUsed(msg)
except UnknownName:
pass
return self._create(store, name, contents, version, relative_path,
registry_path)
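# Usage sketch (illustrative only; `store` is assumed to be an instance of one of
# the tuskar.storage.stores classes and a database session must be configured):
#
#     driver = SQLAlchemyDriver()
#     stored = driver.create(store, "my-template", "template contents ...")
#     assert stored.version == 1      # versioned stores start at version 1
#     same = driver.retrieve(store, stored.uuid)
#
# Calling create() again with the same name for the same store raises
# NameAlreadyUsed, as checked above.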
def _retrieve(self, object_type, uuid):
session = get_session()
try:
return session.query(StoredFile).filter_by(
uuid=uuid,
object_type=object_type
).one()
except NoResultFound:
msg = "No {0}s for the UUID: {1}".format(object_type, uuid)
raise UnknownUUID(msg)
finally:
session.close()
def retrieve(self, store, uuid):
"""Returns the stored file for a given store that matches the provided
UUID.
:param store: The store class, used for routing the storage.
:type store: tuskar.storage.stores._BaseStore
:param uuid: UUID of the object to retrieve.
:type uuid: str
:return: StoredFile instance containing the file metadata and contents
:rtype: tuskar.storage.models.StoredFile
:raises: tuskar.storage.exceptions.UnknownUUID if the UUID can't be
found
"""
stored_file = self._retrieve(store.object_type, uuid)
return self._to_storage_model(store, stored_file)
def update(self, store, uuid, contents, relative_path='',
registry_path=''):
"""Given the store, uuid, name and contents update the existing stored
file and return an instance of StoredFile that reflects the updates.
Either name and/or contents can be provided. If they are not then they
will remain unchanged.
:param store: The store class, used for routing the storage.
:type store: tuskar.storage.stores._BaseStore
:param uuid: UUID of the object to update.
:type uuid: str
:param name: name of the object to store (optional)
:type name: str
:param contents: String containing the file contents (optional)
:type contents: str
:return: StoredFile instance containing the file metadata and contents
:rtype: tuskar.storage.models.StoredFile
:raises: tuskar.storage.exceptions.UnknownUUID if the UUID can't be
found
"""
stored_file = self._retrieve(store.object_type, uuid)
stored_file.contents = contents
stored_file.relative_path = relative_path if relative_path else None
stored_file.registry_path = registry_path if registry_path else None
if store.versioned:
version = self._get_latest_version(store, stored_file.name) + 1
return self._create(
store, stored_file.name, stored_file.contents, version,
relative_path, registry_path)
return self._upsert(store, stored_file)
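# Behaviour sketch for update(), following the branches above (illustrative):
#
#     stored = driver.create(store, "my-template", "v1 contents")    # version 1
#     updated = driver.update(store, stored.uuid, "v2 contents")
#     # versioned store   -> a new row is written and updated.version == 2
#     # unversioned store -> the existing row is updated in place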
def delete(self, store, uuid):
"""Delete the stored file with the UUID under the given store.
:param store: The store class, used for routing the storage.
:type store: tuskar.storage.stores._BaseStore
:param uuid: UUID of the object to delete.
:type uuid: str
:return: Returns nothing on success. Exceptions are expected for errors
:rtype: None
:raises: tuskar.storage.exceptions.UnknownUUID if the UUID can't be
found
"""
session = get_session()
session.begin()
stored_file = self._retrieve(store.object_type, uuid)
try:
session.delete(stored_file)
session.commit()
finally:
session.close()
def list(self, store, only_latest=False):
"""Return a list of all the stored objects for a given store.
Optionally only_latest can be set to True to return only the most
recent version of each objects (grouped by name).
:param store: The store class, used for routing the storage.
:type store: tuskar.storage.stores._BaseStore
:param only_latest: If set to True only the latest versions of each
object will be returned.
:type only_latest: bool
:return: List of StoredFile instances
:rtype: [tuskar.storage.models.StoredFile]
"""
object_type = store.object_type
session = get_session()
try:
files = session.query(StoredFile).filter_by(
object_type=object_type
)
if only_latest:
# When only_latest is provided, then we want to select only the
# stored files with the latest version. To do this we use a
# subquery to get a set of names and latest versions for the
# object type. After we have that, we join in the name and
# version to make sure we match it.
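# Roughly, the combined query has this shape (illustrative SQL; the exact
# statement SQLAlchemy emits and the table name may differ):
#
#     SELECT sf.* FROM stored_file sf
#     JOIN (SELECT name, MAX(version) AS version
#           FROM stored_file
#           WHERE object_type = :object_type
#           GROUP BY name) latest
#       ON sf.name = latest.name AND sf.version = latest.version
#     WHERE sf.object_type = :object_type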
stmt = session.query(
StoredFile.name,
func.max(StoredFile.version).label("version")
).filter_by(
object_type=object_type
).group_by(
StoredFile.name
).subquery()
# join our existing query on the subquery.
files = files.join(
stmt,
and_(
StoredFile.name == stmt.c.name,
StoredFile.version == stmt.c.version,
)
)
return [self._to_storage_model(store, file_) for file_ in files]
finally:
session.close()
def retrieve_by_name(self, store, name, version=None):
"""Returns the stored file for a given store that matches the provided
name and optionally version.
:param store: The store class, used for routing the storage.
:type store: tuskar.storage.stores._BaseStore
:param name: name of the object to retrieve.
:type name: str
:param version: Version of the object to retrieve. If the version isn't
provided, the latest will be returned.
:type version: int
:return: StoredFile instance containing the file metadata and contents
:rtype: tuskar.storage.models.StoredFile
:raises: tuskar.storage.exceptions.UnknownName if the name can't be
found
:raises: tuskar.storage.exceptions.UnknownVersion if the version can't
be found
"""
object_type = store.object_type
session = get_session()
try:
query = session.query(StoredFile).filter_by(
name=name,
object_type=object_type,
)
if version is not None:
query = query.filter_by(version=version)
else:
query = query.filter_by(
version=self._get_latest_version(store, name)
)
stored_file = query.one()
return self._to_storage_model(store, stored_file)
except NoResultFound:
name_query = session.query(StoredFile).filter_by(
name=name,
object_type=object_type,
)
if name_query.count() == 0:
msg = "No {0}s found for the name: {1}".format(
object_type,
name
)
raise UnknownName(msg)
elif name_query.filter_by(version=version).count() == 0:
msg = "No {0}s found for the Version: {1}".format(
object_type,
version
)
raise UnknownVersion(msg)
raise
finally:
session.close()
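# Lookup sketch for retrieve_by_name() (illustrative only):
#
#     latest = driver.retrieve_by_name(store, "my-template")             # newest version
#     first = driver.retrieve_by_name(store, "my-template", version=1)
#
# An unknown name raises UnknownName; a known name combined with a version that
# was never stored raises UnknownVersion, as handled above.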
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import simple_audit
import logging
import datetime
from datetime import date, timedelta
from django.db import models, transaction, Error
from django.db.models.signals import pre_save, post_save, pre_delete
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields.encrypted import EncryptedCharField
from util import slugify, make_db_random_password
from util.models import BaseModel
from physical.models import DatabaseInfra, Environment
from drivers import factory_for
from system.models import Configuration
from account.models import Team
from drivers.base import DatabaseStatus
from drivers.errors import ConnectionError
from logical.validators import database_name_evironment_constraint
from notification.models import TaskHistory
LOG = logging.getLogger(__name__)
KB_FACTOR = 1.0 / 1024.0
MB_FACTOR = 1.0 / 1024.0 / 1024.0
GB_FACTOR = 1.0 / 1024.0 / 1024.0 / 1024.0
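# e.g. 1073741824 bytes * GB_FACTOR == 1.0 GB, and the same value * MB_FACTOR == 1024.0 MB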
class Project(BaseModel):
name = models.CharField(
verbose_name=_("Project name"), max_length=100, unique=True)
description = models.TextField(
verbose_name=_("Description"), null=True, blank=True)
is_active = models.BooleanField(
verbose_name=_("Is project active"), default=True)
slug = models.SlugField()
def __unicode__(self):
return "%s" % self.name
class Meta:
permissions = (
("view_project", "Can view projects"),
)
ordering = ['name']
class DatabaseAliveManager(models.Manager):
def get_query_set(self):
return Database.objects.filter(is_in_quarantine=False)
class DatabaseHistory(models.Model):
database_id = models.IntegerField(db_index=True)
environment = models.CharField(
verbose_name=_("environment"), max_length=20
)
engine = models.CharField(
verbose_name=_("engine"), max_length=100
)
name = models.CharField(
verbose_name=_("name"), max_length=200
)
project = models.CharField(
verbose_name=_("project"), max_length=100
)
team = models.CharField(
verbose_name=_("team"), max_length=100
)
databaseinfra_name = models.CharField(
verbose_name=_("databaseinfra_name"), max_length=100
)
plan = models.CharField(
verbose_name=_("plan"), max_length=100
)
disk_size_kb = models.PositiveIntegerField(verbose_name=_("Size KB"))
has_persistence = models.BooleanField(
verbose_name="Disk persistence", default=True
)
created_at = models.DateTimeField(
verbose_name=_("created_at"))
deleted_at = models.DateTimeField(
verbose_name=_("deleted_at"), auto_now_add=True)
description = models.TextField(
verbose_name=_("Description"), null=True, blank=True
)
class Database(BaseModel):
DEAD = 0
ALIVE = 1
INITIALIZING = 2
ALERT = 3
DB_STATUS = (
(DEAD, 'Dead'),
(ALIVE, 'Alive'),
(INITIALIZING, 'Initializing'),
(ALERT, 'Alert')
)
name = models.CharField(
verbose_name=_("Database name"), max_length=100, db_index=True
)
databaseinfra = models.ForeignKey(
DatabaseInfra, related_name="databases", on_delete=models.PROTECT
)
project = models.ForeignKey(
Project, related_name="databases", on_delete=models.PROTECT, null=True,
blank=True
)
team = models.ForeignKey(
Team, related_name="databases", null=True, blank=True,
help_text=_("Team that is accountable for the database")
)
is_in_quarantine = models.BooleanField(
verbose_name=_("Is database in quarantine?"), default=False
)
quarantine_dt = models.DateField(
verbose_name=_("Quarantine date"), null=True, blank=True,
editable=False
)
description = models.TextField(
verbose_name=_("Description"), null=True, blank=True
)
status = models.IntegerField(choices=DB_STATUS, default=2)
used_size_in_bytes = models.FloatField(default=0.0)
environment = models.ForeignKey(
Environment, related_name="databases", on_delete=models.PROTECT,
db_index=True
)
backup_path = models.CharField(
verbose_name=_("Backup path"), max_length=300, null=True, blank=True,
help_text=_("Full path to backup file")
)
subscribe_to_email_events = models.BooleanField(
verbose_name=_("Subscribe to email events"), default=True,
help_text=_(
"Check this box if you'd like to receive information "
"regarding this database by email."
)
)
disk_auto_resize = models.BooleanField(
verbose_name=_("Disk auto resize"), default=True,
help_text=_("When marked, the disk will be resized automatically.")
)
is_protected = models.BooleanField(
verbose_name=_("Protected"), default=False,
help_text=_("When marked, the database can not be deleted.")
)
quarantine_user = models.ForeignKey(
User, related_name='databases_quarantine',
null=True, blank=True, editable=False
)
def validate_unique(self, *args, **kwargs):
'''Validate that the database name is unique
within the environment stage.'''
super(Database, self).validate_unique(*args, **kwargs)
if not any([
hasattr(self, "environment"),
hasattr(self, "name")]) or self.id:
return
environment = Environment.objects.filter(pk=self.environment_id)
if not environment.exists():
return
environment = environment[0]
db_check = Database.objects.filter(
name=self.name,
environment__stage=environment.stage
)
if db_check.exists():
raise ValidationError({
"name": [
"Name %s is alredy been used in the %s environment" % (
self.name,
Environment.get_stage_by_id(self.environment.stage)
)
]
})
def team_contact(self):
if self.team:
return self.team.emergency_contacts
team_contact.short_description = 'Emergency contacts'
objects = models.Manager()
alive = DatabaseAliveManager()
quarantine_time = Configuration.get_by_name_as_int(
'quarantine_retention_days'
)
def __unicode__(self):
return u"{}".format(self.name)
class Meta:
permissions = (
("can_manage_quarantine_databases",
"Can manage databases in quarantine"),
("view_database", "Can view databases"),
("upgrade_mongo24_to_30",
"Can upgrade mongoDB version from 2.4 to 3.0"),
("upgrade_database", "Can upgrade databases"),
("configure_ssl", "Can configure SSL"),
)
unique_together = (
('name', 'environment'),
)
ordering = ('name', )
@property
def is_in_memory(self):
return self.engine.engine_type.is_in_memory
@property
def has_persistence(self):
return self.plan.has_persistence
@property
def has_persistense_equivalent_plan(self):
if self.plan.persistense_equivalent_plan:
return True
return False
@property
def persistence_change_text(self):
if self.has_persistence:
return 'Change to Memory Only'
return 'Change to Persisted'
@property
def infra(self):
return self.databaseinfra
@property
def engine_type(self):
return self.infra.engine_name
@property
def engine(self):
return self.infra.engine
@property
def plan(self):
return self.databaseinfra and self.databaseinfra.plan
def pin_task(self, task):
try:
with transaction.atomic():
DatabaseLock(database=self, task=task).save()
except Error:
return False
else:
return True
@staticmethod
def __clean_task(task_name):
if task_name.endswith('_rollback'):
return task_name.rsplit('_rollback', 1)[0]
if task_name.endswith('_retry'):
return task_name.rsplit('_retry', 1)[0]
return task_name
def update_task(self, task):
lock = self.lock.first()
if not lock:
return self.pin_task(task)
with transaction.atomic():
lock = DatabaseLock.objects.select_for_update().filter(
database=self
).first()
task_name = self.__clean_task(task.task_name)
lock_task_name = self.__clean_task(lock.task.task_name)
if lock_task_name != task_name or not lock.task.is_status_error:
return False
lock.task = task
lock.save()
return True
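# Locking protocol sketch, derived from pin_task/update_task above and
# finish_task below (illustrative caller code, not part of this module):
#
#     if database.pin_task(task):          # creates the per-database DatabaseLock row
#         try:
#             ...  # run the long-running task
#         finally:
#             database.finish_task()       # refreshes statuses and unpins the lock
#
# update_task() only swaps the lock onto a new task when the new task is the
# same task (or its _retry/_rollback variant) and the locked task ended in error.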
def finish_task(self):
for instance in self.infra.instances.all():
try:
instance.update_status()
except Exception as e:
LOG.error(
"Could not refresh status for {} - {}".format(instance, e)
)
continue
try:
self.update_status()
except Exception as e:
LOG.error("Could not refresh status for {} - {}".format(self, e))
self.unpin_task()
def update_status(self):
self.status = Database.DEAD
if self.database_status and self.database_status.is_alive:
self.status = Database.ALIVE
instances_status = self.databaseinfra.check_instances_status()
if instances_status == self.databaseinfra.ALERT:
self.status = Database.ALERT
self.save(update_fields=['status'])
def unpin_task(self):
DatabaseLock.objects.filter(database=self).delete()
@property
def current_locked_task(self):
lock = self.lock.first()
if lock:
return lock.task
@property
def is_locked(self):
lock = self.lock.first()
if lock:
return True
return False
def delete(self, *args, **kwargs):
if self.is_in_quarantine:
LOG.warning(
"Database {} is in quarantine and will be removed".format(
self.name
)
)
for credential in self.credentials.all():
instance = factory_for(self.databaseinfra)
instance.try_remove_user(credential)
engine = self.databaseinfra.engine
databaseinfra = self.databaseinfra
try:
DatabaseHistory.objects.create(
database_id=self.id,
name=self.name,
description=self.description,
engine='{} {}'.format(
engine.engine_type.name,
engine.version
),
project=self.project.name if self.project else '',
team=self.team.name if self.team else '',
databaseinfra_name=databaseinfra.name,
plan=databaseinfra.plan.name,
disk_size_kb=databaseinfra.disk_offering.size_kb,
has_persistence=databaseinfra.plan.has_persistence,
environment=self.environment.name,
created_at=self.created_at
)
except Exception as err:
LOG.error(
('Error on creating database history for '
'database {}: {}'.format(self.id, err)))
super(Database, self).delete(*args, **kwargs)
else:
LOG.warning("Putting database {} in quarantine".format(self.name))
self.is_in_quarantine = True
self.is_protected = False
self.save()
if self.credentials.exists():
for credential in self.credentials.all():
new_password = make_db_random_password()
new_credential = Credential.objects.get(pk=credential.id)
new_credential.password = new_password
new_credential.save()
instance = factory_for(self.databaseinfra)
instance.try_update_user(new_credential)
def clean(self):
if not self.pk:
self.name = slugify(self.name)
if self.name in self.__get_database_reserved_names():
raise ValidationError(
_("{} is a reserved database name".format(
self.name
))
)
def automatic_create_first_credential(self):
LOG.info("creating new credential for database {}".format(self.name))
user = Credential.USER_PATTERN % self.name
credential = Credential.create_new_credential(user, self)
return credential
@classmethod
def provision(cls, name, databaseinfra):
if not isinstance(databaseinfra, DatabaseInfra):
raise ValidationError(
'Invalid databaseinfra type {} - {}'.format(
type(databaseinfra), databaseinfra
)
)
database = Database()
database.databaseinfra = databaseinfra
database.environment = databaseinfra.environment
database.name = name
database.full_clean()
database.save()
database = Database.objects.get(pk=database.pk)
return database
def __get_database_reserved_names(self):
return getattr(self.driver, 'RESERVED_DATABASES_NAME', [])
@property
def driver(self):
if self.databaseinfra_id is not None:
return self.databaseinfra.get_driver()
def get_endpoint(self):
return self.driver.get_connection(database=self)
def get_endpoint_dns(self):
return self.driver.get_connection_dns(database=self)
def get_endpoint_dns_simple(self):
return self.driver.get_connection_dns_simple(database=self)
def __graylog_url(self):
from util import get_credentials_for
from dbaas_credentials.models import CredentialType
if self.databaseinfra.plan.is_pre_provisioned:
return ""
credential = get_credentials_for(
environment=self.environment,
credential_type=CredentialType.GRAYLOG
)
stream = credential.get_parameter_by_name(
'stream_{}'.format(self.plan.engine.engine_type.name)
)
search_field = credential.get_parameter_by_name('search_field')
if not stream or not search_field:
return ""
return "{}/streams/{}/search?q={}:{}".format(
credential.endpoint, stream, search_field, self.name
)
def __kibana_url(self):
from util import get_credentials_for
from dbaas_credentials.models import CredentialType
if self.databaseinfra.plan.is_pre_provisioned:
return ""
credential = get_credentials_for(
environment=self.environment,
credential_type=CredentialType.KIBANA_LOG
)
search_field = credential.get_parameter_by_name('search_field')
if not search_field:
return ""
time_query = "_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-6h,to:now))"
filter_query = "_a=(columns:!(_source),filters:!(),interval:auto,query:(language:lucene,query:'{}:{}'))".format(
search_field, self.name
)
return "{}/app/kibana#/discover?{}&{}".format(
credential.endpoint, time_query, filter_query
)
def get_log_url(self):
if Configuration.get_by_name_as_int('graylog_integration') == 1:
return self.__graylog_url()
if Configuration.get_by_name_as_int('kibana_integration') == 1:
return self.__kibana_url()
def get_dex_url(self):
if Configuration.get_by_name_as_int('dex_analyze') != 1:
return ""
if self.databaseinfra.plan.is_pre_provisioned:
return ""
if self.engine_type != 'mongodb':
return ""
return 1
def get_is_preprovisioned(self):
return self.databaseinfra.plan.is_pre_provisioned
endpoint = property(get_endpoint)
endpoint_dns = property(get_endpoint_dns)
@cached_property
def database_status(self):
try:
info = self.databaseinfra.get_info()
if info is None:
return None
database_status = info.get_database_status(self.name)
if database_status is None:
# try get without cache
info = self.databaseinfra.get_info(force_refresh=True)
database_status = info.get_database_status(self.name)
except ConnectionError as e:
msg = ("ConnectionError calling database_status for database {}:"
"{}").format(self, e)
LOG.error(msg)
database_status = DatabaseStatus(self)
return database_status
def get_offering_name(self):
LOG.info("Get offering")
try:
offer_name = self.infra.offering.name
except Exception as e:
LOG.info("Oops...{}".format(e))
offer_name = None
return offer_name
offering = property(get_offering_name)
@property
def total_size(self):
return self.driver.masters_total_size_in_bytes
@property
def total_size_in_kb(self):
return round(self.driver.masters_total_size_in_bytes * KB_FACTOR, 2)
@property
def total_size_in_mb(self):
return round(self.driver.masters_total_size_in_bytes * MB_FACTOR, 2)
@property
def total_size_in_gb(self):
return round(self.driver.masters_total_size_in_bytes * GB_FACTOR, 2)
@property
def used_size_in_kb(self):
return self.driver.masters_used_size_in_bytes * KB_FACTOR
@property
def used_size_in_mb(self):
return self.driver.masters_used_size_in_bytes * MB_FACTOR
@property
def used_size_in_gb(self):
return self.driver.masters_used_size_in_bytes * GB_FACTOR
@property
def capacity(self):
if self.status:
return round(
(1.0 * self.used_size_in_bytes / self.total_size)
if self.total_size else 0, 2
)
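# Worked example (illustrative): used_size_in_bytes=5368709120 (5 GB) and
# total_size=10737418240 (10 GB) give capacity == round(0.5, 2) == 0.5.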
@classmethod
def purge_quarantine(self):
quarantine_time = Configuration.get_by_name_as_int(
'quarantine_retention_days')
quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
databases = Database.objects.filter(
is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt
)
for database in databases:
database.delete()
LOG.info(
("The database %s was deleted, because it was set to "
"quarentine %d days ago") % (database.name, quarantine_time)
)
@classmethod
def clone(cls, database, clone_name, plan, environment, user):
from notification.tasks import TaskRegister
TaskRegister.database_clone(
origin_database=database, clone_name=clone_name, plan=plan,
environment=environment, user=user
)
@classmethod
def restore(cls, database, snapshot, user):
from notification.tasks import TaskRegister
LOG.info(
("Changing database volume with params: "
"database {} snapshot: {}, user: {}").format(
database, snapshot, user
)
)
TaskRegister.restore_snapshot(
database=database, snapshot=snapshot, user=user
)
@classmethod
def upgrade_disk_type(cls, database, disk_offering_type, user):
from notification.tasks import TaskRegister
LOG.info(
("Changing database volume with params: "
"database {}, new_disk_type: {}, user: {}").format(
database, disk_offering_type, user
)
)
TaskRegister.upgrade_disk_type(
database=database, new_disk_type_upgrade=disk_offering_type, user=user
)
@classmethod
def resize(cls, database, offering, user):
from notification.tasks import TaskRegister
TaskRegister.database_resize(
database=database, user=user,
offering=offering
)
# @classmethod
# def recover_snapshot(cls, database, snapshot, user, task_history):
# from backup.tasks import restore_snapshot
#
# restore_snapshot.delay(
# database=database, snapshot=snapshot, user=user,
# task_history=task_history
# )
def get_metrics_url(self):
return "/admin/logical/database/{}/metrics/".format(self.id)
def get_resize_retry_url(self):
return "/admin/logical/database/{}/resize_retry/".format(self.id)
def get_resize_rollback_url(self):
return "/admin/logical/database/{}/resize_rollback/".format(self.id)
def get_disk_resize_url(self):
return "/admin/logical/database/{}/disk_resize/".format(self.id)
def get_add_instances_database_retry_url(self):
return "/admin/logical/database/{}/add_instances_database_retry/".format(self.id)
def get_add_instances_database_rollback_url(self):
return "/admin/logical/database/{}/add_instances_database_rollback/".format(self.id)
def get_remove_instance_database_retry_url(self):
return "/admin/logical/database/{}/remove_instance_database_retry/".format(self.id)
def get_mongodb_engine_version_upgrade_url(self):
return ("/admin/logical/database/{}/"
"mongodb_engine_version_upgrade/").format(self.id)
def get_upgrade_url(self):
return "/admin/logical/database/{}/upgrade/".format(self.id)
def get_upgrade_retry_url(self):
return "/admin/logical/database/{}/upgrade_retry/".format(self.id)
def get_migrate_engine_retry_url(self):
return "/admin/logical/database/{}/migrate_engine_retry/".format(self.id)
def get_upgrade_patch_url(self):
return "/admin/logical/database/{}/upgrade_patch/".format(self.id)
def get_upgrade_patch_retry_url(self):
return "/admin/logical/database/{}/upgrade_patch_retry/".format(
self.id
)
def get_change_parameters_retry_url(self):
return "/admin/logical/database/{}/change_parameters_retry/".format(
self.id
)
def get_reinstallvm_retry_url(self):
return "/admin/logical/database/{}/reinstallvm_retry/".format(self.id)
def get_recreateslave_retry_url(self):
return "/admin/logical/database/{}/recreateslave_retry/".format(
self.id
)
def get_configure_ssl_url(self):
return "/admin/logical/database/{}/configure_ssl/".format(self.id)
def get_configure_ssl_retry_url(self):
return "/admin/logical/database/{}/configure_ssl_retry/".format(
self.id
)
def get_set_ssl_required_url(self):
return "/admin/logical/database/{}/set_ssl_required/".format(self.id)
def get_set_ssl_required_retry_url(self):
return "/admin/logical/database/{}/set_ssl_required_retry/".format(
self.id
)
def get_set_ssl_not_required_url(self):
return "/admin/logical/database/{}/set_ssl_not_required/".format(
self.id)
def get_set_ssl_not_required_retry_url(self):
return "/admin/logical/database/{}/set_ssl_not_required_retry/".format(
self.id
)
def get_change_persistence_url(self):
return "/admin/logical/database/{}/change_persistence/".format(self.id)
def get_change_persistence_retry_url(self):
return "/admin/logical/database/{}/change_persistence_retry/".format(
self.id
)
def is_mongodb_24(self):
engine = self.engine
if engine.name == 'mongodb' and engine.version.startswith('2.4'):
return True
return False
def get_offering_id(self):
LOG.info("Get offering")
try:
offer_id = self.infra.plan.stronger_offering.id
except Exception as e:
LOG.info("Oops...{}".format(e))
offer_id = None
return offer_id
offering_id = property(get_offering_id)
def is_being_used_elsewhere(self, skip_tasks=None):
tasks = TaskHistory.objects.filter(
task_status=TaskHistory.STATUS_WAITING,
object_id=self.id,
object_class=self._meta.db_table)
if tasks:
return True
if not self.current_locked_task:
return False
skip_tasks = skip_tasks or []
if self.current_locked_task.task_name in skip_tasks:
if self.current_locked_task.is_status_error:
return False
return True
def restore_allowed(self):
if Configuration.get_by_name_as_int('restore_allowed') == 1:
return True
return False
def has_offerings(self):
offerings = self.environment.offerings.exclude(id=self.offering_id)
return bool(offerings)
def has_disk_offerings(self):
from physical.models import DiskOffering
offerings = DiskOffering.objects.exclude(
id=self.databaseinfra.disk_offering.id
)
return bool(offerings)
@property
def can_modify_parameters(self):
if self.plan.replication_topology.parameter.all():
return True
else:
return False
@property
def is_host_migrate_available(self):
from util.providers import get_host_migrate_steps
class_path = self.plan.replication_topology.class_path
try:
get_host_migrate_steps(class_path)
except NotImplementedError:
return False
else:
return True
@property
def is_dead(self):
if self.status != Database.ALIVE:
return True
if self.database_status and not self.database_status.is_alive:
return True
return False
@classmethod
def disk_resize(cls, database, new_disk_offering, user):
from physical.models import DiskOffering
from notification.tasks import TaskRegister
disk_offering = DiskOffering.objects.get(id=new_disk_offering)
TaskRegister.database_disk_resize(
database=database, user=user, disk_offering=disk_offering
)
def update_host_disk_used_size(self, host_address, used_size_kb,
total_size_kb=None):
instance = self.databaseinfra.instances.filter(
address=host_address
).first()
if not instance:
raise ObjectDoesNotExist()
volume = instance.hostname.volumes.last()
if not volume:
return None
if total_size_kb:
volume.total_size_kb = total_size_kb
volume.used_size_kb = used_size_kb
volume.save(update_fields=['total_size_kb','used_size_kb'])
return volume
def can_be_cloned(self, database_view_button=False):
if not self.plan.has_persistence:
return False, "Database does not have persistence cannot be cloned"
if self.is_being_used_elsewhere():
return False, "Database is being used by another task"
if self.is_in_quarantine:
return False, "Database in quarantine cannot be cloned"
if database_view_button:
if self.status != self.ALIVE:
return False, "Database is not alive and cannot be cloned"
else:
if self.is_dead:
return False, "Database is not alive and cannot be cloned"
return True, None
def can_be_restored(self):
if not self.restore_allowed():
return False, ('Restore is not allowed. Please, contact DBaaS '
'team for more information')
if self.is_in_quarantine:
return False, "Database in quarantine cannot be restored"
if self.status != self.ALIVE or self.is_dead:
return False, "Database is not alive and cannot be restored"
if self.is_being_used_elsewhere():
return False, ("Database is being used by another task, please "
"check your tasks")
return True, None
def can_be_disk_type_upgraded(self):
if self.is_in_quarantine:
return False, "Database in quarantine cannot be upgraded"
if self.status != self.ALIVE or self.is_dead:
return False, "Database is not alive and cannot be upgraded"
if self.is_being_used_elsewhere():
return False, ("Database is being used by another task, please "
"check your tasks")
return True, None
def can_be_deleted(self):
error = None
if self.is_protected and not self.is_in_quarantine:
error = "Database {} is protected and cannot be deleted"
# elif self.is_dead:
# error = "Database {} is not alive and cannot be deleted"
# elif self.is_being_used_elsewhere():
# error = "Database {} cannot be deleted because" \
# " it is in use by another task."
if error:
return False, error.format(self.name)
return True, None
def can_do_upgrade_retry(self):
error = None
if self.is_mongodb_24():
error = "MongoDB 2.4 cannot be upgraded by this task."
elif self.is_in_quarantine:
error = "Database in quarantine and cannot be upgraded."
elif self.is_being_used_elsewhere([('notification.tasks'
'.upgrade_database')]):
error = "Database cannot be upgraded because " \
"it is in use by another task."
elif not self.infra.plan.engine_equivalent_plan:
error = "Source plan do not has equivalent plan to upgrade."
if error:
return False, error
return True, None
def can_do_upgrade(self):
can_do_upgrade, error = self.can_do_upgrade_retry()
if can_do_upgrade:
if self.is_dead:
error = "Database is dead and cannot be upgraded."
elif self.is_being_used_elsewhere():
error = "Database cannot be upgraded because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_engine_migration(self, retry=False):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be upgraded."
elif self.is_being_used_elsewhere([('notification.tasks'
'.migrate_engine')]):
error = "Database engine cannot be migrated because " \
"it is in use by another task."
elif not retry and self.is_dead:
error = "Database is dead and cannot be upgraded."
elif not retry and self.is_being_used_elsewhere():
error = "Database engine cannot be migrated because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_upgrade_patch_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be upgraded."
elif self.is_being_used_elsewhere(
['notification.tasks.upgrade_database_patch']
):
error = "Database cannot be upgraded because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_upgrade_patch(self):
can_do_upgrade, error = self.can_do_upgrade_patch_retry()
if can_do_upgrade:
if self.is_dead:
error = "Database is dead and cannot be upgraded."
elif self.is_being_used_elsewhere():
error = "Database cannot be upgraded because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_resize_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be resized."
elif not self.has_offerings:
error = "There is no offerings for this database."
elif self.is_being_used_elsewhere(['notification.tasks.resize_database', 'notification.tasks.resize_database_rollback']):
error = "Database cannot be resized because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_do_resize(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be resized."
elif not self.has_offerings:
error = "There is no offerings for this database."
elif self.is_dead:
error = "Database is dead and cannot be resized."
elif self.is_being_used_elsewhere():
error = "Database cannot be resized because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_do_change_parameters_retry(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the parameters "
"changed.")
elif self.is_being_used_elsewhere([('notification.tasks'
'.change_parameters_database')]):
error = "Database cannot have the parameters changed because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_do_change_parameters(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the parameters "
"changed.")
elif self.is_dead:
error = "Database is dead and cannot have the parameters changed."
elif self.is_being_used_elsewhere():
error = "Database cannot have the parameters changed because" \
" it is in use by another task."
if error:
return False, error
return True, None
def can_migrate_host(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot have host migrate."
elif self.is_dead:
error = "Database is dead and cannot migrate host"
elif self.is_being_used_elsewhere():
error = ("Database cannot migrate host it is in use "
"by another task.")
if error:
return False, error
return True, None
def can_do_change_persistence_retry(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the persistence "
"changed.")
elif self.is_being_used_elsewhere([('notification.tasks'
'.change_database_persistence')]):
error = "Database cannot have the persistence changed because" \
" it is in use by another task."
elif not self.has_persistense_equivalent_plan:
error = "Database cannot have the persistence changed because" \
" it has not any persistense equivalent plan "
if error:
return False, error
return True, None
def can_do_change_persistence(self):
error = None
if self.is_in_quarantine:
error = ("Database in quarantine and cannot have the persistence "
"changed.")
elif self.is_dead:
error = "Database is dead and cannot have the persistence changed."
elif self.is_being_used_elsewhere():
error = "Database cannot have the persistence changed because" \
" it is in use by another task."
elif not self.has_persistense_equivalent_plan:
error = "Database cannot have the persistence changed because" \
" it has not any persistense equivalent plan "
if error:
return False, error
return True, None
def can_do_disk_resize(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot be resized."
elif self.is_being_used_elsewhere():
error = "Database cannot be resized because" \
" it is in use by another task."
elif not self.has_disk_offerings:
error = "There is no other disk offering for this database."
if error:
return False, error
return True, None
def can_do_configure_ssl_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot have SSL cofigured."
elif self.is_being_used_elsewhere([('notification.tasks'
'.configure_ssl_database')]):
error = "Database cannot have SSL cofigured because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_configure_ssl(self):
can_do_configure_ssl, error = self.can_do_configure_ssl_retry()
if can_do_configure_ssl:
if self.is_dead:
error = "Database is dead and cannot have SSL cofigured."
elif self.is_being_used_elsewhere():
error = "Database cannot have SSL cofigured because " \
"it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_required_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot have set SSL " \
"required."
elif self.is_being_used_elsewhere([('notification.tasks'
'.database_set_ssl_required')]):
error = "Database cannot have set SSL required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_required(self):
can_do_set_ssl_required, error = self.can_do_set_ssl_required_retry()
if can_do_set_ssl_required:
if self.is_dead:
error = "Database is dead and cannot have set SSL required."
elif self.is_being_used_elsewhere():
error = "Database cannot have set SSL required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_not_required_retry(self):
error = None
if self.is_in_quarantine:
error = "Database in quarantine and cannot have set SSL not " \
"required."
elif self.is_being_used_elsewhere(
[('notification.tasks.database_set_ssl_not_required')]):
error = "Database cannot have set SSL not required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def can_do_set_ssl_not_required(self):
can_do_ssl, error = self.can_do_set_ssl_not_required_retry()
if can_do_ssl:
if self.is_dead:
error = "Database is dead and cannot have set SSL not " \
"required."
elif self.is_being_used_elsewhere():
error = "Database cannot have set SSL not required " \
"because it is in use by another task."
if error:
return False, error
return True, None
def destroy(self, user):
if not self.is_in_quarantine:
self.delete()
return
if self.plan.provider != self.plan.CLOUDSTACK:
self.delete()
return
LOG.debug(
"call destroy_database - name={}, team={}, project={}, "
"user={}".format(self.name, self.team, self.project, user)
)
from notification.tasks import TaskRegister
TaskRegister.database_destroy(database=self, user=user)
return
@property
def last_successful_upgrade(self):
from maintenance.models import DatabaseUpgrade
return self.upgrades.filter(status=DatabaseUpgrade.SUCCESS).last()
@property
def status_html(self):
html_default = '<span class="label label-{}">{}</span>'
if self.status == Database.ALIVE:
status = html_default.format("success", "Alive")
elif self.status == Database.DEAD:
status = html_default.format("important", "Dead")
elif self.status == Database.ALERT:
status = html_default.format("warning", "Alert")
else:
status = html_default.format("info", "Initializing")
return format_html(status)
@property
def migrating_html(self):
html_default = ' <span class="label label-{}">{}</span>'
if self.infra.migration_in_progress:
status = html_default.format("info", "Migrating ({} of {})".format(
self.infra.migration_stage, self.infra.total_stages_migration))
return format_html(status)
return ""
@property
def organization(self):
return self.team.organization
class DatabaseLock(BaseModel):
database = models.ForeignKey(
Database, related_name="lock", unique=True
)
task = models.ForeignKey(
TaskHistory, related_name="lock"
)
class Credential(BaseModel):
USER_PATTERN = "u_%s"
USER_MAXIMUM_LENGTH_NAME = 16
user = models.CharField(verbose_name=_("User name"), max_length=100)
password = EncryptedCharField(
verbose_name=_("User password"), max_length=255)
database = models.ForeignKey(Database, related_name="credentials")
force_ssl = models.BooleanField(default=False)
OWNER = 'Owner'
READ_WRITE = 'Read-Write'
READ_ONLY = 'Read-Only'
PRIVILEGES_CHOICES = {
(OWNER, 'Owner'),
(READ_WRITE, 'Read-Write'),
(READ_ONLY, 'Read-Only'),
}
privileges = models.CharField(max_length=10, choices=PRIVILEGES_CHOICES,
default=OWNER)
def __unicode__(self):
return u"%s" % self.user
class Meta:
permissions = (
("view_credential", "Can view credentials"),
)
unique_together = (
('user', 'database'),
)
ordering = ('database', 'user',)
def clean(self):
if len(self.user) > self.USER_MAXIMUM_LENGTH_NAME:
raise ValidationError(_("%s is too long" % self.user))
@cached_property
def driver(self):
return self.database.databaseinfra.get_driver()
def reset_password(self):
""" Reset credential password to a new random password """
self.password = make_db_random_password()
self.driver.update_user(self)
self.save()
@property
def ssl_swap_label(self):
if self.force_ssl:
return "Disable SSL"
else:
return "Enable SSL"
def swap_force_ssl(self):
if self.force_ssl:
self.force_ssl = False
self.driver.set_user_not_require_ssl(self)
self.save()
else:
self.force_ssl = True
self.driver.set_user_require_ssl(self)
self.save()
@classmethod
def create_new_credential(cls, user, database, privileges="Owner"):
credential = Credential()
credential.database = database
credential.user = user[:cls.USER_MAXIMUM_LENGTH_NAME]
credential.user = slugify(credential.user)
credential.password = make_db_random_password()
credential.privileges = privileges
credential.full_clean()
credential.driver.create_user(credential)
credential.save()
return credential
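# Naming sketch (illustrative): Database.automatic_create_first_credential() builds
# the user from USER_PATTERN, so a database named "orders" gets an owner credential
# whose user is "u_orders" (slugified and truncated to USER_MAXIMUM_LENGTH_NAME chars).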
def delete(self, *args, **kwargs):
self.driver.remove_user(self)
LOG.info('User removed from driver')
super(Credential, self).delete(*args, **kwargs)
#
# SIGNALS
#
@receiver(pre_delete, sender=Database)
def database_pre_delete(sender, **kwargs):
"""
database pre delete signal. Removes database from the engine
"""
database = kwargs.get("instance")
LOG.debug("database pre-delete triggered")
engine = factory_for(database.databaseinfra)
engine.try_remove_database(database)
@receiver(post_save, sender=Database, dispatch_uid="database_drive_credentials")
def database_post_save(sender, **kwargs):
"""
Database post save signal. Creates the database in the driver and
creates a new credential.
"""
database = kwargs.get("instance")
is_new = kwargs.get("created")
LOG.debug("database post-save triggered")
if is_new and database.engine_type != 'redis':
        LOG.info(
            ("a new database (%s) was created... "
             "provisioning it in the engine" % (
                 database.name))
        )
engine = factory_for(database.databaseinfra)
engine.create_database(database)
database.automatic_create_first_credential()
@receiver(pre_save, sender=Database)
def database_pre_save(sender, **kwargs):
from notification.tasks import TaskRegister
database = kwargs.get('instance')
if database.is_in_quarantine:
if database.quarantine_dt is None:
database.quarantine_dt = datetime.datetime.now().date()
if not database.quarantine_user:
from dbaas.middleware import UserMiddleware
database.quarantine_user = UserMiddleware.current_user()
else:
database.quarantine_dt = None
database.quarantine_user = None
if database.id:
saved_object = Database.objects.get(id=database.id)
if database.name != saved_object.name:
raise AttributeError(_("Attribute name cannot be edited"))
if database.team and saved_object.team:
if database.team.organization != saved_object.team.organization:
TaskRegister.update_organization_name_monitoring(
database=database,
organization_name=database.team.organization.name)
if saved_object.team.external:
TaskRegister.update_database_monitoring(
database=database,
hostgroup=(saved_object.team.organization
.grafana_hostgroup),
action='remove')
if database.team.external:
TaskRegister.update_database_monitoring(
database=database,
hostgroup=database.team.organization.grafana_hostgroup,
action='add')
else:
# new database
if database_name_evironment_constraint(
database.name, database.environment.name):
raise AttributeError(
_('%s already exists in production!') % database.name
)
LOG.debug("slugfying database's name for %s" % database.name)
database.name = slugify(database.name)
@receiver(pre_save, sender=Credential)
def credential_pre_save(sender, **kwargs):
credential = kwargs.get('instance')
if credential.id:
saved_object = Credential.objects.get(id=credential.id)
if credential.user != saved_object.user:
raise AttributeError(_("Attribute user cannot be edited"))
if credential.database != saved_object.database:
raise AttributeError(_("Attribute database cannot be edited"))
@receiver(pre_save, sender=Project)
def project_pre_save(sender, **kwargs):
instance = kwargs.get('instance')
instance.slug = slugify(instance.name)
class NoDatabaseInfraCapacity(Exception):
""" There isn't databaseinfra capable to support a new database
with this plan """
pass
simple_audit.register(Project, Database, Credential)
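# Illustrative sketch, not part of the original module: how a caller might
# provision an extra read-only credential and rotate its password. The
# `database` argument stands for any existing Database instance; the username
# is a hypothetical placeholder.
def _example_create_read_only_credential(database, username="reporting"):
    credential = Credential.create_new_credential(
        user=username,                      # trimmed to 16 chars and slugified
        database=database,
        privileges=Credential.READ_ONLY,
    )
    credential.reset_password()             # swap to a new random password
    return credential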
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ServicePort(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'node_port': 'int',
'port': 'int',
'protocol': 'str',
'target_port': 'object'
}
attribute_map = {
'name': 'name',
'node_port': 'nodePort',
'port': 'port',
'protocol': 'protocol',
'target_port': 'targetPort'
}
def __init__(self, name=None, node_port=None, port=None, protocol=None, target_port=None):
"""
V1ServicePort - a model defined in Swagger
"""
self._name = None
self._node_port = None
self._port = None
self._protocol = None
self._target_port = None
self.discriminator = None
if name is not None:
self.name = name
if node_port is not None:
self.node_port = node_port
self.port = port
if protocol is not None:
self.protocol = protocol
if target_port is not None:
self.target_port = target_port
@property
def name(self):
"""
Gets the name of this V1ServicePort.
The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.
:return: The name of this V1ServicePort.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ServicePort.
The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.
:param name: The name of this V1ServicePort.
:type: str
"""
self._name = name
@property
def node_port(self):
"""
Gets the node_port of this V1ServicePort.
The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
:return: The node_port of this V1ServicePort.
:rtype: int
"""
return self._node_port
@node_port.setter
def node_port(self, node_port):
"""
Sets the node_port of this V1ServicePort.
The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
:param node_port: The node_port of this V1ServicePort.
:type: int
"""
self._node_port = node_port
@property
def port(self):
"""
Gets the port of this V1ServicePort.
The port that will be exposed by this service.
:return: The port of this V1ServicePort.
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this V1ServicePort.
The port that will be exposed by this service.
:param port: The port of this V1ServicePort.
:type: int
"""
if port is None:
raise ValueError("Invalid value for `port`, must not be `None`")
self._port = port
@property
def protocol(self):
"""
Gets the protocol of this V1ServicePort.
The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.
:return: The protocol of this V1ServicePort.
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""
Sets the protocol of this V1ServicePort.
The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.
:param protocol: The protocol of this V1ServicePort.
:type: str
"""
self._protocol = protocol
@property
def target_port(self):
"""
Gets the target_port of this V1ServicePort.
Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
:return: The target_port of this V1ServicePort.
:rtype: object
"""
return self._target_port
@target_port.setter
def target_port(self, target_port):
"""
Sets the target_port of this V1ServicePort.
Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
:param target_port: The target_port of this V1ServicePort.
:type: object
"""
self._target_port = target_port
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ServicePort):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
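# Illustrative sketch, not part of the generated client: constructing a port
# definition and inspecting its dict form. The values are arbitrary examples.
def _example_service_port():
    port = V1ServicePort(name="http", port=80, target_port=8080, protocol="TCP")
    # to_dict() returns the python-attribute view, e.g. {'name': 'http',
    # 'node_port': None, 'port': 80, 'protocol': 'TCP', 'target_port': 8080}
    return port.to_dict()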
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import os.path
import re
import shutil
import codecs
import threading
import string
import tarfile
import random
import datetime
import logging
from flask import current_app, send_from_directory
from werkzeug.utils import cached_property
from . import compat
from .compat import range
logger = logging.getLogger(__name__)
unicode_underscore = '_'.decode('utf-8') if compat.PY_LEGACY else '_'
underscore_replace = '%s:underscore' % __name__
codecs.register_error(underscore_replace,
lambda error: (unicode_underscore, error.start + 1)
)
binary_units = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
standard_units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
common_path_separators = '\\/'
restricted_chars = '\\/\0'
restricted_names = ('.', '..', '::', os.sep)
nt_device_names = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
'LPT2', 'LPT3', 'PRN', 'NUL')
fs_safe_characters = string.ascii_uppercase + string.digits
class Node(object):
'''
Abstract filesystem node class.
This represents an unspecified entity with a filesystem's path suitable for
being inherited by plugins.
When inheriting, the following attributes should be overwritten in order
to specify :meth:`from_urlpath` classmethod behavior:
* :attr:`generic`, if true, an instance of directory_class or file_class
      will be created instead of an instance of this class itself.
* :attr:`directory_class`, class will be used for directory nodes,
* :attr:`file_class`, class will be used for file nodes.
'''
generic = True
directory_class = None # set later at import time
file_class = None # set later at import time
re_charset = re.compile('; charset=(?P<charset>[^;]+)')
can_download = False
@cached_property
def plugin_manager(self):
'''
Get current app's plugin manager.
:returns: plugin manager instance
'''
return self.app.extensions['plugin_manager']
@cached_property
def widgets(self):
'''
        List widgets whose filter returns True for this node (or that have no filter).
Remove button is prepended if :property:can_remove returns true.
:returns: list of widgets
:rtype: list of namedtuple instances
'''
widgets = []
if self.can_remove:
widgets.append(
self.plugin_manager.create_widget(
'entry-actions',
'button',
file=self,
css='remove',
endpoint='remove'
)
)
return widgets + self.plugin_manager.get_widgets(file=self)
@cached_property
def link(self):
'''
Get last widget with place "entry-link".
:returns: widget on entry-link (ideally a link one)
:rtype: namedtuple instance
'''
link = None
for widget in self.widgets:
if widget.place == 'entry-link':
link = widget
return link
@cached_property
def can_remove(self):
'''
Get if current node can be removed based on app config's
directory_remove.
:returns: True if current node can be removed, False otherwise.
:rtype: bool
'''
dirbase = self.app.config["directory_remove"]
return dirbase and self.path.startswith(dirbase + os.sep)
@cached_property
def stats(self):
'''
Get current stats object as returned by os.stat function.
:returns: stats object
:rtype: posix.stat_result or nt.stat_result
'''
return os.stat(self.path)
@cached_property
def parent(self):
'''
Get parent node if available based on app config's directory_base.
:returns: parent object if available
:rtype: Node instance or None
'''
if self.path == self.app.config['directory_base']:
return None
parent = os.path.dirname(self.path) if self.path else None
return self.directory_class(parent, self.app) if parent else None
@cached_property
def ancestors(self):
'''
Get list of ancestors until app config's directory_base is reached.
:returns: list of ancestors starting from nearest.
:rtype: list of Node objects
'''
ancestors = []
parent = self.parent
while parent:
ancestors.append(parent)
parent = parent.parent
return ancestors
@property
def modified(self):
'''
Get human-readable last modification date-time.
        :returns: ISO 8601-like date-time string (without timezone)
:rtype: str
'''
dt = datetime.datetime.fromtimestamp(self.stats.st_mtime)
return dt.strftime('%Y.%m.%d %H:%M:%S')
@property
def urlpath(self):
'''
Get the url substring corresponding to this node for those endpoints
accepting a 'path' parameter, suitable for :meth:`from_urlpath`.
:returns: relative-url-like for node's path
:rtype: str
'''
return abspath_to_urlpath(self.path, self.app.config['directory_base'])
@property
def name(self):
'''
Get the basename portion of node's path.
:returns: filename
:rtype: str
'''
return os.path.basename(self.path)
@property
def type(self):
'''
Get the mime portion of node's mimetype (without the encoding part).
:returns: mimetype
:rtype: str
'''
return self.mimetype.split(";", 1)[0]
@property
def category(self):
'''
Get mimetype category (first portion of mimetype before the slash).
:returns: mimetype category
:rtype: str
As of 2016-11-03's revision of RFC2046 it could be one of the
following:
* application
* audio
* example
* image
* message
* model
* multipart
* text
* video
'''
return self.type.split('/', 1)[0]
def __init__(self, path=None, app=None, **defaults):
'''
:param path: local path
:type path: str
        :param app: optional app instance
        :type app: flask.Flask
        :param defaults: attributes to set on this object
'''
self.path = compat.fsdecode(path) if path else None
self.app = current_app if app is None else app
self.__dict__.update(defaults) # only for attr and cached_property
def remove(self):
'''
Does nothing except raising if can_remove property returns False.
:raises: OutsideRemovableBase if :property:can_remove returns false
'''
if not self.can_remove:
raise OutsideRemovableBase("File outside removable base")
@classmethod
def from_urlpath(cls, path, app=None):
'''
Alternative constructor which accepts a path as taken from URL and uses
the given app or the current app config to get the real path.
If class has attribute `generic` set to True, `directory_class` or
`file_class` will be used as type.
:param path: relative path as from URL
:param app: optional, flask application
:return: file object pointing to path
:rtype: File
'''
app = app or current_app
base = app.config['directory_base']
path = urlpath_to_abspath(path, base)
if not cls.generic:
kls = cls
elif os.path.isdir(path):
kls = cls.directory_class
else:
kls = cls.file_class
return kls(path=path, app=app)
@classmethod
def register_file_class(cls, kls):
'''
Convenience method for setting current class file_class property.
:param kls: class to set
:type kls: type
:returns: given class (enabling using this as decorator)
:rtype: type
'''
cls.file_class = kls
return kls
@classmethod
def register_directory_class(cls, kls):
'''
Convenience method for setting current class directory_class property.
:param kls: class to set
:type kls: type
:returns: given class (enabling using this as decorator)
:rtype: type
'''
cls.directory_class = kls
return kls
@Node.register_file_class
class File(Node):
'''
Filesystem file class.
Some notes:
    * :attr:`can_download` is fixed to True, so Files can be downloaded
      unconditionally.
* :attr:`can_upload` is fixed to False, so nothing can be uploaded to
file path.
* :attr:`is_directory` is fixed to False, so no further checks are
performed.
* :attr:`generic` is set to False, so static method :meth:`from_urlpath`
will always return instances of this class.
'''
can_download = True
can_upload = False
is_directory = False
generic = False
@cached_property
def widgets(self):
'''
        List widgets whose filter returns True for this file (or that have no filter).
Entry link is prepended.
Download button is prepended if :property:can_download returns true.
Remove button is prepended if :property:can_remove returns true.
:returns: list of widgets
:rtype: list of namedtuple instances
'''
widgets = [
self.plugin_manager.create_widget(
'entry-link',
'link',
file=self,
endpoint='open'
)
]
if self.can_download:
widgets.append(
self.plugin_manager.create_widget(
'entry-actions',
'button',
file=self,
css='download',
endpoint='download_file'
)
)
return widgets + super(File, self).widgets
@cached_property
def mimetype(self):
'''
Get full mimetype, with encoding if available.
:returns: mimetype
:rtype: str
'''
return self.plugin_manager.get_mimetype(self.path)
@cached_property
def is_file(self):
'''
Get if node is file.
:returns: True if file, False otherwise
:rtype: bool
'''
return os.path.isfile(self.path)
@property
def size(self):
'''
Get human-readable node size in bytes.
        If the node is a directory, this corresponds to the directory's own inode size.
:returns: fuzzy size with unit
:rtype: str
'''
size, unit = fmt_size(
self.stats.st_size,
self.app.config["use_binary_multiples"]
)
if unit == binary_units[0]:
return "%d %s" % (size, unit)
return "%.2f %s" % (size, unit)
@property
def encoding(self):
'''
Get encoding part of mimetype, or "default" if not available.
        :returns: file encoding as returned by the mimetype function, or "default"
:rtype: str
'''
if ";" in self.mimetype:
match = self.re_charset.search(self.mimetype)
gdict = match.groupdict() if match else {}
return gdict.get("charset") or "default"
return "default"
def remove(self):
'''
Remove file.
:raises OutsideRemovableBase: when not under removable base directory
'''
super(File, self).remove()
os.unlink(self.path)
def download(self):
'''
Get a Flask's send_file Response object pointing to this file.
:returns: Response object as returned by flask's send_file
:rtype: flask.Response
'''
directory, name = os.path.split(self.path)
return send_from_directory(directory, name, as_attachment=True)
@Node.register_directory_class
class Directory(Node):
'''
Filesystem directory class.
Some notes:
* :attr:`mimetype` is fixed to 'inode/directory', so mimetype detection
functions won't be called in this case.
* :attr:`is_file` is fixed to False, so no further checks are needed.
* :attr:`size` is fixed to 0 (zero), so stats are not required for this.
* :attr:`encoding` is fixed to 'default'.
* :attr:`generic` is set to False, so static method :meth:`from_urlpath`
will always return instances of this class.
'''
_listdir_cache = None
mimetype = 'inode/directory'
is_file = False
size = 0
encoding = 'default'
generic = False
@cached_property
def widgets(self):
'''
        List widgets whose filter returns True for this dir (or that have no filter).
Entry link is prepended.
Upload scripts and widget are added if :property:can_upload is true.
Download button is prepended if :property:can_download returns true.
Remove button is prepended if :property:can_remove returns true.
:returns: list of widgets
:rtype: list of namedtuple instances
'''
widgets = [
self.plugin_manager.create_widget(
'entry-link',
'link',
file=self,
endpoint='browse'
)
]
if self.can_upload:
widgets.extend((
self.plugin_manager.create_widget(
'head',
'script',
file=self,
endpoint='static',
filename='browse.directory.head.js'
),
self.plugin_manager.create_widget(
'scripts',
'script',
file=self,
endpoint='static',
filename='browse.directory.body.js'
),
self.plugin_manager.create_widget(
'header',
'upload',
file=self,
text='Upload',
endpoint='upload'
)
))
if self.can_download:
widgets.append(
self.plugin_manager.create_widget(
'entry-actions',
'button',
file=self,
css='download',
endpoint='download_directory'
)
)
return widgets + super(Directory, self).widgets
@cached_property
def is_directory(self):
'''
Get if path points to a real directory.
:returns: True if real directory, False otherwise
:rtype: bool
'''
return os.path.isdir(self.path)
@cached_property
def can_download(self):
'''
Get if path is downloadable (if app's `directory_downloadable` config
property is True).
:returns: True if downloadable, False otherwise
:rtype: bool
'''
return self.app.config['directory_downloadable']
@cached_property
def can_upload(self):
'''
Get if a file can be uploaded to path (if directory path is under app's
`directory_upload` config property).
:returns: True if a file can be upload to directory, False otherwise
:rtype: bool
'''
dirbase = self.app.config["directory_upload"]
return dirbase and (
dirbase == self.path or
self.path.startswith(dirbase + os.sep)
)
@cached_property
def is_empty(self):
'''
Get if directory is empty (based on :meth:`_listdir`).
:returns: True if this directory has no entries, False otherwise.
:rtype: bool
'''
if self._listdir_cache is not None:
            return not self._listdir_cache
for entry in self._listdir():
return False
return True
def remove(self):
'''
Remove directory tree.
:raises OutsideRemovableBase: when not under removable base directory
'''
super(Directory, self).remove()
shutil.rmtree(self.path)
def download(self):
'''
Get a Flask Response object streaming a tarball of this directory.
:returns: Response object
:rtype: flask.Response
'''
return self.app.response_class(
TarFileStream(
self.path,
self.app.config["directory_tar_buffsize"]
),
mimetype="application/octet-stream"
)
def contains(self, filename):
'''
Check if directory contains an entry with given filename.
        :param filename: filename to check
:type filename: str
:returns: True if exists, False otherwise.
:rtype: bool
'''
return os.path.exists(os.path.join(self.path, filename))
def choose_filename(self, filename, attempts=999):
'''
        Get a new filename which does not collide with any entry in the
        directory, based on the given filename.
:param filename: base filename
:type filename: str
:param attempts: number of attempts, defaults to 999
:type attempts: int
:returns: filename
:rtype: str
'''
new_filename = filename
for attempt in range(2, attempts + 1):
if not self.contains(new_filename):
return new_filename
new_filename = alternative_filename(filename, attempt)
while self.contains(new_filename):
new_filename = alternative_filename(filename)
return new_filename
def _listdir(self, precomputed_stats=os.name == 'nt'):
'''
        Iterate over unsorted entries in this directory.
:yields: Directory or File instance for each entry in directory
:ytype: Node
'''
for entry in compat.scandir(self.path):
kwargs = {'path': entry.path, 'app': self.app, 'parent': self}
if precomputed_stats and not entry.is_symlink():
kwargs['stats'] = entry.stat()
if entry.is_dir(follow_symlinks=True):
yield self.directory_class(**kwargs)
continue
yield self.file_class(**kwargs)
def listdir(self, sortkey=None, reverse=False):
'''
Get sorted list (by given sortkey and reverse params) of File objects.
:return: sorted list of File instances
:rtype: list of File
'''
if self._listdir_cache is None:
if sortkey:
data = sorted(self._listdir(), key=sortkey, reverse=reverse)
elif reverse:
data = list(reversed(self._listdir()))
else:
data = list(self._listdir())
self._listdir_cache = data
return self._listdir_cache
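# Illustrative sketch, not part of the original module: listing a directory
# node sorted by name with directories first. The 'docs' urlpath and the app
# instance are hypothetical placeholders.
def _example_sorted_listing(app):
    node = Directory.from_urlpath('docs', app=app)
    return node.listdir(sortkey=lambda n: (n.is_file, n.name.lower()))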
class TarFileStream(object):
'''
Tarfile which compresses while reading for streaming.
    Buffsize can be provided; it must be a multiple of 512 (the tar block size)
    for compression.
    Note on coroutines: this class uses threading by default, but
    coroutine-based applications can change this behavior by overriding the
    :attr:`event_class` and :attr:`thread_class` values.
'''
event_class = threading.Event
thread_class = threading.Thread
tarfile_class = tarfile.open
def __init__(self, path, buffsize=10240):
'''
        Internal tarfile object will be created, and compression will start
        on a thread: once the buffer becomes full, writes block until
        a read occurs.
:param path: local path of directory whose content will be compressed.
:type path: str
:param buffsize: size of internal buffer on bytes, defaults to 10KiB
:type buffsize: int
'''
self.path = path
self.name = os.path.basename(path) + ".tgz"
self._finished = 0
self._want = 0
self._data = bytes()
self._add = self.event_class()
self._result = self.event_class()
self._tarfile = self.tarfile_class( # stream write
fileobj=self,
mode="w|gz",
bufsize=buffsize
)
self._th = self.thread_class(target=self.fill)
self._th.start()
def fill(self):
'''
Writes data on internal tarfile instance, which writes to current
object, using :meth:`write`.
As this method is blocking, it is used inside a thread.
This method is called automatically, on a thread, on initialization,
so there is little need to call it manually.
'''
self._tarfile.add(self.path, "")
self._tarfile.close() # force stream flush
self._finished += 1
if not self._result.is_set():
self._result.set()
def write(self, data):
'''
Write method used by internal tarfile instance to output data.
This method blocks tarfile execution once internal buffer is full.
As this method is blocking, it is used inside the same thread of
:meth:`fill`.
:param data: bytes to write to internal buffer
:type data: bytes
:returns: number of bytes written
:rtype: int
'''
self._add.wait()
self._data += data
if len(self._data) > self._want:
self._add.clear()
self._result.set()
return len(data)
def read(self, want=0):
'''
Read method, gets data from internal buffer while releasing
:meth:`write` locks when needed.
        The lock usage means it must run on a different thread than
        :meth:`fill`, i.e. the main thread, otherwise it will deadlock.
The combination of both write and this method running on different
threads makes tarfile being streamed on-the-fly, with data chunks being
processed and retrieved on demand.
:param want: number bytes to read, defaults to 0 (all available)
:type want: int
:returns: tarfile data as bytes
:rtype: bytes
'''
if self._finished:
if self._finished == 1:
self._finished += 1
return ""
            raise EOFError("EOF reached")
# Thread communication
self._want = want
self._add.set()
self._result.wait()
self._result.clear()
if want:
data = self._data[:want]
self._data = self._data[want:]
else:
data = self._data
self._data = bytes()
return data
def __iter__(self):
'''
Iterate through tarfile result chunks.
        Similarly to :meth:`read`, this method must run on a different thread
        than :meth:`write` calls.
:yields: data chunks as taken from :meth:`read`.
:ytype: bytes
'''
data = self.read()
while data:
yield data
data = self.read()
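# Illustrative sketch, not part of the original module: consuming the stream
# outside Flask, e.g. writing the on-the-fly tarball to a local file. Both
# paths are hypothetical placeholders.
def _example_write_tarball(directory_path, destination='/tmp/out.tgz'):
    stream = TarFileStream(directory_path)
    with open(destination, 'wb') as target:
        for chunk in stream:  # chunks come from TarFileStream.read()
            target.write(chunk)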
class OutsideDirectoryBase(Exception):
'''
    Exception thrown when trying to access a file outside the path defined
    by the `directory_base` config property.
'''
pass
class OutsideRemovableBase(Exception):
'''
    Exception thrown when trying to access a file outside the path defined
    by the `directory_remove` config property.
'''
pass
def fmt_size(size, binary=True):
'''
Get size and unit.
:param size: size in bytes
:param binary: whether use binary or standard units, defaults to True
:return: size and unit
:rtype: tuple of int and unit as str
'''
if binary:
fmt_sizes = binary_units
fmt_divider = 1024.
else:
fmt_sizes = standard_units
fmt_divider = 1000.
for fmt in fmt_sizes[:-1]:
if size < 1000:
return (size, fmt)
size /= fmt_divider
return size, fmt_sizes[-1]
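# Quick illustrative check, not part of the original module: fmt_size divides
# until the value drops below 1000, so 2048 bytes yields (2.0, 'KiB') with
# binary units and (2.048, 'KB') with standard units.
def _example_fmt_size():
    assert fmt_size(512) == (512, 'B')
    assert fmt_size(2048) == (2.0, 'KiB')
    assert fmt_size(2048, binary=False) == (2.048, 'KB')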
def relativize_path(path, base, os_sep=os.sep):
'''
Make absolute path relative to an absolute base.
:param path: absolute path
:param base: absolute base path
:param os_sep: path component separator, defaults to current OS separator
:return: relative path
:rtype: str or unicode
:raises OutsideDirectoryBase: if path is not below base
'''
if not check_under_base(path, base, os_sep):
raise OutsideDirectoryBase("%r is not under %r" % (path, base))
prefix_len = len(base)
if not base.endswith(os_sep):
prefix_len += len(os_sep)
return path[prefix_len:]
def abspath_to_urlpath(path, base, os_sep=os.sep):
'''
Make filesystem absolute path uri relative using given absolute base path.
:param path: absolute path
:param base: absolute base path
:param os_sep: path component separator, defaults to current OS separator
:return: relative uri
:rtype: str or unicode
:raises OutsideDirectoryBase: if resulting path is not below base
'''
return relativize_path(path, base, os_sep).replace(os_sep, '/')
def urlpath_to_abspath(path, base, os_sep=os.sep):
'''
Make uri relative path fs absolute using a given absolute base path.
:param path: relative path
:param base: absolute base path
:param os_sep: path component separator, defaults to current OS separator
:return: absolute path
:rtype: str or unicode
:raises OutsideDirectoryBase: if resulting path is not below base
'''
prefix = base if base.endswith(os_sep) else base + os_sep
realpath = os.path.abspath(prefix + path.replace('/', os_sep))
if base == realpath or realpath.startswith(prefix):
return realpath
raise OutsideDirectoryBase("%r is not under %r" % (realpath, base))
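# Illustrative round-trip, not part of the original module, using an explicit
# POSIX separator and a POSIX-style base so the example does not depend on the
# host OS separator.
def _example_urlpath_roundtrip():
    base = '/srv/files'
    urlpath = abspath_to_urlpath('/srv/files/docs/a.txt', base, os_sep='/')
    assert urlpath == 'docs/a.txt'
    # and back again (assumes a POSIX host, where os.path.abspath keeps the prefix)
    assert urlpath_to_abspath(urlpath, base, os_sep='/') == '/srv/files/docs/a.txt'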
def generic_filename(path):
'''
    Extract the filename of the given path OS-independently, taking care of
    known path separators.
:param path: path
:return: filename
:rtype: str or unicode (depending on given path)
'''
for sep in common_path_separators:
if sep in path:
_, path = path.rsplit(sep, 1)
return path
def clean_restricted_chars(path, restricted_chars=restricted_chars):
'''
Get path without restricted characters.
:param path: path
:return: path without restricted characters
:rtype: str or unicode (depending on given path)
'''
for character in restricted_chars:
path = path.replace(character, '_')
return path
def check_forbidden_filename(filename,
destiny_os=os.name,
restricted_names=restricted_names):
'''
    Get if the given filename is forbidden for the current OS or filesystem.
    :param filename: filename to check
    :param destiny_os: destination operating system
    :param restricted_names: collection of restricted filenames
    :return: whether the filename is forbidden on the given OS (or filesystem)
:rtype: bool
'''
if destiny_os == 'nt':
fpc = filename.split('.', 1)[0].upper()
if fpc in nt_device_names:
return True
return filename in restricted_names
def check_under_base(path, base, os_sep=os.sep):
'''
Check if given absolute path is under given base.
:param path: absolute path
:param base: absolute base path
    :return: whether the path is under the given base or not
:rtype: bool
'''
prefix = base if base.endswith(os_sep) else base + os_sep
return path == base or path.startswith(prefix)
def secure_filename(path, destiny_os=os.name, fs_encoding=compat.FS_ENCODING):
'''
Get rid of parent path components and special filenames.
If path is invalid or protected, return empty string.
:param path: unsafe path
    :type path: str
    :param destiny_os: destination operating system
:type destiny_os: str
:return: filename or empty string
:rtype: str
'''
path = generic_filename(path)
path = clean_restricted_chars(path)
if check_forbidden_filename(path, destiny_os=destiny_os):
return ''
if isinstance(path, bytes):
path = path.decode('latin-1', errors=underscore_replace)
# Decode and recover from filesystem encoding in order to strip unwanted
# characters out
kwargs = dict(
os_name=destiny_os,
fs_encoding=fs_encoding,
errors=underscore_replace
)
fs_encoded_path = compat.fsencode(path, **kwargs)
fs_decoded_path = compat.fsdecode(fs_encoded_path, **kwargs)
return fs_decoded_path
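# Illustrative behaviour, not part of the original module: parent path
# components and separators are stripped, and NT device names are rejected.
def _example_secure_filename():
    assert secure_filename('../../etc/passwd') == 'passwd'
    assert secure_filename('CON.txt', destiny_os='nt') == ''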
def alternative_filename(filename, attempt=None):
'''
Generates an alternative version of given filename.
    If a numeric attempt parameter is given, it will be used in the alternative
    name; otherwise a random value will be used.
:param filename: original filename
:param attempt: optional attempt number, defaults to null
:return: new filename
:rtype: str or unicode
'''
filename_parts = filename.rsplit(u'.', 2)
name = filename_parts[0]
ext = ''.join(u'.%s' % ext for ext in filename_parts[1:])
if attempt is None:
choose = random.choice
extra = u' %s' % ''.join(choose(fs_safe_characters) for i in range(8))
else:
extra = u' (%d)' % attempt
return u'%s%s%s' % (name, extra, ext)
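# Illustrative behaviour, not part of the original module: a numbered attempt
# is embedded before the (possibly double) extension, while omitting it
# appends a random 8-character token instead.
def _example_alternative_filename():
    assert alternative_filename('report.tar.gz', attempt=2) == 'report (2).tar.gz'
    # alternative_filename('report.tar.gz') -> e.g. u'report K3QZ81XH.tar.gz'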
|
|
import json
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from helpers.event_simulator import EventSimulator
from helpers.match_helper import MatchHelper
from models.event import Event
from models.event_details import EventDetails
from models.match import Match
class TestEventSimulator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self._alliance_selections = [{u'declines': [], u'backup': None, u'name': u'Alliance 1', u'picks': [u'frc359', u'frc3990', u'frc4508']}, {u'declines': [], u'backup': None, u'name': u'Alliance 2', u'picks': [u'frc5254', u'frc20', u'frc229']}, {u'declines': [], u'backup': None, u'name': u'Alliance 3', u'picks': [u'frc5236', u'frc2791', u'frc3624']}, {u'declines': [], u'backup': None, u'name': u'Alliance 4', u'picks': [u'frc3419', u'frc5240', u'frc663']}, {u'declines': [], u'backup': None, u'name': u'Alliance 5', u'picks': [u'frc48', u'frc1493', u'frc1551']}, {u'declines': [], u'backup': None, u'name': u'Alliance 6', u'picks': [u'frc250', u'frc333', u'frc145']}, {u'declines': [], u'backup': None, u'name': u'Alliance 7', u'picks': [u'frc358', u'frc3003', u'frc527']}, {u'declines': [], u'backup': None, u'name': u'Alliance 8', u'picks': [u'frc4930', u'frc3044', u'frc4481']}]
self._alliance_selections_with_backup = [{u'declines': [], u'backup': None, u'name': u'Alliance 1', u'picks': [u'frc359', u'frc3990', u'frc4508']}, {u'declines': [], u'backup': {u'in': u'frc1665', u'out': u'frc229'}, u'name': u'Alliance 2', u'picks': [u'frc5254', u'frc20', u'frc229']}, {u'declines': [], u'backup': None, u'name': u'Alliance 3', u'picks': [u'frc5236', u'frc2791', u'frc3624']}, {u'declines': [], u'backup': None, u'name': u'Alliance 4', u'picks': [u'frc3419', u'frc5240', u'frc663']}, {u'declines': [], u'backup': None, u'name': u'Alliance 5', u'picks': [u'frc48', u'frc1493', u'frc1551']}, {u'declines': [], u'backup': None, u'name': u'Alliance 6', u'picks': [u'frc250', u'frc333', u'frc145']}, {u'declines': [], u'backup': None, u'name': u'Alliance 7', u'picks': [u'frc358', u'frc3003', u'frc527']}, {u'declines': [], u'backup': None, u'name': u'Alliance 8', u'picks': [u'frc4930', u'frc3044', u'frc4481']}]
def tearDown(self):
self.testbed.deactivate()
    def test_event_simulator(self):
es = EventSimulator()
# Before anything has happened
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details, None)
self.assertEqual(event.matches, [])
# Qual match schedule added
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertNotEqual(event.details, None)
for rank in event.details.rankings2:
self.assertEqual(rank['sort_orders'][0], 0)
self.assertEqual(len(event.matches), 72)
for match in event.matches:
self.assertEqual(match.comp_level, 'qm')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each qual match
for i in xrange(72):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, None)
self.assertEqual(len(event.matches), 72)
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
# Check some final rankings
self.assertEqual(event.details.rankings2[0]['sort_orders'][0], 22)
self.assertEqual(event.details.rankings2[-1]['sort_orders'][0], 4)
# After alliance selections
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
self.assertEqual(len(event.matches), 72)
# QF schedule added
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
self.assertEqual(len(event.matches), 84)
for match in event.matches:
if match.comp_level == 'qm':
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertEqual(match.comp_level, 'qf')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each QF match
for i in xrange(72, 82):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
if i <= 75:
self.assertEqual(len(event.matches), 84)
elif i <= 77:
self.assertEqual(len(event.matches), 86) # 1 match removed, 3 added
else:
self.assertEqual(len(event.matches), 88) # 1 match removed, 3 added
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if match.key.id() in {'2016nytr_qf1m3', '2016nytr_qf3m3'}:
# Unneeded tiebreak matches
self.assertFalse(match.has_been_played)
elif j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
# Check SF Matches
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
self.assertEqual(len(event.matches), 88)
for match in event.matches:
if match.comp_level in {'qm', 'qf'}:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertEqual(match.comp_level, 'sf')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each SF match
for i in xrange(82, 87):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
if i < 85:
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
else:
self.assertEqual(event.details.alliance_selections, self._alliance_selections_with_backup)
if i <= 83:
self.assertEqual(len(event.matches), 88)
else:
self.assertEqual(len(event.matches), 90) # 1 match removed, 3 added
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if match.key.id() == '2016nytr_sf1m3':
# Unneeded tiebreak matches
self.assertFalse(match.has_been_played)
elif j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
# Check F Matches
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections_with_backup)
self.assertEqual(len(event.matches), 90)
for match in event.matches:
if match.comp_level in {'qm', 'qf', 'sf'}:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertEqual(match.comp_level, 'f')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each F match
for i in xrange(87, 90):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections_with_backup)
self.assertEqual(len(event.matches), 90)
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
    def test_event_simulator_batch_advance(self):
es = EventSimulator(batch_advance=True)
# Before anything has happened
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details, None)
self.assertEqual(event.matches, [])
# Qual match schedule added
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertNotEqual(event.details, None)
for rank in event.details.rankings2:
self.assertEqual(rank['sort_orders'][0], 0)
self.assertEqual(len(event.matches), 72)
for match in event.matches:
self.assertEqual(match.comp_level, 'qm')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each qual match
for i in xrange(72):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, None)
self.assertEqual(len(event.matches), 72)
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
# Check some final rankings
self.assertEqual(event.details.rankings2[0]['sort_orders'][0], 22)
self.assertEqual(event.details.rankings2[-1]['sort_orders'][0], 4)
# After alliance selections
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
self.assertEqual(len(event.matches), 72)
# QF schedule added
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
self.assertEqual(len(event.matches), 84)
for match in event.matches:
if match.comp_level == 'qm':
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertEqual(match.comp_level, 'qf')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each QF match
for i in xrange(72, 82):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
if i <= 75:
self.assertEqual(len(event.matches), 84)
elif i <= 77:
self.assertEqual(len(event.matches), 83)
else:
self.assertEqual(len(event.matches), 82)
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if match.key.id() in {'2016nytr_qf1m3', '2016nytr_qf3m3'}:
# Unneeded tiebreak matches
self.assertFalse(match.has_been_played)
elif j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
# SF schedule added
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
self.assertEqual(len(event.matches), 88)
for match in event.matches:
if match.comp_level in {'qm', 'qf'}:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertEqual(match.comp_level, 'sf')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each SF match
for i in xrange(82, 87):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
if i < 85:
self.assertEqual(event.details.alliance_selections, self._alliance_selections)
else:
self.assertEqual(event.details.alliance_selections, self._alliance_selections_with_backup)
if i <= 83:
self.assertEqual(len(event.matches), 88)
else:
self.assertEqual(len(event.matches), 87)
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if match.key.id() == '2016nytr_sf1m3':
# Unneeded tiebreak matches
self.assertFalse(match.has_been_played)
elif j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
# F schedule added
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections_with_backup)
self.assertEqual(len(event.matches), 90)
for match in event.matches:
if match.comp_level in {'qm', 'qf', 'sf'}:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertEqual(match.comp_level, 'f')
self.assertFalse(match.has_been_played)
self.assertEqual(match.actual_time, None)
# After each F match
for i in xrange(87, 90):
es.step()
event = Event.get_by_id('2016nytr')
self.assertNotEqual(event, None)
self.assertEqual(event.details.alliance_selections, self._alliance_selections_with_backup)
self.assertEqual(len(event.matches), 90)
matches = MatchHelper.play_order_sort_matches(event.matches)
for j, match in enumerate(matches):
if j <= i:
self.assertTrue(match.has_been_played)
self.assertNotEqual(match.actual_time, None)
else:
self.assertFalse(match.has_been_played)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from itertools import cycle
import os
import tarfile
import threading
import zipfile
import numpy as np
from six.moves.urllib.parse import urljoin
from six.moves.urllib.request import pathname2url
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
class TestGetFileAndValidateIt(test.TestCase):
def test_get_file_and_validate_it(self):
"""Tests get_file from a url, plus extraction and validation.
"""
dest_dir = self.get_temp_dir()
orig_dir = self.get_temp_dir()
text_file_path = os.path.join(orig_dir, 'test.txt')
zip_file_path = os.path.join(orig_dir, 'test.zip')
tar_file_path = os.path.join(orig_dir, 'test.tar.gz')
with open(text_file_path, 'w') as text_file:
text_file.write('Float like a butterfly, sting like a bee.')
with tarfile.open(tar_file_path, 'w:gz') as tar_file:
tar_file.add(text_file_path)
with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
zip_file.write(text_file_path)
origin = urljoin('file://', pathname2url(os.path.abspath(tar_file_path)))
path = keras.utils.data_utils.get_file('test.txt', origin,
untar=True, cache_subdir=dest_dir)
filepath = path + '.tar.gz'
hashval_sha256 = keras.utils.data_utils._hash_file(filepath)
hashval_md5 = keras.utils.data_utils._hash_file(filepath, algorithm='md5')
path = keras.utils.data_utils.get_file(
'test.txt', origin, md5_hash=hashval_md5,
untar=True, cache_subdir=dest_dir)
path = keras.utils.data_utils.get_file(
filepath, origin, file_hash=hashval_sha256,
extract=True, cache_subdir=dest_dir)
self.assertTrue(os.path.exists(filepath))
self.assertTrue(keras.utils.data_utils.validate_file(filepath,
hashval_sha256))
self.assertTrue(keras.utils.data_utils.validate_file(filepath, hashval_md5))
os.remove(filepath)
origin = urljoin('file://', pathname2url(os.path.abspath(zip_file_path)))
hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path)
hashval_md5 = keras.utils.data_utils._hash_file(zip_file_path,
algorithm='md5')
path = keras.utils.data_utils.get_file(
'test', origin, md5_hash=hashval_md5,
extract=True, cache_subdir=dest_dir)
path = keras.utils.data_utils.get_file(
'test', origin, file_hash=hashval_sha256,
extract=True, cache_subdir=dest_dir)
self.assertTrue(os.path.exists(path))
self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_sha256))
self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_md5))
class ThreadsafeIter(object):
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self.lock:
return next(self.it)
def threadsafe_generator(f):
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
class TestSequence(keras.utils.data_utils.Sequence):
def __init__(self, shape):
self.shape = shape
def __getitem__(self, item):
return np.ones(self.shape, dtype=np.uint8) * item
def __len__(self):
return 100
class FaultSequence(keras.utils.data_utils.Sequence):
def __getitem__(self, item):
raise IndexError(item, 'item is not present')
def __len__(self):
return 100
@threadsafe_generator
def create_generator_from_sequence_threads(ds):
for i in cycle(range(len(ds))):
yield ds[i]
def create_generator_from_sequence_pcs(ds):
for i in cycle(range(len(ds))):
yield ds[i]
class TestEnqueuers(test.TestCase):
def test_generator_enqueuer_threads(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(TestSequence([3, 200, 200, 3])),
use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(int(next(gen_output)[0, 0, 0, 0]))
self.assertEqual(len(set(acc) - set(range(100))), 0)
enqueuer.stop()
def test_generator_enqueuer_processes(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_pcs(TestSequence([3, 200, 200, 3])),
use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(int(next(gen_output)[0, 0, 0, 0]))
self.assertNotEqual(acc, list(range(100)))
enqueuer.stop()
def test_generator_enqueuer_fail_threads(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(FaultSequence()),
use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
def test_generator_enqueuer_fail_processes(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_pcs(FaultSequence()),
use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
def test_ordered_enqueuer_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc, list(range(100)))
enqueuer.stop()
def test_ordered_enqueuer_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc, list(range(100)))
enqueuer.stop()
def test_ordered_enqueuer_fail_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
FaultSequence(), use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
def test_ordered_enqueuer_fail_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
FaultSequence(), use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
if __name__ == '__main__':
test.main()
|
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import re
import pickle
import shutil
import string
import itertools
from cloudify._compat import text_type
from cloudify.manager import get_rest_client
from cloudify.workflows import ctx
from .constants import V_4_1_0, SECRET_STORE_AGENT_KEY_PREFIX
from .utils import is_compute, run, get_tenants_list
ALLOWED_KEY_CHARS = string.ascii_letters + string.digits + '-._'
CRED_DIR = 'snapshot-credentials'
DEPLOYMENTS_QUERY = """
SELECT nodes.id, deployments.id, properties
FROM nodes
JOIN deployments
ON nodes._deployment_fk = deployments._storage_id
JOIN tenants
ON deployments._tenant_id = tenants.id
WHERE tenants.name = %(tenant)s
;
"""
class Credentials(object):
_CRED_KEY_NAME = 'agent_key'
_ARCHIVE_CRED_PATH = None
def dump(self, tempdir, version):
self._ARCHIVE_CRED_PATH = os.path.join(tempdir, CRED_DIR)
ctx.logger.debug('Dumping credentials data, archive_cred_path: '
'{0}'.format(self._ARCHIVE_CRED_PATH))
os.makedirs(self._ARCHIVE_CRED_PATH)
for tenant, dep_id, node in self._get_hosts(version):
agent_config = get_agent_config(node.properties)
agent_key = agent_config.get('key')
# Don't do anything with empty or {get_secret} agent keys
if agent_key and isinstance(agent_key, text_type):
agent_dirname = _get_agent_dirname(
version, tenant, dep_id, node.id
)
self._dump_agent_key(agent_dirname, agent_key)
@staticmethod
def _get_hosts(version):
"""Find host nodes, and yield (tenant_name, dep_id, node)"""
tenants = get_tenants_list(version)
for tenant_name in tenants:
client = get_rest_client(tenant_name)
deployments = client.deployments.list(
_include=['id'],
_get_all_results=True
)
for deployment in deployments:
nodes = client.nodes.list(
deployment_id=deployment.id,
_get_all_results=True
)
for node in nodes:
if is_compute(node):
yield tenant_name, deployment.id, node
def _dump_agent_key(self, agent_dirname, agent_key):
"""Copy an agent key from its location on the manager to the snapshot
dump
"""
os.makedirs(os.path.join(self._ARCHIVE_CRED_PATH, agent_dirname))
source = os.path.expanduser(agent_key)
destination = os.path.join(self._ARCHIVE_CRED_PATH, agent_dirname,
self._CRED_KEY_NAME)
ctx.logger.debug('Dumping credentials data, '
'copy from: {0} to {1}'
.format(source, destination))
try:
shutil.copy(source, destination)
except Exception as e:
ctx.logger.debug(
"Key doesn't appear to be a file path. Skipping ({})".format(
e))
def get_agent_config(node_properties):
"""cloudify_agent is deprecated, but still might be used in older
systems, so we try to gather the agent config from both sources
"""
agent_config = node_properties.get('cloudify_agent', {})
agent_config.update(node_properties.get('agent_config', {}))
return agent_config
def candidate_key_names(path):
filtered = SECRET_STORE_AGENT_KEY_PREFIX + ''.join(
char if char in ALLOWED_KEY_CHARS else '_'
for char in path
)
yield filtered
for suffix in itertools.count(1):
yield '{name}_{suffix}'.format(name=filtered, suffix=suffix)
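# Illustrative behaviour, not part of the original module: characters outside
# ALLOWED_KEY_CHARS are replaced with underscores, and numeric suffixes are
# generated on demand until a collision-free name is found. The path is a
# hypothetical placeholder.
def _example_candidate_key_names():
    names = candidate_key_names('/root/.ssh/agent key')
    first = next(names)   # SECRET_STORE_AGENT_KEY_PREFIX + '_root_.ssh_agent_key'
    second = next(names)  # same name with a '_1' suffix
    return first, second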
def _fix_snapshot_ssh_db(tenant, orig, replace):
python_bin = '/opt/manager/env/bin/python'
dir_path = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.join(dir_path, 'fix_snapshot_ssh_db.py')
command = [python_bin, script_path, tenant, orig, replace]
res = run(command)
if res and hasattr(res, 'aggr_stdout'):
ctx.logger.debug('Process result: \n{0}'
.format(res.aggr_stdout))
def _get_agent_dirname(version, tenant, dep_id, node_id):
if version >= V_4_1_0:
return '{tenant}_{dep_id}_{node_id}'.format(
tenant=tenant, dep_id=dep_id, node_id=node_id
)
else:
return '{dep_id}_{node_id}'.format(dep_id=dep_id, node_id=node_id)
def restore(tempdir, postgres, version):
dump_cred_dir = os.path.join(tempdir, CRED_DIR)
if not os.path.isdir(dump_cred_dir):
ctx.logger.info('Missing credentials dir: '
'{0}'.format(dump_cred_dir))
return
credential_dirs = set(os.listdir(dump_cred_dir))
for tenant in get_tenants_list(version):
client = get_rest_client(tenant=tenant)
# !! mapping key CONTENTS to their secret store keys
key_secrets = {}
secret_keys = set()
secrets_list = client.secrets.list(_include=['key'],
_get_all_results=True)
for secret in secrets_list:
if secret.key.startswith(SECRET_STORE_AGENT_KEY_PREFIX):
secret = client.secrets.get(secret.key)
key_secrets[secret.value] = secret.key
secret_keys.add(secret.key)
new_key_secrets = {}
replacements = {}
result = postgres.run_query(
DEPLOYMENTS_QUERY,
{'tenant': tenant},
)['all']
for elem in result:
node_id = elem[0]
deployment_id = elem[1]
node_properties = pickle.loads(elem[2])
agent_config = get_agent_config(node_properties)
agent_key = agent_config.get('key')
if not agent_key:
continue
dir_name = _get_agent_dirname(version,
tenant,
deployment_id,
node_id)
if not isinstance(agent_key, text_type):
ctx.logger.info('key for {} is not a path'.format(dir_name))
continue
if re.search('BEGIN .* PRIVATE KEY', agent_key):
ctx.logger.info('key for {} is bare key'.format(dir_name))
continue
if dir_name not in credential_dirs:
continue
agent_key_path_in_dump = os.path.join(
dump_cred_dir,
dir_name,
'agent_key',
)
try:
with open(agent_key_path_in_dump) as f:
key_data = f.read()
except IOError as e:
if e.errno == os.errno.ENOENT:
ctx.logger.info(
'key file for {} not found'.format(dir_name))
continue
raise
# We've probably found the right key!
if key_data not in key_secrets:
# If we got here, we need to create a secret
for key in candidate_key_names(agent_key):
if key not in secret_keys:
new_key_secrets[key] = key_data
key_secrets[key_data] = key
secret_keys.add(key)
break
replacements[agent_key] = key_secrets[key_data]
for key, value in new_key_secrets.items():
client.secrets.create(key, value)
for orig, replace in replacements.items():
_fix_snapshot_ssh_db(tenant, orig, replace)
|
|
"""Module/script to byte-compile all .py files to .pyc (or .pyo) files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
import struct
import imp
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
force=0, rx=None, quiet=0):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
    force: if 1, force compilation, even if timestamps are up-to-date
    rx: if given, a regular expression; files whose full path matches it
    are skipped
    quiet: if 1, be quiet during compilation
"""
if not quiet:
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
names.sort()
success = 1
for name in names:
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if not os.path.isdir(fullname):
if not compile_file(fullname, ddir, force, rx, quiet):
success = 0
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
quiet):
success = 0
return success
def compile_file(fullname, ddir=None, force=0, rx=None, quiet=0):
"""Byte-compile one file.
Arguments (only fullname is required):
fullname: the file to byte-compile
ddir: if given, the directory name compiled in to the
byte-code file.
force: if 1, force compilation, even if timestamps are up-to-date
quiet: if 1, be quiet during compilation
"""
success = 1
name = os.path.basename(fullname)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
try:
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sl', imp.get_magic(), mtime)
cfile = fullname.replace('.py', '$py.class')
with open(cfile, 'rb') as chandle:
actual = chandle.read(8)
if expect == actual:
return success
except IOError:
pass
if not quiet:
print 'Compiling', fullname, '...'
try:
ok = py_compile.compile(fullname, None, dfile, True)
except py_compile.PyCompileError,err:
if quiet:
print 'Compiling', fullname, '...'
print err.msg
success = 0
except IOError, e:
print "Sorry", e
success = 0
else:
if ok == 0:
success = 0
return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default true)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default 0)
quiet: as for compile_dir() (default 0)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet)
return success
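# A brief usage sketch (the directory name is purely illustrative): the same
# helpers can be driven programmatically instead of via the command line.
def _example_programmatic_use():
    ok = compile_dir('/tmp/myproject', maxlevels=2, force=1, quiet=1)
    if not ok:
        print 'some files failed to byte-compile'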
def expand_args(args, flist):
"""read names in flist and append to args"""
expanded = args[:]
if flist:
try:
if flist == '-':
fd = sys.stdin
else:
fd = open(flist)
while 1:
line = fd.readline()
if not line:
break
expanded.append(line[:-1])
except IOError:
print "Error reading file list %s" % flist
raise
return expanded
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:i:')
except getopt.error, msg:
print msg
print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
"[-x regexp] [-i list] [directory|file ...]"
print
print "arguments: zero or more file and directory names to compile; " \
"if no arguments given, "
print " defaults to the equivalent of -l sys.path"
print
print "options:"
print "-l: don't recurse into subdirectories"
print "-f: force rebuild even if timestamps are up-to-date"
print "-q: output only error messages"
print "-d destdir: directory to prepend to file paths for use in " \
"compile-time tracebacks and in"
print " runtime tracebacks in cases where the source " \
"file is unavailable"
print "-x regexp: skip files matching the regular expression regexp; " \
"the regexp is searched for"
print " in the full path of each file considered for " \
"compilation"
print "-i file: add all the files and directories listed in file to " \
"the list considered for"
print ' compilation; if "-", names are read from stdin'
sys.exit(2)
maxlevels = 10
ddir = None
force = 0
quiet = 0
rx = None
flist = None
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-f': force = 1
if o == '-q': quiet = 1
if o == '-x':
import re
rx = re.compile(a)
if o == '-i': flist = a
if ddir:
if len(args) != 1 and not os.path.isdir(args[0]):
print "-d destdir require exactly one directory argument"
sys.exit(2)
success = 1
try:
if args or flist:
try:
if flist:
args = expand_args(args, flist)
except IOError:
success = 0
if success:
for arg in args:
if os.path.isdir(arg):
if not compile_dir(arg, maxlevels, ddir,
force, rx, quiet):
success = 0
else:
if not compile_file(arg, ddir, force, rx, quiet):
success = 0
else:
success = compile_path()
except KeyboardInterrupt:
print "\n[interrupted]"
success = 0
return success
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
|
|
"""Various helper functions"""
import asyncio
import base64
import binascii
import cgi
import datetime
import functools
import inspect
import os
import re
import sys
import time
import warnings
import weakref
from collections import namedtuple
from collections.abc import Coroutine
from contextlib import suppress
from math import ceil
from pathlib import Path
from urllib.parse import quote
from urllib.request import getproxies
import async_timeout
from yarl import URL
from . import hdrs
from .abc import AbstractAccessLogger
from .log import client_logger
PY_352 = sys.version_info >= (3, 5, 2)
__all__ = ('BasicAuth',)
sentinel = object()
NO_EXTENSIONS = bool(os.environ.get('AIOHTTP_NO_EXTENSIONS'))
CHAR = set(chr(i) for i in range(0, 128))
CTL = set(chr(i) for i in range(0, 32)) | {chr(127), }
SEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']',
'?', '=', '{', '}', ' ', chr(9)}
TOKEN = CHAR ^ CTL ^ SEPARATORS
class _BaseCoroMixin(Coroutine):
    __slots__ = ('_coro',)
def __init__(self, coro):
self._coro = coro
def send(self, arg):
return self._coro.send(arg)
def throw(self, arg):
return self._coro.throw(arg)
def close(self):
return self._coro.close()
@property
def gi_frame(self):
return self._coro.gi_frame
@property
def gi_running(self):
return self._coro.gi_running
@property
def gi_code(self):
return self._coro.gi_code
def __next__(self):
return self.send(None)
@asyncio.coroutine
def __iter__(self):
ret = yield from self._coro
return ret
def __await__(self):
ret = yield from self._coro
return ret
class _CoroGuard(_BaseCoroMixin):
"""Only to be used with func:`deprecated_noop`.
Otherwise the stack information in the raised warning doesn't line up with
the user's code anymore.
"""
__slots__ = ('_msg', '_awaited')
def __init__(self, coro, msg):
super().__init__(coro)
self._msg = msg
self._awaited = False
def send(self, arg):
self._awaited = True
return self._coro.send(arg)
@asyncio.coroutine
def __iter__(self):
self._awaited = True
return super().__iter__()
def __await__(self):
self._awaited = True
return super().__await__()
def __del__(self):
self._coro = None
if not self._awaited:
warnings.warn(self._msg, DeprecationWarning, stacklevel=2)
coroutines = asyncio.coroutines
old_debug = coroutines._DEBUG
coroutines._DEBUG = False
@asyncio.coroutine
def noop(*args, **kwargs):
return
def deprecated_noop(message):
return _CoroGuard(noop(), message)
coroutines._DEBUG = old_debug
try:
from asyncio import isfuture
except ImportError:
def isfuture(fut):
return isinstance(fut, asyncio.Future)
class BasicAuth(namedtuple('BasicAuth', ['login', 'password', 'encoding'])):
"""Http basic authentication helper."""
def __new__(cls, login, password='', encoding='latin1'):
if login is None:
raise ValueError('None is not allowed as login value')
if password is None:
raise ValueError('None is not allowed as password value')
if ':' in login:
raise ValueError(
'A ":" is not allowed in login (RFC 1945#section-11.1)')
return super().__new__(cls, login, password, encoding)
@classmethod
def decode(cls, auth_header, encoding='latin1'):
"""Create a BasicAuth object from an Authorization HTTP header."""
split = auth_header.strip().split(' ')
if len(split) == 2:
if split[0].strip().lower() != 'basic':
raise ValueError('Unknown authorization method %s' % split[0])
to_decode = split[1]
else:
raise ValueError('Could not parse authorization header.')
try:
username, _, password = base64.b64decode(
to_decode.encode('ascii')
).decode(encoding).partition(':')
except binascii.Error:
raise ValueError('Invalid base64 encoding.')
return cls(username, password, encoding=encoding)
@classmethod
def from_url(cls, url, *, encoding='latin1'):
"""Create BasicAuth from url."""
if not isinstance(url, URL):
raise TypeError("url should be yarl.URL instance")
if url.user is None:
return None
return cls(url.user, url.password or '', encoding=encoding)
def encode(self):
"""Encode credentials."""
creds = ('%s:%s' % (self.login, self.password)).encode(self.encoding)
return 'Basic %s' % base64.b64encode(creds).decode(self.encoding)
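# A short round-trip sketch for the helper above (credentials are illustrative):
def _example_basic_auth_roundtrip():
    auth = BasicAuth('user', 'secret')
    header = auth.encode()              # 'Basic dXNlcjpzZWNyZXQ='
    decoded = BasicAuth.decode(header)
    assert (decoded.login, decoded.password) == ('user', 'secret')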
def strip_auth_from_url(url):
auth = BasicAuth.from_url(url)
if auth is None:
return url, None
else:
return url.with_user(None), auth
ProxyInfo = namedtuple('ProxyInfo', 'proxy proxy_auth')
def proxies_from_env():
proxy_urls = {k: URL(v) for k, v in getproxies().items()
if k in ('http', 'https')}
stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()}
ret = {}
for proto, val in stripped.items():
proxy, auth = val
if proxy.scheme == 'https':
client_logger.warning(
"HTTPS proxies %s are not supported, ignoring", proxy)
continue
ret[proto] = ProxyInfo(proxy, auth)
return ret
if PY_352:
def create_future(loop):
return loop.create_future()
else:
def create_future(loop): # pragma: no cover
"""Compatibility wrapper for the loop.create_future() call introduced in
3.5.2."""
return asyncio.Future(loop=loop)
def current_task(loop=None):
if loop is None:
loop = asyncio.get_event_loop()
task = asyncio.Task.current_task(loop=loop)
if task is None:
if hasattr(loop, 'current_task'):
task = loop.current_task()
return task
def isasyncgenfunction(obj):
if hasattr(inspect, 'isasyncgenfunction'):
return inspect.isasyncgenfunction(obj)
return False
MimeType = namedtuple('MimeType', 'type subtype suffix parameters')
def parse_mimetype(mimetype):
"""Parses a MIME type into its components.
mimetype is a MIME type string.
Returns a MimeType object.
Example:
>>> parse_mimetype('text/html; charset=utf-8')
MimeType(type='text', subtype='html', suffix='',
parameters={'charset': 'utf-8'})
"""
if not mimetype:
return MimeType(type='', subtype='', suffix='', parameters={})
parts = mimetype.split(';')
params = []
for item in parts[1:]:
if not item:
continue
key, value = item.split('=', 1) if '=' in item else (item, '')
params.append((key.lower().strip(), value.strip(' "')))
params = dict(params)
fulltype = parts[0].strip().lower()
if fulltype == '*':
fulltype = '*/*'
mtype, stype = fulltype.split('/', 1) \
if '/' in fulltype else (fulltype, '')
stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')
return MimeType(type=mtype, subtype=stype, suffix=suffix,
parameters=params)
def guess_filename(obj, default=None):
name = getattr(obj, 'name', None)
if name and isinstance(name, str) and name[0] != '<' and name[-1] != '>':
return Path(name).name
return default
def content_disposition_header(disptype, quote_fields=True, **params):
"""Sets ``Content-Disposition`` header.
disptype is a disposition type: inline, attachment, form-data.
Should be valid extension token (see RFC 2183)
params is a dict with disposition params.
"""
if not disptype or not (TOKEN > set(disptype)):
raise ValueError('bad content disposition type {!r}'
''.format(disptype))
value = disptype
if params:
lparams = []
for key, val in params.items():
if not key or not (TOKEN > set(key)):
raise ValueError('bad content disposition parameter'
' {!r}={!r}'.format(key, val))
qval = quote(val, '') if quote_fields else val
lparams.append((key, '"%s"' % qval))
if key == 'filename':
lparams.append(('filename*', "utf-8''" + qval))
sparams = '; '.join('='.join(pair) for pair in lparams)
value = '; '.join((value, sparams))
return value
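# A minimal sketch of the value built above (the filename is illustrative);
# note the additional 'filename*' parameter the function adds for 'filename'.
def _example_content_disposition():
    value = content_disposition_header('attachment', filename='report.csv')
    # value == 'attachment; filename="report.csv"; filename*=utf-8\'\'report.csv'
    return value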
class AccessLogger(AbstractAccessLogger):
"""Helper object to log access.
Usage:
log = logging.getLogger("spam")
log_format = "%a %{User-Agent}i"
access_logger = AccessLogger(log, log_format)
access_logger.log(request, response, time)
Format:
%% The percent sign
%a Remote IP-address (IP-address of proxy if using reverse proxy)
%t Time when the request was started to process
%P The process ID of the child that serviced the request
%r First line of request
%s Response status code
%b Size of response in bytes, including HTTP headers
%T Time taken to serve the request, in seconds
%Tf Time taken to serve the request, in seconds with floating fraction
in .06f format
%D Time taken to serve the request, in microseconds
%{FOO}i request.headers['FOO']
%{FOO}o response.headers['FOO']
%{FOO}e os.environ['FOO']
"""
LOG_FORMAT_MAP = {
'a': 'remote_address',
't': 'request_time',
'P': 'process_id',
'r': 'first_request_line',
's': 'response_status',
'b': 'response_size',
'T': 'request_time',
'Tf': 'request_time_frac',
'D': 'request_time_micro',
'i': 'request_header',
'o': 'response_header',
}
LOG_FORMAT = '%a %l %u %t "%r" %s %b "%{Referrer}i" "%{User-Agent}i"'
FORMAT_RE = re.compile(r'%(\{([A-Za-z0-9\-_]+)\}([ioe])|[atPrsbOD]|Tf?)')
CLEANUP_RE = re.compile(r'(%[^s])')
_FORMAT_CACHE = {}
KeyMethod = namedtuple('KeyMethod', 'key method')
def __init__(self, logger, log_format=LOG_FORMAT):
"""Initialise the logger.
logger is a logger object to be used for logging.
        log_format is a string with an Apache-compatible log format description.
"""
super().__init__(logger, log_format=log_format)
_compiled_format = AccessLogger._FORMAT_CACHE.get(log_format)
if not _compiled_format:
_compiled_format = self.compile_format(log_format)
AccessLogger._FORMAT_CACHE[log_format] = _compiled_format
self._log_format, self._methods = _compiled_format
def compile_format(self, log_format):
"""Translate log_format into form usable by modulo formatting
All known atoms will be replaced with %s
Also methods for formatting of those atoms will be added to
_methods in apropriate order
For example we have log_format = "%a %t"
This format will be translated to "%s %s"
Also contents of _methods will be
[self._format_a, self._format_t]
These method will be called and results will be passed
to translated string format.
Each _format_* method receive 'args' which is list of arguments
given to self.log
Exceptions are _format_e, _format_i and _format_o methods which
also receive key name (by functools.partial)
"""
log_format = log_format.replace("%l", "-")
log_format = log_format.replace("%u", "-")
# list of (key, method) tuples, we don't use an OrderedDict as users
# can repeat the same key more than once
methods = list()
for atom in self.FORMAT_RE.findall(log_format):
if atom[1] == '':
format_key = self.LOG_FORMAT_MAP[atom[0]]
m = getattr(AccessLogger, '_format_%s' % atom[0])
else:
format_key = (self.LOG_FORMAT_MAP[atom[2]], atom[1])
m = getattr(AccessLogger, '_format_%s' % atom[2])
m = functools.partial(m, atom[1])
methods.append(self.KeyMethod(format_key, m))
log_format = self.FORMAT_RE.sub(r'%s', log_format)
log_format = self.CLEANUP_RE.sub(r'%\1', log_format)
return log_format, methods
@staticmethod
def _format_i(key, request, response, time):
if request is None:
return '(no headers)'
# suboptimal, make istr(key) once
return request.headers.get(key, '-')
@staticmethod
def _format_o(key, request, response, time):
# suboptimal, make istr(key) once
return response.headers.get(key, '-')
@staticmethod
def _format_a(request, response, time):
if request is None:
return '-'
ip = request.remote
return ip if ip is not None else '-'
@staticmethod
def _format_t(request, response, time):
return datetime.datetime.utcnow().strftime('[%d/%b/%Y:%H:%M:%S +0000]')
@staticmethod
def _format_P(request, response, time):
return "<%s>" % os.getpid()
@staticmethod
def _format_r(request, response, time):
if request is None:
return '-'
return '%s %s HTTP/%s.%s' % tuple((request.method,
request.path_qs) + request.version)
@staticmethod
def _format_s(request, response, time):
return response.status
@staticmethod
def _format_b(request, response, time):
return response.body_length
@staticmethod
def _format_T(request, response, time):
return round(time)
@staticmethod
def _format_Tf(request, response, time):
return '%06f' % time
@staticmethod
def _format_D(request, response, time):
return round(time * 1000000)
def _format_line(self, request, response, time):
return ((key, method(request, response, time))
for key, method in self._methods)
def log(self, request, response, time):
try:
fmt_info = self._format_line(request, response, time)
values = list()
extra = dict()
for key, value in fmt_info:
values.append(value)
if key.__class__ is str:
extra[key] = value
else:
extra[key[0]] = {key[1]: value}
self.logger.info(self._log_format % tuple(values), extra=extra)
except Exception:
self.logger.exception("Error in logging")
class reify:
"""Use as a class method decorator. It operates almost exactly like
the Python `@property` decorator, but it puts the result of the
method it decorates into the instance dict after the first call,
effectively replacing the function it decorates with an instance
variable. It is, in Python parlance, a data descriptor.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except Exception: # pragma: no cover
self.__doc__ = ""
self.name = wrapped.__name__
def __get__(self, inst, owner, _sentinel=sentinel):
try:
try:
return inst._cache[self.name]
except KeyError:
val = self.wrapped(inst)
inst._cache[self.name] = val
return val
except AttributeError:
if inst is None:
return self
raise
def __set__(self, inst, value):
raise AttributeError("reified property is read-only")
_ipv4_pattern = (r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
_ipv6_pattern = (
r'^(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}'
r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}$)(([0-9A-F]{1,4}:){0,5}|:)'
r'((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})'
r'(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])|(?:[A-F0-9]{1,4}:){7}'
r'[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}$)'
r'(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}'
r':|:(:[A-F0-9]{1,4}){7})$')
_ipv4_regex = re.compile(_ipv4_pattern)
_ipv6_regex = re.compile(_ipv6_pattern, flags=re.IGNORECASE)
_ipv4_regexb = re.compile(_ipv4_pattern.encode('ascii'))
_ipv6_regexb = re.compile(_ipv6_pattern.encode('ascii'), flags=re.IGNORECASE)
def is_ip_address(host):
if host is None:
return False
if isinstance(host, str):
if _ipv4_regex.match(host) or _ipv6_regex.match(host):
return True
else:
return False
elif isinstance(host, (bytes, bytearray, memoryview)):
if _ipv4_regexb.match(host) or _ipv6_regexb.match(host):
return True
else:
return False
else:
raise TypeError("{} [{}] is not a str or bytes"
.format(host, type(host)))
_cached_current_datetime = None
_cached_formatted_datetime = None
def rfc822_formatted_time():
global _cached_current_datetime
global _cached_formatted_datetime
now = int(time.time())
if now != _cached_current_datetime:
# Weekday and month names for HTTP date/time formatting;
# always English!
# Tuples are constants stored in codeobject!
_weekdayname = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
_monthname = ("", # Dummy so we can use 1-based month numbers
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
_cached_formatted_datetime = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[wd], day, _monthname[month], year, hh, mm, ss
)
_cached_current_datetime = now
return _cached_formatted_datetime
def _weakref_handle(info):
ref, name = info
ob = ref()
if ob is not None:
with suppress(Exception):
getattr(ob, name)()
def weakref_handle(ob, name, timeout, loop, ceil_timeout=True):
if timeout is not None and timeout > 0:
when = loop.time() + timeout
if ceil_timeout:
when = ceil(when)
return loop.call_at(when, _weakref_handle, (weakref.ref(ob), name))
def call_later(cb, timeout, loop):
if timeout is not None and timeout > 0:
when = ceil(loop.time() + timeout)
return loop.call_at(when, cb)
class TimeoutHandle:
""" Timeout handle """
def __init__(self, loop, timeout):
self._timeout = timeout
self._loop = loop
self._callbacks = []
def register(self, callback, *args, **kwargs):
self._callbacks.append((callback, args, kwargs))
def close(self):
self._callbacks.clear()
def start(self):
if self._timeout is not None and self._timeout > 0:
at = ceil(self._loop.time() + self._timeout)
return self._loop.call_at(at, self.__call__)
def timer(self):
if self._timeout is not None and self._timeout > 0:
timer = TimerContext(self._loop)
self.register(timer.timeout)
else:
timer = TimerNoop()
return timer
def __call__(self):
for cb, args, kwargs in self._callbacks:
with suppress(Exception):
cb(*args, **kwargs)
self._callbacks.clear()
class TimerNoop:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class TimerContext:
""" Low resolution timeout context manager """
def __init__(self, loop):
self._loop = loop
self._tasks = []
self._cancelled = False
def __enter__(self):
task = current_task(loop=self._loop)
if task is None:
raise RuntimeError('Timeout context manager should be used '
'inside a task')
if self._cancelled:
task.cancel()
raise asyncio.TimeoutError from None
self._tasks.append(task)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._tasks:
self._tasks.pop()
if exc_type is asyncio.CancelledError and self._cancelled:
raise asyncio.TimeoutError from None
def timeout(self):
if not self._cancelled:
for task in set(self._tasks):
task.cancel()
self._cancelled = True
class CeilTimeout(async_timeout.timeout):
def __enter__(self):
if self._timeout is not None:
self._task = current_task(loop=self._loop)
if self._task is None:
raise RuntimeError(
'Timeout context manager should be used inside a task')
self._cancel_handler = self._loop.call_at(
ceil(self._loop.time() + self._timeout), self._cancel_task)
return self
class HeadersMixin:
_content_type = None
_content_dict = None
_stored_content_type = sentinel
def _parse_content_type(self, raw):
self._stored_content_type = raw
if raw is None:
# default value according to RFC 2616
self._content_type = 'application/octet-stream'
self._content_dict = {}
else:
self._content_type, self._content_dict = cgi.parse_header(raw)
@property
def content_type(self, *, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
"""The value of content part for Content-Type HTTP header."""
raw = self._headers.get(_CONTENT_TYPE)
if self._stored_content_type != raw:
self._parse_content_type(raw)
return self._content_type
@property
def charset(self, *, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
"""The value of charset part for Content-Type HTTP header."""
raw = self._headers.get(_CONTENT_TYPE)
if self._stored_content_type != raw:
self._parse_content_type(raw)
return self._content_dict.get('charset')
@property
def content_length(self, *, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
"""The value of Content-Length HTTP header."""
content_length = self._headers.get(_CONTENT_LENGTH)
if content_length:
return int(content_length)
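# A minimal sketch of HeadersMixin in use: the mixin only requires a _headers
# mapping on the host object (the class below is illustrative, not aiohttp API).
class _ExampleMessage(HeadersMixin):
    def __init__(self, headers):
        self._headers = headers

# _ExampleMessage({hdrs.CONTENT_TYPE: 'text/html; charset=utf-8'}).charset
# evaluates to 'utf-8', and .content_type to 'text/html'.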
|
|
from django import template
from ttag import utils, args
class Options(object):
def __init__(self, meta, *args, **kwargs):
super(Options, self).__init__(*args, **kwargs)
self.positional_args = []
self.named_args = {}
# A list of argument names that are inherited from bases
self.parent_args = []
self.name = getattr(meta, 'name', None)
self.block = getattr(meta, 'block', False)
self.end_block = getattr(meta, 'end_block', 'end%(name)s')
@property
def args(self):
if not hasattr(self, '_args'):
args = dict([(arg.name, arg) for arg in self.positional_args])
args.update(self.named_args)
self._args = args
return self._args
def reset_args(self):
if hasattr(self, '_args'):
del self._args
def post_process(self):
pass
def _get_end_block(self):
return self._end_block % {'name': self.name}
def _set_end_block(self, value):
self._end_block = value
end_block = property(_get_end_block, _set_end_block)
class DeclarativeArgsMetaclass(type):
options_class = Options
def __new__(cls, name, bases, attrs):
super_new = super(DeclarativeArgsMetaclass, cls).__new__
parents = [b for b in bases if isinstance(b, DeclarativeArgsMetaclass)]
if not parents:
return super_new(cls, name, bases, attrs)
try:
meta = attrs.pop('Meta')
except KeyError:
meta = None
opts = cls.options_class(meta)
library = getattr(meta, 'library', None)
if library:
if not isinstance(library, template.Library):
raise TypeError("A valid library is required.")
# Generate the tag name if it wasn't explicitly provided.
if not opts.name:
opts.name = utils.get_default_name(name)
# Set the class name to the name defined, ensuring the defined name
# will be used when registering the tag with a Django tag library.
name = opts.name
all_args = [(arg_name.rstrip('_'), attrs.pop(arg_name))
for arg_name, obj in attrs.items()
if isinstance(obj, args.Arg)]
all_args.sort(key=lambda x: x[1].creation_counter)
# Put the positional and named arguments in their respective places.
optional_positional = False
for arg_name, arg in all_args:
arg.name = arg_name
if arg.positional:
if arg.required:
if optional_positional:
raise template.TemplateSyntaxError(
"Required '%s' positional argument of '%s' cannot "
"exist after optional positional arguments." % (
arg.name,
opts.name,
)
)
else:
optional_positional = True
opts.positional_args.append(arg)
else:
opts.named_args[arg_name] = arg
# If this class is subclassing another Tag, add that tag's positional
# arguments before ones declared here. The bases are looped in reverse
# to preserve the correct order of positional arguments and correctly
# override named arguments.
for base in bases[::-1]:
base_opts = getattr(base, '_meta', None)
if hasattr(base_opts, 'positional_args'):
opts.positional_args = base_opts.positional_args + \
opts.positional_args
if hasattr(base_opts, 'named_args'):
for arg_name, arg in base_opts.named_args.iteritems():
if arg_name not in opts.named_args:
opts.named_args[arg_name] = arg
opts.parent_args.append(arg_name)
attrs['_meta'] = opts
opts.post_process()
# Create the new class.
new_class = super_new(cls, name, bases, attrs)
# Register the tag if a tag library was provided.
if library:
library.tag(opts.name, new_class)
return new_class
class BaseTag(template.Node):
"""
A template tag.
"""
def __init__(self, parser, token):
self._vars = {}
tokens = list(utils.smarter_split(token.contents))[1:]
self._process_positional_args(parser, tokens)
self._process_named_args(parser, tokens)
if self._meta.block:
nodelists = {}
block_names = [self._meta.end_block]
other_blocks = isinstance(self._meta.block, dict) and \
self._meta.block or {}
block_names.extend(other_blocks)
current = ''
while True:
attr = 'nodelist%s' % (current and '_%s' % current or '')
nodelists[attr] = parser.parse(block_names)
current = parser.next_token().contents
if current == self._meta.end_block:
break
for name, required in other_blocks.iteritems():
if name in nodelists:
continue
if required:
raise template.TemplateSyntaxError('Expected {%% %s %%}' %
name)
nodelists[name] = template.NodeList()
self.child_nodelists = list(nodelists)
for attr, nodelist in nodelists.iteritems():
setattr(self, attr, nodelist)
def _valid_named_args(self):
"""
        Return a list of named arguments. Keyword argument names have ``=``
        appended so they can be checked for in :meth:`Arg.is_token_named_arg`.
"""
return [arg.keyword and '%s=' % name or name
for name, arg in self._meta.named_args.iteritems()]
def _process_positional_args(self, parser, tokens):
named_args = self._valid_named_args()
for arg in self._meta.positional_args:
value = arg.consume(parser, tokens, named_args)
if value is None:
if arg.default is not None:
self._vars[arg.name] = arg.default
elif arg.required:
raise template.TemplateSyntaxError(
"'%s' positional argument to '%s' is required" % (
arg.name,
self._meta.name,
)
)
else:
self._vars[arg.name] = value
def _process_named_args(self, parser, tokens):
named_args = self._valid_named_args()
while tokens:
arg_name = tokens[0]
keyword = '=' in arg_name
if keyword:
arg_name, tokens[0] = arg_name.split('=', 1)
else:
del tokens[0]
try:
arg = self._meta.named_args[arg_name]
except KeyError:
raise template.TemplateSyntaxError(
"'%s' does not take argument '%s'" % (self._meta.name,
arg_name)
)
if not keyword and arg.keyword:
raise template.TemplateSyntaxError(
"'%s' expected '%s=...'" % (self._meta.name, arg_name)
)
if keyword and not arg.keyword:
raise template.TemplateSyntaxError(
"'%s' didn't expect an '=' after '%s'" % (self._meta.name,
arg_name)
)
value = arg.consume(parser, tokens, named_args)
self._vars[arg.name] = value
# Handle missing items: required, default.
for arg_name, arg in self._meta.named_args.iteritems():
if arg.name in self._vars:
continue
if arg.default is not None:
self._vars[arg.name] = arg.default
elif arg.required:
raise template.TemplateSyntaxError(
"'%s' argument to '%s' is required" % (arg_name,
self._meta.name)
)
def clean(self, data, context):
"""
Additional tag-wide argument cleaning after each individual Arg's
``clean`` has been called.
"""
return data
def render(self, context):
"""
Render the tag.
"""
data = self.resolve(context)
return self.output(data)
def output(self, data):
raise NotImplementedError("Tag subclasses must implement this method.")
def resolve(self, context):
"""
Resolve variables and run clean methods, returning a dictionary
containing the cleaned data.
Cleaning happens after variable/filter resolution.
Cleaning order is similar to forms:
1) The argument's ``.clean()`` method.
2) The tag's ``clean_ARGNAME()`` method, if any.
3) The tag's ``.clean()`` method.
"""
data = {}
for name, value in self._vars.iteritems():
arg = self._meta.args[name]
value = arg.resolve(value, context)
value = arg.clean(value)
try:
tag_arg_clean = getattr(self, 'clean_%s' % arg.name)
except AttributeError:
pass
else:
value = tag_arg_clean(value)
data[name] = value
try:
data = self.clean(data, context)
except TypeError:
# Before version 2.0, clean accepted only the data parameter, keep
# supporting that.
data = self.clean(data)
return data
class Tag(BaseTag):
# This is a separate class from BaseTag in order to abstract the way
# arguments are specified. This class (Tag) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a tag using declarative syntax.
# BaseTag itself has no way of designating arguments.
__metaclass__ = DeclarativeArgsMetaclass
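# A minimal sketch of a declarative tag built on the classes above. The tag
# name, the argument and the rendered output are illustrative; Arg options and
# the template-side syntax depend on the ``args`` module, which is not shown
# here.
class Welcome(Tag):
    user = args.Arg()

    class Meta:
        name = 'welcome'

    def clean_user(self, value):
        # Optional per-argument hook, called by resolve() after Arg.clean().
        return value

    def output(self, data):
        return u'Hello, %s!' % data['user']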
|
|
import django
from django.apps import apps
from django.conf import settings
from django.db.models import AutoField, CharField, ForeignKey, BigIntegerField, OneToOneField
from django_sharding_library.constants import Backends
from django_sharding_library.utils import create_postgres_global_sequence, create_postgres_shard_id_function, get_next_sharded_id
try:
from django.db.models import BigAutoField
except ImportError:
from django.utils.translation import ugettext_lazy as _
class BigAutoField(AutoField):
description = _("Big (8 byte) integer")
def db_type(self, connection):
if connection.settings_dict['ENGINE'] in Backends.MYSQL:
return 'serial'
if connection.settings_dict['ENGINE'] in Backends.POSTGRES:
return 'bigserial'
return super(BigAutoField, self).db_type(connection)
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class ShardedIDFieldMixin(object):
"""
    An autoincrementing field which takes an id generator class instance
    as an argument and uses the generator to assign each new object a unique
    id.
    Note: This currently must be the primary key of the model. Although this
    may be updated in the future, it should not prevent the app from using
    other candidate fields as unique fields.
"""
def __init__(self, *args, **kwargs):
# Remove the strategy from the kwargs so that it doesn't get passed to Django.
setattr(self, 'strategy', kwargs['strategy'])
del kwargs['strategy']
return super(ShardedIDFieldMixin, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ShardedIDFieldMixin, self).deconstruct()
# Add the strategy from the kwargs so that it does get passed to our model.
kwargs['strategy'] = getattr(self, 'strategy')
return name, path, args, kwargs
def get_pk_value_on_save(self, instance):
if not instance.pk:
return self.strategy.get_next_id()
return instance.pk
class TableShardedIDField(ShardedIDFieldMixin, BigAutoField):
"""
    An autoincrementing field which takes a `source_table_name` argument, e.g.
    `app.model_name`, in order to generate unique ids for the sharded model.
"""
def __init__(self, *args, **kwargs):
from django_sharding_library.id_generation_strategies import TableStrategy
kwargs['strategy'] = TableStrategy(backing_model_name=kwargs['source_table_name'])
setattr(self, 'source_table_name', kwargs['source_table_name'])
del kwargs['source_table_name']
return super(TableShardedIDField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(TableShardedIDField, self).deconstruct()
kwargs['source_table_name'] = getattr(self, 'source_table_name')
return name, path, args, kwargs
class ShardedUUID4Field(ShardedIDFieldMixin, CharField):
def __init__(self, *args, **kwargs):
from django_sharding_library.id_generation_strategies import UUIDStrategy
kwargs['strategy'] = UUIDStrategy()
return super(ShardedUUID4Field, self).__init__(*args, **kwargs)
def get_pk_value_on_save(self, instance):
return self.strategy.get_next_id(instance.get_shard())
class ShardStorageFieldMixin(object):
"""
    A mixin for a field used to store the shard for an instance or a parent of
    an instance.
"""
def __init__(self, *args, **kwargs):
setattr(self, 'django_sharding__stores_shard', True)
setattr(self, 'django_sharding__shard_group', kwargs['shard_group'])
del kwargs['shard_group']
return super(ShardStorageFieldMixin, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ShardStorageFieldMixin, self).deconstruct()
kwargs['shard_group'] = getattr(self, 'django_sharding__shard_group')
return name, path, args, kwargs
class ShardLocalStorageFieldMixin(ShardStorageFieldMixin):
"""
    The ShardLocalStorageFieldMixin is used when the shard is stored on the
    model that is being sharded by, e.g. storing the shard on the User model
    when sharding by User.
"""
def __init__(self, *args, **kwargs):
setattr(self, 'django_sharding__use_signal', True)
return super(ShardLocalStorageFieldMixin, self).__init__(*args, **kwargs)
def deconstruct(self):
return super(ShardLocalStorageFieldMixin, self).deconstruct()
class ShardStorageCharField(ShardLocalStorageFieldMixin, CharField):
"""
    A simple char field that stores a shard, populated by a pre_save signal.
"""
pass
class ShardForeignKeyStorageFieldMixin(ShardStorageFieldMixin):
"""
A mixin for a field used to store a foreign key to another table which
stores the shard, usually a table which inherits from the ShardStorageModel.
"""
def __init__(self, *args, **kwargs):
setattr(self, 'django_sharding__stores_shard', True)
model_class = kwargs.get('to', args and args[0])
if type(model_class) == str:
app_label = model_class.split('.')[0]
app = apps.get_app_config(app_label)
if (1, 11) > django.VERSION:
model_class = app.get_model(model_class[len(app_label) + 1:])
else:
model_class = app.get_model(model_class[len(app_label) + 1:], require_ready=False)
setattr(self, 'django_sharding__shard_storage_table', model_class)
return super(ShardForeignKeyStorageFieldMixin, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ShardForeignKeyStorageFieldMixin, self).deconstruct()
return name, path, args, kwargs
def pre_save(self, model_instance, add):
self.save_shard(model_instance)
return super(ShardForeignKeyStorageFieldMixin, self).pre_save(model_instance, add)
def save_shard(self, model_instance):
shard_key = model_instance.get_shard_key()
if not getattr(model_instance, self.name):
shard_storage_table = getattr(self, 'django_sharding__shard_storage_table')
shard_group = getattr(self, 'django_sharding__shard_group')
app_config_app_label = getattr(settings, 'DJANGO_SHARDING_SETTINGS', {}).get('APP_CONFIG_APP', 'django_sharding')
bucketer = apps.get_app_config(app_config_app_label).get_bucketer(shard_group)
shard = bucketer.pick_shard(model_instance)
shard_object, _ = shard_storage_table.objects.get_or_create(shard_key=shard_key)
if not shard_object.shard:
shard_object.shard = shard
shard_object.save()
setattr(model_instance, self.name, shard_object)
class ShardForeignKeyStorageField(ShardForeignKeyStorageFieldMixin, ForeignKey):
"""
    A foreign key field that stores a reference to the shard, populated in the
    field's pre_save method.
"""
pass
class BasePostgresShardGeneratedIDField(object):
def __init__(self, *args, **kwargs):
if not hasattr(settings, 'SHARD_EPOCH'):
raise ValueError("PostgresShardGeneratedIDField requires a SHARD_EPOCH to be defined in your settings file.")
return super(BasePostgresShardGeneratedIDField, self).__init__(*args, **kwargs)
@staticmethod
def migration_receiver(*args, **kwargs):
sequence_name = "global_id_sequence"
db_alias = kwargs.get('using')
if not db_alias:
raise EnvironmentError("A pre-migration receiver did not receive a database alias. "
"Perhaps your app is not registered correctly?")
if settings.DATABASES[db_alias]['ENGINE'] in Backends.POSTGRES:
shard_id = settings.DATABASES[db_alias].get('SHARD_ID', 0)
create_postgres_global_sequence(sequence_name, db_alias, True)
create_postgres_shard_id_function(sequence_name, db_alias, shard_id)
class PostgresShardGeneratedIDAutoField(BasePostgresShardGeneratedIDField, BigAutoField):
"""
A field that uses a Postgres stored procedure to return an ID generated on the database.
"""
def db_type(self, connection, *args, **kwargs):
try:
from django.db.backends.postgresql.base import DatabaseWrapper as PostgresDatabaseWrapper
except ImportError:
from django.db.backends.postgresql_psycopg2.base import DatabaseWrapper as PostgresDatabaseWrapper
if connection.vendor == PostgresDatabaseWrapper.vendor:
return "bigint DEFAULT next_sharded_id()"
else:
return super(PostgresShardGeneratedIDAutoField, self).db_type(connection)
class PostgresShardGeneratedIDField(BasePostgresShardGeneratedIDField, BigIntegerField):
"""
A field that uses a Postgres stored procedure to return an ID generated on the database.
    Generates them prior to save with a separate call to the DB.
"""
def get_shard_from_id(self, instance_id):
group = getattr(self, 'django_sharding__shard_group', None)
shard_id_to_find = int(bin(instance_id)[-23:-10], 2) # We know where the shard id is stored in the PK's bits.
# We can check the shard id from the PK against the shard ID in the databases config
for alias, db_settings in settings.DATABASES.items():
if db_settings["SHARD_GROUP"] == group and db_settings["SHARD_ID"] == shard_id_to_find:
return alias
return None # Return None if we could not determine the shard so we can fall through to the next shard grab attempt
def get_pk_value_on_save(self, instance):
return self.generate_id(instance)
def pre_save(self, model_instance, add):
if getattr(model_instance, self.attname, None) is not None:
return super(PostgresShardGeneratedIDField, self).pre_save(model_instance, add)
value = self.generate_id(model_instance)
setattr(model_instance, self.attname, value)
return value
@staticmethod
def generate_id(instance):
shard = instance._state.db or instance.get_shard()
return get_next_sharded_id(shard)
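# A worked illustration of the bit layout assumed by get_shard_from_id above:
# the 13 bits directly above the lowest 10 bits of a generated id carry the
# shard id, with the timestamp/epoch portion in the higher bits. The numbers
# below are made up purely for demonstration.
def _example_shard_bits():
    shard_id = 42
    sequence = 7
    high_bits = 123456                      # stands in for the timestamp part
    instance_id = (high_bits << 23) | (shard_id << 10) | sequence
    assert int(bin(instance_id)[-23:-10], 2) == shard_id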
class PostgresShardForeignKey(ForeignKey):
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# If the database needs similar types for key fields however, the only
# thing we can do is making AutoField an IntegerField.
rel_field = self.target_field
if rel_field.get_internal_type() == "BigIntegerField":
return BigIntegerField().db_type(connection=connection)
return super(PostgresShardForeignKey, self).db_type(connection)
class PostgresShardOneToOne(OneToOneField):
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# If the database needs similar types for key fields however, the only
# thing we can do is making AutoField an IntegerField.
rel_field = self.target_field
if rel_field.get_internal_type() == "BigIntegerField":
return BigIntegerField().db_type(connection=connection)
return super(PostgresShardOneToOne, self).db_type(connection)
|
|
#!/usr/bin/env python
"""Usage: python suppress_output.py COMMAND...
Run the given command and print "in progress..." messages to stderr,
one per minute, as long as the command is producing some output. If
the command has not recently produced any output, no further messages
will be printed until it does.
When the command exits with return code 0, exit with code 0.
When the command exits with any other return code, print all output
produced by the command and exit with the same return code.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import subprocess
import signal
import time
import tempfile
import shutil
TIMEOUT = 60
def main():
command = sys.argv[1:]
with tempfile.TemporaryFile("a+b", 0) as log:
p = subprocess.Popen(command, stdout=log, stderr=subprocess.STDOUT)
# Rather than handling some signals ourselves, forward them.
def forward_signal(signum, frame):
p.send_signal(signum)
signal.signal(signal.SIGINT, forward_signal)
signal.signal(signal.SIGTERM, forward_signal)
signal.signal(signal.SIGQUIT, forward_signal)
signal.signal(signal.SIGHUP, forward_signal)
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
        # allow continuous integration environments to kill a hanging
# process accurately if it produces no output).
start_time = time.time()
last_blip = time.time()
last_log_size = log.tell()
counter = [0]
# Just poll it -- other approaches are more complex
try:
sleep_time = 0.001
while p.poll() is None:
sleep_time = min(2*sleep_time, 0.5)
time.sleep(sleep_time)
if time.time() - last_blip > TIMEOUT:
log_size = log.tell()
if log_size > last_log_size:
msg = " ... in progress ({0} elapsed)".format(elapsed(time.time() - start_time))
print(msg, file=sys.stderr)
sys.stderr.flush()
counter[0] += 1
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except: # noqa: E722
p.terminate()
raise
if counter[0] > 0:
if ret == 0:
msg = " ... ok ({0} elapsed)".format(elapsed(time.time() - start_time))
print(msg, file=sys.stderr)
sys.stderr.flush()
else:
msg = " ... failed ({0} elapsed, exit code {1})".format(
elapsed(time.time() - start_time), ret)
print(msg, file=sys.stderr)
sys.stderr.flush()
if ret != 0:
log.seek(0)
shutil.copyfileobj(log, sys.stdout.buffer)
sys.exit(ret)
def elapsed(t):
if t < 0:
sgn = '-'
t = abs(t)
else:
sgn = ''
if t < 60:
return "{0}{1:.0f} s".format(sgn, round(t))
elif t < 3600:
mins, secs = divmod(t, 60)
return "{0}{1:.0f} min {2:.0f} s".format(sgn, mins, secs)
else:
hours, mins = divmod(t, 3600)
mins, secs = divmod(mins, 60)
return "{0}{1:.0f} h {2:.0f} min {3:.0f} s".format(sgn, hours, mins, secs)
def test_elapsed():
assert elapsed(0.4) == '0 s'
assert elapsed(30.3) == '30 s'
assert elapsed(59.5) == '60 s'
assert elapsed(60.5) == '1 min 0 s'
assert elapsed(2*60 + 40.51) == '2 min 41 s'
assert elapsed(60 * 59.999) == '59 min 60 s'
assert elapsed(60 * 60.0) == '1 h 0 min 0 s'
assert elapsed(266*3600 + 13*60 + 12.4243) == '266 h 13 min 12 s'
def test_exitcode():
r0 = subprocess.call([sys.executable, __file__, sys.executable, '-c',
'import sys; sys.exit(0)'])
assert r0 == 0
r1 = subprocess.call([sys.executable, __file__, sys.executable, '-c',
'import sys; sys.exit(1)'])
assert r1 == 1
rs = subprocess.call([sys.executable, __file__, sys.executable, '-c',
'import os; os.kill(os.getpid(), 15)'])
assert rs != 0
def test_suppress(tmpdir):
p = subprocess.Popen([sys.executable, __file__, sys.executable, '-c',
'import sys; '
'sys.stdout.write("OUT"); '
'sys.stderr.write("ERR"); '
'sys.exit(0)'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
assert p.returncode == 0
assert out == b''
assert err == b''
p = subprocess.Popen([sys.executable, __file__, sys.executable, '-c',
'import sys; '
'sys.stdout.write("OUT"); '
'sys.stdout.flush(); '
'sys.stderr.write("ERR"); '
'sys.exit(1)'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
assert p.returncode == 1
assert out == b'OUTERR'
assert err == b''
def run_script_fast(path, script):
fn = os.path.join(path, 'suppress_output.py')
with open(__file__, 'rb') as f:
text = f.read()
text = text.replace(b'TIMEOUT = 60', b'TIMEOUT = 1')
with open(fn, 'wb') as f:
f.write(text)
p = subprocess.Popen([sys.executable, fn, sys.executable, '-c', script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
def test_suppress_long_ok(tmpdir):
returncode, out, err = run_script_fast(str(tmpdir),
'import sys, time; '
'sys.stdout.write("OUT"); '
'time.sleep(1.5); '
'sys.stderr.write("ERR"); '
'sys.exit(0)')
assert returncode == 0
assert out == b''
assert re.match(b'^ \.\.\. in progress \([0-9 sminh]* elapsed\)\n'
b' \.\.\. ok \([0-9 sminh]* elapsed\)\n$', err,
re.S)
def test_suppress_long_failed(tmpdir):
returncode, out, err = run_script_fast(str(tmpdir),
'import sys, time; '
'sys.stdout.write("OUT"); '
'time.sleep(1.5); '
'sys.stdout.flush(); '
'sys.stderr.write("ERR"); '
'sys.exit(1)')
assert returncode == 1
assert out == b'OUTERR'
assert re.match(b'^ \.\.\. in progress \([0-9 sminh]* elapsed\)\n'
b' \.\.\. failed \([0-9 sminh]* elapsed, exit code 1\)\n$', err,
re.S)
if __name__ == "__main__":
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cloudfiles
import httplib
import json
import mox
from django import http
from django.conf import settings
from django_openstack import api
from glance import client as glance_client
from mox import IsA
from novaclient.v1_1 import client
from openstack import compute as OSCompute
from openstackx import admin as OSAdmin
from openstackx import auth as OSAuth
from openstackx import extras as OSExtras
from django_openstack import test
from django_openstack.middleware import keystone
TEST_CONSOLE_KIND = 'vnc'
TEST_EMAIL = 'test@test.com'
TEST_HOSTNAME = 'hostname'
TEST_INSTANCE_ID = '2'
TEST_PASSWORD = '12345'
TEST_PORT = 8000
TEST_RETURN = 'retValue'
TEST_TENANT_DESCRIPTION = 'tenantDescription'
TEST_TENANT_ID = '1234'
TEST_TOKEN = 'aToken'
TEST_TOKEN_ID = 'userId'
TEST_URL = 'http://%s:%s/something/v1.0' % (TEST_HOSTNAME, TEST_PORT)
TEST_USERNAME = 'testUser'
class Server(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, image, attrs=None):
self.id = id
self.image = image
if attrs is not None:
self.attrs = attrs
def __eq__(self, other):
if self.id != other.id or \
self.image['id'] != other.image['id']:
return False
        for k, v in self.attrs.items():
            if other.attrs.__getattr__(k) != v:
return False
return True
def __ne__(self, other):
return not self == other
class Tenant(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, description, enabled):
self.id = id
self.description = description
self.enabled = enabled
def __eq__(self, other):
return self.id == other.id and \
self.description == other.description and \
self.enabled == other.enabled
def __ne__(self, other):
return not self == other
class Token(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, username, tenant_id, serviceCatalog=None):
self.id = id
self.user = {'name': username}
self.tenant_id = tenant_id
self.serviceCatalog = serviceCatalog
def __eq__(self, other):
return self.id == other.id and \
self.user['name'] == other.user['name'] and \
self.tenant_id == other.tenant_id and \
self.serviceCatalog == other.serviceCatalog
def __ne__(self, other):
return not self == other
class APIResource(api.APIResourceWrapper):
""" Simple APIResource for testing """
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerObject=None):
if innerObject is None:
class InnerAPIResource(object):
pass
innerObject = InnerAPIResource()
innerObject.foo = 'foo'
innerObject.bar = 'bar'
return APIResource(innerObject)
class APIDict(api.APIDictWrapper):
""" Simple APIDict for testing """
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerDict=None):
if innerDict is None:
innerDict = {'foo': 'foo',
'bar': 'bar'}
return APIDict(innerDict)
class NovaClientTestMixin(object):
def setUp(self):
super(NovaClientTestMixin, self).setUp()
self._original_novaclient = api.novaclient
api.novaclient = lambda request: self.stub_novaclient()
def stub_novaclient(self):
if not hasattr(self, "novaclient"):
self.mox.StubOutWithMock(client, 'Client')
self.novaclient = self.mox.CreateMock(client.Client)
return self.novaclient
def tearDown(self):
super(NovaClientTestMixin, self).tearDown()
api.novaclient = self._original_novaclient
class APIResourceWrapperTests(test.TestCase):
def test_get_attribute(self):
resource = APIResource.get_instance()
self.assertEqual(resource.foo, 'foo')
def test_get_invalid_attribute(self):
resource = APIResource.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
def test_get_inner_missing_attribute(self):
resource = APIResource.get_instance()
with self.assertRaises(AttributeError):
resource.baz
class APIDictWrapperTests(test.TestCase):
# APIDict allows for both attribute access and dictionary style [element]
# style access. Test both
def test_get_item(self):
resource = APIDict.get_instance()
self.assertEqual(resource.foo, 'foo')
self.assertEqual(resource['foo'], 'foo')
def test_get_invalid_item(self):
resource = APIDict.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
with self.assertRaises(KeyError):
resource['missing']
def test_get_inner_missing_attribute(self):
resource = APIDict.get_instance()
with self.assertRaises(AttributeError):
resource.baz
with self.assertRaises(KeyError):
resource['baz']
def test_get_with_default(self):
resource = APIDict.get_instance()
self.assertEqual(resource.get('foo'), 'foo')
self.assertIsNone(resource.get('baz'))
self.assertEqual('retValue', resource.get('baz', 'retValue'))
# Wrapper classes that only define _attrs don't need extra testing.
# Wrapper classes that have other attributes or methods need testing
class ImageWrapperTests(test.TestCase):
dict_with_properties = {
'properties':
{'image_state': 'running'},
'size': 100,
}
dict_without_properties = {
'size': 100,
}
def test_get_properties(self):
image = api.Image(self.dict_with_properties)
image_props = image.properties
self.assertIsInstance(image_props, api.ImageProperties)
self.assertEqual(image_props.image_state, 'running')
def test_get_other(self):
image = api.Image(self.dict_with_properties)
self.assertEqual(image.size, 100)
def test_get_properties_missing(self):
image = api.Image(self.dict_without_properties)
with self.assertRaises(AttributeError):
image.properties
def test_get_other_missing(self):
image = api.Image(self.dict_without_properties)
        self.assertNotIn('missing', image._attrs,
                         msg="Test assumption broken. Find new missing attribute")
        with self.assertRaises(AttributeError):
            image.missing
class ServerWrapperTests(test.TestCase):
HOST = 'hostname'
ID = '1'
IMAGE_NAME = 'imageName'
IMAGE_OBJ = {'id': '3', 'links': [{'href': '3', u'rel': u'bookmark'}]}
def setUp(self):
super(ServerWrapperTests, self).setUp()
# these are all objects "fetched" from the api
self.inner_attrs = {'host': self.HOST}
self.inner_server = Server(self.ID, self.IMAGE_OBJ, self.inner_attrs)
self.inner_server_no_attrs = Server(self.ID, self.IMAGE_OBJ)
#self.request = self.mox.CreateMock(http.HttpRequest)
def test_get_attrs(self):
server = api.Server(self.inner_server, self.request)
attrs = server.attrs
# for every attribute in the "inner" object passed to the api wrapper,
# see if it can be accessed through the api.ServerAttribute instance
for k in self.inner_attrs:
self.assertEqual(attrs.__getattr__(k), self.inner_attrs[k])
def test_get_other(self):
server = api.Server(self.inner_server, self.request)
self.assertEqual(server.id, self.ID)
def test_get_attrs_missing(self):
server = api.Server(self.inner_server_no_attrs, self.request)
with self.assertRaises(AttributeError):
server.attrs
def test_get_other_missing(self):
server = api.Server(self.inner_server, self.request)
        self.assertNotIn('missing', server._attrs,
                         msg="Test assumption broken. Find new missing attribute")
        with self.assertRaises(AttributeError):
            server.missing
def test_image_name(self):
self.mox.StubOutWithMock(api, 'image_get')
api.image_get(IsA(http.HttpRequest),
self.IMAGE_OBJ['id']
).AndReturn(api.Image({'name': self.IMAGE_NAME}))
server = api.Server(self.inner_server, self.request)
self.mox.ReplayAll()
image_name = server.image_name
self.assertEqual(image_name, self.IMAGE_NAME)
self.mox.VerifyAll()
class ApiHelperTests(test.TestCase):
""" Tests for functions that don't use one of the api objects """
def test_url_for(self):
GLANCE_URL = 'http://glance/glanceapi/'
NOVA_URL = 'http://nova/novapi/'
url = api.url_for(self.request, 'glance')
self.assertEqual(url, GLANCE_URL + 'internal')
url = api.url_for(self.request, 'glance', admin=False)
self.assertEqual(url, GLANCE_URL + 'internal')
url = api.url_for(self.request, 'glance', admin=True)
self.assertEqual(url, GLANCE_URL + 'admin')
url = api.url_for(self.request, 'nova')
self.assertEqual(url, NOVA_URL + 'internal')
url = api.url_for(self.request, 'nova', admin=False)
self.assertEqual(url, NOVA_URL + 'internal')
url = api.url_for(self.request, 'nova', admin=True)
self.assertEqual(url, NOVA_URL + 'admin')
self.assertNotIn('notAnApi', self.request.user.service_catalog,
'Select a new nonexistent service catalog key')
with self.assertRaises(api.ServiceCatalogException):
url = api.url_for(self.request, 'notAnApi')
class AccountApiTests(test.TestCase):
def stub_account_api(self):
self.mox.StubOutWithMock(api, 'account_api')
account_api = self.mox.CreateMock(OSExtras.Account)
api.account_api(IsA(http.HttpRequest)).AndReturn(account_api)
return account_api
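# Note on the mox pattern used throughout these tests: the stub_* helpers record the
# expected calls (the .AndReturn(...) lines), self.mox.ReplayAll() switches the mocks
# into replay mode so the code under test can run against them, and
# self.mox.VerifyAll() asserts that every recorded expectation was actually exercised.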
def test_get_account_api(self):
self.mox.StubOutClassWithMocks(OSExtras, 'Account')
OSExtras.Account(auth_token=TEST_TOKEN, management_url=TEST_URL)
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(
IsA(http.HttpRequest), 'identity', True).AndReturn(TEST_URL)
api.url_for(
IsA(http.HttpRequest), 'identity', True).AndReturn(TEST_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.account_api(self.request))
self.mox.VerifyAll()
def test_tenant_create(self):
DESCRIPTION = 'aDescription'
ENABLED = True
account_api = self.stub_account_api()
account_api.tenants = self.mox.CreateMockAnything()
account_api.tenants.create(TEST_TENANT_ID, DESCRIPTION,
ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_create(self.request, TEST_TENANT_ID,
DESCRIPTION, ENABLED)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_tenant_get(self):
account_api = self.stub_account_api()
account_api.tenants = self.mox.CreateMockAnything()
account_api.tenants.get(TEST_TENANT_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_get(self.request, TEST_TENANT_ID)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_tenant_list(self):
tenants = (TEST_RETURN, TEST_RETURN + '2')
account_api = self.stub_account_api()
account_api.tenants = self.mox.CreateMockAnything()
account_api.tenants.list().AndReturn(tenants)
self.mox.ReplayAll()
ret_val = api.tenant_list(self.request)
self.assertEqual(len(ret_val), len(tenants))
for tenant in ret_val:
self.assertIsInstance(tenant, api.Tenant)
self.assertIn(tenant._apiresource, tenants)
self.mox.VerifyAll()
def test_tenant_update(self):
DESCRIPTION = 'aDescription'
ENABLED = True
account_api = self.stub_account_api()
account_api.tenants = self.mox.CreateMockAnything()
account_api.tenants.update(TEST_TENANT_ID, DESCRIPTION,
ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_update(self.request, TEST_TENANT_ID,
DESCRIPTION, ENABLED)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_create(self):
account_api = self.stub_account_api()
account_api.users = self.mox.CreateMockAnything()
account_api.users.create(TEST_USERNAME, TEST_EMAIL, TEST_PASSWORD,
TEST_TENANT_ID, True).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_create(self.request, TEST_USERNAME, TEST_EMAIL,
TEST_PASSWORD, TEST_TENANT_ID, True)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_delete(self):
account_api = self.stub_account_api()
account_api.users = self.mox.CreateMockAnything()
account_api.users.delete(TEST_USERNAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_delete(self.request, TEST_USERNAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_user_get(self):
account_api = self.stub_account_api()
account_api.users = self.mox.CreateMockAnything()
account_api.users.get(TEST_USERNAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_get(self.request, TEST_USERNAME)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_list(self):
users = (TEST_USERNAME, TEST_USERNAME + '2')
account_api = self.stub_account_api()
account_api.users = self.mox.CreateMockAnything()
account_api.users.list().AndReturn(users)
self.mox.ReplayAll()
ret_val = api.user_list(self.request)
self.assertEqual(len(ret_val), len(users))
for user in ret_val:
self.assertIsInstance(user, api.User)
self.assertIn(user._apiresource, users)
self.mox.VerifyAll()
def test_user_update_email(self):
account_api = self.stub_account_api()
account_api.users = self.mox.CreateMockAnything()
account_api.users.update_email(TEST_USERNAME,
TEST_EMAIL).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_email(self.request, TEST_USERNAME,
TEST_EMAIL)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_update_password(self):
account_api = self.stub_account_api()
account_api.users = self.mox.CreateMockAnything()
account_api.users.update_password(TEST_USERNAME,
TEST_PASSWORD).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_password(self.request, TEST_USERNAME,
TEST_PASSWORD)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_update_tenant(self):
account_api = self.stub_account_api()
account_api.users = self.mox.CreateMockAnything()
account_api.users.update_tenant(TEST_USERNAME,
TEST_TENANT_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_tenant(self.request, TEST_USERNAME,
TEST_TENANT_ID)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class AdminApiTests(test.TestCase):
def stub_admin_api(self, count=1):
self.mox.StubOutWithMock(api, 'admin_api')
admin_api = self.mox.CreateMock(OSAdmin.Admin)
for i in range(count):
api.admin_api(IsA(http.HttpRequest)).AndReturn(admin_api)
return admin_api
def test_get_admin_api(self):
self.mox.StubOutClassWithMocks(OSAdmin, 'Admin')
OSAdmin.Admin(auth_token=TEST_TOKEN, management_url=TEST_URL)
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'nova', True).AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'nova', True).AndReturn(TEST_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.admin_api(self.request))
self.mox.VerifyAll()
def test_flavor_create(self):
FLAVOR_DISK = 1000
FLAVOR_ID = 6
FLAVOR_MEMORY = 1024
FLAVOR_NAME = 'newFlavor'
FLAVOR_VCPU = 2
admin_api = self.stub_admin_api()
admin_api.flavors = self.mox.CreateMockAnything()
admin_api.flavors.create(FLAVOR_NAME, FLAVOR_MEMORY, FLAVOR_VCPU,
FLAVOR_DISK, FLAVOR_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_create(self.request, FLAVOR_NAME,
str(FLAVOR_MEMORY), str(FLAVOR_VCPU),
str(FLAVOR_DISK), FLAVOR_ID)
self.assertIsInstance(ret_val, api.Flavor)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_flavor_delete(self):
FLAVOR_ID = 6
admin_api = self.stub_admin_api(count=2)
admin_api.flavors = self.mox.CreateMockAnything()
admin_api.flavors.delete(FLAVOR_ID, False).AndReturn(TEST_RETURN)
admin_api.flavors.delete(FLAVOR_ID, True).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_delete(self.request, FLAVOR_ID)
self.assertIsNone(ret_val)
ret_val = api.flavor_delete(self.request, FLAVOR_ID, purge=True)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_service_get(self):
NAME = 'serviceName'
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.get(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.service_get(self.request, NAME)
self.assertIsInstance(ret_val, api.Services)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_service_list(self):
services = (TEST_RETURN, TEST_RETURN + '2')
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.list().AndReturn(services)
self.mox.ReplayAll()
ret_val = api.service_list(self.request)
for service in ret_val:
self.assertIsInstance(service, api.Services)
self.assertIn(service._apiresource, services)
self.mox.VerifyAll()
def test_service_update(self):
ENABLED = True
NAME = 'serviceName'
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.update(NAME, ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.service_update(self.request, NAME, ENABLED)
self.assertIsInstance(ret_val, api.Services)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class AuthApiTests(test.TestCase):
def test_get_auth_api(self):
settings.OPENSTACK_KEYSTONE_URL = TEST_URL
self.mox.StubOutClassWithMocks(OSAuth, 'Auth')
OSAuth.Auth(management_url=settings.OPENSTACK_KEYSTONE_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.auth_api())
self.mox.VerifyAll()
def test_token_get_tenant(self):
self.mox.StubOutWithMock(api, 'auth_api')
auth_api_mock = self.mox.CreateMockAnything()
api.auth_api().AndReturn(auth_api_mock)
tenants_mock = self.mox.CreateMockAnything()
auth_api_mock.tenants = tenants_mock
tenant_list = [Tenant('notTheDroid',
'notTheDroid_desc',
False),
Tenant(TEST_TENANT_ID,
TEST_TENANT_DESCRIPTION,
True),
]
tenants_mock.for_token('aToken').AndReturn(tenant_list)
self.request.session = {'token': 'aToken'}
self.mox.ReplayAll()
ret_val = api.token_get_tenant(self.request, TEST_TENANT_ID)
self.assertEqual(tenant_list[1], ret_val)
self.mox.VerifyAll()
def test_token_get_tenant_no_tenant(self):
self.mox.StubOutWithMock(api, 'auth_api')
auth_api_mock = self.mox.CreateMockAnything()
api.auth_api().AndReturn(auth_api_mock)
tenants_mock = self.mox.CreateMockAnything()
auth_api_mock.tenants = tenants_mock
tenant_list = [Tenant('notTheDroid',
'notTheDroid_desc',
False),
]
tenants_mock.for_token('aToken').AndReturn(tenant_list)
self.request.session = {'token': 'aToken'}
self.mox.ReplayAll()
ret_val = api.token_get_tenant(self.request, TEST_TENANT_ID)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_token_list_tenants(self):
self.mox.StubOutWithMock(api, 'auth_api')
auth_api_mock = self.mox.CreateMockAnything()
api.auth_api().AndReturn(auth_api_mock)
tenants_mock = self.mox.CreateMockAnything()
auth_api_mock.tenants = tenants_mock
tenant_list = [Tenant('notTheDroid',
'notTheDroid_desc',
False),
Tenant(TEST_TENANT_ID,
TEST_TENANT_DESCRIPTION,
True),
]
tenants_mock.for_token('aToken').AndReturn(tenant_list)
self.mox.ReplayAll()
ret_val = api.token_list_tenants(self.request, 'aToken')
for tenant in ret_val:
self.assertIn(tenant, tenant_list)
self.mox.VerifyAll()
def test_token_create(self):
self.mox.StubOutWithMock(api, 'auth_api')
auth_api_mock = self.mox.CreateMockAnything()
api.auth_api().AndReturn(auth_api_mock)
tokens_mock = self.mox.CreateMockAnything()
auth_api_mock.tokens = tokens_mock
test_token = Token(TEST_TOKEN_ID, TEST_USERNAME, TEST_TENANT_ID)
tokens_mock.create(TEST_TENANT_ID, TEST_USERNAME,
TEST_PASSWORD).AndReturn(test_token)
self.mox.ReplayAll()
ret_val = api.token_create(self.request, TEST_TENANT_ID,
TEST_USERNAME, TEST_PASSWORD)
self.assertEqual(test_token, ret_val)
self.mox.VerifyAll()
class ComputeApiTests(NovaClientTestMixin, test.TestCase):
def stub_compute_api(self, count=1):
self.mox.StubOutWithMock(api, 'compute_api')
compute_api = self.mox.CreateMock(OSCompute.Compute)
for i in range(count):
api.compute_api(IsA(http.HttpRequest)).AndReturn(compute_api)
return compute_api
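# Some tests in this class stub the older OSCompute client via stub_compute_api(),
# while others (e.g. test_flavor_get, test_server_create) go through stub_novaclient()
# provided by NovaClientTestMixin.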
def test_get_compute_api(self):
class ComputeClient(object):
__slots__ = ['auth_token', 'management_url']
self.mox.StubOutClassWithMocks(OSCompute, 'Compute')
compute_api = OSCompute.Compute(auth_token=TEST_TOKEN,
management_url=TEST_URL)
compute_api.client = ComputeClient()
self.mox.StubOutWithMock(api, 'url_for')
# url_for is called three times here; looks like a good place for optimization
api.url_for(IsA(http.HttpRequest), 'nova').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'nova').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'nova').AndReturn(TEST_URL)
self.mox.ReplayAll()
compute_api = api.compute_api(self.request)
self.assertIsNotNone(compute_api)
self.assertEqual(compute_api.client.auth_token, TEST_TOKEN)
self.assertEqual(compute_api.client.management_url, TEST_URL)
self.mox.VerifyAll()
def test_flavor_get(self):
FLAVOR_ID = 6
novaclient = self.stub_novaclient()
novaclient.flavors = self.mox.CreateMockAnything()
novaclient.flavors.get(FLAVOR_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_get(self.request, FLAVOR_ID)
self.assertIsInstance(ret_val, api.Flavor)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_server_delete(self):
INSTANCE = 'anInstance'
compute_api = self.stub_compute_api()
compute_api.servers = self.mox.CreateMockAnything()
compute_api.servers.delete(INSTANCE).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_delete(self.request, INSTANCE)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_server_reboot(self):
INSTANCE_ID = '2'
HARDNESS = 'diamond'
self.mox.StubOutWithMock(api, 'server_get')
server = self.mox.CreateMock(OSCompute.Server)
server.reboot(OSCompute.servers.REBOOT_HARD).AndReturn(TEST_RETURN)
api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
server = self.mox.CreateMock(OSCompute.Server)
server.reboot(HARDNESS).AndReturn(TEST_RETURN)
api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
self.mox.ReplayAll()
ret_val = api.server_reboot(self.request, INSTANCE_ID)
self.assertIsNone(ret_val)
ret_val = api.server_reboot(self.request, INSTANCE_ID,
hardness=HARDNESS)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_server_create(self):
NAME = 'server'
IMAGE = 'anImage'
FLAVOR = 'cherry'
USER_DATA = {'nuts': 'berries'}
KEY = 'user'
SECGROUP = self.mox.CreateMock(api.SecurityGroup)
server = self.mox.CreateMock(OSCompute.Server)
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.create(NAME, IMAGE, FLAVOR, userdata=USER_DATA,
security_groups=[SECGROUP], key_name=KEY)\
.AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_create(self.request, NAME, IMAGE, FLAVOR,
KEY, USER_DATA, [SECGROUP])
self.assertIsInstance(ret_val, api.Server)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class ExtrasApiTests(NovaClientTestMixin, test.TestCase):
def stub_extras_api(self, count=1):
self.mox.StubOutWithMock(api, 'extras_api')
extras_api = self.mox.CreateMock(OSExtras.Extras)
for i in range(count):
api.extras_api(IsA(http.HttpRequest)).AndReturn(extras_api)
return extras_api
def test_get_extras_api(self):
self.mox.StubOutClassWithMocks(OSExtras, 'Extras')
OSExtras.Extras(auth_token=TEST_TOKEN, management_url=TEST_URL)
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'nova').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'nova').AndReturn(TEST_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.extras_api(self.request))
self.mox.VerifyAll()
def test_console_create(self):
extras_api = self.stub_extras_api(count=2)
extras_api.consoles = self.mox.CreateMockAnything()
extras_api.consoles.create(
TEST_INSTANCE_ID, TEST_CONSOLE_KIND).AndReturn(TEST_RETURN)
extras_api.consoles.create(
TEST_INSTANCE_ID, 'text').AndReturn(TEST_RETURN + '2')
self.mox.ReplayAll()
ret_val = api.console_create(self.request,
TEST_INSTANCE_ID,
TEST_CONSOLE_KIND)
self.assertIsInstance(ret_val, api.Console)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
ret_val = api.console_create(self.request, TEST_INSTANCE_ID)
self.assertIsInstance(ret_val, api.Console)
self.assertEqual(ret_val._apiresource, TEST_RETURN + '2')
self.mox.VerifyAll()
def test_flavor_list(self):
flavors = (TEST_RETURN, TEST_RETURN + '2')
novaclient = self.stub_novaclient()
novaclient.flavors = self.mox.CreateMockAnything()
novaclient.flavors.list().AndReturn(flavors)
self.mox.ReplayAll()
ret_val = api.flavor_list(self.request)
self.assertEqual(len(ret_val), len(flavors))
for flavor in ret_val:
self.assertIsInstance(flavor, api.Flavor)
self.assertIn(flavor._apiresource, flavors)
self.mox.VerifyAll()
def test_server_list(self):
servers = (TEST_RETURN, TEST_RETURN + '2')
extras_api = self.stub_extras_api()
extras_api.servers = self.mox.CreateMockAnything()
extras_api.servers.list().AndReturn(servers)
self.mox.ReplayAll()
ret_val = api.server_list(self.request)
self.assertEqual(len(ret_val), len(servers))
for server in ret_val:
self.assertIsInstance(server, api.Server)
self.assertIn(server._apiresource, servers)
self.mox.VerifyAll()
def test_usage_get(self):
extras_api = self.stub_extras_api()
extras_api.usage = self.mox.CreateMockAnything()
extras_api.usage.get(TEST_TENANT_ID, 'start',
'end').AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.usage_get(self.request, TEST_TENANT_ID, 'start', 'end')
self.assertIsInstance(ret_val, api.Usage)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_usage_list(self):
usages = (TEST_RETURN, TEST_RETURN + '2')
extras_api = self.stub_extras_api()
extras_api.usage = self.mox.CreateMockAnything()
extras_api.usage.list('start', 'end').AndReturn(usages)
self.mox.ReplayAll()
ret_val = api.usage_list(self.request, 'start', 'end')
self.assertEqual(len(ret_val), len(usages))
for usage in ret_val:
self.assertIsInstance(usage, api.Usage)
self.assertIn(usage._apiresource, usages)
self.mox.VerifyAll()
def test_server_get(self):
INSTANCE_ID = '2'
extras_api = self.stub_extras_api()
extras_api.servers = self.mox.CreateMockAnything()
extras_api.servers.get(INSTANCE_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_get(self.request, INSTANCE_ID)
self.assertIsInstance(ret_val, api.Server)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class APIExtensionTests(NovaClientTestMixin, test.TestCase):
def setUp(self):
super(APIExtensionTests, self).setUp()
keypair = api.KeyPair(APIResource.get_instance())
keypair.id = 1
keypair.name = TEST_RETURN
self.keypair = keypair
self.keypairs = [keypair, ]
floating_ip = api.FloatingIp(APIResource.get_instance())
floating_ip.id = 1
floating_ip.fixed_ip = '10.0.0.4'
floating_ip.instance_id = 1
floating_ip.ip = '58.58.58.58'
self.floating_ip = floating_ip
self.floating_ips = [floating_ip, ]
server = api.Server(APIResource.get_instance(), self.request)
server.id = 1
self.server = server
self.servers = [server, ]
def test_server_snapshot_create(self):
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.create_image(IsA(int), IsA(str)).\
AndReturn(self.server)
self.mox.ReplayAll()
server = api.snapshot_create(self.request, 1, 'test-snapshot')
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_tenant_floating_ip_list(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.list().AndReturn(self.floating_ips)
self.mox.ReplayAll()
floating_ips = api.tenant_floating_ip_list(self.request)
self.assertEqual(len(floating_ips), len(self.floating_ips))
self.assertIsInstance(floating_ips[0], api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_get(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_get(self.request, 1)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_allocate(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.create().AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_allocate(self.request)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_release(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.delete(1).AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_release(self.request, 1)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_server_remove_floating_ip(self):
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers.get(IsA(int)).AndReturn(self.server)
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
novaclient.servers.remove_floating_ip(IsA(self.server.__class__),
IsA(self.floating_ip.__class__)) \
.AndReturn(self.server)
self.mox.ReplayAll()
server = api.server_remove_floating_ip(self.request, 1, 1)
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_server_add_floating_ip(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(IsA(int)).AndReturn(self.server)
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
novaclient.servers.add_floating_ip(IsA(self.server.__class__),
IsA(self.floating_ip.__class__)) \
.AndReturn(self.server)
self.mox.ReplayAll()
server = api.server_add_floating_ip(self.request, 1, 1)
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_keypair_create(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.create(IsA(str)).AndReturn(self.keypair)
self.mox.ReplayAll()
ret_val = api.keypair_create(self.request, TEST_RETURN)
self.assertIsInstance(ret_val, api.KeyPair)
self.assertEqual(ret_val.name, self.keypair.name)
self.mox.VerifyAll()
def test_keypair_delete(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.delete(IsA(int))
self.mox.ReplayAll()
ret_val = api.keypair_delete(self.request, self.keypair.id)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_keypair_list(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.list().AndReturn(self.keypairs)
self.mox.ReplayAll()
ret_val = api.keypair_list(self.request)
self.assertEqual(len(ret_val), len(self.keypairs))
for keypair in ret_val:
self.assertIsInstance(keypair, api.KeyPair)
self.mox.VerifyAll()
class GlanceApiTests(test.TestCase):
def stub_glance_api(self, count=1):
self.mox.StubOutWithMock(api, 'glance_api')
glance_api = self.mox.CreateMock(glance_client.Client)
glance_api.token = TEST_TOKEN
for i in range(count):
api.glance_api(IsA(http.HttpRequest)).AndReturn(glance_api)
return glance_api
def test_get_glance_api(self):
self.mox.StubOutClassWithMocks(glance_client, 'Client')
client_instance = glance_client.Client(TEST_HOSTNAME, TEST_PORT,
auth_tok=TEST_TOKEN)
# Normally ``auth_tok`` is set in ``Client.__init__``, but mox doesn't
# duplicate that behavior so we set it manually.
client_instance.auth_tok = TEST_TOKEN
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'glance').AndReturn(TEST_URL)
self.mox.ReplayAll()
ret_val = api.glance_api(self.request)
self.assertIsNotNone(ret_val)
self.assertEqual(ret_val.auth_tok, TEST_TOKEN)
self.mox.VerifyAll()
def test_image_create(self):
IMAGE_FILE = 'someData'
IMAGE_META = {'metadata': 'foo'}
glance_api = self.stub_glance_api()
glance_api.add_image(IMAGE_META, IMAGE_FILE).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_create(self.request, IMAGE_META, IMAGE_FILE)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
self.mox.VerifyAll()
def test_image_delete(self):
IMAGE_ID = '1'
glance_api = self.stub_glance_api()
glance_api.delete_image(IMAGE_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_delete(self.request, IMAGE_ID)
self.assertEqual(ret_val, TEST_RETURN)
self.mox.VerifyAll()
def test_image_get(self):
IMAGE_ID = '1'
glance_api = self.stub_glance_api()
glance_api.get_image(IMAGE_ID).AndReturn([TEST_RETURN])
self.mox.ReplayAll()
ret_val = api.image_get(self.request, IMAGE_ID)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
self.mox.VerifyAll()
def test_image_list_detailed(self):
images = (TEST_RETURN, TEST_RETURN + '2')
glance_api = self.stub_glance_api()
glance_api.get_images_detailed().AndReturn(images)
self.mox.ReplayAll()
ret_val = api.image_list_detailed(self.request)
self.assertEqual(len(ret_val), len(images))
for image in ret_val:
self.assertIsInstance(image, api.Image)
self.assertIn(image._apidict, images)
self.mox.VerifyAll()
def test_image_update(self):
IMAGE_ID = '1'
IMAGE_META = {'metadata': 'foobar'}
glance_api = self.stub_glance_api(count=2)
glance_api.update_image(IMAGE_ID, image_meta={}).AndReturn(TEST_RETURN)
glance_api.update_image(IMAGE_ID,
image_meta=IMAGE_META).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_update(self.request, IMAGE_ID)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
ret_val = api.image_update(self.request,
IMAGE_ID,
image_meta=IMAGE_META)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
self.mox.VerifyAll()
class SwiftApiTests(test.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.request = http.HttpRequest()
self.request.session = dict()
self.request.session['token'] = TEST_TOKEN
def tearDown(self):
self.mox.UnsetStubs()
def stub_swift_api(self, count=1):
self.mox.StubOutWithMock(api, 'swift_api')
swift_api = self.mox.CreateMock(cloudfiles.connection.Connection)
for i in range(count):
api.swift_api(IsA(http.HttpRequest)).AndReturn(swift_api)
return swift_api
def test_swift_get_containers(self):
containers = (TEST_RETURN, TEST_RETURN + '2')
swift_api = self.stub_swift_api()
swift_api.get_all_containers().AndReturn(containers)
self.mox.ReplayAll()
ret_val = api.swift_get_containers(self.request)
self.assertEqual(len(ret_val), len(containers))
for container in ret_val:
self.assertIsInstance(container, api.Container)
self.assertIn(container._apiresource, containers)
self.mox.VerifyAll()
def test_swift_create_container(self):
NAME = 'containerName'
swift_api = self.stub_swift_api()
self.mox.StubOutWithMock(api, 'swift_container_exists')
api.swift_container_exists(self.request,
NAME).AndReturn(False)
swift_api.create_container(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_create_container(self.request, NAME)
self.assertIsInstance(ret_val, api.Container)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_swift_delete_container(self):
NAME = 'containerName'
swift_api = self.stub_swift_api()
swift_api.delete_container(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_delete_container(self.request, NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_get_objects(self):
NAME = 'containerName'
swift_objects = (TEST_RETURN, TEST_RETURN + '2')
container = self.mox.CreateMock(cloudfiles.container.Container)
container.get_objects(prefix=None).AndReturn(swift_objects)
swift_api = self.stub_swift_api()
swift_api.get_container(NAME).AndReturn(container)
self.mox.ReplayAll()
ret_val = api.swift_get_objects(self.request, NAME)
self.assertEqual(len(ret_val), len(swift_objects))
for swift_object in ret_val:
self.assertIsInstance(swift_object, api.SwiftObject)
self.assertIn(swift_object._apiresource, swift_objects)
self.mox.VerifyAll()
def test_swift_get_objects_with_prefix(self):
NAME = 'containerName'
PREFIX = 'prefacedWith'
swift_objects = (TEST_RETURN, TEST_RETURN + '2')
container = self.mox.CreateMock(cloudfiles.container.Container)
container.get_objects(prefix=PREFIX).AndReturn(swift_objects)
swift_api = self.stub_swift_api()
swift_api.get_container(NAME).AndReturn(container)
self.mox.ReplayAll()
ret_val = api.swift_get_objects(self.request,
NAME,
prefix=PREFIX)
self.assertEqual(len(ret_val), len(swift_objects))
for swift_object in ret_val:
self.assertIsInstance(swift_object, api.SwiftObject)
self.assertIn(swift_object._apiresource, swift_objects)
self.mox.VerifyAll()
def test_swift_upload_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
OBJECT_DATA = 'someData'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.create_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.write(OBJECT_DATA).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_upload_object(self.request,
CONTAINER_NAME,
OBJECT_NAME,
OBJECT_DATA)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_delete_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.delete_object(OBJECT_NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_delete_object(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_get_object_data(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
OBJECT_DATA = 'objectData'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.stream().AndReturn(OBJECT_DATA)
self.mox.ReplayAll()
ret_val = api.swift_get_object_data(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertEqual(ret_val, OBJECT_DATA)
self.mox.VerifyAll()
def test_swift_object_exists(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
self.mox.ReplayAll()
ret_val = api.swift_object_exists(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertTrue(ret_val)
self.mox.VerifyAll()
def test_swift_copy_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
self.mox.StubOutWithMock(api, 'swift_object_exists')
swift_object = self.mox.CreateMock(cloudfiles.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
api.swift_object_exists(self.request,
CONTAINER_NAME,
OBJECT_NAME).AndReturn(False)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.copy_to(CONTAINER_NAME, OBJECT_NAME)
self.mox.ReplayAll()
ret_val = api.swift_copy_object(self.request, CONTAINER_NAME,
OBJECT_NAME, CONTAINER_NAME,
OBJECT_NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import os
from builtins import object, open, str
from multiprocessing import cpu_count
from future.utils import text_type
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.dependency_context import DependencyContext
from pants.backend.jvm.subsystems.java import Java
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.zinc import Zinc
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin
from pants.backend.jvm.tasks.classpath_products import ClasspathEntry
from pants.backend.jvm.tasks.jvm_compile.class_not_found_error_patterns import \
CLASS_NOT_FOUND_ERROR_PATTERNS
from pants.backend.jvm.tasks.jvm_compile.compile_context import CompileContext
from pants.backend.jvm.tasks.jvm_compile.execution_graph import (ExecutionFailure, ExecutionGraph,
Job)
from pants.backend.jvm.tasks.jvm_compile.missing_dependency_finder import (CompileErrorExtractor,
MissingDependencyFinder)
from pants.backend.jvm.tasks.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.worker_pool import WorkerPool
from pants.base.workunit import WorkUnitLabel
from pants.engine.fs import PathGlobs, PathGlobsAndRoot
from pants.java.distribution.distribution import DistributionLocator
from pants.option.compiler_option_sets_mixin import CompilerOptionSetsMixin
from pants.reporting.reporting_utils import items_to_report_element
from pants.util.contextutil import Timer
from pants.util.dirutil import (fast_relpath, read_file, safe_delete, safe_mkdir, safe_rmtree,
safe_walk)
from pants.util.fileutil import create_size_estimators
from pants.util.memo import memoized_method, memoized_property
class JvmCompile(CompilerOptionSetsMixin, NailgunTaskBase):
"""A common framework for JVM compilation.
To subclass for a specific JVM language, implement the static values and methods
mentioned below under "Subclasses must implement".
"""
size_estimators = create_size_estimators()
@classmethod
def size_estimator_by_name(cls, estimation_strategy_name):
return cls.size_estimators[estimation_strategy_name]
@classmethod
def register_options(cls, register):
super(JvmCompile, cls).register_options(register)
register('--args', advanced=True, type=list,
default=list(cls.get_args_default(register.bootstrap)), fingerprint=True,
help='Pass these extra args to the compiler.')
register('--clear-invalid-analysis', advanced=True, type=bool,
help='When set, any invalid/incompatible analysis files will be deleted '
'automatically. When unset, an error is raised instead.')
# TODO(#7682): convert these into option sets!
register('--warnings', default=True, type=bool, fingerprint=True,
help='Compile with all configured warnings enabled.')
register('--warning-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_warning_args_default()),
help='Extra compiler args to use when warnings are enabled.')
register('--no-warning-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_no_warning_args_default()),
help='Extra compiler args to use when warnings are disabled.')
register('--debug-symbols', type=bool, fingerprint=True,
help='Compile with debug symbols enabled.')
register('--debug-symbol-args', advanced=True, type=list, fingerprint=True,
default=['-C-g:lines,source,vars'],
help='Extra args to enable debug symbols.')
register('--delete-scratch', advanced=True, default=True, type=bool,
help='Delete intermediate scratch files after compilation. Disable this option to '
'leave them around for debugging build problems.')
register('--worker-count', advanced=True, type=int, default=cpu_count(),
help='The number of concurrent workers to use when '
'compiling with {task}. Defaults to the '
'current machine\'s CPU count.'.format(task=cls._name))
register('--size-estimator', advanced=True,
choices=list(cls.size_estimators.keys()), default='filesize',
help='The method of target size estimation. The size estimator estimates the size '
'of targets in order to build the largest targets first (subject to dependency '
'constraints). Choose \'random\' to choose random sizes for each target, which '
'may be useful for distributed builds.')
register('--capture-classpath', advanced=True, type=bool, default=True,
fingerprint=True,
help='Capture classpath to per-target newline-delimited text files. These files will '
'be packaged into any jar artifacts that are created from the jvm targets.')
register('--suggest-missing-deps', type=bool,
help='Suggest missing dependencies on a best-effort basis from target\'s transitive '
'deps for compilation failures that are due to class not found.')
register('--buildozer',
help='Path to buildozer for suggest-missing-deps command lines. '
'If absent, no command line will be suggested to fix missing deps.')
register('--missing-deps-not-found-msg', advanced=True, type=str,
help='The message to print when pants can\'t find any suggestions for targets '
'containing the classes not found during compilation. This should '
'likely include a link to documentation about dependency management.',
default='Please see https://www.pantsbuild.org/3rdparty_jvm.html#strict-dependencies '
'for more information.')
register('--class-not-found-error-patterns', advanced=True, type=list,
default=CLASS_NOT_FOUND_ERROR_PATTERNS,
help='List of regular expression patterns that extract class not found '
'compile errors.')
register('--use-classpath-jars', advanced=True, type=bool, fingerprint=True,
help='Use jar files on the compile_classpath. Note: Using this option degrades '
'incremental compile between targets.')
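# These options are consumed below via self.get_options() (e.g.
# self.get_options().worker_count, self.get_options().use_classpath_jars); they can be
# set in the task's options scope on the command line or in config, per standard pants
# option handling.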
@classmethod
def implementation_version(cls):
return super(JvmCompile, cls).implementation_version() + [('JvmCompile', 3)]
@classmethod
def prepare(cls, options, round_manager):
super(JvmCompile, cls).prepare(options, round_manager)
round_manager.require_data('compile_classpath')
# Require codegen we care about
# TODO(John Sirois): roll this up in Task - if the list of labels we care about for a target
# predicate to filter the full build graph is exposed, the requirement can be made automatic
# and in turn codegen tasks could denote the labels they produce, automating wiring of
# the produce side.
round_manager.optional_data('java')
round_manager.optional_data('scala')
# Allow the deferred_sources_mapping to take place first
round_manager.optional_data('deferred_sources')
# Subclasses must implement.
# --------------------------
_name = None
# The name used in JvmPlatform to refer to this compiler task.
compiler_name = None
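# execute() below returns early unless this value matches the compiler selected by
# JvmPlatform's --compiler option.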
@classmethod
def subsystem_dependencies(cls):
return super(JvmCompile, cls).subsystem_dependencies() + (DependencyContext,
Java,
JvmPlatform,
ScalaPlatform,
Zinc.Factory)
@classmethod
def name(cls):
return cls._name
@classmethod
def get_args_default(cls, bootstrap_option_values):
"""Override to set default for --args option.
:param bootstrap_option_values: The values of the "bootstrap options" (e.g., pants_workdir).
Implementations can use these when generating the default.
See src/python/pants/options/options_bootstrapper.py for
details.
"""
return ()
@classmethod
def get_warning_args_default(cls):
"""Override to set default for --warning-args option."""
return ()
@classmethod
def get_no_warning_args_default(cls):
"""Override to set default for --no-warning-args option."""
return ()
@property
def cache_target_dirs(self):
return True
@memoized_property
def _zinc(self):
return Zinc.Factory.global_instance().create(self.context.products, self.execution_strategy)
def _zinc_tool_classpath(self, toolname):
return self._zinc.tool_classpath_from_products(self.context.products,
toolname,
scope=self.options_scope)
def _zinc_tool_jar(self, toolname):
return self._zinc.tool_jar_from_products(self.context.products,
toolname,
scope=self.options_scope)
def select(self, target):
raise NotImplementedError()
def select_source(self, source_file_path):
raise NotImplementedError()
def compile(self, ctx, args, dependency_classpath, upstream_analysis,
settings, compiler_option_sets, zinc_file_manager,
javac_plugin_map, scalac_plugin_map):
"""Invoke the compiler.
Subclasses must implement. Must raise TaskError on compile failure.
:param CompileContext ctx: A CompileContext for the target to compile.
:param list args: Arguments to the compiler (such as javac or zinc).
:param list dependency_classpath: List of classpath entries of type ClasspathEntry for
dependencies.
:param upstream_analysis: A map from classpath entry to analysis file for dependencies.
:param JvmPlatformSettings settings: platform settings determining the -source, -target, etc for
javac to use.
:param list compiler_option_sets: The compiler_option_sets flags for the target.
:param zinc_file_manager: whether to use zinc provided file manager.
:param javac_plugin_map: Map of names of javac plugins to use to their arguments.
:param scalac_plugin_map: Map of names of scalac plugins to use to their arguments.
"""
raise NotImplementedError()
# Subclasses may override.
# ------------------------
def extra_compile_time_classpath_elements(self):
"""Extra classpath elements common to all compiler invocations.
These should be of type ClasspathEntry, but strings are also supported for backwards
compatibility.
E.g., jars for compiler plugins.
These are added at the end of the classpath, after any dependencies, so that if they
overlap with any explicit dependencies, the compiler sees those first. This makes
missing dependency accounting much simpler.
"""
return []
def scalac_plugin_classpath_elements(self):
"""Classpath entries containing scalac plugins."""
return []
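# Subclass sketch (illustrative only): a Scala-oriented subclass might expose its plugin
# jars to the compiler by overriding the hook above, e.g.
#   def extra_compile_time_classpath_elements(self):
#     return self.scalac_plugin_classpath_elements()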
def write_extra_resources(self, compile_context):
"""Writes any extra, out-of-band resources for a target to its classes directory.
E.g., targets that produce scala compiler plugins or annotation processor files
produce an info file. The resources will be added to the runtime_classpath.
Returns a list of pairs (root, [absolute paths of files under root]).
"""
pass
def create_empty_extra_products(self):
"""Create any products the subclass task supports in addition to the runtime_classpath.
The runtime_classpath is constructed by default.
"""
def register_extra_products_from_contexts(self, targets, compile_contexts):
"""Allows subclasses to register additional products for targets.
It is called for valid targets at start, then for each completed invalid target,
separately, during compilation.
"""
def select_runtime_context(self, ccs):
"""Select the context that contains the paths for runtime classpath artifacts.
Subclasses may have more than one type of context."""
return ccs
def __init__(self, *args, **kwargs):
super(JvmCompile, self).__init__(*args, **kwargs)
self._targets_to_compile_settings = None
# JVM options for running the compiler.
self._jvm_options = self.get_options().jvm_options
self._args = list(self.get_options().args)
if self.get_options().warnings:
self._args.extend(self.get_options().warning_args)
else:
self._args.extend(self.get_options().no_warning_args)
if self.get_options().debug_symbols:
self._args.extend(self.get_options().debug_symbol_args)
# The ivy confs for which we're building.
self._confs = Zinc.DEFAULT_CONFS
# Determines which sources are relevant to this target.
self._sources_predicate = self.select_source
self._delete_scratch = self.get_options().delete_scratch
self._clear_invalid_analysis = self.get_options().clear_invalid_analysis
try:
worker_count = self.get_options().worker_count
except AttributeError:
# tasks that don't support concurrent execution have no worker_count registered
worker_count = 1
self._worker_count = worker_count
self._size_estimator = self.size_estimator_by_name(self.get_options().size_estimator)
@memoized_property
def _missing_deps_finder(self):
dep_analyzer = JvmDependencyAnalyzer(get_buildroot(),
self.context.products.get_data('runtime_classpath'))
return MissingDependencyFinder(dep_analyzer, CompileErrorExtractor(
self.get_options().class_not_found_error_patterns))
def create_compile_context(self, target, target_workdir):
return CompileContext(target,
os.path.join(target_workdir, 'z.analysis'),
ClasspathEntry(os.path.join(target_workdir, 'classes')),
ClasspathEntry(os.path.join(target_workdir, 'z.jar')),
os.path.join(target_workdir, 'logs'),
os.path.join(target_workdir, 'zinc_args'),
self._compute_sources_for_target(target))
def execute(self):
if JvmPlatform.global_instance().get_options().compiler != self.compiler_name:
# If the requested compiler is not the one supported by this task,
# bail early.
return
# In case we have no relevant targets and return early, create the requested product maps.
self.create_empty_extra_products()
relevant_targets = list(self.context.targets(predicate=self.select))
if not relevant_targets:
return
# Clone the compile_classpath to the runtime_classpath.
classpath_product = self.create_runtime_classpath()
fingerprint_strategy = DependencyContext.global_instance().create_fingerprint_strategy(
classpath_product)
# Note, JVM targets are validated (`vts.update()`) as they succeed. As a result,
# we begin writing artifacts out to the cache immediately instead of waiting for
# all targets to finish.
with self.invalidated(relevant_targets,
invalidate_dependents=True,
fingerprint_strategy=fingerprint_strategy,
topological_order=True) as invalidation_check:
compile_contexts = {vt.target: self.create_compile_context(vt.target, vt.results_dir)
for vt in invalidation_check.all_vts}
self.do_compile(
invalidation_check,
compile_contexts,
classpath_product,
)
if not self.get_options().use_classpath_jars:
# Once compilation has completed, replace the classpath entry for each target with
# its jar'd representation.
for ccs in compile_contexts.values():
cc = self.select_runtime_context(ccs)
for conf in self._confs:
classpath_product.remove_for_target(cc.target, [(conf, cc.classes_dir.path)])
classpath_product.add_for_target(cc.target, [(conf, cc.jar_file.path)])
def _classpath_for_context(self, context):
if self.get_options().use_classpath_jars:
return context.jar_file
return context.classes_dir
def create_runtime_classpath(self):
compile_classpath = self.context.products.get_data('compile_classpath')
classpath_product = self.context.products.get_data('runtime_classpath')
if not classpath_product:
classpath_product = self.context.products.get_data('runtime_classpath', compile_classpath.copy)
else:
classpath_product.update(compile_classpath)
return classpath_product
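# The runtime_classpath therefore starts as a copy of the compile_classpath; do_compile()
# and execute() then augment or rewrite its per-target entries as compilation completes.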
def do_compile(self, invalidation_check, compile_contexts, classpath_product):
"""Executes compilations for the invalid targets contained in a single chunk."""
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
valid_targets = [vt.target for vt in invalidation_check.all_vts if vt.valid]
if self.execution_strategy == self.HERMETIC:
self._set_directory_digests_for_valid_target_classpath_directories(valid_targets, compile_contexts)
for valid_target in valid_targets:
cc = self.select_runtime_context(compile_contexts[valid_target])
classpath_product.add_for_target(
valid_target,
[(conf, self._classpath_for_context(cc)) for conf in self._confs],
)
self.register_extra_products_from_contexts(valid_targets, compile_contexts)
if not invalid_targets:
return
# This ensures the workunit for the worker pool is set before attempting to compile.
with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
as workunit:
# This uses workunit.parent as the WorkerPool's parent so that child workunits
# of different pools will show up in order in the html output. This way the current running
# workunit is on the bottom of the page rather than possibly in the middle.
worker_pool = WorkerPool(workunit.parent,
self.context.run_tracker,
self._worker_count)
# Prepare the output directory for each invalid target, and confirm that analysis is valid.
for target in invalid_targets:
cc = self.select_runtime_context(compile_contexts[target])
safe_mkdir(cc.classes_dir.path)
# Now create compile jobs for each invalid target one by one, using the classpath
# generated by upstream JVM tasks and our own prepare_compile().
jobs = self._create_compile_jobs(compile_contexts,
invalid_targets,
invalidation_check.invalid_vts,
classpath_product)
exec_graph = ExecutionGraph(jobs, self.get_options().print_exception_stacktrace)
try:
exec_graph.execute(worker_pool, self.context.log)
except ExecutionFailure as e:
raise TaskError("Compilation failure: {}".format(e))
def _record_compile_classpath(self, classpath, target, outdir):
relative_classpaths = [fast_relpath(path, self.get_options().pants_workdir) for path in classpath]
text = '\n'.join(relative_classpaths)
path = os.path.join(outdir, 'compile_classpath', '{}.txt'.format(target.id))
safe_mkdir(os.path.dirname(path), clean=False)
with open(path, 'w') as f:
f.write(text)
def _set_directory_digests_for_valid_target_classpath_directories(self, valid_targets, compile_contexts):
snapshots = self.context._scheduler.capture_snapshots(
tuple(PathGlobsAndRoot(PathGlobs(
[self._get_relative_classes_dir_from_target(target, compile_contexts)]
), get_buildroot()) for target in valid_targets))
[self._set_directory_digest_for_compile_context(
snapshot.directory_digest, target, compile_contexts)
for target, snapshot in list(zip(valid_targets, snapshots))]
def _get_relative_classes_dir_from_target(self, target, compile_contexts):
cc = self.select_runtime_context(compile_contexts[target])
return fast_relpath(cc.classes_dir.path, get_buildroot()) + '/**'
def _set_directory_digest_for_compile_context(self, directory_digest, target, compile_contexts):
cc = self.select_runtime_context(compile_contexts[target])
new_classpath_entry = ClasspathEntry(cc.classes_dir.path, directory_digest)
cc.classes_dir = new_classpath_entry
def _compile_vts(self, vts, ctx, upstream_analysis, dependency_classpath, progress_message, settings,
compiler_option_sets, zinc_file_manager, counter):
"""Compiles sources for the given vts into the given output dir.
:param vts: VersionedTargetSet with one entry for the target.
:param ctx: A CompileContext instance for the target.
:param dependency_classpath: A list of classpath entries of type ClasspathEntry for dependencies.
May be invoked concurrently on independent target sets.
Postcondition: The individual targets in vts are up-to-date, as if each were
compiled individually.
"""
if not ctx.sources:
self.context.log.warn('Skipping {} compile for targets with no sources:\n {}'
.format(self.name(), vts.targets))
else:
counter_val = str(counter()).rjust(counter.format_length(), ' ')
counter_str = '[{}/{}] '.format(counter_val, counter.size)
# Do some reporting.
self.context.log.info(
counter_str,
'Compiling ',
items_to_report_element(ctx.sources, '{} source'.format(self.name())),
' in ',
items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
' (',
progress_message,
').')
with self.context.new_workunit('compile', labels=[WorkUnitLabel.COMPILER]) as compile_workunit:
try:
directory_digest = self.compile(
ctx,
self._args,
dependency_classpath,
upstream_analysis,
settings,
compiler_option_sets,
zinc_file_manager,
self._get_plugin_map('javac', Java.global_instance(), ctx.target),
self._get_plugin_map('scalac', ScalaPlatform.global_instance(), ctx.target),
)
self._capture_logs(compile_workunit, ctx.log_dir)
return directory_digest
except TaskError:
if self.get_options().suggest_missing_deps:
logs = [path
for _, name, _, path in self._find_logs(compile_workunit)
if name == self.name()]
if logs:
self._find_missing_deps(logs, ctx.target)
raise
def _capture_logs(self, workunit, destination):
safe_mkdir(destination, clean=True)
for idx, name, output_name, path in self._find_logs(workunit):
os.link(path, os.path.join(destination, '{}-{}-{}.log'.format(name, idx, output_name)))
def _get_plugin_map(self, compiler, options_src, target):
"""Returns a map of plugin to args, for the given compiler.
Only plugins that must actually be activated will be present as keys in the map.
Plugins with no arguments will have an empty list as a value.
Active plugins and their args will be gathered from (in order of precedence):
- The <compiler>_plugins and <compiler>_plugin_args fields of the target, if it has them.
- The <compiler>_plugins and <compiler>_plugin_args options of this task, if it has them.
- The <compiler>_plugins and <compiler>_plugin_args fields of this task, if it has them.
Note that in-repo plugins will not be returned, even if requested, when building
themselves. Use published versions of those plugins for that.
See:
- examples/src/java/org/pantsbuild/example/javac/plugin/README.md.
- examples/src/scala/org/pantsbuild/example/scalac/plugin/README.md
:param compiler: one of 'javac', 'scalac'.
:param options_src: A JvmToolMixin instance providing plugin options.
:param target: The target whose plugins we compute.
"""
# Note that we get() options and getattr() target fields and task methods,
# so we're robust when those don't exist (or are None).
plugins_key = '{}_plugins'.format(compiler)
requested_plugins = (
tuple(getattr(self, plugins_key, []) or []) +
tuple(options_src.get_options().get(plugins_key, []) or []) +
tuple((getattr(target, plugins_key, []) or []))
)
# Allow multiple flags and also comma-separated values in a single flag.
requested_plugins = {p for val in requested_plugins for p in val.split(',')}
plugin_args_key = '{}_plugin_args'.format(compiler)
available_plugin_args = {}
available_plugin_args.update(getattr(self, plugin_args_key, {}) or {})
available_plugin_args.update(options_src.get_options().get(plugin_args_key, {}) or {})
available_plugin_args.update(getattr(target, plugin_args_key, {}) or {})
# From all available args, pluck just the ones for the selected plugins.
plugin_map = {}
for plugin in requested_plugins:
# Don't attempt to use a plugin while building that plugin.
# This avoids a bootstrapping problem. Note that you can still
# use published plugins on themselves, just not in-repo plugins.
if target not in self._plugin_targets(compiler).get(plugin, {}):
plugin_map[plugin] = available_plugin_args.get(plugin, [])
return plugin_map
def _find_logs(self, compile_workunit):
"""Finds all logs under the given workunit."""
for idx, workunit in enumerate(compile_workunit.children):
for output_name, outpath in workunit.output_paths().items():
if output_name in ('stdout', 'stderr'):
yield idx, workunit.name, output_name, outpath
def _find_missing_deps(self, compile_logs, target):
with self.context.new_workunit('missing-deps-suggest', labels=[WorkUnitLabel.COMPILER]):
compile_failure_log = '\n'.join(read_file(log) for log in compile_logs)
missing_dep_suggestions, no_suggestions = self._missing_deps_finder.find(
compile_failure_log, target)
if missing_dep_suggestions:
self.context.log.info('Found the following deps from target\'s transitive '
'dependencies that provide the missing classes:')
suggested_deps = set()
for classname, candidates in missing_dep_suggestions.items():
suggested_deps.add(list(candidates)[0])
self.context.log.info(' {}: {}'.format(classname, ', '.join(candidates)))
# We format the suggested deps with single quotes and commas so that
# they can be easily cut/pasted into a BUILD file.
formatted_suggested_deps = ["'%s'," % dep for dep in suggested_deps]
suggestion_msg = (
'\nIf the above information is correct, '
'please add the following to the dependencies of ({}):\n {}\n'
.format(target.address.spec, '\n '.join(sorted(list(formatted_suggested_deps))))
)
path_to_buildozer = self.get_options().buildozer
if path_to_buildozer:
suggestion_msg += ("\nYou can do this by running:\n"
" {buildozer} 'add dependencies {deps}' {target}".format(
buildozer=path_to_buildozer,
deps=" ".join(sorted(suggested_deps)),
target=target.address.spec)
)
self.context.log.info(suggestion_msg)
if no_suggestions:
self.context.log.warn('Unable to find any deps from target\'s transitive '
'dependencies that provide the following missing classes:')
no_suggestion_msg = '\n '.join(sorted(list(no_suggestions)))
self.context.log.warn(' {}'.format(no_suggestion_msg))
self.context.log.warn(self.get_options().missing_deps_not_found_msg)
def _upstream_analysis(self, compile_contexts, classpath_entries):
"""Returns tuples of classes_dir->analysis_file for the closure of the target."""
# Reorganize the compile_contexts by class directory.
compile_contexts_by_directory = {}
for compile_context in compile_contexts.values():
compile_context = self.select_runtime_context(compile_context)
compile_contexts_by_directory[compile_context.classes_dir.path] = compile_context
# If we have a compile context for the target, include it.
for entry in classpath_entries:
path = entry.path
if not path.endswith('.jar'):
compile_context = compile_contexts_by_directory.get(path)
if not compile_context:
self.context.log.debug('Missing upstream analysis for {}'.format(path))
else:
yield compile_context.classes_dir.path, compile_context.analysis_file
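  # Note: the generator above yields (classes_dir_path, analysis_file) pairs only for
  # directory classpath entries that map to a known compile context; jar entries are
  # skipped entirely and unmatched directories just produce a debug log line.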
def exec_graph_key_for_target(self, compile_target):
return "compile({})".format(compile_target.address.spec)
def _create_compile_jobs(self, compile_contexts, invalid_targets, invalid_vts, classpath_product):
class Counter(object):
def __init__(self, size, initial=0):
self.size = size
self.count = initial
def __call__(self):
self.count += 1
return self.count
def format_length(self):
return len(str(self.size))
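    # Counter tracks compile progress: each call increments and returns the running
    # count, and format_length() gives the width of `size`, presumably so jobs can log
    # zero-padded "n/size"-style progress. Note that `size` is reset to len(jobs) below
    # once the full job list is known.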
counter = Counter(len(invalid_vts))
jobs = []
jobs.extend(self.pre_compile_jobs(counter))
invalid_target_set = set(invalid_targets)
for ivts in invalid_vts:
# Invalidated targets are a subset of relevant targets: get the context for this one.
compile_target = ivts.target
invalid_dependencies = self._collect_invalid_compile_dependencies(compile_target,
invalid_target_set,
compile_contexts)
jobs.extend(
self.create_compile_jobs(compile_target, compile_contexts, invalid_dependencies, ivts,
counter, classpath_product))
counter.size = len(jobs)
return jobs
def pre_compile_jobs(self, counter):
"""Override this to provide jobs that are not related to particular targets.
This is only called when there are invalid targets."""
return []
def create_compile_jobs(self, compile_target, all_compile_contexts, invalid_dependencies, ivts,
counter, classpath_product):
context_for_target = all_compile_contexts[compile_target]
compile_context = self.select_runtime_context(context_for_target)
job = Job(self.exec_graph_key_for_target(compile_target),
functools.partial(
self._default_work_for_vts,
ivts,
compile_context,
'runtime_classpath',
counter,
all_compile_contexts,
classpath_product),
[self.exec_graph_key_for_target(target) for target in invalid_dependencies],
self._size_estimator(compile_context.sources),
# If compilation and analysis work succeeds, validate the vts.
# Otherwise, fail it.
on_success=ivts.update,
on_failure=ivts.force_invalidate)
return [job]
def check_cache(self, vts, counter):
"""Manually checks the artifact cache (usually immediately before compilation.)
Returns true if the cache was hit successfully, indicating that no compilation is necessary.
"""
if not self.artifact_cache_reads_enabled():
return False
cached_vts, _, _ = self.check_artifact_cache([vts])
if not cached_vts:
self.context.log.debug('Missed cache during double check for {}'
.format(vts.target.address.spec))
return False
assert cached_vts == [vts], (
'Cache returned unexpected target: {} vs {}'.format(cached_vts, [vts])
)
self.context.log.info('Hit cache during double check for {}'.format(vts.target.address.spec))
counter()
return True
def should_compile_incrementally(self, vts, ctx):
"""Check to see if the compile should try to re-use the existing analysis.
Returns true if we should try to compile the target incrementally.
"""
if not vts.is_incremental:
return False
if not self._clear_invalid_analysis:
return True
return os.path.exists(ctx.analysis_file)
def _record_target_stats(self, target, classpath_len, sources_len, compiletime, is_incremental,
stats_key):
def record(k, v):
self.context.run_tracker.report_target_info(self.options_scope, target, [stats_key, k], v)
record('time', compiletime)
record('classpath_len', classpath_len)
record('sources_len', sources_len)
record('incremental', is_incremental)
def _collect_invalid_compile_dependencies(self, compile_target, invalid_target_set,
compile_contexts):
# Collects all invalid dependencies that are not dependencies of other invalid dependencies
# within the closure of compile_target.
invalid_dependencies = OrderedSet()
def work(target):
pass
def predicate(target):
if target is compile_target:
return True
if target in invalid_target_set:
invalid_dependencies.add(target)
return self._on_invalid_compile_dependency(target, compile_target, compile_contexts)
return True
compile_target.walk(work, predicate)
return invalid_dependencies
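  # Illustrative walk (hypothetical targets): with A -> B -> C where both B and C are
  # invalid, walking from A adds B to invalid_dependencies and, because
  # _on_invalid_compile_dependency returns False by default, does not descend past B;
  # C is expected to be picked up as an invalid dependency of B's own compile job.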
def _on_invalid_compile_dependency(self, dep, compile_target, compile_contexts):
"""Decide whether to continue searching for invalid targets to use in the execution graph.
By default, don't recurse because once we have an invalid dependency, we can rely on its
dependencies having been compiled already.
Override to adjust this behavior."""
return False
def _create_context_jar(self, compile_context):
"""Jar up the compile_context to its output jar location.
TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
compile inputs would make the compiler's analysis useless.
see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
"""
root = compile_context.classes_dir.path
with compile_context.open_jar(mode='w') as jar:
for abs_sub_dir, dirnames, filenames in safe_walk(root):
for name in dirnames + filenames:
abs_filename = os.path.join(abs_sub_dir, name)
arcname = fast_relpath(abs_filename, root)
jar.write(abs_filename, arcname)
def _compute_sources_for_target(self, target):
"""Computes and returns the sources (relative to buildroot) for the given target."""
def resolve_target_sources(target_sources):
resolved_sources = []
for tgt in target_sources:
if tgt.has_sources():
resolved_sources.extend(tgt.sources_relative_to_buildroot())
return resolved_sources
sources = [s for s in target.sources_relative_to_buildroot() if self._sources_predicate(s)]
# TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets.
if hasattr(target, 'java_sources') and target.java_sources:
sources.extend(resolve_target_sources(target.java_sources))
return sources
@memoized_property
def _extra_compile_time_classpath(self):
"""Compute any extra compile-time-only classpath elements."""
def extra_compile_classpath_iter():
for conf in self._confs:
for jar in self.extra_compile_time_classpath_elements():
yield (conf, jar)
return list(extra_compile_classpath_iter())
@memoized_method
def _plugin_targets(self, compiler):
"""Returns a map from plugin name to the targets that build that plugin."""
if compiler == 'javac':
plugin_cls = JavacPlugin
elif compiler == 'scalac':
plugin_cls = ScalacPlugin
else:
raise TaskError('Unknown JVM compiler: {}'.format(compiler))
plugin_tgts = self.context.targets(predicate=lambda t: isinstance(t, plugin_cls))
return {t.plugin: t.closure() for t in plugin_tgts}
@staticmethod
def _local_jvm_distribution(settings=None):
settings_args = [settings] if settings else []
try:
local_distribution = JvmPlatform.preferred_jvm_distribution(settings_args, strict=True)
except DistributionLocator.Error:
local_distribution = JvmPlatform.preferred_jvm_distribution(settings_args, strict=False)
return local_distribution
class _HermeticDistribution(object):
def __init__(self, home_path, distribution):
self._underlying = distribution
self._home = home_path
def find_libs(self, names):
underlying_libs = self._underlying.find_libs(names)
return [self._rehome(l) for l in underlying_libs]
def find_libs_path_globs(self, names):
libs_abs = self._underlying.find_libs(names)
libs_unrooted = [self._unroot_lib_path(l) for l in libs_abs]
path_globs = PathGlobsAndRoot(
PathGlobs(tuple(libs_unrooted)),
text_type(self._underlying.home))
return (libs_unrooted, path_globs)
@property
def java(self):
return os.path.join(self._home, 'bin', 'java')
@property
def home(self):
return self._home
@property
def underlying_home(self):
return self._underlying.home
def _unroot_lib_path(self, path):
return path[len(self._underlying.home)+1:]
def _rehome(self, l):
return os.path.join(self._home, self._unroot_lib_path(l))
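  # Example of the rehoming above (paths are illustrative): with home_path='.jdk' and
  # an underlying distribution at /usr/lib/jvm/java-8, a library found at
  # /usr/lib/jvm/java-8/lib/tools.jar is unrooted to 'lib/tools.jar' and rehomed to
  # '.jdk/lib/tools.jar', presumably where the hermetic execution sandbox materializes
  # the JDK.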
def _get_jvm_distribution(self):
# TODO We may want to use different jvm distributions depending on what
# java version the target expects to be compiled against.
# See: https://github.com/pantsbuild/pants/issues/6416 for covering using
# different jdks in remote builds.
local_distribution = self._local_jvm_distribution()
return self.execution_strategy_enum.resolve_for_enum_variant({
self.SUBPROCESS: lambda: local_distribution,
self.NAILGUN: lambda: local_distribution,
self.HERMETIC: lambda: self._HermeticDistribution('.jdk', local_distribution),
})()
  def _default_work_for_vts(self, vts, ctx, input_classpath_product_key, counter,
                            all_compile_contexts, output_classpath_product):
progress_message = ctx.target.address.spec
# Double check the cache before beginning compilation
hit_cache = self.check_cache(vts, counter)
if not hit_cache:
# Compute the compile classpath for this target.
dependency_cp_entries = self._zinc.compile_classpath_entries(
input_classpath_product_key,
ctx.target,
extra_cp_entries=self._extra_compile_time_classpath,
)
upstream_analysis = dict(self._upstream_analysis(all_compile_contexts, dependency_cp_entries))
is_incremental = self.should_compile_incrementally(vts, ctx)
if not is_incremental:
# Purge existing analysis file in non-incremental mode.
safe_delete(ctx.analysis_file)
# Work around https://github.com/pantsbuild/pants/issues/3670
safe_rmtree(ctx.classes_dir.path)
dep_context = DependencyContext.global_instance()
tgt, = vts.targets
compiler_option_sets = dep_context.defaulted_property(tgt, 'compiler_option_sets')
zinc_file_manager = dep_context.defaulted_property(tgt, 'zinc_file_manager')
with Timer() as timer:
directory_digest = self._compile_vts(vts,
ctx,
upstream_analysis,
dependency_cp_entries,
progress_message,
tgt.platform,
compiler_option_sets,
zinc_file_manager,
counter)
ctx.classes_dir = ClasspathEntry(ctx.classes_dir.path, directory_digest)
self._record_target_stats(tgt,
len(dependency_cp_entries),
len(ctx.sources),
timer.elapsed,
is_incremental,
'compile')
# Write any additional resources for this target to the target workdir.
self.write_extra_resources(ctx)
# Jar the compiled output.
self._create_context_jar(ctx)
# Update the products with the latest classes.
output_classpath_product.add_for_target(
ctx.target,
[(conf, self._classpath_for_context(ctx)) for conf in self._confs],
)
self.register_extra_products_from_contexts([ctx.target], all_compile_contexts)
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import iso8601
import mock
from oslo_utils import fixture as utils_fixture
import webob.exc
from jacket.api.compute.openstack import api_version_request as api_version
from jacket.api.compute.openstack.compute.legacy_v2.contrib import services \
as services_v2
from jacket.api.compute.openstack.compute import services as services_v21
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi as os_wsgi
from jacket.compute import availability_zones
from jacket.compute.cells import utils as cells_utils
from jacket.compute.cloud import cells_api
from jacket import context
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute.servicegroup.drivers import db as db_driver
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit.objects import test_service
fake_services_list = [
dict(test_service.fake_service,
binary='compute-scheduler',
host='host1',
id=1,
disabled=True,
topic='scheduler',
updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
last_seen_up=datetime.datetime(2012, 10, 29, 13, 42, 2),
forced_down=False,
disabled_reason='test1'),
dict(test_service.fake_service,
binary='compute-compute',
host='host1',
id=2,
disabled=True,
topic='compute',
updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
last_seen_up=datetime.datetime(2012, 10, 29, 13, 42, 5),
forced_down=False,
disabled_reason='test2'),
dict(test_service.fake_service,
binary='compute-scheduler',
host='host2',
id=3,
disabled=False,
topic='scheduler',
updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=datetime.datetime(2012, 9, 19, 6, 55, 34),
forced_down=False,
disabled_reason=None),
dict(test_service.fake_service,
binary='compute-compute',
host='host2',
id=4,
disabled=True,
topic='compute',
updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=datetime.datetime(2012, 9, 18, 8, 3, 38),
forced_down=False,
disabled_reason='test4'),
# NOTE(rpodolyaka): API services are special case and must be filtered out
dict(test_service.fake_service,
binary='compute-osapi_compute',
host='host2',
id=5,
disabled=False,
topic=None,
updated_at=None,
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=None,
forced_down=False,
disabled_reason=None),
dict(test_service.fake_service,
binary='compute-metadata',
host='host2',
id=6,
disabled=False,
topic=None,
updated_at=None,
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=None,
forced_down=False,
disabled_reason=None),
]
class FakeRequest(object):
environ = {"compute.context": context.get_admin_context()}
GET = {}
    def __init__(self, version=os_wsgi.DEFAULT_API_VERSION):  # defaults to '2.1'
super(FakeRequest, self).__init__()
self.api_version_request = api_version.APIVersionRequest(version)
class FakeRequestWithService(FakeRequest):
GET = {"binary": "compute-compute"}
class FakeRequestWithHost(FakeRequest):
GET = {"host": "host1"}
class FakeRequestWithHostService(FakeRequest):
GET = {"host": "host1", "binary": "compute-compute"}
def fake_service_get_all(services):
def service_get_all(context, filters=None, set_zones=False):
        if set_zones or (filters and 'availability_zone' in filters):
return availability_zones.set_availability_zones(context,
services)
return services
return service_get_all
def fake_db_api_service_get_all(context, disabled=None):
return fake_services_list
def fake_db_service_get_by_host_binary(services):
def service_get_by_host_binary(context, host, binary):
for service in services:
if service['host'] == host and service['binary'] == binary:
return service
raise exception.HostBinaryNotFound(host=host, binary=binary)
return service_get_by_host_binary
def fake_service_get_by_host_binary(context, host, binary):
fake = fake_db_service_get_by_host_binary(fake_services_list)
return fake(context, host, binary)
def _service_get_by_id(services, value):
for service in services:
if service['id'] == value:
return service
return None
def fake_db_service_update(services):
def service_update(context, service_id, values):
service = _service_get_by_id(services, service_id)
if service is None:
raise exception.ServiceNotFound(service_id=service_id)
service = copy.deepcopy(service)
service.update(values)
return service
return service_update
def fake_service_update(context, service_id, values):
fake = fake_db_service_update(fake_services_list)
return fake(context, service_id, values)
def fake_utcnow():
return datetime.datetime(2012, 10, 29, 13, 42, 11)
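# fake_utcnow pins "now" only a few seconds after the host1 services' updated_at
# timestamps (13:42:02 / 13:42:05) and weeks after host2's, so with the default
# service_down_time the tests below consistently report host1 services as 'up' and
# host2 services as 'down'.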
class ServicesTestV21(test.TestCase):
service_is_up_exc = webob.exc.HTTPInternalServerError
bad_request = exception.ValidationError
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def _set_up_controller(self):
self.controller = services_v21.ServiceController()
def setUp(self):
super(ServicesTestV21, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self._set_up_controller()
self.controller.host_api.service_get_all = (
mock.Mock(side_effect=fake_service_get_all(fake_services_list)))
self.useFixture(utils_fixture.TimeFixture(fake_utcnow()))
self.stub_out('compute.db.service_get_by_host_and_binary',
fake_db_service_get_by_host_binary(fake_services_list))
self.stub_out('compute.db.service_update',
fake_db_service_update(fake_services_list))
self.req = fakes.HTTPRequest.blank('')
def _process_output(self, services, has_disabled=False, has_id=False):
return services
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'disabled_reason': 'test1',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'id': 2,
'status': 'disabled',
'disabled_reason': 'test2',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'compute-scheduler',
'host': 'host2',
'zone': 'internal',
'id': 3,
'status': 'enabled',
'disabled_reason': None,
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'compute-compute',
'host': 'host2',
'zone': 'compute',
'id': 4,
'status': 'disabled',
'disabled_reason': 'test4',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'disabled_reason': 'test1',
'id': 1,
'zone': 'internal',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'disabled_reason': 'test2',
'id': 2,
'zone': 'compute',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'compute-compute',
'host': 'host2',
'zone': 'compute',
'disabled_reason': 'test4',
'id': 4,
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'status': 'disabled',
'state': 'up',
'id': 2,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'compute-scheduler',
'host': 'host2',
'zone': 'internal',
'status': 'enabled',
'id': 3,
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'disabled_reason': None},
{'binary': 'compute-compute',
'host': 'host2',
'zone': 'compute',
'id': 4,
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'zone': 'internal',
'id': 1,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'compute-compute',
'host': 'host2',
'id': 4,
'zone': 'compute',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'status': 'disabled',
'id': 2,
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_services_detail_with_delete_extension(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'id': 1,
'zone': 'internal',
'disabled_reason': 'test1',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'compute-compute',
'host': 'host1',
'id': 2,
'zone': 'compute',
'disabled_reason': 'test2',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'compute-scheduler',
'host': 'host2',
'disabled_reason': None,
'id': 3,
'zone': 'internal',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'compute-compute',
'host': 'host2',
'id': 4,
'disabled_reason': 'test4',
'zone': 'compute',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response, has_id=True)
self.assertEqual(res_dict, response)
def test_services_enable(self):
def _service_update(context, service_id, values):
self.assertIsNone(values['disabled_reason'])
return dict(test_service.fake_service, id=service_id, **values)
self.stub_out('compute.db.service_update', _service_update)
body = {'host': 'host1', 'binary': 'compute-compute'}
res_dict = self.controller.update(self.req, "enable", body=body)
self.assertEqual(res_dict['service']['status'], 'enabled')
self.assertNotIn('disabled_reason', res_dict['service'])
def test_services_enable_with_invalid_host(self):
body = {'host': 'invalid', 'binary': 'compute-compute'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"enable",
body=body)
def test_services_enable_with_invalid_binary(self):
body = {'host': 'host1', 'binary': 'invalid'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"enable",
body=body)
def test_services_disable(self):
body = {'host': 'host1', 'binary': 'compute-compute'}
res_dict = self.controller.update(self.req, "disable", body=body)
self.assertEqual(res_dict['service']['status'], 'disabled')
self.assertNotIn('disabled_reason', res_dict['service'])
def test_services_disable_with_invalid_host(self):
body = {'host': 'invalid', 'binary': 'compute-compute'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"disable",
body=body)
def test_services_disable_with_invalid_binary(self):
body = {'host': 'host1', 'binary': 'invalid'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"disable",
body=body)
def test_services_disable_log_reason(self):
self.ext_mgr.extensions['os-extended-services'] = True
body = {'host': 'host1',
'binary': 'compute-compute',
'disabled_reason': 'test-reason',
}
res_dict = self.controller.update(self.req,
"disable-log-reason",
body=body)
self.assertEqual(res_dict['service']['status'], 'disabled')
self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')
def test_mandatory_reason_field(self):
self.ext_mgr.extensions['os-extended-services'] = True
body = {'host': 'host1',
'binary': 'compute-compute',
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, self.req, "disable-log-reason",
body=body)
def test_invalid_reason_field(self):
self.ext_mgr.extensions['os-extended-services'] = True
reason = 'a' * 256
body = {'host': 'host1',
'binary': 'compute-compute',
'disabled_reason': reason,
}
self.assertRaises(self.bad_request,
self.controller.update, self.req, "disable-log-reason",
body=body)
def test_services_delete(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
with mock.patch.object(self.controller.host_api,
'service_delete') as service_delete:
self.controller.delete(self.req, '1')
service_delete.assert_called_once_with(
self.req.environ['compute.context'], '1')
self.assertEqual(self.controller.delete.wsgi_code, 204)
def test_services_delete_not_found(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, 1234)
def test_services_delete_bad_request(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, self.req, 'abc')
# This test is just to verify that the servicegroup API gets used when
# calling the API
@mock.patch.object(db_driver.DbDriver, 'is_up', side_effect=KeyError)
def test_services_with_exception(self, mock_is_up):
req = FakeRequestWithHostService()
self.assertRaises(self.service_is_up_exc, self.controller.index, req)
class ServicesTestV211(ServicesTestV21):
wsgi_api_version = '2.11'
def test_services_list(self):
req = FakeRequest(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'forced_down': False,
'disabled_reason': 'test1',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'id': 2,
'status': 'disabled',
'disabled_reason': 'test2',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'compute-scheduler',
'host': 'host2',
'zone': 'internal',
'id': 3,
'status': 'enabled',
'disabled_reason': None,
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'compute-compute',
'host': 'host2',
'zone': 'compute',
'id': 4,
'status': 'disabled',
'disabled_reason': 'test4',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
req = FakeRequestWithHost(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'disabled_reason': 'test1',
'id': 1,
'zone': 'internal',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'disabled_reason': 'test2',
'id': 2,
'zone': 'compute',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'compute-compute',
'host': 'host2',
'zone': 'compute',
'disabled_reason': 'test4',
'id': 4,
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequest(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'status': 'disabled',
'state': 'up',
'id': 2,
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'compute-scheduler',
'host': 'host2',
'zone': 'internal',
'status': 'enabled',
'id': 3,
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'disabled_reason': None},
{'binary': 'compute-compute',
'host': 'host2',
'zone': 'compute',
'id': 4,
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHost(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'zone': 'internal',
'id': 1,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'compute-compute',
'host': 'host2',
'id': 4,
'zone': 'compute',
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHostService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-compute',
'host': 'host1',
'zone': 'compute',
'status': 'disabled',
'id': 2,
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_services_detail_with_delete_extension(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
req = FakeRequest(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'compute-scheduler',
'host': 'host1',
'id': 1,
'zone': 'internal',
'disabled_reason': 'test1',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'compute-compute',
'host': 'host1',
'id': 2,
'zone': 'compute',
'disabled_reason': 'test2',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'compute-scheduler',
'host': 'host2',
'disabled_reason': None,
'id': 3,
'zone': 'internal',
'status': 'enabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'compute-compute',
'host': 'host2',
'id': 4,
'disabled_reason': 'test4',
'zone': 'compute',
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response, has_id=True)
self.assertEqual(res_dict, response)
class ServicesTestV20(ServicesTestV21):
service_is_up_exc = KeyError
bad_request = webob.exc.HTTPBadRequest
def setUp(self):
super(ServicesTestV20, self).setUp()
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.non_admin_req = fakes.HTTPRequest.blank('')
def _set_up_controller(self):
self.controller = services_v2.ServiceController(self.ext_mgr)
def test_services_delete_not_enabled(self):
self.assertRaises(webob.exc.HTTPMethodNotAllowed,
self.controller.delete, self.req, '300')
def _process_output(self, services, has_disabled=False, has_id=False):
for service in services['services']:
if not has_disabled:
service.pop('disabled_reason')
if not has_id:
service.pop('id')
return services
def test_update_with_non_admin(self):
self.assertRaises(exception.AdminRequired, self.controller.update,
self.non_admin_req, fakes.FAKE_UUID, body={})
def test_delete_with_non_admin(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
self.assertRaises(exception.AdminRequired, self.controller.delete,
self.non_admin_req, fakes.FAKE_UUID)
def test_index_with_non_admin(self):
self.assertRaises(exception.AdminRequired, self.controller.index,
self.non_admin_req)
class ServicesCellsTestV21(test.TestCase):
def setUp(self):
super(ServicesCellsTestV21, self).setUp()
host_api = cells_api.HostAPI()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self._set_up_controller()
self.controller.host_api = host_api
self.useFixture(utils_fixture.TimeFixture(fake_utcnow()))
services_list = []
for service in fake_services_list:
service = service.copy()
del service['version']
service_obj = compute.Service(**service)
service_proxy = cells_utils.ServiceProxy(service_obj, 'cell1')
services_list.append(service_proxy)
host_api.cells_rpcapi.service_get_all = (
mock.Mock(side_effect=fake_service_get_all(services_list)))
def _set_up_controller(self):
self.controller = services_v21.ServiceController()
def _process_out(self, res_dict):
for res in res_dict['services']:
res.pop('disabled_reason')
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
req = FakeRequest()
res_dict = self.controller.index(req)
utc = iso8601.iso8601.Utc()
response = {'services': [
{'id': 'cell1@1',
'binary': 'compute-scheduler',
'host': 'cell1@host1',
'zone': 'internal',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
tzinfo=utc)},
{'id': 'cell1@2',
'binary': 'compute-compute',
'host': 'cell1@host1',
'zone': 'compute',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
tzinfo=utc)},
{'id': 'cell1@3',
'binary': 'compute-scheduler',
'host': 'cell1@host2',
'zone': 'internal',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
tzinfo=utc)},
{'id': 'cell1@4',
'binary': 'compute-compute',
'host': 'cell1@host2',
'zone': 'compute',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
tzinfo=utc)}]}
self._process_out(res_dict)
self.assertEqual(response, res_dict)
class ServicesCellsTestV20(ServicesCellsTestV21):
def _set_up_controller(self):
self.controller = services_v2.ServiceController(self.ext_mgr)
def _process_out(self, res_dict):
pass
class ServicesPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ServicesPolicyEnforcementV21, self).setUp()
self.controller = services_v21.ServiceController()
self.req = fakes.HTTPRequest.blank('')
def test_update_policy_failed(self):
rule_name = "os_compute_api:os-services"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, fakes.FAKE_UUID,
body={'host': 'host1',
'binary': 'compute-compute'})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_policy_failed(self):
rule_name = "os_compute_api:os-services"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-services"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_noop, ugettext_lazy
from django.utils.translation import ugettext as _
from corehq.apps.es import users as user_es, filters
from corehq.apps.domain.models import Domain
from corehq.apps.groups.models import Group
from corehq.apps.reports.util import namedtupledict
from corehq.apps.users.models import CommCareUser
from corehq.util import remove_dups
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.commtrack.models import SQLLocation
from .. import util
from ..models import HQUserType, HQUserToggle
from ..analytics.esaccessors import get_user_stubs, get_group_stubs
from .base import (
BaseMultipleOptionFilter,
BaseReportFilter,
BaseSingleOptionFilter,
)
class UserOrGroupFilter(BaseSingleOptionFilter):
slug = "view_by"
label = ugettext_noop("View by Users or Groups")
default_text = ugettext_noop("Users")
options = [('groups', 'Groups')]
class UserTypeFilter(BaseReportFilter):
# note, don't use this as a guideline for anything.
slug = "ufilter"
label = ugettext_lazy("User Type")
template = "reports/filters/filter_users.html"
@property
def filter_context(self):
toggle, show_filter = self.get_user_filter(self.request)
return {
'show_user_filter': show_filter,
'toggle_users': toggle,
}
@classmethod
def get_user_filter(cls, request):
return get_user_toggle(request)
class SelectMobileWorkerFilter(BaseSingleOptionFilter):
slug = 'individual'
label = ugettext_noop("Select Mobile Worker")
default_text = ugettext_noop("All Mobile Workers")
@property
def filter_context(self):
user_filter, _ = UserTypeFilter.get_user_filter(self.request)
context = super(SelectMobileWorkerFilter, self).filter_context
context['select'].update({
'default_text': self.get_default_text(user_filter),
})
return context
@property
def options(self):
users = util.user_list(self.domain)
return [(user.user_id,
"%s%s" % (user.username_in_report, "" if user.is_active else " (Inactive)"))
for user in users]
@classmethod
def get_default_text(cls, user_filter):
default = cls.default_text
if user_filter[HQUserType.ADMIN].show or \
user_filter[HQUserType.DEMO_USER].show or user_filter[HQUserType.UNKNOWN].show:
default = _('%s & Others') % _(default)
return default
class AltPlaceholderMobileWorkerFilter(SelectMobileWorkerFilter):
default_text = ugettext_noop('Enter a worker')
class SelectCaseOwnerFilter(SelectMobileWorkerFilter):
label = ugettext_noop("Select Case Owner")
default_text = ugettext_noop("All Case Owners")
@property
def options(self):
options = [(group._id, "%s (Group)" % group.name) for group in Group.get_case_sharing_groups(self.domain)]
user_options = super(SelectCaseOwnerFilter, self).options
options.extend(user_options)
return options
class BaseGroupedMobileWorkerFilter(BaseSingleOptionFilter):
"""
This is a little field for use when a client really wants to filter by
individuals from a specific group. Since by default we still want to
show all the data, no filtering is done unless the special group filter
is selected.
"""
group_names = []
@property
def options(self):
options = []
for group_name in self.group_names:
group = Group.by_name(self.domain, group_name)
if group:
users = group.get_users(is_active=True, only_commcare=True)
options.extend([(u.user_id, u.username_in_report) for u in users])
return options
class EmwfUtils(object):
def __init__(self, domain):
self.domain = domain
def user_tuple(self, u):
user = util._report_user_dict(u)
uid = "u__%s" % user['user_id']
name = "%s [user]" % user['username_in_report']
return (uid, name)
def reporting_group_tuple(self, g):
return ("g__%s" % g['_id'], '%s [group]' % g['name'])
def user_type_tuple(self, t):
return (
"t__%s" % (t),
"[%s]" % HQUserType.human_readable[t]
)
def location_tuple(self, location):
return ("l__%s" % location.location_id,
'%s [location]' % location.get_path_display())
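    # The tuples above pair a typed slug with a display label, e.g. (hypothetical ids):
    # ('u__a1b2', 'jdoe [user]'), ('g__f00d', 'Field Team [group]'),
    # ('l__cafe', 'Boston [location]'), plus user-type slugs like 't__0' (the
    # hard-coded '[All mobile workers]' entry in static_options below).
    # ExpandedMobileWorkerFilter parses these prefixes back out of the slugs.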
@property
@memoized
def static_options(self):
static_options = [("t__0", _("[All mobile workers]"))]
types = ['DEMO_USER', 'ADMIN', 'UNKNOWN']
if Domain.get_by_name(self.domain).commtrack_enabled:
types.append('COMMTRACK')
for t in types:
user_type = getattr(HQUserType, t)
static_options.append(self.user_type_tuple(user_type))
return static_options
_UserData = namedtupledict('_UserData', (
'users',
'admin_and_demo_users',
'groups',
'users_by_group',
'combined_users',
))
class ExpandedMobileWorkerFilter(BaseMultipleOptionFilter):
"""
To get raw filter results:
mobile_user_and_group_slugs = request.GET.getlist(ExpandedMobileWorkerFilter.slug)
user_ids = emwf.selected_user_ids(mobile_user_and_group_slugs)
user_types = emwf.selected_user_types(mobile_user_and_group_slugs)
group_ids = emwf.selected_group_ids(mobile_user_and_group_slugs)
"""
slug = "emw"
label = ugettext_lazy("Groups or Users")
default_options = None
placeholder = ugettext_lazy(
"Specify groups and users to include in the report")
is_cacheable = False
options_url = 'emwf_options'
@property
@memoized
def utils(self):
return EmwfUtils(self.domain)
@staticmethod
def selected_user_ids(mobile_user_and_group_slugs):
return [u[3:] for u in mobile_user_and_group_slugs if u.startswith("u__")]
@staticmethod
def selected_user_types(mobile_user_and_group_slugs):
"""
usage: ``HQUserType.DEMO_USER in selected_user_types``
"""
return [int(t[3:]) for t in mobile_user_and_group_slugs
if t.startswith("t__") and t[3:].isdigit()]
@classmethod
def selected_group_ids(cls, mobile_user_and_group_slugs):
return cls.selected_reporting_group_ids(mobile_user_and_group_slugs)
@staticmethod
def selected_reporting_group_ids(mobile_user_and_group_slugs):
return [g[3:] for g in mobile_user_and_group_slugs if g.startswith("g__")]
@staticmethod
def selected_location_ids(mobile_user_and_group_slugs):
return [l[3:] for l in mobile_user_and_group_slugs if l.startswith("l__")]
@staticmethod
def show_all_mobile_workers(mobile_user_and_group_slugs):
return 't__0' in mobile_user_and_group_slugs
def get_default_selections(self):
defaults = [('t__0', _("[All mobile workers]"))]
if self.request.project.commtrack_enabled:
defaults.append(self.utils.user_type_tuple(HQUserType.COMMTRACK))
return defaults
@property
@memoized
def selected(self):
selected_ids = self.request.GET.getlist(self.slug)
if not selected_ids:
return [{'id': url_id, 'text': text}
for url_id, text in self.get_default_selections()]
selected = (self.selected_static_options(selected_ids) +
self._selected_user_entries(selected_ids) +
self._selected_group_entries(selected_ids) +
self._selected_location_entries(selected_ids))
known_ids = dict(selected)
return [
{'id': id, 'text': known_ids[id]}
for id in selected_ids
if id in known_ids
]
def selected_static_options(self, mobile_user_and_group_slugs):
return [option for option in self.utils.static_options
if option[0] in mobile_user_and_group_slugs]
def _selected_user_entries(self, mobile_user_and_group_slugs):
user_ids = self.selected_user_ids(mobile_user_and_group_slugs)
if not user_ids:
return []
results = get_user_stubs(user_ids)
return [self.utils.user_tuple(hit) for hit in results]
def _selected_groups_query(self, mobile_user_and_group_slugs):
group_ids = self.selected_group_ids(mobile_user_and_group_slugs)
if not group_ids:
return []
return get_group_stubs(group_ids)
def _selected_group_entries(self, mobile_user_and_group_slugs):
groups = self._selected_groups_query(mobile_user_and_group_slugs)
return [self.utils.reporting_group_tuple(group)
for group in groups
if group.get("reporting", False)]
def _selected_location_entries(self, mobile_user_and_group_slugs):
location_ids = self.selected_location_ids(mobile_user_and_group_slugs)
if not location_ids:
return []
return map(self.utils.location_tuple,
SQLLocation.objects.filter(location_id__in=location_ids))
@property
def filter_context(self):
context = super(ExpandedMobileWorkerFilter, self).filter_context
url = reverse(self.options_url, args=[self.domain])
context.update({'endpoint': url})
return context
@classmethod
def user_es_query(cls, domain, mobile_user_and_group_slugs):
user_ids = cls.selected_user_ids(mobile_user_and_group_slugs)
user_types = cls.selected_user_types(mobile_user_and_group_slugs)
group_ids = cls.selected_group_ids(mobile_user_and_group_slugs)
user_type_filters = []
if HQUserType.ADMIN in user_types:
user_type_filters.append(user_es.admin_users())
if HQUserType.UNKNOWN in user_types:
user_type_filters.append(user_es.unknown_users())
user_type_filters.append(user_es.web_users())
if HQUserType.DEMO_USER in user_types:
user_type_filters.append(user_es.demo_users())
q = user_es.UserES().domain(domain)
if HQUserType.REGISTERED in user_types:
# return all users with selected user_types
user_type_filters.append(user_es.mobile_users())
return q.OR(*user_type_filters)
else:
# return matching user types and exact matches
id_filter = filters.OR(
filters.term("_id", user_ids),
filters.term("__group_ids", group_ids),
)
if user_type_filters:
return q.OR(
id_filter,
filters.OR(*user_type_filters),
)
else:
return q.filter(id_filter)
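    # Shape of the query built above: when REGISTERED ("all mobile workers") is among
    # the selected types, the result is UserES().domain(domain).OR(<user type filters,
    # including mobile_users()>); otherwise it is a domain-scoped OR of the exact
    # _id/__group_ids term filters and (if any) the selected user-type filters.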
@classmethod
def pull_users_and_groups(cls, domain, mobile_user_and_group_slugs,
include_inactive=False, limit_user_ids=None):
user_ids = cls.selected_user_ids(mobile_user_and_group_slugs)
user_types = cls.selected_user_types(mobile_user_and_group_slugs)
group_ids = cls.selected_group_ids(mobile_user_and_group_slugs)
users = []
if limit_user_ids:
user_ids = set(limit_user_ids).intersection(set(user_ids))
if user_ids or HQUserType.REGISTERED in user_types:
users = util.get_all_users_by_domain(
domain=domain,
user_ids=user_ids,
simplified=True,
CommCareUser=CommCareUser,
)
user_filter = tuple([HQUserToggle(id, id in user_types) for id in range(4)])
other_users = util.get_all_users_by_domain(
domain=domain,
user_filter=user_filter,
simplified=True,
CommCareUser=CommCareUser,
include_inactive=include_inactive
)
groups = [Group.get(g) for g in group_ids]
all_users = users + other_users
user_dict = {}
for group in groups:
user_dict["%s|%s" % (group.name, group._id)] = util.get_all_users_by_domain(
group=group,
simplified=True
)
users_in_groups = [user for sublist in user_dict.values() for user in sublist]
users_by_group = user_dict
combined_users = remove_dups(all_users + users_in_groups, "user_id")
return _UserData(
users=all_users,
admin_and_demo_users=other_users,
groups=groups,
users_by_group=users_by_group,
combined_users=combined_users,
)
@property
def options(self):
return [('t__0', _("[All mobile workers]"))]
@classmethod
def for_user(cls, user_id):
return {
cls.slug: 'u__%s' % user_id
}
@classmethod
def for_reporting_group(cls, group_id):
return {
cls.slug: 'g__%s' % group_id
}
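    # Example (illustrative ids): for_user('abc') returns {'emw': 'u__abc'} and
    # for_reporting_group('123') returns {'emw': 'g__123'}; callers can merge these
    # dicts into a report URL's querystring to preselect this filter.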
def get_user_toggle(request):
ufilter = group = individual = show_commtrack = None
try:
request_obj = request.POST if request.method == 'POST' else request.GET
if request_obj.get('ufilter', ''):
ufilter = request_obj.getlist('ufilter')
group = request_obj.get('group', '')
individual = request_obj.get('individual', '')
show_commtrack = request.project.commtrack_enabled
except (KeyError, AttributeError):
pass
show_filter = True
toggle = HQUserType.commtrack_defaults() if show_commtrack else HQUserType.use_defaults()
if ufilter and not (group or individual):
toggle = HQUserType.use_filter(ufilter)
elif group or individual:
show_filter = False
return toggle, show_filter
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import pymel.core as pm
from shapes import Shape
from character import Character
from joint import SpineJoints, JointChain
from curve import Curve
from anima.rig.drawNode import DrawNode
class Limb(object):
def __init__(self, limbName_in):
# Name of the Limb
self._limbName = limbName_in
# MainCtrl of the Limb
self._mainCtrl = None
# Creates a network for the Limb
        self._network = Network(limbName_in)  # TODO: Network is not imported in this module; clarify what it is and where it comes from
# Character Link
self._charName = None
def _validate_charName(self, charName_in):
"""validates the given charName_in"""
        if charName_in is None:
raise TypeError("%s.name can not be None!" %
self.__class__.__name__)
        if not isinstance(charName_in, Character):
raise TypeError("%s.name should be an instance of Character!" %
self.__class__.__name__)
if charName_in == "":
raise ValueError("%s.name can not be an empty string!" %
self.__class__.__name__)
return charName_in
def _validate_mainCtrl(self, mainCtrl):
"""validates the given mainCtrl"""
        if mainCtrl is not None:
            self._mainCtrl = pm.nt.Transform(mainCtrl)
def do_spine_ik(self, curve_in):
        print(curve_in)
        print(self._limbName)
class FkLimb(object):
def __init__(self):
# Name of the FK LIMB
self._fkLimbName = None
# FK Controllers
self._fkControllers = None
# FK Joints
self._fkJoints = None
# FK Utilities
self._fkUtilities = None
# FK NODES : self.fkLimbNodes = Nodes(fkLimbName)
# Used for network connection
self.fkLimbNodes = None
def create_fk_limb(self, name_in, positions, frontAxis=None):
self._fkLimbName = name_in
self._fkJoints = JointChain(name_in, positions)
self._fkJoints.orient_joint_chain(frontAxis=frontAxis)
class IkSpineLimb(object):
def __init__(self):
self._limbName = None
self._joints = None
self._curve = None
self._ikHandle = None
self._effector = None
self._clusters = []
self._network = None
self._scaleMD = None
self._factors = None
self._hipCtrl = None
self._shoulderCtrl = None
self._COGCtrl = None
self._stuff = []
# *************************************************************************
# IKSPINE BASE SETUP METHODS
def create_spine(self, name_in, curve_in, frontAxis="z"):
#self._network = Network(name_in)
self._limbName = name_in
        # You can change the creation method with a Joint Chain class:
# JointChain(name_in, jointPositions)
# self.joint.orientChain
self.joints = SpineJoints(name_in, curve_in)
self.joints.orient_spine(frontAxis)
ikSolver = pm.ikHandle(sj=self.joints.startJoint,
ee=self.joints.endJoint,
tws="linear",
cra=True,
pcv=False,
ns=2,
sol="ikSplineSolver",
name=(name_in + "_IKSpine"))
self._ikHandle = pm.rename(ikSolver[0], (name_in + "_IK_Spine"))
        self._effector = pm.rename(ikSolver[1],
(name_in + "_IK_SpineEffector"))
self._curve = Curve((name_in + "_IKSpineCurve"), ikSolver[2])
def create_clusters(self):
for i in range(0, self._curve.numCVs):
pm.select(self._curve.curveNode.cv[i])
tempClstr = DrawNode(
Shape.cluster,
self._limbName + "IK_SpineCl_#"
)
tempClstr.create_axialCor()
self.clusters.append(tempClstr)
#self.clusters[i].create_axialCor()
def make_stretchy(self):
        # TODO: check that the joint chain exists before building the stretch network.
        """Make the spline-IK joint chain stretch with its curve's arc length."""
self._scaleMD = pm.createNode("multiplyDivide",
n=self.limbName + "_scaleMD")
pm.connectAttr(self.curve.curveInfo.arcLength, self.scaleMD.input1X)
pm.setAttr(self.scaleMD.input2X, self.curve.arclen)
pm.setAttr(self.scaleMD.operation, 2)
for jnt in self.joints.jointChain:
factor = pm.createNode("multiplyDivide", n="factor_" + jnt)
pm.connectAttr(self.scaleMD.outputX, factor.input1X)
pm.setAttr(factor.input2X, (pm.getAttr(jnt.ty)))
pm.connectAttr(factor.outputX, jnt.ty)
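    # The stretch network above is the usual spline-IK ratio: scaleMD.outputX equals
    # the curve's live arcLength divided by its rest-pose arclen (operation 2 =
    # divide), and each joint's translateY is driven by its original ty times that
    # ratio, so the chain lengthens and shortens with the curve (this assumes the
    # joints are oriented so Y is the aim axis).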
def create_controllers(self):
        # TODO: check that self.clusters is not an empty list.
        """Create the hip, shoulder and COG controllers and attach the spine clusters to them."""
        # Hip Ctrl Create
self._hipCtrl = DrawNode(Shape.ikCtrl, 'hip_ctrl')
self.hipCtrl.temp_constrain(self.clusters[0].drawnNode)
self.hipCtrl.create_axialCor()
#parent Hip Clusters to Hip Control
pm.parent(self.clusters[0].axialCor, self.clusters[1].axialCor,
self.hipCtrl.drawnNode)
# Shoulder Ctrl Create
self._shoulderCtrl = DrawNode(Shape.circle, 'shoulder_ctrl')
self.shoulderCtrl.temp_constrain(
self.clusters[(len(self.clusters) - 1)].drawnNode)
self.shoulderCtrl.create_axialCor()
# COG Ctrl Create
self._COGCtrl = DrawNode(Shape.cube, 'COG_ctrl')
self._COGCtrl.temp_constrain(self.hipCtrl.drawnNode)
self._COGCtrl.create_axialCor()
#parent Shoulder Clusters to Shoulder Control
pm.parent(self.clusters[(len(self.clusters) - 1)].axialCor,
self.clusters[(len(self.clusters) - 2)].axialCor,
self._shoulderCtrl.drawnNode)
# Create Mid Cluster Control Transforms and Constrains
mid_cluster = self.clusters[2].drawnNode
tempCluster_const_1 = DrawNode(Shape.transform,
"C_IK_SpineCl_ConstGrp")
tempCluster_const_1.temp_constrain(mid_cluster)
pm.parent(tempCluster_const_1.drawnNode, self.hipCtrl.drawnNode)
tempCluster_const_2 = DrawNode(Shape.transform,
"C_IK_SpineCl_ConstGrp")
tempCluster_const_2.temp_constrain(mid_cluster)
pm.parent(tempCluster_const_2.drawnNode, self.shoulderCtrl.drawnNode)
tempCluster_const_1.constrain(mid_cluster, targetType='targetObj')
tempCluster_const_2.constrain(mid_cluster, targetType='targetObj')
self.stuff = tempCluster_const_1
self.stuff = tempCluster_const_2
        # If the spine has a zero joint, call its dedicated setup function.
self.unique_spine_zero_controller()
def unique_spine_zero_controller(self):
        # Create root constraint jnt under the hip control
# Duplicate zero Jnt
tempConst = pm.duplicate(self.joints.zeroJoint, po=True,
name=("Const_" + self.joints.zeroJoint ))
rootConst_jnt = tempConst[0]
pm.parent(rootConst_jnt, self.hipCtrl.drawnNode)
pm.pointConstraint(rootConst_jnt, self.joints.zeroJoint)
pm.orientConstraint(rootConst_jnt, self.joints.zeroJoint)
pm.setAttr(rootConst_jnt.visibility, 0)
self._stuff.append(rootConst_jnt)
def organize_DAG(self):
pass
    def fk_create(self, numOfFkCtrl=3):
fkJointsPos = []
fkJointsPos.append(self.joints.zeroPos)
for i in xrange(numOfFkCtrl, self.joints._numOfJoints - numOfFkCtrl,
numOfFkCtrl):
fkJointsPos.append(self.joints.jointPos[i])
fkJointsPos.append(self.joints.endPos)
        print(fkJointsPos)
fkSetup = FkLimb()
fkSetup.create_fk_limb("back_FK_", fkJointsPos)
# *************************************************************************
# PROPERTIES
@property
def spineJoints(self):
return self._joints
@spineJoints.setter
def spineJoints(self, joints_in):
self._joints = joints_in
@property
def curve(self):
return self._curve
@property
def clusters(self):
return self._clusters
@clusters.setter
def clusters(self, node_in):
self._clusters.append(node_in)
@property
def hipCtrl(self):
return self._hipCtrl
@hipCtrl.setter
def hipCtrl(self, name_in):
if self._hipCtrl is not None:
pm.rename(self._hipCtrl.drawnNode, name_in)
@property
def shoulderCtrl(self):
return self._shoulderCtrl
@shoulderCtrl.setter
def shoulderCtrl(self, name_in):
        if self._shoulderCtrl is not None:
pm.rename(self._shoulderCtrl.drawnNode, name_in)
@property
def stuff(self):
return self._stuff
@stuff.setter
def stuff(self, stuff_in):
self._stuff.append(stuff_in)
@property
def limbName(self):
return self._limbName
@property
def scaleMD(self):
return self._scaleMD
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
import os
import uuid
import fixtures
from migrate.versioning import api as migration_api
from migrate.versioning import repository
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
import sqlalchemy
from cinder.db import migration
import cinder.db.sqlalchemy.migrate_repo
from cinder.volume import group_types as volume_group_types
class MigrationsMixin(test_migrations.WalkVersionsMixin):
"""Test sqlalchemy-migrate migrations."""
BOOL_TYPE = sqlalchemy.types.BOOLEAN
TIME_TYPE = sqlalchemy.types.DATETIME
INTEGER_TYPE = sqlalchemy.types.INTEGER
VARCHAR_TYPE = sqlalchemy.types.VARCHAR
@property
def INIT_VERSION(self):
return migration.INIT_VERSION
@property
def REPOSITORY(self):
migrate_file = cinder.db.sqlalchemy.migrate_repo.__file__
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_file)))
@property
def migration_api(self):
return migration_api
@property
def migrate_engine(self):
return self.engine
def get_table_ref(self, engine, name, metadata):
metadata.bind = engine
return sqlalchemy.Table(name, metadata, autoload=True)
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations"""
def __init__(self, banned_resources=None):
super(MigrationsMixin.BannedDBSchemaOperations, self).__init__()
self._banned_resources = banned_resources or []
@staticmethod
def _explode(resource, op):
print('%s.%s()' % (resource, op)) # noqa
raise Exception(
'Operation %s.%s() is not allowed in a database migration' % (
resource, op))
def setUp(self):
super(MigrationsMixin.BannedDBSchemaOperations, self).setUp()
for thing in self._banned_resources:
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.drop' % thing,
lambda *a, **k: self._explode(thing, 'drop')))
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.alter' % thing,
lambda *a, **k: self._explode(thing, 'alter')))
def migrate_up(self, version, with_data=False):
# NOTE(dulek): This is a list of migrations where we allow dropping
# things. The rules for adding things here are very very specific.
# Insight on how to drop things from the DB in a backward-compatible
# manner is provided in Cinder's developer documentation.
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE
exceptions = [
# NOTE(dulek): 62 alters the column type from boolean to integer to
# fix the bug 1518363. If we've followed the guidelines for live
# schema upgrades we would end up either waiting 3 releases to fix
# a simple bug or trigger a rebuild index operation in migration
# (because constraint was impossible to delete without deleting
# other foreign key constraints). Either way it's harsh... We've
# decided to go with alter to minimise upgrade impact. The only
# consequence for deployments running recent MySQL is inability
# to perform volume-type-access modifications while running this
# migration.
62,
# NOTE(dulek): 66 sets reservations.usage_id to nullable. This is
# 100% backward compatible and according to MySQL docs such ALTER
# is performed with the same restrictions as column addition, which
# we of course allow.
66,
# NOTE(dulek): 73 drops tables and columns we've stopped using a
# release ago.
73,
# NOTE(ameade): 87 sets messages.request_id to nullable. This
# should be safe for the same reason as migration 66.
87,
]
# NOTE(dulek): We only started requiring things be additive in
# Mitaka, so ignore all migrations before that point.
MITAKA_START = 61
if version >= MITAKA_START and version not in exceptions:
banned = ['Table', 'Column']
else:
banned = None
with MigrationsMixin.BannedDBSchemaOperations(banned):
super(MigrationsMixin, self).migrate_up(version, with_data)
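    # Illustrative only: with the fixture above active for ['Table', 'Column'],
    # a migration script that tried something like
    #
    #     some_table.drop()          # sqlalchemy.Table.drop is patched
    #     some_column.alter(...)     # sqlalchemy.Column.alter is patched
    #
    # would raise "Operation Table.drop() is not allowed in a database
    # migration" instead of touching the schema.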
def _pre_upgrade_004(self, engine):
"""Change volume types to UUID """
data = {
'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test2',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test3',
'volume_type_id': 3},
],
'volume_types': [{'name': 'vtype1'},
{'name': 'vtype2'},
{'name': 'vtype3'},
],
'volume_type_extra_specs': [{'volume_type_id': 1,
'key': 'v1',
'value': 'hotep',
},
{'volume_type_id': 1,
'key': 'v2',
'value': 'bending rodrigez',
},
{'volume_type_id': 2,
'key': 'v3',
'value': 'bending rodrigez',
},
]}
volume_types = db_utils.get_table(engine, 'volume_types')
for vtype in data['volume_types']:
r = volume_types.insert().values(vtype).execute()
vtype['id'] = r.inserted_primary_key[0]
volume_type_es = db_utils.get_table(engine, 'volume_type_extra_specs')
for vtes in data['volume_type_extra_specs']:
r = volume_type_es.insert().values(vtes).execute()
vtes['id'] = r.inserted_primary_key[0]
volumes = db_utils.get_table(engine, 'volumes')
for vol in data['volumes']:
r = volumes.insert().values(vol).execute()
vol['id'] = r.inserted_primary_key[0]
return data
def _check_004(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
v1 = volumes.select(volumes.c.id ==
data['volumes'][0]['id']
).execute().first()
v2 = volumes.select(volumes.c.id ==
data['volumes'][1]['id']
).execute().first()
v3 = volumes.select(volumes.c.id ==
data['volumes'][2]['id']
).execute().first()
volume_types = db_utils.get_table(engine, 'volume_types')
vt1 = volume_types.select(volume_types.c.name ==
data['volume_types'][0]['name']
).execute().first()
vt2 = volume_types.select(volume_types.c.name ==
data['volume_types'][1]['name']
).execute().first()
vt3 = volume_types.select(volume_types.c.name ==
data['volume_types'][2]['name']
).execute().first()
vtes = db_utils.get_table(engine, 'volume_type_extra_specs')
vtes1 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][0]['key']
).execute().first()
vtes2 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][1]['key']
).execute().first()
vtes3 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][2]['key']
).execute().first()
self.assertEqual(v1['volume_type_id'], vt1['id'])
self.assertEqual(v2['volume_type_id'], vt1['id'])
self.assertEqual(v3['volume_type_id'], vt3['id'])
self.assertEqual(vtes1['volume_type_id'], vt1['id'])
self.assertEqual(vtes2['volume_type_id'], vt1['id'])
self.assertEqual(vtes3['volume_type_id'], vt2['id'])
def _check_005(self, engine, data):
"""Test that adding source_volid column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.source_volid.type,
self.VARCHAR_TYPE)
def _check_006(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_location.type,
self.VARCHAR_TYPE)
def _check_007(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
fkey, = snapshots.c.volume_id.foreign_keys
self.assertIsNotNone(fkey)
def _pre_upgrade_008(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"backups"))
def _check_008(self, engine, data):
"""Test that adding and removing the backups table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"backups"))
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(backups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.display_name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.display_description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.container.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.fail_reason.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.service_metadata.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.service.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.size.type,
self.INTEGER_TYPE)
self.assertIsInstance(backups.c.object_count.type,
self.INTEGER_TYPE)
def _check_009(self, engine, data):
"""Test adding snapshot_metadata table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
snapshot_metadata = db_utils.get_table(engine, 'snapshot_metadata')
self.assertIsInstance(snapshot_metadata.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(snapshot_metadata.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(snapshot_metadata.c.snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(snapshot_metadata.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(snapshot_metadata.c.value.type,
self.VARCHAR_TYPE)
def _check_010(self, engine, data):
"""Test adding transfers table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"transfers"))
transfers = db_utils.get_table(engine, 'transfers')
self.assertIsInstance(transfers.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(transfers.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.display_name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.salt.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.crypt_hash.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.expires_at.type,
self.TIME_TYPE)
def _check_011(self, engine, data):
"""Test adding transfers table works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIn('bootable', volumes.c)
self.assertIsInstance(volumes.c.bootable.type,
self.BOOL_TYPE)
def _check_012(self, engine, data):
"""Test that adding attached_host column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.attached_host.type,
self.VARCHAR_TYPE)
def _check_013(self, engine, data):
"""Test that adding provider_geometry column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_geometry.type,
self.VARCHAR_TYPE)
def _check_014(self, engine, data):
"""Test that adding _name_id column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c._name_id.type,
self.VARCHAR_TYPE)
def _check_015(self, engine, data):
"""Test removing migrations table works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
"migrations"))
def _check_016(self, engine, data):
"""Test that dropping xen storage manager tables works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_flavors'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_backend_config'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_volume'))
def _check_017(self, engine, data):
"""Test that added encryption information works correctly."""
# encryption key UUID
volumes = db_utils.get_table(engine, 'volumes')
self.assertIn('encryption_key_id', volumes.c)
self.assertIsInstance(volumes.c.encryption_key_id.type,
self.VARCHAR_TYPE)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIn('encryption_key_id', snapshots.c)
self.assertIsInstance(snapshots.c.encryption_key_id.type,
self.VARCHAR_TYPE)
self.assertIn('volume_type_id', snapshots.c)
self.assertIsInstance(snapshots.c.volume_type_id.type,
self.VARCHAR_TYPE)
# encryption types table
encryption = db_utils.get_table(engine, 'encryption')
self.assertIsInstance(encryption.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(encryption.c.cipher.type,
self.VARCHAR_TYPE)
self.assertIsInstance(encryption.c.key_size.type,
self.INTEGER_TYPE)
self.assertIsInstance(encryption.c.provider.type,
self.VARCHAR_TYPE)
def _check_018(self, engine, data):
"""Test that added qos_specs table works correctly."""
self.assertTrue(engine.dialect.has_table(
engine.connect(), "quality_of_service_specs"))
qos_specs = db_utils.get_table(engine, 'quality_of_service_specs')
self.assertIsInstance(qos_specs.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(qos_specs.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.specs_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.value.type,
self.VARCHAR_TYPE)
def _check_019(self, engine, data):
"""Test that adding migration_status column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.migration_status.type,
self.VARCHAR_TYPE)
def _check_020(self, engine, data):
"""Test adding volume_admin_metadata table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))
volume_admin_metadata = db_utils.get_table(engine,
'volume_admin_metadata')
self.assertIsInstance(volume_admin_metadata.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(volume_admin_metadata.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(volume_admin_metadata.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_admin_metadata.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_admin_metadata.c.value.type,
self.VARCHAR_TYPE)
def _verify_quota_defaults(self, engine):
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(3, num_defaults)
def _check_021(self, engine, data):
"""Test adding default data for quota classes works correctly."""
self._verify_quota_defaults(engine)
def _check_022(self, engine, data):
"""Test that adding disabled_reason column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.disabled_reason.type,
self.VARCHAR_TYPE)
def _check_023(self, engine, data):
"""Test that adding reservations index works correctly."""
reservations = db_utils.get_table(engine, 'reservations')
index_columns = []
for idx in reservations.indexes:
if idx.name == 'reservations_deleted_expire_idx':
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(['deleted', 'expire']),
sorted(index_columns))
def _check_024(self, engine, data):
"""Test adding replication columns to volume table."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.replication_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volumes.c.replication_extended_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volumes.c.replication_driver_data.type,
self.VARCHAR_TYPE)
def _check_025(self, engine, data):
"""Test adding table and columns for consistencygroups."""
# Test consistencygroup_id is in Table volumes
metadata = sqlalchemy.MetaData()
volumes = self.get_table_ref(engine, 'volumes', metadata)
self.assertIsInstance(volumes.c.consistencygroup_id.type,
self.VARCHAR_TYPE)
# Test cgsnapshot_id is in Table snapshots
snapshots = self.get_table_ref(engine, 'snapshots', metadata)
self.assertIsInstance(snapshots.c.cgsnapshot_id.type,
self.VARCHAR_TYPE)
# Test Table consistencygroups exists
self.assertTrue(engine.dialect.has_table(engine.connect(),
"consistencygroups"))
consistencygroups = self.get_table_ref(engine,
'consistencygroups',
metadata)
self.assertIsInstance(consistencygroups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(consistencygroups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.status.type,
self.VARCHAR_TYPE)
# Test Table cgsnapshots exists
self.assertTrue(engine.dialect.has_table(engine.connect(),
"cgsnapshots"))
cgsnapshots = self.get_table_ref(engine,
'cgsnapshots',
metadata)
self.assertIsInstance(cgsnapshots.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(cgsnapshots.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.consistencygroup_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.status.type,
self.VARCHAR_TYPE)
# Verify foreign keys are created
fkey, = volumes.c.consistencygroup_id.foreign_keys
self.assertEqual(consistencygroups.c.id, fkey.column)
self.assertEqual(1, len(volumes.foreign_keys))
fkey, = snapshots.c.cgsnapshot_id.foreign_keys
self.assertEqual(cgsnapshots.c.id, fkey.column)
fkey, = snapshots.c.volume_id.foreign_keys
self.assertEqual(volumes.c.id, fkey.column)
# 2 foreign keys in Table snapshots
self.assertEqual(2, len(snapshots.foreign_keys))
def _pre_upgrade_026(self, engine):
"""Test adding default data for consistencygroups quota class."""
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(3, num_defaults)
def _check_026(self, engine, data):
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(4, num_defaults)
def _check_032(self, engine, data):
"""Test adding volume_type_projects table works correctly."""
volume_type_projects = db_utils.get_table(engine,
'volume_type_projects')
self.assertIsInstance(volume_type_projects.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(volume_type_projects.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(volume_type_projects.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_type_projects.c.project_id.type,
self.VARCHAR_TYPE)
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertIsInstance(volume_types.c.is_public.type,
self.BOOL_TYPE)
def _check_033(self, engine, data):
"""Test adding encryption_id column to encryption table."""
encryptions = db_utils.get_table(engine, 'encryption')
self.assertIsInstance(encryptions.c.encryption_id.type,
self.VARCHAR_TYPE)
def _check_034(self, engine, data):
"""Test adding description columns to volume_types table."""
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertIsInstance(volume_types.c.description.type,
self.VARCHAR_TYPE)
def _check_035(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_id.type,
self.VARCHAR_TYPE)
def _check_036(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_id.type,
self.VARCHAR_TYPE)
def _check_037(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.cgsnapshot_id.type,
self.VARCHAR_TYPE)
def _check_038(self, engine, data):
"""Test adding and removing driver_initiator_data table."""
has_table = engine.dialect.has_table(engine.connect(),
"driver_initiator_data")
self.assertTrue(has_table)
private_data = db_utils.get_table(
engine,
'driver_initiator_data'
)
self.assertIsInstance(private_data.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.initiator.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.namespace.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.value.type,
self.VARCHAR_TYPE)
def _check_039(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.parent_id.type,
self.VARCHAR_TYPE)
    def _check_040(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('instance_uuid', volumes.c)
self.assertNotIn('attached_host', volumes.c)
self.assertNotIn('attach_time', volumes.c)
self.assertNotIn('mountpoint', volumes.c)
self.assertIsInstance(volumes.c.multiattach.type,
self.BOOL_TYPE)
attachments = db_utils.get_table(engine, 'volume_attachment')
self.assertIsInstance(attachments.c.attach_mode.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.instance_uuid.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.attached_host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.mountpoint.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.attach_status.type,
self.VARCHAR_TYPE)
def _check_041(self, engine, data):
"""Test that adding modified_at column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.modified_at.type,
self.TIME_TYPE)
def _check_048(self, engine, data):
quotas = db_utils.get_table(engine, 'quotas')
self.assertIsInstance(quotas.c.allocated.type,
self.INTEGER_TYPE)
def _check_049(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.temp_volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.temp_snapshot_id.type,
self.VARCHAR_TYPE)
def _check_050(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.previous_status.type,
self.VARCHAR_TYPE)
def _check_051(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.source_cgid.type,
self.VARCHAR_TYPE)
def _check_052(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_auth.type,
self.VARCHAR_TYPE)
def _check_053(self, engine, data):
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.rpc_current_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.rpc_available_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.object_current_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.object_available_version.type,
self.VARCHAR_TYPE)
def _check_054(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.num_dependent_backups.type,
self.INTEGER_TYPE)
def _check_055(self, engine, data):
"""Test adding image_volume_cache_entries table."""
has_table = engine.dialect.has_table(engine.connect(),
"image_volume_cache_entries")
self.assertTrue(has_table)
private_data = db_utils.get_table(
engine,
'image_volume_cache_entries'
)
self.assertIsInstance(private_data.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.image_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.image_updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.size.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.last_used.type,
self.TIME_TYPE)
def _check_061(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.data_timestamp.type,
self.TIME_TYPE)
def _check_062(self, engine, data):
volume_type_projects = db_utils.get_table(engine,
'volume_type_projects')
self.assertIsInstance(volume_type_projects.c.id.type,
self.INTEGER_TYPE)
def _check_064(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.restore_volume_id.type,
self.VARCHAR_TYPE)
def _check_065(self, engine, data):
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.replication_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.frozen.type,
self.BOOL_TYPE)
self.assertIsInstance(services.c.active_backend_id.type,
self.VARCHAR_TYPE)
def _check_066(self, engine, data):
reservations = db_utils.get_table(engine, 'reservations')
self.assertIsInstance(reservations.c.allocated_id.type,
self.INTEGER_TYPE)
def __check_cinderbase_fields(self, columns):
"""Check fields inherited from CinderBase ORM class."""
self.assertIsInstance(columns.created_at.type, self.TIME_TYPE)
self.assertIsInstance(columns.updated_at.type, self.TIME_TYPE)
self.assertIsInstance(columns.deleted_at.type, self.TIME_TYPE)
self.assertIsInstance(columns.deleted.type, self.BOOL_TYPE)
def _check_067(self, engine, data):
iscsi_targets = db_utils.get_table(engine, 'iscsi_targets')
fkey, = iscsi_targets.c.volume_id.foreign_keys
self.assertIsNotNone(fkey)
def _check_074(self, engine, data):
"""Test adding message table."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"messages"))
messages = db_utils.get_table(engine, 'messages')
self.assertIsInstance(messages.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(messages.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(messages.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(messages.c.message_level.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.request_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.resource_uuid.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.event_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.resource_type.type,
self.VARCHAR_TYPE)
def _check_075(self, engine, data):
"""Test adding cluster table and cluster_id fields."""
self.assertTrue(engine.dialect.has_table(engine.connect(), 'clusters'))
clusters = db_utils.get_table(engine, 'clusters')
columns = clusters.c
self.__check_cinderbase_fields(columns)
# Cluster specific fields
self.assertIsInstance(columns.id.type, self.INTEGER_TYPE)
self.assertIsInstance(columns.name.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.binary.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.disabled.type, self.BOOL_TYPE)
self.assertIsInstance(columns.disabled_reason.type, self.VARCHAR_TYPE)
# Check that we have added cluster_name field to all required tables
for table_name in ('services', 'consistencygroups', 'volumes'):
table = db_utils.get_table(engine, table_name)
self.assertIsInstance(table.c.cluster_name.type,
self.VARCHAR_TYPE)
def _check_076(self, engine, data):
workers = db_utils.get_table(engine, 'workers')
columns = workers.c
self.__check_cinderbase_fields(columns)
# Workers specific fields
self.assertIsInstance(columns.id.type, self.INTEGER_TYPE)
self.assertIsInstance(columns.resource_type.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.resource_id.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.status.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.service_id.type, self.INTEGER_TYPE)
def _check_077(self, engine, data):
"""Test adding group types and specs tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_types"))
group_types = db_utils.get_table(engine, 'group_types')
self.assertIsInstance(group_types.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(group_types.c.is_public.type,
self.BOOL_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_type_specs"))
group_specs = db_utils.get_table(engine, 'group_type_specs')
self.assertIsInstance(group_specs.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(group_specs.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.value.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.deleted.type,
self.BOOL_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_type_projects"))
type_projects = db_utils.get_table(engine, 'group_type_projects')
self.assertIsInstance(type_projects.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(type_projects.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(type_projects.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(type_projects.c.project_id.type,
self.VARCHAR_TYPE)
def _check_078(self, engine, data):
"""Test adding groups tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"groups"))
groups = db_utils.get_table(engine, 'groups')
self.assertIsInstance(groups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(groups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.status.type,
self.VARCHAR_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_volume_type_mapping"))
mapping = db_utils.get_table(engine, 'group_volume_type_mapping')
self.assertIsInstance(mapping.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(mapping.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(mapping.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(mapping.c.group_id.type,
self.VARCHAR_TYPE)
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.group_id.type,
self.VARCHAR_TYPE)
quota_classes = db_utils.get_table(engine, 'quota_classes')
rows = quota_classes.count().\
where(quota_classes.c.resource == 'groups').\
execute().scalar()
self.assertEqual(1, rows)
def _check_079(self, engine, data):
"""Test adding group_snapshots tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_snapshots"))
group_snapshots = db_utils.get_table(engine, 'group_snapshots')
self.assertIsInstance(group_snapshots.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(group_snapshots.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.status.type,
self.VARCHAR_TYPE)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
groups = db_utils.get_table(engine, 'groups')
self.assertIsInstance(groups.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.source_group_id.type,
self.VARCHAR_TYPE)
def _check_086(self, engine, data):
"""Test inserting default cgsnapshot group type."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_types"))
group_types = db_utils.get_table(engine, 'group_types')
t1 = (group_types.select(group_types.c.name ==
volume_group_types.DEFAULT_CGSNAPSHOT_TYPE).
execute().first())
self.assertIsNotNone(t1)
group_specs = db_utils.get_table(engine, 'group_type_specs')
        specs = group_specs.select(
            sqlalchemy.and_(
                group_specs.c.group_type_id == t1.id,
                group_specs.c.key == 'consistent_group_snapshot_enabled')
        ).execute().first()
self.assertIsNotNone(specs)
self.assertEqual('<is> True', specs.value)
def _check_087(self, engine, data):
"""Test request_id column in messages is nullable."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"messages"))
messages = db_utils.get_table(engine, 'messages')
self.assertIsInstance(messages.c.request_id.type,
self.VARCHAR_TYPE)
self.assertTrue(messages.c.request_id.nullable)
def _check_088(self, engine, data):
"""Test adding replication data to cluster table."""
clusters = db_utils.get_table(engine, 'clusters')
self.assertIsInstance(clusters.c.replication_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(clusters.c.active_backend_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(clusters.c.frozen.type,
self.BOOL_TYPE)
def _check_089(self, engine, data):
"""Test adding cluster_name to image volume cache table."""
image_cache = db_utils.get_table(engine, 'image_volume_cache_entries')
self.assertIsInstance(image_cache.c.cluster_name.type,
self.VARCHAR_TYPE)
def _check_090(self, engine, data):
"""Test adding race_preventer to workers table."""
workers = db_utils.get_table(engine, 'workers')
self.assertIsInstance(workers.c.race_preventer.type,
self.INTEGER_TYPE)
def test_walk_versions(self):
self.walk_versions(False, False)
class TestSqliteMigrations(test_base.DbTestCase,
MigrationsMixin):
pass
class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase,
MigrationsMixin):
BOOL_TYPE = sqlalchemy.dialects.mysql.TINYINT
def test_mysql_innodb(self):
"""Test that table creation on mysql only builds InnoDB tables."""
        # Sanity check: run every migration first so there is a schema to
        # inspect before querying information_schema below.
migration.db_sync(engine=self.migrate_engine)
total = self.migrate_engine.execute(
"SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='{0}'".format(
self.migrate_engine.url.database))
self.assertGreater(total.scalar(), 0,
msg="No tables found. Wrong schema?")
noninnodb = self.migrate_engine.execute(
"SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='openstack_citest' "
"and ENGINE!='InnoDB' "
"and TABLE_NAME!='migrate_version'")
count = noninnodb.scalar()
        self.assertEqual(0, count, "%d non-InnoDB tables created" % count)
class TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase,
MigrationsMixin):
TIME_TYPE = sqlalchemy.types.TIMESTAMP
|
|
import sys
import time
from django.conf import settings
from django.db import transaction, connection
from django.db.utils import ConnectionHandler, DEFAULT_DB_ALIAS, DatabaseError
from django.test import (TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from django.utils import unittest
from models import Person
# Some tests require threading, which might not be available. So create a
# skip-test decorator for those test functions.
try:
import threading
except ImportError:
threading = None
requires_threading = unittest.skipUnless(threading, 'requires threading')
class SelectForUpdateTests(TransactionTestCase):
def setUp(self):
transaction.enter_transaction_management(True)
transaction.managed(True)
self.person = Person.objects.create(name='Reinhardt')
# We have to commit here so that code in run_select_for_update can
# see this data.
transaction.commit()
# We need another database connection to test that one connection
# issuing a SELECT ... FOR UPDATE will block.
new_connections = ConnectionHandler(settings.DATABASES)
self.new_connection = new_connections[DEFAULT_DB_ALIAS]
# We need to set settings.DEBUG to True so we can capture
# the output SQL to examine.
self._old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
try:
# We don't really care if this fails - some of the tests will set
# this in the course of their run.
transaction.managed(False)
transaction.leave_transaction_management()
except transaction.TransactionManagementError:
pass
self.new_connection.close()
settings.DEBUG = self._old_debug
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
def start_blocking_transaction(self):
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
result = self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection._rollback()
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
        return for_update_sql in sql
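    # Illustrative sketch only (the values shown are assumed, not taken from a
    # real run): with settings.DEBUG = True, Django records executed
    # statements on the connection, so the entry examined above looks roughly
    # like
    #
    #     connection.queries[-1] == {'sql': 'SELECT ... FOR UPDATE',
    #                                'time': '0.002'}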
def check_exc(self, exc):
self.failUnless(isinstance(exc, DatabaseError))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
# In Python 2.6 beta and some final releases, exceptions raised in __len__
# are swallowed (Python issue 1242657), so these cases return an empty
# list, rather than raising an exception. Not a lot we can do about that,
# unfortunately, due to the way Python handles list() calls internally.
# Thus, we skip this test for Python 2.6.
@requires_threading
@skipUnlessDBFeature('has_select_for_update_nowait')
@unittest.skipIf(sys.version_info[:2] == (2, 6), "Python version is 2.6")
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.check_exc(status[-1])
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
In general, this will be run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
transaction.enter_transaction_management(True)
transaction.managed(True)
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
transaction.commit()
except DatabaseError, e:
status.append(e)
except Exception, e:
raise
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError, 'Thread did not run and block'
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.failIf(thread.isAlive())
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError, e:
status.append(e)
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.check_exc(status[-1])
@skipUnlessDBFeature('has_select_for_update')
def test_transaction_dirty_managed(self):
""" Check that a select_for_update sets the transaction to be
dirty when executed under txn management. Setting the txn dirty
means that it will be either committed or rolled back by Django,
which will release any locks held by the SELECT FOR UPDATE.
"""
people = list(Person.objects.select_for_update())
self.assertTrue(transaction.is_dirty())
@skipUnlessDBFeature('has_select_for_update')
def test_transaction_not_dirty_unmanaged(self):
""" If we're not under txn management, the txn will never be
marked as dirty.
"""
transaction.managed(False)
transaction.leave_transaction_management()
people = list(Person.objects.select_for_update())
self.assertFalse(transaction.is_dirty())
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parsing Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_parsing_ops import *
# pylint: enable=wildcard-import,undefined-variable
from tensorflow.python.platform import tf_logging
ops.NotDifferentiable("DecodeRaw")
ops.NotDifferentiable("ParseTensor")
ops.NotDifferentiable("StringToNumber")
class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
"""Configuration for parsing a variable-length input feature.
Fields:
dtype: Data type of input.
"""
pass
class SparseFeature(
collections.namedtuple(
"SparseFeature",
["index_key", "value_key", "dtype", "size", "already_sorted"])):
"""Configuration for parsing a sparse input feature from an `Example`.
  Note: preferably use `VarLenFeature` (possibly in combination with a
  `SequenceExample`) in order to parse out `SparseTensor`s instead of
  `SparseFeature` due to its simplicity.
Closely mimicking the `SparseTensor` that will be obtained by parsing an
`Example` with a `SparseFeature` config, a `SparseFeature` contains a
* `value_key`: The name of key for a `Feature` in the `Example` whose parsed
`Tensor` will be the resulting `SparseTensor.values`.
  * `index_key`: A list of names, one for each dimension in the resulting
    `SparseTensor`. `indices[i][dim]`, i.e. the position of the `i`-th value in
    dimension `dim`, is taken from the `i`-th value of the feature with key
    `index_key[dim]` in the `Example`.
* `size`: A list of ints for the resulting `SparseTensor.dense_shape`.
For example, we can represent the following 2D `SparseTensor`
```python
SparseTensor(indices=[[3, 1], [20, 0]],
values=[0.5, -1.0]
dense_shape=[100, 3])
```
with an `Example` input proto
```python
features {
feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } }
feature { key: "ix0" value { int64_list { value: [ 3, 20 ] } } }
feature { key: "ix1" value { int64_list { value: [ 1, 0 ] } } }
}
```
and `SparseFeature` config with 2 `index_key`s
```python
SparseFeature(index_key=["ix0", "ix1"],
value_key="val",
dtype=tf.float32,
size=[100, 3])
```
Fields:
index_key: A single string name or a list of string names of index features.
For each key the underlying feature's type must be `int64` and its length
must always match that of the `value_key` feature.
To represent `SparseTensor`s with a `dense_shape` of `rank` higher than 1
a list of length `rank` should be used.
value_key: Name of value feature. The underlying feature's type must
be `dtype` and its length must always match that of all the `index_key`s'
features.
dtype: Data type of the `value_key` feature.
    size: A Python int or list thereof specifying the dense shape. Should be a
      list if and only if `index_key` is a list, in which case it must have the
      same length as `index_key`. For each entry `i`, all values in the
      `index_key[i]` feature must be in `[0, size[i])`.
already_sorted: A Python boolean to specify whether the values in
`value_key` are already sorted by their index position. If so skip
sorting. False by default (optional).
"""
pass
SparseFeature.__new__.__defaults__ = (False,)
class FixedLenFeature(collections.namedtuple(
"FixedLenFeature", ["shape", "dtype", "default_value"])):
"""Configuration for parsing a fixed-length input feature.
To treat sparse input as dense, provide a `default_value`; otherwise,
the parse functions will fail on any examples missing this feature.
Fields:
shape: Shape of input data.
dtype: Data type of input.
default_value: Value to be used if an example is missing this feature. It
must be compatible with `dtype` and of the specified `shape`.
"""
pass
FixedLenFeature.__new__.__defaults__ = (None,)
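# Example (illustrative; the key names are made up): a required fixed-length
# feature and an optional one with a fallback used when the key is absent.
#
#     features = {
#         "label": FixedLenFeature(shape=[], dtype=dtypes.int64),
#         "weight": FixedLenFeature(shape=[], dtype=dtypes.float32,
#                                   default_value=0.0),
#     }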
class FixedLenSequenceFeature(collections.namedtuple(
"FixedLenSequenceFeature",
["shape", "dtype", "allow_missing", "default_value"])):
"""Configuration for parsing a variable-length input feature into a `Tensor`.
The resulting `Tensor` of parsing a single `SequenceExample` or `Example` has
a static `shape` of `[None] + shape` and the specified `dtype`.
The resulting `Tensor` of parsing a `batch_size` many `Example`s has
a static `shape` of `[batch_size, None] + shape` and the specified `dtype`.
The entries in the `batch` from different `Examples` will be padded with
`default_value` to the maximum length present in the `batch`.
To treat a sparse input as dense, provide `allow_missing=True`; otherwise,
the parse functions will fail on any examples missing this feature.
Fields:
shape: Shape of input data for dimension 2 and higher. First dimension is
of variable length `None`.
dtype: Data type of input.
allow_missing: Whether to allow this feature to be missing from a feature
list item. Is available only for parsing `SequenceExample` not for
parsing `Examples`.
default_value: Scalar value to be used to pad multiple `Example`s to their
maximum length. Irrelevant for parsing a single `Example` or
`SequenceExample`. Defaults to "" for dtype string and 0 otherwise
(optional).
"""
pass
FixedLenSequenceFeature.__new__.__defaults__ = (False, None)
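# Example (illustrative): a variable-length list of int64 tokens per
# `SequenceExample` feature list.  When a batch is parsed, shorter examples
# are padded with ``default_value`` up to the longest list in the batch, per
# the docstring above.
#
#     tokens = FixedLenSequenceFeature(shape=[], dtype=dtypes.int64,
#                                      allow_missing=True, default_value=0)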
def _features_to_raw_params(features, types):
"""Split feature tuples into raw params used by `gen_parsing_ops`.
Args:
features: A `dict` mapping feature keys to objects of a type in `types`.
types: Type of features to allow, among `FixedLenFeature`, `VarLenFeature`,
`SparseFeature`, and `FixedLenSequenceFeature`.
Returns:
Tuple of `sparse_keys`, `sparse_types`, `dense_keys`, `dense_types`,
`dense_defaults`, `dense_shapes`.
Raises:
ValueError: if `features` contains an item not in `types`, or an invalid
feature.
"""
sparse_keys = []
sparse_types = []
dense_keys = []
dense_types = []
dense_defaults = {}
dense_shapes = []
if features:
# NOTE: We iterate over sorted keys to keep things deterministic.
for key in sorted(features.keys()):
feature = features[key]
if isinstance(feature, VarLenFeature):
if VarLenFeature not in types:
raise ValueError("Unsupported VarLenFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
sparse_keys.append(key)
sparse_types.append(feature.dtype)
elif isinstance(feature, SparseFeature):
if SparseFeature not in types:
raise ValueError("Unsupported SparseFeature %s.", feature)
if not feature.index_key:
raise ValueError(
"Missing index_key for SparseFeature %s.", feature)
if not feature.value_key:
raise ValueError(
"Missing value_key for SparseFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
index_keys = feature.index_key
if isinstance(index_keys, str):
index_keys = [index_keys]
elif len(index_keys) > 1:
tf_logging.warning("SparseFeature is a complicated feature config "
"and should only be used after careful "
"consideration of VarLenFeature.")
for index_key in sorted(index_keys):
if index_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(index_key)]
if dtype != dtypes.int64:
raise ValueError("Conflicting type %s vs int64 for feature %s." %
(dtype, index_key))
else:
sparse_keys.append(index_key)
sparse_types.append(dtypes.int64)
if feature.value_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(feature.value_key)]
if dtype != feature.dtype:
raise ValueError("Conflicting type %s vs %s for feature %s." % (
dtype, feature.dtype, feature.value_key))
else:
sparse_keys.append(feature.value_key)
sparse_types.append(feature.dtype)
elif isinstance(feature, FixedLenFeature):
if FixedLenFeature not in types:
raise ValueError("Unsupported FixedLenFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
if feature.shape is None:
raise ValueError("Missing shape for feature %s." % key)
feature_tensor_shape = tensor_shape.as_shape(feature.shape)
if (feature.shape and feature_tensor_shape.ndims and
feature_tensor_shape.dims[0].value is None):
raise ValueError("First dimension of shape for feature %s unknown. "
"Consider using FixedLenSequenceFeature." % key)
if (feature.shape is not None and
not feature_tensor_shape.is_fully_defined()):
raise ValueError("All dimensions of shape for feature %s need to be "
"known but received %s." % (key, str(feature.shape)))
dense_keys.append(key)
dense_shapes.append(feature.shape)
dense_types.append(feature.dtype)
if feature.default_value is not None:
dense_defaults[key] = feature.default_value
elif isinstance(feature, FixedLenSequenceFeature):
if FixedLenSequenceFeature not in types:
raise ValueError("Unsupported FixedLenSequenceFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
if feature.shape is None:
raise ValueError("Missing shape for feature %s." % key)
dense_keys.append(key)
dense_shapes.append(feature.shape)
dense_types.append(feature.dtype)
if feature.allow_missing:
dense_defaults[key] = None
if feature.default_value is not None:
dense_defaults[key] = feature.default_value
else:
raise ValueError("Invalid feature %s:%s." % (key, feature))
return (
sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes)
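# Rough sketch of the mapping performed above (the feature keys are invented):
#
#     features = {"age": FixedLenFeature([1], dtypes.int64, default_value=-1),
#                 "tags": VarLenFeature(dtypes.string)}
#
# yields sparse_keys=["tags"], sparse_types=[dtypes.string],
# dense_keys=["age"], dense_types=[dtypes.int64], dense_defaults={"age": -1}
# and dense_shapes=[[1]].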
def _construct_sparse_tensors_for_sparse_features(features, tensor_dict):
"""Merges SparseTensors of indices and values of SparseFeatures.
  Constructs new dict based on `tensor_dict`. For `SparseFeature`s in the values
  of `features`, expects their `index_key`s and `value_key`s to be present in
  `tensor_dict` mapping to `SparseTensor`s. Constructs a single `SparseTensor`
  from them, and adds it to the result with the key from `features`.
Copies other keys and values from `tensor_dict` with keys present in
`features`.
Args:
features: A `dict` mapping feature keys to `SparseFeature` values.
Values of other types will be ignored.
tensor_dict: A `dict` mapping feature keys to `Tensor` and `SparseTensor`
values. Expected to contain keys of the `SparseFeature`s' `index_key`s and
`value_key`s and mapping them to `SparseTensor`s.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Similar
to `tensor_dict` except each `SparseFeature`s in `features` results in a
single `SparseTensor`.
"""
tensor_dict = dict(tensor_dict) # Do not modify argument passed in.
# Construct SparseTensors for SparseFeatures.
for key in sorted(features.keys()):
feature = features[key]
if isinstance(feature, SparseFeature):
if isinstance(feature.index_key, str):
sp_ids = tensor_dict[feature.index_key]
else:
sp_ids = [tensor_dict[index_key] for index_key in feature.index_key]
sp_values = tensor_dict[feature.value_key]
tensor_dict[key] = sparse_ops.sparse_merge(
sp_ids,
sp_values,
vocab_size=feature.size,
already_sorted=feature.already_sorted)
# Remove tensors from dictionary that were only used to construct
# SparseTensors for SparseFeature.
for key in set(tensor_dict) - set(features):
del tensor_dict[key]
return tensor_dict
def parse_example(serialized, features, name=None, example_names=None):
# pylint: disable=line-too-long
"""Parses `Example` protos into a `dict` of tensors.
Parses a number of serialized [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
protos given in `serialized`. We refer to `serialized` as a batch with
`batch_size` many entries of individual `Example` protos.
`example_names` may contain descriptive names for the corresponding serialized
protos. These may be useful for debugging purposes, but they have no effect on
the output. If not `None`, `example_names` must be the same length as
`serialized`.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[batch, index]` where `batch`
identifies the example in `serialized`, and `index` is the value's index in
the list of values associated with that feature and example.
Each `SparseFeature` maps to a `SparseTensor` of the specified type
representing a Tensor of `dense_shape` `[batch_size] + SparseFeature.size`.
Its `values` come from the feature in the examples with key `value_key`.
A `values[i]` comes from a position `k` in the feature of an example at batch
entry `batch`. This positional information is recorded in `indices[i]` as
`[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of
  the feature with key `SparseFeature.index_key[j]` in that example.
In other words, we split the indices (except the first index indicating the
batch entry) of a `SparseTensor` by dimension into different features of the
`Example`. Due to its complexity a `VarLenFeature` should be preferred over a
`SparseFeature` whenever possible.
Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
value, we will fail if that `Feature` is missing from any example in
`serialized`.
Each `FixedLenSequenceFeature` `df` maps to a `Tensor` of the specified type
(or `tf.float32` if not specified) and shape
`(serialized.size(), None) + df.shape`.
All examples in `serialized` will be padded with `default_value` along the
second dimension.
Examples:
For example, if one expects a `tf.float32` `VarLenFeature` `ft` and three
serialized `Example`s are provided:
```
serialized = [
features
{ feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } },
features
      { feature {} },
features
{ feature { key: "ft" value { float_list { value: [3.0] } } }
]
```
then the output will look like:
```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
dense_shape=(3, 2)) }
```
If instead a `FixedLenSequenceFeature` with `default_value = -1.0` and
`shape=[]` is used then the output will look like:
```
{"ft": [[1.0, 2.0], [3.0, -1.0]]}
```
Given two `Example` input protos in `serialized`:
```
[
features {
feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } }
feature { key: "gps" value { float_list { value: [] } } }
},
features {
feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } }
feature { key: "dank" value { int64_list { value: [ 42 ] } } }
feature { key: "gps" value { } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"kw": VarLenFeature(tf.string),
"dank": VarLenFeature(tf.int64),
"gps": VarLenFeature(tf.float32),
}
```
Then the output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["knit", "big", "emmy"]
dense_shape=[2, 2]),
"dank": SparseTensor(
indices=[[1, 0]],
values=[42],
dense_shape=[2, 1]),
"gps": SparseTensor(
indices=[],
values=[],
dense_shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
```
[
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
}
]
```
We can use arguments:
```
example_names: ["input0", "input1"],
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
An alternative to `VarLenFeature` to obtain a `SparseTensor` is
`SparseFeature`. For example, given two `Example` input protos in
`serialized`:
```
[
features {
feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } }
feature { key: "ix" value { int64_list { value: [ 3, 20 ] } } }
},
features {
feature { key: "val" value { float_list { value: [ 0.0 ] } } }
feature { key: "ix" value { int64_list { value: [ 42 ] } } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"sparse": SparseFeature(
index_key="ix", value_key="val", dtype=tf.float32, size=100),
}
```
Then the output is a dictionary:
```python
{
"sparse": SparseTensor(
indices=[[0, 3], [0, 20], [1, 42]],
        values=[0.5, -1.0, 0.0],
dense_shape=[2, 100]),
}
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing: features was %s." % features)
if features:
modified_features = dict(features) # Create a copy to modify
for key, feature in features.items():
if isinstance(feature, FixedLenSequenceFeature):
if not feature.allow_missing:
raise ValueError("Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True.")
modified_features[key] = FixedLenSequenceFeature(
[None] + list(feature.shape),
feature.dtype,
feature.allow_missing,
feature.default_value)
features = modified_features
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = _features_to_raw_params(
features,
[VarLenFeature, SparseFeature, FixedLenFeature, FixedLenSequenceFeature])
outputs = _parse_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
return _construct_sparse_tensors_for_sparse_features(features, outputs)
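# Illustrative usage sketch, not part of the library API: parse a batch of
# serialized `Example` protos with one fixed-length and one variable-length
# feature. The feature keys ("age", "kw") and the `serialized_batch` tensor
# are hypothetical placeholders chosen for this sketch.
def _example_parse_example_usage(serialized_batch):
  features = {
      "age": FixedLenFeature([], dtype=dtypes.int64, default_value=-1),
      "kw": VarLenFeature(dtypes.string),
  }
  # "age" maps to a dense Tensor of shape [batch_size]; "kw" maps to a
  # SparseTensor whose indices are [batch, index].
  return parse_example(serialized_batch, features)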
def _parse_example_raw(serialized,
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name=None):
"""Parses `Example` protos.
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
name: A name for this operation (optional).
Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
with ops.name_scope(name, "ParseExample", [serialized, names]):
names = [] if names is None else names
dense_defaults = {} if dense_defaults is None else dense_defaults
sparse_keys = [] if sparse_keys is None else sparse_keys
sparse_types = [] if sparse_types is None else sparse_types
dense_keys = [] if dense_keys is None else dense_keys
dense_types = [] if dense_types is None else dense_types
dense_shapes = (
[[]] * len(dense_keys) if dense_shapes is None else dense_shapes)
num_dense = len(dense_keys)
num_sparse = len(sparse_keys)
if len(dense_shapes) != num_dense:
raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d"
% (len(dense_shapes), num_dense))
if len(dense_types) != num_dense:
raise ValueError("len(dense_types) != len(num_dense): %d vs. %d"
% (len(dense_types), num_dense))
if len(sparse_types) != num_sparse:
raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d"
% (len(sparse_types), num_sparse))
if num_dense + num_sparse == 0:
raise ValueError("Must provide at least one sparse key or dense key")
if not set(dense_keys).isdisjoint(set(sparse_keys)):
raise ValueError(
"Dense and sparse keys must not intersect; intersection: %s" %
set(dense_keys).intersection(set(sparse_keys)))
# Convert dense_shapes to TensorShape object.
dense_shapes = [tensor_shape.as_shape(shape) for shape in dense_shapes]
dense_defaults_vec = []
for i, key in enumerate(dense_keys):
default_value = dense_defaults.get(key)
dense_shape = dense_shapes[i]
if (dense_shape.ndims is not None and dense_shape.ndims > 0 and
dense_shape[0].value is None):
# Variable stride dense shape, the default value should be a
# scalar padding value
if default_value is None:
default_value = ops.convert_to_tensor(
"" if dense_types[i] == dtypes.string else 0,
dtype=dense_types[i])
else:
# Reshape to a scalar to ensure user gets an error if they
# provide a tensor that's not intended to be a padding value
# (0 or 2+ elements).
key_name = "padding_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, [])
else:
if default_value is None:
default_value = constant_op.constant([], dtype=dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, dense_shape)
dense_defaults_vec.append(default_value)
# Finally, convert dense_shapes to TensorShapeProto
dense_shapes = [shape.as_proto() for shape in dense_shapes]
# pylint: disable=protected-access
outputs = gen_parsing_ops._parse_example(
serialized=serialized,
names=names,
dense_defaults=dense_defaults_vec,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_shapes=dense_shapes,
name=name)
# pylint: enable=protected-access
(sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(sparse_indices, sparse_values, sparse_shapes)]
return dict(zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
def parse_single_example(serialized, features, name=None, example_names=None):
"""Parses a single `Example` proto.
Similar to `parse_example`, except:
  For dense tensors, the returned `Tensor` is identical to the output of
  `parse_example`, except that there is no batch dimension: the output shape is
  the same as the shape given in `dense_shape`.
  For `SparseTensor`s, the first (batch) column of the indices matrix is
  removed (the indices matrix becomes a column vector), the values vector is
  unchanged, and the first (`batch_size`) entry of the shape vector is removed
  (it is now a single-element vector).
One might see performance advantages by batching `Example` protos with
`parse_example` instead of using this function directly.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See `_parse_single_example_raw` documentation for more details.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
name: A name for this operation (optional).
example_names: (Optional) A scalar string Tensor, the associated name.
See `_parse_single_example_raw` documentation for more details.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing features.")
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = _features_to_raw_params(
features, [VarLenFeature, FixedLenFeature, SparseFeature])
outputs = _parse_single_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
return _construct_sparse_tensors_for_sparse_features(features, outputs)
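# Illustrative usage sketch, not part of the library API: parse one serialized
# `Example` proto. Unlike `parse_example`, the outputs carry no batch
# dimension. `serialized_scalar` and the "gender" key are hypothetical
# placeholders.
def _example_parse_single_example_usage(serialized_scalar):
  features = {"gender": FixedLenFeature([], dtype=dtypes.string)}
  # Returns {"gender": <scalar string Tensor>}.
  return parse_single_example(serialized_scalar, features)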
def _parse_single_example_raw(serialized,
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name=None):
"""Parses a single `Example` proto.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See `_parse_example_raw` documentation for more details.
names: (Optional) A scalar string Tensor, the associated name.
See `_parse_example_raw` documentation for more details.
sparse_keys: See `_parse_example_raw` documentation for more details.
sparse_types: See `_parse_example_raw` documentation for more details.
dense_keys: See `_parse_example_raw` documentation for more details.
dense_types: See `_parse_example_raw` documentation for more details.
dense_defaults: See `_parse_example_raw` documentation for more details.
dense_shapes: See `_parse_example_raw` documentation for more details.
name: A name for this operation (optional).
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
with ops.name_scope(name, "ParseSingleExample", [serialized, names]):
serialized = ops.convert_to_tensor(serialized)
serialized_shape = serialized.get_shape()
if serialized_shape.ndims is not None:
if serialized_shape.ndims != 0:
raise ValueError("Input serialized must be a scalar")
else:
serialized = control_flow_ops.with_dependencies(
[control_flow_ops.Assert(
math_ops.equal(array_ops.rank(serialized), 0),
["Input serialized must be a scalar"],
name="SerializedIsScalar")],
serialized,
name="SerializedDependencies")
serialized = array_ops.expand_dims(serialized, 0)
if names is not None:
names = ops.convert_to_tensor(names)
names_shape = names.get_shape()
if names_shape.ndims is not None:
if names_shape.ndims != 0:
raise ValueError("Input names must be a scalar")
else:
names = control_flow_ops.with_dependencies(
[control_flow_ops.Assert(
math_ops.equal(array_ops.rank(names), 0),
["Input names must be a scalar"],
name="NamesIsScalar")],
names,
name="NamesDependencies")
names = array_ops.expand_dims(names, 0)
outputs = _parse_example_raw(
serialized,
names=names,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_types=dense_types,
dense_defaults=dense_defaults,
dense_shapes=dense_shapes,
name=name)
if dense_keys is not None:
for d in dense_keys:
d_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", d)
outputs[d] = array_ops.squeeze(
outputs[d], [0], name="Squeeze_%s" % d_name)
if sparse_keys is not None:
for s in sparse_keys:
s_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", s)
outputs[s] = sparse_tensor.SparseTensor(
array_ops.slice(outputs[s].indices,
[0, 1], [-1, -1], name="Slice_Indices_%s" % s_name),
outputs[s].values,
array_ops.slice(outputs[s].dense_shape,
[1], [-1], name="Squeeze_Shape_%s" % s_name))
return outputs
def parse_single_sequence_example(
serialized, context_features=None, sequence_features=None,
example_name=None, name=None):
# pylint: disable=line-too-long
"""Parses a single `SequenceExample` proto.
Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
proto given in `serialized`.
  This op parses a serialized sequence example into a tuple of dictionaries
mapping keys to `Tensor` and `SparseTensor` objects respectively.
The first dictionary contains mappings for keys appearing in
`context_features`, and the second dictionary contains mappings for keys
appearing in `sequence_features`.
At least one of `context_features` and `sequence_features` must be provided
and non-empty.
The `context_features` keys are associated with a `SequenceExample` as a
whole, independent of time / frame. In contrast, the `sequence_features` keys
provide a way to access variable-length data within the `FeatureList` section
of the `SequenceExample` proto. While the shapes of `context_features` values
are fixed with respect to frame, the frame dimension (the first dimension)
of `sequence_features` values may vary between `SequenceExample` protos,
and even between `feature_list` keys within the same `SequenceExample`.
`context_features` contains `VarLenFeature` and `FixedLenFeature` objects.
Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature`
is mapped to a `Tensor`, of the specified type, shape, and default value.
`sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`
objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each
`FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type.
The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where
`T` is the length of the associated `FeatureList` in the `SequenceExample`.
  For instance, `FixedLenSequenceFeature([])` yields a 1-D `Tensor` of
static shape `[None]` and dynamic shape `[T]`, while
`FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor`
of static shape `[None, k]` and dynamic shape `[T, k]`.
Each `SparseTensor` corresponding to `sequence_features` represents a ragged
vector. Its indices are `[time, index]`, where `time` is the `FeatureList`
entry and `index` is the value's index in the list of values associated with
that time.
`FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature`
entries with `allow_missing=True` are optional; otherwise, we will fail if
that `Feature` or `FeatureList` is missing from any example in `serialized`.
`example_name` may contain a descriptive name for the corresponding serialized
proto. This may be useful for debugging purposes, but it has no effect on the
output. If not `None`, `example_name` must be a scalar.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. These features are associated with a
`SequenceExample` as a whole.
sequence_features: A `dict` mapping feature keys to
`FixedLenSequenceFeature` or `VarLenFeature` values. These features are
associated with data within the `FeatureList` section of the
`SequenceExample` proto.
example_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: if any feature is invalid.
"""
# pylint: enable=line-too-long
if not (context_features or sequence_features):
raise ValueError("Missing features.")
(context_sparse_keys, context_sparse_types, context_dense_keys,
context_dense_types, context_dense_defaults,
context_dense_shapes) = _features_to_raw_params(
context_features, [VarLenFeature, FixedLenFeature])
(feature_list_sparse_keys, feature_list_sparse_types,
feature_list_dense_keys, feature_list_dense_types,
feature_list_dense_defaults,
feature_list_dense_shapes) = _features_to_raw_params(
sequence_features, [VarLenFeature, FixedLenSequenceFeature])
return _parse_single_sequence_example_raw(
serialized, context_sparse_keys, context_sparse_types,
context_dense_keys, context_dense_types, context_dense_defaults,
context_dense_shapes, feature_list_sparse_keys,
feature_list_sparse_types, feature_list_dense_keys,
feature_list_dense_types, feature_list_dense_shapes,
feature_list_dense_defaults, example_name, name)
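# Illustrative usage sketch, not part of the library API: split a
# `SequenceExample` into per-example context features and per-frame sequence
# features. The keys ("length", "tokens") and `serialized_proto` are
# hypothetical placeholders.
def _example_parse_single_sequence_example_usage(serialized_proto):
  context_features = {"length": FixedLenFeature([], dtype=dtypes.int64)}
  sequence_features = {
      "tokens": FixedLenSequenceFeature([], dtype=dtypes.string)}
  # `context` maps "length" to a scalar Tensor; `sequences` maps "tokens" to a
  # Tensor of shape [T], where T is the length of the "tokens" FeatureList.
  context, sequences = parse_single_sequence_example(
      serialized_proto, context_features, sequence_features)
  return context, sequences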
def _parse_single_sequence_example_raw(serialized,
context_sparse_keys=None,
context_sparse_types=None,
context_dense_keys=None,
context_dense_types=None,
context_dense_defaults=None,
context_dense_shapes=None,
feature_list_sparse_keys=None,
feature_list_sparse_types=None,
feature_list_dense_keys=None,
feature_list_dense_types=None,
feature_list_dense_shapes=None,
feature_list_dense_defaults=None,
debug_name=None,
name=None):
"""Parses a single `SequenceExample` proto.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_sparse_keys: A list of string keys in the `SequenceExample`'s
features. The results for these keys will be returned as
`SparseTensor` objects.
context_sparse_types: A list of `DTypes`, the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
context_dense_types: A list of DTypes, same length as `context_dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the context_dense_keys of the feature.
context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
The shape of the data for each context_dense feature referenced by
`context_dense_keys`. Required for any input tensors identified by
`context_dense_keys` whose shapes are anything other than `[]` or `[1]`.
feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s
feature_lists. The results for these keys will be returned as
`SparseTensor` objects.
feature_list_sparse_types: A list of `DTypes`, same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
features_lists. The results for these keys will be returned as `Tensor`s.
feature_list_dense_types: A list of `DTypes`, same length as
`feature_list_dense_keys`. Only `tf.float32` (`FloatList`),
`tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
feature_list_dense_shapes: A list of tuples, same length as
`feature_list_dense_keys`. The shape of the data for each
`FeatureList` feature referenced by `feature_list_dense_keys`.
feature_list_dense_defaults: A dict mapping key strings to values.
The only currently allowed value is `None`. Any key appearing
in this dict with value `None` is allowed to be missing from the
`SequenceExample`. If missing, the key is treated as zero-length.
debug_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: If context_sparse and context_dense key sets intersect,
if input lengths do not match up, or if a value in
feature_list_dense_defaults is not None.
TypeError: if feature_list_dense_defaults is not either None or a dict.
"""
with ops.name_scope(name, "ParseSingleSequenceExample", [serialized]):
context_dense_defaults = (
{} if context_dense_defaults is None else context_dense_defaults)
context_sparse_keys = (
[] if context_sparse_keys is None else context_sparse_keys)
context_sparse_types = (
[] if context_sparse_types is None else context_sparse_types)
context_dense_keys = (
[] if context_dense_keys is None else context_dense_keys)
context_dense_types = (
[] if context_dense_types is None else context_dense_types)
context_dense_shapes = (
[[]] * len(context_dense_keys)
if context_dense_shapes is None else context_dense_shapes)
feature_list_sparse_keys = (
[] if feature_list_sparse_keys is None else feature_list_sparse_keys)
feature_list_sparse_types = (
[] if feature_list_sparse_types is None else feature_list_sparse_types)
feature_list_dense_keys = (
[] if feature_list_dense_keys is None else feature_list_dense_keys)
feature_list_dense_types = (
[] if feature_list_dense_types is None else feature_list_dense_types)
feature_list_dense_shapes = (
[[]] * len(feature_list_dense_keys)
if feature_list_dense_shapes is None else feature_list_dense_shapes)
feature_list_dense_defaults = (
dict() if feature_list_dense_defaults is None
else feature_list_dense_defaults)
debug_name = "" if debug_name is None else debug_name
# Internal
feature_list_dense_missing_assumed_empty = []
num_context_dense = len(context_dense_keys)
num_feature_list_dense = len(feature_list_dense_keys)
num_context_sparse = len(context_sparse_keys)
num_feature_list_sparse = len(feature_list_sparse_keys)
if len(context_dense_shapes) != num_context_dense:
raise ValueError(
"len(context_dense_shapes) != len(context_dense_keys): %d vs. %d"
% (len(context_dense_shapes), num_context_dense))
if len(context_dense_types) != num_context_dense:
raise ValueError(
"len(context_dense_types) != len(num_context_dense): %d vs. %d"
% (len(context_dense_types), num_context_dense))
if len(feature_list_dense_shapes) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
"%d vs. %d" % (len(feature_list_dense_shapes),
num_feature_list_dense))
if len(feature_list_dense_types) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_types) != len(num_feature_list_dense):"
"%d vs. %d" % (len(feature_list_dense_types), num_feature_list_dense))
if len(context_sparse_types) != num_context_sparse:
raise ValueError(
"len(context_sparse_types) != len(context_sparse_keys): %d vs. %d"
% (len(context_sparse_types), num_context_sparse))
if len(feature_list_sparse_types) != num_feature_list_sparse:
raise ValueError(
"len(feature_list_sparse_types) != len(feature_list_sparse_keys): "
"%d vs. %d"
% (len(feature_list_sparse_types), num_feature_list_sparse))
if (num_context_dense + num_context_sparse
+ num_feature_list_dense + num_feature_list_sparse) == 0:
raise ValueError(
"Must provide at least one context_sparse key, context_dense key, "
", feature_list_sparse key, or feature_list_dense key")
if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
raise ValueError(
"context_dense and context_sparse keys must not intersect; "
"intersection: %s" %
set(context_dense_keys).intersection(set(context_sparse_keys)))
if not set(feature_list_dense_keys).isdisjoint(
set(feature_list_sparse_keys)):
raise ValueError(
"feature_list_dense and feature_list_sparse keys must not intersect; "
"intersection: %s" %
set(feature_list_dense_keys).intersection(
set(feature_list_sparse_keys)))
if not isinstance(feature_list_dense_defaults, dict):
raise TypeError("feature_list_dense_defaults must be a dict")
for k, v in feature_list_dense_defaults.items():
if v is not None:
raise ValueError("Value feature_list_dense_defaults[%s] must be None"
% k)
feature_list_dense_missing_assumed_empty.append(k)
context_dense_defaults_vec = []
for i, key in enumerate(context_dense_keys):
default_value = context_dense_defaults.get(key)
if default_value is None:
default_value = constant_op.constant([], dtype=context_dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=context_dense_types[i], name=key_name)
default_value = array_ops.reshape(
default_value, context_dense_shapes[i])
context_dense_defaults_vec.append(default_value)
context_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
for shape in context_dense_shapes]
feature_list_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
for shape in feature_list_dense_shapes]
# pylint: disable=protected-access
outputs = gen_parsing_ops._parse_single_sequence_example(
serialized=serialized,
debug_name=debug_name,
context_dense_defaults=context_dense_defaults_vec,
context_sparse_keys=context_sparse_keys,
context_sparse_types=context_sparse_types,
context_dense_keys=context_dense_keys,
context_dense_shapes=context_dense_shapes,
feature_list_sparse_keys=feature_list_sparse_keys,
feature_list_sparse_types=feature_list_sparse_types,
feature_list_dense_keys=feature_list_dense_keys,
feature_list_dense_types=feature_list_dense_types,
feature_list_dense_shapes=feature_list_dense_shapes,
feature_list_dense_missing_assumed_empty=(
feature_list_dense_missing_assumed_empty),
name=name)
# pylint: enable=protected-access
(context_sparse_indices, context_sparse_values,
context_sparse_shapes, context_dense_values,
feature_list_sparse_indices, feature_list_sparse_values,
feature_list_sparse_shapes, feature_list_dense_values) = outputs
context_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(context_sparse_indices,
context_sparse_values,
context_sparse_shapes)]
feature_list_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(feature_list_sparse_indices,
feature_list_sparse_values,
feature_list_sparse_shapes)]
context_output = dict(
zip(context_sparse_keys + context_dense_keys,
context_sparse_tensors + context_dense_values))
feature_list_output = dict(
zip(feature_list_sparse_keys + feature_list_dense_keys,
feature_list_sparse_tensors + feature_list_dense_values))
return (context_output, feature_list_output)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for comparing proto2 messages in Python.
ProtoEq() compares two proto2 messages for equality.
ClearDefaultValuedFields() recursively clears the fields that are set to their
default values. This is useful for comparing protocol buffers where the
semantics of unset fields and default valued fields are the same.
assertProtoEqual() is useful for unit tests. It produces much more helpful
output than assertEqual() for proto2 messages, e.g. this:
outer {
inner {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
...compared to the default output from assertEqual() that looks like this:
AssertionError: <my.Msg object at 0x9fb353c> != <my.Msg object at 0x9fb35cc>
Call it inside your unit test's googletest.TestCase subclasses like this:
from tensorflow.python.util.protobuf import compare
class MyTest(googletest.TestCase):
...
def testXXX(self):
...
compare.assertProtoEqual(self, a, b)
Alternatively:
from tensorflow.python.util.protobuf import compare
class MyTest(compare.ProtoAssertions, googletest.TestCase):
...
def testXXX(self):
...
self.assertProtoEqual(a, b)
"""
import difflib
import six
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import text_format
from ..compat import collections_abc
def assertProtoEqual(self, a, b, check_initialized=True, # pylint: disable=invalid-name
normalize_numbers=False, msg=None):
"""Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
  unittest.TestCase.assertEqual(), i.e. order and extra duplicate fields matter.
Args:
self: googletest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subclass thereof.
check_initialized: boolean, whether to fail if either a or b isn't
initialized.
normalize_numbers: boolean, whether to normalize types and precision of
numbers before comparison.
msg: if specified, is used as the error message on failure.
"""
pool = descriptor_pool.Default()
if isinstance(a, six.string_types):
a = text_format.Merge(a, b.__class__(), descriptor_pool=pool)
for pb in a, b:
if check_initialized:
errors = pb.FindInitializationErrors()
if errors:
self.fail('Initialization errors: %s\n%s' % (errors, pb))
if normalize_numbers:
NormalizeNumberFields(pb)
a_str = text_format.MessageToString(a, descriptor_pool=pool)
b_str = text_format.MessageToString(b, descriptor_pool=pool)
# Some Python versions would perform regular diff instead of multi-line
# diff if string is longer than 2**16. We substitute this behavior
# with a call to unified_diff instead to have easier-to-read diffs.
# For context, see: https://bugs.python.org/issue11763.
if len(a_str) < 2**16 and len(b_str) < 2**16:
self.assertMultiLineEqual(a_str, b_str, msg=msg)
else:
diff = ''.join(
difflib.unified_diff(a_str.splitlines(True), b_str.splitlines(True)))
if diff:
self.fail('%s :\n%s' % (msg, diff))
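# Illustrative usage sketch, not part of this module: inside a
# googletest.TestCase subclass, a text-format string can be compared directly
# against a built message. `test_case`, `expected_text`, and `actual_msg` are
# hypothetical placeholders supplied by the caller; for example, expected_text
# could be 'name: "x"' and actual_msg the corresponding proto instance.
def _example_assert_proto_equal_usage(test_case, expected_text, actual_msg):
  # The text string is parsed into actual_msg's class before comparison, and
  # normalize_numbers smooths over float32 representation noise.
  assertProtoEqual(
      test_case, expected_text, actual_msg, normalize_numbers=True)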
def NormalizeNumberFields(pb):
"""Normalizes types and precisions of number fields in a protocol buffer.
Due to subtleties in the python protocol buffer implementation, it is possible
for values to have different types and precision depending on whether they
were set and retrieved directly or deserialized from a protobuf. This function
  normalizes integer values to ints and longs based on width, rounds 32-bit
  floats to six digits of precision to account for Python always storing them
  as 64-bit, and ensures doubles are floating point even when they're set to
  integers.
Modifies pb in place. Recurses into nested objects.
Args:
pb: proto2 message.
Returns:
the given pb, modified in place.
"""
for desc, values in pb.ListFields():
is_repeated = True
if desc.label != descriptor.FieldDescriptor.LABEL_REPEATED:
is_repeated = False
values = [values]
normalized_values = None
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if desc.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_SINT64):
normalized_values = [int(x) for x in values]
elif desc.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_ENUM):
normalized_values = [int(x) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT:
normalized_values = [round(x, 6) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE:
normalized_values = [round(float(x), 7) for x in values]
if normalized_values is not None:
if is_repeated:
pb.ClearField(desc.name)
getattr(pb, desc.name).extend(normalized_values)
else:
setattr(pb, desc.name, normalized_values[0])
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE or
desc.type == descriptor.FieldDescriptor.TYPE_GROUP):
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
desc.message_type.has_options and
desc.message_type.GetOptions().map_entry):
# This is a map, only recurse if the values have a message type.
if (desc.message_type.fields_by_number[2].type ==
descriptor.FieldDescriptor.TYPE_MESSAGE):
for v in six.itervalues(values):
NormalizeNumberFields(v)
else:
for v in values:
# recursive step
NormalizeNumberFields(v)
return pb
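# Illustrative sketch, not part of this module: normalizing numbers before a
# text-format comparison removes float32 representation noise. `pb` is a
# hypothetical proto message supplied by the caller.
def _example_normalize_before_text_compare(pb):
  NormalizeNumberFields(pb)
  # After normalization, a float field set to 0.1 and the same field
  # deserialized as 0.10000000149011612 render identically in text format.
  return text_format.MessageToString(pb)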
def _IsMap(value):
return isinstance(value, collections_abc.Mapping)
def _IsRepeatedContainer(value):
if isinstance(value, six.string_types):
return False
try:
iter(value)
return True
except TypeError:
return False
def ProtoEq(a, b):
"""Compares two proto2 objects for equality.
Recurses into nested messages. Uses list (not set) semantics for comparing
repeated fields, ie duplicates and order matter.
Args:
a: A proto2 message or a primitive.
b: A proto2 message or a primitive.
Returns:
`True` if the messages are equal.
"""
def Format(pb):
"""Returns a dictionary or unchanged pb bases on its type.
Specifically, this function returns a dictionary that maps tag
number (for messages) or element index (for repeated fields) to
value, or just pb unchanged if it's neither.
Args:
pb: A proto2 message or a primitive.
Returns:
A dict or unchanged pb.
"""
if isinstance(pb, message.Message):
return dict((desc.number, value) for desc, value in pb.ListFields())
elif _IsMap(pb):
return dict(pb.items())
elif _IsRepeatedContainer(pb):
return dict(enumerate(list(pb)))
else:
return pb
a, b = Format(a), Format(b)
# Base case
if not isinstance(a, dict) or not isinstance(b, dict):
return a == b
  # This loop performs double duty: it compares two messages by tag value *or*
  # two repeated fields by element, in order. The magic is in the Format()
  # function, which converts them both to the same easily comparable format.
for tag in sorted(set(a.keys()) | set(b.keys())):
if tag not in a or tag not in b:
return False
else:
# Recursive step
if not ProtoEq(a[tag], b[tag]):
return False
# Didn't find any values that differed, so they're equal!
return True
class ProtoAssertions(object):
"""Mix this into a googletest.TestCase class to get proto2 assertions.
Usage:
class SomeTestCase(compare.ProtoAssertions, googletest.TestCase):
...
def testSomething(self):
...
self.assertProtoEqual(a, b)
See module-level definitions for method documentation.
"""
# pylint: disable=invalid-name
def assertProtoEqual(self, *args, **kwargs):
return assertProtoEqual(self, *args, **kwargs)
|
|
import pygame
import stale
import random
from gracz import Player
from platforma import Platform
from wrogowie import Enemy
class Level(object):
""" Ogolna klasa poziomow gry """
def __init__(self):
self.platform_list = pygame.sprite.Group()
self.enemy_list = pygame.sprite.Group()
#self.player = player
self.background = None
def update(self):
""" Odswieza stan poziomu """
self.platform_list.update()
self.enemy_list.update()
def draw(self, screen):
""" Rysuj poziom """
# Draw the background
screen.fill(stale.BLUE)
screen.blit(self.background, [0,0])
# Draw all the sprite lists that we have
self.platform_list.draw(screen)
self.enemy_list.draw(screen)
# Create platforms for the level
class Level_01(Level):
""" Poziom pierwszy - ogrod """
def __init__(self):
Level.__init__(self)
self.background = pygame.image.load("kwiaty.png").convert()
# Array with width, height, x, and y of platform
level = [[300, 50, 300, 150],
[400,50,400,300],
[500,50,200,450],
[500,50, 400,600]]
color = stale.PINK
# Go through the array above and add platforms
for platform in level:
block = Platform(platform[0], platform[1],color)
block.rect.x = platform[2]
block.rect.y = platform[3]
#block.player = self.player
self.platform_list.add(block)
rycerz = Enemy()
rycerz.level = self
            # Place the knight; position is currently random
            rycerz.rect.x = block.rect.x + random.randrange(platform[0]-101)
            rycerz.rect.y = block.rect.y - 85
            # Add the knight to the enemy sprite list
self.enemy_list.add(rycerz)
class Level_02(Level):
""" Poziom 2 - klatka schodowa """
def __init__(self):
Level.__init__(self)
self.background = pygame.image.load("schody.png").convert()
level = [[500, 50, 0, 150],
[600, 50, 300, 300],
[400, 50, 100, 450],
[500, 50, 500, 600]]
color = stale.SELEDYN
for platform in level:
block = Platform(platform[0], platform[1],color)
block.rect.x = platform[2]
block.rect.y = platform[3]
#block.player = self.player
self.platform_list.add(block)
#self.enemy_list.append(rycerz)
rycerz = Enemy()
rycerz.level = self
            # Place the knight; position is currently random
            rycerz.rect.x = block.rect.x + random.randrange(platform[0]-101)
            rycerz.rect.y = block.rect.y - 85
            # Add the knight to the enemy sprite list
self.enemy_list.add(rycerz)
class Level_03(Level):
""" Poziom 3 - lazienka """
def __init__(self):
Level.__init__(self)
self.background = pygame.image.load("prysznic.png").convert()
level = [[800, 50, 200, 600],
[300, 50, 0, 450],
[400, 50, 10, 300],
[300, 50, 0, 150],
[300, 50, 700, 150],
[200, 50, 500, 300],
[500, 50, 500, 450]]
color = stale.ORANGE
for platform in level:
block = Platform(platform[0], platform[1],color)
block.rect.x = platform[2]
block.rect.y = platform[3]
#block.player = self.player
self.platform_list.add(block)
#self.enemy_list.append(rycerz)
rycerz = Enemy()
rycerz.level = self
            # Place the knight; position is currently random
            rycerz.rect.x = block.rect.x + random.randrange(platform[0]-101)
            rycerz.rect.y = block.rect.y - 85
            # Add the knight to the enemy sprite list
self.enemy_list.add(rycerz)
class Level_04(Level):
""" Poziom 4 - biblioteka """
def __init__(self):
Level.__init__(self)
self.background = pygame.image.load("library.png").convert()
level = [[300, 50, 300, 150],
[150, 50, 200, 300],
[200, 50, 700, 300],
[500, 50, 200, 600],
[300, 50, 500, 450]]
color = stale.BLUE
for platform in level:
block = Platform(platform[0], platform[1],color)
block.rect.x = platform[2]
block.rect.y = platform[3]
#block.player = self.player
self.platform_list.add(block)
#self.enemy_list.append(rycerz)
rycerz = Enemy()
rycerz.level = self
            # Place the knight; position is currently random
            rycerz.rect.x = block.rect.x + random.randrange(platform[0]-101)
            rycerz.rect.y = block.rect.y - 85
            # Add the knight to the enemy sprite list
self.enemy_list.add(rycerz)
class Level_05(Level):
""" Poziom 5 - magiczne studio czy cos """
def __init__(self):
Level.__init__(self)
self.background = pygame.image.load("level5.png").convert()
level = [[300, 50, 0, 300],
[500, 50, 200, 450],
[400, 50, 400, 600],
[500, 50, 400, 150]]
color = stale.GREEN
for platform in level:
block = Platform(platform[0], platform[1],color)
block.rect.x = platform[2]
block.rect.y = platform[3]
#block.player = self.player
self.platform_list.add(block)
#self.enemy_list.append(rycerz)
rycerz = Enemy()
rycerz.level = self
            # Place the knight; position is currently random
rycerz.rect.x = block.rect.x + random.randrange(platform[0]-101)
rycerz.rect.y = block.rect.y - 85
self.enemy_list.add(rycerz)
class Koniec(Level):
""" Koniec gry """
def __init__(self):
Level.__init__(self)
self.background = pygame.Surface([1000,700])
self.background.fill(stale.BLACK)
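# Illustrative usage sketch, not part of the game code: build the list of
# levels and point a Player instance at the first one. This assumes Player
# exposes a `level` attribute (as Enemy does); screen setup and the main loop
# are assumed to live elsewhere.
def _example_build_levels(player):
    levels = [Level_01(), Level_02(), Level_03(), Level_04(),
              Level_05(), Koniec()]
    current_level = levels[0]
    player.level = current_level
    return levels, current_level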
|
|
import time
from copy import deepcopy
from typing import Any, Dict, List, Optional
import orjson
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Draft
class DraftCreationTests(ZulipTestCase):
def create_and_check_drafts_for_success(
self,
draft_dicts: List[Dict[str, Any]],
expected_draft_dicts: Optional[List[Dict[str, Any]]] = None,
) -> None:
hamlet = self.example_user("hamlet")
# Now send a POST request to the API endpoint.
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_success(resp)
# Finally check to make sure that the drafts were actually created properly.
new_draft_dicts = []
for draft in Draft.objects.filter(user_profile=hamlet).order_by("last_edit_time"):
draft_dict = draft.to_dict()
draft_dict.pop("id")
new_draft_dicts.append(draft_dict)
if expected_draft_dicts is None:
expected_draft_dicts = draft_dicts
self.assertEqual(new_draft_dicts, expected_draft_dicts)
def create_and_check_drafts_for_error(
self, draft_dicts: List[Dict[str, Any]], expected_message: str
) -> None:
hamlet = self.example_user("hamlet")
initial_count = Draft.objects.count()
# Now send a POST request to the API endpoint.
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_error(resp, expected_message)
# Make sure that there are no drafts in the database at the
# end of the test. Drafts should never be created in error
# conditions.
self.assertEqual(Draft.objects.count(), initial_count)
def test_require_enable_drafts_synchronization(self) -> None:
hamlet = self.example_user("hamlet")
hamlet.enable_drafts_synchronization = False
hamlet.save()
payload = {"drafts": "[]"}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_error(resp, "User has disabled synchronizing drafts.")
def test_create_one_stream_draft_properly(self) -> None:
hamlet = self.example_user("hamlet")
visible_stream_name = self.get_streams(hamlet)[0]
visible_stream_id = self.get_stream_id(visible_stream_name)
draft_dicts = [
{
"type": "stream",
"to": [visible_stream_id],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}
]
self.create_and_check_drafts_for_success(draft_dicts)
def test_create_one_personal_message_draft_properly(self) -> None:
zoe = self.example_user("ZOE")
draft_dicts = [
{
"type": "private",
"to": [zoe.id],
"topic": "This topic should be ignored.",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}
]
expected_draft_dicts = [
{
"type": "private",
"to": [zoe.id],
"topic": "", # For private messages the topic should be ignored.
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}
]
self.create_and_check_drafts_for_success(draft_dicts, expected_draft_dicts)
def test_create_one_group_personal_message_draft_properly(self) -> None:
zoe = self.example_user("ZOE")
othello = self.example_user("othello")
draft_dicts = [
{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "This topic should be ignored.",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}
]
expected_draft_dicts = [
{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "", # For private messages the topic should be ignored.
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}
]
self.create_and_check_drafts_for_success(draft_dicts, expected_draft_dicts)
def test_create_batch_of_drafts_properly(self) -> None:
hamlet = self.example_user("hamlet")
visible_stream_name = self.get_streams(hamlet)[0]
visible_stream_id = self.get_stream_id(visible_stream_name)
zoe = self.example_user("ZOE")
othello = self.example_user("othello")
draft_dicts = [
{
"type": "stream",
"to": [visible_stream_id],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}, # Stream message draft
{
"type": "private",
"to": [zoe.id],
"topic": "",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479020,
}, # Private message draft
{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479021,
}, # Private group message draft
]
self.create_and_check_drafts_for_success(draft_dicts)
def test_missing_timestamps(self) -> None:
"""If a timestamp is not provided for a draft dict then it should be automatically
filled in."""
hamlet = self.example_user("hamlet")
visible_stream_name = self.get_streams(hamlet)[0]
visible_stream_id = self.get_stream_id(visible_stream_name)
draft_dicts = [
{
"type": "stream",
"to": [visible_stream_id],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
}
]
initial_count = Draft.objects.count()
current_time = int(time.time())
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
ids = orjson.loads(resp.content)["ids"]
self.assert_json_success(resp)
new_drafts = Draft.objects.filter(id__gte=ids[0])
self.assertEqual(Draft.objects.count() - initial_count, 1)
new_draft = new_drafts[0].to_dict()
self.assertTrue(isinstance(new_draft["timestamp"], int))
# Since it would be too tricky to get the same times, perform
# a relative check.
self.assertTrue(new_draft["timestamp"] >= current_time)
def test_invalid_timestamp(self) -> None:
draft_dicts = [
{
"type": "stream",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": -10.10,
}
]
self.create_and_check_drafts_for_error(draft_dicts, "Timestamp must not be negative.")
def test_create_non_stream_draft_with_no_recipient(self) -> None:
"""When "to" is an empty list, the type should become "" as well."""
draft_dicts = [
{
"type": "private",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
{
"type": "",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
]
expected_draft_dicts = [
{
"type": "",
"to": [],
"topic": "",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
{
"type": "",
"to": [],
"topic": "",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
},
]
self.create_and_check_drafts_for_success(draft_dicts, expected_draft_dicts)
def test_create_stream_draft_with_no_recipient(self) -> None:
draft_dicts = [
{
"type": "stream",
"to": [],
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 15954790199,
}
]
self.create_and_check_drafts_for_error(
draft_dicts, "Must specify exactly 1 stream ID for stream messages"
)
def test_create_stream_draft_for_inaccessible_stream(self) -> None:
# When the user does not have permission to access the stream:
stream = self.make_stream("Secret Society", invite_only=True)
draft_dicts = [
{
"type": "stream",
"to": [stream.id], # This can't be accessed by hamlet.
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}
]
self.create_and_check_drafts_for_error(draft_dicts, "Invalid stream id")
# When the stream itself does not exist:
draft_dicts = [
{
"type": "stream",
"to": [99999999999999], # Hopefully, this doesn't exist.
"topic": "sync drafts",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 1595479019,
}
]
self.create_and_check_drafts_for_error(draft_dicts, "Invalid stream id")
def test_create_personal_message_draft_for_non_existing_user(self) -> None:
draft_dicts = [
{
"type": "private",
"to": [99999999999999], # Hopefully, this doesn't exist either.
"topic": "This topic should be ignored.",
"content": "What if we made it possible to sync drafts in Zulip?",
"timestamp": 1595479019,
}
]
self.create_and_check_drafts_for_error(draft_dicts, "Invalid user ID 99999999999999")
def test_create_draft_with_null_bytes(self) -> None:
draft_dicts = [
{
"type": "",
"to": [],
"topic": "sync drafts.",
"content": "Some regular \x00 content here",
"timestamp": 15954790199,
}
]
self.create_and_check_drafts_for_error(draft_dicts, "Message must not contain null bytes")
draft_dicts = [
{
"type": "stream",
"to": [10],
"topic": "thinking about \x00",
"content": "Let's add backend support for syncing drafts.",
"timestamp": 15954790199,
}
]
self.create_and_check_drafts_for_error(draft_dicts, "Topic must not contain null bytes")
class DraftEditTests(ZulipTestCase):
def test_require_enable_drafts_synchronization(self) -> None:
hamlet = self.example_user("hamlet")
hamlet.enable_drafts_synchronization = False
hamlet.save()
resp = self.api_patch(hamlet, "/api/v1/drafts/1", {"draft": {}})
self.assert_json_error(resp, "User has disabled synchronizing drafts.")
def test_edit_draft_successfully(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_a = self.get_stream_id(visible_streams[0])
stream_b = self.get_stream_id(visible_streams[1])
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_a],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700,
}
resp = self.api_post(
hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()}
)
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Change the draft data.
draft_dict["content"] = "The API needs to be structured yet simple to use."
draft_dict["to"] = [stream_b]
draft_dict["topic"] = "designing drafts"
draft_dict["timestamp"] = 1595505800
# Update this change in the backend.
resp = self.api_patch(
hamlet, f"/api/v1/drafts/{new_draft_id}", {"draft": orjson.dumps(draft_dict).decode()}
)
self.assert_json_success(resp)
# Now make sure that the change was made successfully.
new_draft = Draft.objects.get(id=new_draft_id, user_profile=hamlet)
new_draft_dict = new_draft.to_dict()
new_draft_dict.pop("id")
self.assertEqual(new_draft_dict, draft_dict)
def test_edit_non_existant_draft(self) -> None:
hamlet = self.example_user("hamlet")
initial_count = Draft.objects.count()
# Try to update a draft that doesn't exist.
draft_dict = {
"type": "stream",
"to": [10],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700,
}
resp = self.api_patch(
hamlet, "/api/v1/drafts/999999999", {"draft": orjson.dumps(draft_dict).decode()}
)
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Now make sure that no changes were made.
self.assertEqual(Draft.objects.count() - initial_count, 0)
def test_edit_unowned_draft(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_id = self.get_stream_id(visible_streams[0])
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_id],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700,
}
resp = self.api_post(
hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()}
)
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Change the draft data.
modified_draft_dict = deepcopy(draft_dict)
modified_draft_dict["content"] = "???"
# Update this change in the backend as a different user.
zoe = self.example_user("ZOE")
resp = self.api_patch(
zoe, f"/api/v1/drafts/{new_draft_id}", {"draft": orjson.dumps(draft_dict).decode()}
)
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Now make sure that no changes were made.
existing_draft = Draft.objects.get(id=new_draft_id, user_profile=hamlet)
existing_draft_dict = existing_draft.to_dict()
existing_draft_dict.pop("id")
self.assertEqual(existing_draft_dict, draft_dict)
class DraftDeleteTests(ZulipTestCase):
def test_require_enable_drafts_synchronization(self) -> None:
hamlet = self.example_user("hamlet")
hamlet.enable_drafts_synchronization = False
hamlet.save()
resp = self.api_delete(hamlet, "/api/v1/drafts/1")
self.assert_json_error(resp, "User has disabled synchronizing drafts.")
def test_delete_draft_successfully(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_id = self.get_stream_id(visible_streams[0])
# Record the number of drafts at the start of this test.
initial_count = Draft.objects.count()
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_id],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700,
}
resp = self.api_post(
hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()}
)
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Make sure that exactly 1 draft exists now.
self.assertEqual(Draft.objects.count() - initial_count, 1)
# Update this change in the backend.
resp = self.api_delete(hamlet, f"/api/v1/drafts/{new_draft_id}")
self.assert_json_success(resp)
# Now make sure that there are no more drafts.
self.assertEqual(Draft.objects.count() - initial_count, 0)
def test_delete_non_existant_draft(self) -> None:
hamlet = self.example_user("hamlet")
# Record the number of drafts before the request.
initial_count = Draft.objects.count()
# Try to delete a draft that doesn't exist.
resp = self.api_delete(hamlet, "/api/v1/drafts/9999999999")
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Now make sure that no drafts were created.
self.assertEqual(Draft.objects.count() - initial_count, 0)
def test_delete_unowned_draft(self) -> None:
hamlet = self.example_user("hamlet")
visible_streams = self.get_streams(hamlet)
stream_id = self.get_stream_id(visible_streams[0])
# Record the number of drafts at the start of this test.
initial_count = Draft.objects.count()
# Create a draft.
draft_dict = {
"type": "stream",
"to": [stream_id],
"topic": "drafts",
"content": "The API should be good",
"timestamp": 1595505700,
}
resp = self.api_post(
hamlet, "/api/v1/drafts", {"drafts": orjson.dumps([draft_dict]).decode()}
)
self.assert_json_success(resp)
new_draft_id = orjson.loads(resp.content)["ids"][0]
# Delete this draft in the backend as a different user.
zoe = self.example_user("ZOE")
resp = self.api_delete(zoe, f"/api/v1/drafts/{new_draft_id}")
self.assert_json_error(resp, "Draft does not exist", status_code=404)
# Make sure that the draft was not deleted.
self.assertEqual(Draft.objects.count() - initial_count, 1)
# Now make sure that no changes were made either.
existing_draft = Draft.objects.get(id=new_draft_id, user_profile=hamlet)
existing_draft_dict = existing_draft.to_dict()
existing_draft_dict.pop("id")
self.assertEqual(existing_draft_dict, draft_dict)
class DraftFetchTest(ZulipTestCase):
def test_require_enable_drafts_synchronization(self) -> None:
hamlet = self.example_user("hamlet")
hamlet.enable_drafts_synchronization = False
hamlet.save()
resp = self.api_get(hamlet, "/api/v1/drafts")
self.assert_json_error(resp, "User has disabled synchronizing drafts.")
def test_fetch_drafts(self) -> None:
initial_count = Draft.objects.count()
hamlet = self.example_user("hamlet")
zoe = self.example_user("ZOE")
othello = self.example_user("othello")
visible_stream_id = self.get_stream_id(self.get_streams(hamlet)[0])
draft_dicts = [
{
"type": "stream",
"to": [visible_stream_id],
"topic": "thinking out loud",
"content": "What if pigs really could fly?",
"timestamp": 15954790197,
},
{
"type": "private",
"to": [zoe.id, othello.id],
"topic": "",
"content": "What if made it possible to sync drafts in Zulip?",
"timestamp": 15954790198,
},
{
"type": "private",
"to": [zoe.id],
"topic": "",
"content": "What if made it possible to sync drafts in Zulip?",
"timestamp": 15954790199,
},
]
payload = {"drafts": orjson.dumps(draft_dicts).decode()}
resp = self.api_post(hamlet, "/api/v1/drafts", payload)
self.assert_json_success(resp)
self.assertEqual(Draft.objects.count() - initial_count, 3)
zoe_draft_dicts = [
{
"type": "private",
"to": [hamlet.id],
"topic": "",
"content": "Hello there!",
"timestamp": 15954790200,
},
]
payload = {"drafts": orjson.dumps(zoe_draft_dicts).decode()}
resp = self.api_post(zoe, "/api/v1/drafts", payload)
self.assert_json_success(resp)
self.assertEqual(Draft.objects.count() - initial_count, 4)
# Now actually fetch the drafts. Make sure that hamlet gets only
# his drafts and exactly as he made them.
resp = self.api_get(hamlet, "/api/v1/drafts")
self.assert_json_success(resp)
data = orjson.loads(resp.content)
self.assertEqual(data["count"], 3)
first_draft_id = Draft.objects.filter(user_profile=hamlet).order_by("id")[0].id
expected_draft_contents = [
{"id": first_draft_id + i, **draft_dicts[i]} for i in range(0, 3)
]
self.assertEqual(data["drafts"], expected_draft_contents)
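# A minimal sketch (not part of the test suite) of how a client might exercise
# the drafts endpoints covered above. The server URL, email, and API key are
# hypothetical placeholders; only the payload shape mirrors the tests.
def _example_drafts_client_sketch() -> None:  # illustrative helper, never called
    import requests  # assumes the requests package is available

    base = "https://chat.example.com"  # hypothetical server
    auth = ("user@example.com", "API_KEY")  # hypothetical credentials
    draft = {
        "type": "stream",
        "to": [1],
        "topic": "drafts",
        "content": "The API should be good",
        "timestamp": 1595505700,
    }
    # Create a draft; the response lists the new draft IDs under "ids".
    resp = requests.post(
        f"{base}/api/v1/drafts",
        auth=auth,
        data={"drafts": orjson.dumps([draft]).decode()},
    )
    draft_id = resp.json()["ids"][0]
    # Edit, fetch, and delete follow the same patterns used in the tests above.
    requests.patch(
        f"{base}/api/v1/drafts/{draft_id}",
        auth=auth,
        data={"draft": orjson.dumps(draft).decode()},
    )
    requests.get(f"{base}/api/v1/drafts", auth=auth)
    requests.delete(f"{base}/api/v1/drafts/{draft_id}", auth=auth)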
|
|
import datetime
from unittest.mock import patch
from django.core import mail
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import test
from waldur_core.core import utils as core_utils
from waldur_core.structure.tests import factories as structure_factories
from waldur_core.structure.tests import fixtures as structure_fixtures
from waldur_mastermind.invoices import models as invoices_models
from waldur_mastermind.invoices.tests import factories as invoices_factories
from waldur_mastermind.marketplace import exceptions, models, tasks
from waldur_mastermind.marketplace.tests.helpers import override_marketplace_settings
from . import factories, fixtures
class CalculateUsageForCurrentMonthTest(test.APITransactionTestCase):
def setUp(self):
offering = factories.OfferingFactory()
plan = factories.PlanFactory(offering=offering)
resource = factories.ResourceFactory(offering=offering)
category_component = factories.CategoryComponentFactory()
self.offering_component = factories.OfferingComponentFactory(
offering=offering,
parent=category_component,
billing_type=models.OfferingComponent.BillingTypes.USAGE,
)
factories.PlanComponentFactory(plan=plan, component=self.offering_component)
plan_period = models.ResourcePlanPeriod.objects.create(
resource=resource, plan=plan, start=timezone.now()
)
models.ComponentUsage.objects.create(
resource=resource,
component=self.offering_component,
usage=10,
date=datetime.datetime.now(),
billing_period=core_utils.month_start(datetime.datetime.now()),
plan_period=plan_period,
)
def test_calculate_usage_if_category_component_is_set(self):
tasks.calculate_usage_for_current_month()
self.assertEqual(models.CategoryComponentUsage.objects.count(), 2)
def test_calculate_usage_if_category_component_is_not_set(self):
self.offering_component.parent = None
self.offering_component.save()
tasks.calculate_usage_for_current_month()
self.assertEqual(models.CategoryComponentUsage.objects.count(), 0)
class NotificationTest(test.APITransactionTestCase):
def test_notify_about_resource_change(self):
project_fixture = structure_fixtures.ProjectFixture()
admin = project_fixture.admin
project = project_fixture.project
resource = factories.ResourceFactory(project=project, name='Test resource')
tasks.notify_about_resource_change(
'marketplace_resource_create_succeeded',
{'resource_name': resource.name},
resource.uuid,
)
self.assertEqual(len(mail.outbox), 1)
subject_template_name = '%s/%s_subject.txt' % (
'marketplace',
'marketplace_resource_create_succeeded',
)
subject = core_utils.format_text(
subject_template_name, {'resource_name': resource.name}
)
self.assertEqual(mail.outbox[0].subject, subject)
self.assertEqual(mail.outbox[0].to[0], admin.email)
self.assertTrue(resource.name in mail.outbox[0].body)
self.assertTrue(resource.name in mail.outbox[0].subject)
class ResourceEndDateTest(test.APITransactionTestCase):
def test_notify_about_resource_scheduled_termination(self):
fixture = fixtures.MarketplaceFixture()
admin = fixture.admin
manager = fixture.manager
tasks.notify_about_resource_termination(
fixture.resource.uuid, fixture.offering_owner.uuid,
)
recipients = {m.to[0] for m in mail.outbox}
self.assertEqual(recipients, {admin.email, manager.email})
self.assertEqual(len(mail.outbox), 2)
self.assertTrue(fixture.resource.name in mail.outbox[0].body)
self.assertTrue(fixture.resource.name in mail.outbox[0].subject)
def test_mail_is_not_sent_if_there_are_no_project_admin_or_manager(self):
fixture = fixtures.MarketplaceFixture()
tasks.notify_about_resource_termination(
fixture.resource.uuid, fixture.offering_owner.uuid,
)
self.assertEqual(len(mail.outbox), 0)
@patch('waldur_mastermind.marketplace.tasks.core_utils.broadcast_mail')
def test_notification_uses_different_templates_for_staff_and_other_users(
self, mock_broadcast_mail
):
fixture = fixtures.MarketplaceFixture()
tasks.notify_about_resource_termination(
fixture.resource.uuid, fixture.offering_owner.uuid, False
)
mock_broadcast_mail.assert_called()
self.assertEqual(
mock_broadcast_mail.call_args[0][1],
'marketplace_resource_terminatate_scheduled',
)
tasks.notify_about_resource_termination(
fixture.resource.uuid, fixture.offering_owner.uuid, True
)
mock_broadcast_mail.assert_called()
self.assertEqual(
mock_broadcast_mail.call_args[0][1],
'marketplace_resource_terminatate_scheduled_staff',
)
class TerminateResource(test.APITransactionTestCase):
def setUp(self):
fixture = structure_fixtures.UserFixture()
self.user = fixture.staff
offering = factories.OfferingFactory()
self.resource = factories.ResourceFactory(offering=offering)
factories.OrderItemFactory(
resource=self.resource,
type=models.OrderItem.Types.TERMINATE,
state=models.OrderItem.States.EXECUTING,
)
def test_raise_exception_if_order_item_has_not_been_created(self):
self.assertRaises(
exceptions.ResourceTerminateException,
tasks.terminate_resource,
core_utils.serialize_instance(self.resource),
core_utils.serialize_instance(self.user),
)
class ProjectEndDate(test.APITransactionTestCase):
def setUp(self):
# We need to create a system robot account because the account
# created in a migration does not exist when the test is running.
structure_factories.UserFactory(
first_name='System',
last_name='Robot',
username='system_robot',
description='Special user used for performing actions on behalf of Waldur.',
is_staff=True,
is_active=True,
)
core_utils.get_system_robot.cache_clear()
self.fixtures = fixtures.MarketplaceFixture()
self.fixtures.project.end_date = datetime.datetime(
day=1, month=1, year=2020
).date()
self.fixtures.project.save()
self.fixtures.resource.set_state_ok()
self.fixtures.resource.save()
def test_terminate_resources_if_project_end_date_has_been_reached(self):
with freeze_time('2020-01-02'):
tasks.terminate_resources_if_project_end_date_has_been_reached()
self.assertTrue(
models.OrderItem.objects.filter(
resource=self.fixtures.resource,
type=models.OrderItem.Types.TERMINATE,
).count()
)
order_item = models.OrderItem.objects.get(
resource=self.fixtures.resource, type=models.OrderItem.Types.TERMINATE
)
self.assertEqual(order_item.order.state, models.Order.States.EXECUTING)
@override_marketplace_settings(ENABLE_STALE_RESOURCE_NOTIFICATIONS=True)
class NotificationAboutStaleResourceTest(test.APITransactionTestCase):
def setUp(self):
project_fixture = structure_fixtures.ProjectFixture()
self.owner = project_fixture.owner
project = project_fixture.project
self.resource = factories.ResourceFactory(
project=project, name='Test resource', state=models.Resource.States.OK
)
self.resource.offering.type = 'Test.Type'
self.resource.offering.save()
def test_send_notify_if_stale_resource_exists(self):
tasks.notify_about_stale_resource()
self.assertEqual(len(mail.outbox), 1)
subject_template_name = '%s/%s_subject.txt' % (
'marketplace',
'notification_about_stale_resources',
)
subject = core_utils.format_text(subject_template_name, {})
self.assertEqual(mail.outbox[0].subject, subject)
self.assertEqual(mail.outbox[0].to[0], self.owner.email)
self.assertTrue(self.resource.name in mail.outbox[0].body)
def test_do_not_send_notify_if_stale_resource_does_not_exists(self):
item = invoices_factories.InvoiceItemFactory(resource=self.resource)
item.unit_price = 10
item.quantity = 10
item.unit = invoices_models.InvoiceItem.Units.QUANTITY
item.save()
self.assertTrue(item.price)
tasks.notify_about_stale_resource()
self.assertEqual(len(mail.outbox), 0)
def test_send_notify_if_related_invoice_item_has_not_price(self):
item = invoices_factories.InvoiceItemFactory(resource=self.resource)
item.unit_price = 0
item.save()
self.assertFalse(item.price)
tasks.notify_about_stale_resource()
self.assertEqual(len(mail.outbox), 1)
def test_send_notify_only_for_resources_belonging_to_billable_offerings(self):
self.resource.offering.billable = False
self.resource.offering.save()
tasks.notify_about_stale_resource()
self.assertEqual(len(mail.outbox), 0)
@override_marketplace_settings(ENABLE_STALE_RESOURCE_NOTIFICATIONS=False)
def test_do_not_send_notify_if_configuration_is_false(self):
tasks.notify_about_stale_resource()
self.assertEqual(len(mail.outbox), 0)
class ResourceEndDate(test.APITransactionTestCase):
def setUp(self):
# We need to create a system robot account because the account
# created in a migration does not exist when the test is running.
structure_factories.UserFactory(
first_name='System',
last_name='Robot',
username='system_robot',
description='Special user used for performing actions on behalf of Waldur.',
is_staff=True,
is_active=True,
)
core_utils.get_system_robot.cache_clear()
self.fixtures = fixtures.MarketplaceFixture()
self.resource = self.fixtures.resource
self.resource.end_date = datetime.datetime(day=1, month=1, year=2020).date()
self.resource.set_state_ok()
self.resource.save()
def test_terminate_resource_if_its_end_date_has_been_reached(self):
with freeze_time('2020-01-01'):
self.assertTrue(self.resource.is_expired)
tasks.terminate_resource_if_its_end_date_has_been_reached()
self.resource.refresh_from_db()
self.assertTrue(
models.OrderItem.objects.filter(
resource=self.fixtures.resource,
type=models.OrderItem.Types.TERMINATE,
).count()
)
order_item = models.OrderItem.objects.get(
resource=self.fixtures.resource, type=models.OrderItem.Types.TERMINATE
)
self.assertEqual(order_item.order.state, models.Order.States.EXECUTING)
|
|
# tests.test_experiment
# Tests for the experiment generation utility.
#
# Author: Benjamin Bengfort <bengfort@cs.umd.edu>
# Created: Tue Feb 23 08:09:12 2016 -0500
#
# Copyright (C) 2015 University of Maryland
# For license information, see LICENSE.txt
#
# ID: test_experiment.py [26c65a7] benjamin@bengfort.com $
"""
Tests for the experiment generation utility.
"""
##########################################################################
## Imports
##########################################################################
import os
import json
import unittest
from cloudscope.experiment import *
from cloudscope.config import settings
from cloudscope.replica import Consistency
from cloudscope.exceptions import CannotGenerateExperiments
try:
from unittest import mock
except ImportError:
import mock
##########################################################################
## Fixtures
##########################################################################
FIXTURES = os.path.join(os.path.dirname(__file__), "fixtures")
TEMPLATE = os.path.join(FIXTURES, "simulation.json")
##########################################################################
## Test Mixins
##########################################################################
class NestedAssertionMixin(object):
"""
Helper functions for performing nested assertions.
"""
def assertNestedBetween(self, d, low=1, high=10):
"""
Helper function for recursively asserting nested between properties,
namely that all values in d, so long as they aren't a dictionary, are
greater than or equal low, and less than or equal high.
Basically a helper function for test_nested_randomize_integers and
test_nested_randomize_floats.
"""
for k,v in d.iteritems():
if isinstance(v, dict):
self.assertNestedBetween(v, low, high)
else:
self.assertGreaterEqual(v, low)
self.assertLessEqual(v, high)
##########################################################################
## Helper Function Tests
##########################################################################
class ExperimentHelpersTests(unittest.TestCase, NestedAssertionMixin):
"""
Ensure the experiment helper functions behave as expected.
"""
def test_spread_evenly(self):
"""
Test the even spread across a domain
"""
expected = [
[0, 10], [10, 20], [20, 30], [30, 40], [40, 50],
[50, 60], [60, 70], [70, 80], [80, 90], [90, 100],
]
self.assertEqual(expected, list(spread(10, 0, 100)))
def test_spread_width(self):
"""
Test the spread with a specified width
"""
expected = [
[0, 5], [10, 15], [20, 25], [30, 35], [40, 45],
[50, 55], [60, 65], [70, 75], [80, 85], [90, 95],
]
self.assertEqual(expected, list(spread(10, 0, 100, 5)))
def test_simple_nested_update(self):
"""
Ensure that the nested update behaves like update
"""
d = {'a': 1, 'b': 2, 'c': 3}
u = {'c': 4, 'd': 5, 'e': 6}
e = d.copy()
e.update(u)
self.assertEqual(e, nested_update(d,u))
def test_nested_update(self):
"""
Test the nested update function
"""
d = {'I': {'A': {'a': 1}, 'B': {'a': 2}}}
u = {'I': {'B': {'b': 4}}, 'II': {'A': {'a': 5}}}
e = {'I': {'A': {'a': 1}, 'B': {'a': 2, 'b': 4}}, 'II': {'A': {'a': 5}}}
self.assertEqual(nested_update(d,u), e)
def test_nested_randomize_integers(self):
"""
Test the nested randomize function with integers
"""
d = {
'A': {
'a1': (1, 10),
'a2': (1, 10),
},
'B': {
'C': {
'c1': (1, 10),
},
'b1': (1, 10),
}
}
r = nested_randomize(d)
self.assertNestedBetween(r, 1, 10)
def test_nested_randomize_floats(self):
"""
Test the nested randomize function with floats
"""
d = {
'A': {
'a1': (0.0, 1.0),
'a2': (0.0, 1.0),
},
'B': {
'C': {
'c1': (0.0, 1.0),
},
'b1': (0.0, 1.0),
}
}
r = nested_randomize(d)
self.assertNestedBetween(r, 0.0, 1.0)
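##########################################################################
## Reference Sketch
##########################################################################
# The helpers exercised above are provided by cloudscope.experiment. Purely as
# an illustration (this is not the library's implementation), a recursive merge
# consistent with the expected values in test_nested_update could look like:
def _nested_update_sketch(d, u):
    """Recursively merge dict u into dict d, updating leaf values in place."""
    for key, value in u.items():
        if isinstance(value, dict):
            d[key] = _nested_update_sketch(d.get(key, {}), value)
        else:
            d[key] = value
    return d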
##########################################################################
## Experiment Generator Tests
##########################################################################
class ExperimentGeneratorTests(unittest.TestCase, NestedAssertionMixin):
"""
Test the ExperimentGenerator base class.
"""
def setUp(self):
with open(TEMPLATE, 'r') as f:
self.template = json.load(f)
def tearDown(self):
self.template = None
def test_get_defaults(self):
"""
Test that the options are set correctly on the experiment generator
"""
users_opts = {'minimum': 1, 'maximum': 5, 'step': 2}
generator = ExperimentGenerator(self.template, users=users_opts)
self.assertEqual(generator.options['users'], users_opts)
def test_get_defaults_partial(self):
"""
Test that partial options are set correctly on the experiment generator
"""
users_opts = {'maximum': 5}
generator = ExperimentGenerator(self.template, users=users_opts)
expected = {'minimum': 1, 'maximum': 5, 'step': 1}
self.assertEqual(generator.options['users'], expected)
def test_users_defaults(self):
"""
Assert that the experiment generator does one user by default
"""
generator = ExperimentGenerator(self.template)
expected = [1]
self.assertEqual(list(generator.users()), expected)
def test_user_generation(self):
"""
Test the n_users on experiment generation
"""
users_opts = {'minimum': 1, 'maximum': 5, 'step': 2}
expected = [1, 3, 5]
generator = ExperimentGenerator(self.template, users=users_opts)
self.assertEqual(list(generator.users()), expected)
def test_interface(self):
"""
Test the experiment generator interface
"""
with self.assertRaises(NotImplementedError):
generator = ExperimentGenerator(self.template)
for experiment in generator.generate():
print experiment
def test_iterator(self):
"""
Test the experiment generator iterator interface
"""
expected = [1,2,3]
generator = ExperimentGenerator(self.template)
generator.generate = mock.MagicMock(return_value=expected)
self.assertEqual(len(generator), 3, "len not computed correctly")
self.assertEqual(list(generator), expected)
def test_jitter_type(self):
"""
Test the experiment generator jitter type
"""
klass = ExperimentGenerator
generator = klass(self.template, count=3)
for eg in generator.jitter(3):
self.assertTrue(isinstance(eg, klass))
self.assertEqual(eg.options, generator.options)
self.assertEqual(eg.template, generator.template)
self.assertEqual(eg.count, generator.count)
def test_jitter_users(self):
"""
Test the users jitter on experiment generator
"""
klass = ExperimentGenerator
generator = klass(self.template, count=3)
users_jitter = {'minimum': (1, 10), 'maximum': (1, 10), 'step': (1, 10)}
for eg in generator.jitter(3, users=users_jitter):
self.assertNestedBetween(eg.options['users'], 1, 10)
##########################################################################
## Latency Variation Tests
##########################################################################
class LatencyVariationTests(unittest.TestCase, NestedAssertionMixin):
"""
Test the LatencyVariation experiment generator.
"""
def setUp(self):
with open(TEMPLATE, 'r') as f:
self.template = json.load(f)
def tearDown(self):
self.template = None
def test_get_defaults(self):
"""
Test that the options are set correctly on the latency variation
"""
users_opts = {'minimum': 1, 'maximum': 5, 'step': 2}
latency_opts = {'minimum': 15, 'maximum': 6000, 'max_range': 800}
generator = LatencyVariation(self.template, users=users_opts, latency=latency_opts)
self.assertEqual(generator.options['users'], users_opts)
self.assertEqual(generator.options['latency'], latency_opts)
def test_get_defaults_partial(self):
"""
Test that partial options are set correctly on the latency variation
"""
users_opts = {'maximum': 5}
latency_opts = {'max_range': 800}
generator = LatencyVariation(self.template, users=users_opts, latency=latency_opts)
expected = {
'users': {'minimum': 1, 'maximum': 5, 'step': 1},
'latency': {'minimum': 30, 'maximum': 1000, 'max_range': 800},
'traces': [],
}
self.assertEqual(generator.options, expected)
def test_latency_generator(self):
"""
Test the latencies generator function
"""
latency = {'minimum': 0, 'maximum': 1000, 'max_range': 100}
generator = LatencyVariation(self.template, latency=latency, count=10)
expected = [
{'latency_range': [0, 100], 'latency_mean': 50, 'latency_stddev': 20.0},
{'latency_range': [100, 200], 'latency_mean': 150, 'latency_stddev': 20.0},
{'latency_range': [200, 300], 'latency_mean': 250, 'latency_stddev': 20.0},
{'latency_range': [300, 400], 'latency_mean': 350, 'latency_stddev': 20.0},
{'latency_range': [400, 500], 'latency_mean': 450, 'latency_stddev': 20.0},
{'latency_range': [500, 600], 'latency_mean': 550, 'latency_stddev': 20.0},
{'latency_range': [600, 700], 'latency_mean': 650, 'latency_stddev': 20.0},
{'latency_range': [700, 800], 'latency_mean': 750, 'latency_stddev': 20.0},
{'latency_range': [800, 900], 'latency_mean': 850, 'latency_stddev': 20.0},
{'latency_range': [900, 1000], 'latency_mean': 950, 'latency_stddev': 20.0},
]
self.assertEqual(list(generator.latencies(10)), expected)
def test_generate(self):
"""
Test the latency variation generation with a single user.
"""
latency = {'minimum': 0, 'maximum': 1000, 'max_range': 100}
generator = LatencyVariation(self.template, latency=latency, count=10)
expected = [
# (variable, constant, election, heartbeat, nusers)
([0, 100], 50, [500, 1000], 250, 1),
([100, 200], 150, [1500, 3000], 750, 1),
([200, 300], 250, [2500, 5000], 1250, 1),
([300, 400], 350, [3500, 7000], 1750, 1),
([400, 500], 450, [4500, 9000], 2250, 1),
([500, 600], 550, [5500, 11000], 2750, 1),
([600, 700], 650, [6500, 13000], 3250, 1),
([700, 800], 750, [7500, 15000], 3750, 1),
([800, 900], 850, [8500, 17000], 4250, 1),
([900, 1000], 950, [9500, 19000], 4750, 1),
]
for expected, experiment in zip(expected, generator):
vrbl, cons, eto, hb, nusers = expected
self.assertEqual(experiment['meta']['users'], nusers)
for node in experiment['nodes']:
if node['consistency'] == Consistency.STRONG:
self.assertEqual(node['election_timeout'], eto)
self.assertEqual(node['heartbeat_interval'], hb)
for link in experiment['links']:
if link['connection'] == 'variable':
self.assertEqual(link['latency'], vrbl)
else:
self.assertEqual(link['latency'], cons)
def test_num_experiments(self):
"""
Test the number of experiments with both user and latency dimensions
"""
users = {'maximum': 5, 'step': 2}
latency = {'minimum': 0, 'maximum': 1000, 'max_range': 100}
generator = LatencyVariation(self.template, users=users, latency=latency, count=10)
self.assertEqual(30, len(generator))
def test_jitter_type(self):
"""
Test the latency variation jitter type
"""
klass = LatencyVariation
generator = klass(self.template, count=3)
for eg in generator.jitter(3):
self.assertTrue(isinstance(eg, klass))
self.assertEqual(eg.options, generator.options)
self.assertEqual(eg.template, generator.template)
self.assertEqual(eg.count, generator.count)
def test_jitter_latency(self):
"""
Test the latency jitter on latency variation
"""
klass = LatencyVariation
generator = klass(self.template, count=3)
latency_jitter = {'minimum': (10, 1000), 'maximum': (10, 1000), 'max_range': (10, 1000)}
for eg in generator.jitter(3, latency=latency_jitter):
self.assertNestedBetween(eg.options['latency'], 10, 1000)
##########################################################################
## AntiEntropy Variation Tests
##########################################################################
class AntiEntropyVariationTests(unittest.TestCase, NestedAssertionMixin):
"""
Test the AntiEntropyVariation experiment generator.
"""
def setUp(self):
with open(TEMPLATE, 'r') as f:
self.template = json.load(f)
def tearDown(self):
self.template = None
def test_get_defaults(self):
"""
Test that the options are set correctly on the anti-entropy variation
"""
users_opts = {'minimum': 1, 'maximum': 5, 'step': 2}
latency_opts = {'minimum': 15, 'maximum': 6000, 'max_range': 800}
ae_opts = {'minimum': 100, 'maximum': 600}
generator = AntiEntropyVariation(
self.template, users=users_opts,
latency=latency_opts, anti_entropy=ae_opts
)
self.assertEqual(generator.options['users'], users_opts)
self.assertEqual(generator.options['latency'], latency_opts)
def test_get_defaults_partial(self):
"""
Test that partial options are set correctly on the anti-entropy variation
"""
users_opts = {'maximum': 5}
latency_opts = {'max_range': 800}
ae_opts = {'minimum': 100}
generator = AntiEntropyVariation(
self.template, users=users_opts,
latency=latency_opts, anti_entropy=ae_opts
)
expected = {
'users': {'minimum': 1, 'maximum': 5, 'step': 1},
'latency': {'minimum': 30, 'maximum': 1000, 'max_range': 800},
'anti_entropy': {'minimum': 100, 'maximum': settings.simulation.anti_entropy_delay},
'traces': [],
}
self.assertEqual(generator.options, expected)
def test_jitter_type(self):
"""
Test the anti-entropy variation jitter type
"""
klass = AntiEntropyVariation
generator = klass(self.template, count=3)
for eg in generator.jitter(3):
self.assertTrue(isinstance(eg, klass))
self.assertEqual(eg.options, generator.options)
self.assertEqual(eg.template, generator.template)
self.assertEqual(eg.count, generator.count)
def test_jitter_anti_entropy(self):
"""
Test the anti-entropy delay jitter on anti-entropy variation
"""
klass = AntiEntropyVariation
generator = klass(self.template, count=3)
ae_jitter = {'minimum': (100, 600), 'maximum': (100, 600)}
for eg in generator.jitter(3, anti_entropy=ae_jitter):
self.assertNestedBetween(eg.options['anti_entropy'], 100, 600)
|
|
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 Albert Kottke
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numba
import numpy as np
from scipy.optimize import minimize
from .motion import GRAVITY
from .motion import WaveField
from .site import Layer
from .site import Location
from .site import Profile
class AbstractCalculator(object):
def __init__(self):
self._loc_input = None
self._motion = None
self._profile = None
def __call__(self, motion, profile, loc_input):
self._motion = motion
self._profile = profile
self._loc_input = loc_input
@property
def motion(self):
return self._motion
@property
def profile(self):
return self._profile
@property
def loc_input(self):
return self._loc_input
def calc_accel_tf(self, lin, lout):
raise NotImplementedError
def calc_stress_tf(self, lin, lout, damped):
raise NotImplementedError
def calc_strain_tf(self, lin, lout):
raise NotImplementedError
@numba.jit
def my_trapz(thickness, property, depth_max):
total = 0
depth = 0
for t, p in zip(thickness, property):
depth += t
if depth_max < depth:
# Partial layer
total += (t - (depth - depth_max)) * p
break
total += t * p
else:
# Final infinite layer
total += (depth_max - depth) * p
return total / depth_max
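# Worked example (illustrative, not part of the original module): my_trapz
# computes a thickness-weighted average of ``property`` down to ``depth_max``,
# splitting the layer that straddles ``depth_max``. For thicknesses of
# [10, 20] m with velocities of [200, 400] m/s and depth_max = 15 m:
#     (10 * 200 + 5 * 400) / 15 = 4000 / 15 ≈ 266.7 m/s
# which is what my_trapz(np.array([10.0, 20.0]), np.array([200.0, 400.0]), 15.0)
# would be expected to return.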
class QuarterWaveLenCalculator(AbstractCalculator):
"""Compute quarter-wave length site amplification.
No consideration of nonlinearity is made by this calculator.
"""
name = "QWL"
def __init__(self, site_atten=None):
super().__init__()
self._site_atten = site_atten
def __call__(self, motion, profile, loc_input):
"""Perform the wave propagation.
Parameters
----------
motion: :class:`~.base.motion.Motion`
Input motion.
profile: :class:`~.base.site.Profile`
Site profile.
loc_input: :class:`~.base.site.Location`
Location of the input motion.
"""
super().__call__(motion, profile, loc_input)
self._crustal_amp, self._site_term = self._calc_amp(
profile.density, profile.thickness, profile.slowness
)
@property
def crustal_amp(self):
return self._crustal_amp
@property
def site_term(self):
return self._site_term
@property
def site_atten(self):
return self._site_atten
def _calc_amp(self, density, thickness, slowness):
freqs = self.motion.freqs
# 1/4 wavelength depth -- estimated for mean slowness
qwl_depth = 1 / (4 * np.mean(slowness) * freqs)
def qwl_average(param):
return np.array([my_trapz(thickness, param, qd) for qd in qwl_depth])
for _ in range(20):
qwl_slowness = qwl_average(slowness)
prev_qwl_depth = qwl_depth
qwl_depth = 1 / (4 * qwl_slowness * freqs)
if np.allclose(prev_qwl_depth, qwl_depth, rtol=0.005):
break
# FIXME return an error if not converged?
qwl_density = qwl_average(density)
crustal_amp = np.sqrt(
(density[-1] / slowness[-1]) / (qwl_density / qwl_slowness)
)
site_term = np.array(crustal_amp)
if self._site_atten:
site_term *= np.exp(-np.pi * self.site_atten * freqs)
return crustal_amp, site_term
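    # Illustrative note: because slowness is the reciprocal of velocity, the
    # expression above is the square root of the seismic impedance ratio,
    #     crustal_amp = sqrt((rho_src * v_src) / (rho_qwl * v_qwl)),
    # evaluated with quarter-wavelength averages at each frequency. For example,
    # a source region with rho = 2.8 g/cc and v = 3500 m/s over an averaged
    # near-surface column with rho = 2.0 g/cc and v = 500 m/s would give
    # sqrt((2.8 * 3500) / (2.0 * 500)) ≈ 3.1.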
def fit(
self,
target_type,
target,
adjust_thickness=False,
adjust_site_atten=False,
adjust_source_vel=False,
):
"""
Fit to a target crustal amplification or site term.
The fitting process adjusts the velocity, site attenuation, and layer
thickness (if enabled) to fit a target values. The frequency range is
specified by the input motion.
Parameters
----------
target_type: str
Options are 'crustal_amp' to only fit to the crustal amplification,
or 'site_term' to fit both the velocity and the site attenuation
parameter.
target: `array_like`
Target values.
adjust_thickness: bool (optional)
If the thickness of the layers is adjusted as well, default: False.
adjust_site_atten: bool (optional)
If the site attenuation is adjusted as well, default: False.
adjust_source_vel: bool (optional)
If the source velocity should be adjusted, default: False.
Returns
-------
profile: `pysra.site.Profile`
profile optimized to fit a target amplification.
"""
density = self.profile.density
nl = len(density)
# Slowness bounds
slowness = self.profile.slowness
thickness = self.profile.thickness
site_atten = self._site_atten
# Slowness
initial = slowness
bounds = 1 / np.tile((4000, 100), (nl, 1))
if not adjust_source_vel:
bounds[-1] = (initial[-1], initial[-1])
# Thickness bounds
if adjust_thickness:
bounds = np.r_[bounds, [[t / 2, 2 * t] for t in thickness]]
initial = np.r_[initial, thickness]
# Site attenuation bounds
if adjust_site_atten:
bounds = np.r_[bounds, [[0.0001, 0.200]]]
initial = np.r_[initial, self.site_atten]
def calc_rmse(this, that):
return np.mean(((this - that) / that) ** 2)
def err(x):
_slowness = x[0:nl]
if adjust_thickness:
_thickness = x[nl : (2 * nl)]
else:
_thickness = thickness
if adjust_site_atten:
self._site_atten = x[-1]
crustal_amp, site_term = self._calc_amp(density, _thickness, _slowness)
calc = crustal_amp if target_type == "crustal_amp" else site_term
err = 10 * calc_rmse(target, calc)
# Prefer the original values so add the difference to the error
err += calc_rmse(slowness, _slowness)
if adjust_thickness:
err += calc_rmse(thickness, _thickness)
if adjust_site_atten:
err += calc_rmse(self._site_atten, site_atten)
return err
res = minimize(err, initial, method="L-BFGS-B", bounds=bounds)
slowness = res.x[0:nl]
if adjust_thickness:
thickness = res.x[nl : (2 * nl)]
profile = Profile(
[
Layer(l.soil_type, t, 1 / s)
for l, t, s in zip(self.profile, thickness, slowness)
],
self.profile.wt_depth,
)
# Update the calculated amplification
self(self.motion, profile, self.loc_input)
return profile
class LinearElasticCalculator(AbstractCalculator):
"""Class for performing linear elastic site response."""
name = "LE"
def __init__(self):
super().__init__()
self._waves_a = np.array([])
self._waves_b = np.array([])
self._wave_nums = np.array([])
def __call__(self, motion, profile, loc_input):
"""Perform the wave propagation.
Parameters
----------
motion: :class:`~.base.motion.Motion`
Input motion.
profile: :class:`~.base.site.Profile`
Site profile.
loc_input: :class:`~.base.site.Location`
Location of the input motion.
"""
super().__call__(motion, profile, loc_input)
# Set initial properties
for l in profile:
l.reset()
if l.strain is None:
l.strain = 0.0
self._calc_waves(motion.angular_freqs, profile)
def _calc_waves(self, angular_freqs, profile):
"""Compute the wave numbers and amplitudes (up- and down-going).
Parameters
----------
angular_freqs: :class:`numpy.ndarray`
Angular frequency at which the waves are computed.
profile: :class:`~.base.site.Profile`
Site profile.
"""
# Compute the complex wave numbers of the system
wave_nums = np.empty((len(profile), len(angular_freqs)), complex)
for i, l in enumerate(profile):
wave_nums[i, :] = angular_freqs / l.comp_shear_vel
# Compute the waves. In the top surface layer, the up-going and
# down-going waves have an amplitude of 1 as they are completely
# reflected at the surface.
waves_a = np.ones_like(wave_nums, complex)
waves_b = np.ones_like(wave_nums, complex)
for i, l in enumerate(profile[:-1]):
# Complex impedance -- wave number can be zero which causes an
# error.
with np.errstate(invalid="ignore"):
cimped = (wave_nums[i] * l.comp_shear_mod) / (
wave_nums[i + 1] * profile[i + 1].comp_shear_mod
)
# Complex term to simplify equations -- uses full layer height
cterm = 1j * wave_nums[i, :] * l.thickness
waves_a[i + 1, :] = 0.5 * waves_a[i] * (1 + cimped) * np.exp(
cterm
) + 0.5 * waves_b[i] * (1 - cimped) * np.exp(-cterm)
waves_b[i + 1, :] = 0.5 * waves_a[i] * (1 - cimped) * np.exp(
cterm
) + 0.5 * waves_b[i] * (1 + cimped) * np.exp(-cterm)
# Set wave amplitudes with zero frequency to 1
mask = ~np.isfinite(cimped)
waves_a[i + 1, mask] = 1.0
waves_b[i + 1, mask] = 1.0
# fixme: Better way to handle this?
# Set wave amplitudes to 1 at frequencies near 0
mask = np.isclose(angular_freqs, 0)
waves_a[-1, mask] = 1.0
waves_b[-1, mask] = 1.0
self._waves_a = waves_a
self._waves_b = waves_b
self._wave_nums = wave_nums
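    # Illustrative note (general sanity check, not from the original comments):
    # for a single uniform layer of thickness H and shear-wave velocity Vs over
    # a stiffer halfspace, the surface transfer function built from these wave
    # amplitudes peaks near the quarter-wavelength resonance f0 = Vs / (4 * H),
    # e.g. roughly 2.5 Hz for a 30 m layer with Vs = 300 m/s.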
def wave_at_location(self, l):
"""Compute the wave field at specific location.
Parameters
----------
l : site.Location
:class:`site.Location` of the input
Returns
-------
`np.ndarray`
Amplitude and phase of waves
"""
cterm = 1j * self._wave_nums[l.index] * l.depth_within
if l.wave_field == WaveField.within:
return self._waves_a[l.index] * np.exp(cterm) + self._waves_b[
l.index
] * np.exp(-cterm)
elif l.wave_field == WaveField.outcrop:
return 2 * self._waves_a[l.index] * np.exp(cterm)
elif l.wave_field == WaveField.incoming_only:
return self._waves_a[l.index] * np.exp(cterm)
else:
raise NotImplementedError
def calc_accel_tf(self, lin, lout):
"""Compute the acceleration transfer function.
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
of the layer.
"""
tf = self.wave_at_location(lout) / self.wave_at_location(lin)
return tf
def calc_stress_tf(self, lin, lout, damped):
"""Compute the stress transfer function.
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
of the layer.
"""
tf = self.calc_strain_tf(lin, lout)
if damped:
# Scale by complex shear modulus to include the influence of
# damping
tf *= lout.layer.comp_shear_mod
else:
tf *= lout.layer.shear_mod
return tf
def calc_strain_tf(self, lin, lout):
"""Compute the strain transfer function from `lout` to
`location_in`.
The strain transfer function from the acceleration at layer `n`
(outcrop) to the mid-height of layer `m` (within) is defined as
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
of the layer.
Returns
-------
strain_tf : :class:`numpy.ndarray`
Transfer function to be applied to an acceleration FAS.
"""
# FIXME: Correct discussion for using acceleration FAS
# Strain(angFreq, z=h_m/2)
# ------------------------ =
# accel_n(angFreq)
#
# i k*_m [ A_m exp(i k*_m h_m / 2) - B_m exp(-i k*_m h_m / 2)]
# ------------------------------------------------------------
# -angFreq^2 (2 * A_n)
#
assert lout.wave_field == WaveField.within
ang_freqs = self.motion.angular_freqs
# The numerator cannot be computed using wave_at_location() because
# it is A - B.
cterm = 1j * self._wave_nums[lout.index, :] * lout.depth_within
numer = (
1j
* self._wave_nums[lout.index, :]
* (
self._waves_a[lout.index, :] * np.exp(cterm)
- self._waves_b[lout.index, :] * np.exp(-cterm)
)
)
denom = -(ang_freqs ** 2) * self.wave_at_location(lin)
# Only compute transfer function for non-zero frequencies
mask = ~np.isclose(ang_freqs, 0)
tf = np.zeros_like(mask, dtype=complex)
# Scale into units from gravity
tf[mask] = GRAVITY * numer[mask] / denom[mask]
return tf
class EquivalentLinearCalculator(LinearElasticCalculator):
"""Class for performing equivalent-linear elastic site response."""
name = "EQL"
def __init__(
self, strain_ratio=0.65, tolerance=0.01, max_iterations=15, strain_limit=0.05
):
"""Initialize the class.
Parameters
----------
strain_ratio: float, default=0.65
Ratio between the maximum strain and effective strain used to
compute strain compatible properties.
tolerance: float, default=0.01
Tolerance in the iterative properties, which would cause the
iterative process to terminate.
max_iterations: int, default=15
Maximum number of iterations to perform.
strain_limit: float, default=0.05
Limit of strain in calculations. If this strain is exceeded, the
iterative calculation is ended.
"""
super().__init__()
self._strain_ratio = strain_ratio
self._tolerance = tolerance
self._max_iterations = max_iterations
self._strain_limit = strain_limit
def __call__(self, motion, profile, loc_input):
"""Perform the wave propagation.
Parameters
----------
motion: :class:`~.base.motion.Motion`
Input motion.
profile: :class:`~.base.site.Profile`
Site profile.
loc_input: :class:`~.base.site.Location`
Location of the input motion.
"""
super().__call__(motion, profile, loc_input)
self._estimate_strains()
iteration = 0
# The iteration at which strains were last limited
limited_iter = -2
limited_strains = False
while iteration < self.max_iterations:
limited_strains = False
self._calc_waves(motion.angular_freqs, profile)
for index, layer in enumerate(profile[:-1]):
loc_layer = Location(index, layer, "within", layer.thickness / 2)
# Compute the representative strain(s) within the layer. FDM
# will provide a vector of strains.
strain = self._calc_strain(loc_input, loc_layer, motion)
if self._strain_limit and np.any(strain > self._strain_limit):
limited_strains = True
strain = np.minimum(strain, self._strain_limit)
layer.strain = strain
# Maximum error (damping and shear modulus) over all layers
max_error = max(profile.max_error)
if max_error < self.tolerance:
break
# Break if the strains were limited in the last two iterations.
if limited_strains:
if limited_iter == (iteration - 1):
break
else:
limited_iter = iteration
iteration += 1
# Compute the maximum strain within the profile.
for index, layer in enumerate(profile[:-1]):
loc_layer = Location(index, layer, "within", layer.thickness / 2)
layer.strain_max = self._calc_strain_max(loc_input, loc_layer, motion)
def _estimate_strains(self):
"""Compute an estimate of the strains."""
# Estimate the strain based on the PGV and shear-wave velocity
for l in self._profile:
l.reset()
l.strain = self._motion.pgv / l.initial_shear_vel
@property
def strain_ratio(self):
return self._strain_ratio
@property
def tolerance(self):
return self._tolerance
@property
def max_iterations(self):
return self._max_iterations
@property
def strain_limit(self):
return self._strain_limit
@classmethod
def calc_strain_ratio(cls, mag):
"""Compute the effective strain ratio using Idriss and Sun (1992).
Parameters
----------
mag: float
Magnitude of the input motion.
Returns
-------
strain_ratio : float
Effective strain ratio
References
----------
.. [1] Idriss, I. M., & Sun, J. I. (1992). SHAKE91: A computer program
for conducting equivalent linear seismic response analyses of
horizontally layered soil deposits. Center for Geotechnical
Modeling, Department of Civil and Environmental Engineering,
University of California, Davis, CA.
"""
return (mag - 1) / 10
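    # Worked example (illustrative): with the Idriss and Sun (1992) relation
    # (mag - 1) / 10, a magnitude 6.0 event gives a strain ratio of 0.5 and a
    # magnitude 7.5 event gives 0.65, which matches the default strain_ratio
    # used by this class.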
def _calc_strain(self, loc_input, loc_layer, motion, *args):
"""Compute the strain used for iterations of material properties."""
strain_max = self._calc_strain_max(loc_input, loc_layer, motion, *args)
return self.strain_ratio * strain_max
def _calc_strain_max(self, loc_input, loc_layer, motion, *args):
"""Compute the effective strain at the center of a layer."""
return motion.calc_peak(self.calc_strain_tf(loc_input, loc_layer))
class FrequencyDependentEqlCalculator(EquivalentLinearCalculator):
"""Class for performing equivalent-linear elastic site response with
frequency-dependent modulii and damping.
Parameters
----------
use_smooth_spectrum: bool, default=False
Use the Kausel & Assimaki (2002) smooth spectrum for the strain.
Otherwise, the complete Fourier amplitude spectrum is used.
strain_ratio: float, default=1.00
ratio between the maximum strain and effective strain used to compute
strain compatible properties. There is no clear guidance on the use of
the effective strain ratio. However, given the nature of the method,
it would make sense not to include an effective strain ratio.
tolerance: float, default=0.01
tolerance in the iterative properties, which would cause the iterative
process to terminate.
max_iterations: int, default=15
maximum number of iterations to perform.
References
----------
.. [1] Kausel, E., & Assimaki, D. (2002). Seismic simulation of inelastic
soils via frequency-dependent moduli and damping. Journal of
Engineering Mechanics, 128(1), 34-47.
"""
name = "FDM-KA"
def __init__(
self,
use_smooth_spectrum=False,
strain_ratio=1.0,
tolerance=0.01,
max_iterations=15,
):
"""Initialize the class."""
super().__init__(strain_ratio, tolerance, max_iterations)
# Use the smooth strain spectrum as proposed by Kausel and Assimaki
self._use_smooth_spectrum = use_smooth_spectrum
def _estimate_strains(self):
"""Estimate the strains by running an EQL site response.
This step was recommended in Section 8.3.1 of Zalachoris (2014).
"""
eql = EquivalentLinearCalculator()
eql(self._motion, self._profile, self._loc_input)
def _calc_strain(self, loc_input, loc_layer, motion, *args):
freqs = np.array(motion.freqs)
strain_tf = self.calc_strain_tf(loc_input, loc_layer)
strain_fas = np.abs(strain_tf * motion.fourier_amps)
# Maximum strain in the time domain modified by the effective strain
# ratio
strain_eff = self.strain_ratio * motion.calc_peak(strain_tf)
if self._use_smooth_spectrum:
# Equation (8)
freq_avg = np.trapz(freqs * strain_fas, x=freqs) / np.trapz(
strain_fas, x=freqs
)
# Find the average strain at frequencies less than the average
# frequency
# Equation (8)
mask = freqs < freq_avg
strain_avg = np.trapz(strain_fas[mask], x=freqs[mask]) / freq_avg
# Normalize the frequency and strain by the average values
freqs /= freq_avg
strain_fas /= strain_avg
# Fit the smoothed model at frequencies greater than the average
A = np.c_[-freqs[~mask], -np.log(freqs[~mask])]
a, b = np.linalg.lstsq(A, np.log(strain_fas[~mask]), rcond=None)[0]
# This is a modification of the published method that ensures a
# smooth transition in the strain; the eps floor below guards against
# zero frequencies in the denominator.
shape = np.minimum(
1,
np.exp(-a * freqs)
/ np.maximum(np.finfo(float).eps, np.power(freqs, b)),
)
strains = strain_eff * shape
else:
strains = strain_eff * strain_fas / np.max(strain_fas)
return strains
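# Illustrative sketch (not part of this module): the smooth-spectrum branch of
# _calc_strain above reduces the frequency-dependent strain to a shape of the
# form min(1, exp(-a * f) / f**b) in normalized frequency, scaled by the
# effective strain. The helper below simply evaluates that shape for assumed
# fit parameters a and b; it is never called by the calculators.
def _example_fdm_shape(a=0.5, b=0.8):
    freqs = np.linspace(0.1, 10.0, 5)  # normalized frequencies (freq / freq_avg)
    shape = np.minimum(
        1.0, np.exp(-a * freqs) / np.maximum(np.finfo(float).eps, np.power(freqs, b))
    )
    return freqs, shape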
|
|
'''
Copyright 2010-2013 DIMA Research Group, TU Berlin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Mar 2, 2011
@author: Alexander Alexandrov <alexander.alexandrov@tu-berlin.de>
'''
import os, sys, tempfile, optparse, datetime, time
import BaseHTTPServer
import config
from urlparse import urlparse
from SocketServer import ThreadingMixIn
from threading import Thread, Lock, RLock
from myriad.util import sysutil, timeutil
import httplib
class DGen(object):
'''
classdocs
'''
basePath = None
dgenName = None
nodeName = None
config = None
dgenConfig = None
parser = None
logBase = None
cleanup = None
sf = None
configPath = None
nodeConfig = None
datasetID = None
executeStages = None
configName = None
serverAddress = None
dgenMaster = None
dgenNodes = None
started = None
finished = None
log = None
VERSION = "0.3.0"
def __init__(self, basePath, dgenName, argv):
'''
Constructor
'''
self.basePath = basePath
self.dgenName = dgenName
self.initialize(argv)
self.dgenNodes = []
def initialize(self, argv):
parser = optparse.OptionParser(usage="%prog [options] <dgen-config>", version=self.VERSION)
parser.add_option("-s", dest="sf", type="float", default=1.0,
help="scaling factor (sf=1 generates 1GB data)")
parser.add_option("-m", dest="dataset_id", type="str", default="default-dataset",
help="ID of the generated Myriad dataset")
parser.add_option("-x", dest="execute_stages", action="append", type="str", default=[],
help="Specify a specific stage to execute")
parser.add_option("-n", dest="node_config", type="str", default="%s-node.xml" % (self.dgenName),
help="name of the node config file (should reside in the config dir)")
parser.add_option("--config-dir", dest="config_path", type="str", default="%s/config" % (self.basePath),
help="path to the myriad config folder (TODO)")
parser.add_option("--log-dir", dest="log_dir", type="str", default=None,
help="base directory for output logging")
parser.add_option("--cleanup", dest="cleanup", action="store_true",
help="remove output from previously generated job")
self.parser = parser
args, remainder = parser.parse_args(argv)
if (len(remainder) != 1):
self.error(None, True)
raise
self.log = sysutil.createLogger("myriad.dgen")
try:
self.cleanup = args.cleanup
self.sf = args.sf
self.datasetID = args.dataset_id
self.configPath = args.config_path
self.nodeConfig = args.node_config
self.logBase = args.log_dir
self.executeStages = args.execute_stages
self.configName = remainder.pop()
# load myriad config
self.config = config.readConfig(self.dgenName, self.nodeConfig, "%s/%s-frontend.xml" % (self.configPath, self.dgenName))
# load sizing config
self.dgenConfig = config.readDGenConfig("%s/%s-node.properties" % (self.configPath, self.dgenName))
DGenNode.MAX_ATTEMPTS = int(self.dgenConfig.getProperty("coordinator.node.max.attempts", DGenNode.MAX_ATTEMPTS))
DGenNode.DEAD_TIMEOUT = datetime.timedelta(0, 0, 0, int(self.dgenConfig.getProperty("coordinator.node.dead.timeout", DGenNode.DEAD_TIMEOUT.seconds*1000)))
NodeMonitor.POLL_INTERVAL = int(self.dgenConfig.getProperty("coordinator.node.monitor.interval", NodeMonitor.POLL_INTERVAL*1000))/1000.0
if (self.logBase == None):
# create log dir
self.logBase = tempfile.mkdtemp("", "%s-frontend-%s_" % (self.dgenName, self.datasetID))
# make sure that logBase directories exist
sysutil.checkDir(self.logBase)
# register file handler to the logger
sysutil.registerFileHandler(self.log, "%s/%s-frontend.log" % (self.logBase, self.dgenName))
except:
e = sys.exc_info()[1]
self.error("unexpected error: %s" % (str(e)), True)
raise
def run(self):
'''
Start the distributed generation process using the specified dgen configName.
'''
self.started = datetime.datetime.now()
server = None
monitor = None
try:
if (self.cleanup):
slaves = self.config.slaves(self.configName)
self.log.info("~" * 55)
self.log.info("Myriad Parallel Data Generator (Version %s)", self.VERSION)
self.log.info("~" * 55)
self.log.info("cleaning configuration `%s`", self.configName)
for h in slaves:
DGenHost(h).clean(self)
else:
master = self.config.master(self.configName)
nodes = self.config.nodes(self.configName)
self.log.info("~" * 55)
self.log.info("Myriad Parallel Data Generator (Version %s)", self.VERSION)
self.log.info("~" * 55)
self.log.info("running configuration `%s` with scaling factor %.3f", self.configName, self.sf)
self.dgenMaster = master
self.dgenNodes = [ DGenNode(n) for n in nodes ]
self.log.info("starting heartbeat server on address `%s:%d`", self.dgenMaster.name, self.dgenMaster.coorServerPort)
server = HeartbeatServer(self.datasetID, self.dgenNodes, ('0.0.0.0', self.dgenMaster.coorServerPort))
# start node monitor
self.log.info("starting node monitor thread")
monitor = NodeMonitor(self, server)
monitor.start()
# start server loop
serverThread = Thread(target=server.serveLoop)
serverThread.start()
self.log.info("starting %d generator nodes", len(self.dgenNodes))
self.startNodes()
# wait for server thread to finish (timeout and loop needed for KeyboardInterrupt)
while(serverThread.isAlive()):
serverThread.join(3.0)
# wait for monitor thread
monitor.join()
if (monitor.exception):
self.log.error("interrupting generation process after failure in node %d ", monitor.exception.id)
raise monitor.exception
# abort all running nodes
self.abortAllNodes()
self.finished = datetime.datetime.now()
self.log.info("generator process finished in %s seconds", timeutil.formatTime(self.finished - self.started))
except KeyboardInterrupt:
self.log.warning("execution interrupted by user")
if (monitor != None):
monitor.shutdown()
self.abortAllNodes()
raise
except NodeFailureException, e:
self.abortAllNodes()
if (monitor != None):
monitor.shutdown()
self.error(str(e), False)
raise
except config.UnknownConfigObjectException, e:
self.abortAllNodes()
self.error(str(e), False)
raise
except:
e = sys.exc_info()[1]
if (monitor != None):
monitor.shutdown()
self.error(str(e), False)
raise
self.abortAllNodes()
def startNodes(self):
for node in self.dgenNodes:
node.start(self, len(self.dgenNodes))
def abortAllNodes(self):
for node in self.dgenNodes:
node.abort(self, len(self.dgenNodes))
def error(self, message=None, withUsage = False):
if (withUsage):
self.parser.print_usage(sys.stderr)
if (message != None):
self.log.error("%s: error: %s", self.parser.get_prog_name(), message)
class NodeMonitor(Thread):
POLL_INTERVAL = 5.0
dgen = None
server = None
exception = None
isShutdown = False
log = None
def __init__(self, dgen, server):
Thread.__init__(self)
self.dgen = dgen
self.server = server
self.isShutdown = False
self.log = sysutil.getExistingLogger("myriad.dgen")
def run(self):
while (not self.isShutdown):
time.sleep(3.0)
self.server.nonReadyLock.acquire()
try:
if (self.server.nonReady == 0):
self.isShutdown = True
for node in self.server.nodes:
if (node.isDead()):
self.log.warning("restarting dead node #%d", node.id)
node.restart(self.dgen, len(self.server.nodes))
except NodeFailureException, e:
self.isShutdown = True
self.exception = e
self.server.nonReadyLock.release()
self.server.stopServeLoop()
def shutdown(self):
self.isShutdown = True
class HeartbeatServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
datasetID = None
nodes = []
nonReady = None
nonReadyLock = None
isShutdown = False
def __init__(self, datasetID, nodes, address):
BaseHTTPServer.HTTPServer.__init__(self, address, RequestHandler)
self.datasetID = datasetID
self.nodes = nodes
self.nonReady = len(nodes)
self.nonReadyLock = Lock()
self.isShutdown = False
def serveLoop(self):
while (not self.isShutdown):
self.handle_request()
def stopServeLoop(self):
self.isShutdown = True
self.makeSentinelRequest()
def makeSentinelRequest(self):
try:
conn = httplib.HTTPConnection(self.server_address[0], self.server_address[1])
conn.request("GET", "/sentinel")
conn.getresponse()
conn.close()
except:
pass
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
GET_REFRESH = 8000
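    # Note (inferred from the handlers below): generator nodes report progress
    # with HEAD requests whose query string carries the node id, a numeric
    # status code, and a progress fraction, e.g.
    #     HEAD /?id=3&status=1&progress=0.42
    # while a plain GET / renders the HTML status page of the coordinator.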
def do_HEAD(self):
self.send_response(200)
self.end_headers()
result = urlparse(self.path, '', False)
params = dict([part.split('=') for part in result[4].split('&')])
status = int(params['status'])
if (status >= DGenNode.INITIALIZING and status <= DGenNode.ABORTED): # valid values for heartbeats
node = self.server.nodes[int(params['id'])]
node.lock.acquire()
if (status == DGenNode.READY):
self.server.nonReadyLock.acquire()
node.lastBeat = datetime.datetime.now()
if (node.status != status and node.status < status):
log = sysutil.getExistingLogger("myriad.dgen")
log.info("node %05d: %s -> %s", node.id, DGenNode.STATUS_STRING[node.status], DGenNode.STATUS_STRING[status])
node.status = status
if (node.status == DGenNode.ACTIVE):
node.progress = float(params['progress'])
elif (node.status == DGenNode.READY):
node.progress = 1.0
node.finished = datetime.datetime.now()
self.server.nonReady -= 1
elif (node.status == DGenNode.ABORTED or node.status == DGenNode.FAILED):
pass
else:
log = sysutil.getExistingLogger("myriad.dgen")
log.error("unknown status %d for node %d", node.status, node.id)
if (status == DGenNode.READY):
self.server.nonReadyLock.release()
node.lock.release()
def do_GET(self):
if (self.path == '/sentinel'):
self.send_response(200)
self.end_headers()
return
elif (self.path != '/'):
self.send_response(404)
self.end_headers()
return
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
print >> self.wfile, "<html>"
print >> self.wfile, "<head>"
print >> self.wfile, " <title>Myriad Parallel Data Generator » % s</title>" % (self.server.datasetID)
print >> self.wfile, " <script type='text/JavaScript'>"
print >> self.wfile, " <!--"
print >> self.wfile, " function timedRefresh(timeoutPeriod) {"
print >> self.wfile, " setTimeout('location.reload(true);', timeoutPeriod);"
print >> self.wfile, " }"
print >> self.wfile, " // -->"
print >> self.wfile, " </script>"
print >> self.wfile, "</head>"
print >> self.wfile, "<body style='margin: 0; padding: 2ex 2em; font-size: 14px;' onload='javascript:timedRefresh(%d);'>" % (self.GET_REFRESH)
print >> self.wfile, "<div id='header' style='text-align: center;'>"
print >> self.wfile, " <h1 style='color: #333; font-size: 2em; margin: 0 0 0.5ex 0; padding: 0;'>Myriad Parallel Data Generator</h1>"
print >> self.wfile, " <h2 style='color: #333; font-size: 1.5em; margin: 0 0 3ex 0; padding: 0;'>Job coordinator for dataset »%s« </h2>" % (self.server.datasetID)
print >> self.wfile, "</div>"
print >> self.wfile, "<table style='width: 100%; border: 1px solid #999;' cellspacing='5' cellpadding='0'>"
print >> self.wfile, "<thead>"
print >> self.wfile, "<tr>"
print >> self.wfile, " <td style='width: 10%; background: #454545; color: #fafafa; padding: 0.5ex'>Node #</td>"
print >> self.wfile, " <td style='width: 20%; background: #454545; color: #fafafa; padding: 0.5ex'>Hostname</td>"
print >> self.wfile, " <td style='width: 35%; background: #454545; color: #fafafa; padding: 0.5ex'>Progress</td>"
print >> self.wfile, " <td style='width: 15%; background: #454545; color: #fafafa; padding: 0.5ex'>Status</td>"
print >> self.wfile, " <td style='width: 10%; background: #454545; color: #fafafa; padding: 0.5ex'>Attempt #</td>"
print >> self.wfile, " <td style='width: 10%; background: #454545; color: #fafafa; padding: 0.5ex'>Time</td>"
print >> self.wfile, "</tr>"
print >> self.wfile, "</thead>"
print >> self.wfile, "<tbody>"
for n in self.server.nodes:
print >> self.wfile, "<tr>"
print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%05d</td>" % (n.id)
print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%s</td>" % (n.host)
print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'><span style='float: left; width: 15%%;'>%02d%%</span><span style='float: left; width: %d%%; border-left: 1px solid #666; background: #666; color: #666; overflow: hidden;'>»</span></td>" % (100 * n.progress, 80 * n.progress)
print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%s</td>" % (DGenNode.STATUS_STRING[n.status])
print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%d</td>" % (n.attempt)
print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%s</td>" % (timeutil.formatTime(n.lastBeat - n.started))
print >> self.wfile, "</tr>"
print >> self.wfile, "</tbody>"
print >> self.wfile, "</table>"
print >> self.wfile, "<body>"
print >> self.wfile, "</html>"
def log_request(self, code='-', size='-'):
'''
Disable request logging for the communication server
'''
pass
class DGenHost(config.Host):
'''
classdocs
'''
name = None
outputBase = None
def __init__(self, envNode):
'''
Constructor
'''
self.name = envNode.name
self.outputBase = envNode.outputBase
def clean(self, dgen):
log = sysutil.getExistingLogger("myriad.dgen")
log.info("clearing host `%s`", self.name)
os.system("ssh %s 'rm -Rf %s/%s'" % (self.name, self.outputBase, dgen.datasetID))
class DGenNode(config.Node):
'''
classdocs
'''
id = None
host = None
dgenPath = None
dgenName = None
outputBase = None
lock = None
attempt = None
progress = None
status = None
started = None
finished = None
lastBeat = None
NEW = -1
INITIALIZING = 0
ACTIVE = 1
READY = 2
ABORTED = 3
FAILED = 4
    DEAD_TIMEOUT = datetime.timedelta(0, 30)  # declare a node dead after 30 seconds without a heartbeat
MAX_ATTEMPTS = 3
STATUS_STRING = {
-1: "NEW",
0: "INITIALIZING",
1: "ACTIVE",
2: "READY",
3: "ABORTED",
4: "FAILED",
}
def __init__(self, envNode):
'''
Constructor
'''
self.id = envNode.id
self.host = envNode.host
self.dgenPath = envNode.dgenPath
self.dgenName = envNode.dgenName
self.outputBase = envNode.outputBase
self.nodeConfig = envNode.nodeConfig
self.attempt = 0
self.resetState()
self.lock = RLock()
def start(self, dgen, nodesTotal):
        self.lock.acquire()
os.system("ssh -f %s '%s/bin/%s-node -s%.3f -m%s -i%d -N%d -H%s -P%d -o%s -n%s %s > /dev/null 2> /dev/null &'" % (self.host, self.dgenPath, self.dgenName, dgen.sf, dgen.datasetID, self.id, nodesTotal, dgen.dgenMaster.name, dgen.dgenMaster.coorServerPort, self.outputBase, self.nodeConfig, ' '.join(map(lambda s: '-x%s' % s, dgen.executeStages))))
self.attempt += 1
self.resetState()
self.lock.release()
def restart(self, dgen, nodesTotal):
        self.lock.acquire()
if (self.attempt < DGenNode.MAX_ATTEMPTS):
os.system("ssh -f %s '%s/bin/%s-kill %d %s > /dev/null 2> /dev/null'" % (self.host, self.dgenPath, self.dgenName, self.id, dgen.datasetID))
os.system("ssh -f %s '%s/bin/%s-node -s%.3f -m%s -i%d -N%d -H%s -P%d -o%s -n%s %s > /dev/null 2> /dev/null &'" % (self.host, self.dgenPath, self.dgenName, dgen.sf, dgen.datasetID, self.id, nodesTotal, dgen.dgenMaster.name, dgen.dgenMaster.coorServerPort, self.outputBase, self.nodeConfig, ' '.join(map(lambda s: '-x%s' % s, dgen.executeStages))))
self.attempt += 1
self.resetState()
self.lock.release()
else:
self.status = DGenNode.FAILED
self.lock.release()
raise NodeFailureException(self.id)
def abort(self, dgen, nodesTotal):
self.lock.acquire()
if (self.status < DGenNode.READY):
log = sysutil.getExistingLogger("myriad.dgen")
log.info("aborting node #%03d" % (self.id))
os.system("ssh -f %s '%s/bin/%s-kill %d %s > /dev/null 2> /dev/null'" % (self.host, self.dgenPath, self.dgenName, self.id, dgen.datasetID))
self.status = DGenNode.FAILED
self.lock.release()
def isDead(self):
self.lock.acquire()
if (self.status == DGenNode.FAILED):
self.lock.release()
raise NodeFailureException(self.id)
if (self.status == DGenNode.READY):
self.lock.release()
return False
elif (self.status == DGenNode.ABORTED):
self.lock.release()
return True
else:
diff = datetime.datetime.now() - self.lastBeat
self.lock.release()
return diff > DGenNode.DEAD_TIMEOUT
def resetState(self):
self.progress = 0.0
self.status = DGenNode.NEW
self.started = datetime.datetime.now()
self.lastBeat = datetime.datetime.now()
class NodeFailureException(RuntimeError):
id = None
def __init__(self, id):
self.id = id
def __str__(self):
return "node %d failed" % (self.id)
|
|
from ..common.geometry import Point, Polygon, Envelope, SpatialReference
from .. import AGOLTokenSecurityHandler
from .._abstract.abstract import BaseGeoEnrichment
from ..manageorg import Administration
import json
import csv
import os
import numpy as np
########################################################################
class GeoEnrichment(BaseGeoEnrichment):
""""""
_url = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_countryCodeFile = None
_dataCollectionNames = None
_countrycodes = None
_datacollectionnames = None
_dataCollectionFile = None
_base_url = "http://geoenrich.arcgis.com/arcgis/rest/services/World/geoenrichmentserver"
_url_standard_geography_query = "/StandardGeographyLevels" # returns boundaries
_url_standard_geography_query_execute = "/StandardGeographyQuery/execute" # returns report
_url_getVariables = "/GetVariables/execute" # returns report
_url_create_report = "/GeoEnrichment/createreport" # generates a report
_url_list_reports = "/Geoenrichment/Reports" # return report types for a country
_url_data_collection = "/Geoenrichment/dataCollections"
#----------------------------------------------------------------------
def __init__(self,
securityHandler,
proxy_url=None,
proxy_port=None):
"""Constructor"""
        if securityHandler is not None:
self._referer_url = securityHandler.referer_url
else:
raise Exception("A SecurityHandler object is required for this object.")
admin = Administration(securityHandler=securityHandler,
proxy_url=proxy_url,
proxy_port=proxy_port)
self._base_url = admin.portals.portalSelf.helperServices['geoenrichment']['url']
del admin
self._securityHandler = securityHandler
self._countryCodeFile = os.path.join(os.path.dirname(__file__),
"__countrycodes.csv")
self._dataCollectionFile = os.path.join(os.path.dirname(__file__),
"__datacollectionnames.csv")
self._countrycodes = self._readcsv(self._countryCodeFile)
self._dataCollectionCodes = self._readcsv(path_to_csv=self._dataCollectionFile)
#----------------------------------------------------------------------
def _readcsv(self, path_to_csv):
"""reads a csv column"""
import numpy as np
return np.genfromtxt(path_to_csv,
dtype=None,
delimiter=',',
names=True)
#----------------------------------------------------------------------
@property
def allowedTwoDigitCountryCodes(self):
"""returns a list of accepted two digit country codes"""
return self._countrycodes['Two_Digit_Country_Code']
#----------------------------------------------------------------------
@property
def allowedCountryNames(self):
"""returns a list of accepted country names"""
return self._countrycodes['Country_Name']
#----------------------------------------------------------------------
@property
def allowedThreeDigitNames(self):
"""returns a list of accepted three digit country codes"""
return self._countrycodes['Three_Digit_Country_Code']
#----------------------------------------------------------------------
@property
def dataCollectionNames(self):
"""returns a list of data collection names"""
return self._dataCollectionCodes['Data_Collection_Name']
#----------------------------------------------------------------------
def queryDataCollectionByName(self, countryName):
"""
returns a list of available data collections for a given country
name.
Inputs:
            countryName - name of the country for which to find the data collections.
        Output:
            list or None. None implies the countryName could not be found.
"""
var = self._dataCollectionCodes
try:
return [x[0] for x in var[var['Countries'] == countryName]]
except:
return None
#----------------------------------------------------------------------
def findCountryTwoDigitCode(self, countryName):
"""
Returns the two digit code based on a country name
Inputs:
            countryName - name of the country whose two digit code is needed.
        Output:
            string or None. None implies the countryName could not be found.
"""
var = self._countrycodes
try:
return var[var['Country_Name'] == countryName][0][1]
except:
return None
#----------------------------------------------------------------------
def findCountryThreeDigitCode(self, countryName):
"""
Returns the three digit code based on a country name
Inputs:
            countryName - name of the country whose three digit code is needed.
        Output:
            string or None. None implies the countryName could not be found.
"""
var = self._countrycodes
try:
return var[var['Country_Name'] == countryName][0][2]
except:
return None
#----------------------------------------------------------------------
def __geometryToDict(self, geom):
"""converts a geometry object to a dictionary"""
if isinstance(geom, dict):
return geom
elif isinstance(geom, Point):
pt = geom.asDictionary
return {"geometry": {"x" : pt['x'], "y" : pt['y']}}
elif isinstance(geom, Polygon):
poly = geom.asDictionary
return {
"geometry" : {
"rings" : poly['rings'],
'spatialReference' : poly['spatialReference']
}
}
elif isinstance(geom, list):
return [self.__geometryToDict(g) for g in geom]
#----------------------------------------------------------------------
def lookUpReportsByCountry(self, countryName):
"""
        looks up the available report types for a country by its name
Inputs
countryName - name of the country to get reports list.
"""
code = self.findCountryTwoDigitCode(countryName)
if code is None:
raise Exception("Invalid country name.")
url = self._base_url + self._url_list_reports + "/%s" % code
params = {
"f" : "json",
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def createReport(self,
out_file_path,
studyAreas,
report=None,
format="PDF",
reportFields=None,
studyAreasOptions=None,
useData=None,
inSR=4326,
):
"""
The GeoEnrichment Create Report method uses the concept of a study
area to define the location of the point or area that you want to
enrich with generated reports. This method allows you to create
many types of high-quality reports for a variety of use cases
describing the input area. If a point is used as a study area, the
service will create a 1-mile ring buffer around the point to
collect and append enrichment data. Optionally, you can create a
buffer ring or drive-time service area around the points to prepare
PDF or Excel reports for the study areas.
Note:
For full examples for each input, please review the following:
http://resources.arcgis.com/en/help/arcgis-rest-api/#/Create_report/02r30000022q000000/
Inputs:
out_file_path - save location of the report
studyAreas - Required parameter to specify a list of input
features to be enriched. The input can be a Point, Polygon,
            Address, or named administrative boundary. The locations can be
passed in as a single object or as a list of objects.
report - Default report to generate.
            format - output format of the generated report. Options are: XLSX or PDF.
reportFields - Optional parameter specifies additional choices to
customize reports. See the URL above to see all the options.
studyAreasOptions - Optional parameter to specify enrichment
behavior. For points described as map coordinates, a 1-mile
ring area centered on each site will be used by default. You
can use this parameter to change these default settings.
With this parameter, the caller can override the default
behavior describing how the enrichment attributes are appended
to the input features described in studyAreas. For example,
you can change the output ring buffer to 5 miles, change the
number of output buffers created around each point, and also
change the output buffer type to a drive-time service area
rather than a simple ring buffer.
useData - By default, the service will automatically determine
the country or dataset that is associated with each location or
area submitted in the studyAreas parameter; however, there is
an associated computational cost which may lengthen the time it
takes to return a response. To skip this intermediate step and
potentially improve the speed and performance of the service,
the caller can specify the country or dataset information up
front through this parameter.
inSR - parameter to define the input geometries in the studyAreas
parameter in a specified spatial reference system.
"""
url = self._base_url + self._url_create_report
        if not isinstance(studyAreas, list):
studyAreas = [studyAreas]
studyAreas = self.__geometryToDict(studyAreas)
params = {
"f" : "bin",
"studyAreas" : studyAreas,
"inSR" : inSR,
}
        if report is not None:
            params['report'] = report
        if format is None:
            format = "pdf"
        if format.lower() in ['pdf', 'xlsx']:
            params['format'] = format.lower()
        else:
            raise AttributeError("Invalid format value.")
        if reportFields is not None:
            params['reportFields'] = reportFields
        if studyAreasOptions is not None:
            params['studyAreasOptions'] = studyAreasOptions
        if useData is not None:
            params['useData'] = useData
return self._download_file(url=url,
save_path=os.path.dirname(out_file_path),
file_name=None,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
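    #----------------------------------------------------------------------
    # Hedged usage sketch (not part of the original class). It assumes that
    # AGOLTokenSecurityHandler accepts a username/password pair; credentials
    # and the output path are placeholders. The study area is passed as a
    # plain dict, the same shape that a Point is converted to above.
    @staticmethod
    def _exampleCreateReport():
        sh = AGOLTokenSecurityHandler(username="user", password="pass")
        ge = GeoEnrichment(securityHandler=sh)
        return ge.createReport(out_file_path="./enrichment_report.pdf",
                               studyAreas={"geometry": {"x": -122.41, "y": 37.77}},
                               format="PDF",
                               inSR=4326)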
#----------------------------------------------------------------------
def dataCollections(self,
countryName=None,
addDerivativeVariables=["*"],
outFields=["*"],
suppressNullValues=False):
"""
The GeoEnrichment service uses the concept of a data collection to
define the data attributes returned by the enrichment service. Each
data collection has a unique name that acts as an ID that is passed
in the dataCollections parameter of the GeoEnrichment service.
Some data collections (such as default) can be used in all
supported countries. Other data collections may only be available
in one or a collection of countries. Data collections may only be
available in a subset of countries because of differences in the
demographic data that is available for each country. A list of data
collections for all available countries can be generated with the
data collection discover method.
For full help please go here:
http://resources.arcgis.com/en/help/arcgis-rest-api/#/Data_collections/02r30000021t000000/
Inputs:
            countryName - lets the user supply an optional name of a
country in order to get information about the data collections
in that given country.
addDerivativeVariables - Optional parameter to specify a list of
field names that include variables for the derivative
statistics.
outFields - Optional parameter to specify a list of output
fields in the response.
suppressNullValues - Optional parameter to return only values
that are not NULL in the output response. Adding the optional
suppressNullValues parameter to any data collections discovery
method will reduce the size of the output that is returned
"""
if countryName is None:
url = self._base_url + self._url_data_collection
else:
url = self._base_url + self._url_data_collection + "/%s" % countryName
params = {
"f" : "token"
}
_addDerivVals = ["percent","index","average","all","*"]
if addDerivativeVariables in _addDerivVals:
params['addDerivativeVariables'] = addDerivativeVariables
if not outFields is None:
params['outFields'] = outFields
if not suppressNullValues is None and \
isinstance(suppressNullValues, bool):
if suppressNullValues:
params['suppressNullValues'] = "true"
else:
params['suppressNullValues'] = "false"
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def getVariables(self,
sourceCountry,
optionalCountryDataset=None,
searchText=None):
r"""
The GeoEnrichment GetVariables helper method allows you to search
the data collections for variables that contain specific keywords.
To see the comprehensive set of global Esri Demographics data that
are available, use the interactive data browser:
http://resources.arcgis.com/en/help/arcgis-rest-api/02r3/02r300000266000000.htm#GUID-2D66F7F8-83A9-4EAA-B5E2-F09D629939CE
Inputs:
sourceCountry - specify the source country for the search. Use this
parameter to limit the search and query of standard geographic
features to one country. This parameter supports both the
two-digit and three-digit country codes illustrated in the
coverage table.
Examples
Example 1 - Set source country to the United States:
sourceCountry=US
                Example 2 - Set source country to Canada:
sourceCountry=CA
Additional notes
Currently, the service is available for Canada, the
United States and a number of European countries. Other
countries will be added in the near future.
The list of available countries and their associated
IDS are listed in the coverage section.
optionalCountryDataset - Optional parameter to specify a specific
dataset within a defined country. This parameter will not be used
in the Beta release. In the future, some countries may have two or
more datasets that may have different vintages and standard
geography areas. For example, in the United States, there may be
an optional dataset with historic census data from previous years.
Examples
optionalCountryDataset=USA_ESRI_2013
Additional notes
Most countries only have a single dataset.
The United States has multiple datasets.
searchText - Optional parameter to specify the text to query and
search the data collections for the country and datasets
specified. You can use this parameter to query and find specific
keywords that are contained in a data collection.
Default value
(null or empty)
Examples
                Example 1 - Return all the data collections and variables that contain the word furniture:
searchText=furniture
Search terms
A query is broken up into terms and operators. There are two types of terms: Single Terms and Phrases.
A Single Term is a single word such as "Income" or "Households".
A Phrase is a group of words surrounded by double quotes such as "Household Income".
Multiple terms can be combined together with Boolean operators to form a more complex query (see below).
Fields
Geography search supports fielded data. When performing a search, you can either specify a field or use search through all fields.
You can search any field by typing the field name followed by a colon ":" then the term you are looking for.
For example, to search for "Income" in the Alias field:
Alias:Income
The search supports single and multiple character wildcard searches within single terms (not within phrase queries).
To perform a single character wildcard search, use the "?" symbol.
To perform a multiple character wildcard search, use the "*" symbol.
            The single character wildcard search looks for terms that match the term with a single character replaced. For example, to search for "San" or "Sen" you can use the search: S?n
Fuzzy searches
Fuzzy searches are based on the Levenshtein Distance or Edit Distance algorithm. To perform a fuzzy search, you can explicitly set a fuzzy search by using the tilde symbol "~" at the end of a Single Term.
For example, a term similar in spelling to "Hous" uses the fuzzy search:
Hous~
An additional (optional) numeric parameter can be specified after the tilde symbol ("~") to set the similarity tolerance. The value is between 0 and 1; with a value closer to 1, only terms with a higher similarity will be matched.
            For example, if you only want to match terms with a similarity of 0.8 or higher, you can set the fuzzy search as follows:
hous~0.8
The default that is used if the optional similarity number is not provided is 0.5.
Boolean operators
Boolean operators allow terms to be combined through logic operators. The search supports AND, "+", OR, NOT and "-" as Boolean operators. Boolean operators must be ALL CAPS.
In searchText , the AND operator is the default conjunction operator. This means that if there is no Boolean operator between two or more terms, the AND operator is used. The AND operator matches items where both terms exist anywhere in the list of standard geography features. The symbol "&" can be used in place of the word AND.
The OR operator links two terms and finds a matching variable if either of the terms exist. This is equivalent to a union with using sets. The symbol "||" can be used in place of the word OR.
To search for features that contain either "Income" or "Wealth" use the following query:
Income OR Wealth
The "+" or required operator requires that the term after the "+" symbol exist somewhere in the attributes of a variable.
To search for features that must contain "Income" and may contain "Household" use the following query:
+Income OR Household
Escaping Special Characters
Search supports escaping special characters that are part of the query syntax. The available special characters are as follows:
+ - && || ! ( ) { } [ ] ^ " ~ * ? : \
To escape these characters, use the \ before the character.
"""
url = self._base_url + self._url_getVariables
params = {
"f" : "json",
"sourceCountry" : sourceCountry
}
        if searchText is not None:
            params["searchText"] = searchText
        if optionalCountryDataset is not None:
            params['optionalCountryDataset'] = optionalCountryDataset
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
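    #----------------------------------------------------------------------
    # Hedged usage sketch (not part of the original class): searches the U.S.
    # data collections for variables whose metadata mentions "Income", using
    # the searchText syntax described in the docstring above. Credentials are
    # placeholders and the AGOLTokenSecurityHandler signature is assumed.
    @staticmethod
    def _exampleGetVariables():
        sh = AGOLTokenSecurityHandler(username="user", password="pass")
        ge = GeoEnrichment(securityHandler=sh)
        return ge.getVariables(sourceCountry="US", searchText="Income")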
#----------------------------------------------------------------------
def standardGeographyQuery(self,
sourceCountry=None,
optionalCountryDataset=None,
geographyLayers=None,
geographyIDs=None,
geographyQuery=None,
returnSubGeographyLayer=False,
subGeographyLayer=None,
subGeographyQuery=None,
outSR=4326,
returnGeometry=False,
returnCentroids=False,
generalizationLevel=0,
useFuzzySearch=False,
featureLimit=1000):
"""
The GeoEnrichment service provides a helper method that returns
standard geography IDs and features for the supported geographic
levels in the United States and Canada.
As indicated throughout this documentation guide, the GeoEnrichment
service uses the concept of a study area to define the location of
the point or area that you want to enrich with additional
information. Locations can also be passed as one or many named
statistical areas. This form of a study area lets you define an
area by the ID of a standard geographic statistical feature, such
as a census or postal area. For example, to obtain enrichment
information for a U.S. state, county or ZIP Code or a Canadian
province or postal code, the Standard Geography Query helper method
allows you to search and query standard geography areas so that
they can be used in the GeoEnrichment method to obtain facts about
the location.
The most common workflow for this service is to find a FIPS
(standard geography ID) for a geographic name. For example, you can
use this service to find the FIPS for the county of San Diego which
is 06073. You can then use this FIPS ID within the GeoEnrichment
service study area definition to get geometry and optional
demographic data for the county. This study area definition is
passed as a parameter to the GeoEnrichment service to return data
defined in the enrichment pack and optionally return geometry for
the feature.
For examples and more help with this function see:
http://resources.arcgis.com/en/help/arcgis-rest-api/#/Standard_geography_query/02r30000000q000000/
Inputs:
sourceCountry - Optional parameter to specify the source country
for the search. Use this parameter to limit the search and
query of standard geographic features to one country. This
parameter supports both the two-digit and three-digit country
codes illustrated in the coverage table.
optionalCountryDataset - Optional parameter to specify a
specific dataset within a defined country.
geographyLayers - Optional parameter to specify which standard
geography layers are being queried or searched. If this
parameter is not provided, all layers within the defined
country will be queried.
geographyIDs - Optional parameter to specify which IDs for the
standard geography layers are being queried or searched. You
can use this parameter to return attributes and/or geometry for
standard geographic areas for administrative areas where you
already know the ID, for example, if you know the Federal
Information Processing Standard (FIPS) Codes for a U.S. state
or county; or, in Canada, to return the geometry and attributes
for a Forward Sortation Area (FSA).
geographyQuery - Optional parameter to specify the text to query
and search the standard geography layers specified. You can use
this parameter to query and find standard geography features
that meet an input term, for example, for a list of all the
U.S. counties that contain the word "orange". The
geographyQuery parameter can be a string that contains one or
more words.
returnSubGeographyLayer - Use this optional parameter to return
all the subgeographic areas that are within a parent geography.
For example, you could return all the U.S. counties for a given
U.S. state or you could return all the Canadian postal areas
(FSAs) within a Census Metropolitan Area (city).
When this parameter is set to true, the output features will be
defined in the subGeographyLayer. The output geometries will be
in the spatial reference system defined by outSR.
subGeographyLayer - Use this optional parameter to return all
the subgeographic areas that are within a parent geography. For
example, you could return all the U.S. counties within a given
U.S. state or you could return all the Canadian postal areas
(FSAs) within a Census Metropolitan Areas (city).
When this parameter is set to true, the output features will be
defined in the subGeographyLayer. The output geometries will be
in the spatial reference system defined by outSR.
subGeographyQuery - Optional parameter to filter the results of
the subgeography features that are returned by a search term.
You can use this parameter to query and find subgeography
features that meet an input term. This parameter is used to
filter the list of subgeography features that are within a
parent geography. For example, you may want a list of all the
ZIP Codes that are within "San Diego County" and filter the
results so that only ZIP Codes that start with "921" are
included in the output response. The subgeography query is a
string that contains one or more words.
outSR - Optional parameter to request the output geometries in a
specified spatial reference system.
returnGeometry - Optional parameter to request the output
geometries in the response.
returnCentroids - Optional Boolean parameter to request the
output geometry to return the center point for each feature.
Use this parameter to return all the geometries as points. For
example, you could return all U.S. ZIP Code centroids (points)
rather than providing the boundaries.
generalizationLevel - Optional integer that specifies the level
of generalization or detail in the area representations of the
administrative boundary or standard geographic data layers.
Values must be whole integers from 0 through 6, where 0 is most
detailed and 6 is most generalized.
useFuzzySearch - Optional Boolean parameter to define if text
provided in the geographyQuery parameter should utilize fuzzy
search logic. Fuzzy searches are based on the Levenshtein
Distance or Edit Distance algorithm.
featureLimit - Optional integer value where you can limit the
number of features that are returned from the geographyQuery.
"""
url = self._base_url + self._url_standard_geography_query_execute
params = {
"f" : "json"
}
        if sourceCountry is not None:
            params['sourceCountry'] = sourceCountry
        if optionalCountryDataset is not None:
            params['optionalCountryDataset'] = optionalCountryDataset
        if geographyLayers is not None:
            params['geographylayers'] = geographyLayers
        if geographyIDs is not None:
            params['geographyids'] = json.dumps(geographyIDs)
        if geographyQuery is not None:
            params['geographyQuery'] = geographyQuery
        if isinstance(returnSubGeographyLayer, bool):
            params['returnSubGeographyLayer'] = returnSubGeographyLayer
        if subGeographyLayer is not None:
            params['subGeographyLayer'] = json.dumps(subGeographyLayer)
        if subGeographyQuery is not None:
            params['subGeographyQuery'] = subGeographyQuery
        if isinstance(outSR, int):
            params['outSR'] = outSR
        if isinstance(returnGeometry, bool):
            params['returnGeometry'] = returnGeometry
        if isinstance(returnCentroids, bool):
            params['returnCentroids'] = returnCentroids
        if isinstance(generalizationLevel, int):
            params['generalizationLevel'] = generalizationLevel
        if isinstance(useFuzzySearch, bool):
            params['useFuzzySearch'] = json.dumps(useFuzzySearch)
        # default featureLimit to 1000 if the caller passes None or a non-integer
        if featureLimit is None or not isinstance(featureLimit, int):
            featureLimit = 1000
        params['featureLimit'] = featureLimit
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
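#----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the docstring above
# mentions looking up the FIPS code for San Diego County (06073); this shows
# how such a query might be issued. Credentials are placeholders, the
# AGOLTokenSecurityHandler signature is assumed, and "US.Counties" is an
# assumed standard geography layer id.
def _exampleFindSanDiegoFIPS():
    sh = AGOLTokenSecurityHandler(username="user", password="pass")
    ge = GeoEnrichment(securityHandler=sh)
    return ge.standardGeographyQuery(sourceCountry="US",
                                     geographyLayers=["US.Counties"],
                                     geographyQuery="San Diego",
                                     returnGeometry=False,
                                     featureLimit=10)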
|
|
#!/usr/bin/env python
import os
import re
from optparse import OptionParser
from jinja2 import Environment, PackageLoader
from reviewboard import get_version_string
env = Environment(loader=PackageLoader('reviewboard',
'../contrib/tools/templates/extensions'))
options = None
def get_confirmation(question):
"""
Will pose the question to the user and keep asking them until they
provide an answer that starts with either a 'y' or an 'n', at which
point it will return True if it was a 'y'.
"""
while True:
response = raw_input("%s (y/n): " % question).lower()
if re.match(r'^[yn]', response) is not None:
break
print("Incorrect option '%s'" % response)
return response[0] == 'y'
class NamingConvention(object):
"""
Provides functionality for testing adherence to a naming convention
and a method for converting a string to the convention.
"""
ILLEGAL_CHARACTERS = re.compile(r'[^A-Za-z0-9 ]')
def formatted(self, string):
return False
def convert(self, string):
return string
class CamelCase(NamingConvention):
"""
This represents the camel case naming convention and is typically used for
class names. All tokens are one of the following:
1) Alphabetic and starting with a capital
2) Numeric
3) Alphanumeric and starting with a capital letter
There must be at least one token, and the first character must be a
capital letter.
"""
REGEX = re.compile(r'^[A-Z][a-z0-9]*(([0-9]+)|([A-Z][a-z0-9]*))*$')
def formatted(self, string):
return re.match(self.REGEX, string) is not None
def convert(self, string):
string = re.sub(self.ILLEGAL_CHARACTERS, " ", string)
string = re.sub(r'([0-9a-zA-Z])([A-Z])', r'\1 \2', string)
return ''.join([word.capitalize() for word in string.split()])
class LowerCaseWithUnderscores(NamingConvention):
"""
This represents the case typically used for module/package names (and
perhaps functions). All tokens are one of the following separated by
an underscore:
1) Alphabetic lower case
2) Numeric
3) Alphanumeric lower case and starting with a letter
There must be at least one token, and the first character must be a letter.
"""
REGEX = re.compile(r'^[a-z][a-z0-9]*(_+(([0-9]+)|([a-z][a-z0-9]*)))*_*$')
def formatted(self, string):
return re.match(self.REGEX, string) is not None
def convert(self, string):
string = re.sub(self.ILLEGAL_CHARACTERS, " ", string)
string = re.sub(r'([0-9a-zA-Z])([A-Z])', r'\1 \2', string)
return '_'.join(string.lower().split())
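# Hedged illustration (not part of the original script): how the two naming
# conventions above transform a free-form extension name.
def demo_naming_conventions(name="My Cool Extension"):
    camel = CamelCase().convert(name)                 # -> "MyCoolExtension"
    lower = LowerCaseWithUnderscores().convert(name)  # -> "my_cool_extension"
    return camel, lower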
def get_formatted_string(string_type, string, fallback, case):
"""
    Returns a string of the given type that conforms to the given naming
    convention: if the provided string already conforms it is returned as-is;
    otherwise a candidate is generated from the fallback and the user is asked
    to confirm it or to supply a replacement.
"""
if string is not None:
if case.formatted(string):
return string
else:
string = case.convert(fallback)
question = "Do you wish to use %s as the %s?" % \
(string, string_type)
if not get_confirmation(question):
string = raw_input("Please input a %s: " % string_type)
while not case.formatted(string):
print("'%s' is not a valid %s." % (string, string_type))
string = raw_input("Please input a valid %s: " % string_type)
return string
def parse_options():
"""
Parses the options and stores them in the global options variable.
"""
parser = OptionParser(usage="%prog name [options]",
version="Review Board " + get_version_string())
parser.add_option("--class-name",
dest="class_name", default=None,
help="class name of extension (capitalized no spaces)")
parser.add_option("--package-name",
dest="package_name", default=None,
help="package name of extension (lower case with " \
"underscores)")
parser.add_option("--description",
dest="description", default=None,
help="description of extension")
parser.add_option("--author",
dest="author", default=None,
help="author of the extension")
parser.add_option("--dashboard-link",
dest="dashboard_link", default=None,
metavar="DASHBOARD_lINK_LABEL",
help="creates a dashboard link with this name in the " \
"review requests sidebar (optional)")
parser.add_option("--is-configurable",
dest="is_configurable", action="store_true",
default=False,
help="whether this extension is configurable")
(globals()["options"], args) = parser.parse_args()
if len(args) != 1:
print("Error: incorrect number of arguments")
parser.print_help()
exit(-1)
options.extension_name = args[0]
autofill_unprovided_options()
def autofill_unprovided_options():
"""
This will autofill all the empty 'necessary' options that can be auto-
generated from the necessary fields.
"""
options.package_name = get_formatted_string("package name",
options.package_name,
options.extension_name,
LowerCaseWithUnderscores())
options.class_name = get_formatted_string("class name",
options.class_name,
options.extension_name,
CamelCase())
if options.description is None:
options.description = "Extension %s" % options.extension_name
class TemplateBuilder(object):
"""
    A builder that handles the creation of directories for the registered
    template files, in addition to creating the output files by filling
    in the templates with the values from options.
"""
def __init__(self, package_name, options):
self.package_name = package_name
self.options = vars(options)
self.templates = {}
self.directories = set()
def add_template(self, template, target):
target = re.sub("\{\{PACKAGE\}\}", self.package_name, target)
self.templates[template] = target
directory = os.path.dirname(target)
self.add_directory(os.path.join(self.package_name, directory))
def add_directory(self, dir_name):
self.directories.add(dir_name)
def build(self):
self._build_directories()
self._fill_templates()
def _build_directories(self):
if os.path.exists(self.package_name):
question = "Directory '%s' already exists. " \
"Do you wish to continue?" \
% self.package_name
if not get_confirmation(question):
print("Exiting...")
exit(-1)
for directory in self.directories:
if not os.path.exists(directory):
os.makedirs(directory)
def _fill_templates(self):
for template, target in self.templates.iteritems():
self._write_file(template, target, self.options)
    def _write_file(self, template, target, file_opts):
        filepath = os.path.join(self.package_name, target)
        template = env.get_template(template)
        with open(filepath, "w") as f:
            f.writelines(template.render(file_opts))
def main():
parse_options()
builder = TemplateBuilder(options.package_name, options)
builder.add_template("setup.py", "setup.py")
builder.add_template("extension/extension.py",
"{{PACKAGE}}/extension.py")
builder.add_template("extension/__init__.py",
"{{PACKAGE}}/__init__.py")
builder.add_template("extension/admin_urls.py",
"{{PACKAGE}}/admin_urls.py")
if options.dashboard_link is not None:
builder.add_template("extension/urls.py",
"{{PACKAGE}}/urls.py")
builder.add_template("extension/templates/extension/dashboard.html",
"{{PACKAGE}}/templates/{{PACKAGE}}/dashboard.html"
)
builder.add_template("extension/views.py",
"{{PACKAGE}}/views.py")
if options.is_configurable:
builder.add_template("extension/templates/extension/configure.html",
"{{PACKAGE}}/templates/{{PACKAGE}}/configure.html"
)
builder.add_template("extension/views.py",
"{{PACKAGE}}/views.py")
builder.build()
if __name__ == "__main__":
main()
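# Hedged example invocation (not part of the original script). The option names
# come from parse_options() above; the script filename, extension name and
# option values are placeholders:
#
#   python create_extension.py "Sample Extension" \
#       --class-name=SampleExtension \
#       --package-name=sample_extension \
#       --description="A sample Review Board extension" \
#       --is-configurable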
|
|
'''
===============================================================================
Interactive Image Segmentation using GrabCut algorithm.
This sample shows interactive image segmentation using grabcut algorithm.
USAGE:
    python grabcut.py volume=<input .pvl file> output=<output file>
README FIRST:
Two windows will show up, one for input and one for output.
At first, in input window, draw a rectangle around the object using
mouse right button. Then press 'n' to segment the object (once or a few times)
For any finer touch-ups, you can press any of the keys below and draw lines on
the areas you want. Then again press 'n' for updating the output.
Key '0' - To select areas of sure background
Key '1' - To select areas of sure foreground
Key '2' - To select areas of probable background
Key '3' - To select areas of probable foreground
Key 'n' - To update the segmentation
Key 'r' - To reset the setup
Key 's' - To save the results
===============================================================================
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
import sys
BLUE = [255,0,0] # rectangle color
RED = [0,0,255] # PR BG
GREEN = [0,255,0] # PR FG
BLACK = [0,0,0] # sure BG
WHITE = [255,255,255] # sure FG
DRAW_BG = {'color' : BLACK, 'val' : 0}
DRAW_FG = {'color' : WHITE, 'val' : 1}
DRAW_PR_FG = {'color' : GREEN, 'val' : 3}
DRAW_PR_BG = {'color' : RED, 'val' : 2}
# setting up flags
rect = (0,0,1,1)
drawing = False # flag for drawing curves
rectangle = False # flag for drawing rect
rect_over = False # flag to check if rect drawn
rect_or_mask = 100 # flag for selecting rect or mask mode
value = DRAW_FG # drawing initialized to FG
thickness = 3 # brush thickness
sliceNum = 0
prevSlice = -1
img = cv2.cvtColor(np.zeros((100,100), np.uint8), cv2.COLOR_GRAY2BGR)
img2 = cv2.cvtColor(np.zeros((100,100), np.uint8), cv2.COLOR_GRAY2BGR)
mask = np.zeros((100,100), np.uint8)
output = np.zeros((100,100), np.uint8)
ix = 0
iy = 0
dim = np.zeros(3,np.int32)
imgdata = np.zeros((3,3,3), np.float32)
UINT8 = 1
MAXVALUE = 255
DTYPE = np.uint8
#-------
def loadPVLData(pvl) :
imgfile = open(pvl, 'rb')
vtype = imgfile.read(1)
if vtype == b'\0' :
DTYPE = np.uint8
UINT8 = 1
MAXVALUE = 255
if vtype == b'\2' :
DTYPE = np.uint16
UINT8 = 0
        MAXVALUE = 65535
dim = np.fromfile(imgfile, dtype=np.int32, count=3)
imgdata = np.fromfile(imgfile, dtype=DTYPE, count=dim[0]*dim[1]*dim[2])
imgfile.close()
imgdata = imgdata.reshape(dim)
return (dim, imgdata)
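#-------
# Hedged helper (not part of the original script): writes a minimal synthetic
# volume in the layout loadPVLData() expects -- a single voxel-type byte
# (b'\0' for uint8, b'\2' for uint16), three int32 dimensions, then the raw
# voxel data. Handy for testing without a real .pvl file; the path and
# dimensions are placeholders.
def writeSyntheticPVL(path, dim=(4, 64, 64)) :
    data = np.random.randint(0, 255, size=dim).astype(np.uint8)
    f = open(path, 'wb')
    f.write(b'\0')                             # voxel type byte: uint8
    np.asarray(dim, dtype=np.int32).tofile(f)  # three int32 dimensions
    data.tofile(f)                             # raw voxel values
    f.close()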
#-------
def onmouse(event,x,y,flags,param):
global img,img2,drawing,value,mask,rectangle,rect,rect_or_mask,ix,iy,rect_over
# Draw Rectangle
if event == cv2.EVENT_RBUTTONDOWN:
rectangle = True
ix,iy = x,y
elif event == cv2.EVENT_MOUSEMOVE:
if rectangle == True:
img = img2.copy()
cv2.rectangle(img,(ix,iy),(x,y),BLUE,2)
rect = (min(ix,x),min(iy,y),abs(ix-x),abs(iy-y))
rect_or_mask = 0
elif event == cv2.EVENT_RBUTTONUP:
rectangle = False
rect_over = True
cv2.rectangle(img,(ix,iy),(x,y),BLUE,2)
rect = (min(ix,x),min(iy,y),abs(ix-x),abs(iy-y))
rect_or_mask = 0
print(" Now press the key 'n' a few times until no further change \n")
        sys.stdout.flush()
# draw touchup curves
if event == cv2.EVENT_LBUTTONDOWN:
if rect_over == False:
print("first draw rectangle \n")
else:
drawing = True
cv2.circle(img,(x,y),thickness,value['color'],-1)
cv2.circle(mask,(x,y),thickness,value['val'],-1)
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
cv2.circle(img,(x,y),thickness,value['color'],-1)
cv2.circle(mask,(x,y),thickness,value['val'],-1)
elif event == cv2.EVENT_LBUTTONUP:
if drawing == True:
drawing = False
cv2.circle(img,(x,y),thickness,value['color'],-1)
cv2.circle(mask,(x,y),thickness,value['val'],-1)
def on_press(k) :
    global img,img2,drawing,value,mask,rectangle,rect,rect_or_mask,ix,iy,rect_over,imgdata,output
if k == 27: # esc to exit
exit()
elif k == ord('0'): # BG drawing
print(" mark background regions with left mouse button \n")
value = DRAW_BG
elif k == ord('1'): # FG drawing
print(" mark foreground regions with left mouse button \n")
value = DRAW_FG
elif k == ord('2'): # PR_BG drawing
value = DRAW_PR_BG
elif k == ord('3'): # PR_FG drawing
value = DRAW_PR_FG
elif k == ord('s'): # save image
bar = np.zeros((img.shape[0],5,3),np.uint8)
res = np.hstack((img2,bar,img,bar,output))
cv2.imwrite('grabcut_output.png',res)
print(" Result saved as image \n")
elif k == ord('r'): # reset everything
print("resetting \n")
rect = (0,0,1,1)
drawing = False
rectangle = False
rect_or_mask = 100
rect_over = False
value = DRAW_FG
img = img2.copy()
mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape,np.uint8) # output image to be shown
elif k == ord('n'): # segment the image
print(""" For finer touchups, mark foreground and background after pressing keys 0-3
and again press 'n' \n""")
if (rect_or_mask == 0): # grabcut with rect
bgdmodel = np.zeros((1,65),np.float64)
fgdmodel = np.zeros((1,65),np.float64)
cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_RECT)
rect_or_mask = 1
elif rect_or_mask == 1: # grabcut with mask
bgdmodel = np.zeros((1,65),np.float64)
fgdmodel = np.zeros((1,65),np.float64)
cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_MASK)
    sys.stdout.flush()
def on_change(val):
global sliceNum, img, img2
sliceNum = val
img = cv2.resize(imgdata[sliceNum,:,:], (dim[1],dim[2]))
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img2 = img.copy() # a copy of original image
def grabCut() :
global img,img2,mask,output,drawing,value,rectangle,rect,rect_or_mask,ix,iy,rect_over,imgdata,sliceNum,prevSlice
img = cv2.resize(imgdata[sliceNum,:,:], (dim[1],dim[2]))
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img2 = img.copy() # a copy of original image
mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape,np.uint8) # output image to be shown
# input and output windows
cv2.namedWindow('output', cv2.WINDOW_KEEPRATIO)
cv2.namedWindow('input', cv2.WINDOW_KEEPRATIO)
h, w = img.shape[0:2]
neww = 800
    newh = int(neww * h / float(w))  # float division keeps the aspect ratio under Python 2
cv2.resizeWindow('input', neww, newh)
cv2.resizeWindow('output', neww, newh)
cv2.setMouseCallback('input',onmouse)
cv2.moveWindow('input',img.shape[1]+10,90)
cv2.createTrackbar('slider', 'input', 0, dim[0]-1, on_change)
print(" Instructions: \n")
print(" Draw a rectangle around the object using right mouse button \n")
    sys.stdout.flush()
    while True:
cv2.imshow('output',output)
cv2.imshow('input',img)
imageCopy = img.copy()
cv2.putText(imageCopy, str(sliceNum), (0, imageCopy.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255), 4)
cv2.imshow('input', imageCopy)
k = cv2.waitKey(1)
on_press(k)
mask2 = np.where((mask==1) + (mask==3),255,0).astype('uint8')
output = cv2.bitwise_and(img2,img2,mask=mask2)
cv2.destroyAllWindows()
def mainModule(kwargs) :
global dim, imgdata, sliceNum
print('------------')
print('Arguments :')
for kw in kwargs:
print(kw, '-', kwargs[kw])
print('------------')
pvlfile = kwargs['volume']
segfile = kwargs['output']
dim, imgdata = loadPVLData(pvlfile)
print(dim)
print(imgdata.shape)
grabCut()
if __name__ == '__main__':
# print documentation
print(__doc__)
kwargs = {}
for a in sys.argv[1:] :
(k,v) = a.split("=")
kwargs[k] = v
mainModule(kwargs)
|
|
import numpy as np
import os, imageio
########## Slightly modified version of LLFF data loading code
########## see https://github.com/Fyusion/LLFF for original
def _minify(basedir, factors=[], resolutions=[]):
needtoload = False
for r in factors:
imgdir = os.path.join(basedir, 'images_{}'.format(r))
if not os.path.exists(imgdir):
needtoload = True
for r in resolutions:
imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
if not os.path.exists(imgdir):
needtoload = True
if not needtoload:
return
from shutil import copy
from subprocess import check_output
imgdir = os.path.join(basedir, 'images')
imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
imgdir_orig = imgdir
wd = os.getcwd()
for r in factors + resolutions:
if isinstance(r, int):
name = 'images_{}'.format(r)
resizearg = '{}%'.format(100./r)
else:
name = 'images_{}x{}'.format(r[1], r[0])
resizearg = '{}x{}'.format(r[1], r[0])
imgdir = os.path.join(basedir, name)
if os.path.exists(imgdir):
continue
print('Minifying', r, basedir)
os.makedirs(imgdir)
check_output('cp {}/* {}'.format(imgdir_orig, imgdir), shell=True)
ext = imgs[0].split('.')[-1]
args = ' '.join(['mogrify', '-resize', resizearg, '-format', 'png', '*.{}'.format(ext)])
print(args)
os.chdir(imgdir)
check_output(args, shell=True)
os.chdir(wd)
if ext != 'png':
check_output('rm {}/*.{}'.format(imgdir, ext), shell=True)
print('Removed duplicates')
print('Done')
def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True):
poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy'))
poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1,2,0])
bds = poses_arr[:, -2:].transpose([1,0])
img0 = [os.path.join(basedir, 'images', f) for f in sorted(os.listdir(os.path.join(basedir, 'images'))) \
if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]
sh = imageio.imread(img0).shape
sfx = ''
    if factor is not None:
        sfx = '_{}'.format(factor)
        _minify(basedir, factors=[factor])
elif height is not None:
factor = sh[0] / float(height)
width = int(sh[1] / factor)
_minify(basedir, resolutions=[[height, width]])
sfx = '_{}x{}'.format(width, height)
elif width is not None:
factor = sh[1] / float(width)
height = int(sh[0] / factor)
_minify(basedir, resolutions=[[height, width]])
sfx = '_{}x{}'.format(width, height)
else:
factor = 1
imgdir = os.path.join(basedir, 'images' + sfx)
if not os.path.exists(imgdir):
print( imgdir, 'does not exist, returning' )
return
imgfiles = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir)) if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')]
if poses.shape[-1] != len(imgfiles):
print( 'Mismatch between imgs {} and poses {} !!!!'.format(len(imgfiles), poses.shape[-1]) )
return
sh = imageio.imread(imgfiles[0]).shape
poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
poses[2, 4, :] = poses[2, 4, :] * 1./factor
if not load_imgs:
return poses, bds
def imread(f):
if f.endswith('png'):
return imageio.imread(f, ignoregamma=True)
else:
return imageio.imread(f)
    imgs = [imread(f)[...,:3]/255. for f in imgfiles]
imgs = np.stack(imgs, -1)
print('Loaded image data', imgs.shape, poses[:,-1,0])
return poses, bds, imgs
def normalize(x):
return x / np.linalg.norm(x)
def viewmatrix(z, up, pos):
vec2 = normalize(z)
vec1_avg = up
vec0 = normalize(np.cross(vec1_avg, vec2))
vec1 = normalize(np.cross(vec2, vec0))
m = np.stack([vec0, vec1, vec2, pos], 1)
return m
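# Hedged illustration (not part of the original loader): viewmatrix() returns a
# 3x4 camera-to-world matrix whose columns are the right/up/forward axes plus
# the camera position. The vectors below are arbitrary placeholders.
def _example_viewmatrix():
    z = np.array([0., 0., 1.])      # viewing direction
    up = np.array([0., 1., 0.])     # approximate up vector
    pos = np.array([0., 0., -4.])   # camera position
    return viewmatrix(z, up, pos)   # shape (3, 4)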
def ptstocam(pts, c2w):
tt = np.matmul(c2w[:3,:3].T, (pts-c2w[:3,3])[...,np.newaxis])[...,0]
return tt
def poses_avg(poses):
hwf = poses[0, :3, -1:]
center = poses[:, :3, 3].mean(0)
vec2 = normalize(poses[:, :3, 2].sum(0))
up = poses[:, :3, 1].sum(0)
c2w = np.concatenate([viewmatrix(vec2, up, center), hwf], 1)
return c2w
def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N):
render_poses = []
rads = np.array(list(rads) + [1.])
hwf = c2w[:,4:5]
for theta in np.linspace(0., 2. * np.pi * rots, N+1)[:-1]:
c = np.dot(c2w[:3,:4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta*zrate), 1.]) * rads)
z = normalize(c - np.dot(c2w[:3,:4], np.array([0,0,-focal, 1.])))
render_poses.append(np.concatenate([viewmatrix(z, up, c), hwf], 1))
return render_poses
def recenter_poses(poses):
poses_ = poses+0
bottom = np.reshape([0,0,0,1.], [1,4])
c2w = poses_avg(poses)
c2w = np.concatenate([c2w[:3,:4], bottom], -2)
bottom = np.tile(np.reshape(bottom, [1,1,4]), [poses.shape[0],1,1])
poses = np.concatenate([poses[:,:3,:4], bottom], -2)
poses = np.linalg.inv(c2w) @ poses
poses_[:,:3,:4] = poses[:,:3,:4]
poses = poses_
return poses
#####################
def spherify_poses(poses, bds):
p34_to_44 = lambda p : np.concatenate([p, np.tile(np.reshape(np.eye(4)[-1,:], [1,1,4]), [p.shape[0], 1,1])], 1)
rays_d = poses[:,:3,2:3]
rays_o = poses[:,:3,3:4]
def min_line_dist(rays_o, rays_d):
A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0,2,1])
b_i = -A_i @ rays_o
pt_mindist = np.squeeze(-np.linalg.inv((np.transpose(A_i, [0,2,1]) @ A_i).mean(0)) @ (b_i).mean(0))
return pt_mindist
pt_mindist = min_line_dist(rays_o, rays_d)
center = pt_mindist
up = (poses[:,:3,3] - center).mean(0)
vec0 = normalize(up)
vec1 = normalize(np.cross([.1,.2,.3], vec0))
vec2 = normalize(np.cross(vec0, vec1))
pos = center
c2w = np.stack([vec1, vec2, vec0, pos], 1)
poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:,:3,:4])
rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:,:3,3]), -1)))
sc = 1./rad
poses_reset[:,:3,3] *= sc
bds *= sc
rad *= sc
centroid = np.mean(poses_reset[:,:3,3], 0)
zh = centroid[2]
radcircle = np.sqrt(rad**2-zh**2)
new_poses = []
for th in np.linspace(0.,2.*np.pi, 120):
camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
up = np.array([0,0,-1.])
vec2 = normalize(camorigin)
vec0 = normalize(np.cross(vec2, up))
vec1 = normalize(np.cross(vec2, vec0))
pos = camorigin
p = np.stack([vec0, vec1, vec2, pos], 1)
new_poses.append(p)
new_poses = np.stack(new_poses, 0)
new_poses = np.concatenate([new_poses, np.broadcast_to(poses[0,:3,-1:], new_poses[:,:3,-1:].shape)], -1)
poses_reset = np.concatenate([poses_reset[:,:3,:4], np.broadcast_to(poses[0,:3,-1:], poses_reset[:,:3,-1:].shape)], -1)
return poses_reset, new_poses, bds
def load_llff_data(basedir, factor=8, recenter=True, bd_factor=.75, spherify=False, path_zflat=False):
poses, bds, imgs = _load_data(basedir, factor=factor) # factor=8 downsamples original imgs by 8x
print('Loaded', basedir, bds.min(), bds.max())
# Correct rotation matrix ordering and move variable dim to axis 0
poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
poses = np.moveaxis(poses, -1, 0).astype(np.float32)
imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
images = imgs
bds = np.moveaxis(bds, -1, 0).astype(np.float32)
# Rescale if bd_factor is provided
sc = 1. if bd_factor is None else 1./(bds.min() * bd_factor)
poses[:,:3,3] *= sc
bds *= sc
if recenter:
poses = recenter_poses(poses)
if spherify:
poses, render_poses, bds = spherify_poses(poses, bds)
else:
c2w = poses_avg(poses)
print('recentered', c2w.shape)
print(c2w[:3,:4])
## Get spiral
# Get average pose
up = normalize(poses[:, :3, 1].sum(0))
# Find a reasonable "focus depth" for this dataset
close_depth, inf_depth = bds.min()*.9, bds.max()*5.
dt = .75
mean_dz = 1./(((1.-dt)/close_depth + dt/inf_depth))
focal = mean_dz
# Get radii for spiral path
shrink_factor = .8
zdelta = close_depth * .2
tt = poses[:,:3,3] # ptstocam(poses[:3,3,:].T, c2w).T
rads = np.percentile(np.abs(tt), 90, 0)
c2w_path = c2w
N_views = 120
N_rots = 2
if path_zflat:
# zloc = np.percentile(tt, 10, 0)[2]
zloc = -close_depth * .1
c2w_path[:3,3] = c2w_path[:3,3] + zloc * c2w_path[:3,2]
rads[2] = 0.
N_rots = 1
            N_views //= 2  # keep an integer view count for np.linspace below
# Generate poses for spiral path
render_poses = render_path_spiral(c2w_path, up, rads, focal, zdelta, zrate=.5, rots=N_rots, N=N_views)
render_poses = np.array(render_poses).astype(np.float32)
c2w = poses_avg(poses)
print('Data:')
print(poses.shape, images.shape, bds.shape)
dists = np.sum(np.square(c2w[:3,3] - poses[:,:3,3]), -1)
i_test = np.argmin(dists)
print('HOLDOUT view is', i_test)
images = images.astype(np.float32)
poses = poses.astype(np.float32)
return images, poses, bds, render_poses, i_test
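# Hedged usage sketch (not part of the original module): typical call pattern
# for an LLFF-style scene directory. The path is a placeholder; the directory
# must contain an 'images/' folder and a 'poses_bounds.npy' file, as assumed
# by _load_data() above.
def _example_load_scene(basedir='./data/nerf_llff_data/fern'):
    images, poses, bds, render_poses, i_test = load_llff_data(
        basedir, factor=8, recenter=True, bd_factor=.75, spherify=False)
    print('images', images.shape, 'poses', poses.shape, 'holdout view', i_test)
    return images, poses, bds, render_poses, i_test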
|
|
# -*- coding: utf-8 -*-
import re
from os.path import abspath, join
from sqlparse import sql, tokens as T
from sqlparse.engine import FilterStack
from sqlparse.lexer import tokenize
from sqlparse.pipeline import Pipeline
from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
String, Whitespace)
from sqlparse.utils import memoize_generator
from sqlparse.utils import split_unquoted_newlines
# --------------------------
# token process
class _CaseFilter:
ttype = None
def __init__(self, case=None):
if case is None:
case = 'upper'
assert case in ['lower', 'upper', 'capitalize']
self.convert = getattr(unicode, case)
def process(self, stack, stream):
for ttype, value in stream:
if ttype in self.ttype:
value = self.convert(value)
yield ttype, value
class KeywordCaseFilter(_CaseFilter):
ttype = T.Keyword
class IdentifierCaseFilter(_CaseFilter):
ttype = (T.Name, T.String.Symbol)
def process(self, stack, stream):
for ttype, value in stream:
if ttype in self.ttype and not value.strip()[0] == '"':
value = self.convert(value)
yield ttype, value
class TruncateStringFilter:
def __init__(self, width, char):
self.width = max(width, 1)
self.char = unicode(char)
def process(self, stack, stream):
for ttype, value in stream:
if ttype is T.Literal.String.Single:
if value[:2] == '\'\'':
inner = value[2:-2]
quote = u'\'\''
else:
inner = value[1:-1]
quote = u'\''
if len(inner) > self.width:
value = u''.join((quote, inner[:self.width], self.char,
quote))
yield ttype, value
class GetComments:
"""Get the comments from a stack"""
def process(self, stack, stream):
for token_type, value in stream:
if token_type in Comment:
yield token_type, value
class StripComments:
"""Strip the comments from a stack"""
def process(self, stack, stream):
for token_type, value in stream:
if token_type not in Comment:
yield token_type, value
def StripWhitespace(stream):
"Strip the useless whitespaces from a stream leaving only the minimal ones"
last_type = None
has_space = False
ignore_group = frozenset((Comparison, Punctuation))
for token_type, value in stream:
# We got a previous token (not empty first ones)
if last_type:
if token_type in Whitespace:
has_space = True
continue
# Ignore first empty spaces and dot-commas
elif token_type in (Whitespace, Whitespace.Newline, ignore_group):
continue
# Yield a whitespace if it can't be ignored
if has_space:
if not ignore_group.intersection((last_type, token_type)):
yield Whitespace, ' '
has_space = False
# Yield the token and set its type for checking with the next one
yield token_type, value
last_type = token_type
class IncludeStatement:
"""Filter that enable a INCLUDE statement"""
def __init__(self, dirpath=".", maxrecursive=10, raiseexceptions=False):
if maxrecursive <= 0:
raise ValueError('Max recursion limit reached')
self.dirpath = abspath(dirpath)
self.maxRecursive = maxrecursive
self.raiseexceptions = raiseexceptions
self.detected = False
@memoize_generator
def process(self, stack, stream):
# Run over all tokens in the stream
for token_type, value in stream:
# INCLUDE statement found, set detected mode
if token_type in Name and value.upper() == 'INCLUDE':
self.detected = True
continue
# INCLUDE statement was found, parse it
elif self.detected:
# Omit whitespaces
if token_type in Whitespace:
continue
# Found file path to include
if token_type in String.Symbol:
# if token_type in tokens.String.Symbol:
# Get path of file to include
path = join(self.dirpath, value[1:-1])
try:
f = open(path)
raw_sql = f.read()
f.close()
# There was a problem loading the include file
except IOError, err:
# Raise the exception to the interpreter
if self.raiseexceptions:
raise
# Put the exception as a comment on the SQL code
yield Comment, u'-- IOError: %s\n' % err
else:
                    # Create a new FilterStack to parse the included file
                    # and add all its tokens to the main stack recursively
try:
filtr = IncludeStatement(self.dirpath,
self.maxRecursive - 1,
self.raiseexceptions)
# Max recursion limit reached
except ValueError, err:
# Raise the exception to the interpreter
if self.raiseexceptions:
raise
# Put the exception as a comment on the SQL code
yield Comment, u'-- ValueError: %s\n' % err
stack = FilterStack()
stack.preprocess.append(filtr)
for tv in stack.run(raw_sql):
yield tv
# Set normal mode
self.detected = False
# Don't include any token while in detected mode
continue
# Normal token
yield token_type, value
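# Hedged usage sketch (not part of the original module): token-level filters
# such as KeywordCaseFilter are attached to a FilterStack's preprocess list and
# the stack is then run over raw SQL, mirroring how IncludeStatement.process()
# builds a nested stack above. The SQL string is a placeholder; the stack
# yields whatever this sqlparse version produces for each statement.
def _example_uppercase_keywords(raw_sql="select * from foo -- fetch everything"):
    stack = FilterStack()
    stack.preprocess.append(KeywordCaseFilter(case='upper'))
    return list(stack.run(raw_sql))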
# ----------------------
# statement process
class StripCommentsFilter:
def _get_next_comment(self, tlist):
# TODO(andi) Comment types should be unified, see related issue38
token = tlist.token_next_by_instance(0, sql.Comment)
if token is None:
token = tlist.token_next_by_type(0, T.Comment)
return token
def _process(self, tlist):
token = self._get_next_comment(tlist)
while token:
tidx = tlist.token_index(token)
prev = tlist.token_prev(tidx, False)
next_ = tlist.token_next(tidx, False)
# Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a parenthesis.
if (prev is not None and next_ is not None
and not prev.is_whitespace() and not next_.is_whitespace()
and not (prev.match(T.Punctuation, '(')
or next_.match(T.Punctuation, ')'))):
tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
else:
tlist.tokens.pop(tidx)
token = self._get_next_comment(tlist)
def process(self, stack, stmt):
[self.process(stack, sgroup) for sgroup in stmt.get_sublists()]
self._process(stmt)
class StripWhitespaceFilter:
def _stripws(self, tlist):
func_name = '_stripws_%s' % tlist.__class__.__name__.lower()
func = getattr(self, func_name, self._stripws_default)
func(tlist)
def _stripws_default(self, tlist):
last_was_ws = False
for token in tlist.tokens:
if token.is_whitespace():
if last_was_ws:
token.value = ''
else:
token.value = ' '
last_was_ws = token.is_whitespace()
def _stripws_identifierlist(self, tlist):
# Removes newlines before commas, see issue140
last_nl = None
for token in tlist.tokens[:]:
if (token.ttype is T.Punctuation
and token.value == ','
and last_nl is not None):
tlist.tokens.remove(last_nl)
if token.is_whitespace():
last_nl = token
else:
last_nl = None
return self._stripws_default(tlist)
def _stripws_parenthesis(self, tlist):
if tlist.tokens[1].is_whitespace():
tlist.tokens.pop(1)
if tlist.tokens[-2].is_whitespace():
tlist.tokens.pop(-2)
self._stripws_default(tlist)
def process(self, stack, stmt, depth=0):
[self.process(stack, sgroup, depth + 1)
for sgroup in stmt.get_sublists()]
self._stripws(stmt)
if (
depth == 0
and stmt.tokens
and stmt.tokens[-1].is_whitespace()
):
stmt.tokens.pop(-1)
class ReindentFilter:
def __init__(self, width=2, char=' ', line_width=None):
self.width = width
self.char = char
self.indent = 0
self.offset = 0
self.line_width = line_width
self._curr_stmt = None
self._last_stmt = None
def _flatten_up_to_token(self, token):
"""Yields all tokens up to token plus the next one."""
# helper for _get_offset
iterator = self._curr_stmt.flatten()
for t in iterator:
yield t
if t == token:
                return  # stop the generator here instead of raising StopIteration
def _get_offset(self, token):
raw = ''.join(map(unicode, self._flatten_up_to_token(token)))
line = raw.splitlines()[-1]
# Now take current offset into account and return relative offset.
full_offset = len(line) - len(self.char * (self.width * self.indent))
return full_offset - self.offset
def nl(self):
# TODO: newline character should be configurable
space = (self.char * ((self.indent * self.width) + self.offset))
# Detect runaway indenting due to parsing errors
if len(space) > 200:
# something seems to be wrong, flip back
self.indent = self.offset = 0
space = (self.char * ((self.indent * self.width) + self.offset))
ws = '\n' + space
return sql.Token(T.Whitespace, ws)
def _split_kwds(self, tlist):
split_words = ('FROM', 'STRAIGHT_JOIN$', 'JOIN$', 'AND', 'OR',
'GROUP', 'ORDER', 'UNION', 'VALUES',
'SET', 'BETWEEN', 'EXCEPT', 'HAVING')
def _next_token(i):
t = tlist.token_next_match(i, T.Keyword, split_words,
regex=True)
if t and t.value.upper() == 'BETWEEN':
t = _next_token(tlist.token_index(t) + 1)
if t and t.value.upper() == 'AND':
t = _next_token(tlist.token_index(t) + 1)
return t
idx = 0
token = _next_token(idx)
added = set()
while token:
prev = tlist.token_prev(tlist.token_index(token), False)
offset = 1
if prev and prev.is_whitespace() and prev not in added:
tlist.tokens.pop(tlist.token_index(prev))
offset += 1
uprev = unicode(prev)
if (prev and (uprev.endswith('\n') or uprev.endswith('\r'))):
nl = tlist.token_next(token)
else:
nl = self.nl()
added.add(nl)
tlist.insert_before(token, nl)
token = _next_token(tlist.token_index(nl) + offset)
def _split_statements(self, tlist):
idx = 0
token = tlist.token_next_by_type(idx, (T.Keyword.DDL, T.Keyword.DML))
while token:
prev = tlist.token_prev(tlist.token_index(token), False)
if prev and prev.is_whitespace():
tlist.tokens.pop(tlist.token_index(prev))
# only break if it's not the first token
if prev:
nl = self.nl()
tlist.insert_before(token, nl)
token = tlist.token_next_by_type(tlist.token_index(token) + 1,
(T.Keyword.DDL, T.Keyword.DML))
def _process(self, tlist):
func_name = '_process_%s' % tlist.__class__.__name__.lower()
func = getattr(self, func_name, self._process_default)
func(tlist)
def _process_where(self, tlist):
token = tlist.token_next_match(0, T.Keyword, 'WHERE')
try:
tlist.insert_before(token, self.nl())
except ValueError: # issue121, errors in statement
pass
self.indent += 1
self._process_default(tlist)
self.indent -= 1
def _process_having(self, tlist):
token = tlist.token_next_match(0, T.Keyword, 'HAVING')
try:
tlist.insert_before(token, self.nl())
except ValueError: # issue121, errors in statement
pass
self.indent += 1
self._process_default(tlist)
self.indent -= 1
def _process_parenthesis(self, tlist):
first = tlist.token_next(0)
indented = False
if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
self.indent += 1
tlist.tokens.insert(0, self.nl())
indented = True
num_offset = self._get_offset(
tlist.token_next_match(0, T.Punctuation, '('))
self.offset += num_offset
self._process_default(tlist, stmts=not indented)
if indented:
self.indent -= 1
self.offset -= num_offset
def _process_identifierlist(self, tlist):
identifiers = list(tlist.get_identifiers())
if len(identifiers) > 1 and not tlist.within(sql.Function):
first = list(identifiers[0].flatten())[0]
if self.char == '\t':
# when using tabs we don't count the actual word length
# in spaces.
num_offset = 1
else:
num_offset = self._get_offset(first) - len(first.value)
self.offset += num_offset
for token in identifiers[1:]:
tlist.insert_before(token, self.nl())
self.offset -= num_offset
self._process_default(tlist)
def _process_case(self, tlist):
is_first = True
num_offset = None
case = tlist.tokens[0]
outer_offset = self._get_offset(case) - len(case.value)
self.offset += outer_offset
for cond, value in tlist.get_cases():
if is_first:
tcond = list(cond[0].flatten())[0]
is_first = False
num_offset = self._get_offset(tcond) - len(tcond.value)
self.offset += num_offset
continue
if cond is None:
token = value[0]
else:
token = cond[0]
tlist.insert_before(token, self.nl())
# Line breaks on group level are done. Now let's add an offset of
# 5 (=length of "when", "then", "else") and process subgroups.
self.offset += 5
self._process_default(tlist)
self.offset -= 5
if num_offset is not None:
self.offset -= num_offset
end = tlist.token_next_match(0, T.Keyword, 'END')
tlist.insert_before(end, self.nl())
self.offset -= outer_offset
def _process_default(self, tlist, stmts=True, kwds=True):
if stmts:
self._split_statements(tlist)
if kwds:
self._split_kwds(tlist)
[self._process(sgroup) for sgroup in tlist.get_sublists()]
def process(self, stack, stmt):
if isinstance(stmt, sql.Statement):
self._curr_stmt = stmt
self._process(stmt)
if isinstance(stmt, sql.Statement):
if self._last_stmt is not None:
if unicode(self._last_stmt).endswith('\n'):
nl = '\n'
else:
nl = '\n\n'
stmt.tokens.insert(
0, sql.Token(T.Whitespace, nl))
if self._last_stmt != stmt:
self._last_stmt = stmt
# FIXME: Doesn't work ;)
class RightMarginFilter:
keep_together = (
# sql.TypeCast, sql.Identifier, sql.Alias,
)
def __init__(self, width=79):
self.width = width
self.line = ''
def _process(self, stack, group, stream):
for token in stream:
if token.is_whitespace() and '\n' in token.value:
if token.value.endswith('\n'):
self.line = ''
else:
self.line = token.value.splitlines()[-1]
elif (token.is_group()
and not token.__class__ in self.keep_together):
token.tokens = self._process(stack, token, token.tokens)
else:
val = unicode(token)
if len(self.line) + len(val) > self.width:
match = re.search('^ +', self.line)
if match is not None:
indent = match.group()
else:
indent = ''
yield sql.Token(T.Whitespace, '\n%s' % indent)
self.line = indent
self.line += val
yield token
def process(self, stack, group):
return
group.tokens = self._process(stack, group, group.tokens)
class ColumnsSelect:
"""Get the columns names of a SELECT query"""
def process(self, stack, stream):
mode = 0
oldValue = ""
parenthesis = 0
for token_type, value in stream:
# Ignore comments
if token_type in Comment:
continue
# We have not detected a SELECT statement
if mode == 0:
if token_type in Keyword and value == 'SELECT':
mode = 1
# We have detected a SELECT statement
elif mode == 1:
if value == 'FROM':
if oldValue:
yield oldValue
mode = 3 # Columns have been checked
elif value == 'AS':
oldValue = ""
mode = 2
elif (token_type == Punctuation
and value == ',' and not parenthesis):
if oldValue:
yield oldValue
oldValue = ""
elif token_type not in Whitespace:
if value == '(':
parenthesis += 1
elif value == ')':
parenthesis -= 1
oldValue += value
# We are processing an AS keyword
elif mode == 2:
                # We also check for Keywords because of a bug in SQLParse
if token_type == Name or token_type == Keyword:
yield value
mode = 1
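# Illustrative note (not part of the original module): for a token stream built
# from "SELECT a, b AS c, MAX(d) FROM t", ColumnsSelect().process(None, stream)
# is expected to yield 'a', 'c' and 'MAX(d)' -- an alias introduced with AS
# replaces the underlying expression, and parenthesised expressions are kept
# together because commas inside parentheses do not split columns.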
# ---------------------------
# postprocess
class SerializerUnicode:
def process(self, stack, stmt):
raw = unicode(stmt)
lines = split_unquoted_newlines(raw)
res = '\n'.join(line.rstrip() for line in lines)
return res
def Tokens2Unicode(stream):
result = ""
for _, value in stream:
result += unicode(value)
return result
class OutputFilter:
varname_prefix = ''
def __init__(self, varname='sql'):
self.varname = self.varname_prefix + varname
self.count = 0
def _process(self, stream, varname, has_nl):
raise NotImplementedError
def process(self, stack, stmt):
self.count += 1
if self.count > 1:
varname = '%s%d' % (self.varname, self.count)
else:
varname = self.varname
has_nl = len(unicode(stmt).strip().splitlines()) > 1
stmt.tokens = self._process(stmt.tokens, varname, has_nl)
return stmt
class OutputPythonFilter(OutputFilter):
def _process(self, stream, varname, has_nl):
        # SQL query assignment to varname
if self.count > 1:
yield sql.Token(T.Whitespace, '\n')
yield sql.Token(T.Name, varname)
yield sql.Token(T.Whitespace, ' ')
yield sql.Token(T.Operator, '=')
yield sql.Token(T.Whitespace, ' ')
if has_nl:
yield sql.Token(T.Operator, '(')
yield sql.Token(T.Text, "'")
# Print the tokens on the quote
for token in stream:
# Token is a new line separator
if token.is_whitespace() and '\n' in token.value:
# Close quote and add a new line
yield sql.Token(T.Text, " '")
yield sql.Token(T.Whitespace, '\n')
# Quote header on secondary lines
yield sql.Token(T.Whitespace, ' ' * (len(varname) + 4))
yield sql.Token(T.Text, "'")
# Indentation
after_lb = token.value.split('\n', 1)[1]
if after_lb:
yield sql.Token(T.Whitespace, after_lb)
continue
# Token has escape chars
elif "'" in token.value:
token.value = token.value.replace("'", "\\'")
# Put the token
yield sql.Token(T.Text, token.value)
# Close quote
yield sql.Token(T.Text, "'")
if has_nl:
yield sql.Token(T.Operator, ')')
class OutputPHPFilter(OutputFilter):
varname_prefix = '$'
def _process(self, stream, varname, has_nl):
        # SQL query assignment to varname (quote header)
if self.count > 1:
yield sql.Token(T.Whitespace, '\n')
yield sql.Token(T.Name, varname)
yield sql.Token(T.Whitespace, ' ')
if has_nl:
yield sql.Token(T.Whitespace, ' ')
yield sql.Token(T.Operator, '=')
yield sql.Token(T.Whitespace, ' ')
yield sql.Token(T.Text, '"')
# Print the tokens on the quote
for token in stream:
# Token is a new line separator
if token.is_whitespace() and '\n' in token.value:
# Close quote and add a new line
yield sql.Token(T.Text, ' ";')
yield sql.Token(T.Whitespace, '\n')
# Quote header on secondary lines
yield sql.Token(T.Name, varname)
yield sql.Token(T.Whitespace, ' ')
yield sql.Token(T.Operator, '.=')
yield sql.Token(T.Whitespace, ' ')
yield sql.Token(T.Text, '"')
# Indentation
after_lb = token.value.split('\n', 1)[1]
if after_lb:
yield sql.Token(T.Whitespace, after_lb)
continue
# Token has escape chars
elif '"' in token.value:
token.value = token.value.replace('"', '\\"')
# Put the token
yield sql.Token(T.Text, token.value)
# Close quote
yield sql.Token(T.Text, '"')
yield sql.Token(T.Punctuation, ';')
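# Illustrative note (not part of the original module): for a single-line
# statement such as "select * from foo;", OutputPythonFilter is expected to
# produce
#     sql = 'select * from foo;'
# and OutputPHPFilter (varname prefix '$', '.=' on continuation lines)
#     $sql = "select * from foo;";
# Multi-line statements are wrapped in parentheses (Python) or emitted as a
# series of '.=' concatenations (PHP).  These filters are typically wired in
# via sqlparse.format(sql, output_format='python'|'php').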
class Limit:
"""Get the LIMIT of a query.
If not defined, return -1 (SQL specification for no LIMIT query)
"""
def process(self, stack, stream):
index = 7
stream = list(stream)
stream.reverse()
# Run over all tokens in the stream from the end
for token_type, value in stream:
index -= 1
# if index and token_type in Keyword:
if index and token_type in Keyword and value == 'LIMIT':
return stream[4 - index][1]
return -1
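# Illustrative note (not part of the original module): for a simple stream such
# as the tokens of "SELECT * FROM t LIMIT 10" with no trailing tokens, Limit()
# is expected to return the limit value ('10'); if no LIMIT keyword is found
# near the end of the stream it returns -1.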
def compact(stream):
"""Function that return a compacted version of the stream"""
pipe = Pipeline()
pipe.append(StripComments())
pipe.append(StripWhitespace)
return pipe(stream)
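# Illustrative note (not part of the original module): compact() simply chains
# StripComments and StripWhitespace, so a stream built from
# "SELECT *   -- all columns\nFROM foo" is expected to come out as the tokens
# of "SELECT * FROM foo": no comment tokens, and whitespace collapsed to
# single spaces.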
|
|
#
# wc.py: functions for interacting with a Subversion working copy
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import os
import sys
import re
import logging
import pprint
if sys.version_info[0] >= 3:
# Python >=3.0
from io import StringIO
from urllib.parse import quote as urllib_quote
else:
# Python <3.0
from cStringIO import StringIO
from urllib import quote as urllib_quote
import svntest
logger = logging.getLogger()
#
# 'status -v' output looks like this:
#
# "%c%c%c%c%c%c%c %c %6s %6s %-12s %s\n"
#
# (Taken from 'print_status' in subversion/svn/status.c.)
#
# Here are the parameters. The middle number or string in parens is the
# match.group(), followed by a brief description of the field:
#
# - text status (1) (single letter)
# - prop status (1) (single letter)
# - wc-lockedness flag (2) (single letter: "L" or " ")
# - copied flag (3) (single letter: "+" or " ")
# - switched flag (4) (single letter: "S", "X" or " ")
# - repos lock status (5) (single letter: "K", "O", "B", "T", " ")
# - tree conflict flag (6) (single letter: "C" or " ")
#
# [one space]
#
# - out-of-date flag (7) (single letter: "*" or " ")
#
# [three spaces]
#
# - working revision ('wc_rev') (either digits or "-", "?" or " ")
#
# [one space]
#
# - last-changed revision (either digits or "?" or " ")
#
# [one space]
#
# - last author (optional string of non-whitespace
# characters)
#
# [spaces]
#
# - path ('path') (string of characters until newline)
#
# Working revision, last-changed revision, and last author are whitespace
# only if the item is missing.
#
_re_parse_status = re.compile('^([?!MACDRUGXI_~ ][MACDRUG_ ])'
'([L ])'
'([+ ])'
'([SX ])'
'([KOBT ])'
'([C ]) '
'([* ]) +'
'((?P<wc_rev>\d+|-|\?) +(\d|-|\?)+ +(\S+) +)?'
'(?P<path>.+)$')
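# Illustrative sketch (not part of the original module): how a typical
# 'svn status -v' line maps onto _re_parse_status.  The line below is a
# hypothetical, hand-constructed example.
def _example_parse_status_line():
  line = 'M                   4        3 jrandom      A/mu'
  match = _re_parse_status.search(line)
  # group(1) is the two-character text/prop status; the named groups pick out
  # the working revision and the path:
  #   ('M ', '4', 'A/mu')
  return match.group(1), match.group('wc_rev'), match.group('path')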
_re_parse_status_ex = re.compile('^ ('
'( \> moved (from (?P<moved_from>.+)|to (?P<moved_to>.*)))'
'|( \> swapped places with (?P<swapped_with>.+).*)'
'|(\> (?P<tc>.+))'
')$')
_re_parse_skipped = re.compile("^(Skipped[^']*) '(.+)'( --.*)?\n")
_re_parse_summarize = re.compile("^([MAD ][M ]) (.+)\n")
_re_parse_checkout = re.compile('^([RMAGCUDE_ B][MAGCUDE_ ])'
'([B ])'
'([CAUD ])\s+'
'(.+)')
_re_parse_co_skipped = re.compile('^(Restored|Skipped|Removed external)'
'\s+\'(.+)\'(( --|: ).*)?')
_re_parse_co_restored = re.compile('^(Restored)\s+\'(.+)\'')
# Lines typically have a verb followed by whitespace then a path.
_re_parse_commit_ext = re.compile('^(([A-Za-z]+( [a-z]+)*)) \'(.+)\'( --.*)?')
_re_parse_commit = re.compile('^(\w+( \(bin\))?)\s+(.+)')
#rN: eids 0 15 branches 4
_re_parse_eid_header = re.compile('^r(-1|[0-9]+): eids ([0-9]+) ([0-9]+) '
'branches ([0-9]+)$')
# B0.2 root-eid 3
_re_parse_eid_branch = re.compile('^(B[0-9.]+) root-eid ([0-9]+) num-eids ([0-9]+)( from [^ ]*)?$')
_re_parse_eid_merge_history = re.compile('merge-history: merge-ancestors ([0-9]+)')
# e4: normal 6 C
_re_parse_eid_ele = re.compile('^e([0-9]+): (none|normal|subbranch) '
'(-1|[0-9]+) (.*)$')
class State:
"""Describes an existing or expected state of a working copy.
The primary metaphor here is a dictionary of paths mapping to instances
of StateItem, which describe each item in a working copy.
Note: the paths should be *relative* to the root of the working copy,
using '/' for the separator (see to_relpath()), and the root of the
working copy is identified by the empty path: ''.
"""
def __init__(self, wc_dir, desc):
"Create a State using the specified description."
assert isinstance(desc, dict)
self.wc_dir = wc_dir
self.desc = desc # dictionary: path -> StateItem
def add(self, more_desc):
"Add more state items into the State."
assert isinstance(more_desc, dict)
self.desc.update(more_desc)
def add_state(self, parent, state, strict=False):
"Import state items from a State object, reparent the items to PARENT."
assert isinstance(state, State)
for path, item in state.desc.items():
if strict:
path = parent + path
elif path == '':
path = parent
else:
path = parent + '/' + path
self.desc[path] = item
def remove(self, *paths):
"Remove PATHS from the state (the paths must exist)."
for path in paths:
del self.desc[to_relpath(path)]
def remove_subtree(self, *paths):
"Remove PATHS recursively from the state (the paths must exist)."
for subtree_path in paths:
subtree_path = to_relpath(subtree_path)
for path, item in svntest.main.ensure_list(self.desc.items()):
if path == subtree_path or path[:len(subtree_path) + 1] == subtree_path + '/':
del self.desc[path]
def copy(self, new_root=None):
"""Make a deep copy of self. If NEW_ROOT is not None, then set the
copy's wc_dir NEW_ROOT instead of to self's wc_dir."""
desc = { }
for path, item in self.desc.items():
desc[path] = item.copy()
if new_root is None:
new_root = self.wc_dir
return State(new_root, desc)
def tweak(self, *args, **kw):
"""Tweak the items' values.
Each argument in ARGS is the path of a StateItem that already exists in
this State. Each keyword argument in KW is a modifiable property of
StateItem.
The general form of this method is .tweak([paths...,] key=value...). If
one or more paths are provided, then those items' values are
modified. If no paths are given, then all items are modified.
"""
if args:
for path in args:
try:
path_ref = self.desc[to_relpath(path)]
except KeyError as e:
e.args = ["Path '%s' not present in WC state descriptor" % path]
raise
path_ref.tweak(**kw)
else:
for item in self.desc.values():
item.tweak(**kw)
def tweak_some(self, filter, **kw):
"Tweak the items for which the filter returns true."
for path, item in self.desc.items():
if list(filter(path, item)):
item.tweak(**kw)
def rename(self, moves):
"""Change the path of some items.
MOVES is a dictionary mapping source path to destination
path. Children move with moved parents. All subtrees are moved in
reverse depth order to temporary storage before being moved in
depth order to the final location. This allows nested moves.
"""
temp = {}
for src, dst in sorted(moves.items(), key=lambda pair: pair[0])[::-1]:
temp[src] = {}
for path, item in svntest.main.ensure_list(self.desc.items()):
if path == src or path[:len(src) + 1] == src + '/':
          temp[src][path] = item
del self.desc[path]
for src, dst in sorted(moves.items(), key=lambda pair: pair[1]):
for path, item in temp[src].items():
if path == src:
new_path = dst
else:
new_path = dst + path[len(src):]
self.desc[new_path] = item
def subtree(self, subtree_path):
"""Return a State object which is a deep copy of the sub-tree
beneath SUBTREE_PATH (which is assumed to be rooted at the tree of
this State object's WC_DIR). Exclude SUBTREE_PATH itself."""
desc = { }
for path, item in self.desc.items():
if path[:len(subtree_path) + 1] == subtree_path + '/':
desc[path[len(subtree_path) + 1:]] = item.copy()
return State(self.wc_dir, desc)
def write_to_disk(self, target_dir):
"""Construct a directory structure on disk, matching our state.
WARNING: any StateItem that does not have contents (.contents is None)
is assumed to be a directory.
"""
if not os.path.exists(target_dir):
os.makedirs(target_dir)
for path, item in self.desc.items():
fullpath = os.path.join(target_dir, path)
if item.contents is None:
# a directory
if not os.path.exists(fullpath):
os.makedirs(fullpath)
else:
# a file
# ensure its directory exists
dirpath = os.path.dirname(fullpath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
# write out the file contents now
svntest.main.file_write(fullpath, item.contents, 'wb')
def normalize(self):
"""Return a "normalized" version of self.
A normalized version has the following characteristics:
* wc_dir == ''
* paths use forward slashes
* paths are relative
If self is already normalized, then it is returned. Otherwise, a
new State is constructed with (shallow) references to self's
StateItem instances.
If the caller needs a fully disjoint State, then use .copy() on
the result.
"""
if self.wc_dir == '':
return self
base = to_relpath(os.path.normpath(self.wc_dir))
desc = dict([(repos_join(base, path), item)
for path, item in self.desc.items()])
for path, item in desc.copy().items():
if item.moved_from or item.moved_to:
i = item.copy()
if i.moved_from:
i.moved_from = to_relpath(os.path.normpath(
repos_join(base, i.moved_from)))
if i.moved_to:
i.moved_to = to_relpath(os.path.normpath(
repos_join(base, i.moved_to)))
desc[path] = i
return State('', desc)
def compare(self, other):
"""Compare this State against an OTHER State.
Three new set objects will be returned: CHANGED, UNIQUE_SELF, and
UNIQUE_OTHER. These contain paths of StateItems that are different
between SELF and OTHER, paths of items unique to SELF, and paths
    of items that are unique to OTHER, respectively.
"""
assert isinstance(other, State)
norm_self = self.normalize()
norm_other = other.normalize()
# fast-path the easy case
if norm_self == norm_other:
fs = frozenset()
return fs, fs, fs
paths_self = set(norm_self.desc.keys())
paths_other = set(norm_other.desc.keys())
changed = set()
for path in paths_self.intersection(paths_other):
if norm_self.desc[path] != norm_other.desc[path]:
changed.add(path)
return changed, paths_self - paths_other, paths_other - paths_self
def compare_and_display(self, label, other):
"""Compare this State against an OTHER State, and display differences.
Information will be written to stdout, displaying any differences
between the two states. LABEL will be used in the display. SELF is the
"expected" state, and OTHER is the "actual" state.
If any changes are detected/displayed, then SVNTreeUnequal is raised.
"""
norm_self = self.normalize()
norm_other = other.normalize()
changed, unique_self, unique_other = norm_self.compare(norm_other)
if not changed and not unique_self and not unique_other:
return
# Use the shortest path as a way to find the "root-most" affected node.
def _shortest_path(path_set):
shortest = None
for path in path_set:
if shortest is None or len(path) < len(shortest):
shortest = path
return shortest
if changed:
path = _shortest_path(changed)
display_nodes(label, path, norm_self.desc[path], norm_other.desc[path])
elif unique_self:
path = _shortest_path(unique_self)
default_singleton_handler('actual ' + label, path, norm_self.desc[path])
elif unique_other:
path = _shortest_path(unique_other)
default_singleton_handler('expected ' + label, path,
norm_other.desc[path])
raise svntest.tree.SVNTreeUnequal
def tweak_for_entries_compare(self):
for path, item in self.desc.copy().items():
if item.status and path in self.desc:
# If this is an unversioned tree-conflict, remove it.
# These are only in their parents' THIS_DIR, they don't have entries.
if item.status[0] in '!?' and item.treeconflict == 'C' and \
item.entry_status is None:
del self.desc[path]
# Normal externals are not stored in the parent wc, drop the root
# and everything in these working copies
elif item.status == 'X ' or item.prev_status == 'X ':
del self.desc[path]
for p, i in self.desc.copy().items():
if p.startswith(path + '/'):
del self.desc[p]
elif item.entry_kind == 'file':
# A file has no descendants in svn_wc_entry_t
for p, i in self.desc.copy().items():
if p.startswith(path + '/'):
del self.desc[p]
else:
# when reading the entry structures, we don't examine for text or
# property mods, so clear those flags. we also do not examine the
# filesystem, so we cannot detect missing or obstructed files.
if item.status[0] in 'M!~':
item.status = ' ' + item.status[1]
if item.status[1] == 'M':
item.status = item.status[0] + ' '
# under wc-ng terms, we may report a different revision than the
# backwards-compatible code should report. if there is a special
# value for compatibility, then use it.
if item.entry_rev is not None:
item.wc_rev = item.entry_rev
item.entry_rev = None
# status might vary as well, e.g. when a directory is missing
if item.entry_status is not None:
item.status = item.entry_status
item.entry_status = None
if item.entry_copied is not None:
item.copied = item.entry_copied
item.entry_copied = None
if item.writelocked:
# we don't contact the repository, so our only information is what
# is in the working copy. 'K' means we have one and it matches the
# repos. 'O' means we don't have one but the repos says the item
# is locked by us, elsewhere. 'T' means we have one, and the repos
# has one, but it is now owned by somebody else. 'B' means we have
# one, but the repos does not.
#
# for each case of "we have one", set the writelocked state to 'K',
# and clear it to None for the others. this will match what is
# generated when we examine our working copy state.
if item.writelocked in 'TB':
item.writelocked = 'K'
elif item.writelocked == 'O':
item.writelocked = None
item.moved_from = None
item.moved_to = None
if path == '':
item.switched = None
item.treeconflict = None
def old_tree(self):
"Return an old-style tree (for compatibility purposes)."
nodelist = [ ]
for path, item in self.desc.items():
nodelist.append(item.as_node_tuple(os.path.join(self.wc_dir, path)))
tree = svntest.tree.build_generic_tree(nodelist)
if 0:
check = tree.as_state()
if self != check:
logger.warn(pprint.pformat(self.desc))
logger.warn(pprint.pformat(check.desc))
# STATE -> TREE -> STATE is lossy.
# In many cases, TREE -> STATE -> TREE is not.
# Even though our conversion from a TREE has lost some information, we
# may be able to verify that our lesser-STATE produces the same TREE.
svntest.tree.compare_trees('mismatch', tree, check.old_tree())
return tree
def __str__(self):
return str(self.old_tree())
def __eq__(self, other):
if not isinstance(other, State):
return False
norm_self = self.normalize()
norm_other = other.normalize()
return norm_self.desc == norm_other.desc
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def from_status(cls, lines, wc_dir=None):
"""Create a State object from 'svn status' output."""
def not_space(value):
if value and value != ' ':
return value
return None
def parse_move(path, wc_dir):
if path.startswith('../'):
# ../ style paths are relative from the status root
return to_relpath(os.path.normpath(repos_join(wc_dir, path)))
else:
# Other paths are just relative from cwd
return to_relpath(path)
if not wc_dir:
wc_dir = ''
desc = { }
last = None
for line in lines:
if line.startswith('DBG:'):
continue
match = _re_parse_status.search(line)
if not match or match.group(10) == '-':
ex_match = _re_parse_status_ex.search(line)
if ex_match:
if ex_match.group('moved_from'):
path = to_relpath(ex_match.group('moved_from'))
last.tweak(moved_from = parse_move(path, wc_dir))
elif ex_match.group('moved_to'):
path = to_relpath(ex_match.group('moved_to'))
last.tweak(moved_to = parse_move(path, wc_dir))
elif ex_match.group('swapped_with'):
path = to_relpath(ex_match.group('swapped_with'))
last.tweak(moved_to = parse_move(path, wc_dir))
last.tweak(moved_from = parse_move(path, wc_dir))
# Parse TC description?
# ignore non-matching lines, or items that only exist on repos
continue
prev_status = None
prev_treeconflict = None
path = to_relpath(match.group('path'))
if path == '.':
path = ''
if path in desc:
prev_status = desc[path].status
prev_treeconflict = desc[path].treeconflict
item = StateItem(status=match.group(1),
locked=not_space(match.group(2)),
copied=not_space(match.group(3)),
switched=not_space(match.group(4)),
writelocked=not_space(match.group(5)),
treeconflict=not_space(match.group(6)),
wc_rev=not_space(match.group('wc_rev')),
prev_status=prev_status,
prev_treeconflict =prev_treeconflict
)
desc[path] = item
last = item
return cls('', desc)
@classmethod
def from_skipped(cls, lines):
"""Create a State object from 'Skipped' lines."""
desc = { }
for line in lines:
if line.startswith('DBG:'):
continue
match = _re_parse_skipped.search(line)
if match:
desc[to_relpath(match.group(2))] = StateItem(
verb=(match.group(1).strip(':')))
return cls('', desc)
@classmethod
def from_summarize(cls, lines):
"""Create a State object from 'svn diff --summarize' lines."""
desc = { }
for line in lines:
if line.startswith('DBG:'):
continue
match = _re_parse_summarize.search(line)
if match:
desc[to_relpath(match.group(2))] = StateItem(status=match.group(1))
return cls('', desc)
@classmethod
def from_checkout(cls, lines, include_skipped=True):
"""Create a State object from 'svn checkout' lines."""
if include_skipped:
re_extra = _re_parse_co_skipped
else:
re_extra = _re_parse_co_restored
desc = { }
for line in lines:
if line.startswith('DBG:'):
continue
match = _re_parse_checkout.search(line)
if match:
if match.group(3) != ' ':
treeconflict = match.group(3)
else:
treeconflict = None
path = to_relpath(match.group(4))
prev_status = None
prev_verb = None
prev_treeconflict = None
if path in desc:
prev_status = desc[path].status
prev_verb = desc[path].verb
prev_treeconflict = desc[path].treeconflict
desc[path] = StateItem(status=match.group(1),
treeconflict=treeconflict,
prev_status=prev_status,
prev_verb=prev_verb,
prev_treeconflict=prev_treeconflict)
else:
match = re_extra.search(line)
if match:
path = to_relpath(match.group(2))
prev_status = None
prev_verb = None
prev_treeconflict = None
if path in desc:
prev_status = desc[path].status
prev_verb = desc[path].verb
prev_treeconflict = desc[path].treeconflict
desc[path] = StateItem(verb=match.group(1),
prev_status=prev_status,
prev_verb=prev_verb,
prev_treeconflict=prev_treeconflict)
return cls('', desc)
@classmethod
def from_commit(cls, lines):
"""Create a State object from 'svn commit' lines."""
desc = { }
for line in lines:
if line.startswith('DBG:') or line.startswith('Transmitting'):
continue
if line.startswith('Committing transaction'):
continue
match = _re_parse_commit_ext.search(line)
if match:
desc[to_relpath(match.group(4))] = StateItem(verb=match.group(1))
continue
match = _re_parse_commit.search(line)
if match:
desc[to_relpath(match.group(3))] = StateItem(verb=match.group(1))
return cls('', desc)
@classmethod
def from_wc(cls, base, load_props=False, ignore_svn=True,
keep_eol_style=False):
"""Create a State object from a working copy.
Walks the tree at PATH, building a State based on the actual files
and directories found. If LOAD_PROPS is True, then the properties
will be loaded for all nodes (Very Expensive!). If IGNORE_SVN is
True, then the .svn subdirectories will be excluded from the State.
If KEEP_EOL_STYLE is set, don't let Python normalize the EOL when
reading working copy contents as text files. It has no effect on
binary files.
"""
if not base:
# we're going to walk the base, and the OS wants "."
base = '.'
desc = { }
dot_svn = svntest.main.get_admin_name()
for dirpath, dirs, files in os.walk(base):
parent = path_to_key(dirpath, base)
if ignore_svn and dot_svn in dirs:
dirs.remove(dot_svn)
for name in dirs + files:
node = os.path.join(dirpath, name)
if os.path.isfile(node):
try:
if keep_eol_style:
contents = open(node, 'r', newline='').read()
else:
contents = open(node, 'r').read()
except:
contents = open(node, 'rb').read()
else:
contents = None
desc[repos_join(parent, name)] = StateItem(contents=contents)
if load_props:
paths = [os.path.join(base, to_ospath(p)) for p in desc.keys()]
paths.append(base)
all_props = svntest.tree.get_props(paths)
for node, props in all_props.items():
if node == base:
desc['.'] = StateItem(props=props)
else:
if base == '.':
# 'svn proplist' strips './' from the paths. put it back on.
node = os.path.join('.', node)
desc[path_to_key(node, base)].props = props
return cls('', desc)
@classmethod
def from_entries(cls, base):
"""Create a State object from a working copy, via the old "entries" API.
Walks the tree at PATH, building a State based on the information
provided by the old entries API, as accessed via the 'entries-dump'
program.
"""
if not base:
# we're going to walk the base, and the OS wants "."
base = '.'
if os.path.isfile(base):
# a few tests run status on a single file. quick-and-dirty this. we
# really should analyze the entry (similar to below) to be general.
dirpath, basename = os.path.split(base)
entries = svntest.main.run_entriesdump(dirpath)
return cls('', {
to_relpath(base): StateItem.from_entry(entries[basename]),
})
desc = { }
dump_data = svntest.main.run_entriesdump_tree(base)
if not dump_data:
# Probably 'svn status' run on an actual only node
# ### Improve!
return cls('', desc)
dirent_join = repos_join
if len(base) == 2 and base[1:]==':' and sys.platform=='win32':
# We have a win32 drive relative path... Auch. Fix joining
def drive_join(a, b):
if len(a) == 2:
return a+b
else:
return repos_join(a,b)
dirent_join = drive_join
for parent, entries in sorted(dump_data.items()):
parent_url = entries[''].url
for name, entry in entries.items():
# if the entry is marked as DELETED *and* it is something other than
# schedule-add, then skip it. we can add a new node "over" where a
# DELETED node lives.
if entry.deleted and entry.schedule != 1:
continue
# entries that are ABSENT don't show up in status
if entry.absent:
continue
# entries that are User Excluded don't show up in status
if entry.depth == -1:
continue
if name and entry.kind == 2:
# stub subdirectory. leave a "missing" StateItem in here. note
# that we can't put the status as "! " because that gets tweaked
# out of our expected tree.
item = StateItem(status=' ', wc_rev='?')
desc[dirent_join(parent, name)] = item
continue
item = StateItem.from_entry(entry)
if name:
desc[dirent_join(parent, name)] = item
implied_url = repos_join(parent_url, svn_uri_quote(name))
else:
item._url = entry.url # attach URL to directory StateItems
desc[parent] = item
grandpa, this_name = repos_split(parent)
if grandpa in desc:
implied_url = repos_join(desc[grandpa]._url,
svn_uri_quote(this_name))
else:
implied_url = None
if implied_url and implied_url != entry.url:
item.switched = 'S'
if entry.file_external:
item.switched = 'X'
return cls('', desc)
@classmethod
def from_eids(cls, lines):
# Need to read all elements in a branch before we can construct
# the full path to an element.
# For the full path we use <branch-id>/<path-within-branch>.
def eid_path(eids, eid):
ele = eids[eid]
if ele[0] == '-1':
return ele[1]
parent_path = eid_path(eids, ele[0])
if parent_path == '':
return ele[1]
return parent_path + '/' + ele[1]
def eid_full_path(eids, eid, branch_id):
path = eid_path(eids, eid)
if path == '':
return branch_id
return branch_id + '/' + path
def add_to_desc(eids, desc, branch_id):
for k, v in eids.items():
desc[eid_full_path(eids, k, branch_id)] = StateItem(eid=k)
branch_id = None
eids = {}
desc = {}
for line in lines:
match = _re_parse_eid_ele.search(line)
if match and match.group(2) != 'none':
eid = match.group(1)
parent_eid = match.group(3)
path = match.group(4)
if path == '.':
path = ''
eids[eid] = [parent_eid, path]
match = _re_parse_eid_branch.search(line)
if match:
if branch_id:
add_to_desc(eids, desc, branch_id)
eids = {}
branch_id = match.group(1)
root_eid = match.group(2)
match = _re_parse_eid_merge_history.search(line)
if match:
### TODO: store the merge history
pass
add_to_desc(eids, desc, branch_id)
return cls('', desc)
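# Illustrative sketch (not part of the original module): building a small
# expected State by hand, as described in the State docstring above.  The
# paths, contents and revisions are hypothetical.
def _example_expected_state():
  expected = State('', {
    ''     : StateItem(status='  '),   # the working copy root itself
    'A'    : StateItem(status='  '),   # a directory (no contents)
    'A/mu' : StateItem("This is the file 'mu'.\n", status='M '),
  })
  # tweak() updates attributes of existing items, e.g. the expected revision.
  expected.tweak('A/mu', wc_rev=2)
  return expected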
class StateItem:
"""Describes an individual item within a working copy.
Note that the location of this item is not specified. An external
mechanism, such as the State class, will provide location information
for each item.
"""
def __init__(self, contents=None, props=None,
status=None, verb=None, wc_rev=None, entry_kind=None,
entry_rev=None, entry_status=None, entry_copied=None,
locked=None, copied=None, switched=None, writelocked=None,
treeconflict=None, moved_from=None, moved_to=None,
prev_status=None, prev_verb=None, prev_treeconflict=None,
eid=None):
# provide an empty prop dict if it wasn't provided
if props is None:
props = { }
### keep/make these ints one day?
if wc_rev is not None:
wc_rev = str(wc_rev)
if eid is not None:
eid = str(eid)
# Any attribute can be None if not relevant, unless otherwise stated.
# A string of content (if the node is a file).
self.contents = contents
# A dictionary mapping prop name to prop value; never None.
self.props = props
# A two-character string from the first two columns of 'svn status'.
self.status = status
self.prev_status = prev_status
# The action word such as 'Adding' printed by commands like 'svn update'.
self.verb = verb
self.prev_verb = prev_verb
# The base revision number of the node in the WC, as a string.
self.wc_rev = wc_rev
    # If 'file', the node is a file and as such has no svn_wc_entry_t
    # descendants.
    self.entry_kind = entry_kind
# These will be set when we expect the wc_rev/status to differ from those
# found in the entries code.
self.entry_rev = entry_rev
self.entry_status = entry_status
self.entry_copied = entry_copied
# For the following attributes, the value is the status character of that
# field from 'svn status', except using value None instead of status ' '.
self.locked = locked
self.copied = copied
self.switched = switched
self.writelocked = writelocked
# Value 'C', 'A', 'D' or ' ', or None as an expected status meaning 'do not check'.
self.treeconflict = treeconflict
self.prev_treeconflict = prev_treeconflict
# Relative paths to the move locations
self.moved_from = moved_from
self.moved_to = moved_to
self.eid = eid
def copy(self):
"Make a deep copy of self."
new = StateItem()
vars(new).update(vars(self))
new.props = self.props.copy()
return new
def tweak(self, **kw):
for name, value in kw.items():
# Refine the revision args (for now) to ensure they are strings.
if value is not None and name == 'wc_rev':
value = str(value)
if value is not None and name == 'eid':
value = str(value)
setattr(self, name, value)
def __eq__(self, other):
if not isinstance(other, StateItem):
return False
v_self = dict([(k, v) for k, v in vars(self).items()
if not k.startswith('_') and not k.startswith('entry_')])
v_other = dict([(k, v) for k, v in vars(other).items()
if not k.startswith('_') and not k.startswith('entry_')])
if self.wc_rev == '0' and self.status == 'A ':
v_self['wc_rev'] = '-'
if other.wc_rev == '0' and other.status == 'A ':
v_other['wc_rev'] = '-'
return v_self == v_other
def __ne__(self, other):
return not self.__eq__(other)
def as_node_tuple(self, path):
atts = { }
if self.status is not None:
atts['status'] = self.status
if self.prev_status is not None:
atts['prev_status'] = self.prev_status
if self.verb is not None:
atts['verb'] = self.verb
if self.prev_verb is not None:
atts['prev_verb'] = self.prev_verb
if self.wc_rev is not None:
atts['wc_rev'] = self.wc_rev
if self.locked is not None:
atts['locked'] = self.locked
if self.copied is not None:
atts['copied'] = self.copied
if self.switched is not None:
atts['switched'] = self.switched
if self.writelocked is not None:
atts['writelocked'] = self.writelocked
if self.treeconflict is not None:
atts['treeconflict'] = self.treeconflict
if self.prev_treeconflict is not None:
atts['prev_treeconflict'] = self.prev_treeconflict
if self.moved_from is not None:
atts['moved_from'] = self.moved_from
if self.moved_to is not None:
atts['moved_to'] = self.moved_to
if self.eid is not None:
atts['eid'] = self.eid
return (os.path.normpath(path), self.contents, self.props, atts)
@classmethod
def from_entry(cls, entry):
status = ' '
if entry.schedule == 1: # svn_wc_schedule_add
status = 'A '
elif entry.schedule == 2: # svn_wc_schedule_delete
status = 'D '
elif entry.schedule == 3: # svn_wc_schedule_replace
status = 'R '
elif entry.conflict_old:
### I'm assuming we only need to check one, rather than all conflict_*
status = 'C '
    ### is this sufficient? guessing here w/o investigation.
if entry.prejfile:
status = status[0] + 'C'
if entry.locked:
locked = 'L'
else:
locked = None
if entry.copied:
wc_rev = '-'
copied = '+'
else:
if entry.revision == -1:
wc_rev = '?'
else:
wc_rev = entry.revision
copied = None
### figure out switched
switched = None
if entry.lock_token:
writelocked = 'K'
else:
writelocked = None
return cls(status=status,
wc_rev=wc_rev,
locked=locked,
copied=copied,
switched=switched,
writelocked=writelocked,
)
if os.sep == '/':
to_relpath = to_ospath = lambda path: path
else:
def to_relpath(path):
"""Return PATH but with all native path separators changed to '/'."""
return path.replace(os.sep, '/')
def to_ospath(path):
"""Return PATH but with each '/' changed to the native path separator."""
return path.replace('/', os.sep)
def path_to_key(path, base):
"""Return the relative path that represents the absolute path PATH under
the absolute path BASE. PATH must be a path under BASE. The returned
path has '/' separators."""
if path == base:
return ''
if base.endswith(os.sep) or base.endswith('/') or base.endswith(':'):
# Special path format on Windows:
# 'C:/' Is a valid root which includes its separator ('C:/file')
# 'C:' is a valid root which isn't followed by a separator ('C:file')
#
# In this case, we don't need a separator between the base and the path.
pass
else:
# Account for a separator between the base and the relpath we're creating
base += os.sep
assert path.startswith(base), "'%s' is not a prefix of '%s'" % (base, path)
return to_relpath(path[len(base):])
def repos_split(repos_relpath):
"""Split a repos path into its directory and basename parts."""
idx = repos_relpath.rfind('/')
if idx == -1:
return '', repos_relpath
return repos_relpath[:idx], repos_relpath[idx+1:]
def repos_join(base, path):
"""Join two repos paths. This generally works for URLs too."""
if base == '':
return path
elif path == '':
return base
elif base[len(base)-1:] == '/':
return base + path
else:
return base + '/' + path
def svn_uri_quote(url):
# svn defines a different set of "safe" characters than Python does, so
# we need to avoid escaping them. see subr/path.c:uri_char_validity[]
return urllib_quote(url, "!$&'()*+,-./:=@_~")
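# Illustrative sketch (not part of the original module): the behaviour of the
# small path helpers above on a POSIX-style system.  The paths are
# hypothetical.
def _example_path_helpers():
  assert repos_split('A/B/mu') == ('A/B', 'mu')
  assert repos_split('iota') == ('', 'iota')
  assert repos_join('A/B', 'mu') == 'A/B/mu'
  assert repos_join('', 'iota') == 'iota'
  # path_to_key() strips BASE and converts to '/'-separated form.
  assert path_to_key('/tmp/wc/A/mu', '/tmp/wc') == 'A/mu'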
# ------------
def python_sqlite_can_read_wc():
"""Check if the Python builtin is capable enough to peek into wc.db"""
try:
db = svntest.sqlite3.connect('')
c = db.cursor()
c.execute('select sqlite_version()')
ver = tuple(map(int, c.fetchall()[0][0].split('.')))
return ver >= (3, 6, 18) # Currently enough (1.7-1.9)
except:
return False
def open_wc_db(local_path):
"""Open the SQLite DB for the WC path LOCAL_PATH.
Return (DB object, WC root path, WC relpath of LOCAL_PATH)."""
dot_svn = svntest.main.get_admin_name()
root_path = local_path
relpath = ''
while True:
db_path = os.path.join(root_path, dot_svn, 'wc.db')
try:
db = svntest.sqlite3.connect(db_path)
break
except: pass
head, tail = os.path.split(root_path)
if head == root_path:
raise svntest.Failure("No DB for " + local_path)
root_path = head
relpath = os.path.join(tail, relpath).replace(os.path.sep, '/').rstrip('/')
return db, root_path, relpath
# ------------
def text_base_path(file_path):
"""Return the path to the text-base file for the versioned file
FILE_PATH."""
info = svntest.actions.run_and_parse_info(file_path)[0]
checksum = info['Checksum']
db, root_path, relpath = open_wc_db(file_path)
# Calculate single DB location
dot_svn = svntest.main.get_admin_name()
fn = os.path.join(root_path, dot_svn, 'pristine', checksum[0:2], checksum)
# For SVN_WC__VERSION < 29
if os.path.isfile(fn):
return fn
# For SVN_WC__VERSION >= 29
if os.path.isfile(fn + ".svn-base"):
return fn + ".svn-base"
raise svntest.Failure("No pristine text for " + relpath)
def sqlite_stmt(wc_root_path, stmt):
"""Execute STMT on the SQLite wc.db in WC_ROOT_PATH and return the
results."""
db = open_wc_db(wc_root_path)[0]
c = db.cursor()
c.execute(stmt)
return c.fetchall()
def sqlite_exec(wc_root_path, stmt):
"""Execute STMT on the SQLite wc.db in WC_ROOT_PATH and return the
results."""
db = open_wc_db(wc_root_path)[0]
c = db.cursor()
c.execute(stmt)
db.commit()
# ------------
### probably toss these at some point. or major rework. or something.
### just bootstrapping some changes for now.
#
def item_to_node(path, item):
tree = svntest.tree.build_generic_tree([item.as_node_tuple(path)])
while tree.children:
assert len(tree.children) == 1
tree = tree.children[0]
return tree
### yanked from tree.compare_trees()
def display_nodes(label, path, expected, actual):
'Display two nodes, expected and actual.'
expected = item_to_node(path, expected)
actual = item_to_node(path, actual)
o = StringIO()
o.write("=============================================================\n")
o.write("Expected '%s' and actual '%s' in %s tree are different!\n"
% (expected.name, actual.name, label))
o.write("=============================================================\n")
o.write("EXPECTED NODE TO BE:\n")
o.write("=============================================================\n")
expected.pprint(o)
o.write("=============================================================\n")
o.write("ACTUAL NODE FOUND:\n")
o.write("=============================================================\n")
actual.pprint(o)
logger.warn(o.getvalue())
o.close()
### yanked from tree.py
def default_singleton_handler(description, path, item):
node = item_to_node(path, item)
logger.warn("Couldn't find node '%s' in %s tree" % (node.name, description))
o = StringIO()
node.pprint(o)
logger.warn(o.getvalue())
o.close()
raise svntest.tree.SVNTreeUnequal
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.framework.meta_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import random
import shutil
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner_impl
# pylint: disable=invalid-name
def _TestDir(test_name):
test_dir = os.path.join(test.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
gfile.MakeDirs(test_dir)
return test_dir
# pylint: enable=invalid-name
class SimpleMetaGraphTest(test.TestCase):
def testNoVariables(self):
test_dir = _TestDir("no_variables")
filename = os.path.join(test_dir, "metafile")
input_feed_value = -10 # Arbitrary input value for feed_dict.
orig_graph = ops.Graph()
with self.test_session(graph=orig_graph) as sess:
# Create a minimal graph with zero variables.
input_tensor = array_ops.placeholder(
dtypes.float32, shape=[], name="input")
offset = constant_op.constant(42, dtype=dtypes.float32, name="offset")
output_tensor = math_ops.add(input_tensor, offset, name="add_offset")
# Add input and output tensors to graph collections.
ops.add_to_collection("input_tensor", input_tensor)
ops.add_to_collection("output_tensor", output_tensor)
output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
self.assertEqual(output_value, 32)
# Generates MetaGraphDef.
meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
filename=filename,
graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
collection_list=["input_tensor", "output_tensor"],
saver_def=None)
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
self.assertEqual({}, var_list)
# Create a clean graph and import the MetaGraphDef nodes.
new_graph = ops.Graph()
with self.test_session(graph=new_graph) as sess:
      # Import the previously exported meta graph.
meta_graph.import_scoped_meta_graph(filename)
# Re-exports the current graph state for comparison to the original.
new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(filename +
"_new")
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
# Ensures that we can still get a reference to our graph collections.
new_input_tensor = ops.get_collection("input_tensor")[0]
new_output_tensor = ops.get_collection("output_tensor")[0]
# Verifies that the new graph computes the same result as the original.
new_output_value = sess.run(new_output_tensor,
{new_input_tensor: input_feed_value})
self.assertEqual(new_output_value, output_value)
def testStrippedOpListNestedFunctions(self):
with self.test_session():
# Square two levels deep
@function.Defun(dtypes.int32)
def f0(x):
return math_ops.square(x)
@function.Defun(dtypes.int32)
def f1(x):
return f0(x)
# At this point we've defined two functions but haven't called them, so
# there should be no used ops.
op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
.as_graph_def())
self.assertEqual(len(op_list.op), 0)
# If we call the function on a constant, there should be two ops
_ = f1(constant_op.constant(7))
op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
.as_graph_def())
self.assertEqual(["Const", "Square"], [op.name for op in op_list.op])
def testStrippedOpListRecursiveFunctions(self):
# The function module doesn't support recursive functions, so we build a
# recursive function situation by ourselves: A calls B calls A and Const.
graph = graph_pb2.GraphDef()
a = graph.library.function.add()
b = graph.library.function.add()
a.signature.name = "A"
b.signature.name = "B"
a.node_def.add().op = "B"
b.node_def.add().op = "Const"
b.node_def.add().op = "A"
# Use A in the graph
graph.node.add().op = "A"
# The stripped op list should contain just Const.
op_list = meta_graph.stripped_op_list_for_graph(graph)
self.assertEqual(["Const"], [op.name for op in op_list.op])
class ScopedMetaGraphTest(test.TestCase):
def _testScopedExport(self, test_dir, exported_filenames):
graph = ops.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
colocate_constraint = constant_op.constant(1.2, name="constraint")
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops.name_scope("hidden1"):
with graph.colocate_with(colocate_constraint.op):
weights1 = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely for adding test
        # coverage of saving and restoring the control flow context (which
        # doesn't make sense here from a machine learning perspective). A
        # typical bias is a simple Variable without the condition.
biases1 = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops.name_scope("hidden2"):
weights2 = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding test
        # coverage of saving and restoring the control flow context (which
        # doesn't make sense here from a machine learning perspective). A
        # typical bias is a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(
loop_cond,
loop_body, [
constant_op.constant(0), variables.Variable(
array_ops.zeros([32]), name="biases")
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops.name_scope("softmax_linear"):
weights3 = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops.add_to_collection("logits", logits)
# Exports each sub-graph.
# Exports the first one with unbound_inputs_col_name set to default.
orig_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[0]),
graph=ops.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["hidden1/biases:0", "hidden1/weights:0"],
sorted(var_names))
# Exports the rest with no unbound_inputs_col_name.
orig_meta_graph2, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[1]),
graph=ops.get_default_graph(),
export_scope="hidden2",
unbound_inputs_col_name=None)
orig_meta_graph3, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filenames[2]),
graph=ops.get_default_graph(),
export_scope="softmax_linear",
unbound_inputs_col_name=None)
return [orig_meta_graph1, orig_meta_graph2, orig_meta_graph3]
def _testScopedImport(self, test_dir, exported_filenames):
graph = ops.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
import_scope="new_hidden1")
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
input_map={"image:0": new_image},
import_scope="new_hidden1")
# Verifies we can import the original "hidden1" into "new_hidden1".
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[0]),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["new_hidden1/biases:0", "new_hidden1/weights:0"],
sorted(new_var_names))
# Verifies we can import the original "hidden2" into "new_hidden2".
hidden1 = array_ops.identity(
graph.as_graph_element("new_hidden1/Relu:0"), name="hidden1/Relu")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[1]),
graph=graph,
input_map={"$unbound_inputs_hidden1/Relu": hidden1},
import_scope="new_hidden2",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["new_hidden2/biases:0", "new_hidden2/weights:0"],
sorted(new_var_names))
# Verifies we can import the original "softmax_linear" into
# "new_softmax_linear".
hidden2 = array_ops.identity(
graph.as_graph_element("new_hidden2/Relu:0"), name="hidden2/Relu")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filenames[2]),
graph=graph,
input_map={"$unbound_inputs_hidden2/Relu": hidden2},
import_scope="new_softmax_linear",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_var_names = [v.name for _, v in var_list.items()]
self.assertEqual(
["new_softmax_linear/biases:0", "new_softmax_linear/weights:0"],
sorted(new_var_names))
# Exports the scoped meta graphs again.
new_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
graph=graph, export_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_meta_graph2, var_list = meta_graph.export_scoped_meta_graph(
graph=graph, export_scope="new_hidden2", unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
new_meta_graph3, var_list = meta_graph.export_scoped_meta_graph(
graph=graph,
export_scope="new_softmax_linear",
unbound_inputs_col_name=None)
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
return [new_meta_graph1, new_meta_graph2, new_meta_graph3]
# Verifies that we can export the subgraph under each layer and import
# them into new layers in a new graph.
def testScopedExportAndImport(self):
test_dir = _TestDir("scoped_export_import")
filenames = [
"exported_hidden1.pbtxt", "exported_hidden2.pbtxt",
"exported_softmax_linear.pbtxt"
]
orig_meta_graphs = self._testScopedExport(test_dir, filenames)
new_meta_graphs = self._testScopedImport(test_dir, filenames)
# Delete the unbound_inputs to allow directly calling ProtoEqual.
del orig_meta_graphs[0].collection_def["unbound_inputs"]
del new_meta_graphs[0].collection_def["unbound_inputs"]
for a, b in zip(orig_meta_graphs, new_meta_graphs):
test_util.assert_meta_graph_protos_equal(self, a, b)
def testScopedImportUnderNameScope(self):
graph = ops.Graph()
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True, name="myvar")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("foo"):
imported_variables = meta_graph.import_scoped_meta_graph(
meta_graph_def, import_scope="bar")
self.assertEqual(len(imported_variables), 1)
self.assertEqual(list(imported_variables.values())[0].name,
"foo/bar/myvar:0")
def testScopedImportWithSelectedCollections(self):
meta_graph_filename = os.path.join(
_TestDir("selected_collections_import"), "meta_graph.pb")
graph = ops.Graph()
# Add a variable to populate two collections. The functionality tested is
# not specific to variables, but using variables in the test is convenient.
with graph.as_default():
variables.Variable(initial_value=1.0, trainable=True)
self.assertTrue(
all([
graph.get_collection(key)
for key in
[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES]
]))
meta_graph.export_scoped_meta_graph(
filename=meta_graph_filename, graph=graph)
def _test_import(include_collection_keys, omit_collection_keys):
assert set(include_collection_keys).isdisjoint(omit_collection_keys)
newgraph = ops.Graph()
import_scope = "some_scope_name"
def _restore_collections_predicate(collection_key):
return (collection_key in include_collection_keys and
collection_key not in omit_collection_keys)
meta_graph.import_scoped_meta_graph(
meta_graph_filename,
graph=newgraph,
import_scope=import_scope,
restore_collections_predicate=_restore_collections_predicate)
collection_values = [
newgraph.get_collection(name=key, scope=import_scope)
for key in include_collection_keys
]
self.assertTrue(all(collection_values))
collection_values = [
newgraph.get_collection(name=key, scope=import_scope)
for key in omit_collection_keys
]
self.assertFalse(any(collection_values))
_test_import(
include_collection_keys=[
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
],
omit_collection_keys=[])
_test_import(
include_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES],
omit_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES])
_test_import(
include_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES],
omit_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES])
_test_import(
include_collection_keys=[],
omit_collection_keys=[
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
])
def _testScopedExportWithQueue(self, test_dir, exported_filename):
graph = ops.Graph()
with graph.as_default():
with ops.name_scope("queue1"):
input_queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
enqueue = input_queue.enqueue((9876), name="enqueue")
close = input_queue.close(name="close")
qr = queue_runner_impl.QueueRunner(input_queue, [enqueue], close)
queue_runner_impl.add_queue_runner(qr)
input_queue.dequeue(name="dequeue")
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops.get_default_graph(),
export_scope="queue1")
return orig_meta_graph
def _testScopedImportWithQueue(self, test_dir, exported_filename,
new_exported_filename):
graph = ops.Graph()
meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
import_scope="new_queue1")
graph.as_graph_element("new_queue1/dequeue:0")
graph.as_graph_element("new_queue1/close")
with graph.as_default():
new_meta_graph, _ = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, new_exported_filename),
graph=graph,
export_scope="new_queue1")
return new_meta_graph
# Verifies that we can export the subgraph containing a FIFOQueue under
# "queue1" and import it into "new_queue1" in a new graph.
def testScopedWithQueue(self):
test_dir = _TestDir("scoped_with_queue")
orig_meta_graph = self._testScopedExportWithQueue(test_dir,
"exported_queue1.pbtxt")
new_meta_graph = self._testScopedImportWithQueue(
test_dir, "exported_queue1.pbtxt", "exported_new_queue1.pbtxt")
self.assertProtoEquals(orig_meta_graph, new_meta_graph)
  # Verifies that we can export a subgraph nested under the name scope
  # "hidden1/hidden2" and import it into "new_hidden1/new_hidden2" in a new
  # graph.
def doTestExportNestedNames(self, use_resource=False):
graph1 = ops.Graph()
with graph1.as_default():
with ops.name_scope("hidden1/hidden2/hidden3"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
if use_resource:
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = resource_variable_ops.ResourceVariable(
[0.1] * 3, name="biases")
else:
biases1 = variables.Variable([0.1] * 3, name="biases")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
orig_meta_graph, var_list = meta_graph.export_scoped_meta_graph(
export_scope="hidden1/hidden2", graph=graph1)
var_names = [v.name for _, v in var_list.items()]
self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
sorted(var_list.keys()))
self.assertEqual([
"hidden1/hidden2/hidden3/biases:0", "hidden1/hidden2/hidden3/weights:0"
], sorted(var_names))
for node in orig_meta_graph.graph_def.node:
self.assertTrue(node.name.startswith("hidden3"))
graph2 = ops.Graph()
new_var_list = meta_graph.import_scoped_meta_graph(
orig_meta_graph, import_scope="new_hidden1/new_hidden2", graph=graph2)
self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
sorted(new_var_list.keys()))
new_var_names = [v.name for _, v in new_var_list.items()]
self.assertEqual([
"new_hidden1/new_hidden2/hidden3/biases:0",
"new_hidden1/new_hidden2/hidden3/weights:0"
], sorted(new_var_names))
nodes = [
"new_hidden1/new_hidden2/hidden3/biases/Assign",
"new_hidden1/new_hidden2/hidden3/weights/Assign"
]
expected = [
b"loc:@new_hidden1/new_hidden2/hidden3/biases",
b"loc:@new_hidden1/new_hidden2/hidden3/weights"
]
for n, e in zip(nodes, expected):
self.assertEqual([e], graph2.get_operation_by_name(n).get_attr("_class"))
def testExportNestedNames(self):
self.doTestExportNestedNames(use_resource=False)
def testExportNestedNamesResource(self):
self.doTestExportNestedNames(use_resource=True)
def testPotentialCycle(self):
graph1 = ops.Graph()
with graph1.as_default():
a = constant_op.constant(1.0, shape=[2, 2])
b = constant_op.constant(2.0, shape=[2, 2])
matmul = math_ops.matmul(a, b)
with ops.name_scope("hidden1"):
c = nn_ops.relu(matmul)
d = constant_op.constant(3.0, shape=[2, 2])
matmul = math_ops.matmul(c, d)
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
export_scope="hidden1", graph=graph1)
graph2 = ops.Graph()
with graph2.as_default():
with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
meta_graph.import_scoped_meta_graph(
orig_meta_graph, import_scope="new_hidden1")
meta_graph.import_scoped_meta_graph(
orig_meta_graph,
import_scope="new_hidden1",
input_map={
"$unbound_inputs_MatMul": constant_op.constant(
4.0, shape=[2, 2])
})
def testClearDevices(self):
graph1 = ops.Graph()
with graph1.as_default():
with ops.device("/device:CPU:0"):
a = variables.Variable(
constant_op.constant(
1.0, shape=[2, 2]), name="a")
with ops.device("/job:ps/replica:0/task:0/gpu:0"):
b = variables.Variable(
constant_op.constant(
2.0, shape=[2, 2]), name="b")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
math_ops.matmul(a, b, name="matmul")
self.assertEqual("/device:CPU:0", str(graph1.as_graph_element("a").device))
self.assertEqual("/job:ps/replica:0/task:0/device:GPU:0",
str(graph1.as_graph_element("b").device))
self.assertEqual("/job:localhost/replica:0/task:0/device:CPU:0",
str(graph1.as_graph_element("matmul").device))
# Verifies that devices are cleared on export.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph=graph1, clear_devices=True)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
# Verifies that devices are cleared on export when passing in graph_def.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), clear_devices=True)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
# Verifies that devices are cleared on import.
orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
graph=graph1, clear_devices=False)
graph2 = ops.Graph()
with graph2.as_default():
meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=True)
self.assertEqual("", str(graph2.as_graph_element("a").device))
self.assertEqual("", str(graph2.as_graph_element("b").device))
self.assertEqual("", str(graph2.as_graph_element("matmul").device))
class MetaGraphWithVariableScopeTest(test.TestCase):
def testMetricsCollection(self):
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
meta_graph_filename = os.path.join(
_TestDir("metrics_export"), "meta_graph.pb")
graph = ops.Graph()
with self.test_session(graph=graph) as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
_, update_op = metrics.mean(values)
initializer = variables.local_variables_initializer()
sess.run(initializer)
sess.run(update_op)
meta_graph.export_scoped_meta_graph(
filename=meta_graph_filename, graph=graph)
# Verifies that importing a meta_graph with LOCAL_VARIABLES collection
# works correctly.
graph = ops.Graph()
with self.test_session(graph=graph) as sess:
meta_graph.import_scoped_meta_graph(meta_graph_filename)
initializer = variables.local_variables_initializer()
sess.run(initializer)
# Verifies that importing an old meta_graph where "local_variables"
# collection is of node_list type works, but cannot build initializer
# with the collection.
graph = ops.Graph()
with self.test_session(graph=graph) as sess:
meta_graph.import_scoped_meta_graph(
test.test_src_dir_path(
"python/framework/testdata/metrics_export_meta_graph.pb"))
self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
2)
with self.assertRaisesRegexp(
AttributeError, "'Tensor' object has no attribute 'initializer'"):
initializer = variables.local_variables_initializer()
if __name__ == "__main__":
test.main()
#!/usr/bin/env python3
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import json
import sys
import traceback
from collections import OrderedDict
def merge_schema(directory, definitions, to_merge):
for schema in to_merge:
        if '$ref' not in schema:
raise ValueError("no $ref in allOf")
path, link = schema['$ref'].split('#')
ref = load_json_schema(directory, path)
defnref = link.split('/')[-1]
definitions.update(ref[defnref])
def load_json_schema(directory, path, schemas={}):
if path in schemas:
return schemas[path]
    with open(os.path.join(directory, path), "r") as schema_file:
        data = json.load(schema_file)
if not data['$schema'].startswith("http://json-schema.org/schema"):
raise ValueError("not a JSON schema")
definitions = data.get("definitions", {})
if not definitions:
raise ValueError("empty definition block")
    if 'title' not in data:
raise ValueError("JSON schema without title")
required = set(data.get('required', []))
for rt, descr in definitions.items():
if 'allOf' in descr:
merge_schema(directory, descr, descr['allOf'])
del descr['allOf']
if 'properties' in descr:
for field, props in descr['properties'].items():
doc = props.get('description', '')
props['read_only'] = doc.startswith('ReadOnly,')
props['required'] = field in required
if props['read_only']:
props['description'] = props['description'][len('ReadOnly,'):].strip()
descr['title'] = data['title']
schemas[path] = definitions
return definitions
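# Illustrative sketch (not part of the original script): the smallest input
# load_json_schema() accepts looks roughly like the dictionary below; the
# resource-type key and property name are hypothetical. After loading, each
# property gains 'read_only' and 'required' flags and any "ReadOnly," prefix
# is stripped from its description.
_EXAMPLE_SCHEMA = {
    "$schema": "http://json-schema.org/schema#",
    "title": "Example Light",
    "required": ["value"],
    "definitions": {
        "oic.r.example.light": {
            "type": "object",
            "properties": {
                "value": {
                    "type": "boolean",
                    "description": "ReadOnly, current light state"
                }
            }
        }
    }
}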
JSON_TO_C = {
"string": "char *",
"integer": "int32_t",
"boolean": "bool",
"number": "float"
}
JSON_TO_C_TMP = {}
JSON_TO_C_TMP.update(JSON_TO_C)
JSON_TO_C_TMP['string'] = "const char *"
JSON_TO_C_TMP['number'] = "double"
JSON_TO_FLOW_GET_PKT = {
"string": "sol_flow_packet_get_string",
"integer": "sol_flow_packet_get_irange_value",
"boolean": "sol_flow_packet_get_boolean",
"number": "sol_flow_packet_get_drange_value"
}
JSON_TO_FLOW_SEND_PKT = {
"string": "sol_flow_send_string_packet",
"integer": "sol_flow_send_irange_value_packet",
"boolean": "sol_flow_send_boolean_packet",
"number": "sol_flow_send_drange_value_packet"
}
JSON_TO_INIT = {
"string": "NULL",
"integer": "0",
"boolean": "false",
"number": "0.0f"
}
JSON_TO_SOL_JSON = {
"string": "string",
"integer": "int",
"boolean": "boolean",
"number": "float"
}
def object_fields_common_c(state_struct_name, name, props):
fields = []
for prop_name, descr in props.items():
doc = '/* %s */' % descr.get('description', '???')
if 'enum' in descr:
var_type = 'enum %s_%s' % (state_struct_name, prop_name)
else:
var_type = JSON_TO_C[descr['type']]
fields.append("%s %s; %s" % (var_type, prop_name, doc))
return '\n'.join(fields)
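# Hedged example (added for illustration, hypothetical property name): given
# props = {"value": {"type": "boolean", "description": "on/off"}}, the
# function above emits a struct member such as:
#     bool value; /* on/off */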
def generate_object_serialize_fn_common_c(state_struct_name, name, props, client):
fmtstrings = []
for prop_name, prop_descr in props.items():
if client and prop_descr['read_only']:
continue
if 'enum' in prop_descr:
fmtstrings.append('\\"%s\\":\\"%%s\\"' % prop_name)
elif prop_descr['type'] == 'string':
fmtstrings.append('\\"%s\\":\\"%%s\\"' % prop_name)
elif prop_descr['type'] == 'boolean':
fmtstrings.append('\\"%s\\":%%s' % prop_name)
elif prop_descr['type'] == 'integer':
fmtstrings.append('\\"%s\\":%%d' % prop_name)
elif prop_descr['type'] == 'number':
fmtstrings.append('\\"%s\\":%%f' % prop_name)
else:
raise ValueError("invalid property type: %s" % prop_descr['type'])
fields = []
for prop_name, prop_descr in props.items():
if client and prop_descr['read_only']:
continue
if 'enum' in prop_descr:
fields.append('%s_%s_tbl[state->state.%s].key' % (state_struct_name, prop_name, prop_name))
elif prop_descr['type'] == 'boolean':
fields.append('(state->state.%s)?"true":"false"' % prop_name)
elif prop_descr['type'] == 'string':
fields.append('ESCAPE_STRING(state->state.%s)' % prop_name)
else:
fields.append('state->state.%s' % prop_name)
if not fields:
return ''
return '''static uint8_t *
%(struct_name)s_serialize(struct %(type)s_resource *resource, uint16_t *length)
{
struct %(struct_name)s *state = (struct %(struct_name)s *)resource;
char *payload;
int r;
r = asprintf(&payload, "{%(fmtstrings)s}", %(fields)s);
if (r < 0)
return NULL;
if (r >= 0xffff) {
free(payload);
errno = -ENOMEM;
return NULL;
}
*length = (uint16_t)r;
return (uint8_t *)payload;
}
''' % {
'type': 'client' if client else 'server',
'struct_name': name,
'fmtstrings': ','.join(fmtstrings),
'fields': ','.join(fields)
}
def get_type_from_property(prop):
if 'type' in prop:
return prop['type']
if 'enum' in prop:
return 'enum:%s' % ','.join(prop['enum'])
raise ValueError('Unknown type for property')
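# Note added for clarity: the serializer/deserializer generators below use a
# mutable default argument ('equivalent={}') as a memo shared across calls, so
# resources whose property sets have identical shapes reuse one generated
# serialize/deserialize implementation instead of emitting duplicate C code.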
def object_serialize_fn_common_c(state_struct_name, name, props, client, equivalent={}):
def props_are_equivalent(p1, p2):
        # Compare property types only; descriptions and other metadata are ignored.
p1 = {k: get_type_from_property(v) for k, v in p1.items()}
p2 = {k: get_type_from_property(v) for k, v in p2.items()}
return p1 == p2
for item_name, item_props in equivalent.items():
if item_props[0] == client and props_are_equivalent(props, item_props[1]):
return '''static uint8_t *
%(struct_name)s_serialize(struct %(type)s_resource *resource, uint16_t *length)
{
return %(item_name)s_serialize(resource, length); /* %(item_name)s is equivalent to %(struct_name)s */
}
''' % {
'item_name': item_name,
'struct_name': name,
'type': 'client' if client else 'server'
}
equivalent[name] = (client, props)
return generate_object_serialize_fn_common_c(state_struct_name, name, props, client)
def object_serialize_fn_client_c(state_struct_name, name, props):
return object_serialize_fn_common_c(state_struct_name, name, props, True)
def object_serialize_fn_server_c(state_struct_name, name, props):
return object_serialize_fn_common_c(state_struct_name, name, props, False)
def get_field_integer_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
if (!json_token_to_int32(&value, &fields.%(field_name)s))
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_number_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
if (!json_token_to_float(&value, &fields.%(field_name)s))
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_string_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
if (!json_token_to_string(&value, &fields.%(field_name)s))
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_boolean_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
if (!json_token_to_bool(&value, &fields.%(field_name)s))
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_enum_client_c(id, struct_name, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
int16_t val = sol_str_table_lookup_fallback(%(struct_name)s_%(field_name)s_tbl,
SOL_STR_SLICE_STR(value.start, value.end - value.start), -1);
if (val < 0)
RETURN_ERROR(-EINVAL);
fields.%(field_name)s = (enum %(struct_name)s_%(field_name)s)val;
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'struct_name': struct_name,
'field_name': name,
'field_name_len': len(name),
'id': id
}
def object_fields_deserializer(name, props):
id = 0
fields = []
for prop_name, prop in props.items():
if 'enum' in prop:
fields.append(get_field_enum_client_c(id, name, prop_name, prop))
elif prop['type'] == 'string':
fields.append(get_field_string_client_c(id, prop_name, prop))
elif prop['type'] == 'integer':
fields.append(get_field_integer_client_c(id, prop_name, prop))
elif prop['type'] == 'number':
fields.append(get_field_number_client_c(id, prop_name, prop))
elif prop['type'] == 'boolean':
fields.append(get_field_boolean_client_c(id, prop_name, prop))
else:
raise ValueError('unknown field type: %s' % prop['type'])
id += 1
return '\n'.join(fields)
def generate_object_deserialize_fn_common_c(name, props):
fields_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
fields_init.append('.%s = state->%s,' % (field_name, field_name))
elif field_props['type'] == 'string':
fields_init.append('.%s = strdup(state->%s),' % (field_name, field_name))
else:
fields_init.append('.%s = state->%s,' % (field_name, field_name))
fields_free = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
fields_free.append('free(fields.%s);' % (field_name))
update_state = []
for field_name, field_props in props.items():
        if 'enum' not in field_props and field_props.get('type') == 'string':
update_state.append('free(state->%s);' % field_name)
update_state.append('state->%s = fields.%s;' % (field_name, field_name))
return '''static int
%(struct_name)s_deserialize(struct %(struct_name)s *state,
const uint8_t *payload, uint16_t payload_len, uint32_t decode_mask)
{
#define RETURN_ERROR(errcode) do { err = (errcode); goto out; } while(0)
struct sol_json_scanner scanner;
struct sol_json_token token, key, value;
enum sol_json_loop_reason reason;
int err = 0;
struct %(struct_name)s fields = {
%(fields_init)s
};
sol_json_scanner_init(&scanner, payload, payload_len);
SOL_JSON_SCANNER_OBJECT_LOOP(&scanner, &token, &key, &value, reason) {
%(deserializers)s
}
if (reason != SOL_JSON_LOOP_REASON_OK)
RETURN_ERROR(-EINVAL);
%(update_state)s
return 0;
out:
%(free_fields)s
return err;
#undef RETURN_ERROR
}
''' % {
'struct_name': name,
'fields': object_fields_common_c(name, name, props),
'fields_init': '\n'.join(fields_init),
'deserializers': object_fields_deserializer(name, props),
'free_fields': '\n'.join(fields_free),
'update_state': '\n'.join(update_state)
}
def object_deserialize_fn_common_c(name, props, equivalent={}):
def props_are_equivalent(p1, p2):
p1 = {k: get_type_from_property(v) for k, v in p1.items()}
p2 = {k: get_type_from_property(v) for k, v in p2.items()}
return p1 == p2
for item_name, item_props in equivalent.items():
if props_are_equivalent(props, item_props):
return '''static int
%(struct_name)s_deserialize(struct %(struct_name)s *state,
const uint8_t *payload, uint16_t payload_len, uint32_t decode_mask)
{
/* %(item_name)s is equivalent to %(struct_name)s */
return %(item_name)s_deserialize((struct %(item_name)s *)state, payload, payload_len, decode_mask);
}
''' % {
'item_name': item_name,
'struct_name': name
}
equivalent[name] = props
return generate_object_deserialize_fn_common_c(name, props)
def object_deserialize_fn_client_c(state_struct_name, name, props):
return '''static int
%(struct_name)s_deserialize(struct client_resource *resource, const uint8_t *payload, uint16_t payload_len)
{
struct %(struct_name)s *res = (struct %(struct_name)s *)resource;
return %(state_struct_name)s_deserialize(&res->state, payload, payload_len, ~0);
}
''' % {
'struct_name': name,
'state_struct_name': state_struct_name
}
def object_deserialize_fn_server_c(state_struct_name, name, props):
decode_mask = 0
id = 0
for field_name, field_props in props.items():
if not field_props['read_only']:
decode_mask |= 1<<id
id += 1
if not decode_mask:
return ''
return '''static int
%(struct_name)s_deserialize(struct server_resource *resource, const uint8_t *payload, uint16_t payload_len)
{
struct %(struct_name)s *res = (struct %(struct_name)s *)resource;
return %(state_struct_name)s_deserialize(&res->state, payload, payload_len, 0x%(decode_mask)x);
}
''' % {
'struct_name': name,
'state_struct_name': state_struct_name,
'decode_mask': decode_mask
}
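# Hedged example (hypothetical fields): with props ordered as
# ('value': read_only, 'name': writable), the loop above produces
# decode_mask == 0x2, so the generated server deserializer only accepts JSON
# updates for the writable 'name' field and ignores 'value'.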
def object_inform_flow_fn_common_c(state_struct_name, name, props, client):
send_flow_pkts = []
for field_name, field_props in props.items():
if 'enum' in field_props:
fn = 'sol_flow_send_string_packet'
val = '%(struct_name)s_%(field_name)s_tbl[state->state.%(field_name)s].key' % {
'struct_name': state_struct_name,
'field_name': field_name
}
else:
fn = JSON_TO_FLOW_SEND_PKT[field_props['type']]
val = 'state->state.%(field_name)s' % {
'field_name': field_name
}
send_flow_pkts.append('''%(flow_send_fn)s(resource->node, SOL_FLOW_NODE_TYPE_%(STRUCT_NAME)s__OUT__OUT_%(FIELD_NAME)s, %(val)s);''' % {
'flow_send_fn': fn,
'STRUCT_NAME': name.upper(),
'FIELD_NAME': field_name.upper(),
'val': val
})
return '''static void %(struct_name)s_inform_flow(struct %(type)s_resource *resource)
{
struct %(struct_name)s *state = (struct %(struct_name)s *)resource;
%(send_flow_pkts)s
}
''' % {
'type': 'client' if client else 'server',
'struct_name': name,
'send_flow_pkts': '\n'.join(send_flow_pkts)
}
def object_inform_flow_fn_client_c(state_struct_name, name, props):
return object_inform_flow_fn_common_c(state_struct_name, name, props, True)
def object_inform_flow_fn_server_c(state_struct_name, name, props):
read_only = all(field_props['read_only'] for field_name, field_props in props.items())
return '' if read_only else object_inform_flow_fn_common_c(state_struct_name, name, props, False)
def object_open_fn_client_c(state_struct_name, resource_type, name, props):
field_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
init = '(enum %s_%s)0' % (state_struct_name, field_name)
else:
init = JSON_TO_INIT[field_props.get('type', 'integer')]
field_init.append('''resource->state.%(field_name)s = %(init)s;''' % {
'field_name': field_name,
'init': init
})
no_inputs = all(field_props['read_only'] for field_name, field_props in props.items())
if no_inputs:
serialize_fn = 'NULL'
else:
serialize_fn = '%s_serialize' % name
return '''static int
%(struct_name)s_open(struct sol_flow_node *node, void *data, const struct sol_flow_node_options *options)
{
const struct sol_flow_node_type_%(struct_name)s_options *node_opts =
(const struct sol_flow_node_type_%(struct_name)s_options *)options;
static const struct client_resource_funcs funcs = {
.serialize = %(serialize_fn)s,
.deserialize = %(struct_name)s_deserialize,
.inform_flow = %(struct_name)s_inform_flow,
.found_port = SOL_FLOW_NODE_TYPE_%(STRUCT_NAME)s__OUT__FOUND
};
struct %(struct_name)s *resource = data;
int r;
r = client_resource_init(node, &resource->base, "%(resource_type)s", node_opts->hwaddr, &funcs);
if (!r) {
%(field_init)s
}
return r;
}
''' % {
'struct_name': name,
'STRUCT_NAME': name.upper(),
'resource_type': resource_type,
'field_init': '\n'.join(field_init),
'serialize_fn': serialize_fn
}
def object_open_fn_server_c(state_struct_name, resource_type, name, props, definitions={'id':0}):
def_id = definitions['id']
definitions['id'] += 1
no_inputs = all(field_props['read_only'] for field_name, field_props in props.items())
if no_inputs:
deserialize_fn_name = 'NULL'
inform_flow_fn_name = 'NULL'
else:
deserialize_fn_name = '%s_deserialize' % name
inform_flow_fn_name = '%s_inform_flow' % name
field_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
init = '(enum %s_%s)0' % (state_struct_name, field_name)
else:
init = JSON_TO_INIT[field_props.get('type', 'integer')]
field_init.append('''resource->state.%(field_name)s = %(init)s;''' % {
'field_name': field_name,
'init': init
})
return '''static int
%(struct_name)s_open(struct sol_flow_node *node, void *data, const struct sol_flow_node_options *options)
{
static const struct sol_str_slice rt_slice = SOL_STR_SLICE_LITERAL("%(resource_type)s");
static const struct sol_str_slice def_slice = SOL_STR_SLICE_LITERAL("/etta/%(def_id)x");
static const struct server_resource_funcs funcs = {
.serialize = %(struct_name)s_serialize,
.deserialize = %(deserialize_fn_name)s,
.inform_flow = %(inform_flow_fn_name)s
};
struct %(struct_name)s *resource = data;
int r;
r = server_resource_init(&resource->base, node, rt_slice, def_slice, &funcs);
if (!r) {
%(field_init)s
}
return r;
}
''' % {
'struct_name': name,
'resource_type': resource_type,
'def_id': def_id,
'deserialize_fn_name': deserialize_fn_name,
'inform_flow_fn_name': inform_flow_fn_name,
'field_init': '\n'.join(field_init)
}
def object_close_fn_client_c(name, props):
destroy_fields = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
destroy_fields.append('free(resource->state.%s);' % field_name)
return '''static void %(struct_name)s_close(struct sol_flow_node *node, void *data)
{
struct %(struct_name)s *resource = data;
%(destroy_fields)s
client_resource_close(&resource->base);
}
''' % {
'struct_name': name,
'destroy_fields': '\n'.join(destroy_fields)
}
def object_close_fn_server_c(name, props):
destroy_fields = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
destroy_fields.append('free(resource->state.%s);' % field_name)
return '''static void %(struct_name)s_close(struct sol_flow_node *node, void *data)
{
struct %(struct_name)s *resource = data;
%(destroy_fields)s
server_resource_close(&resource->base);
}
''' % {
'struct_name': name,
'destroy_fields': '\n'.join(destroy_fields)
}
def object_setters_fn_common_c(state_struct_name, name, props, client):
fields = []
for field, descr in props.items():
if client and descr['read_only']:
continue
if 'enum' in descr:
fields.append('''static int
%(struct_name)s_set_%(field_name)s(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct %(struct_name)s *resource = data;
const char *var;
if (!sol_flow_packet_get_string(packet, &var)) {
int16_t val = sol_str_table_lookup_fallback(%(state_struct_name)s_%(field_name)s_tbl,
sol_str_slice_from_str(var), -1);
if (val >= 0) {
resource->state.%(field_name)s = (enum %(state_struct_name)s_%(field_name)s)val;
%(type)s_resource_schedule_update(&resource->base);
return 0;
}
return -ENOENT;
}
return -EINVAL;
}
''' % {
'field_name': field,
'FIELD_NAME': field.upper(),
'state_struct_name': state_struct_name,
'STATE_STRUCT_NAME': state_struct_name.upper(),
'struct_name': name,
'type': 'client' if client else 'server'
})
else:
fields.append('''static int
%(struct_name)s_set_%(field_name)s(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct %(struct_name)s *resource = data;
%(c_type_tmp)s var;
int r;
r = %(c_getter)s(packet, &var);
if (!r) {
resource->state.%(field_name)s = (%(c_type)s) var;
%(type)s_resource_schedule_update(&resource->base);
}
return r;
}
''' % {
'struct_name': name,
'field_name': field,
'c_type': JSON_TO_C[descr['type']],
'c_type_tmp': JSON_TO_C_TMP[descr['type']],
'c_getter': JSON_TO_FLOW_GET_PKT[descr['type']],
'type': 'client' if client else 'server'
})
return '\n'.join(fields)
def object_setters_fn_client_c(state_struct_name, name, props):
return object_setters_fn_common_c(state_struct_name, name, props, True)
def object_setters_fn_server_c(state_struct_name, name, props):
return object_setters_fn_common_c(state_struct_name, name, props, False)
def generate_enums_common_c(name, props):
output = []
for field, descr in props.items():
if 'enum' in descr:
if 'description' in descr:
output.append('''/* %s */''' % descr['description'])
output.append('''enum %(struct_name)s_%(field_name)s { %(items)s };''' % {
'struct_name': name,
'field_name': field,
'items': ', '.join(('%s_%s_%s' % (name, field, item)).upper() for item in descr['enum'])
})
output.append('''static const struct sol_str_table %(struct_name)s_%(field_name)s_tbl[] = {
%(items)s,
{ }
};''' % {
'struct_name': name,
'field_name': field,
'items': ',\n'.join('SOL_STR_TABLE_ITEM(\"%s\", %s_%s_%s)' % (
item, name.upper(), field.upper(), item.upper()) for item in descr['enum'])
})
return '\n'.join(output)
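# Hedged example (hypothetical names): for name='oic_state_light' and a field
# 'power' with enum ['on', 'off'], the function above emits roughly:
#     enum oic_state_light_power { OIC_STATE_LIGHT_POWER_ON,
#                                  OIC_STATE_LIGHT_POWER_OFF };
#     static const struct sol_str_table oic_state_light_power_tbl[] = {
#         SOL_STR_TABLE_ITEM("on", OIC_STATE_LIGHT_POWER_ON),
#         SOL_STR_TABLE_ITEM("off", OIC_STATE_LIGHT_POWER_OFF),
#         { }
#     };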
def generate_object_client_c(resource_type, state_struct_name, name, props):
return """struct %(struct_name)s {
struct client_resource base;
struct %(state_struct_name)s state;
};
%(serialize_fn)s
%(deserialize_fn)s
%(inform_flow_fn)s
%(open_fn)s
%(close_fn)s
%(setters_fn)s
""" % {
'state_struct_name': state_struct_name,
'struct_name': name,
'serialize_fn': object_serialize_fn_client_c(state_struct_name, name, props),
'deserialize_fn': object_deserialize_fn_client_c(state_struct_name, name, props),
'inform_flow_fn': object_inform_flow_fn_client_c(state_struct_name, name, props),
'open_fn': object_open_fn_client_c(state_struct_name, resource_type, name, props),
'close_fn': object_close_fn_client_c(name, props),
'setters_fn': object_setters_fn_client_c(state_struct_name, name, props)
}
def generate_object_server_c(resource_type, state_struct_name, name, props):
return """struct %(struct_name)s {
struct server_resource base;
struct %(state_struct_name)s state;
};
%(serialize_fn)s
%(deserialize_fn)s
%(inform_flow_fn)s
%(open_fn)s
%(close_fn)s
%(setters_fn)s
""" % {
'struct_name': name,
'state_struct_name': state_struct_name,
'serialize_fn': object_serialize_fn_server_c(state_struct_name, name, props),
'deserialize_fn': object_deserialize_fn_server_c(state_struct_name, name, props),
'inform_flow_fn': object_inform_flow_fn_server_c(state_struct_name, name, props),
'open_fn': object_open_fn_server_c(state_struct_name, resource_type, name, props),
'close_fn': object_close_fn_server_c(name, props),
'setters_fn': object_setters_fn_server_c(state_struct_name, name, props)
}
def generate_object_common_c(name, props):
return """%(enums)s
struct %(struct_name)s {
%(struct_fields)s
};
%(deserialize_fn)s
""" % {
'enums': generate_enums_common_c(name, props),
'struct_name': name,
'struct_fields': object_fields_common_c(name, name, props),
'deserialize_fn': object_deserialize_fn_common_c(name, props),
}
def generate_object_json(resource_type, struct_name, node_name, title, props, server):
in_ports = []
for prop_name, prop_descr in props.items():
if not server and prop_descr['read_only']:
continue
in_ports.append({
'data_type': JSON_TO_SOL_JSON[prop_descr.get('type', 'string')],
'description': prop_descr.get('description', '???'),
'methods': {
'process': '%s_set_%s' % (struct_name, prop_name)
},
'name': 'IN_%s' % prop_name.upper()
})
if server:
out_ports = []
else:
out_ports = [{
'data_type': 'boolean',
'description': 'Outputs true if resource was found, false if not, or if unreachable',
'name': 'FOUND'
}]
for prop_name, prop_descr in props.items():
out_ports.append({
'data_type': JSON_TO_SOL_JSON[prop_descr.get('type', 'string')],
'description': prop_descr.get('description', '???'),
'name': 'OUT_%s' % prop_name.upper()
})
output = {
'methods': {
'open': '%s_open' % struct_name,
'close': '%s_close' % struct_name
},
'private_data_type': struct_name,
'name': node_name,
'url': 'http://solettaproject.org/doc/latest/components/%s.html' % node_name.replace('/', '-')
}
if server:
output.update({
'category': 'iot/server',
'description': 'OIC Server (%s)' % title
})
else:
output.update({
'category': 'iot/client',
'description': 'OIC Client (%s)' % title,
'options': {
'version': 1,
'members': [
{
'data_type': 'string',
'description': 'Hardware address of the device (MAC address, etc)',
'name': 'hwaddr'
}
]
}
})
if in_ports:
output['in_ports'] = in_ports
if out_ports:
output['out_ports'] = out_ports
return output
def generate_object(rt, title, props):
def type_value(item):
return '%s %s' % (get_type_from_property(item[1]), item[0])
resource_type = rt
if rt.startswith('oic.r.'):
rt = rt[len('oic.r.'):]
elif rt.startswith('core.'):
rt = rt[len('core.'):]
c_identifier = rt.replace(".", "_").lower()
flow_identifier = rt.replace(".", "-").lower()
client_node_name = "oic/client-%s" % flow_identifier
client_struct_name = "oic_client_%s" % c_identifier
server_node_name = "oic/server-%s" % flow_identifier
server_struct_name = "oic_server_%s" % c_identifier
state_struct_name = "oic_state_%s" % c_identifier
new_props = OrderedDict()
for k, v in sorted(props.items(), key=type_value):
new_props[k] = v
props = new_props
retval = {
'c_common': generate_object_common_c(state_struct_name, props),
'c_client': generate_object_client_c(resource_type, state_struct_name, client_struct_name, props),
'c_server': generate_object_server_c(resource_type, state_struct_name, server_struct_name, props),
'json_client': generate_object_json(resource_type, client_struct_name, client_node_name, title, props, False),
'json_server': generate_object_json(resource_type, server_struct_name, server_node_name, title, props, True)
}
return retval
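# Naming scheme used above, shown for a hypothetical resource type
# 'oic.r.switch.binary': the 'oic.r.' prefix is dropped, dots become '_' for C
# identifiers and '-' for flow node names, giving e.g.
#     client node 'oic/client-switch-binary' / struct 'oic_client_switch_binary'
#     server node 'oic/server-switch-binary' / struct 'oic_server_switch_binary'
#     shared state struct 'oic_state_switch_binary'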
def generate_for_schema(directory, path):
j = load_json_schema(directory, path)
for rt, defn in j.items():
if not (rt.startswith("oic.r.") or rt.startswith("core.")):
raise ValueError("not an OIC resource definition")
if defn.get('type') == 'object':
yield generate_object(rt, defn['title'], defn['properties'])
def master_json_as_string(generated):
master_json = {
'name': 'oic',
'meta': {
'author': 'Intel Corporation',
'license': 'BSD 3-Clause',
'version': '1'
},
'types': [t['json_server'] for t in generated] + [t['json_client'] for t in generated]
}
return json.dumps(master_json, indent=4)
def master_c_as_string(generated):
generated = list(generated)
code = '''#include <arpa/inet.h>
#include <errno.h>
#include <math.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include "oic-gen.h"
#include "sol-coap.h"
#include "sol-json.h"
#include "sol-mainloop.h"
#include "sol-missing.h"
#include "sol-oic-client.h"
#include "sol-oic-server.h"
#include "sol-str-slice.h"
#include "sol-str-table.h"
#include "sol-util.h"
#define DEFAULT_UDP_PORT 5683
#define MULTICAST_ADDRESS_IPv4 "224.0.1.187"
#define MULTICAST_ADDRESS_IPv6_LOCAL "ff02::fd"
#define MULTICAST_ADDRESS_IPv6_SITE "ff05::fd"
#define FIND_PERIOD_MS 5000
#define UPDATE_TIMEOUT_MS 50
struct client_resource;
struct server_resource;
struct client_resource_funcs {
uint8_t *(*serialize)(struct client_resource *resource, uint16_t *length);
int (*deserialize)(struct client_resource *resource, const uint8_t *payload, uint16_t payload_len);
void (*inform_flow)(struct client_resource *resource);
int found_port;
};
struct server_resource_funcs {
uint8_t *(*serialize)(struct server_resource *resource, uint16_t *length);
int (*deserialize)(struct server_resource *resource, const uint8_t *payload, uint16_t payload_len);
void (*inform_flow)(struct server_resource *resource);
};
struct client_resource {
struct sol_flow_node *node;
const struct client_resource_funcs *funcs;
struct sol_oic_resource *resource;
struct sol_timeout *find_timeout;
struct sol_timeout *update_schedule_timeout;
struct sol_oic_client client;
const char *rt;
char *hwaddr;
};
struct server_resource {
struct sol_flow_node *node;
const struct server_resource_funcs *funcs;
struct sol_coap_resource *coap;
struct sol_timeout *update_schedule_timeout;
char *endpoint;
struct sol_oic_resource_type oic;
};
static struct sol_network_link_addr multicast_ipv4, multicast_ipv6_local, multicast_ipv6_site;
static bool multicast_addresses_initialized = false;
static bool
initialize_multicast_addresses_once(void)
{
if (multicast_addresses_initialized)
return true;
multicast_ipv4 = (struct sol_network_link_addr) { .family = AF_INET, .port = DEFAULT_UDP_PORT };
if (inet_pton(AF_INET, MULTICAST_ADDRESS_IPv4, &multicast_ipv4.addr) < 0) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
multicast_ipv6_local = (struct sol_network_link_addr) { .family = AF_INET6, .port = DEFAULT_UDP_PORT };
if (inet_pton(AF_INET6, MULTICAST_ADDRESS_IPv6_LOCAL, &multicast_ipv6_local.addr) < 0) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
multicast_ipv6_site = (struct sol_network_link_addr) { .family = AF_INET6, .port = DEFAULT_UDP_PORT };
if (inet_pton(AF_INET6, MULTICAST_ADDRESS_IPv6_SITE, &multicast_ipv6_site.addr) < 0) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
return true;
}
/* FIXME: These should go into sol-network so it's OS-agnostic. */
static bool
find_device_by_hwaddr_arp_cache(const char *hwaddr, struct sol_network_link_addr *addr)
{
static const size_t hwaddr_len = sizeof("00:00:00:00:00:00") - 1;
FILE *arpcache;
char buffer[128];
bool success = false;
arpcache = fopen("/proc/net/arp", "re");
if (!arpcache) {
SOL_WRN("Could not open arp cache file");
return false;
}
/* IP address HW type Flags HW address Mask Device */
if (!fgets(buffer, sizeof(buffer), arpcache)) {
SOL_WRN("Could not discard header line from arp cache file");
goto out;
}
/* 0000000000011111111122222222223333333333444444444455555555556666666666777777 */
/* 0123456789012345678901234567890123456789012345678901234567890123456789012345 */
/* xxx.xxx.xxx.xxx 0x0 0x0 00:00:00:00:00:00 * eth0 */
while (fgets(buffer, sizeof(buffer), arpcache)) {
buffer[58] = '\\0';
if (strncmp(&buffer[41], hwaddr, hwaddr_len))
continue;
buffer[15] = '\\0';
if (inet_pton(AF_INET, buffer, &addr->addr) < 0) {
SOL_WRN("Could not parse IP address '%%s'", buffer);
goto out;
}
SOL_INF("Found device %%s with IP address %%s", hwaddr, buffer);
success = true;
break;
}
out:
fclose(arpcache);
return success;
}
static bool
link_has_address(const struct sol_network_link *link, const struct sol_network_link_addr *addr)
{
struct sol_network_link_addr *iter;
uint16_t idx;
SOL_VECTOR_FOREACH_IDX(&link->addrs, iter, idx) {
if (sol_network_link_addr_eq(addr, iter))
return true;
}
return false;
}
static bool
has_link_with_address(const struct sol_network_link_addr *addr)
{
const struct sol_vector *links = sol_network_get_available_links();
struct sol_network_link *link;
uint16_t idx;
if (!links)
return false;
SOL_VECTOR_FOREACH_IDX(links, link, idx) {
if (link_has_address(link, addr))
return true;
}
return false;
}
static bool
find_device_by_hwaddr_ipv4(const char *hwaddr, struct sol_network_link_addr *addr)
{
if (has_link_with_address(addr))
return true;
return find_device_by_hwaddr_arp_cache(hwaddr, addr);
}
static bool
find_device_by_hwaddr_ipv6(const char *hwaddr, struct sol_network_link_addr *addr)
{
char addrstr[SOL_INET_ADDR_STRLEN] = {0};
if (!sol_network_addr_to_str(addr, addrstr, sizeof(addrstr))) {
SOL_WRN("Could not convert network address to string");
return false;
}
if (!strncmp(addrstr, "::ffff:", sizeof("::ffff:") - 1)) {
struct sol_network_link_addr tentative_addr = { .family = AF_INET };
const char *ipv4addr = addrstr + sizeof("::ffff:") - 1;
if (inet_pton(tentative_addr.family, ipv4addr, &tentative_addr.addr) < 0)
return false;
return find_device_by_hwaddr_ipv4(hwaddr, &tentative_addr);
}
/* Link local format
* MAC address: xx:xx:xx:xx:xx:xx
* IPv6 Link local address: fe80::xyxx:xxff:fexx:xxxx
* 0000000000111111111122222
* 0123456789012345678901234
*/
if (strncmp(addrstr, "fe80::", sizeof("fe80::") - 1))
goto not_link_local;
if (strncmp(&addrstr[13], "ff:fe", sizeof("ff:fe") - 1))
goto not_link_local;
/* FIXME: There's one additional check for the last byte that's missing here, but
 * this is temporary until proper NDP is implemented. */
return (hwaddr[16] == addrstr[23] && hwaddr[15] == addrstr[22])
&& (hwaddr[13] == addrstr[21] && hwaddr[12] == addrstr[20])
&& (hwaddr[10] == addrstr[18] && hwaddr[9] == addrstr[17])
&& (hwaddr[7] == addrstr[11] && hwaddr[6] == addrstr[10])
&& (hwaddr[4] == addrstr[8] && hwaddr[3] == addrstr[7]);
not_link_local:
SOL_WRN("NDP not implemented and client has an IPv6 address: %%s. Ignoring.", addrstr);
return false;
}
static bool
find_device_by_hwaddr(const char *hwaddr, struct sol_network_link_addr *addr)
{
if (addr->family == AF_INET)
return find_device_by_hwaddr_ipv4(hwaddr, addr);
if (addr->family == AF_INET6)
return find_device_by_hwaddr_ipv6(hwaddr, addr);
SOL_WRN("Unknown address family: %%d", addr->family);
return false;
}
static bool
client_resource_implements_type(struct sol_oic_resource *oic_res, const char *resource_type)
{
struct sol_str_slice rt = SOL_STR_SLICE_STR(resource_type, strlen(resource_type));
struct sol_str_slice *type;
uint16_t idx;
SOL_VECTOR_FOREACH_IDX(&oic_res->types, type, idx) {
if (sol_str_slice_eq(*type, rt))
return true;
}
return false;
}
static void
state_changed(struct sol_oic_client *oic_cli, const struct sol_network_link_addr *cliaddr,
const struct sol_str_slice *href, const struct sol_str_slice *payload, void *data)
{
struct client_resource *resource = data;
int r;
if (!sol_str_slice_eq(*href, resource->resource->href)) {
SOL_WRN("Received response to href=`%%.*s`, but resource href is `%%.*s`",
SOL_STR_SLICE_PRINT(*href),
SOL_STR_SLICE_PRINT(resource->resource->href));
return;
}
if (!sol_network_link_addr_eq(cliaddr, &resource->resource->addr)) {
char resaddr[SOL_INET_ADDR_STRLEN] = {0};
char respaddr[SOL_INET_ADDR_STRLEN] = {0};
if (!sol_network_addr_to_str(&resource->resource->addr, resaddr, sizeof(resaddr))) {
SOL_WRN("Could not convert network address to string");
return;
}
if (!sol_network_addr_to_str(cliaddr, respaddr, sizeof(respaddr))) {
SOL_WRN("Could not convert network address to string");
return;
}
SOL_WRN("Expecting response from %%s, got from %%s, ignoring", resaddr, respaddr);
return;
}
r = resource->funcs->deserialize(resource, (const uint8_t *)payload->data, payload->len);
if (r >= 0)
resource->funcs->inform_flow(resource);
}
static void
found_resource(struct sol_oic_client *oic_cli, struct sol_oic_resource *oic_res, void *data)
{
struct client_resource *resource = data;
int r;
/* Some OIC device sent this node a discovery response packet but node's already set up. */
if (resource->resource)
goto out;
/* Not the droid we're looking for. */
if (!find_device_by_hwaddr(resource->hwaddr, &oic_res->addr))
goto out;
/* FIXME: Should this check move to sol-oic-client? Does it actually make sense? */
if (resource->rt && !client_resource_implements_type(oic_res, resource->rt)) {
SOL_WRN("Received resource that does not implement rt=%%s, ignoring", resource->rt);
goto out;
}
SOL_INF("Found resource matching hwaddr %%s", resource->hwaddr);
resource->resource = sol_oic_resource_ref(oic_res);
if (resource->find_timeout) {
sol_timeout_del(resource->find_timeout);
resource->find_timeout = NULL;
}
r = sol_oic_client_resource_set_observable(oic_cli, oic_res, state_changed, resource, true);
if (!r)
SOL_WRN("Could not observe resource as requested");
out:
r = sol_flow_send_boolean_packet(resource->node, resource->funcs->found_port, !!resource->resource);
if (r < 0)
SOL_WRN("Could not send flow packet, will try again");
}
static void
send_discovery_packets(struct client_resource *resource)
{
sol_oic_client_find_resource(&resource->client, &multicast_ipv4, resource->rt,
found_resource, resource);
sol_oic_client_find_resource(&resource->client, &multicast_ipv6_local, resource->rt,
found_resource, resource);
sol_oic_client_find_resource(&resource->client, &multicast_ipv6_site, resource->rt,
found_resource, resource);
}
static bool
find_timer(void *data)
{
struct client_resource *resource = data;
if (resource->resource) {
SOL_INF("Timer expired when node already configured; disabling");
resource->find_timeout = NULL;
return false;
}
send_discovery_packets(resource);
return true;
}
static char *
create_endpoint(void)
{
static int endpoint_id = 0;
char *endpoint;
if (asprintf(&endpoint, "/sol/%%x", endpoint_id) < 0)
return NULL;
endpoint_id++;
return endpoint;
}
static bool
server_resource_perform_update(void *data)
{
struct server_resource *resource = data;
uint8_t *payload;
uint16_t payload_len;
SOL_NULL_CHECK(resource->funcs->serialize, false);
payload = resource->funcs->serialize(resource, &payload_len);
if (!payload) {
SOL_WRN("Error while serializing update message");
} else {
resource->funcs->inform_flow(resource);
sol_oic_notify_observers(resource->coap, payload, payload_len);
free(payload);
}
resource->update_schedule_timeout = NULL;
return false;
}
static void
server_resource_schedule_update(struct server_resource *resource)
{
if (resource->update_schedule_timeout)
return;
resource->update_schedule_timeout = sol_timeout_add(UPDATE_TIMEOUT_MS,
server_resource_perform_update, resource);
}
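/* Note added for clarity: both the server (above) and client (below)
 * schedule_update helpers coalesce updates; repeated setter calls within
 * UPDATE_TIMEOUT_MS result in a single serialization and a single outgoing
 * update. */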
static sol_coap_responsecode_t
server_handle_put(const struct sol_network_link_addr *cliaddr, const void *data,
uint8_t *payload, uint16_t *payload_len)
{
const struct server_resource *resource = data;
int r;
if (!resource->funcs->deserialize)
return SOL_COAP_RSPCODE_NOT_IMPLEMENTED;
r = resource->funcs->deserialize((struct server_resource *)resource, payload, *payload_len);
if (!r) {
server_resource_schedule_update((struct server_resource *)resource);
*payload_len = 0;
return SOL_COAP_RSPCODE_CHANGED;
}
return SOL_COAP_RSPCODE_PRECONDITION_FAILED;
}
static sol_coap_responsecode_t
server_handle_get(const struct sol_network_link_addr *cliaddr, const void *data,
uint8_t *payload, uint16_t *payload_len)
{
const struct server_resource *resource = data;
uint16_t serialized_len;
uint8_t *serialized;
if (!resource->funcs->serialize)
return SOL_COAP_RSPCODE_NOT_IMPLEMENTED;
serialized = resource->funcs->serialize((struct server_resource*)resource, &serialized_len);
if (!serialized)
return SOL_COAP_RSPCODE_INTERNAL_ERROR;
if (serialized_len > *payload_len) {
free(serialized);
return SOL_COAP_RSPCODE_INTERNAL_ERROR;
}
memcpy(payload, serialized, serialized_len);
*payload_len = serialized_len;
free(serialized);
return SOL_COAP_RSPCODE_CONTENT;
}
static int
server_resource_init(struct server_resource *resource, struct sol_flow_node *node,
struct sol_str_slice resource_type, struct sol_str_slice defn_endpoint,
const struct server_resource_funcs *funcs)
{
struct sol_oic_device_definition *def;
SOL_LOG_INTERNAL_INIT_ONCE;
if (!sol_oic_server_init(DEFAULT_UDP_PORT)) {
SOL_WRN("Could not create %%.*s server", SOL_STR_SLICE_PRINT(resource_type));
return -ENOTCONN;
}
resource->endpoint = create_endpoint();
SOL_NULL_CHECK(resource->endpoint, -ENOMEM);
resource->node = node;
resource->update_schedule_timeout = NULL;
resource->funcs = funcs;
resource->oic = (struct sol_oic_resource_type) {
.api_version = SOL_OIC_RESOURCE_TYPE_API_VERSION,
.endpoint = sol_str_slice_from_str(resource->endpoint),
.resource_type = resource_type,
.iface = SOL_STR_SLICE_LITERAL("oc.mi.def"),
.get = { .handle = server_handle_get },
.put = { .handle = server_handle_put },
};
def = sol_oic_server_register_definition(defn_endpoint, resource_type,
SOL_COAP_FLAGS_OC_CORE | SOL_COAP_FLAGS_WELL_KNOWN);
if (!def)
goto out;
resource->coap = sol_oic_device_definition_register_resource_type(def,
&resource->oic, resource, SOL_COAP_FLAGS_OC_CORE | SOL_COAP_FLAGS_OBSERVABLE);
if (!resource->coap)
goto out;
return 0;
out:
sol_oic_server_release();
free(resource->endpoint);
return -EINVAL;
}
static void
server_resource_close(struct server_resource *resource)
{
if (resource->update_schedule_timeout)
sol_timeout_del(resource->update_schedule_timeout);
free(resource->endpoint);
sol_oic_server_release();
}
static int
client_resource_init(struct sol_flow_node *node, struct client_resource *resource, const char *resource_type,
const char *hwaddr, const struct client_resource_funcs *funcs)
{
SOL_LOG_INTERNAL_INIT_ONCE;
if (!initialize_multicast_addresses_once()) {
SOL_ERR("Could not initialize multicast addresses");
return -ENOTCONN;
}
assert(resource_type);
if (!hwaddr)
return -EINVAL;
resource->client.server = sol_coap_server_new(0);
SOL_NULL_CHECK(resource->client.server, -ENOMEM);
resource->hwaddr = strdup(hwaddr);
SOL_NULL_CHECK_GOTO(resource->hwaddr, nomem);
resource->node = node;
resource->find_timeout = NULL;
resource->update_schedule_timeout = NULL;
resource->resource = NULL;
resource->funcs = funcs;
resource->rt = resource_type;
SOL_INF("Sending multicast packets to find resource with hwaddr %%s (rt=%%s)",
resource->hwaddr, resource->rt);
resource->find_timeout = sol_timeout_add(FIND_PERIOD_MS, find_timer, resource);
if (resource->find_timeout) {
/* Perform a find now instead of waiting FIND_PERIOD_MS the first time. If the
* resource is found in the mean time, the timeout will be automatically disabled. */
send_discovery_packets(resource);
return 0;
}
SOL_ERR("Could not create timeout to find resource");
free(resource->hwaddr);
nomem:
sol_coap_server_unref(resource->client.server);
return -ENOMEM;
}
static void
client_resource_close(struct client_resource *resource)
{
free(resource->hwaddr);
if (resource->find_timeout)
sol_timeout_del(resource->find_timeout);
if (resource->update_schedule_timeout)
sol_timeout_del(resource->update_schedule_timeout);
if (resource->resource) {
bool r = sol_oic_client_resource_set_observable(&resource->client, resource->resource,
NULL, NULL, false);
if (!r)
SOL_WRN("Could not unobserve resource");
sol_oic_resource_unref(resource->resource);
}
sol_coap_server_unref(resource->client.server);
}
static bool
client_resource_perform_update(void *data)
{
struct client_resource *resource = data;
uint8_t *payload;
uint16_t payload_len;
SOL_NULL_CHECK_GOTO(resource->resource, disable_timeout);
SOL_NULL_CHECK_GOTO(resource->funcs->serialize, disable_timeout);
payload = resource->funcs->serialize(resource, &payload_len);
if (!payload) {
SOL_WRN("Error while serializing update message");
} else {
int r = sol_oic_client_resource_request(&resource->client, resource->resource,
SOL_COAP_METHOD_PUT, payload, payload_len, NULL, NULL);
free(payload);
if (r < 0) {
SOL_WRN("Could not send update request to resource, will try again");
return true;
}
}
disable_timeout:
resource->update_schedule_timeout = NULL;
return false;
}
static void
client_resource_schedule_update(struct client_resource *resource)
{
if (resource->update_schedule_timeout)
return;
resource->update_schedule_timeout = sol_timeout_add(UPDATE_TIMEOUT_MS,
client_resource_perform_update, resource);
}
static const char escapable_chars[] = {'\\\\', '\\"', '/', '\\b', '\\f', '\\n', '\\r', '\\t'};
SOL_ATTR_USED static size_t
calculate_escaped_len(const char *s)
{
size_t len = 0;
for (; *s; s++) {
if (memchr(escapable_chars, *s, sizeof(escapable_chars)))
len++;
len++;
}
return len + 1;
}
SOL_ATTR_USED static char *
escape_json_string(const char *s, char *buf)
{
char *out = buf;
for (; *s; s++) {
if (memchr(escapable_chars, *s, sizeof(escapable_chars))) {
*buf++ = '\\\\';
switch (*s) {
case '"': *buf++ = '"'; break;
case '\\\\': *buf++ = '\\\\'; break;
case '/': *buf++ = '/'; break;
case '\\b': *buf++ = 'b'; break;
case '\\f': *buf++ = 'f'; break;
case '\\n': *buf++ = 'n'; break;
case '\\r': *buf++ = 'r'; break;
case '\\t': *buf++ = 't'; break;
}
} else {
*buf++ = *s;
}
}
*buf++ = '\\0';
return out;
}
#define ESCAPE_STRING(s) ({ \\
char buffer ## __COUNT__[calculate_escaped_len(s)]; \\
escape_json_string(s, buffer ## __COUNT__); \\
})
SOL_ATTR_USED static bool
json_token_to_int32(struct sol_json_token *token, int32_t *out)
{
long val;
char *endptr;
if (sol_json_token_get_type(token) != SOL_JSON_TYPE_NUMBER)
return false;
errno = 0;
val = strtol(token->start, &endptr, 10);
if (errno)
return false;
if (endptr != token->end)
return false;
if (*endptr != 0)
return false;
if ((long)(int32_t) val != val)
return false;
*out = (int32_t)val;
return true;
}
SOL_ATTR_USED static bool
json_token_to_float(struct sol_json_token *token, float *out)
{
float val;
char *endptr;
if (sol_json_token_get_type(token) != SOL_JSON_TYPE_NUMBER)
return false;
errno = 0;
val = strtof(token->start, &endptr);
if (errno)
return false;
if (endptr != token->end)
return false;
if (*endptr != 0)
return false;
if (isgreaterequal(val, HUGE_VALF))
return false;
*out = val;
return true;
}
SOL_ATTR_USED static bool
json_token_to_string(struct sol_json_token *token, char **out)
{
if (sol_json_token_get_type(token) != SOL_JSON_TYPE_STRING)
return false;
free(*out);
*out = strndup(token->start, token->end - token->start);
return !!*out;
}
SOL_ATTR_USED static bool
json_token_to_bool(struct sol_json_token *token, bool *out)
{
if (sol_json_token_get_type(token) == SOL_JSON_TYPE_TRUE)
*out = true;
else if (sol_json_token_get_type(token) == SOL_JSON_TYPE_FALSE)
*out = false;
else
return false;
return true;
}
%(generated_c_common)s
%(generated_c_client)s
%(generated_c_server)s
#include "oic-gen.c"
''' % {
'generated_c_common': '\n'.join(t['c_common'] for t in generated),
'generated_c_client': '\n'.join(t['c_client'] for t in generated),
'generated_c_server': '\n'.join(t['c_server'] for t in generated),
}
return code.replace('\n\n\n', '\n')
if __name__ == '__main__':
def seems_schema(path):
return path.endswith('.json') and (path.startswith('oic.r.') or path.startswith('core.'))
generated = []
print('Generating code for schemas: ', end='')
for path in (f for f in os.listdir(sys.argv[1]) if seems_schema(f)):
print(path, end=', ')
try:
for code in generate_for_schema(sys.argv[1], path):
generated.append(code)
except KeyError as e:
if e.args[0] == 'array':
print("(arrays unsupported)", end=' ')
else:
raise e
except Exception as e:
print('Ignoring due to exception in generator. Traceback follows:')
traceback.print_exc(file=sys.stderr)
continue
print('\nWriting master JSON: %s' % sys.argv[2])
open(sys.argv[2], 'w+').write(master_json_as_string(generated))
print('Writing C: %s' % sys.argv[3])
open(sys.argv[3], 'w+').write(master_c_as_string(generated))
if os.path.exists('/usr/bin/indent'):
print('Indenting generated C.')
os.system("/usr/bin/indent -kr -l120 '%s'" % sys.argv[3])
print('Done.')
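# Illustrative invocation only (the script and file names here are assumptions, not
# taken from any build system): the generator expects the schema directory, the
# master JSON output path and the generated C output path as positional arguments,
# e.g.
#
#     python oic-gen.py /path/to/oic/json-schemas oic-master.json oic-gen.c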
|
|
###########################################
# VCZ calibration (fine landscape) FLUX dance 1
###########################################
# Align flux pulses
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X3, flux_lm_D8,
flux_lm_D6, flux_lm_X2],
which_gate= ['NE', 'SW',
'SW', 'NE'],
fl_lm_park = [flux_lm_Z1, flux_lm_D7, flux_lm_Z4],
speed_limit = [2.9583333333333334e-08,
2.75e-08])
swf2.set_parameter(4)
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X1, flux_lm_D2],
which_gate= ['NE', 'SW'],
fl_lm_park = [flux_lm_D1],
speed_limit = [2.75e-08])
swf2.set_parameter(6)
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# flux-dance 1
## input from user
pairs = [['X3', 'D8'], ['D6', 'X2'], ['X1', 'D2']]
which_gate= [['NE', 'SW'],['SW', 'NE'], ['NE', 'SW']]
parked_qubits = ['D7', 'Z1', 'Z4', 'D1']
cfg_amps = [0.28500000000000003,0.19302332066356387,0.25166666666666665]
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
# set CZ parameters
for i,flux_lm_target in enumerate(flux_lms_target):
flux_lm_target.cfg_awg_channel_amplitude(cfg_amps[i])
flux_lm_target.set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][0]), 0.5)
flux_lms_control[i].set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][1]), 0)
# Set park parameters
for i,flux_lm_park in enumerate(flux_lms_park):
flux_lm_park.cfg_awg_channel_amplitude(.3)
flux_lm_park.park_amp(.5)
flux_lm_park.park_double_sided(True)
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-1',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
Sw_functions_2 = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_fine_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf2 = swf.multi_sweep_function(Sw_functions_2, sweep_point_ratios= [1, 1, 1])
MC.live_plot_enabled(True)
nested_MC.live_plot_enabled(True)
nested_MC.cfg_clipping_mode(True)
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.97, 1.03, 21))
nested_MC.set_sweep_points_2D(np.linspace(0, 1, 11))
label = 'VCZ_2D_{}_fine_sweep'.format(pairs)
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (fine landscape) FLUX dance 2
###########################################
# Align flux pulses
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X3, flux_lm_D7,
flux_lm_D5, flux_lm_X2,
flux_lm_X1, flux_lm_D1],
which_gate= ['NW', 'SE',
'SE', 'NW',
'NW', 'SE'],
fl_lm_park = [flux_lm_Z1, flux_lm_D8, flux_lm_Z4, flux_lm_D2],
speed_limit = [2.9583333333333334e-08,
2.4166666666666668e-08,
2.5416666666666666e-08])
swf2.set_parameter(5)
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# flux-dance 2
## input from user
pairs = [['X3', 'D7'], ['D5', 'X2'], ['X1', 'D1']]
which_gate= [['NW', 'SE'],['SE', 'NW'], ['NW', 'SE']]
parked_qubits = ['D8', 'Z1', 'Z4', 'D2']
cfg_amps = [0.3242724012703858,0.16687470158591108,0.27975182997855896]
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
# set CZ parameters
for i,flux_lm_target in enumerate(flux_lms_target):
flux_lm_target.cfg_awg_channel_amplitude(cfg_amps[i])
flux_lm_target.set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][0]), 0.5)
flux_lms_control[i].set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][1]), 0)
# Set park parameters
for i,flux_lm_park in enumerate(flux_lms_park):
flux_lm_park.cfg_awg_channel_amplitude(.3)
flux_lm_park.park_amp(.5)
flux_lm_park.park_double_sided(True)
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-2',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
Sw_functions_2 = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_fine_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf2 = swf.multi_sweep_function(Sw_functions_2, sweep_point_ratios= [1, 1, 1])
MC.live_plot_enabled(True)
nested_MC.live_plot_enabled(True)
nested_MC.cfg_clipping_mode(True)
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 41))
nested_MC.set_sweep_points_2D(np.linspace(0, 1, 21))
label = 'VCZ_2D_{}_fine_sweep'.format(pairs)
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (fine landscape) FLUX dance 3
###########################################
# Align flux pulses
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_D5, flux_lm_X3,
flux_lm_X2, flux_lm_D3],
which_gate= ['NW', 'SE',
'SE', 'NW'],
fl_lm_park = [flux_lm_Z1, flux_lm_Z4, flux_lm_D2],
speed_limit = [2.75e-08, 2.75e-8])
swf2.set_parameter(8)
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X4, flux_lm_D9],
which_gate= ['SE', 'NW'],
fl_lm_park = [flux_lm_D8],
speed_limit = [2.75e-8])
swf2.set_parameter(5)
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# flux-dance 3
pairs = [['X4', 'D9'], ['D5', 'X3'], ['X2', 'D3']]
which_gate= [['SE', 'NW'],['NW', 'SE'], ['SE', 'NW']]
parked_qubits = ['D8', 'Z1', 'Z4', 'D2']
cfg_amps = [] # input
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
# set CZ parameters
for i,flux_lm_target in enumerate(flux_lms_target):
flux_lm_target.cfg_awg_channel_amplitude(cfg_amps[i])
flux_lm_target.set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][0]), 0.5)
flux_lms_control[i].set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][1]), 0)
# Set park parameters
for i,flux_lm_park in enumerate(flux_lms_park):
flux_lm_park.cfg_awg_channel_amplitude(.3)
flux_lm_park.park_amp(.5)
flux_lm_park.park_double_sided(True)
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-3',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
Sw_functions_2 = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_fine_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf2 = swf.multi_sweep_function(Sw_functions_2, sweep_point_ratios= [1, 1, 1])
MC.live_plot_enabled(True)
nested_MC.live_plot_enabled(True)
nested_MC.cfg_clipping_mode(True)
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 41))
nested_MC.set_sweep_points_2D(np.linspace(0, 1, 21))
label = 'VCZ_2D_{}_fine_sweep'.format(pairs)
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (fine landscape) FLUX dance 4
###########################################
# Align flux pulses
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X4, flux_lm_D8,
flux_lm_D4, flux_lm_X3],
which_gate= ['SW', 'NE',
'NE', 'SW'],
fl_lm_park = [flux_lm_D9, flux_lm_Z1, flux_lm_Z3],
speed_limit = [2.75e-08,
2.9583333333333334e-08]) # input
swf2.set_parameter(7) # input
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X2, flux_lm_D2],
which_gate= ['SW', 'NE'],
fl_lm_park = [flux_lm_D3],
speed_limit = [2.75e-08]) # input
swf2.set_parameter(3) # input
file_cfg = gc.generate_config(in_filename=input_file,
out_filename=config_fn,
mw_pulse_duration=20,
ro_duration=2200,
flux_pulse_duration=60,
init_duration=200000)
# flux-dance 4
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['X4', 'D8'], ['D4', 'X3'], ['X2', 'D2']]
which_gate= [['SW', 'NE'],['NE', 'SW'], ['SW', 'NE']]
parked_qubits = ['D9', 'Z1', 'Z3', 'D3']
cfg_amps = [] # input
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
for qb in parked_qubits]
# set CZ parameters
for i,flux_lm_target in enumerate(flux_lms_target):
flux_lm_target.cfg_awg_channel_amplitude(cfg_amps[i])
flux_lm_target.set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][0]), 0.5)
flux_lms_control[i].set("vcz_amp_dac_at_11_02_{}".format(which_gate[i][1]), 0)
# Set park parameters
for i,flux_lm_park in enumerate(flux_lms_park):
flux_lm_park.cfg_awg_channel_amplitude(.3)
flux_lm_park.park_amp(.5)
flux_lm_park.park_double_sided(True)
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'flux_codeword': 'flux-dance-4',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
Sw_functions_2 = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_fine_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf2 = swf.multi_sweep_function(Sw_functions_2, sweep_point_ratios= [1, 1, 1])
MC.live_plot_enabled(True)
nested_MC.live_plot_enabled(True)
nested_MC.cfg_clipping_mode(True)
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 41))
nested_MC.set_sweep_points_2D(np.linspace(0, 1, 21))
label = 'VCZ_2D_{}_fine_sweep'.format(pairs)
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
|
|
import logging
from ctf_gameserver.lib.database import transaction_cursor
from ctf_gameserver.lib.exceptions import DBDataError
def get_control_info(db_conn, prohibit_changes=False):
"""
Returns a dictionary containing relevant information about the competition, as stored in the game database.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
cursor.execute('SELECT start, valid_ticks, tick_duration, flag_prefix FROM scoring_gamecontrol')
result = cursor.fetchone()
if result is None:
raise DBDataError('Game control information has not been configured')
return {
'contest_start': result[0],
'valid_ticks': result[1],
'tick_duration': result[2],
'flag_prefix': result[3]
}
def get_service_attributes(db_conn, service_slug, prohibit_changes=False):
"""
Returns ID and name of a service for a given slug.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
cursor.execute('SELECT id, name FROM scoring_service WHERE slug = %s', (service_slug,))
result = cursor.fetchone()
if result is None:
raise DBDataError('Service has not been configured')
return {
'id': result[0],
'name': result[1]
}
def get_current_tick(db_conn, prohibit_changes=False):
"""
Reads the current tick from the game database.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
result = cursor.fetchone()
if result is None:
raise DBDataError('Game control information has not been configured')
return result[0]
def get_check_duration(db_conn, service_id, std_dev_count, prohibit_changes=False):
"""
Estimates the duration of checks for the given service from the average runtime of previous runs and its
standard deviation. We include all previous runs to accommodate Checker Scripts with varying runtimes.
`std_dev_count` is the number of standard deviations to add to the average, i.e. increasing it will lead
to a greater result. Assuming a normal distribution, 2 standard deviations will include ~ 95 % of
previous results.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
cursor.execute('SELECT avg(extract(epoch from (placement_end - placement_start))) + %s *'
' stddev_pop(extract(epoch from (placement_end - placement_start)))'
' FROM scoring_flag, scoring_gamecontrol'
' WHERE service_id = %s AND tick < current_tick', (std_dev_count, service_id))
result = cursor.fetchone()
return result[0]
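# Pure-Python sketch of the estimate computed by the query above (illustrative only,
# not used by the gameserver): mean runtime plus `std_dev_count` population standard
# deviations over the historical placement durations.
#
#     durations = [2.0, 2.4, 1.9, 2.1]   # hypothetical runtimes in seconds
#     mean = sum(durations) / len(durations)
#     stddev = (sum((d - mean) ** 2 for d in durations) / len(durations)) ** 0.5
#     estimate = mean + 2 * stddev       # std_dev_count=2 covers ~95 % of runs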
def get_task_count(db_conn, service_id, prohibit_changes=False):
"""
Returns the total number of tasks for the given service in the current tick.
With our current Controller implementation, this should always be equal to the number of teams.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
cursor.execute('SELECT COUNT(*)'
' FROM scoring_flag flag, scoring_gamecontrol control'
' WHERE flag.tick = control.current_tick'
' AND flag.service_id = %s', (service_id,))
result = cursor.fetchone()
return result[0]
def get_new_tasks(db_conn, service_id, task_count, prohibit_changes=False):
"""
Retrieves the given number of random open check tasks and marks them as in progress.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
# We need a lock on the whole table to prevent deadlocks because of `ORDER BY RANDOM`
# See https://github.com/fausecteam/ctf-gameserver/issues/62
# "There is no UNLOCK TABLE command; locks are always released at transaction end"
cursor.execute('LOCK TABLE scoring_flag IN EXCLUSIVE MODE')
cursor.execute('SELECT flag.id, flag.protecting_team_id, flag.tick, team.net_number'
' FROM scoring_flag flag, scoring_gamecontrol control, registration_team team'
' WHERE flag.placement_start is NULL'
' AND flag.tick = control.current_tick'
' AND flag.service_id = %s'
' AND flag.protecting_team_id = team.user_id'
' ORDER BY RANDOM()'
' LIMIT %s', (service_id, task_count))
tasks = cursor.fetchall()
# Mark placement as in progress
cursor.executemany('UPDATE scoring_flag'
' SET placement_start = NOW()'
' WHERE id = %s', [(task[0],) for task in tasks])
return [{
'team_id': task[1],
'team_net_no': task[3],
'tick': task[2]
} for task in tasks]
def _net_no_to_team_id(cursor, team_net_no, fake_team_id):
cursor.execute('SELECT user_id FROM registration_team WHERE net_number = %s', (team_net_no,))
data = cursor.fetchone()
# Only do this after executing the SQL query, because we want to ensure the query works
if fake_team_id is not None:
return fake_team_id
elif data is None:
return None
return data[0]
def commit_result(db_conn, service_id, team_net_no, tick, result, prohibit_changes=False, fake_team_id=None):
"""
Saves the result from a Checker run to game database.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
team_id = _net_no_to_team_id(cursor, team_net_no, fake_team_id)
if team_id is None:
logging.error('No team found with net number %d, cannot commit result', team_net_no)
return
cursor.execute('INSERT INTO scoring_statuscheck'
' (service_id, team_id, tick, status, timestamp)'
' VALUES (%s, %s, %s, %s, NOW())', (service_id, team_id, tick, result))
# (In case of `prohibit_changes`,) PostgreSQL checks the database grants even if nothing is matched
# by `WHERE`
cursor.execute('UPDATE scoring_flag'
' SET placement_end = NOW()'
' WHERE service_id = %s AND protecting_team_id = %s AND tick = %s', (service_id,
team_id,
tick))
def set_flagid(db_conn, service_id, team_net_no, tick, flagid, prohibit_changes=False, fake_team_id=None):
"""
Stores a Flag ID in database.
In case of conflict, the previous Flag ID gets overwritten.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
team_id = _net_no_to_team_id(cursor, team_net_no, fake_team_id)
if team_id is None:
logging.error('No team found with net number %d, cannot commit result', team_net_no)
return
# (In case of `prohibit_changes`,) PostgreSQL checks the database grants even if nothing is matched
# by `WHERE`
cursor.execute('UPDATE scoring_flag'
' SET flagid = %s'
' WHERE service_id = %s AND protecting_team_id = %s AND tick = %s', (flagid,
service_id,
team_id,
tick))
def load_state(db_conn, service_id, team_net_no, key, prohibit_changes=False):
"""
Loads Checker state data from database.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
cursor.execute('SELECT data FROM scoring_checkerstate state, registration_team team'
' WHERE state.service_id = %s'
' AND state.key = %s'
' AND team.net_number = %s'
' AND state.team_id = team.user_id', (service_id, key, team_net_no))
data = cursor.fetchone()
if data is None:
return None
return data[0]
def store_state(db_conn, service_id, team_net_no, key, data, prohibit_changes=False, fake_team_id=None):
"""
Stores Checker state data in database.
"""
with transaction_cursor(db_conn, prohibit_changes) as cursor:
team_id = _net_no_to_team_id(cursor, team_net_no, fake_team_id)
if team_id is None:
logging.error('No team found with net number %d, cannot store state', team_net_no)
return
# (In case of `prohibit_changes`,) PostgreSQL checks the database grants even if no CONFLICT occurs
cursor.execute('INSERT INTO scoring_checkerstate (service_id, team_id, key, data)'
' VALUES (%s, %s, %s, %s)'
' ON CONFLICT (service_id, team_id, key)'
' DO UPDATE SET data = EXCLUDED.data', (service_id, team_id, key, data))
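# Minimal usage sketch (illustrative; connection handling, the service ID and the
# result code are assumptions): a checker process would typically combine these
# helpers roughly as follows.
#
#     control = get_control_info(db_conn)
#     tick = get_current_tick(db_conn)
#     tasks = get_new_tasks(db_conn, service_id=1,
#                           task_count=get_task_count(db_conn, 1))
#     for task in tasks:
#         # ... run the Checker Script against task['team_net_no'] ...
#         commit_result(db_conn, 1, task['team_net_no'], task['tick'], result=0)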
|
|
#!/usr/bin/python2
# Copyright 2008 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""
This module implements a fuzzer for sel_ldr's ELF parsing / NaCl
module loading functions.
The fuzzer takes as arguments a pre-built nexe and sel_ldr, and will
randomly modify a copy of the nexe and run sel_ldr with the -F flag.
If/when sel_ldr crashes, the copy of the nexe is saved.
"""
from __future__ import with_statement # pre-2.6
import getopt
import os
import random
import re
import signal
import subprocess
import sys
import tempfile
import elf
max_bytes_to_fuzz = 16
default_progress_period = 64
def uniform_fuzz(input_string, nbytes_max):
nbytes = random.randint(1, nbytes_max) # fuzz at least one byte
# pick n distinct values from [0... len(input_string)) uniformly and
# without replacement.
targets = random.sample(xrange(len(input_string)), nbytes)
targets.sort()
# each entry of keepsies is a tuple (a-1,b) of indices indicating
# the non-fuzzed substrings of input_string.
keepsies = zip([-1] + targets,
targets + [len(input_string)])
# the map is essentially a generator of keepsie substrings followed
# by a random byte. joined together -- and throwing away the extra,
# trailing random byte -- is the fuzzed string.
return ''.join(input_string[subrange[0] + 1 : subrange[1]] +
chr(random.randint(0, 255))
for subrange in keepsies)[:-1]
#enddef
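# Worked example (illustrative only): for an input_string of length 6 and
# targets == [1, 4], keepsies == [(-1, 1), (1, 4), (4, 6)], so the generator yields
# input_string[0:1], input_string[2:4] and input_string[5:6], each followed by one
# random byte; dropping the trailing random byte leaves positions 1 and 4 replaced
# with random values and every other byte untouched.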
def simple_fuzz(nexe_elf):
orig = nexe_elf.elf_str
start_offset = nexe_elf.ehdr.phoff
length = nexe_elf.ehdr.phentsize * nexe_elf.ehdr.phnum
end_offset = start_offset + length
return (orig[:start_offset] +
uniform_fuzz(orig[start_offset
:end_offset],
max_bytes_to_fuzz) +
orig[end_offset:])
#enddef
def genius_fuzz(nexe_elf):
print >>sys.stderr, 'Genius fuzzer not implemented yet.'
# parse as phdr and use a distribution that concentrates on certain fields
sys.exit(1 + hash(nexe_elf)) # ARGSUSED
#enddef
available_fuzzers = {
'simple' : simple_fuzz,
'genius' : genius_fuzz,
}
def usage(stream):
print >>stream, """\
Usage: elf_fuzzer.py [-d destination_dir]
[-D destination_for_log_fatal]
[-f fuzzer]
[-i iterations]
[-m max_bytes_to_fuzz]
[-n nexe_original]
[-p progress_output_period]
[-s sel_ldr]
[-S seed_string_for_rng]
-d: Directory in which fuzzed files that caused core dumps are saved.
Default: "."
-D: Directory for saving crashes from LOG_FATAL errors. Default: discarded.
-f: Fuzzer to use. Available fuzzers are:
%s
-i: Number of iterations to fuzz. Default: -1 (infinite).
For use as a large test, set to a finite value.
-m: Maximum number of bytes to change. A random choice of one to this
number of bytes in the fuzz template's program header will be replaced
with a random value.
-n: Nexes to fuzz. Multiple nexes may be specified by using -n repeatedly,
in which case each will be used in turn as the fuzz template.
-p: Progress indicator period. Print a character once every this many fuzzing
runs. Requires verbosity to be at least 1. Default is %d.
-S: Seed_string_for_rng is used to seed the random module's random number
generator; any string will do -- it is hashed.
""" % (', '.join(available_fuzzers.keys()), default_progress_period)
#enddef
def choose_progress_char(num_saved):
return '0123456789abcdef'[num_saved % 16]
def main(argv):
global max_bytes_to_fuzz
sel_ldr_path = None
nexe_path = []
dest_dir = '.'
dest_fatal_dir = None # default: do not save
iterations = -1
fuzzer = 'simple'
verbosity = 0
progress_period = default_progress_period
progress_char = '.'
num_saved = 0
try:
opt_list, args = getopt.getopt(argv[1:], 'd:D:f:i:m:n:p:s:S:v')
except getopt.error, e:
print >>sys.stderr, e
usage(sys.stderr)
return 1
#endtry
for (opt, val) in opt_list:
if opt == '-d':
dest_dir = val
elif opt == '-D':
dest_fatal_dir = val
elif opt == '-f':
if available_fuzzers.has_key(val):
fuzzer = val
else:
print >>sys.stderr, 'No fuzzer:', val
usage(sys.stderr)
return 1
#endif
elif opt == '-i':
iterations = long(val)
elif opt == '-m':
max_bytes_to_fuzz = int(val)
elif opt == '-n':
nexe_path.append(val)
elif opt == '-p':
progress_period = int(val)
elif opt == '-s':
sel_ldr_path = val
elif opt == '-S':
random.seed(val)
elif opt == '-v':
verbosity = verbosity + 1
else:
print >>sys.stderr, 'Option', opt, 'not understood.'
return -1
#endif
#endfor
if progress_period <= 0:
print >>sys.stderr, 'verbose progress indication period must be positive.'
return 1
#endif
if not nexe_path:
print >>sys.stderr, 'No nexe specified.'
return 2
#endif
if sel_ldr_path is None:
print >>sys.stderr, 'No sel_ldr specified.'
return 3
#endif
if verbosity > 0:
print 'sel_ldr is at', sel_ldr_path
print 'nexe prototype(s) are at', nexe_path
#endif
nfa = re.compile(r'LOG_FATAL abort exit$')
which_nexe = 0
while iterations != 0:
nexe_bytes = open(nexe_path[which_nexe % len(nexe_path)]).read()
nexe_elf = elf.Elf(nexe_bytes)
fd, path = tempfile.mkstemp()
try:
fstream = os.fdopen(fd, 'w')
fuzzed_bytes = available_fuzzers[fuzzer](nexe_elf)
fstream.write(fuzzed_bytes)
fstream.close()
cmd_arg_list = [ sel_ldr_path,
'-F',
'--', path]
p = subprocess.Popen(cmd_arg_list,
stdin = subprocess.PIPE, # no /dev/null on windows
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
(out_data, err_data) = p.communicate(None)
if p.returncode < 0:
if verbosity > 1:
print 'sel_ldr exited with status', p.returncode, '; output follows.'
print 79 * '-'
print 'standard output'
print 79 * '-'
print out_data
print 79 * '-'
print 'standard error'
print 79 * '-'
print err_data
elif verbosity > 0:
os.write(1, '*')
#endif
if (os.WTERMSIG(-p.returncode) != signal.SIGABRT or
nfa.search(err_data) == None):
with os.fdopen(tempfile.mkstemp(dir=dest_dir)[0], 'w') as f:
f.write(fuzzed_bytes)
#endwith
# this is a one-liner alternative, relying on the dtor of
# file-like object to handle the flush/close. assumption
# here as with the 'with' statement version: write errors
# would cause an exception.
#
# os.fdopen(tempfile.mkstemp(dir=dest_dir)[0],
# 'w').write(fuzzed_bytes)
num_saved = num_saved + 1
progress_char = choose_progress_char(num_saved)
else:
if dest_fatal_dir is not None:
with os.fdopen(tempfile.mkstemp(dir=dest_fatal_dir)[0], 'w') as f:
f.write(fuzzed_bytes)
#endwith
num_saved = num_saved + 1
progress_char = choose_progress_char(num_saved)
elif verbosity > 1:
print 'LOG_FATAL exit, not saving'
#endif
#endif
#endif
finally:
os.unlink(path)
#endtry
if iterations > 0:
iterations = iterations - 1
#endif
if verbosity > 0 and which_nexe % progress_period == 0:
os.write(1, progress_char)
#endif
which_nexe = which_nexe + 1
#endwhile
print 'A total of', num_saved, 'nexes caused sel_ldr to exit with a signal.'
#enddef
if __name__ == '__main__':
sys.exit(main(sys.argv))
#endif
|
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 Fabio Calefato
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
- https://github.com/collab-uniba/
Requires:
-
"""
import getopt
import glob
import logging
import numpy
import os
import re
import string
import sys
from pyexcelerate import Workbook, Style, Font, Fill, Color
__script__ = 'collect-metrics.py'
__author__ = '@bateman'
__license__ = "MIT"
__date__ = '06-06-2016'
__version_info__ = (0, 0, 1)
__version__ = '.'.join(str(i) for i in __version_info__)
__home__ = 'https://github.com/collab-uniba/s'
__download__ = 'https://github.com/collab-uniba/.zip'
class ComputeMetrics(object):
metric_files = None
metrics = None
per_metric_vals = None
classification_res = None
models = None
# metric_names = {'A1': 'AUROC', 'B1': 'F1', 'C1': 'G-mean', 'D1': 'Phi', 'E1': 'Balance', 'F1': 'parameters',
# 'G1': 'time'}
metric_names = ['AUROC', 'F1', 'G-mean', 'Phi', 'Balance', 'time', 'parameters']
descriptive_stats = None
descriptive_stats_names = ['min', 'max', 'mean', 'median', 'stdev', 'total']
def __init__(self, infolder, outfile, sep=';', ext='txt', runs=10):
self.log = logging.getLogger('ComputeMetrics script')
self.infolder = infolder
self.outfile = outfile
self.sep = sep
self.ext = ext
self.runs = runs
self.metric_files = list()
self.classification_res = dict()
self.metrics = dict()
self.descriptive_stats = dict()
self.models = self.__readmodels('models/models.txt')
def main(self):
self.__getfiles()
for mf in self.metric_files:
model = mf[:-len(self.ext) - 1] # strips .ext away
fcontent = self.__readfile(mf)
self.classification_res[model] = fcontent
for model, content in self.classification_res.iteritems():
self.per_metric_vals = self.__compute_metrics(content)
self.metrics[model] = self.per_metric_vals
self.__compute_descriptive_stats()
self.__save_xls()
@staticmethod
def __readmodels(mfile):
models = list()
with open(mfile, 'r') as _file:
content = _file.readlines()
for m in content:
models.append(string.split(m.strip(), sep=":")[0])
return models
def __getfiles(self):
os.chdir(self.infolder)
for f in glob.glob("*.{0:s}".format(self.ext)):
self.metric_files.append(f)
@staticmethod
def __readfile(f):
with open(f, 'r') as _file:
_file_content = _file.read()
return _file_content
@staticmethod
def __compute_metrics(content):
permetric_vals = dict()
pParams = re.compile("The final values* used for the model (was|were) (.*\n*.*)\.")
Params_vals = list()
pTime = re.compile("Time difference of (.* \w+)")
Time_vals = list()
pHighROC = re.compile(".*TrainSpec\s+method\n1\s+(\d.\d+)")
HighROC_vals = list()
pF1 = re.compile("^F-measure = (.*)$", re.MULTILINE)
F1_vals = list()
pGmean = re.compile("^G-mean = (.*)$", re.MULTILINE)
Gmean_vals = list()
pPhi = re.compile("^Matthews phi = (.*)$", re.MULTILINE)
Phi_vals = list()
pBal = re.compile("^Balance = (.*)$", re.MULTILINE)
Bal_vals = list()
for match in re.finditer(pParams, content):
if match is not None:
Params_vals.append(match.group(2).replace('\n', ''))
if len(Params_vals) == 0:
pParams = re.compile("Tuning parameter \'(.*)\' was held constant at a value of (.*)")
for match in re.finditer(pParams, content):
assert (match is not None)
Params_vals.append(match.group(1) + " = " + match.group(2))
permetric_vals['parameters'] = Params_vals
for match in re.finditer(pTime, content):
assert (match is not None)
Time_vals.append(match.group(1))
permetric_vals['time'] = Time_vals
for match in re.finditer(pHighROC, content):
assert (match is not None)
HighROC_vals.append(match.group(1))
permetric_vals['AUROC'] = HighROC_vals
for match in re.finditer(pF1, content):
assert (match is not None)
F1_vals.append(match.group(1))
permetric_vals['F1'] = F1_vals
for match in re.finditer(pGmean, content):
assert (match is not None)
Gmean_vals.append(match.group(1))
permetric_vals['G-mean'] = Gmean_vals
for match in re.finditer(pPhi, content):
assert (match is not None)
Phi_vals.append(match.group(1))
permetric_vals['Phi'] = Phi_vals
for match in re.finditer(pBal, content):
assert (match is not None)
Bal_vals.append(match.group(1))
permetric_vals['Balance'] = Bal_vals
return permetric_vals
def __compute_descriptive_stats(self):
for model in self.models:
descriptive_stats = dict()
for nmetric in self.metric_names:
stats = dict()
if nmetric != 'parameters':
mList = self.metrics[model][nmetric]
try:
if nmetric == 'time':
newList = list()
time_unit = ''
for elem in mList:
i, time_unit = string.split(elem, sep=" ")
newList.append(i)
mList = numpy.asarray(newList).astype(numpy.float)
min = repr(numpy.amin(mList)) + ' ' + time_unit
max = repr(numpy.amax(mList)) + ' ' + time_unit
mean = repr(numpy.mean(mList)) + ' ' + time_unit
median = repr(numpy.median(mList)) + ' ' + time_unit
stdev = repr(numpy.std(mList)) + ' ' + time_unit
sum = repr(numpy.sum(mList)) + ' ' + time_unit
stats['total'] = sum
else:
mList = numpy.asarray(mList).astype(numpy.float)
min = numpy.amin(mList)
max = numpy.amax(mList)
mean = numpy.mean(mList)
median = numpy.median(mList)
stdev = numpy.std(mList)
except ValueError:
min = 'N/A'
max = 'N/A'
mean = 'N/A'
median = 'N/A'
stdev = 'N/A'
stats['min'] = min
stats['max'] = max
stats['mean'] = mean
stats['median'] = median
stats['stdev'] = stdev
descriptive_stats[nmetric] = stats
self.descriptive_stats[model] = descriptive_stats
pass
def __save_xls(self):
wb = Workbook()
for model in self.models:
ws = wb.new_sheet(sheet_name=model)
# sets the column name
for j in range(1, len(self.metric_names) + 1):
ws.set_cell_value(1, j + 1, self.metric_names[j - 1])
# ws.set_cell_style(1, j, Style(fill=Fill(background=Color(224, 224, 224, 224))))
ws.set_cell_style(1, j + 1, Style(font=Font(bold=True)))
# sets the cells values
for i in range(1, self.runs + 1):
# sets the first value in col 1 to "runX"
ws.set_cell_value(i + 1, 1, 'run ' + str(i))
for j in range(1, len(self.metric_names) + 1):
try:
ws.set_cell_value(i + 1, j + 1, self.metrics[model][self.metric_names[j - 1]][i - 1])
except IndexError:
ws.set_cell_value(i + 1, j + 1, '')
except KeyError:
pass
# after the last run row plus one empty row
offset = self.runs + 3
for i in range(0, len(self.descriptive_stats_names)):
ws.set_cell_value(i + offset, 1, self.descriptive_stats_names[i])
for j in range(0, len(self.metric_names) - 1):
try:
ws.set_cell_value(i + offset, j + 2, self.descriptive_stats[model][self.metric_names[j]][
self.descriptive_stats_names[i]])
except KeyError:
pass
wb.save(self.outfile)
if __name__ == '__main__':
# default CL arg values
outfile = 'aggregate-metrics.xlsx'
sep = ';'
ext = 'txt'
runs = 10
try:
if (len(sys.argv) <= 1):
raise (getopt.GetoptError("No arguments!"))
else:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:r:e:s:",
["help", "in=", "out=", "sep="])
except getopt.GetoptError:
print('Wrong or no arguments. Please, enter\n\n'
'\t%s [-h|--help]\n\n'
'for usage info.' % __script__)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('Usage: {0:s} [OPTIONS]\n'
'\t-h, --help prints this help\n'
'\t-i, --in <path/to/metrics/folder.txt> path to metric files\n'
'\t-o, --out <output>.<csv|xls|txt> output file\n'
'\t-r, --runs N number of runs\n'
'\t-e, --ext <txt> extension of metric files\n'
'\t-s, --sep <,|;> either , or ; as separator'.format(__script__))
sys.exit()
elif opt in ("-i", "--in"):
infolder = arg
elif opt in ("-o", "--out"):
outfile = arg
elif opt in ("-r", "--runs"):
runs = int(arg)
elif opt in ("-e", "--ext"):
ext = arg
elif opt in ("-s", "--sep"):
sep = arg
cm = ComputeMetrics(infolder, outfile, sep, ext, runs)
cm.main()
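# Example invocation (all argument values are placeholders):
#
#     python collect-metrics.py -i /path/to/metrics -o aggregate-metrics.xlsx \
#         -r 10 -e txt -s ";"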
|
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
from scipy import sparse
import csv
from scipy.spatial import ConvexHull
import matplotlib
# Pytorch requirements
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
if torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
dtype_l = torch.cuda.LongTensor
else:
dtype = torch.FloatTensor
dtype_l = torch.LongTensor
###############################################################################
# PTR-NET #
###############################################################################
class PtrNet_tanh(nn.Module):
def __init__(
self, input_size, hidden_size, batch_size):
super(PtrNet_tanh, self).__init__()
self.rnn_layers = 1
self.hidden_size = hidden_size
self.batch_size = batch_size
self.input_size = input_size
self.n = 16
self.init_var = 0.08
self.init_token = nn.Parameter(torch.zeros((self.input_size)))
self.W1 = nn.Parameter(torch.randn((self.hidden_size,
self.hidden_size)) * self.init_var)
self.W2 = nn.Parameter(torch.randn((self.hidden_size,
self.hidden_size)) * self.init_var)
self.v = nn.Parameter(torch.randn((self.hidden_size, 1)) *
self.init_var)
# cells
self.encoder_cell = nn.GRUCell(input_size, hidden_size)
self.decoder_cell = nn.GRUCell(input_size, hidden_size)
self.NLLoss = nn.NLLLoss(size_average=True)
# initialize weights
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.LSTMCell) or isinstance(m, nn.GRUCell):
m.weight_ih.data.uniform_(-self.init_var, self.init_var)
m.weight_hh.data.uniform_(-self.init_var, self.init_var)
m.bias_ih.data.uniform_(-self.init_var, self.init_var)
m.bias_hh.data.uniform_(-self.init_var, self.init_var)
if isinstance(m, nn.Linear):
# m.weight.data.normal_(0, self.init_var)
m.weight.data.uniform_(-self.init_var, self.init_var)
self.W1.data.uniform_(-self.init_var, self.init_var)
self.W2.data.uniform_(-self.init_var, self.init_var)
self.v.data.uniform_(-self.init_var, self.init_var)
def softmax_m(self, phis_m, u):
mask = phis_m
# masked softmax
u_m = u
u_m = u_m * mask
maxims = torch.max(u_m, 1)[0]
maxims = (maxims.squeeze().unsqueeze(1).expand(self.batch_size,
self.n))
exps = torch.exp(u_m - maxims)
exps_m = exps
exps_m = exps_m * mask
exps_sum = (torch.sum(exps_m, 1).squeeze().unsqueeze(1)
.expand(self.batch_size, self.n))
return exps_m / exps_sum
def Encoder(self, input, phis):
hidden_encoder = (Variable(torch.zeros(self.n, self.batch_size,
self.hidden_size),
requires_grad=True).type(dtype))
hidden = hidden_encoder[0].clone()
for n in xrange(self.n):
input_step = input[:, n]
# decouple interaction between different scopes using subdiagonal
if n > 0:
t = (phis[:, n, n - 1].squeeze().unsqueeze(1).expand(
self.batch_size, self.hidden_size))
hidden = t * hidden
# apply cell
hidden = self.encoder_cell(input_step, hidden)
hidden_encoder[n] = hidden
hidden_encoder = hidden_encoder.permute(1, 0, 2)
return hidden_encoder
def attention(self, hidden, W1xe, hidden_encoder, tanh=True):
# W2xdn has size (batch_size, hidden_size)
if tanh:
W2xdn = torch.mm(hidden, self.W2)
W2xdn = W2xdn.unsqueeze(1).expand(self.batch_size, self.n,
self.hidden_size)
u = (torch.bmm(torch.tanh(W1xe + W2xdn), self.v.unsqueeze(0)
.expand(self.batch_size, self.hidden_size, 1)))
u = u.squeeze()
else:
hidden = hidden.unsqueeze(2)
u = torch.bmm(hidden_encoder, hidden)
u = u.squeeze()
return u
def policy_loss(self, logsoftmax, target_col, logprobs):
pg_logsoftmax = sum([logp.expand_as(logsoftmax) * logsoftmax
for logp in logprobs])
pg_logsoftmax /= float(len(logprobs))
pg_loss_step = self.NLLoss(pg_logsoftmax, target_col.type(dtype_l))
return pg_loss_step
def compute_loss(self, output, target, lp=None):
loss = 0.0
pg_loss = 0.0
for n in xrange(output.size()[1]):
attn = output[:, n] + 1e-6
logsoftmax = torch.log(attn)
if lp is not None and len(lp) > 0:
pg_loss_step = self.policy_loss(logsoftmax, target[:, n], lp)
pg_loss += pg_loss_step
loss_step = self.NLLoss(logsoftmax, target[:, n].type(dtype_l))
loss += loss_step
return loss, pg_loss
def Decoder(self, input, hidden_encoder, phis,
input_target=None, target=None):
feed_target = False
if target is not None:
feed_target = True
# N_n is the number of elements of the scope of the n-th element
N = phis.sum(2).squeeze().unsqueeze(2).expand(self.batch_size, self.n,
self.hidden_size)
output = (Variable(torch.ones(self.batch_size, self.n, self.n))
.type(dtype))
index = ((N[:, 0] - 1) % (self.n)).type(dtype_l).unsqueeze(1)
hidden = (torch.gather(hidden_encoder, 1, index)).squeeze()
# W1xe size: (batch_size, n + 1, hidden_size)
W1xe = (torch.bmm(hidden_encoder, self.W1.unsqueeze(0).expand(
self.batch_size, self.hidden_size, self.hidden_size)))
# init token
start = (self.init_token.unsqueeze(0).expand(self.batch_size,
self.input_size))
input_step = start
for n in xrange(self.n):
# decouple interaction between different scopes by looking at
# subdiagonal elements of Phi
if n > 0:
t = (phis[:, n, n - 1].squeeze().unsqueeze(1).expand(
self.batch_size, self.hidden_size))
index = (((N[:, n] + n - 1) % (self.n)).type(dtype_l)
.unsqueeze(1))
init_hidden = (torch.gather(hidden_encoder, 1, index)
.squeeze())
hidden = t * hidden + (1 - t) * init_hidden
t = (phis[:, n, n - 1].squeeze().unsqueeze(1).expand(
self.batch_size, self.input_size))
input_step = t * input_step + (1 - t) * start
# Compute next state
hidden = self.decoder_cell(input_step, hidden)
# Compute pairwise interactions
u = self.attention(hidden, W1xe, hidden_encoder, tanh=True)
# Normalize interactions by taking the masked softmax by phi
attn = self.softmax_m(phis[:, n].squeeze(), u)
if feed_target:
# feed next step with target
next = (target[:, n].unsqueeze(1).unsqueeze(2)
.expand(self.batch_size, 1, self.input_size)
.type(dtype_l))
input_step = torch.gather(input_target, 1, next).squeeze()
else:
# blend inputs
input_step = (torch.sum(attn.unsqueeze(2).expand(
self.batch_size, self.n,
self.input_size) * input, 1)).squeeze()
# Update output
output[:, n] = attn
return output
def forward(self, input, phis, input_target=None, target=None):
# Encoder
hidden_encoder = self.Encoder(input, phis)
# Pointer Decoder
output = self.Decoder(input, hidden_encoder, phis,
input_target=input_target, target=target)
return output
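# Shape sketch, inferred from the indexing above (illustrative only; the sizes used
# here are assumptions, except for the fixed sequence length self.n == 16):
#
#     net = PtrNet_tanh(input_size=2, hidden_size=256, batch_size=B)
#     # input:  (B, 16, input_size)  -- encoder inputs, one vector per position
#     # phis:   (B, 16, 16)          -- 0/1 scope mask consumed by softmax_m
#     # output: (B, 16, 16)          -- per-step attention over the input positions
#     output = net(input, phis)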
|
|
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
from io import BytesIO, StringIO
from typing import Any, Dict, Optional, Union
from unittest.mock import Mock
import signedjson.key
from canonicaljson import encode_canonical_json
from signedjson.sign import sign_json
from signedjson.types import SigningKey
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import NoResource, Resource
from synapse.crypto.keyring import PerspectivesKeyFetcher
from synapse.http.site import SynapseRequest
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.keys import FetchKeyResult
from synapse.types import JsonDict
from synapse.util import Clock
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.stringutils import random_string
from tests import unittest
from tests.server import FakeChannel
from tests.utils import default_config
class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.http_client = Mock()
return self.setup_test_homeserver(federation_http_client=self.http_client)
def create_test_resource(self) -> Resource:
return create_resource_tree(
{"/_matrix/key/v2": KeyApiV2Resource(self.hs)}, root_resource=NoResource()
)
def expect_outgoing_key_request(
self, server_name: str, signing_key: SigningKey
) -> None:
"""
Tell the mock http client to expect an outgoing GET request for the given key
"""
async def get_json(
destination: str,
path: str,
ignore_backoff: bool = False,
**kwargs: Any,
) -> Union[JsonDict, list]:
self.assertTrue(ignore_backoff)
self.assertEqual(destination, server_name)
key_id = "%s:%s" % (signing_key.alg, signing_key.version)
self.assertEqual(
path, "/_matrix/key/v2/server/%s" % (urllib.parse.quote(key_id),)
)
response = {
"server_name": server_name,
"old_verify_keys": {},
"valid_until_ts": 200 * 1000,
"verify_keys": {
key_id: {
"key": signedjson.key.encode_verify_key_base64(
signing_key.verify_key
)
}
},
}
sign_json(response, server_name, signing_key)
return response
self.http_client.get_json.side_effect = get_json
class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase):
def make_notary_request(self, server_name: str, key_id: str) -> dict:
"""Send a GET request to the test server requesting the given key.
Checks that the response is a 200 and returns the decoded json body.
"""
channel = FakeChannel(self.site, self.reactor)
# channel is a `FakeChannel` but `HTTPChannel` is expected
req = SynapseRequest(channel, self.site) # type: ignore[arg-type]
req.content = BytesIO(b"")
req.requestReceived(
b"GET",
b"/_matrix/key/v2/query/%s/%s"
% (server_name.encode("utf-8"), key_id.encode("utf-8")),
b"1.1",
)
channel.await_result()
self.assertEqual(channel.code, 200)
resp = channel.json_body
return resp
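# Note: the decoded body follows the notary response format exercised by the tests
# below: a "server_keys" list whose entries carry "verify_keys" plus "signatures"
# from both the origin server and the notary (see test_get_key).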
def test_get_key(self) -> None:
"""Fetch a remote key"""
SERVER_NAME = "remote.server"
testkey = signedjson.key.generate_signing_key("ver1")
self.expect_outgoing_key_request(SERVER_NAME, testkey)
resp = self.make_notary_request(SERVER_NAME, "ed25519:ver1")
keys = resp["server_keys"]
self.assertEqual(len(keys), 1)
self.assertIn("ed25519:ver1", keys[0]["verify_keys"])
self.assertEqual(len(keys[0]["verify_keys"]), 1)
# it should be signed by both the origin server and the notary
self.assertIn(SERVER_NAME, keys[0]["signatures"])
self.assertIn(self.hs.hostname, keys[0]["signatures"])
def test_get_own_key(self) -> None:
"""Fetch our own key"""
testkey = signedjson.key.generate_signing_key("ver1")
self.expect_outgoing_key_request(self.hs.hostname, testkey)
resp = self.make_notary_request(self.hs.hostname, "ed25519:ver1")
keys = resp["server_keys"]
self.assertEqual(len(keys), 1)
# it should be signed by both itself, and the notary signing key
sigs = keys[0]["signatures"]
self.assertEqual(len(sigs), 1)
self.assertIn(self.hs.hostname, sigs)
oursigs = sigs[self.hs.hostname]
self.assertEqual(len(oursigs), 2)
# the requested key should be present in the verify_keys section
self.assertIn("ed25519:ver1", keys[0]["verify_keys"])
class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
"""End-to-end tests of the perspectives fetch case
The idea here is to actually wire up a PerspectivesKeyFetcher to the notary
endpoint, to check that the two implementations are compatible.
"""
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
# replace the signing key with our own
self.hs_signing_key = signedjson.key.generate_signing_key("kssk")
strm = StringIO()
signedjson.key.write_signing_keys(strm, [self.hs_signing_key])
config["signing_key"] = strm.getvalue()
return config
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
# make a second homeserver, configured to use the first one as a key notary
self.http_client2 = Mock()
config = default_config(name="keyclient")
config["trusted_key_servers"] = [
{
"server_name": self.hs.hostname,
"verify_keys": {
"ed25519:%s"
% (
self.hs_signing_key.version,
): signedjson.key.encode_verify_key_base64(
self.hs_signing_key.verify_key
)
},
}
]
self.hs2 = self.setup_test_homeserver(
federation_http_client=self.http_client2, config=config
)
# wire up outbound POST /key/v2/query requests from hs2 so that they
# will be forwarded to hs1
async def post_json(
destination: str, path: str, data: Optional[JsonDict] = None
) -> Union[JsonDict, list]:
self.assertEqual(destination, self.hs.hostname)
self.assertEqual(
path,
"/_matrix/key/v2/query",
)
channel = FakeChannel(self.site, self.reactor)
# channel is a `FakeChannel` but `HTTPChannel` is expected
req = SynapseRequest(channel, self.site) # type: ignore[arg-type]
req.content = BytesIO(encode_canonical_json(data))
req.requestReceived(
b"POST",
path.encode("utf-8"),
b"1.1",
)
channel.await_result()
self.assertEqual(channel.code, 200)
resp = channel.json_body
return resp
self.http_client2.post_json.side_effect = post_json
def test_get_key(self) -> None:
"""Fetch a key belonging to a random server"""
# make up a key to be fetched.
testkey = signedjson.key.generate_signing_key("abc")
# we expect hs1 to make a regular key request to the target server
self.expect_outgoing_key_request("targetserver", testkey)
keyid = "ed25519:%s" % (testkey.version,)
fetcher = PerspectivesKeyFetcher(self.hs2)
d = fetcher.get_keys("targetserver", [keyid], 1000)
res = self.get_success(d)
self.assertIn(keyid, res)
keyres = res[keyid]
assert isinstance(keyres, FetchKeyResult)
self.assertEqual(
signedjson.key.encode_verify_key_base64(keyres.verify_key),
signedjson.key.encode_verify_key_base64(testkey.verify_key),
)
def test_get_notary_key(self) -> None:
"""Fetch a key belonging to the notary server"""
# make up a key to be fetched. We randomise the keyid to try to get it to
# appear before the key server signing key sometimes (otherwise we bail out
# before fetching its signature)
testkey = signedjson.key.generate_signing_key(random_string(5))
# we expect hs1 to make a regular key request to itself
self.expect_outgoing_key_request(self.hs.hostname, testkey)
keyid = "ed25519:%s" % (testkey.version,)
fetcher = PerspectivesKeyFetcher(self.hs2)
d = fetcher.get_keys(self.hs.hostname, [keyid], 1000)
res = self.get_success(d)
self.assertIn(keyid, res)
keyres = res[keyid]
assert isinstance(keyres, FetchKeyResult)
self.assertEqual(
signedjson.key.encode_verify_key_base64(keyres.verify_key),
signedjson.key.encode_verify_key_base64(testkey.verify_key),
)
def test_get_notary_keyserver_key(self) -> None:
"""Fetch the notary's keyserver key"""
# we expect hs1 to make a regular key request to itself
self.expect_outgoing_key_request(self.hs.hostname, self.hs_signing_key)
keyid = "ed25519:%s" % (self.hs_signing_key.version,)
fetcher = PerspectivesKeyFetcher(self.hs2)
d = fetcher.get_keys(self.hs.hostname, [keyid], 1000)
res = self.get_success(d)
self.assertIn(keyid, res)
keyres = res[keyid]
assert isinstance(keyres, FetchKeyResult)
self.assertEqual(
signedjson.key.encode_verify_key_base64(keyres.verify_key),
signedjson.key.encode_verify_key_base64(self.hs_signing_key.verify_key),
)
|
|
# Heavily based on the XML-RPC implementation in python.
# Based on the json-rpc specs: http://json-rpc.org/wiki/specification
# The main deviation is on the error treatment. The official spec
# would set the 'error' attribute to a string. This implementation
# sets it to a dictionary with keys: message/traceback/type
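# For illustration, the wire format this module actually produces (a sketch
# derived from the code below, not from the official spec):
#
#   request:  {"id": 1, "method": "add", "params": [1, 2]}
#   success:  {"id": 1, "result": 3, "error": null}
#   failure:  {"id": 1, "result": null,
#              "error": {"type": "...", "message": "...", "traceback": "..."}}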
import cjson
import SocketServer
import sys
import traceback
try:
import fcntl
except ImportError:
fcntl = None
###
### Server code
###
import SimpleXMLRPCServer
class SimpleJSONRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
def _marshaled_dispatch(self, data, dispatch_method = None):
id = None
try:
req = cjson.decode(data)
method = req['method']
params = req['params']
id = req['id']
if dispatch_method is not None:
result = dispatch_method(method, params)
else:
result = self._dispatch(method, params)
response = dict(id=id, result=result, error=None)
except:
extpe, exv, extrc = sys.exc_info()
err = dict(type=str(extpe),
message=str(exv),
traceback=''.join(traceback.format_tb(extrc)))
response = dict(id=id, result=None, error=err)
try:
return cjson.encode(response)
except:
extpe, exv, extrc = sys.exc_info()
err = dict(type=str(extpe),
message=str(exv),
traceback=''.join(traceback.format_tb(extrc)))
response = dict(id=id, result=None, error=err)
return cjson.encode(response)
class SimpleJSONRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
# Class attribute listing the accessible path components;
# paths not on this list will result in a 404 error.
rpc_paths = ('/', '/JSON')
class SimpleJSONRPCServer(SocketServer.TCPServer,
SimpleJSONRPCDispatcher):
"""Simple JSON-RPC server.
Simple JSON-RPC server that allows functions and a single instance
to be installed to handle requests. The default implementation
attempts to dispatch JSON-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
from SimpleJSONRPCDispatcher to change this behavior.
"""
allow_reuse_address = True
def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
logRequests=True):
self.logRequests = logRequests
SimpleJSONRPCDispatcher.__init__(self, allow_none=True, encoding=None)
SocketServer.TCPServer.__init__(self, addr, requestHandler)
# [Bug #1222790] If possible, set close-on-exec flag; if a
# method spawns a subprocess, the subprocess shouldn't have
# the listening socket open.
if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
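# A hypothetical sketch (not part of this module) of the customisation the
# docstring above describes: overriding _dispatch to change method lookup,
# here by routing incoming names to functions registered with an "rpc_" prefix:
#
#   class PrefixedJSONRPCServer(SimpleJSONRPCServer):
#       def _dispatch(self, method, params):
#           return SimpleJSONRPCServer._dispatch(self, "rpc_" + method, params)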
###
### Client code
###
import xmlrpclib
class ResponseError(xmlrpclib.ResponseError):
pass
class Fault(xmlrpclib.ResponseError):
pass
def _get_response(file, sock):
data = ""
while 1:
if sock:
response = sock.recv(1024)
else:
response = file.read(1024)
if not response:
break
data += response
file.close()
return data
class Transport(xmlrpclib.Transport):
def _parse_response(self, file, sock):
return _get_response(file, sock)
class SafeTransport(xmlrpclib.SafeTransport):
def _parse_response(self, file, sock):
return _get_response(file, sock)
class ServerProxy:
def __init__(self, uri, id=None, transport=None, use_datetime=0):
# establish a "logical" server connection
# get the url
import urllib
type, uri = urllib.splittype(uri)
if type not in ("http", "https"):
raise IOError, "unsupported JSON-RPC protocol"
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = "/JSON"
if transport is None:
if type == "https":
transport = SafeTransport(use_datetime=use_datetime)
else:
transport = Transport(use_datetime=use_datetime)
self.__transport = transport
self.__id = id
def __request(self, methodname, params):
# call a method on the remote server
request = cjson.encode(dict(id=self.__id, method=methodname,
params=params))
data = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=False
)
response = cjson.decode(data)
if response["id"] != self.__id:
raise ResponseError("Invalid request id (is: %s, expected: %s)" \
% (response["id"], self.__id))
if response["error"] is not None:
raise Fault("JSON Error", response["error"])
return response["result"]
def __repr__(self):
return (
"<ServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
if __name__ == '__main__':
    if len(sys.argv) < 2:
import socket
print 'Running JSON-RPC server on port 8000'
server = SimpleJSONRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
else:
remote = ServerProxy(sys.argv[1])
print 'Using connection', remote
print repr(remote.add(1, 2))
aaa = remote.add
print repr(remote.pow(2, 4))
print aaa(5, 6)
try:
# Invalid parameters
aaa(5, "toto")
print "Successful execution of invalid code"
except Fault:
pass
try:
# Invalid parameters
aaa(5, 6, 7)
print "Successful execution of invalid code"
except Fault:
pass
try:
# Invalid method name
print repr(remote.powx(2, 4))
print "Successful execution of invalid code"
except Fault:
pass
|
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class GeolocationsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_geoloc_settings(self, type, type_id, **kwargs):
"""
Get geolocation settings for an item
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_geoloc_settings(type, type_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str type: Item type, available values are: category, subscription, product, video, extract, blogpage, slider, topmenu, homerail (required)
:param int type_id: Item ID (required)
:return: GeolocSettingsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_geoloc_settings_with_http_info(type, type_id, **kwargs)
else:
(data) = self.get_geoloc_settings_with_http_info(type, type_id, **kwargs)
return data
def get_geoloc_settings_with_http_info(self, type, type_id, **kwargs):
"""
Get geolocation settings for an item
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_geoloc_settings_with_http_info(type, type_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str type: Item type, available values are: category, subscription, product, video, extract, blogpage, slider, topmenu, homerail (required)
:param int type_id: Item ID (required)
:return: GeolocSettingsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['type', 'type_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_geoloc_settings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'type' is set
if ('type' not in params) or (params['type'] is None):
raise ValueError("Missing the required parameter `type` when calling `get_geoloc_settings`")
# verify the required parameter 'type_id' is set
if ('type_id' not in params) or (params['type_id'] is None):
raise ValueError("Missing the required parameter `type_id` when calling `get_geoloc_settings`")
collection_formats = {}
resource_path = '/geolocations/settings'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'type' in params:
query_params['type'] = params['type']
if 'type_id' in params:
query_params['type_id'] = params['type_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GeolocSettingsResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_ip_location(self, ip_address, **kwargs):
"""
Get IP location
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_ip_location(ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param str ip_address: IP address (required)
:return: IPLocationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_ip_location_with_http_info(ip_address, **kwargs)
else:
(data) = self.get_ip_location_with_http_info(ip_address, **kwargs)
return data
def get_ip_location_with_http_info(self, ip_address, **kwargs):
"""
Get IP location
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_ip_location_with_http_info(ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param str ip_address: IP address (required)
:return: IPLocationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ip_address']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_ip_location" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'ip_address' is set
if ('ip_address' not in params) or (params['ip_address'] is None):
raise ValueError("Missing the required parameter `ip_address` when calling `get_ip_location`")
collection_formats = {}
resource_path = '/geolocations/ip'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'ip_address' in params:
query_params['ip_address'] = params['ip_address']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IPLocationResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_platform_access_info(self, ip_address, **kwargs):
"""
Get PlatformAccessInfo by ip
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_platform_access_info(ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str ip_address: IP address (required)
:return: PlatformAccessResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_platform_access_info_with_http_info(ip_address, **kwargs)
else:
(data) = self.get_platform_access_info_with_http_info(ip_address, **kwargs)
return data
def get_platform_access_info_with_http_info(self, ip_address, **kwargs):
"""
Get PlatformAccessInfo by ip
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_platform_access_info_with_http_info(ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str ip_address: IP address (required)
:return: PlatformAccessResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ip_address']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_platform_access_info" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'ip_address' is set
if ('ip_address' not in params) or (params['ip_address'] is None):
raise ValueError("Missing the required parameter `ip_address` when calling `get_platform_access_info`")
collection_formats = {}
resource_path = '/geolocations/platform-access'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'ip_address' in params:
query_params['ip_address'] = params['ip_address']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformAccessResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_geolocations(self, product_id, **kwargs):
"""
Get product geolocation restrictions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_geolocations(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:return: GeolocationListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_product_geolocations_with_http_info(product_id, **kwargs)
else:
(data) = self.get_product_geolocations_with_http_info(product_id, **kwargs)
return data
def get_product_geolocations_with_http_info(self, product_id, **kwargs):
"""
Get product geolocation restrictions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_geolocations_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:return: GeolocationListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_geolocations" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_geolocations`")
collection_formats = {}
resource_path = '/products/{product_id}/geolocations'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GeolocationListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_geolocations_by_ip(self, product_id, ip_address, **kwargs):
"""
Check product access using geolocation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_geolocations_by_ip(product_id, ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
        :param str ip_address: IP address (required)
:param int page:
:param int per_page:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_product_geolocations_by_ip_with_http_info(product_id, ip_address, **kwargs)
else:
(data) = self.get_product_geolocations_by_ip_with_http_info(product_id, ip_address, **kwargs)
return data
def get_product_geolocations_by_ip_with_http_info(self, product_id, ip_address, **kwargs):
"""
Check product access using geolocation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_geolocations_by_ip_with_http_info(product_id, ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
        :param str ip_address: IP address (required)
:param int page:
:param int per_page:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'ip_address', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_geolocations_by_ip" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_geolocations_by_ip`")
# verify the required parameter 'ip_address' is set
if ('ip_address' not in params) or (params['ip_address'] is None):
raise ValueError("Missing the required parameter `ip_address` when calling `get_product_geolocations_by_ip`")
collection_formats = {}
resource_path = '/products/{product_id}/geolocations'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
if 'ip_address' in params:
form_params.append(('ip_address', params['ip_address']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_video_geolocation_by_ip(self, video_id, ip_address, **kwargs):
"""
Check access to a video by geolocation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_video_geolocation_by_ip(video_id, ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param str ip_address: IP address (required)
:param int page:
:param int per_page:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_video_geolocation_by_ip_with_http_info(video_id, ip_address, **kwargs)
else:
(data) = self.get_video_geolocation_by_ip_with_http_info(video_id, ip_address, **kwargs)
return data
def get_video_geolocation_by_ip_with_http_info(self, video_id, ip_address, **kwargs):
"""
Check access to a video by geolocation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_video_geolocation_by_ip_with_http_info(video_id, ip_address, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param str ip_address: IP address (required)
:param int page:
:param int per_page:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['video_id', 'ip_address', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_video_geolocation_by_ip" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'video_id' is set
if ('video_id' not in params) or (params['video_id'] is None):
raise ValueError("Missing the required parameter `video_id` when calling `get_video_geolocation_by_ip`")
# verify the required parameter 'ip_address' is set
if ('ip_address' not in params) or (params['ip_address'] is None):
raise ValueError("Missing the required parameter `ip_address` when calling `get_video_geolocation_by_ip`")
collection_formats = {}
resource_path = '/videos/{video_id}/geolocations/{ip_address}'.replace('{format}', 'json')
path_params = {}
if 'video_id' in params:
path_params['video_id'] = params['video_id']
if 'ip_address' in params:
path_params['ip_address'] = params['ip_address']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_product_geolocation(self, product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs):
"""
Handle geolocation for products by countries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_product_geolocation(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int enabled: Enabled (required)
:param str behavior_detected_countries: Behavior for detected countries (required)
:param str behavior_non_detected_countries: Behavior for non-detected countries (required)
:param str countries: IDs of the non-detected countries separated by comma
:param int page:
:param int per_page:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.set_product_geolocation_with_http_info(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs)
else:
(data) = self.set_product_geolocation_with_http_info(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs)
return data
def set_product_geolocation_with_http_info(self, product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs):
"""
Handle geolocation for products by countries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_product_geolocation_with_http_info(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int enabled: Enabled (required)
:param str behavior_detected_countries: Behavior for detected countries (required)
:param str behavior_non_detected_countries: Behavior for non-detected countries (required)
:param str countries: IDs of the non-detected countries separated by comma
:param int page:
:param int per_page:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'enabled', 'behavior_detected_countries', 'behavior_non_detected_countries', 'countries', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_product_geolocation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `set_product_geolocation`")
# verify the required parameter 'enabled' is set
if ('enabled' not in params) or (params['enabled'] is None):
raise ValueError("Missing the required parameter `enabled` when calling `set_product_geolocation`")
# verify the required parameter 'behavior_detected_countries' is set
if ('behavior_detected_countries' not in params) or (params['behavior_detected_countries'] is None):
raise ValueError("Missing the required parameter `behavior_detected_countries` when calling `set_product_geolocation`")
# verify the required parameter 'behavior_non_detected_countries' is set
if ('behavior_non_detected_countries' not in params) or (params['behavior_non_detected_countries'] is None):
raise ValueError("Missing the required parameter `behavior_non_detected_countries` when calling `set_product_geolocation`")
collection_formats = {}
resource_path = '/products/{product_id}/geolocations'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
if 'countries' in params:
form_params.append(('countries', params['countries']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'enabled' in params:
form_params.append(('enabled', params['enabled']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'behavior_detected_countries' in params:
form_params.append(('behavior_detected_countries', params['behavior_detected_countries']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'behavior_non_detected_countries' in params:
form_params.append(('behavior_non_detected_countries', params['behavior_non_detected_countries']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_video_geolocation(self, video_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs):
"""
Handle geolocation for videos by countries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_video_geolocation(video_id, enabled, behavior_detected_countries, behavior_non_detected_countries, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param int enabled: Enabled (required)
:param str behavior_detected_countries: Behavior for detected countries (required)
:param str behavior_non_detected_countries: Behavior for non-detected countries (required)
:param str countries: IDs of the non-detected countries separated by comma
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.set_video_geolocation_with_http_info(video_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs)
else:
(data) = self.set_video_geolocation_with_http_info(video_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs)
return data
def set_video_geolocation_with_http_info(self, video_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs):
"""
Handle geolocation for videos by countries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_video_geolocation_with_http_info(video_id, enabled, behavior_detected_countries, behavior_non_detected_countries, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param int enabled: Enabled (required)
:param str behavior_detected_countries: Behavior for detected countries (required)
:param str behavior_non_detected_countries: Behavior for non-detected countries (required)
:param str countries: IDs of the non-detected countries separated by comma
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['video_id', 'enabled', 'behavior_detected_countries', 'behavior_non_detected_countries', 'countries']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_video_geolocation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'video_id' is set
if ('video_id' not in params) or (params['video_id'] is None):
raise ValueError("Missing the required parameter `video_id` when calling `set_video_geolocation`")
# verify the required parameter 'enabled' is set
if ('enabled' not in params) or (params['enabled'] is None):
raise ValueError("Missing the required parameter `enabled` when calling `set_video_geolocation`")
# verify the required parameter 'behavior_detected_countries' is set
if ('behavior_detected_countries' not in params) or (params['behavior_detected_countries'] is None):
raise ValueError("Missing the required parameter `behavior_detected_countries` when calling `set_video_geolocation`")
# verify the required parameter 'behavior_non_detected_countries' is set
if ('behavior_non_detected_countries' not in params) or (params['behavior_non_detected_countries'] is None):
raise ValueError("Missing the required parameter `behavior_non_detected_countries` when calling `set_video_geolocation`")
collection_formats = {}
resource_path = '/videos/{video_id}/geolocations'.replace('{format}', 'json')
path_params = {}
if 'video_id' in params:
path_params['video_id'] = params['video_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'countries' in params:
form_params.append(('countries', params['countries']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'enabled' in params:
form_params.append(('enabled', params['enabled']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'behavior_detected_countries' in params:
form_params.append(('behavior_detected_countries', params['behavior_detected_countries']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'behavior_non_detected_countries' in params:
form_params.append(('behavior_non_detected_countries', params['behavior_non_detected_countries']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
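# Usage sketch (illustrative only; host and credential configuration is
# assumed to have been set up on Configuration / ApiClient elsewhere):
#
# >>> api = GeolocationsApi()
# >>> # synchronous call, returns an IPLocationResponse
# >>> location = api.get_ip_location('203.0.113.10')
# >>> # asynchronous call, returns the request thread
# >>> def callback_function(response):
# >>>     pprint(response)
# >>> thread = api.get_ip_location('203.0.113.10', callback=callback_function)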
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Student t distribution."""
import importlib
import math
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class StudentTTest(test.TestCase):
def testStudentPDFAndLogPDF(self):
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=-sigma) # pylint: disable=invalid-unary-operand-type
log_pdf = student.log_prob(t)
self.assertEqual(log_pdf.get_shape(), (6,))
log_pdf_values = self.evaluate(log_pdf)
pdf = student.prob(t)
self.assertEqual(pdf.get_shape(), (6,))
pdf_values = self.evaluate(pdf)
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentLogPDFMultidimensional(self):
batch_size = 6
df = constant_op.constant([[1.5, 7.2]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant(
[[-math.sqrt(10.), math.sqrt(15.)]] * batch_size)
df_v = np.array([1.5, 7.2])
mu_v = np.array([3., -3.])
sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)])
t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_pdf = student.log_prob(t)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
pdf = student.prob(t)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
if not stats:
return
expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.log(expected_pdf), log_pdf_values)
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testStudentCDFAndLogCDF(self):
batch_size = 6
df = constant_op.constant([3.] * batch_size)
mu = constant_op.constant([7.] * batch_size)
sigma = constant_op.constant([-8.] * batch_size)
df_v = 3.
mu_v = 7.
sigma_v = 8.
t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
student = student_t.StudentT(df, loc=mu, scale=sigma)
log_cdf = student.log_cdf(t)
self.assertEqual(log_cdf.get_shape(), (6,))
log_cdf_values = self.evaluate(log_cdf)
cdf = student.cdf(t)
self.assertEqual(cdf.get_shape(), (6,))
cdf_values = self.evaluate(cdf)
if not stats:
return
expected_log_cdf = stats.t.logcdf(t, df_v, loc=mu_v, scale=sigma_v)
expected_cdf = stats.t.cdf(t, df_v, loc=mu_v, scale=sigma_v)
self.assertAllClose(expected_log_cdf, log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.log(expected_cdf), log_cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(expected_cdf, cdf_values, atol=0., rtol=1e-5)
self.assertAllClose(
np.exp(expected_log_cdf), cdf_values, atol=0., rtol=1e-5)
def testStudentEntropy(self):
df_v = np.array([[2., 3., 7.]]) # 1x3
mu_v = np.array([[1., -1, 0]]) # 1x3
sigma_v = np.array([[1., -2., 3.]]).T # transposed => 3x1
student = student_t.StudentT(df=df_v, loc=mu_v, scale=sigma_v)
ent = student.entropy()
ent_values = self.evaluate(ent)
# Help scipy broadcast to 3x3
ones = np.array([[1, 1, 1]])
sigma_bc = np.abs(sigma_v) * ones
mu_bc = ones.T * mu_v
df_bc = ones.T * df_v
if not stats:
return
expected_entropy = stats.t.entropy(
np.reshape(df_bc, [-1]),
loc=np.reshape(mu_bc, [-1]),
scale=np.reshape(sigma_bc, [-1]))
expected_entropy = np.reshape(expected_entropy, df_bc.shape)
self.assertAllClose(expected_entropy, ent_values)
def testStudentSample(self):
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(-math.sqrt(10.))
df_v = 4.
mu_v = 3.
sigma_v = np.sqrt(10.)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val,))
self.assertAllClose(sample_values.mean(), mu_v, rtol=0.1, atol=0)
self.assertAllClose(
sample_values.var(), sigma_v**2 * df_v / (df_v - 2), rtol=0.1, atol=0)
self._checkKLApprox(df_v, mu_v, sigma_v, sample_values)
# Test that sampling with the same seed twice gives the same results.
def testStudentSampleMultipleTimes(self):
df = constant_op.constant(4.)
mu = constant_op.constant(3.)
sigma = constant_op.constant(math.sqrt(10.))
n = constant_op.constant(100)
random_seed.set_random_seed(654321)
student = student_t.StudentT(df=df, loc=mu, scale=sigma, name="student_t1")
samples1 = self.evaluate(student.sample(n, seed=123456))
random_seed.set_random_seed(654321)
student2 = student_t.StudentT(df=df, loc=mu, scale=sigma, name="student_t2")
samples2 = self.evaluate(student2.sample(n, seed=123456))
self.assertAllClose(samples1, samples2)
def testStudentSampleSmallDfNoNan(self):
df_v = [1e-1, 1e-5, 1e-10, 1e-20]
df = constant_op.constant(df_v)
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=1., scale=1.)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
n_val = 200000
self.assertEqual(sample_values.shape, (n_val, 4))
self.assertTrue(np.all(np.logical_not(np.isnan(sample_values))))
def testStudentSampleMultiDimensional(self):
batch_size = 7
df = constant_op.constant([[5., 7.]] * batch_size)
mu = constant_op.constant([[3., -3.]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(10.), math.sqrt(15.)]] * batch_size)
df_v = [5., 7.]
mu_v = [3., -3.]
sigma_v = [np.sqrt(10.), np.sqrt(15.)]
n = constant_op.constant(200000)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(n, seed=123456)
sample_values = self.evaluate(samples)
self.assertEqual(samples.get_shape(), (200000, batch_size, 2))
self.assertAllClose(
sample_values[:, 0, 0].mean(), mu_v[0], rtol=0.1, atol=0)
self.assertAllClose(
sample_values[:, 0, 0].var(),
sigma_v[0]**2 * df_v[0] / (df_v[0] - 2),
rtol=0.2,
atol=0)
self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0])
self.assertAllClose(
sample_values[:, 0, 1].mean(), mu_v[1], rtol=0.1, atol=0)
self.assertAllClose(
sample_values[:, 0, 1].var(),
sigma_v[1]**2 * df_v[1] / (df_v[1] - 2),
rtol=0.2,
atol=0)
self._checkKLApprox(df_v[1], mu_v[1], sigma_v[1], sample_values[:, 0, 1])
def _checkKLApprox(self, df, mu, sigma, samples):
n = samples.size
np.random.seed(137)
if not stats:
return
sample_scipy = stats.t.rvs(df, loc=mu, scale=sigma, size=n)
covg = 0.99
r = stats.t.interval(covg, df, loc=mu, scale=sigma)
bins = 100
hist, _ = np.histogram(samples, bins=bins, range=r)
hist_scipy, _ = np.histogram(sample_scipy, bins=bins, range=r)
self.assertGreater(hist.sum(), n * (covg - .01))
self.assertGreater(hist_scipy.sum(), n * (covg - .01))
hist_min1 = hist + 1. # put at least one item in each bucket
hist_norm = hist_min1 / hist_min1.sum()
hist_scipy_min1 = hist_scipy + 1. # put at least one item in each bucket
hist_scipy_norm = hist_scipy_min1 / hist_scipy_min1.sum()
kl_appx = np.sum(np.log(hist_scipy_norm / hist_norm) * hist_scipy_norm)
self.assertLess(kl_appx, 1)
def testBroadcastingParams(self):
def _check(student):
self.assertEqual(student.mean().get_shape(), (3,))
self.assertEqual(student.variance().get_shape(), (3,))
self.assertEqual(student.entropy().get_shape(), (3,))
self.assertEqual(student.log_prob(2.).get_shape(), (3,))
self.assertEqual(student.prob(2.).get_shape(), (3,))
self.assertEqual(student.sample(37).get_shape(), (37, 3,))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def testBroadcastingPdfArgs(self):
def _assert_shape(student, arg, shape):
self.assertEqual(student.log_prob(arg).get_shape(), shape)
self.assertEqual(student.prob(arg).get_shape(), shape)
def _check(student):
_assert_shape(student, 2., (3,))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (3,))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
_check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
_check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))
def _check2d(student):
_assert_shape(student, 2., (1, 3))
xs = np.array([2., 3., 4.], dtype=np.float32)
_assert_shape(student, xs, (1, 3))
xs = np.array([xs])
_assert_shape(student, xs, (1, 3))
xs = xs.T
_assert_shape(student, xs, (3, 3))
_check2d(student_t.StudentT(df=[[2., 3., 4.,]], loc=2., scale=1.))
_check2d(student_t.StudentT(df=7., loc=[[2., 3., 4.,]], scale=1.))
_check2d(student_t.StudentT(df=7., loc=3., scale=[[2., 3., 4.,]]))
def _check2d_rows(student):
_assert_shape(student, 2., (3, 1))
xs = np.array([2., 3., 4.], dtype=np.float32) # (3,)
_assert_shape(student, xs, (3, 3))
xs = np.array([xs]) # (1,3)
_assert_shape(student, xs, (3, 3))
xs = xs.T # (3,1)
_assert_shape(student, xs, (3, 1))
_check2d_rows(student_t.StudentT(df=[[2.], [3.], [4.]], loc=2., scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=[[2.], [3.], [4.]], scale=1.))
_check2d_rows(student_t.StudentT(df=7., loc=3., scale=[[2.], [3.], [4.]]))
def testMeanAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
mu = [1., 3.3, 4.4]
student = student_t.StudentT(df=[3., 5., 7.], loc=mu, scale=[3., 2., 1.])
mean = self.evaluate(student.mean())
self.assertAllClose([1., 3.3, 4.4], mean)
def testMeanAllowNanStatsIsFalseRaisesWhenBatchMemberIsUndefined(self):
mu = [1., 3.3, 4.4]
student = student_t.StudentT(
df=[0.5, 5., 7.], loc=mu, scale=[3., 2., 1.], allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.mean())
def testMeanAllowNanStatsIsTrueReturnsNaNForUndefinedBatchMembers(self):
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=[0.5, 1., 3., 5., 7.], loc=mu, scale=sigma, allow_nan_stats=True)
mean = self.evaluate(student.mean())
self.assertAllClose([np.nan, np.nan, 1., 3.3, 4.4], mean)
def testVarianceAllowNanStatsTrueReturnsNaNforUndefinedBatchMembers(self):
# df = 0.5 ==> undefined mean ==> undefined variance.
# df = 1.5 ==> infinite variance.
df = [0.5, 1.5, 3., 5., 7.]
mu = [-2, 0., 1., 3.3, 4.4]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(
df=df, loc=mu, scale=sigma, allow_nan_stats=True)
var = self.evaluate(student.variance())
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
# Slicing off first element due to nan/inf mismatch in different SciPy
# versions.
self.assertAllClose(expected_var[1:], var[1:])
def testVarianceAllowNanStatsFalseGivesCorrectValueForDefinedBatchMembers(
self):
# df = 1.5 ==> infinite variance.
df = [1.5, 3., 5., 7.]
mu = [0., 1., 3.3, 4.4]
sigma = [4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
var = self.evaluate(student.variance())
if not stats:
return
expected_var = [
stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_var, var)
def testVarianceAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
# df <= 1 ==> variance not defined
student = student_t.StudentT(df=1., loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
# df <= 1 ==> variance not defined
student = student_t.StudentT(
df=0.5, loc=0., scale=1., allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
self.evaluate(student.variance())
def testStd(self):
# Defined for all batch members.
df = [3.5, 5., 3., 5., 7.]
mu = [-2.2]
sigma = [5., 4., 3., 2., 1.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
stddev = self.evaluate(student.stddev())
mu *= len(df)
if not stats:
return
expected_stddev = [
stats.t.std(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
]
self.assertAllClose(expected_stddev, stddev)
def testMode(self):
df = [0.5, 1., 3]
mu = [-1, 0., 1]
sigma = [5., 4., 3.]
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
# Test broadcast of mu across shape of df/sigma
mode = self.evaluate(student.mode())
self.assertAllClose([-1., 0, 1], mode)
def testPdfOfSample(self):
student = student_t.StudentT(df=3., loc=np.pi, scale=1.)
num = 20000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
mean = student.mean()
mean_pdf = student.prob(student.mean())
sample_vals, pdf_vals, mean_val, mean_pdf_val = self.evaluate(
[samples, pdfs, student.mean(), mean_pdf])
self.assertEqual(samples.get_shape(), (num,))
self.assertEqual(pdfs.get_shape(), (num,))
self.assertEqual(mean.get_shape(), ())
self.assertNear(np.pi, np.mean(sample_vals), err=0.1)
self.assertNear(np.pi, mean_val, err=1e-6)
# Verify integral over sample*pdf ~= 1.
# Tolerance increased since eager was getting a value of 1.002041.
self._assertIntegral(sample_vals, pdf_vals, err=5e-2)
if not stats:
return
self.assertNear(stats.t.pdf(np.pi, 3., loc=np.pi), mean_pdf_val, err=1e-6)
def testFullyReparameterized(self):
df = constant_op.constant(2.0)
mu = constant_op.constant(1.0)
sigma = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(df)
tape.watch(mu)
tape.watch(sigma)
student = student_t.StudentT(df=df, loc=mu, scale=sigma)
samples = student.sample(100)
grad_df, grad_mu, grad_sigma = tape.gradient(samples, [df, mu, sigma])
self.assertIsNotNone(grad_df)
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
def testPdfOfSampleMultiDims(self):
student = student_t.StudentT(df=[7., 11.], loc=[[5.], [6.]], scale=3.)
self.assertAllEqual([], student.event_shape)
self.assertAllEqual([], self.evaluate(student.event_shape_tensor()))
self.assertAllEqual([2, 2], student.batch_shape)
self.assertAllEqual([2, 2], self.evaluate(student.batch_shape_tensor()))
num = 50000
samples = student.sample(num, seed=123456)
pdfs = student.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertNear(5., np.mean(sample_vals[:, 0, :]), err=0.1)
self.assertNear(6., np.mean(sample_vals[:, 1, :]), err=0.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.05)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.05)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.05)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.05)
if not stats:
return
self.assertNear(
        stats.t.var(7., loc=0., scale=3.),  # loc does not affect the variance
np.var(sample_vals[:, :, 0]),
err=1.0)
self.assertNear(
        stats.t.var(11., loc=0., scale=3.),  # loc does not affect the variance
np.var(sample_vals[:, :, 1]),
err=1.0)
def _assertIntegral(self, sample_vals, pdf_vals, err=1.5e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (sample_vals.min() - 1000, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testNegativeDofFails(self):
with self.assertRaisesOpError(r"Condition x > 0 did not hold"):
student = student_t.StudentT(
df=[2, -5.], loc=0., scale=1., validate_args=True, name="S")
self.evaluate(student.mean())
def testStudentTWithAbsDfSoftplusScale(self):
df = constant_op.constant([-3.2, -4.6])
mu = constant_op.constant([-4.2, 3.4])
sigma = constant_op.constant([-6.4, -8.8])
student = student_t.StudentTWithAbsDfSoftplusScale(
df=df, loc=mu, scale=sigma)
self.assertAllClose(
math_ops.floor(self.evaluate(math_ops.abs(df))),
self.evaluate(student.df))
self.assertAllClose(self.evaluate(mu), self.evaluate(student.loc))
self.assertAllClose(
self.evaluate(nn_ops.softplus(sigma)), self.evaluate(student.scale))
if __name__ == "__main__":
test.main()
|
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BSON (Binary JSON) encoding and decoding.
"""
import calendar
import collections
import datetime
import itertools
import re
import struct
import sys
import uuid
from codecs import (utf_8_decode as _utf_8_decode,
utf_8_encode as _utf_8_encode)
from bson.binary import (Binary, OLD_UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY,
UUIDLegacy)
from bson.code import Code
from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
from bson.dbref import DBRef
from bson.errors import (InvalidBSON,
InvalidDocument,
InvalidStringData)
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.py3compat import (b,
PY3,
iteritems,
text_type,
string_type,
reraise)
from bson.regex import Regex
from bson.son import SON, RE_TYPE
from bson.timestamp import Timestamp
from bson.tz_util import utc
try:
from bson import _cbson
_USE_C = True
except ImportError:
_USE_C = False
EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc)
EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0)
BSONNUM = b"\x01" # Floating point
BSONSTR = b"\x02" # UTF-8 string
BSONOBJ = b"\x03" # Embedded document
BSONARR = b"\x04" # Array
BSONBIN = b"\x05" # Binary
BSONUND = b"\x06" # Undefined
BSONOID = b"\x07" # ObjectId
BSONBOO = b"\x08" # Boolean
BSONDAT = b"\x09" # UTC Datetime
BSONNUL = b"\x0A" # Null
BSONRGX = b"\x0B" # Regex
BSONREF = b"\x0C" # DBRef
BSONCOD = b"\x0D" # Javascript code
BSONSYM = b"\x0E" # Symbol
BSONCWS = b"\x0F" # Javascript code with scope
BSONINT = b"\x10" # 32bit int
BSONTIM = b"\x11" # Timestamp
BSONLON = b"\x12" # 64bit int
BSONMIN = b"\xFF" # Min key
BSONMAX = b"\x7F" # Max key
_UNPACK_FLOAT = struct.Struct("<d").unpack
_UNPACK_INT = struct.Struct("<i").unpack
_UNPACK_LENGTH_SUBTYPE = struct.Struct("<iB").unpack
_UNPACK_LONG = struct.Struct("<q").unpack
_UNPACK_TIMESTAMP = struct.Struct("<II").unpack
def _get_int(data, position, dummy0, dummy1):
"""Decode a BSON int32 to python int."""
end = position + 4
return _UNPACK_INT(data[position:end])[0], end
def _get_c_string(data, position):
"""Decode a BSON 'C' string to python unicode string."""
end = data.index(b"\x00", position)
return _utf_8_decode(data[position:end], None, True)[0], end + 1
def _get_float(data, position, dummy0, dummy1):
"""Decode a BSON double to python float."""
end = position + 8
return _UNPACK_FLOAT(data[position:end])[0], end
def _get_string(data, position, obj_end, dummy):
"""Decode a BSON string to python unicode string."""
length = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length < 1 or obj_end - position < length:
raise InvalidBSON("invalid string length")
end = position + length - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("invalid end of string")
return _utf_8_decode(data[position:end], None, True)[0], end + 1
def _get_object(data, position, obj_end, opts):
"""Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef."""
obj_size = _UNPACK_INT(data[position:position + 4])[0]
end = position + obj_size - 1
if data[end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
raise InvalidBSON("invalid object length")
obj = _elements_to_dict(data, position + 4, end, opts)
position += obj_size
if "$ref" in obj:
return (DBRef(obj.pop("$ref"), obj.pop("$id", None),
obj.pop("$db", None), obj), position)
return obj, position
def _get_array(data, position, obj_end, opts):
"""Decode a BSON array to python list."""
size = _UNPACK_INT(data[position:position + 4])[0]
end = position + size - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("bad eoo")
position += 4
end -= 1
result = []
    # Avoid doing global and attribute lookups in the loop.
append = result.append
index = data.index
getter = _ELEMENT_GETTER
while position < end:
element_type = data[position:position + 1]
# Just skip the keys.
position = index(b'\x00', position) + 1
value, position = getter[element_type](data, position, obj_end, opts)
append(value)
return result, position + 1
def _get_binary(data, position, dummy, opts):
"""Decode a BSON binary to bson.binary.Binary or python UUID."""
length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5])
position += 5
if subtype == 2:
length2 = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length2 != length - 4:
raise InvalidBSON("invalid binary (st 2) - lengths don't match!")
length = length2
end = position + length
if subtype in (3, 4):
# Java Legacy
uuid_representation = opts.uuid_representation
if uuid_representation == JAVA_LEGACY:
java = data[position:end]
value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1])
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
value = uuid.UUID(bytes_le=data[position:end])
# Python
else:
value = uuid.UUID(bytes=data[position:end])
return value, end
# Python3 special case. Decode subtype 0 to 'bytes'.
if PY3 and subtype == 0:
value = data[position:end]
else:
value = Binary(data[position:end], subtype)
return value, end
def _get_oid(data, position, dummy0, dummy1):
"""Decode a BSON ObjectId to bson.objectid.ObjectId."""
end = position + 12
return ObjectId(data[position:end]), end
def _get_boolean(data, position, dummy0, dummy1):
"""Decode a BSON true/false to python True/False."""
end = position + 1
return data[position:end] == b"\x01", end
def _get_date(data, position, dummy, opts):
"""Decode a BSON datetime to python datetime.datetime."""
end = position + 8
millis = _UNPACK_LONG(data[position:end])[0]
diff = ((millis % 1000) + 1000) % 1000
seconds = (millis - diff) / 1000
micros = diff * 1000
if opts.tz_aware:
return EPOCH_AWARE + datetime.timedelta(
seconds=seconds, microseconds=micros), end
else:
return EPOCH_NAIVE + datetime.timedelta(
seconds=seconds, microseconds=micros), end
def _get_code(data, position, obj_end, opts):
"""Decode a BSON code to bson.code.Code."""
code, position = _get_string(data, position, obj_end, opts)
return Code(code), position
def _get_code_w_scope(data, position, obj_end, opts):
"""Decode a BSON code_w_scope to bson.code.Code."""
code, position = _get_string(data, position + 4, obj_end, opts)
scope, position = _get_object(data, position, obj_end, opts)
return Code(code, scope), position
def _get_regex(data, position, dummy0, dummy1):
"""Decode a BSON regex to bson.regex.Regex or a python pattern object."""
pattern, position = _get_c_string(data, position)
bson_flags, position = _get_c_string(data, position)
bson_re = Regex(pattern, bson_flags)
return bson_re, position
def _get_ref(data, position, obj_end, opts):
"""Decode (deprecated) BSON DBPointer to bson.dbref.DBRef."""
collection, position = _get_string(data, position, obj_end, opts)
oid, position = _get_oid(data, position, obj_end, opts)
return DBRef(collection, oid), position
def _get_timestamp(data, position, dummy0, dummy1):
"""Decode a BSON timestamp to bson.timestamp.Timestamp."""
end = position + 8
inc, timestamp = _UNPACK_TIMESTAMP(data[position:end])
return Timestamp(timestamp, inc), end
def _get_int64(data, position, dummy0, dummy1):
"""Decode a BSON int64 to bson.int64.Int64."""
end = position + 8
return Int64(_UNPACK_LONG(data[position:end])[0]), end
# Each decoder function's signature is:
# - data: bytes
# - position: int, beginning of object in 'data' to decode
# - obj_end: int, end of object to decode in 'data' if variable-length type
# - opts: a CodecOptions
_ELEMENT_GETTER = {
BSONNUM: _get_float,
BSONSTR: _get_string,
BSONOBJ: _get_object,
BSONARR: _get_array,
BSONBIN: _get_binary,
BSONUND: lambda w, x, y, z: (None, x), # Deprecated undefined
BSONOID: _get_oid,
BSONBOO: _get_boolean,
BSONDAT: _get_date,
BSONNUL: lambda w, x, y, z: (None, x),
BSONRGX: _get_regex,
BSONREF: _get_ref, # Deprecated DBPointer
BSONCOD: _get_code,
BSONSYM: _get_string, # Deprecated symbol
BSONCWS: _get_code_w_scope,
BSONINT: _get_int,
BSONTIM: _get_timestamp,
BSONLON: _get_int64,
BSONMIN: lambda w, x, y, z: (MinKey(), x),
BSONMAX: lambda w, x, y, z: (MaxKey(), x)}
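# Illustrative sketch (not part of the original module; the helper name below is
# ours): every getter shares the signature (data, position, obj_end, opts) and
# returns (decoded_value, new_position). The int32 getter, for example, can be
# exercised directly on a hand-built little-endian buffer:
def _example_element_getter():
    buf = b"\x2a\x00\x00\x00"  # int32 value 42, little-endian
    value, new_position = _get_int(buf, 0, len(buf), DEFAULT_CODEC_OPTIONS)
    assert (value, new_position) == (42, 4)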
def _element_to_dict(data, position, obj_end, opts):
"""Decode a single key, value pair."""
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position)
value, position = _ELEMENT_GETTER[element_type](data,
position, obj_end, opts)
return element_name, value, position
def _elements_to_dict(data, position, obj_end, opts):
"""Decode a BSON document."""
result = opts.document_class()
end = obj_end - 1
while position < end:
(key, value, position) = _element_to_dict(data, position, obj_end, opts)
result[key] = value
return result
def _bson_to_dict(data, opts):
"""Decode a BSON string to document_class."""
try:
obj_size = _UNPACK_INT(data[:4])[0]
except struct.error as exc:
raise InvalidBSON(str(exc))
if obj_size != len(data):
raise InvalidBSON("invalid object size")
if data[obj_size - 1:obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
try:
return _elements_to_dict(data, 4, obj_size - 1, opts)
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
_bson_to_dict = _cbson._bson_to_dict
_PACK_FLOAT = struct.Struct("<d").pack
_PACK_INT = struct.Struct("<i").pack
_PACK_LENGTH_SUBTYPE = struct.Struct("<iB").pack
_PACK_LONG = struct.Struct("<q").pack
_PACK_TIMESTAMP = struct.Struct("<II").pack
_LIST_NAMES = tuple(b(str(i)) + b"\x00" for i in range(1000))
def gen_list_name():
"""Generate "keys" for encoded lists in the sequence
b"0\x00", b"1\x00", b"2\x00", ...
The first 1000 keys are returned from a pre-built cache. All
subsequent keys are generated on the fly.
"""
for name in _LIST_NAMES:
yield name
counter = itertools.count(1000)
while True:
yield b(str(next(counter))) + b"\x00"
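# Illustrative sketch (not part of the original module; the helper name is ours):
# gen_list_name() yields the NUL-terminated index keys used when encoding lists.
def _example_gen_list_name():
    names = gen_list_name()
    assert [next(names) for _ in range(3)] == [b"0\x00", b"1\x00", b"2\x00"]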
def _make_c_string_check(string):
"""Make a 'C' string, checking for embedded NUL characters."""
if isinstance(string, bytes):
if b"\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
def _make_c_string(string):
"""Make a 'C' string."""
if isinstance(string, bytes):
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
return _utf_8_encode(string)[0] + b"\x00"
if PY3:
def _make_name(string):
"""Make a 'C' string suitable for a BSON key."""
# Keys can only be text in python 3.
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
else:
# Keys can be unicode or bytes in python 2.
_make_name = _make_c_string_check
def _encode_float(name, value, dummy0, dummy1):
"""Encode a float."""
return b"\x01" + name + _PACK_FLOAT(value)
if PY3:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python bytes."""
# Python3 special case. Store 'bytes' as BSON binary subtype 0.
return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value
else:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python str (python 2.x)."""
try:
_utf_8_decode(value, None, True)
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % (value,))
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_mapping(name, value, check_keys, opts):
"""Encode a mapping type."""
data = b"".join([_element_to_bson(key, val, check_keys, opts)
for key, val in iteritems(value)])
return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_dbref(name, value, check_keys, opts):
"""Encode bson.dbref.DBRef."""
buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00")
begin = len(buf) - 4
buf += _name_value_to_bson(b"$ref\x00",
value.collection, check_keys, opts)
buf += _name_value_to_bson(b"$id\x00",
value.id, check_keys, opts)
if value.database is not None:
buf += _name_value_to_bson(
b"$db\x00", value.database, check_keys, opts)
for key, val in iteritems(value._DBRef__kwargs):
buf += _element_to_bson(key, val, check_keys, opts)
buf += b"\x00"
buf[begin:begin + 4] = _PACK_INT(len(buf) - begin)
return bytes(buf)
def _encode_list(name, value, check_keys, opts):
"""Encode a list/tuple."""
lname = gen_list_name()
data = b"".join([_name_value_to_bson(next(lname), item,
check_keys, opts)
for item in value])
return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_text(name, value, dummy0, dummy1):
"""Encode a python unicode (python 2.x) / str (python 3.x)."""
value = _utf_8_encode(value)[0]
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_binary(name, value, dummy0, dummy1):
"""Encode bson.binary.Binary."""
subtype = value.subtype
if subtype == 2:
value = _PACK_INT(len(value)) + value
return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value
def _encode_uuid(name, value, dummy, opts):
"""Encode uuid.UUID."""
uuid_representation = opts.uuid_representation
# Python Legacy Common Case
if uuid_representation == OLD_UUID_SUBTYPE:
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes
# Java Legacy
elif uuid_representation == JAVA_LEGACY:
from_uuid = value.bytes
data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
# Microsoft GUID representation.
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le
# New
else:
return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes
def _encode_objectid(name, value, dummy0, dummy1):
"""Encode bson.objectid.ObjectId."""
return b"\x07" + name + value.binary
def _encode_bool(name, value, dummy0, dummy1):
"""Encode a python boolean (True/False)."""
return b"\x08" + name + (value and b"\x01" or b"\x00")
def _encode_datetime(name, value, dummy0, dummy1):
"""Encode datetime.datetime."""
if value.utcoffset() is not None:
value = value - value.utcoffset()
millis = int(calendar.timegm(value.timetuple()) * 1000 +
value.microsecond / 1000)
return b"\x09" + name + _PACK_LONG(millis)
def _encode_none(name, dummy0, dummy1, dummy2):
"""Encode python None."""
return b"\x0A" + name
def _encode_regex(name, value, dummy0, dummy1):
"""Encode a python regex or bson.regex.Regex."""
flags = value.flags
# Python 2 common case
if flags == 0:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00"
# Python 3 common case
elif flags == re.UNICODE:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00"
else:
sflags = b""
if flags & re.IGNORECASE:
sflags += b"i"
if flags & re.LOCALE:
sflags += b"l"
if flags & re.MULTILINE:
sflags += b"m"
if flags & re.DOTALL:
sflags += b"s"
if flags & re.UNICODE:
sflags += b"u"
if flags & re.VERBOSE:
sflags += b"x"
sflags += b"\x00"
return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags
def _encode_code(name, value, dummy, opts):
"""Encode bson.code.Code."""
cstring = _make_c_string(value)
cstrlen = len(cstring)
if not value.scope:
return b"\x0D" + name + _PACK_INT(cstrlen) + cstring
scope = _dict_to_bson(value.scope, False, opts, False)
full_length = _PACK_INT(8 + cstrlen + len(scope))
return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope
def _encode_int(name, value, dummy0, dummy1):
"""Encode a python int."""
if -2147483648 <= value <= 2147483647:
return b"\x10" + name + _PACK_INT(value)
else:
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
def _encode_timestamp(name, value, dummy0, dummy1):
"""Encode bson.timestamp.Timestamp."""
return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time)
def _encode_long(name, value, dummy0, dummy1):
"""Encode a python long (python 2.x)"""
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
def _encode_minkey(name, dummy0, dummy1, dummy2):
"""Encode bson.min_key.MinKey."""
return b"\xFF" + name
def _encode_maxkey(name, dummy0, dummy1, dummy2):
"""Encode bson.max_key.MaxKey."""
return b"\x7F" + name
# Each encoder function's signature is:
# - name: utf-8 bytes
# - value: a Python data type, e.g. a Python int for _encode_int
# - check_keys: bool, whether to check for invalid names
# - opts: a CodecOptions
_ENCODERS = {
bool: _encode_bool,
bytes: _encode_bytes,
datetime.datetime: _encode_datetime,
dict: _encode_mapping,
float: _encode_float,
int: _encode_int,
list: _encode_list,
# unicode in py2, str in py3
text_type: _encode_text,
tuple: _encode_list,
type(None): _encode_none,
uuid.UUID: _encode_uuid,
Binary: _encode_binary,
Int64: _encode_long,
Code: _encode_code,
DBRef: _encode_dbref,
MaxKey: _encode_maxkey,
MinKey: _encode_minkey,
ObjectId: _encode_objectid,
Regex: _encode_regex,
RE_TYPE: _encode_regex,
SON: _encode_mapping,
Timestamp: _encode_timestamp,
UUIDLegacy: _encode_binary,
# Special case. This will never be looked up directly.
collections.Mapping: _encode_mapping,
}
_MARKERS = {
5: _encode_binary,
7: _encode_objectid,
11: _encode_regex,
13: _encode_code,
17: _encode_timestamp,
18: _encode_long,
100: _encode_dbref,
127: _encode_maxkey,
255: _encode_minkey,
}
if not PY3:
_ENCODERS[long] = _encode_long
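# Illustrative sketch (not part of the original module; the helper name is ours):
# every encoder receives the already NUL-terminated key bytes and returns
# type byte + key + encoded value. Ints that fit in 32 bits get the \x10 marker,
# larger ones fall back to the \x12 (int64) marker.
def _example_encode_int():
    name = b"n\x00"
    assert _encode_int(name, 42, False, DEFAULT_CODEC_OPTIONS) == b"\x10n\x00\x2a\x00\x00\x00"
    assert _encode_int(name, 2 ** 40, False, DEFAULT_CODEC_OPTIONS)[:1] == b"\x12"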
def _name_value_to_bson(name, value, check_keys, opts):
"""Encode a single name, value pair."""
# First see if the type is already cached. KeyError will only ever
# happen once per subtype.
try:
return _ENCODERS[type(value)](name, value, check_keys, opts)
except KeyError:
pass
# Second, fall back to trying _type_marker. This has to be done
# before the loop below since users could subclass one of our
# custom types that subclasses a python built-in (e.g. Binary)
marker = getattr(value, "_type_marker", None)
if isinstance(marker, int) and marker in _MARKERS:
func = _MARKERS[marker]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
# If all else fails test each base type. This will only happen once for
# a subtype of a supported base type.
for base in _ENCODERS:
if isinstance(value, base):
func = _ENCODERS[base]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
raise InvalidDocument("cannot convert value of type %s to bson" %
type(value))
def _element_to_bson(key, value, check_keys, opts):
"""Encode a single key, value pair."""
if not isinstance(key, string_type):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % (key,))
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % (key,))
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % (key,))
name = _make_name(key)
return _name_value_to_bson(name, value, check_keys, opts)
def _dict_to_bson(doc, check_keys, opts, top_level=True):
"""Encode a document to BSON."""
try:
elements = []
if top_level and "_id" in doc:
elements.append(_name_value_to_bson(b"_id\x00", doc["_id"],
check_keys, opts))
for (key, value) in iteritems(doc):
if not top_level or key != "_id":
elements.append(_element_to_bson(key, value,
check_keys, opts))
except AttributeError:
raise TypeError("encoder expected a mapping type but got: %r" % (doc,))
encoded = b"".join(elements)
return _PACK_INT(len(encoded) + 5) + encoded + b"\x00"
if _USE_C:
_dict_to_bson = _cbson._dict_to_bson
_CODEC_OPTIONS_TYPE_ERROR = TypeError(
"codec_options must be an instance of CodecOptions")
def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON regular
expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
docs = []
position = 0
end = len(data) - 1
try:
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
if len(data) - position < obj_size:
raise InvalidBSON("invalid object size")
obj_end = position + obj_size - 1
if data[obj_end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
docs.append(_elements_to_dict(data,
position + 4,
obj_end,
codec_options))
position += obj_size
return docs
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
decode_all = _cbson.decode_all
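# Illustrative sketch (not part of the original module; the helper name is ours):
# decode_all() turns a byte string of back-to-back BSON documents into a list.
# BSON.encode (defined further down in this module) is used here only to build
# valid input.
def _example_decode_all():
    data = BSON.encode({"a": 1}) + BSON.encode({"b": 2})
    assert decode_all(data) == [{"a": 1}, {"b": 2}]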
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents as a generator.
Works similarly to the decode_all function, but yields one document at a
time.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
position = 0
end = len(data) - 1
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
elements = data[position:position + obj_size]
position += obj_size
yield _bson_to_dict(elements, codec_options)
def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode bson data from a file to multiple documents as a generator.
Works similarly to the decode_all function, but reads from the file object
in chunks and parses bson in chunks, yielding one document at a time.
:Parameters:
- `file_obj`: A file object containing BSON data.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
while True:
# Read size of next object.
size_data = file_obj.read(4)
if len(size_data) == 0:
            break  # Finished with file normally.
elif len(size_data) != 4:
raise InvalidBSON("cut off in middle of objsize")
obj_size = _UNPACK_INT(size_data)[0] - 4
elements = size_data + file_obj.read(obj_size)
yield _bson_to_dict(elements, codec_options)
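# Illustrative sketch (not part of the original module; the helper name is ours):
# decode_file_iter() reads from any file-like object, one document at a time.
def _example_decode_file_iter():
    from io import BytesIO
    stream = BytesIO(BSON.encode({"a": 1}) + BSON.encode({"b": 2}))
    assert list(decode_file_iter(stream)) == [{"a": 1}, {"b": 2}]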
def is_valid(bson):
"""Check that the given string represents valid :class:`BSON` data.
Raises :class:`TypeError` if `bson` is not an instance of
:class:`str` (:class:`bytes` in python 3). Returns ``True``
if `bson` is valid :class:`BSON`, ``False`` otherwise.
:Parameters:
- `bson`: the data to be validated
"""
if not isinstance(bson, bytes):
raise TypeError("BSON data must be an instance of a subclass of bytes")
try:
_bson_to_dict(bson, DEFAULT_CODEC_OPTIONS)
return True
except Exception:
return False
class BSON(bytes):
"""BSON (Binary JSON) data.
"""
@classmethod
def encode(cls, document, check_keys=False,
codec_options=DEFAULT_CODEC_OPTIONS):
"""Encode a document to a new :class:`BSON` instance.
A document can be any mapping type (like :class:`dict`).
Raises :class:`TypeError` if `document` is not a mapping type,
or contains keys that are not instances of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~bson.errors.InvalidDocument` if `document` cannot be
converted to :class:`BSON`.
:Parameters:
- `document`: mapping type representing a document
- `check_keys` (optional): check if keys start with '$' or
contain '.', raising :class:`~bson.errors.InvalidDocument` in
either case
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `uuid_subtype` option with `codec_options`.
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return cls(_dict_to_bson(document, check_keys, codec_options))
def decode(self, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode this BSON data.
By default, returns a BSON document represented as a Python
:class:`dict`. To use a different :class:`MutableMapping` class,
configure a :class:`~bson.codec_options.CodecOptions`::
>>> import collections # From Python standard library.
>>> import bson
>>> from bson.codec_options import CodecOptions
>>> data = bson.BSON.encode({'a': 1})
        >>> decoded_doc = bson.BSON.decode(data)
        >>> type(decoded_doc)
        <type 'dict'>
>>> options = CodecOptions(document_class=collections.OrderedDict)
>>> decoded_doc = bson.BSON.decode(data, codec_options=options)
>>> type(decoded_doc)
<class 'collections.OrderedDict'>
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return _bson_to_dict(self, codec_options)
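# Illustrative sketch (not part of the original module; the helper name is ours):
# BSON.encode() and decode() round-trip a mapping through its binary form.
def _example_bson_roundtrip():
    raw = BSON.encode({"hello": "world"})
    assert isinstance(raw, bytes)
    assert raw.decode() == {"hello": "world"}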
def has_c():
"""Is the C extension installed?
"""
return _USE_C
|
|
"""
Mazhalai
-------
Custom code interpreter.
:copyright: (c) 2017 by Gokul Sridhar.
:license: MIT
"""
from mazhalai import ast
from mazhalai.errors import MazhalaiSyntaxError
class ParserError(MazhalaiSyntaxError):
def __init__(self, message, token):
super(ParserError, self).__init__(message, token.line, token.column)
def enter_scope(parser, name):
class State(object):
def __enter__(self):
parser.scope.append(name)
def __exit__(self, exc_type, exc_val, exc_tb):
parser.scope.pop()
return State()
class Subparser(object):
PRECEDENCE = {
'call': 10,
'subscript': 10,
'unary': 9,
'*': 7,
'/': 7,
'%': 7,
'+': 6,
'-': 6,
'>': 5,
'>=': 5,
'<': 5,
'<=': 5,
'==': 4,
'!=': 4,
'&&': 3,
'||': 2,
'..': 1,
'...': 1,
}
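    # Higher numbers bind tighter: in `1 + 2 * 3` the `*` level (7) is reduced
    # before control returns to the `+` level (6), so the expression groups as
    # 1 + (2 * 3); calls and subscripts bind tightest, `..`/`...` the loosest.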
def get_subparser(self, token, subparsers, default=None):
cls = subparsers.get(token.name, default)
if cls is not None:
return cls()
class PrefixSubparser(Subparser):
def parse(self, parser, tokens):
raise NotImplementedError()
class InfixSubparser(Subparser):
def parse(self, parser, tokens, left):
raise NotImplementedError()
def get_precedence(self, token):
raise NotImplementedError()
# number_expr: NUMBER
class NumberExpression(PrefixSubparser):
def parse(self, parser, tokens):
token = tokens.consume_expected('NUMBER')
return ast.Number(token.value)
# str_expr: STRING
class StringExpression(PrefixSubparser):
def parse(self, parser, tokens):
token = tokens.consume_expected('STRING')
return ast.String(token.value)
# name_expr: NAME
class NameExpression(PrefixSubparser):
def parse(self, parser, tokens):
token = tokens.consume_expected('NAME')
return ast.Identifier(token.value)
# prefix_expr: OPERATOR expr
class UnaryOperatorExpression(PrefixSubparser):
SUPPORTED_OPERATORS = ['-', '!']
def parse(self, parser, tokens):
token = tokens.consume_expected('OPERATOR')
if token.value not in self.SUPPORTED_OPERATORS:
raise ParserError('Unary operator {} is not supported'.format(token.value), token)
right = Expression().parse(parser, tokens, self.get_precedence(token))
if right is None:
            raise ParserError('Expected expression', tokens.consume())
return ast.UnaryOperator(token.value, right)
def get_precedence(self, token):
return self.PRECEDENCE['unary']
# group_expr: LPAREN expr RPAREN
class GroupExpression(PrefixSubparser):
def parse(self, parser, tokens):
tokens.consume_expected('LPAREN')
right = Expression().parse(parser, tokens)
tokens.consume_expected('RPAREN')
return right
# array_expr: LBRACK list_of_expr? RBRACK
class ArrayExpression(PrefixSubparser):
def parse(self, parser, tokens):
tokens.consume_expected('LBRACK')
items = ListOfExpressions().parse(parser, tokens)
tokens.consume_expected('RBRACK')
return ast.Array(items)
# dict_expr: LCBRACK (expr COLON expr COMMA)* RCBRACK
class DictionaryExpression(PrefixSubparser):
def _parse_keyvals(self, parser, tokens):
items = []
while not tokens.is_end():
key = Expression().parse(parser, tokens)
if key is not None:
tokens.consume_expected('COLON')
value = Expression().parse(parser, tokens)
if value is None:
raise ParserError('Dictionary value expected', tokens.consume())
items.append((key, value))
else:
break
if tokens.current().name == 'COMMA':
tokens.consume_expected('COMMA')
else:
break
return items
def parse(self, parser, tokens):
tokens.consume_expected('LCBRACK')
items = self._parse_keyvals(parser, tokens)
tokens.consume_expected('RCBRACK')
return ast.Dictionary(items)
# infix_expr: expr OPERATOR expr
class BinaryOperatorExpression(InfixSubparser):
def parse(self, parser, tokens, left):
token = tokens.consume_expected('OPERATOR')
right = Expression().parse(parser, tokens, self.get_precedence(token))
if right is None:
            raise ParserError('Expected expression', tokens.consume())
return ast.BinaryOperator(token.value, left, right)
def get_precedence(self, token):
return self.PRECEDENCE[token.value]
# call_expr: NAME LPAREN list_of_expr? RPAREN
class CallExpression(InfixSubparser):
def parse(self, parser, tokens, left):
tokens.consume_expected('LPAREN')
arguments = ListOfExpressions().parse(parser, tokens)
tokens.consume_expected('RPAREN')
return ast.Call(left, arguments)
def get_precedence(self, token):
return self.PRECEDENCE['call']
# subscript_expr: NAME LBRACK expr RBRACK
class SubscriptOperatorExpression(InfixSubparser):
def parse(self, parser, tokens, left):
tokens.consume_expected('LBRACK')
key = Expression().parse(parser, tokens)
if key is None:
raise ParserError('Subscript operator key is required', tokens.current())
tokens.consume_expected('RBRACK')
return ast.SubscriptOperator(left, key)
def get_precedence(self, token):
return self.PRECEDENCE['subscript']
# expr: number_expr | str_expr | name_expr | group_expr | array_expr | dict_expr | prefix_expr | infix_expr | call_expr
# | subscript_expr
class Expression(Subparser):
def get_prefix_subparser(self, token):
return self.get_subparser(token, {
'NUMBER': NumberExpression,
'STRING': StringExpression,
'NAME': NameExpression,
'LPAREN': GroupExpression,
'LBRACK': ArrayExpression,
'LCBRACK': DictionaryExpression,
'OPERATOR': UnaryOperatorExpression,
})
def get_infix_subparser(self, token):
return self.get_subparser(token, {
'OPERATOR': BinaryOperatorExpression,
'LPAREN': CallExpression,
'LBRACK': SubscriptOperatorExpression,
})
def get_next_precedence(self, tokens):
if not tokens.is_end():
token = tokens.current()
parser = self.get_infix_subparser(token)
if parser is not None:
return parser.get_precedence(token)
return 0
def parse(self, parser, tokens, precedence=0):
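        # Pratt-style precedence climbing: parse one prefix expression, then keep
        # folding infix operators into `left` while the upcoming operator binds
        # tighter than the precedence this call was entered with.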
subparser = self.get_prefix_subparser(tokens.current())
if subparser is not None:
left = subparser.parse(parser, tokens)
if left is not None:
while precedence < self.get_next_precedence(tokens):
op = self.get_infix_subparser(tokens.current()).parse(parser, tokens, left)
if op is not None:
left = op
return left
# list_of_expr: (expr COMMA)*
class ListOfExpressions(Subparser):
def parse(self, parser, tokens):
items = []
while not tokens.is_end():
exp = Expression().parse(parser, tokens)
if exp is not None:
items.append(exp)
else:
break
if tokens.current().name == 'COMMA':
tokens.consume_expected('COMMA')
else:
break
return items
# block: NEWLINE INDENT stmnts DEDENT
class Block(Subparser):
def parse(self, parser, tokens):
tokens.consume_expected('NEWLINE', 'INDENT')
statements = Statements().parse(parser, tokens)
tokens.consume_expected('DEDENT')
return statements
# func_stmnt: FUNCTION NAME LPAREN func_params? RPAREN COLON block
class FunctionStatement(Subparser):
# func_params: (NAME COMMA)*
def _parse_params(self, tokens):
params = []
if tokens.current().name == 'NAME':
while not tokens.is_end():
id_token = tokens.consume_expected('NAME')
params.append(id_token.value)
if tokens.current().name == 'COMMA':
tokens.consume_expected('COMMA')
else:
break
return params
def parse(self, parser, tokens):
tokens.consume_expected('FUNCTION')
id_token = tokens.consume_expected('NAME')
tokens.consume_expected('LPAREN')
arguments = self._parse_params(tokens)
tokens.consume_expected('RPAREN', 'COLON')
with enter_scope(parser, 'function'):
block = Block().parse(parser, tokens)
if block is None:
raise ParserError('Expected function body', tokens.current())
return ast.Function(id_token.value, arguments, block)
# cond_stmnt: IF expr COLON block (ELIF COLON block)* (ELSE COLON block)?
class ConditionalStatement(Subparser):
def _parse_elif_conditions(self, parser, tokens):
conditions = []
while not tokens.is_end() and tokens.current().name == 'ELIF':
tokens.consume_expected('ELIF')
test = Expression().parse(parser, tokens)
if test is None:
raise ParserError('Expected `elif` condition', tokens.current())
tokens.consume_expected('COLON')
block = Block().parse(parser, tokens)
if block is None:
raise ParserError('Expected `elif` body', tokens.current())
conditions.append(ast.ConditionElif(test, block))
return conditions
def _parse_else(self, parser, tokens):
else_block = None
if not tokens.is_end() and tokens.current().name == 'ELSE':
tokens.consume_expected('ELSE', 'COLON')
else_block = Block().parse(parser, tokens)
if else_block is None:
raise ParserError('Expected `else` body', tokens.current())
return else_block
def parse(self, parser, tokens):
tokens.consume_expected('IF')
test = Expression().parse(parser, tokens)
if test is None:
raise ParserError('Expected `if` condition', tokens.current())
tokens.consume_expected('COLON')
if_block = Block().parse(parser, tokens)
if if_block is None:
raise ParserError('Expected if body', tokens.current())
elif_conditions = self._parse_elif_conditions(parser, tokens)
else_block = self._parse_else(parser, tokens)
return ast.Condition(test, if_block, elif_conditions, else_block)
# match_stmnt: MATCH expr COLON NEWLINE INDENT match_when+ (ELSE COLON block)? DEDENT
class MatchStatement(Subparser):
# match_when: WHEN expr COLON block
def _parse_when(self, parser, tokens):
tokens.consume_expected('WHEN')
pattern = Expression().parse(parser, tokens)
if pattern is None:
raise ParserError('Pattern expression expected', tokens.current())
tokens.consume_expected('COLON')
block = Block().parse(parser, tokens)
return ast.MatchPattern(pattern, block)
def parse(self, parser, tokens):
tokens.consume_expected('MATCH')
test = Expression().parse(parser, tokens)
tokens.consume_expected('COLON', 'NEWLINE', 'INDENT')
patterns = []
while not tokens.is_end() and tokens.current().name == 'WHEN':
patterns.append(self._parse_when(parser, tokens))
if not patterns:
            raise ParserError('One or more `when` patterns expected', tokens.current())
else_block = None
if not tokens.is_end() and tokens.current().name == 'ELSE':
tokens.consume_expected('ELSE', 'COLON')
else_block = Block().parse(parser, tokens)
if else_block is None:
raise ParserError('Expected `else` body', tokens.current())
tokens.consume_expected('DEDENT')
return ast.Match(test, patterns, else_block)
# loop_while_stmnt: WHILE expr COLON block
class WhileLoopStatement(Subparser):
def parse(self, parser, tokens):
tokens.consume_expected('WHILE')
test = Expression().parse(parser, tokens)
if test is None:
raise ParserError('While condition expected', tokens.current())
tokens.consume_expected('COLON')
with enter_scope(parser, 'loop'):
block = Block().parse(parser, tokens)
if block is None:
raise ParserError('Expected loop body', tokens.current())
return ast.WhileLoop(test, block)
# loop_for_stmnt: FOR NAME IN expr COLON block
class ForLoopStatement(Subparser):
def parse(self, parser, tokens):
tokens.consume_expected('FOR')
id_token = tokens.consume_expected('NAME')
tokens.consume_expected('IN')
collection = Expression().parse(parser, tokens)
tokens.consume_expected('COLON')
with enter_scope(parser, 'loop'):
block = Block().parse(parser, tokens)
if block is None:
raise ParserError('Expected loop body', tokens.current())
return ast.ForLoop(id_token.value, collection, block)
# return_stmnt: RETURN expr?
class ReturnStatement(Subparser):
def parse(self, parser, tokens):
if not parser.scope or 'function' not in parser.scope:
raise ParserError('Return outside of function', tokens.current())
tokens.consume_expected('RETURN')
value = Expression().parse(parser, tokens)
tokens.consume_expected('NEWLINE')
return ast.Return(value)
# break_stmnt: BREAK
class BreakStatement(Subparser):
def parse(self, parser, tokens):
if not parser.scope or parser.scope[-1] != 'loop':
raise ParserError('Break outside of loop', tokens.current())
tokens.consume_expected('BREAK', 'NEWLINE')
return ast.Break()
# cont_stmnt: CONTINUE
class ContinueStatement(Subparser):
def parse(self, parser, tokens):
if not parser.scope or parser.scope[-1] != 'loop':
raise ParserError('Continue outside of loop', tokens.current())
tokens.consume_expected('CONTINUE', 'NEWLINE')
return ast.Continue()
# assign_stmnt: expr ASSIGN expr NEWLINE
class AssignmentStatement(Subparser):
def parse(self, parser, tokens, left):
tokens.consume_expected('ASSIGN')
right = Expression().parse(parser, tokens)
tokens.consume_expected('NEWLINE')
return ast.Assignment(left, right)
# expr_stmnt: assign_stmnt
# | expr NEWLINE
class ExpressionStatement(Subparser):
def parse(self, parser, tokens):
exp = Expression().parse(parser, tokens)
if exp is not None:
if tokens.current().name == 'ASSIGN':
return AssignmentStatement().parse(parser, tokens, exp)
else:
tokens.consume_expected('NEWLINE')
return exp
# stmnts: stmnt*
class Statements(Subparser):
def get_statement_subparser(self, token):
return self.get_subparser(token, {
'FUNCTION': FunctionStatement,
'IF': ConditionalStatement,
'MATCH': MatchStatement,
'WHILE': WhileLoopStatement,
'FOR': ForLoopStatement,
'RETURN': ReturnStatement,
'BREAK': BreakStatement,
'CONTINUE': ContinueStatement,
}, ExpressionStatement)
def parse(self, parser, tokens):
statements = []
while not tokens.is_end():
statement = self.get_statement_subparser(tokens.current()).parse(parser, tokens)
if statement is not None:
statements.append(statement)
else:
break
return statements
# prog: stmnts
class Program(Subparser):
def parse(self, parser, tokens):
statements = Statements().parse(parser, tokens)
tokens.expect_end()
return ast.Program(statements)
class Parser(object):
def __init__(self):
self.scope = None
def parse(self, tokens):
self.scope = []
return Program().parse(self, tokens)
|
|
from operator import attrgetter
from django.db import connection
from django.db.models import FileField, Value
from django.db.models.functions import Lower
from django.test import (
TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import (
Country, NoFields, NullableFields, Pizzeria, ProxyCountry,
ProxyMultiCountry, ProxyMultiProxyCountry, ProxyProxyCountry, Restaurant,
State, TwoFields,
)
class BulkCreateTests(TestCase):
def setUp(self):
self.data = [
Country(name="United States of America", iso_two_letter="US"),
Country(name="The Netherlands", iso_two_letter="NL"),
Country(name="Germany", iso_two_letter="DE"),
Country(name="Czech Republic", iso_two_letter="CZ")
]
def test_simple(self):
created = Country.objects.bulk_create(self.data)
self.assertEqual(len(created), 4)
self.assertQuerysetEqual(Country.objects.order_by("-name"), [
"United States of America", "The Netherlands", "Germany", "Czech Republic"
], attrgetter("name"))
created = Country.objects.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Country.objects.count(), 4)
@skipUnlessDBFeature('has_bulk_insert')
def test_efficiency(self):
with self.assertNumQueries(1):
Country.objects.bulk_create(self.data)
def test_multi_table_inheritance_unsupported(self):
expected_message = "Can't bulk create a multi-table inherited model"
with self.assertRaisesMessage(ValueError, expected_message):
Pizzeria.objects.bulk_create([
Pizzeria(name="The Art of Pizza"),
])
with self.assertRaisesMessage(ValueError, expected_message):
ProxyMultiCountry.objects.bulk_create([
ProxyMultiCountry(name="Fillory", iso_two_letter="FL"),
])
with self.assertRaisesMessage(ValueError, expected_message):
ProxyMultiProxyCountry.objects.bulk_create([
ProxyMultiProxyCountry(name="Fillory", iso_two_letter="FL"),
])
def test_proxy_inheritance_supported(self):
ProxyCountry.objects.bulk_create([
ProxyCountry(name="Qwghlm", iso_two_letter="QW"),
Country(name="Tortall", iso_two_letter="TA"),
])
self.assertQuerysetEqual(ProxyCountry.objects.all(), {
"Qwghlm", "Tortall"
}, attrgetter("name"), ordered=False)
ProxyProxyCountry.objects.bulk_create([
ProxyProxyCountry(name="Netherlands", iso_two_letter="NT"),
])
self.assertQuerysetEqual(ProxyProxyCountry.objects.all(), {
"Qwghlm", "Tortall", "Netherlands",
}, attrgetter("name"), ordered=False)
def test_non_auto_increment_pk(self):
State.objects.bulk_create([
State(two_letter_code=s)
for s in ["IL", "NY", "CA", "ME"]
])
self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
"CA", "IL", "ME", "NY",
], attrgetter("two_letter_code"))
@skipUnlessDBFeature('has_bulk_insert')
def test_non_auto_increment_pk_efficiency(self):
with self.assertNumQueries(1):
State.objects.bulk_create([
State(two_letter_code=s)
for s in ["IL", "NY", "CA", "ME"]
])
self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
"CA", "IL", "ME", "NY",
], attrgetter("two_letter_code"))
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
"""
        Zero as id for AutoField should raise an exception in MySQL, because
        MySQL does not allow zero for an automatic primary key.
"""
valid_country = Country(name='Germany', iso_two_letter='DE')
invalid_country = Country(id=0, name='Poland', iso_two_letter='PL')
with self.assertRaises(ValueError):
Country.objects.bulk_create([valid_country, invalid_country])
def test_batch_same_vals(self):
        # SQLite had a problem where all the same-valued models were
# collapsed to one insert.
Restaurant.objects.bulk_create([
Restaurant(name='foo') for i in range(0, 2)
])
self.assertEqual(Restaurant.objects.count(), 2)
def test_large_batch(self):
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)
])
self.assertEqual(TwoFields.objects.count(), 1001)
self.assertEqual(
TwoFields.objects.filter(f1__gte=450, f1__lte=550).count(),
101)
self.assertEqual(TwoFields.objects.filter(f2__gte=901).count(), 101)
@skipUnlessDBFeature('has_bulk_insert')
def test_large_single_field_batch(self):
        # SQLite had a problem with more than 500 UNIONed selects in a
        # single query.
Restaurant.objects.bulk_create([
Restaurant() for i in range(0, 501)
])
@skipUnlessDBFeature('has_bulk_insert')
def test_large_batch_efficiency(self):
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)
])
self.assertLess(len(connection.queries), 10)
def test_large_batch_mixed(self):
"""
        Test inserting a large batch where objects with a primary key set are
        mixed with objects without a primary key set.
"""
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)])
self.assertEqual(TwoFields.objects.count(), 1000)
# We can't assume much about the ID's created, except that the above
# created IDs must exist.
id_range = range(100000, 101000, 2)
self.assertEqual(TwoFields.objects.filter(id__in=id_range).count(), 500)
self.assertEqual(TwoFields.objects.exclude(id__in=id_range).count(), 500)
@skipUnlessDBFeature('has_bulk_insert')
def test_large_batch_mixed_efficiency(self):
"""
        Test inserting a large batch where objects with a primary key set are
        mixed with objects without a primary key set.
"""
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create([
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)])
self.assertLess(len(connection.queries), 10)
def test_explicit_batch_size(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 4)]
num_objs = len(objs)
TwoFields.objects.bulk_create(objs, batch_size=1)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=2)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=3)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=num_objs)
self.assertEqual(TwoFields.objects.count(), num_objs)
def test_empty_model(self):
NoFields.objects.bulk_create([NoFields() for i in range(2)])
self.assertEqual(NoFields.objects.count(), 2)
@skipUnlessDBFeature('has_bulk_insert')
def test_explicit_batch_size_efficiency(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 100)]
with self.assertNumQueries(2):
TwoFields.objects.bulk_create(objs, 50)
TwoFields.objects.all().delete()
with self.assertNumQueries(1):
TwoFields.objects.bulk_create(objs, len(objs))
@skipUnlessDBFeature('has_bulk_insert')
def test_bulk_insert_expressions(self):
Restaurant.objects.bulk_create([
Restaurant(name="Sam's Shake Shack"),
Restaurant(name=Lower(Value("Betty's Beetroot Bar")))
])
bbb = Restaurant.objects.filter(name="betty's beetroot bar")
self.assertEqual(bbb.count(), 1)
@skipUnlessDBFeature('has_bulk_insert')
def test_bulk_insert_nullable_fields(self):
# NULL can be mixed with other values in nullable fields
nullable_fields = [field for field in NullableFields._meta.get_fields() if field.name != 'id']
NullableFields.objects.bulk_create([
NullableFields(**{field.name: None}) for field in nullable_fields
])
self.assertEqual(NullableFields.objects.count(), len(nullable_fields))
for field in nullable_fields:
with self.subTest(field=field):
field_value = '' if isinstance(field, FileField) else None
self.assertEqual(NullableFields.objects.filter(**{field.name: field_value}).count(), 1)
@skipUnlessDBFeature('can_return_ids_from_bulk_insert')
def test_set_pk_and_insert_single_item(self):
with self.assertNumQueries(1):
countries = Country.objects.bulk_create([self.data[0]])
self.assertEqual(len(countries), 1)
self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
@skipUnlessDBFeature('can_return_ids_from_bulk_insert')
def test_set_pk_and_query_efficiency(self):
with self.assertNumQueries(1):
countries = Country.objects.bulk_create(self.data)
self.assertEqual(len(countries), 4)
self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
self.assertEqual(Country.objects.get(pk=countries[1].pk), countries[1])
self.assertEqual(Country.objects.get(pk=countries[2].pk), countries[2])
self.assertEqual(Country.objects.get(pk=countries[3].pk), countries[3])
@skipUnlessDBFeature('can_return_ids_from_bulk_insert')
def test_set_state(self):
country_nl = Country(name='Netherlands', iso_two_letter='NL')
country_be = Country(name='Belgium', iso_two_letter='BE')
Country.objects.bulk_create([country_nl])
country_be.save()
# Objects save via bulk_create() and save() should have equal state.
self.assertEqual(country_nl._state.adding, country_be._state.adding)
self.assertEqual(country_nl._state.db, country_be._state.db)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.compute.drivers.cloudsigma import CloudSigmaNodeDriver
from libcloud.compute.drivers.cloudsigma import CloudSigma_2_0_NodeDriver
from libcloud.compute.drivers.cloudsigma import CloudSigmaError
from libcloud.compute.types import NodeState
from libcloud.test import unittest
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
class CloudSigmaAPI20BaseTestCase(object):
def setUp(self):
self.driver_klass.connectionCls.conn_class = CloudSigmaMockHttp
CloudSigmaMockHttp.type = None
CloudSigmaMockHttp.use_param = 'do'
self.driver = self.driver_klass(*self.driver_args,
**self.driver_kwargs)
self.driver.DRIVE_TRANSITION_SLEEP_INTERVAL = 0.1
self.driver.DRIVE_TRANSITION_TIMEOUT = 1
self.node = self.driver.list_nodes()[0]
def test_invalid_api_versions(self):
expected_msg = 'Unsupported API version: invalid'
self.assertRaisesRegexp(NotImplementedError, expected_msg,
CloudSigmaNodeDriver, 'username', 'password',
api_version='invalid')
def test_invalid_credentials(self):
CloudSigmaMockHttp.type = 'INVALID_CREDS'
self.assertRaises(InvalidCredsError, self.driver.list_nodes)
def test_invalid_region(self):
expected_msg = 'Invalid region:'
self.assertRaisesRegexp(ValueError, expected_msg,
CloudSigma_2_0_NodeDriver, 'foo', 'bar',
region='invalid')
def test_list_sizes(self):
sizes = self.driver.list_sizes()
size = sizes[0]
self.assertEqual(size.id, 'micro-regular')
def test_list_images(self):
images = self.driver.list_images()
image = images[0]
self.assertEqual(image.name, 'ubuntu-10.04-toMP')
self.assertEqual(image.extra['image_type'], 'preinst')
self.assertEqual(image.extra['media'], 'disk')
self.assertEqual(image.extra['os'], 'linux')
def test_list_nodes(self):
nodes = self.driver.list_nodes()
node = nodes[0]
self.assertEqual(len(nodes), 2)
self.assertEqual(node.id, '9de75ed6-fd33-45e2-963f-d405f31fd911')
self.assertEqual(node.name, 'test no drives')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['185.12.5.181', '178.22.68.55'])
self.assertEqual(node.private_ips, [])
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
metadata = {'foo': 'bar'}
node = self.driver.create_node(name='test node', size=size, image=image,
ex_metadata=metadata)
self.assertEqual(node.name, 'test node')
self.assertEqual(len(node.extra['nics']), 1)
self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp')
def test_create_node_with_vlan(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
vlan_uuid = '39ae851d-433f-4ac2-a803-ffa24cb1fa3e'
node = self.driver.create_node(name='test node vlan', size=size,
image=image, ex_vlan=vlan_uuid)
self.assertEqual(node.name, 'test node vlan')
self.assertEqual(len(node.extra['nics']), 2)
self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp')
self.assertEqual(node.extra['nics'][1]['vlan']['uuid'], vlan_uuid)
def test_destroy_node(self):
status = self.driver.destroy_node(node=self.node)
self.assertTrue(status)
def test_ex_start_node(self):
status = self.driver.ex_start_node(node=self.node)
self.assertTrue(status)
def test_ex_start_node_avoid_mode(self):
CloudSigmaMockHttp.type = 'AVOID_MODE'
ex_avoid = ['1', '2']
status = self.driver.ex_start_node(node=self.node,
ex_avoid=ex_avoid)
self.assertTrue(status)
def test_ex_start_node_already_started(self):
CloudSigmaMockHttp.type = 'ALREADY_STARTED'
expected_msg = 'Cannot start guest in state "started". Guest should ' \
'be in state "stopped'
self.assertRaisesRegexp(CloudSigmaError, expected_msg,
self.driver.ex_start_node, node=self.node)
def test_ex_stop_node(self):
status = self.driver.ex_stop_node(node=self.node)
self.assertTrue(status)
def test_ex_stop_node_already_stopped(self):
CloudSigmaMockHttp.type = 'ALREADY_STOPPED'
expected_msg = 'Cannot stop guest in state "stopped"'
self.assertRaisesRegexp(CloudSigmaError, expected_msg,
self.driver.ex_stop_node, node=self.node)
def test_ex_clone_node(self):
node_to_clone = self.driver.list_nodes()[0]
cloned_node = self.driver.ex_clone_node(node=node_to_clone,
name='test cloned node')
self.assertEqual(cloned_node.name, 'test cloned node')
def test_ex_open_vnc_tunnel(self):
node = self.driver.list_nodes()[0]
vnc_url = self.driver.ex_open_vnc_tunnel(node=node)
self.assertEqual(vnc_url, 'vnc://direct.lvs.cloudsigma.com:41111')
def test_ex_close_vnc_tunnel(self):
node = self.driver.list_nodes()[0]
status = self.driver.ex_close_vnc_tunnel(node=node)
self.assertTrue(status)
def test_ex_list_library_drives(self):
drives = self.driver.ex_list_library_drives()
drive = drives[0]
self.assertEqual(drive.name, 'IPCop 2.0.2')
self.assertEqual(drive.size, 1000000000)
self.assertEqual(drive.media, 'cdrom')
self.assertEqual(drive.status, 'unmounted')
def test_ex_list_user_drives(self):
drives = self.driver.ex_list_user_drives()
drive = drives[0]
self.assertEqual(drive.name, 'test node 2-drive')
self.assertEqual(drive.size, 13958643712)
self.assertEqual(drive.media, 'disk')
self.assertEqual(drive.status, 'unmounted')
def test_ex_create_drive(self):
CloudSigmaMockHttp.type = 'CREATE'
name = 'test drive 5'
size = 2000 * 1024 * 1024
drive = self.driver.ex_create_drive(name=name, size=size, media='disk')
self.assertEqual(drive.name, 'test drive 5')
self.assertEqual(drive.media, 'disk')
def test_ex_clone_drive(self):
drive = self.driver.ex_list_user_drives()[0]
cloned_drive = self.driver.ex_clone_drive(drive=drive,
name='cloned drive')
self.assertEqual(cloned_drive.name, 'cloned drive')
def test_ex_resize_drive(self):
drive = self.driver.ex_list_user_drives()[0]
size = 1111 * 1024 * 1024
resized_drive = self.driver.ex_resize_drive(drive=drive, size=size)
self.assertEqual(resized_drive.name, 'test drive 5')
self.assertEqual(resized_drive.media, 'disk')
self.assertEqual(resized_drive.size, size)
def test_ex_list_firewall_policies(self):
policies = self.driver.ex_list_firewall_policies()
policy = policies[1]
rule = policy.rules[0]
self.assertEqual(policy.name, 'My awesome policy')
self.assertEqual(rule.action, 'drop')
self.assertEqual(rule.direction, 'out')
self.assertEqual(rule.dst_ip, '23.0.0.0/32')
self.assertEqual(rule.ip_proto, 'tcp')
self.assertIsNone(rule.dst_port)
self.assertIsNone(rule.src_ip)
self.assertIsNone(rule.src_port)
self.assertEqual(rule.comment, 'Drop traffic from the VM to IP address 23.0.0.0/32')
def test_ex_create_firewall_policy_no_rules(self):
CloudSigmaMockHttp.type = 'CREATE_NO_RULES'
policy = self.driver.ex_create_firewall_policy(name='test policy 1')
self.assertEqual(policy.name, 'test policy 1')
self.assertEqual(policy.rules, [])
def test_ex_create_firewall_policy_with_rules(self):
CloudSigmaMockHttp.type = 'CREATE_WITH_RULES'
rules = [
{
'action': 'accept',
'direction': 'out',
'ip_proto': 'tcp',
'src_ip': '127.0.0.1',
'dst_ip': '127.0.0.1'
}
]
policy = self.driver.ex_create_firewall_policy(name='test policy 2',
rules=rules)
rule = policy.rules[0]
self.assertEqual(policy.name, 'test policy 2')
self.assertEqual(len(policy.rules), 1)
self.assertEqual(rule.action, 'accept')
self.assertEqual(rule.direction, 'out')
self.assertEqual(rule.ip_proto, 'tcp')
def test_ex_attach_firewall_policy(self):
policy = self.driver.ex_list_firewall_policies()[0]
node = self.driver.list_nodes()[0]
CloudSigmaMockHttp.type = 'ATTACH_POLICY'
updated_node = self.driver.ex_attach_firewall_policy(policy=policy,
node=node)
nic = updated_node.extra['nics'][0]
self.assertEqual(nic['firewall_policy']['uuid'],
'461dfb8c-e641-43d7-a20e-32e2aa399086')
def test_ex_attach_firewall_policy_inexistent_nic(self):
policy = self.driver.ex_list_firewall_policies()[0]
node = self.driver.list_nodes()[0]
nic_mac = 'inexistent'
expected_msg = 'Cannot find the NIC interface to attach a policy to'
self.assertRaisesRegexp(ValueError, expected_msg,
self.driver.ex_attach_firewall_policy,
policy=policy,
node=node,
nic_mac=nic_mac)
def test_ex_delete_firewall_policy(self):
policy = self.driver.ex_list_firewall_policies()[0]
status = self.driver.ex_delete_firewall_policy(policy=policy)
self.assertTrue(status)
def test_ex_list_tags(self):
tags = self.driver.ex_list_tags()
tag = tags[0]
self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d')
self.assertEqual(tag.name, 'test tag 2')
def test_ex_get_tag(self):
tag = self.driver.ex_get_tag(tag_id='a010ec41-2ead-4630-a1d0-237fa77e4d4d')
self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d')
self.assertEqual(tag.name, 'test tag 2')
def test_ex_create_tag(self):
tag = self.driver.ex_create_tag(name='test tag 3')
self.assertEqual(tag.name, 'test tag 3')
def test_ex_create_tag_with_resources(self):
CloudSigmaMockHttp.type = 'WITH_RESOURCES'
resource_uuids = ['1']
tag = self.driver.ex_create_tag(name='test tag 3',
resource_uuids=resource_uuids)
self.assertEqual(tag.name, 'test tag 3')
self.assertEqual(tag.resources, resource_uuids)
def test_ex_tag_resource(self):
node = self.driver.list_nodes()[0]
tag = self.driver.ex_list_tags()[0]
updated_tag = self.driver.ex_tag_resource(resource=node, tag=tag)
self.assertEqual(updated_tag.name, 'test tag 3')
def test_ex_tag_resources(self):
nodes = self.driver.list_nodes()
tag = self.driver.ex_list_tags()[0]
updated_tag = self.driver.ex_tag_resources(resources=nodes, tag=tag)
self.assertEqual(updated_tag.name, 'test tag 3')
def test_ex_tag_resource_invalid_resource_object(self):
tag = self.driver.ex_list_tags()[0]
expected_msg = 'Resource doesn\'t have id attribute'
self.assertRaisesRegexp(ValueError, expected_msg,
self.driver.ex_tag_resource, tag=tag,
resource={})
def test_ex_delete_tag(self):
tag = self.driver.ex_list_tags()[0]
status = self.driver.ex_delete_tag(tag=tag)
self.assertTrue(status)
def test_ex_get_balance(self):
balance = self.driver.ex_get_balance()
self.assertEqual(balance['balance'], '10.00')
self.assertEqual(balance['currency'], 'USD')
def test_ex_get_pricing(self):
pricing = self.driver.ex_get_pricing()
self.assertTrue('current' in pricing)
self.assertTrue('next' in pricing)
self.assertTrue('objects' in pricing)
def test_ex_get_usage(self):
pricing = self.driver.ex_get_usage()
self.assertTrue('balance' in pricing)
self.assertTrue('usage' in pricing)
def test_ex_list_subscriptions(self):
subscriptions = self.driver.ex_list_subscriptions()
subscription = subscriptions[0]
self.assertEqual(len(subscriptions), 5)
self.assertEqual(subscription.id, '7272')
self.assertEqual(subscription.resource, 'vlan')
self.assertEqual(subscription.amount, 1)
self.assertEqual(subscription.period, '345 days, 0:00:00')
self.assertEqual(subscription.status, 'active')
self.assertEqual(subscription.price, '0E-20')
def test_ex_create_subscription(self):
CloudSigmaMockHttp.type = 'CREATE_SUBSCRIPTION'
subscription = self.driver.ex_create_subscription(amount=1,
period='1 month',
resource='vlan')
self.assertEqual(subscription.amount, 1)
self.assertEqual(subscription.period, '1 month')
self.assertEqual(subscription.resource, 'vlan')
self.assertEqual(subscription.price, '10.26666666666666666666666667')
self.assertEqual(subscription.auto_renew, False)
self.assertEqual(subscription.subscribed_object, '2494079f-8376-40bf-9b37-34d633b8a7b7')
    def test_ex_list_subscriptions_status_filtering(self):
CloudSigmaMockHttp.type = 'STATUS_FILTER'
self.driver.ex_list_subscriptions(status='active')
    def test_ex_list_subscriptions_resource_filtering(self):
CloudSigmaMockHttp.type = 'RESOURCE_FILTER'
resources = ['cpu', 'mem']
self.driver.ex_list_subscriptions(resources=resources)
def test_ex_toggle_subscription_auto_renew(self):
subscription = self.driver.ex_list_subscriptions()[0]
status = self.driver.ex_toggle_subscription_auto_renew(
subscription=subscription)
self.assertTrue(status)
def test_ex_list_capabilities(self):
capabilities = self.driver.ex_list_capabilities()
self.assertEqual(capabilities['servers']['cpu']['min'], 250)
def test_ex_list_servers_availability_groups(self):
groups = self.driver.ex_list_servers_availability_groups()
self.assertEqual(len(groups), 3)
self.assertEqual(len(groups[0]), 2)
self.assertEqual(len(groups[2]), 1)
def test_ex_list_drives_availability_groups(self):
groups = self.driver.ex_list_drives_availability_groups()
self.assertEqual(len(groups), 1)
self.assertEqual(len(groups[0]), 11)
def test_wait_for_drive_state_transition_timeout(self):
drive = self.driver.ex_list_user_drives()[0]
state = 'timeout'
expected_msg = 'Timed out while waiting for drive transition'
self.assertRaisesRegexp(Exception, expected_msg,
self.driver._wait_for_drive_state_transition,
drive=drive, state=state,
timeout=0.5)
def test_wait_for_drive_state_transition_success(self):
drive = self.driver.ex_list_user_drives()[0]
state = 'unmounted'
drive = self.driver._wait_for_drive_state_transition(drive=drive,
state=state,
timeout=0.5)
self.assertEqual(drive.status, state)
class CloudSigmaAPI20DirectTestCase(CloudSigmaAPI20BaseTestCase,
unittest.TestCase):
driver_klass = CloudSigma_2_0_NodeDriver
driver_args = ('foo', 'bar')
driver_kwargs = {}
class CloudSigmaAPI20IndirectTestCase(CloudSigmaAPI20BaseTestCase,
unittest.TestCase):
driver_klass = CloudSigmaNodeDriver
driver_args = ('foo', 'bar')
driver_kwargs = {'api_version': '2.0'}
class CloudSigmaMockHttp(MockHttp, unittest.TestCase):
fixtures = ComputeFileFixtures('cloudsigma_2_0')
def _api_2_0_servers_detail_INVALID_CREDS(self, method, url, body, headers):
body = self.fixtures.load('libdrives.json')
return (httplib.UNAUTHORIZED, body, {},
httplib.responses[httplib.UNAUTHORIZED])
def _api_2_0_libdrives(self, method, url, body, headers):
body = self.fixtures.load('libdrives.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_servers_detail(self, method, url, body, headers):
body = self.fixtures.load('servers_detail_mixed_state.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911(self, method, url, body, headers):
body = ''
return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])
def _api_2_0_servers(self, method, url, body, headers):
if method == 'POST':
# create_node
parsed = json.loads(body)
if 'vlan' in parsed['name']:
self.assertEqual(len(parsed['nics']), 2)
body = self.fixtures.load('servers_create_with_vlan.json')
else:
body = self.fixtures.load('servers_create.json')
return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_start(self, method, url, body, headers):
body = self.fixtures.load('start_success.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_AVOID_MODE_start(self, method, url, body, headers):
self.assertUrlContainsQueryParams(url, {'avoid': '1,2'})
body = self.fixtures.load('start_success.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STARTED_start(self, method, url, body, headers):
body = self.fixtures.load('start_already_started.json')
return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_stop(self, method, url, body, headers):
body = self.fixtures.load('stop_success.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STOPPED_stop(self, method, url, body, headers):
body = self.fixtures.load('stop_already_stopped.json')
return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_clone(self, method, url, body, headers):
body = self.fixtures.load('servers_clone.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_open_vnc(self, method, url, body, headers):
body = self.fixtures.load('servers_open_vnc.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_close_vnc(self, method, url, body, headers):
body = self.fixtures.load('servers_close_vnc.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_drives_detail(self, method, url, body, headers):
body = self.fixtures.load('drives_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913(self, method, url, body, headers):
body = self.fixtures.load('drives_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809(self, method, url, body, headers):
body = self.fixtures.load('drives_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_drives_CREATE(self, method, url, body, headers):
body = self.fixtures.load('drives_create.json')
return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_clone(self, method, url, body, headers):
body = self.fixtures.load('drives_clone.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_drives_5236b9ee_f735_42fd_a236_17558f9e12d3_action_clone(self, method, url, body, headers):
body = self.fixtures.load('drives_clone.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913_action_resize(self, method, url, body, headers):
body = self.fixtures.load('drives_resize.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_resize(self, method, url, body, headers):
body = self.fixtures.load('drives_resize.json')
return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
def _api_2_0_fwpolicies_detail(self, method, url, body, headers):
body = self.fixtures.load('fwpolicies_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_fwpolicies_CREATE_NO_RULES(self, method, url, body, headers):
body = self.fixtures.load('fwpolicies_create_no_rules.json')
return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
def _api_2_0_fwpolicies_CREATE_WITH_RULES(self, method, url, body, headers):
body = self.fixtures.load('fwpolicies_create_with_rules.json')
return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_ATTACH_POLICY(self, method, url, body, headers):
body = self.fixtures.load('servers_attach_policy.json')
return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
def _api_2_0_fwpolicies_0e339282_0cb5_41ac_a9db_727fb62ff2dc(self, method, url, body, headers):
if method == 'DELETE':
body = ''
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _api_2_0_tags_detail(self, method, url, body, headers):
body = self.fixtures.load('tags_detail.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_tags(self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load('tags_create.json')
return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
def _api_2_0_tags_WITH_RESOURCES(self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load('tags_create_with_resources.json')
return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
def _api_2_0_tags_a010ec41_2ead_4630_a1d0_237fa77e4d4d(self, method, url, body, headers):
if method == 'GET':
# ex_get_tag
body = self.fixtures.load('tags_get.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == 'PUT':
# ex_tag_resource
body = self.fixtures.load('tags_update.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == 'DELETE':
# ex_delete_tag
body = ''
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _api_2_0_balance(self, method, url, body, headers):
body = self.fixtures.load('balance.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_pricing(self, method, url, body, headers):
body = self.fixtures.load('pricing.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_currentusage(self, method, url, body, headers):
body = self.fixtures.load('currentusage.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_subscriptions(self, method, url, body, headers):
body = self.fixtures.load('subscriptions.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_subscriptions_STATUS_FILTER(self, method, url, body, headers):
self.assertUrlContainsQueryParams(url, {'status': 'active'})
body = self.fixtures.load('subscriptions.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_subscriptions_RESOURCE_FILTER(self, method, url, body, headers):
expected_params = {'resource': 'cpu,mem', 'status': 'all'}
self.assertUrlContainsQueryParams(url, expected_params)
body = self.fixtures.load('subscriptions.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_subscriptions_7272_action_auto_renew(self, method, url, body, headers):
body = ''
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_subscriptions_CREATE_SUBSCRIPTION(self, method, url, body, headers):
body = self.fixtures.load('create_subscription.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_capabilities(self, method, url, body, headers):
body = self.fixtures.load('capabilities.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_servers_availability_groups(self, method, url, body, headers):
body = self.fixtures.load('servers_avail_groups.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_2_0_drives_availability_groups(self, method, url, body, headers):
body = self.fixtures.load('drives_avail_groups.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
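# Illustrative only: these tests are normally collected by the project's test
# runner. A direct run might look like the following (the module path is an
# assumption, not stated above):
#   python -m pytest libcloud/test/compute/test_cloudsigma_v2_0.py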
|
|
#!/usr/bin/env python
import argparse
import yaml
import logging
from cloudify_rest_client.client import CloudifyClient
from cloudify_cli.cli import cfy
from cloudify_cli.execution_events_fetcher import wait_for_execution
from cloudify.logs import create_event_message_prefix
from prettytable import PrettyTable, ALL
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
def _events_logger(events):
for event in events:
output = create_event_message_prefix(event)
if output is not None:
logger.info(output)
def _wait(client, execution):
wait_for_execution(client,
execution,
events_handler=_events_logger,
include_logs=True, logger=logger)
def _is_virtual_ip_node(node):
return 'cloudify.nodes.VirtualIP' in node.type_hierarchy
@cfy.pass_client()
def add_secrets(client, deployment_id, node_id, file, password, **kwargs):
with open(file, 'r') as input_file:
        secrets = yaml.safe_load(input_file)
nodes_map = {}
node_instances_map = {}
nodes = client.nodes.list(deployment_id=deployment_id,
sort='id')
for node in nodes:
nodes_map[node.id] = node
all_node_instances = client.node_instances.list(deployment_id=deployment_id)
for node_instance in all_node_instances:
node_instances_map[node_instance.id] = node_instance
node_instances = client.node_instances.list(deployment_id=deployment_id,
node_id=node_id)
for ni in node_instances:
public_ip = None
for rel in ni.relationships:
target_node_id = rel['target_name']
target_node = nodes_map[target_node_id]
if _is_virtual_ip_node(target_node):
target_node_instance = node_instances_map[rel['target_id']]
public_ip = target_node_instance.runtime_properties['aws_resource_id']
break
if not public_ip:
            raise Exception('No public IP found for node instance: {0}'.format(ni.id))
current_client = CloudifyClient(host=public_ip, username='admin', password=password,
tenant='default_tenant')
print "Creating on: {}".format(public_ip)
for key, value in secrets.iteritems():
print "\t{}...".format(key)
current_client.secrets.create(key, value, True)
@cfy.pass_client()
def control(client, deployment_id, kind, op, **kwargs):
d = {
'cli': {
'filter': 'node_ids',
'criteria': ['cli_vm']
},
'mgr': {
'filter': 'node_ids',
'criteria': ['manager_vm']
},
'mgr-ami': {
'filter': 'node_ids',
'criteria': ['cm_vm']
},
'app': {
'filter': 'node_ids',
'criteria': ['app_vm']
},
'all': {
'filter': 'type_names',
'criteria': ['cloudify.nodes.Compute']
}
}
desc = d[kind]
filter = desc['filter']
criteria = desc['criteria']
execution = client.executions.start(
deployment_id, 'execute_operation',
{
filter: criteria,
'operation': 'cloudify.interfaces.lifecycle.{}'.format(op)
})
print('Execution ID: {}'.format(execution.id))
_wait(client, execution)
@cfy.pass_client()
def recreate(client, deployment_id, vm_type, **kwargs):
node_id = {
'cli': 'cli_vm',
'mgr': 'manager_vm',
'mgr-img': 'cm_vm',
'app': 'app_vm'
}[vm_type]
node_instances = client.node_instances.list(deployment_id=deployment_id,
node_id=node_id)
for ni in node_instances:
print 'Recreating VM: {}'.format(ni.id)
execution = client.executions.start(
deployment_id, 'heal', {'node_instance_id': ni.id},
force=True)
print '\tExecution ID: {}'.format(execution.id)
@cfy.pass_client()
def install_cli(client, deployment_id, **kwargs):
execution = client.executions.start(
deployment_id,
'execute_operation',
{'operation': 'custom.install_cli',
'node_ids': ['cli_configuration']})
print 'Execution ID: {}'.format(execution.id)
_wait(client, execution)
@cfy.pass_client()
def refresh_labs(client, deployment_id, **kwargs):
execution = client.executions.start(
deployment_id,
'execute_operation',
{'operation': 'cloudify.interfaces.lifecycle.create',
'node_ids': ['cli_labs']})
print 'Execution ID: {}'.format(execution.id)
_wait(client, execution)
@cfy.pass_client()
def heal(client, deployment_id, node_instance_id, **kwargs):
execution = client.executions.start(
deployment_id,
'heal',
{'node_instance_id': node_instance_id})
print 'Execution ID: {}'.format(execution.id)
_wait(client, execution)
@cfy.pass_client()
def scale(client, deployment_id, delta, **kwargs):
execution = client.executions.start(
deployment_id,
'scale',
{'scalable_entity_name': 'trainee',
'delta': delta})
print 'Execution ID: {}'.format(execution.id)
_wait(client, execution)
@cfy.pass_client()
def get_info(client, deployment_id, **kwargs):
nodes = client.nodes.list(deployment_id=deployment_id,
sort='id')
if not nodes:
print 'No nodes returned for deployment ID {0}. Are you sure this deployment exists?'.format(deployment_id)
exit(1)
node_instances = client.node_instances.list(deployment_id=deployment_id)
    compute_nodes = []
    public_compute_nodes = []
nodes_map = {}
node_instances_map = {}
for node in nodes:
nodes_map[node.id] = node
for node_instance in node_instances:
node_instances_map[node_instance.id] = node_instance
for node in nodes:
if 'lab_vm' in node.type_hierarchy:
compute_nodes.append(node)
for rel in node.relationships:
target_node = nodes_map[rel['target_id']]
if _is_virtual_ip_node(target_node):
public_compute_nodes.append(node.id)
continue
trainees = {}
for compute_node in compute_nodes:
node_instances = client.node_instances.list(deployment_id=deployment_id,
node_id=compute_node.id)
for node_instance in node_instances:
scaling_group_id = node_instance.scaling_groups[0]['id']
if scaling_group_id in trainees:
trainee = trainees[scaling_group_id]
else:
trainee = {}
trainees[scaling_group_id] = trainee
trainee[node_instance.node_id] = {}
trainee[node_instance.node_id]['private'] = node_instance.runtime_properties['ip']
trainee[node_instance.node_id]['instance'] = node_instance.id
for rel in node_instance.relationships:
target_node = nodes_map[rel['target_name']]
if _is_virtual_ip_node(target_node):
target_node_instance = node_instances_map[rel['target_id']]
trainee[node_instance.node_id]['public'] = target_node_instance.runtime_properties['aws_resource_id']
break
header_elements = ['#']
for node in compute_nodes:
display_name = node.properties['display_name']
header_elements.append("{}".format(display_name))
# header_elements.append("{} (private)".format(display_name))
# if node.id in public_compute_nodes:
# header_elements.append("{} (public)".format(display_name))
table = PrettyTable(header_elements,
hrules=ALL)
table.valign = 'm'
ix = 1
for trainee in trainees:
lst = [ix]
for node in compute_nodes:
contents = '{}\n' \
'{}'.format(
trainees[trainee][node.id]['instance'],
trainees[trainee][node.id]['private']
)
if node.id in public_compute_nodes:
contents += '\n{}'.format(
trainees[trainee][node.id]['public']
)
lst.append(contents)
table.add_row(lst)
ix += 1
print "Note: each cell contains, from top to bottom: instance ID, private IP, public IP"
print table
if __name__ == '__main__':
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument('deployment_id',
help='ID of the deployment')
master_parser = argparse.ArgumentParser()
subparsers = master_parser.add_subparsers()
subparsers.required = True
labs_info_parser = subparsers.add_parser('get-info', parents=[common_parser])
labs_info_parser.set_defaults(func=get_info)
add_secrets_parser = subparsers.add_parser('add-secrets', parents=[common_parser])
add_secrets_parser.add_argument('node_id',
choices=['cm_vm', 'manager_vm'],
help="Manager node ID")
add_secrets_parser.add_argument('file',
help="Secrets file")
add_secrets_parser.add_argument('password',
help="Password for the admin user")
add_secrets_parser.set_defaults(func=add_secrets)
control_parser = subparsers.add_parser('control', parents=[common_parser])
control_parser.add_argument('op',
choices=['start', 'stop', 'restart'],
help="Operation to perform")
control_parser.add_argument('kind',
choices=['cli', 'mgr', 'mgr-ami', 'app', 'all'],
help="VM's to control")
control_parser.set_defaults(func=control)
recreate_parser = subparsers.add_parser('recreate', parents=[common_parser])
recreate_parser.add_argument('vm_type',
choices=['cli', 'mgr', 'mgr-ami', 'app'],
help="Type of VM to recreate")
recreate_parser.set_defaults(func=recreate)
install_cli_parser = subparsers.add_parser('install-cli', parents=[common_parser])
install_cli_parser.set_defaults(func=install_cli)
heal_parser = subparsers.add_parser('heal', parents=[common_parser])
heal_parser.add_argument('node_instance_id',
help="ID of node instance to heal")
heal_parser.set_defaults(func=heal)
scale_parser = subparsers.add_parser('scale', parents=[common_parser])
scale_parser.add_argument('delta',
help="number of instances/groups to create/delete")
scale_parser.set_defaults(func=scale)
refresh_labs_parser = subparsers.add_parser('refresh-labs', parents=[common_parser])
refresh_labs_parser.set_defaults(func=refresh_labs)
args = master_parser.parse_args()
args.func(**vars(args))
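# Illustrative invocations (a sketch; the script filename "labs.py" is an
# assumption, not given above). Positionals follow the parser layout: the
# deployment ID comes first, then the subcommand-specific arguments.
#   python labs.py get-info my-deployment
#   python labs.py control my-deployment start mgr
#   python labs.py scale my-deployment 2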
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MetaGraph and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os.path
import re
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Prefix to be added to unbound input names so they are easily identifiable.
_UNBOUND_INPUT_PREFIX = "$unbound_inputs_"
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
"""Create a `NodeDef` proto with export_scope stripped.
Args:
from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
export_scope: A `string` representing the name scope to remove.
unbound_inputs: An array of unbound input names if they exist.
clear_devices: Boolean which controls whether to clear device information
from node_def. Default false.
Returns:
A `node_def_pb2.NodeDef` protocol buffer.
"""
node_def = copy.deepcopy(from_node_def)
for i, v in enumerate(node_def.input):
if (export_scope and
not node_def.input[i].lstrip("^").startswith(export_scope)):
# Adds "$unbound_inputs_" prefix to the unbound name so they are easily
# identifiable.
node_def.input[i] = re.sub(r"([\^]|^)(.*)",
r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
compat.as_str(v))
unbound_inputs.append(node_def.input[i])
else:
node_def.input[i] = ops.strip_name_scope(v, export_scope)
node_def.name = compat.as_bytes(
ops.strip_name_scope(from_node_def.name, export_scope))
for k, v in six.iteritems(from_node_def.attr):
if k == "_class":
new_s = [compat.as_bytes(
ops.strip_name_scope(s, export_scope)) for s in v.list.s
if not export_scope or
compat.as_str(s).split("@")[1].startswith(export_scope)]
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
else:
node_def.attr[k].CopyFrom(v)
if clear_devices:
node_def.device = ""
return node_def
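# A minimal sketch of the renaming `_node_def` applies to inputs that fall
# outside the export scope (illustrative value; runnable in an interpreter):
#   >>> import re
#   >>> re.sub(r"([\^]|^)(.*)", r"\1" + "$unbound_inputs_" + r"\2", "^other/x")
#   '^$unbound_inputs_other/x'
# Inputs that do start with the export scope are instead passed through
# `ops.strip_name_scope`, which removes the scope prefix.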
def _read_file(filename):
"""Reads a file containing `GraphDef` and returns the protocol buffer.
Args:
filename: `graph_def` filename including the path.
Returns:
A `GraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
graph_def = graph_pb2.GraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
graph_def.ParseFromString(file_content)
return graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content, graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return graph_def
def ops_used_by_graph_def(graph_def):
"""Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph.
"""
# Map function names to definitions
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
# Collect the list of op names. Since functions can reference functions, we
# need a recursive traversal.
used_ops = set() # Includes both primitive ops and functions
functions_to_process = [] # A subset of used_ops
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_to_process.append(name_to_function[op])
used_ops.add(op)
for node in graph_def.node:
mark_op_as_used(node.op)
while functions_to_process:
fun = functions_to_process.pop()
for node in fun.node_def:
mark_op_as_used(node.op)
return [op for op in used_ops if op not in name_to_function]
def stripped_op_list_for_graph(graph_def):
"""Collect the stripped OpDefs for ops used by a graph.
This function computes the `stripped_op_list` field of `MetaGraphDef` and
similar protos. The result can be communicated from the producer to the
consumer, which can then use the C++ function
`RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
An `OpList` of ops used by the graph.
Raises:
ValueError: If an unregistered op is used.
"""
# This is the Python equivalent of StrippedOpListForGraph in C++.
# Unfortunately, since the Python op registry can differ from that in C++, we
# can't remove the duplication using swig (at least naively).
# TODO(irving): Support taking graphs directly.
used_ops = ops_used_by_graph_def(graph_def)
# Verify that all used ops are registered.
registered_ops = op_def_registry.get_registered_ops()
# These internal ops used by functions are not registered, so we need to
# whitelist them. # TODO(irving): Do something better here.
op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
for op in used_ops:
if op not in registered_ops and op not in op_whitelist:
raise ValueError("Op %s is used by the graph, but is not registered" % op)
# Build the stripped op list in sorted order
return op_def_pb2.OpList(op=[registered_ops[op] for op in sorted(used_ops)
if op in registered_ops])
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind
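# Illustrative mapping produced by `_get_kind_name` (example values only):
#   >>> _get_kind_name("checkpoint/path")   # str/bytes -> "bytes_list"
#   'bytes_list'
#   >>> _get_kind_name(7)                   # ints      -> "int64_list"
#   'int64_list'
#   >>> _get_kind_name(0.5)                 # floats    -> "float_list"
#   'float_list'
# `Any` protos map to "any_list"; everything else (e.g. graph elements) falls
# back to "node_list".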
def _should_include_node(node_or_node_name, export_scope):
"""Returns `True` if a node should be included.
Args:
node_or_node_name: A node or `string` node name.
export_scope: `string`. Name scope under which to extract the subgraph. The
      scope name will be stripped from the node definitions for easy import later
into new name scopes.
Returns:
`True` if the node should be included.
"""
if not isinstance(node_or_node_name, six.string_types):
try:
node_name = node_or_node_name.name
except AttributeError:
# Keep the object that we don't know how to process.
return True
else:
node_name = node_or_node_name
return (node_name.startswith(_UNBOUND_INPUT_PREFIX) or
(not export_scope or node_name.startswith(export_scope)))
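# Sketch of the inclusion rule above (hypothetical node names):
#   >>> _should_include_node("encoder/w", export_scope="encoder")
#   True
#   >>> _should_include_node("decoder/w", export_scope="encoder")
#   False
#   >>> _should_include_node("$unbound_inputs_x", export_scope="encoder")
#   True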
def add_collection_def(meta_graph_def, key, graph=None,
export_scope=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
graph: The `Graph` from which to get collections.
export_scope: Optional `string`. Name scope to remove.
"""
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if not isinstance(key, six.string_types) and not isinstance(key, bytes):
logging.warning("Only collections with string type keys will be "
"serialized. This key has %s", type(key))
return
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
collection_list = graph.get_collection(key)
# Remove nodes that should not be exported from the collection list.
collection_list = [x for x in collection_list if
_should_include_node(x, export_scope)]
if not collection_list:
return
try:
col_def = meta_graph_def.collection_def[key]
to_proto = ops.get_to_proto_function(key)
proto_type = ops.get_collection_proto_type(key)
if to_proto:
kind = "bytes_list"
for x in collection_list:
# Additional type check to make sure the returned proto is indeed
# what we expect.
proto = to_proto(x, export_scope=export_scope)
if proto:
assert isinstance(proto, proto_type)
getattr(col_def, kind).value.append(proto.SerializeToString())
else:
kind = _get_kind_name(collection_list[0])
if kind == "node_list":
for x in collection_list:
if not export_scope or x.name.startswith(export_scope):
getattr(col_def, kind).value.append(
ops.strip_name_scope(x.name, export_scope))
elif kind == "bytes_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python3 distinguishes between bytes and strings.
getattr(col_def, kind).value.extend(
[compat.as_bytes(x) for x in collection_list])
else:
getattr(col_def, kind).value.extend([x for x in collection_list])
except Exception as e: # pylint: disable=broad-except
logging.warning("Error encountered when serializing %s.\n"
"Type is unsupported, or the types of the items don't "
"match field type in CollectionDef.\n%s", key, str(e))
if key in meta_graph_def.collection_def:
del meta_graph_def.collection_def[key]
return
def create_meta_graph_def(meta_info_def=None,
graph_def=None,
saver_def=None,
collection_list=None,
graph=None,
export_scope=None):
"""Construct and returns a `MetaGraphDef` protocol buffer.
Args:
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
graph: The `Graph` to create `MetaGraphDef` out of.
export_scope: Optional `string`. Name scope to remove.
Returns:
MetaGraphDef protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
"""
# Type check.
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if meta_info_def and not isinstance(meta_info_def,
meta_graph_pb2.MetaGraphDef.MetaInfoDef):
raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
type(meta_info_def))
if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be of type GraphDef, not %s",
type(graph_def))
if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
raise TypeError("saver_def must be of type SaverDef, not %s",
type(saver_def))
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Creates a MetaGraphDef proto.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Adds meta_info_def.
if not meta_info_def:
meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
# Set the tf version strings to the current tf build.
meta_info_def.tensorflow_version = versions.__version__
meta_info_def.tensorflow_git_version = versions.__git_version__
meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
# Adds graph_def or the default.
if not graph_def:
meta_graph_def.graph_def.MergeFrom(graph.as_graph_def(add_shapes=True))
else:
meta_graph_def.graph_def.MergeFrom(graph_def)
# Fills in meta_info_def.stripped_op_list using the ops from graph_def.
# pylint: disable=g-explicit-length-test
if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
stripped_op_list_for_graph(meta_graph_def.graph_def))
# pylint: enable=g-explicit-length-test
# Adds saver_def.
if saver_def:
meta_graph_def.saver_def.MergeFrom(saver_def)
# Adds collection_list.
if collection_list:
clist = collection_list
else:
clist = graph.get_all_collection_keys()
for ctype in clist:
add_collection_def(meta_graph_def, ctype,
graph=graph,
export_scope=export_scope)
return meta_graph_def
def read_meta_graph_file(filename):
"""Reads a file containing `MetaGraphDef` and returns the protocol buffer.
Args:
filename: `meta_graph_def` filename including the path.
Returns:
A `MetaGraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
meta_graph_def = meta_graph_pb2.MetaGraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
meta_graph_def.ParseFromString(file_content)
return meta_graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return meta_graph_def
def import_scoped_meta_graph(meta_graph_or_file,
clear_devices=False,
graph=None,
import_scope=None,
input_map=None,
unbound_inputs_col_name="unbound_inputs"):
"""Recreates a`Graph` saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
the argument is a file containing a `MetaGraphDef` protocol buffer ,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates all the collections, and returns a saver
constructed from the `saver_def` field.
In combination with `export_scoped_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
from graph_def. Default false.
graph: The `Graph` to import into. If `None`, use the default graph.
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
`Tensor` objects. The values of the named input tensors in the imported
graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
Returns:
A dictionary of all the `Variables` imported into the name scope.
Raises:
ValueError: If the graph_def contains unbound inputs.
"""
if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph_or_file
else:
meta_graph_def = read_meta_graph_file(meta_graph_or_file)
if unbound_inputs_col_name:
for key, col_def in meta_graph_def.collection_def.items():
if key == unbound_inputs_col_name:
kind = col_def.WhichOneof("kind")
field = getattr(col_def, kind)
if field.value and (
not input_map or
sorted([compat.as_str(v) for v in field.value]) !=
sorted(input_map)):
raise ValueError("Graph contains unbound inputs: %s. Must "
"provide these inputs through input_map." %
",".join([compat.as_str(v) for v in field.value
if not input_map or v not in input_map]))
break
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Gathers the list of nodes we are interested in.
with graph.as_default():
producer_op_list = None
if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
input_graph_def = meta_graph_def.graph_def
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ""
importer.import_graph_def(
input_graph_def, name=(import_scope or ""), input_map=input_map,
producer_op_list=producer_op_list)
# Restores all the other collections.
for key, col_def in meta_graph_def.collection_def.items():
# Don't add unbound_inputs to the new graph.
if key == unbound_inputs_col_name:
continue
kind = col_def.WhichOneof("kind")
if kind is None:
logging.error("Cannot identify data type for collection %s. Skipping.",
key)
continue
from_proto = ops.get_from_proto_function(key)
if from_proto:
assert kind == "bytes_list"
proto_type = ops.get_collection_proto_type(key)
for value in col_def.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
graph.add_to_collection(
key, from_proto(proto, import_scope=import_scope))
else:
field = getattr(col_def, kind)
if kind == "node_list":
for value in field.value:
col_op = graph.as_graph_element(
ops.prepend_name_scope(value, import_scope))
graph.add_to_collection(key, col_op)
elif kind == "int64_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python2 distinguishes between int and long, while Python3 has
# only int.
for value in field.value:
graph.add_to_collection(key, int(value))
else:
for value in field.value:
graph.add_to_collection(
key, ops.prepend_name_scope(value, import_scope))
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=import_scope)
for v in variables:
var_list[ops.strip_name_scope(v.name, import_scope)] = v
return var_list
def export_scoped_meta_graph(filename=None,
graph_def=None,
graph=None,
export_scope=None,
as_text=False,
unbound_inputs_col_name="unbound_inputs",
clear_devices=False,
**kwargs):
"""Returns `MetaGraphDef` proto. Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
`MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
Args:
filename: Optional filename including the path for writing the
generated `MetaGraphDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
graph: The `Graph` to import into. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
for easy import later into new name scopes. If `None`, the whole graph
is exported. graph_def and export_scope cannot both be specified.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
unbound_inputs_col_name: Optional `string`. If provided, a string collection
with the given name will be added to the returned `MetaGraphDef`,
containing the names of tensors that must be remapped when importing the
`MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
before exporting the graph.
**kwargs: Optional keyed arguments, including meta_info_def,
saver_def, collection_list.
Returns:
A `MetaGraphDef` proto and dictionary of `Variables` in the exported
name scope.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
"""
graph = graph or ops.get_default_graph()
unbound_inputs = []
if export_scope or clear_devices:
if graph_def:
new_graph_def = graph_pb2.GraphDef()
new_graph_def.versions.CopyFrom(graph_def.versions)
for node_def in graph_def.node:
if _should_include_node(node_def.name, export_scope):
new_node_def = _node_def(node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
new_graph_def.node.extend([new_node_def])
graph_def = new_graph_def
else:
# Only do this complicated work if we want to remove a name scope.
graph_def = graph_pb2.GraphDef()
# pylint: disable=protected-access
graph_def.versions.CopyFrom(graph.graph_def_versions)
bytesize = 0
for key in sorted(graph._nodes_by_id):
if _should_include_node(graph._nodes_by_id[key].name, export_scope):
value = graph._nodes_by_id[key]
# pylint: enable=protected-access
node_def = _node_def(value.node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
graph_def.node.extend([node_def])
if value.outputs:
assert "_output_shapes" not in graph_def.node[-1].attr
graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in value.outputs])
bytesize += value.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
# It's possible that not all the inputs are in the export_scope.
# If we would like such information included in the exported meta_graph,
# add them to a special unbound_inputs collection.
if unbound_inputs_col_name:
# Clears the unbound_inputs collections.
graph.clear_collection(unbound_inputs_col_name)
for k in unbound_inputs:
graph.add_to_collection(unbound_inputs_col_name, k)
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=export_scope)
for v in variables:
if _should_include_node(v, export_scope):
var_list[ops.strip_name_scope(v.name, export_scope)] = v
scoped_meta_graph_def = create_meta_graph_def(
graph_def=graph_def,
graph=graph,
export_scope=export_scope,
**kwargs)
if filename:
graph_io.write_graph(
scoped_meta_graph_def,
os.path.dirname(filename),
os.path.basename(filename),
as_text=as_text)
return scoped_meta_graph_def, var_list
def copy_scoped_meta_graph(from_scope, to_scope,
from_graph=None, to_graph=None):
"""Copies a sub-meta_graph from one scope to another.
Args:
from_scope: `String` name scope containing the subgraph to be copied.
to_scope: `String` name scope under which the copied subgraph will reside.
from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the
      default graph is used.
to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the
default graph is used.
Returns:
A dictionary of `Variables` that has been copied into `to_scope`.
Raises:
ValueError: If `from_scope` and `to_scope` are the same while
`from_graph` and `to_graph` are also the same.
"""
from_graph = from_graph or ops.get_default_graph()
to_graph = to_graph or ops.get_default_graph()
if from_graph == to_graph and from_scope == to_scope:
raise ValueError("'from_scope' and 'to_scope' need to be different "
"when performing copy in the same graph.")
orig_meta_graph, var_list = export_scoped_meta_graph(
export_scope=from_scope, graph=from_graph)
var_list = import_scoped_meta_graph(orig_meta_graph,
graph=to_graph,
import_scope=to_scope)
return var_list
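# Illustrative round trip using the two entry points above (a sketch with
# assumed scope names and file path, not part of the original module):
#   meta_graph_def, var_list = export_scoped_meta_graph(
#       filename="/tmp/encoder.meta", export_scope="encoder")
#   imported_vars = import_scoped_meta_graph(
#       "/tmp/encoder.meta", import_scope="encoder_copy")
# `copy_scoped_meta_graph` performs essentially this export/import pair in memory.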
|
|
#!/usr/bin/python3
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog  # needed for the tk.filedialog dialogs used below
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import numpy as np
import math
#TODO add time measurement
#TODO save cluster affiliations
RGBA_VALS = [[0.000, 0.000, 1.000, 1], [1.000, 0.000, 0.000, 1], [0.000, 1.000, 0.000, 1], [1.000, 0.647, 0.000, 1],
[0.118, 0.565, 1.000, 1], [1.000, 0.078, 0.576, 1], [1.000, 1.000, 0.000, 1], [1.000, 0.000, 1.000, 1],
[0.502, 0.000, 0.502, 1], [0.647, 0.165, 0.165, 1], [0.251, 0.878, 0.816, 1], [0.804, 0.361, 0.361, 1],
[0.741, 0.718, 0.420, 1], [0.000, 0.392, 0.000, 1], [0.690, 0.878, 0.902, 1], [0.502, 0.000, 0.000, 1]]
class MainApp(tk.Tk):
"""The controller of data and window contents."""
def __init__(self, *args, **kwargs):
"""Create the main window and assign commands to buttons of the sidepane.
"""
tk.Tk.__init__(self, *args, **kwargs)
self.title("Isodata Fuzzy-C-Means playground")
#self.xData = list(np.random.rand(50))
#self.yData = list(np.random.rand(50))
self.filePath = ""
try:
self.xData, self.yData = np.loadtxt("two_clusters.txt").tolist()
except FileNotFoundError:
print("Could not find '8clusters.txt', will start with empty field!")
self.xData = []
self.yData = []
self.colors = [RGBA_VALS[0]]
self.centerXCoords = []
self.centerYCoords = []
self.affiliations = np.array([])
self.filePath = ""
self.sidepane = Sidepane(self, padding="3 3 12 12")
self.sidepane.grid(column=5, row=5, sticky="nsew")
self.sidepane.loadButton.config(command=self.loadData)
self.sidepane.saveButton.config(command=self.saveData)
self.sidepane.saveAsButton.config(command=self.saveDataAs)
self.sidepane.resetButton.config(command=self.resetData)
self.sidepane.randDataButton.config(command=self.randomizeData)
self.sidepane.numRandDataChooser.bind("<Return>", self.randomizeData)
self.sidepane.numClusterChooser.bind("<Return>", self.runFCM)
self.sidepane.startFCMButton.config(command=self.runFCM)
self.plotArea = PlotArea(self, padding="3 3 12 12")
self.plotArea.grid(column=10, row=5, sticky="nsew")
self.plotArea.canvas.mpl_connect('button_press_event', self.onClick)
self.plotArea.canvas.mpl_connect('pick_event', self.onPick)
self.columnconfigure(10, weight=1)
self.rowconfigure(5, weight=1)
def saveData(self, *args):
"""Save data in xData and yData to the file in filePath. If filePath is
empty, call saveDataAs()
"""
if self.filePath:
np.savetxt(self.filePath, (self.xData, self.yData))
else:
            self.saveDataAs(*args)
def saveDataAs(self, *args):
"""Open dialog to select location and filename to save the data from
xData and yData. File path will be saved to filePath.
"""
self.filePath = tk.filedialog.asksaveasfilename(initialdir=self.filePath, parent=self)
if self.filePath:
np.savetxt(self.filePath, (self.xData, self.yData))
def loadData(self, *args):
"""Open dialog to select a file and load its content in to xData and
yData when possible
"""
self.filePath = tk.filedialog.askopenfilename(initialdir=self.filePath, parent=self)
self.xData, self.yData = np.loadtxt(self.filePath).tolist()
self.affiliations = np.array([])
self.plotArea.redraw()
self.sidepane.update()
def onClick(self, event):
"""Handle clicks in the plot area. When left mouse button is clicked,
the point is added to xData, yData and shown in the plot.
"""
if (event.button == 1 and event.xdata is not None and
event.ydata is not None):
self.xData.append(event.xdata)
self.yData.append(event.ydata)
self.affiliations = np.array([])
self.plotArea.redraw()
self.sidepane.update()
def onPick(self, event):
"""Handle pick events. If there is a mousebutton3-click on a data point
it will be removed from the dataset
"""
if event.mouseevent.button == 3:
xMouse = event.mouseevent.xdata
yMouse = event.mouseevent.ydata
distances = [((xMouse-x)**2+(yMouse-y)**2)**0.5 for (x, y) in zip(self.xData, self.yData)]
index = distances.index(min(distances))
del self.xData[index]
del self.yData[index]
self.affiliations = np.array([])
self.colors = [RGBA_VALS[0]]
self.plotArea.redraw()
self.sidepane.update()
def resetData(self):
"""Initializes xData, yData with empty lists and redraws the plot."""
self.xData = []
self.yData = []
self.filePath = ""
self.affiliations = np.array([])
self.colors = [RGBA_VALS[0]]
self.plotArea.redraw()
self.sidepane.update()
def randomizeData(self, *args):
"""Fill the list of datapoints with random data. The number of points
is determined by the spinbox from the sidepane. Even though there's a
max limit to the box you can enter higher numbers than the limit. That's
why there's another limit in this method (double as high).
"""
self.xData = list(np.random.rand(min(self.sidepane.numRandData.get(), 10000)))
self.yData = list(np.random.rand(min(self.sidepane.numRandData.get(), 10000)))
self.filePath = ""
self.affiliations = np.array([])
self.colors = [RGBA_VALS[0]]
self.plotArea.redraw()
self.sidepane.update()
def runFCM(self, *args):
self.fcm = FCM(self.xData, self.yData, int(self.sidepane.numClusterChooser.get()), self.sidepane.contrast.get(), self.sidepane.truncErr.get())
self.fcm.run()
self.centerXCoords = self.fcm.centerXCoords
self.centerYCoords = self.fcm.centerYCoords
self.affiliations = self.fcm.affiliations
'''For each data point will exist numCluster RGBA values, RGB belongs to a cluster,
and the alpha value is the affiliation to this cluster.'''
self.colors = np.empty((self.fcm.numCluster, len(self.xData), 4))
for j in range(self.fcm.numCluster):
self.colors[j] = np.array([RGBA_VALS[j]] * len(self.xData))
for i in range(len(self.xData)):
self.colors[j][i][3] = self.affiliations[i][j]
self.plotArea.redraw()
class FCM():
"""Implements the Fuzzy-C-Means algorithm for 2D data. Uses no data
encapsulation or anything fancy (like the very fancy act of checking
parameters).
Note: i denotes the index determining the data point, j the one determining
the cluster.
"""
def __init__(self, xData=[], yData=[], numCluster=2, contrast=1, truncErr=0.5):
"""Initialization."""
self.xData = xData
self.yData = yData
self.numCluster = numCluster
self.contrast = contrast
self.truncErr = truncErr
self.affiliations = np.random.rand(len(xData), numCluster)
self.distances = np.empty((len(xData), numCluster))
for i in range(self.affiliations.shape[0]):
self.affiliations[i] = self.normalized(self.affiliations[i])
def normalized(self, vec):
"""Normalizes values in a list/vector so that the sum of all values equals ~1"""
s = vec.sum()
return np.array([float(vec[j])/s for j in range(vec.shape[0])])
def calcDistances(self):
"""Calcuates the distances from all data points to each cluster center"""
for i in range(0, len(self.xData)):
for j in range(0, self.numCluster):
self.distances[i][j] = math.sqrt((self.xData[i] - self.centerXCoords[j])**2 + (self.yData[i] - self.centerYCoords[j])**2)
def calcCenters(self):
"""Calculates the locations of the cluster centers"""
        self.centerXCoords = [0] * self.numCluster
        self.centerYCoords = [0] * self.numCluster
for j in range(0, self.numCluster):
denominator = 0.0
for i in range(0, len(self.xData)):
affiliationVal = self.affiliations[i][j]**self.contrast
denominator += affiliationVal
self.centerXCoords[j] += self.xData[i] * affiliationVal
self.centerYCoords[j] += self.yData[i] * affiliationVal
self.centerXCoords[j] /= denominator
self.centerYCoords[j] /= denominator
def calcAffiliation(self):
"""Recalculates the affiliation of each datapoint to each cluster by
the distance to their centers. Returns the maximum distance between
an old and the new value."""
maxDist = 0.0
exponent = 2 / (self.contrast - 1)
for i in range(len(self.xData)):
if min(self.distances[i]) == 0:
clusters = []
while min(self.distances[i]) == 0:
index = list(self.distances[i]).index(0)
clusters.append(index)
self.distances[i][index] = 1
for j in range(0, len(self.affiliations[i])):
if j in clusters:
newVal = 1.0/len(clusters)
else:
newVal = 0
if abs(newVal - self.affiliations[i][j]) > maxDist:
maxDist = abs(newVal - self.affiliations[i][j])
self.affiliations[i][j] = newVal
else:
newVec = [1/sum([(distj/dist)**exponent for dist in self.distances[i]]) for distj in self.distances[i]]
maxDistI = max(abs(newVec - self.affiliations[i]))
if maxDistI > maxDist:
maxDist = maxDistI
self.affiliations[i] = newVec
return maxDist
def run(self):
while True:
self.calcCenters()
self.calcDistances()
if self.calcAffiliation() < self.truncErr:
break
return self.centerXCoords, self.centerYCoords
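# Worked example of the membership update in FCM.calcAffiliation() (illustrative
# numbers): with contrast m = 2 the exponent is 2 / (m - 1) = 2, so a point at
# distances [1.0, 2.0] from the two cluster centers gets
#   u_1 = 1 / ((1/1)**2 + (1/2)**2) = 1 / 1.25 = 0.8
#   u_2 = 1 / ((2/1)**2 + (2/2)**2) = 1 / 5.0  = 0.2
# The memberships of a point always sum to 1 across the clusters.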
class Sidepane(ttk.Frame):
"""Contains all the buttons without any functionality."""
def __init__(self, master, *args, **kwargs):
"""Build the interface."""
self.master = master
ttk.Frame.__init__(self, master, *args, **kwargs)
self.saveButton = ttk.Button(self, text="Save Data")
self.saveButton.grid(column=5, row=5, sticky="nsew")
self.saveAsButton = ttk.Button(self, text="Save as...")
self.saveAsButton.grid(column=10, row=5, sticky="nsew")
self.loadButton = ttk.Button(self, text="Load Data...")
self.loadButton.grid(column=5, row=10, sticky="nsew")
self.resetButton = ttk.Button(self, text="Reset Data")
self.resetButton.grid(column=10, row=10, sticky="nsew")
divider = ttk.Separator(self, orient=tk.HORIZONTAL)
divider.grid(column=5, row=15, columnspan=10, sticky="nsew")
randDataDesc = ttk.Label(self, text="Create x random Datapoints:")
randDataDesc.grid(column=5, row=16, columnspan=10, sticky="nsew")
self.numRandData = tk.IntVar()
self.numRandData.set(500)
self.numRandDataChooser = tk.Spinbox(self, from_=1, to=5000, textvariable=self.numRandData)
self.numRandDataChooser.grid(column=5, row=17, sticky="nsew")
self.randDataButton = ttk.Button(self, text="Randomize Data!")
self.randDataButton.grid(column=10, row=17, sticky="nsew")
divider = ttk.Separator(self, orient=tk.HORIZONTAL)
divider.grid(column=5, row=18, columnspan=10, sticky="nsew")
numClusterDesc = ttk.Label(self, text="Number of clusters:")
numClusterDesc.grid(column=5, row=20, sticky="nsew")
self.numClusterChooser = tk.Spinbox(self, from_=2, to=max(2, len(self.master.xData)))
self.numClusterChooser.grid(column=5, row=21, sticky="nsw")
contrastDesc = ttk.Label(self, text="Set cluster contrast variable:")
contrastDesc.grid(column=5, row=24, sticky="nsew")
self.contrast = tk.DoubleVar()
contrastChooser = ttk.Scale(self, from_=1.01, to=15, variable=self.contrast)
contrastChooser.grid(column=5, row=26, sticky="nsew")
contrastChooser.set(2)
contrastDisplay = ttk.Label(self, textvariable=self.contrast, width=5)
contrastDisplay.grid(column=10, row = 26, sticky="w")
truncErrDesc = ttk.Label(self, text="Set truncation error:")
truncErrDesc.grid(column=5, row=30, sticky="nsew")
self.truncErr = tk.DoubleVar()
truncErrChooser = ttk.Scale(self, from_=0.0001, to=0.3, variable=self.truncErr)
truncErrChooser.grid(column=5, row=31, sticky="nsew")
truncErrChooser.set(0.01)
truncErrDisplay = ttk.Label(self, textvariable=self.truncErr, width=5)
truncErrDisplay.grid(column=10, row = 31, sticky="w")
self.startFCMButton = ttk.Button(self, text="Calc Clusters")
self.startFCMButton.grid(column=5, row=35, columnspan=10, sticky="nsew")
for child in self.winfo_children(): child.grid_configure(padx=2, pady=5)
def update(self):
self.numClusterChooser.config(to=max(2, len(self.master.xData)))
class PlotArea(ttk.Frame):
"""Contains the area with the data visualization, provided by matplotlib."""
def __init__(self, master, *args, **kwargs):
"""Initialize a scatter diagram using matplotlib."""
self.master = master
ttk.Frame.__init__(self, master, *args, **kwargs)
self.figure = Figure(figsize=(6, 6))
self.subplot = self.figure.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.figure, self)
self.redraw()
self.canvas.draw()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.toolbar = NavigationToolbar2Tk(self.canvas, self)
self.toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
def redraw(self):
"""Update shown graph after master's xData, yData changed."""
self.subplot.clear()
if not self.master.affiliations.size:
self.subplot.scatter(self.master.xData, self.master.yData, c='blue', cmap=None, lw=0.2, picker=3, s=75)
else:
for rgbMat in self.master.colors:
self.subplot.scatter(self.master.xData, self.master.yData, c=rgbMat, lw=0.2, picker=3, s=75)
self.subplot.scatter(self.master.centerXCoords, self.master.centerYCoords, edgecolor='black', color='white', marker='o', alpha = 1, s=150, lw=3)
if (not self.master.xData or not self.master.yData or
(max(self.master.xData) <= 1 and max(self.master.yData) <= 1
and min(self.master.xData) >= 0 and min(self.master.yData) >= 0)):
self.subplot.axis([0, 1, 0, 1])
self.canvas.draw()
def main():
"""Function to call when module runs as main application."""
mainApp = MainApp()
mainApp.mainloop()
def FCM_test():
'''FCM test function to compare computed values to manually calculated ones.
No assertions are made, since truncation errors differ between runs; inspect the printed output instead.'''
xData = [1, 2, 2]
yData = [3, 1, 3]
fcm = FCM(xData, yData, 2, 2, 0.1)
fcm.affiliations = np.array([[0.75, 0.25], [0.25, 0.75], [0.4, 0.6]])
fcm.calcCenters()
print("Center xCoords: ")
print(fcm.centerXCoords)
print("Center yCoords: ")
print(fcm.centerYCoords)
fcm.calcDistances()
print("Distances:")
print(fcm.distances)
print("Max dist to last affiliations: " + str(fcm.calcAffiliation()))
print("Affiliations:")
print(fcm.affiliations)
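# A second, hedged example alongside FCM_test, exercising FCM.run() end to end.
def FCM_example():
    '''Hedged usage sketch mirroring FCM_test: run the full FCM loop on three points.
    The constructor argument order (xData, yData, numClusters, contrast, truncErr) is
    inferred from FCM_test; initial affiliations are seeded by hand in case the
    constructor does not randomize them itself.'''
    fcm = FCM([1, 2, 2], [3, 1, 3], 2, 2, 0.01)
    fcm.affiliations = np.array([[0.75, 0.25], [0.25, 0.75], [0.4, 0.6]])
    centerX, centerY = fcm.run()
    print("Converged centers:", list(zip(centerX, centerY)))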
if __name__ == '__main__':
main()
|
|
'''
Created on May 10, 2016
@author: David R. Darling
'''
import logging
import os
import time
import ftplib
import ftputil
from ftputil.error import FTPError
import pysftp
from ensure import ensure_annotations
from FetcherClasses.Classes.EnumCommons import FtpLibNameEnum
from FetcherClasses.Classes.LoggingCommons import LoggingUtilities
from FetcherClasses.Classes.UtilityCommons import DynamicUtilities
host_url = 'localhost'
host_type = FtpLibNameEnum.FTPUTIL
host_port = 22 if host_type == FtpLibNameEnum.PYSFTP else 21
username = 'fetch_dlz'
folder_path_prefix = '/'
folder_paths_list = ['TEST1','TEST2']
recurse_remote_folders = True
remove_remote_file_on_download = True
scan_interval_seconds = 5
output_path_name = 'temp'
class FtpScanner(object):
host_url = 'localhost'
host_type = FtpLibNameEnum.PYSFTP
host_port = 22 if host_type == FtpLibNameEnum.PYSFTP else 21
username = 'anonymous'
password = 'anonymous'
folder_path_prefix = '/'
initial_folder_path = '.'
scan_interval_seconds = 60
prcnt_download_amount = 5
recursively = False
close_connection = False
is_test_mode = False
target_file = None
target_full_name = None
output_path_name = 'temp'
bytes_xfered_sofar = 0
bytes_xfered_total = 0
bytes_xfered_prcnt = 0
loggingUtilities = LoggingUtilities()
dynamicUtilities = DynamicUtilities()
logger = logging
file_dict = {}
ftp_conn = None
first_report = True
not_yet_reported = True
@ensure_annotations
def __init__(self,
host_url: str='localhost',
host_type: int=FtpLibNameEnum.PYSFTP,
host_port: int=22,
username: str='anonymous',
folder_path_prefix: str='',
initial_folder_path: str='.',
scan_interval_seconds: int=60,
is_test_mode: bool=False):
self.host_url = host_url
self.host_port = host_port
self.host_type = host_type
self.username = username
self.folder_path_prefix = folder_path_prefix
self.initial_folder_path = initial_folder_path
self.scan_interval_seconds = scan_interval_seconds
self.is_test_mode = is_test_mode
self.logger, _allLoggerFH, _errLoggerFH, err_str = self.loggingUtilities.get_logger(dft_msg_format=self.loggingUtilities.dft_msg_format,
dft_date_format=self.loggingUtilities.dft_date_format,
log_file_mode=self.loggingUtilities.log_file_mode)
# __init__ must not return a value; keep any logger-setup error on the instance instead.
self.init_err_str = err_str
@ensure_annotations
def get_ftputil_callback(self,
data_block):
self.bytes_xfered_sofar += len(data_block)
if self.bytes_xfered_total > 0:
bytes_xfered_prcnt = int(self.bytes_xfered_sofar * 100.0 / self.bytes_xfered_total)
else:
bytes_xfered_prcnt = 0
if self.first_report:
self.logger.info('Bytes xfered: %d of total bytes: %d, pct transferred: %d', self.bytes_xfered_sofar, self.bytes_xfered_total, bytes_xfered_prcnt)
self.first_report = False
if bytes_xfered_prcnt == 100 or ((bytes_xfered_prcnt - self.bytes_xfered_prcnt) >= self.prcnt_download_amount):
self.bytes_xfered_prcnt = bytes_xfered_prcnt
self.logger.info('Bytes xfered: %d of total bytes: %d, pct transferred: %d', self.bytes_xfered_sofar, self.bytes_xfered_total, bytes_xfered_prcnt)
return
@ensure_annotations
def get_pysftp_callback(self,
bytes_xfered_sofar: int,
bytes_xfered_total: int):
if bytes_xfered_total > 0:
bytes_xfered_prcnt = int(bytes_xfered_sofar * 100.0 / bytes_xfered_total)
else:
bytes_xfered_prcnt = 0
if self.first_report:
self.logger.info('Bytes xfered: %d of total bytes: %d, pct transferred: %d', bytes_xfered_sofar, bytes_xfered_total, bytes_xfered_prcnt)
self.first_report = False
if (bytes_xfered_prcnt == 100 and self.not_yet_reported) or ((bytes_xfered_prcnt - self.bytes_xfered_prcnt) >= self.prcnt_download_amount):
self.bytes_xfered_prcnt = bytes_xfered_prcnt
self.logger.info('Bytes xfered: %d of total bytes: %d, pct transferred: %d', bytes_xfered_sofar, bytes_xfered_total, bytes_xfered_prcnt)
if bytes_xfered_prcnt == 100:
self.not_yet_reported = False
return
@ensure_annotations
def get_connection(self,
logger=None,
is_test_mode: bool=False):
ftp_conn = None
if logger is None:
logger = self.logger
if is_test_mode is None:
is_test_mode = self.is_test_mode
# obtain password to FTP site via user's password keyring
password, err_str = self.dynamicUtilities.get_pwd_via_keyring(key=self.host_url,
login=self.username,
redact_passwords=True,
log_results=True,
logger=logger,
is_test_mode=is_test_mode)
if not err_str:
if self.host_type == FtpLibNameEnum.PYSFTP:
try:
logger.info('Connecting to FTP server "%s" via PYSFTP, please wait...', self.host_url)
ftp_conn = pysftp.Connection(host=self.host_url, username=self.username, password=password)
logger.info('Connected to FTP server "%s" via PYSFTP', self.host_url)
except Exception as err:
err_str = str(err)
logger.error(err_str)
elif self.host_type == FtpLibNameEnum.FTPUTIL:
try:
logger.info('Connecting to FTP server "%s" via FTPUTIL, please wait...', self.host_url)
ftp_conn = ftputil.FTPHost(host=self.host_url, user=self.username, passwd=password)
logger.info('Connected to FTP server "%s" via FTPUTIL!', self.host_url)
except Exception as err:
err_str = str(err)
logger.error(err_str)
return ftp_conn, err_str
@ensure_annotations
def get_remote_file(self,
remote_path_name: str,
remote_file_name: str,
target_path_name: str,
target_file_name: str,
ftp_conn=None,
close_connection: bool=False,
remove_remote_file_on_download: bool=False,
logger=None,
is_test_mode: bool=False):
err_str = None
if logger is None:
logger = self.logger
if is_test_mode is None:
is_test_mode = self.is_test_mode
if self.target_file is not None:
self.target_file.close()
self.target_file = None
if remote_path_name is None:
remote_path_name = self.folder_path_prefix + self.initial_folder_path
remote_dirname = remote_path_name
if remote_dirname.startswith('/'):
remote_dirname = remote_dirname[1:]
self.target_full_name = self.dynamicUtilities.expand_path(path_fldr=target_path_name,
path_name=target_file_name,
logger=logger,
is_test_mode=is_test_mode)
target_path_name = os.path.dirname(self.target_full_name)
if not os.path.exists(target_path_name):
try:
logger.info('Make directory ATTEMPT: "%s"', target_path_name)
_dirname, err_str = self.dynamicUtilities.create_path(path_name=target_path_name,
contains_file_name=False,
logger=logger,
is_test_mode=is_test_mode)
if err_str is not None:
logger.error('Make directory FAILURE: "%s"', target_path_name)
logger.error(err_str)
else:
logger.info('Make directory SUCCESS: "%s"', target_path_name)
except Exception as err:
err_str = str(err)
logger.error('Make directory FAILURE: "%s"', target_path_name)
logger.error(err_str)
if err_str is None:
if ftp_conn is None:
ftp_conn, err_str = self.get_connection()
close_connection = True
# if err_str is None:
# logger.info('Navigating to remote folder: "%s"' % remote_path_name)
# ftp_conn.chdir(remote_path_name)
# logger.info('Navigated to remote folder: "%s"' % remote_path_name)
if err_str is None:
try:
_resp_code = None
if self.host_type == FtpLibNameEnum.PYSFTP:
# PYSFTP: build the full remote entity path here; walktreeFcallback
# routes per-file downloads into this branch
if remote_path_name is not None and remote_path_name != '':
entity_path = remote_path_name + '/' + remote_file_name
else:
entity_path = remote_file_name
# derive target path name for use with folder creation if so needed
target_path_name = self.dynamicUtilities.expand_path(path_fldr=self.output_path_name,
path_name=os.path.dirname(entity_path),
logger=logger,
is_test_mode=is_test_mode)
if not os.path.exists(target_path_name):
try:
self.logger.info('Target folder creation ATTEMPT: "%s"', target_path_name)
_dirname, err_str = self.dynamicUtilities.create_path(path_name=target_path_name,
contains_file_name=False,
logger=logger,
is_test_mode=is_test_mode)
self.logger.info('Target folder creation SUCCESS: "%s"', target_path_name)
except Exception as err:
err_str = str(err)
self.logger.error('Target folder creation FAILURE: "%s"', target_path_name)
self.logger.error(err_str)
# derive target destination path
target_dest_name = self.dynamicUtilities.expand_path(path_fldr=self.output_path_name,
path_name=entity_path,
logger=logger,
is_test_mode=is_test_mode)
# if the remote file exists
if ftp_conn.exists(entity_path):
try:
self.logger.info('Remote file accessible YET? "%s"', entity_path)
_resp_code = ftp_conn.open(remote_file=entity_path,
mode='a',
bufsize=-1)
self.logger.info('Remote file accessible NOW! "%s"', entity_path)
self.bytes_xfered_prcnt = 0
try:
self.logger.info('Remote file download ATTEMPT: "%s"', entity_path)
self.not_yet_reported = True
self.first_report = True
_resp_code = ftp_conn.get(remotepath=entity_path,
localpath=target_dest_name,
callback=self.get_pysftp_callback,
preserve_mtime=True)
self.logger.info('Remote file download SUCCESS: "%s"', entity_path)
except Exception as err:
err_str = str(err)
self.logger.error('Remote file download FAILURE! "%s"', entity_path)
self.logger.error(err_str)
if remove_remote_file_on_download:
try:
self.logger.info('Remote file removal ATTEMPT: "%s"', entity_path)
ftp_conn.remove(entity_path)
self.logger.info('Remote file removal SUCCESS: "%s"', entity_path)
except Exception as err:
err_str = str(err)
self.logger.error('Remote file removal FAILURE! "%s"', entity_path)
self.logger.error(err_str)
except TypeError as err:
self.logger.info(err)
self.logger.info('Remote file accessible NOW! "%s"', entity_path)
except IOError as err:
self.logger.info(err)
self.logger.info('Remote file accessible NOT! "%s"', entity_path)
self.logger.info('Will attempt accessibility on next iteration..')
else:
self.logger.error('Remote file NOT found: %s', entity_path)
elif self.host_type == FtpLibNameEnum.FTPUTIL:
# if the source file exists
if ftp_conn.path.exists(remote_file_name):
# resp_code = ftp_util.open(path=src_file_name, mode='rb')
logger.info('Obtaining remote file size for: %s', remote_file_name)
self.bytes_xfered_prcnt = 0
self.bytes_xfered_sofar = 0
self.bytes_xfered_total = ftp_conn.path.getsize(remote_file_name)
logger.info('Obtained remote file size %d for: "%s"', self.bytes_xfered_total, remote_file_name)
logger.info('Download attempted: "%s"', remote_file_name)
self.first_report = True
_resp_code = ftp_conn.download(source=remote_file_name,
target=self.target_full_name,
callback=self.get_ftputil_callback)
logger.info('Download completed: "%s"', self.target_full_name)
if remove_remote_file_on_download:
logger.info('Remove downloaded file attempted: "%s"', remote_file_name)
ftp_conn.remove(remote_file_name)
logger.info('Remove downloaded file successful: "%s"', remote_file_name)
else:
logger.error('FTP file NOT found: "%s"', remote_file_name)
logger.error('Will attempt download later...')
except TypeError as err:
logger.info(err)
logger.info('FTP file is now downloadable: "%s"', remote_file_name)
except ftplib.error_temp as err:
logger.info(err)
except ftplib.error_reply as err:
logger.info(err)
except ftplib.error_perm as err:
logger.info(err)
logger.info('FTP file is NOT yet downloadable: "%s"', remote_file_name)
logger.info('Will attempt download later...')
except IOError as err:
logger.info(err)
logger.info('FTP file is NOT yet downloadable: "%s"', remote_file_name)
logger.info('Will attempt download later..')
if ftp_conn is not None and close_connection:
# close the FTP connection as appropriate
try:
logger.info('Closing FTP connection')
ftp_conn.close()
logger.info('Closed FTP connection')
except Exception as err:
logger.info(err)
return self.target_full_name, err_str
@ensure_annotations
def get_remote_files(self,
remote_path_name: str=None,
target_path_name: str=None,
ftp_conn=None,
close_connection: bool=False,
remove_remote_file_on_download: bool=False,
logger=None,
is_test_mode: bool=False):
err_str = None
list_of_files = []
if logger is None:
logger = self.logger
if is_test_mode is None:
is_test_mode = self.is_test_mode
else:
self.is_test_mode = is_test_mode
if remote_path_name is None:
remote_path_name = self.remote_path_name
else:
self.remote_path_name = remote_path_name
if target_path_name is None:
target_path_name = self.target_path_name
else:
self.target_path_name = target_path_name
if ftp_conn is None:
ftp_conn, err_str = self.get_connection(logger=logger,
is_test_mode=is_test_mode)
self.ftp_conn = ftp_conn
close_connection = True
else:
self.ftp_conn = ftp_conn
if close_connection is None:
close_connection = self.close_connection
if remote_path_name is None:
remote_path_name = self.folder_path_prefix + self.initial_folder_path
if remove_remote_file_on_download is None:
remove_remote_file_on_download = self.remove_remote_file_on_download
else:
self.remove_remote_file_on_download = remove_remote_file_on_download
if err_str is None:
logger.info('Navigate to remote folder ATTEMPT: "%s"', remote_path_name)
path_exists, _exists_err = self.remote_path_exists(remote_path_name)
if path_exists:
ftp_conn.chdir(remote_path_name)
logger.info('Navigate to remote folder SUCCESS: "%s"', remote_path_name)
try:
logger.info('Scanning remote folder: "%s"', remote_path_name)
if self.host_type == FtpLibNameEnum.FTPUTIL:
file_names_list = ftp_conn.listdir(remote_path_name)
for remote_file_name in file_names_list:
if ftp_conn.path.isfile(remote_file_name):
logger.info('Fetching remote file: "%s"', remote_file_name)
self.get_remote_file(remote_path_name=remote_path_name,
remote_file_name=remote_file_name,
target_path_name=target_path_name,
target_file_name=remote_file_name,
ftp_conn=ftp_conn,
close_connection=close_connection,
remove_remote_file_on_download=remove_remote_file_on_download,
logger=logger,
is_test_mode=is_test_mode)
else:
file_names_list = ftp_conn.listdir()
for remote_file_name in file_names_list:
if ftp_conn.isfile(remote_file_name):
logger.info('Fetching remote file: "%s"', remote_file_name)
self.get_remote_file(remote_path_name=remote_path_name,
remote_file_name=remote_file_name,
target_path_name=target_path_name,
target_file_name=remote_file_name,
ftp_conn=ftp_conn,
close_connection=close_connection,
remove_remote_file_on_download=remove_remote_file_on_download,
logger=logger,
is_test_mode=is_test_mode)
except Exception as err:
logger.error(err)
else:
logger.error('Navigate to remote folder FAILURE: "%s"', remote_path_name)
logger.error('Remote path does not exist or is inaccessible!')
# close the FTP connection as appropriate
if ftp_conn is not None and close_connection:
logger.info('Closing FTP connection')
ftp_conn.close()
logger.info('Closed FTP connection')
return list_of_files, err_str
@ensure_annotations
def get_remote_folders_files(self,
remote_path_name: str,
entity_path_name: str,
target_path_name: str,
recursively: bool=False,
ftp_conn=None,
close_connection: bool=False,
remove_remote_file_on_download: bool=False,
logger=None,
is_test_mode: bool=False):
err_str = None
if logger is None:
logger = self.logger
if remote_path_name is None:
remote_path_name = self.remote_path_name
else:
self.remote_path_name = remote_path_name
if target_path_name is None:
target_path_name = self.target_path_name
else:
self.target_path_name = target_path_name
if recursively is None:
recursively = self.recursively
else:
self.recursively = recursively
if ftp_conn is None:
ftp_conn = self.ftp_conn
else:
self.ftp_conn = ftp_conn
if close_connection is None:
close_connection = self.close_connection
else:
self.close_connection = close_connection
if remove_remote_file_on_download is None:
remove_remote_file_on_download = self.remove_remote_file_on_download
else:
self.remove_remote_file_on_download = remove_remote_file_on_download
if is_test_mode is None:
is_test_mode = self.is_test_mode
else:
self.is_test_mode = is_test_mode
if self.host_type == FtpLibNameEnum.PYSFTP:
try:
logger.info('Current working folder: "%s"', ftp_conn.pwd)
logger.info('Does remote path exist? "%s"', remote_path_name)
if ftp_conn.exists(remote_path_name):
logger.info('Remote path does exist! "%s"', remote_path_name)
logger.info('Navigate to remote folder ATTEMPT: "%s"', remote_path_name)
ftp_conn.cd(remote_path_name)
logger.info('Navigate to remote folder SUCCESS: "%s"', remote_path_name)
ftp_conn.walktree(remote_path_name,
self.walktreeFcallback,
self.walktreeDcallback,
self.walktreeUcallback)
else:
logger.error('Remote path does NOT exist! "%s"', remote_path_name)
except FTPError as err:
err_str = str(err)
logger.error('WALK of FTP folder "%s" encountered an FTP error: %s%s', remote_path_name, err_str, os.linesep)
logger.error('Program will retry WALK on the next iteration!%s', os.linesep)
else:
for dirpath, dirnames, filenames in ftp_conn.walk(remote_path_name):
for filename in filenames:
target_path_name_temp = self.dynamicUtilities.expand_path(path_fldr=self.output_path_name,
path_name=dirpath[1:] if dirpath.startswith('/') else dirpath,
logger=logger,
is_test_mode=is_test_mode)
try:
logger.info('Navigate to remote folder ATTEMPT: "%s"', dirpath)
ftp_conn.chdir(dirpath)
logger.info('Navigate to remote folder SUCCESS: "%s"', dirpath)
self.get_remote_file(dirpath, filename, target_path_name_temp, filename, ftp_conn, close_connection, remove_remote_file_on_download, logger=logger)
except Exception as err:
err_str = str(err)
logger.error('Navigate to remote folder FAILURE: "%s"', dirpath)
logger.error(err_str)
if recursively:
for dirname in dirnames:
temppath = ftp_conn.path.join(dirpath, dirname)
target_path_name_temp = self.dynamicUtilities.expand_path(path_fldr=self.output_path_name,
path_name=dirpath[1:] if dirpath.startswith('/') else dirpath,
logger=logger,
is_test_mode=is_test_mode)
self.get_remote_folders_files(remote_path_name=temppath,
entity_path_name=dirpath,
target_path_name=target_path_name,
recursively=recursively,
ftp_conn=ftp_conn,
close_connection=close_connection,
remove_remote_file_on_download=remove_remote_file_on_download,
logger=logger,
is_test_mode=is_test_mode)
return err_str
@ensure_annotations
def remote_path_exists(self,
path_name: str,
logger=None):
exists = False
err_str = None
if logger is None:
logger = self.logger
try:
if self.host_type == FtpLibNameEnum.FTPUTIL:
logger.info('Remote path exists via FTPUTIL? "%s"', path_name)
exists = self.ftp_conn.path.exists(path_name)
logger.info('Remote path exists via FTPUTIL! "%s"', path_name)
else:
logger.info('Remote path exists via PYSFTP? "%s"', path_name)
exists = self.ftp_conn.exists(path_name)
logger.info('Remote path exists via PYSFTP! "%s"', path_name)
except Exception as err:
err_str = str(err)
logger.error('Remote path name does NOT exist: "%s"', path_name)
logger.error(err_str)
return exists, err_str
@ensure_annotations
def walktreeFcallback(self,
entity_path: str):
# derive target path name for use with folder creation if so needed
target_path_name = self.dynamicUtilities.expand_path(path_fldr=self.output_path_name,
path_name=os.path.dirname(entity_path),
logger=self.logger,
is_test_mode=self.is_test_mode)
_target_full_name, _err_str = self.get_remote_file(os.path.dirname(entity_path), os.path.basename(entity_path), target_path_name, os.path.basename(entity_path), self.ftp_conn, self.close_connection, self.remove_remote_file_on_download, self.logger)
return
@ensure_annotations
def walktreeDcallback(self,
entity_path: str):
return
@ensure_annotations
def walktreeUcallback(self,
entity_path: str):
return
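# Hedged usage sketch: fetch one named file with the FtpScanner class above. The
# folder and file names are placeholders; get_connection() resolves the real
# password via the user's keyring, exactly as the __main__ block below does.
def fetch_single_file_example():
    scanner = FtpScanner(host_url=host_url,
                         host_type=FtpLibNameEnum.FTPUTIL,
                         host_port=21,
                         username=username,
                         folder_path_prefix='/',
                         initial_folder_path='TEST1',
                         scan_interval_seconds=5,
                         is_test_mode=False)
    scanner.output_path_name = output_path_name
    scanner.ftp_conn, err_str = scanner.get_connection()
    if not err_str:
        local_name, err_str = scanner.get_remote_file(remote_path_name='/TEST1',
                                                      remote_file_name='example.dat',
                                                      target_path_name=output_path_name,
                                                      target_file_name='example.dat',
                                                      ftp_conn=scanner.ftp_conn,
                                                      close_connection=True)
        if err_str is None:
            scanner.logger.info('Downloaded to: %s', local_name)
    return err_str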
if __name__ == '__main__':
while True:
for current_folder_path in folder_paths_list:
remote_path_name = folder_path_prefix + current_folder_path
ftpScanner = FtpScanner(host_url=host_url,
host_type=host_type,
host_port=host_port,
username=username,
folder_path_prefix=folder_path_prefix,
initial_folder_path=current_folder_path,
scan_interval_seconds=scan_interval_seconds,
is_test_mode=False)
ftpScanner.output_path_name = output_path_name
if not os.path.exists(output_path_name):
_dirname, err_str = ftpScanner.dynamicUtilities.create_path(path_name=output_path_name,
contains_file_name=False,
logger=ftpScanner.logger,
is_test_mode=ftpScanner.is_test_mode)
ftpScanner.ftp_conn, err_str = ftpScanner.get_connection()
if not err_str:
ftpScanner.logger.info('Remote path exists YES? "%s"', remote_path_name)
path_exists, err_str = ftpScanner.remote_path_exists(remote_path_name)
if not path_exists:
ftpScanner.logger.warning('Remote path exists NOT! "%s"', remote_path_name)
if folder_path_prefix == '/':
ftpScanner.logger.warning('Remove folder_path_prefix, try existence check again.')
remote_path_name = current_folder_path
else:
ftpScanner.logger.warning('Prepend folder_path_prefix, try existence check again.')
remote_path_name = folder_path_prefix + current_folder_path
ftpScanner.logger.info('Remote path exists YES? "%s"', remote_path_name)
path_exists, err_str = ftpScanner.remote_path_exists(remote_path_name)
if path_exists:
ftpScanner.logger.info('Remote path exists YES!: "%s"', remote_path_name)
ftpScanner.logger.info('Attempt download of files from remote path: "%s"', remote_path_name)
ftpScanner.get_remote_folders_files(remote_path_name=remote_path_name,
entity_path_name=remote_path_name,
target_path_name=output_path_name,
recursively=recurse_remote_folders,
ftp_conn=ftpScanner.ftp_conn,
close_connection=False,
remove_remote_file_on_download=remove_remote_file_on_download)
else:
ftpScanner.logger.error('Remote path exists NOT! "%s"', remote_path_name)
if ftpScanner.ftp_conn is not None:
ftpScanner.logger.info('Closing FTP connection')
ftpScanner.ftp_conn.close()
ftpScanner.logger.info('Closed FTP connection')
ftpScanner.logger.info('---------------------------------------')
ftpScanner.logger.info('Sleeping for %d seconds, please wait...', scan_interval_seconds)
ftpScanner.logger.info('=======================================')
time.sleep(scan_interval_seconds)
# break
|
|
import numpy as np
import scipy.sparse as sp
from scipy.constants import mu_0
import properties
from ...utils.code_utils import deprecate_class
from discretize.utils import Zero
from ... import props
from ...data import Data
from ...utils import mkvc
from ..base import BaseEMSimulation
from ..utils import omega
from .survey import Survey
from .fields import (
FieldsFDEM,
Fields3DElectricField,
Fields3DMagneticFluxDensity,
Fields3DMagneticField,
Fields3DCurrentDensity,
)
class BaseFDEMSimulation(BaseEMSimulation):
"""
We start by looking at Maxwell's equations in the electric
field \\\(\\\mathbf{e}\\\) and the magnetic flux
density \\\(\\\mathbf{b}\\\)
.. math ::
\mathbf{C} \mathbf{e} + i \omega \mathbf{b} = \mathbf{s_m} \\\\
{\mathbf{C}^{\\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{b} -
\mathbf{M_{\sigma}^e} \mathbf{e} = \mathbf{s_e}}
if using the E-B formulation (:code:`Simulation3DElectricField`
or :code:`Simulation3DMagneticFluxDensity`). Note that in this case,
:math:`\mathbf{s_e}` is an integrated quantity.
If we write Maxwell's equations in terms of
\\\(\\\mathbf{h}\\\) and current density \\\(\\\mathbf{j}\\\)
.. math ::
\mathbf{C}^{\\top} \mathbf{M_{\\rho}^f} \mathbf{j} +
i \omega \mathbf{M_{\mu}^e} \mathbf{h} = \mathbf{s_m} \\\\
\mathbf{C} \mathbf{h} - \mathbf{j} = \mathbf{s_e}
if using the H-J formulation (:code:`Simulation3DCurrentDensity` or
:code:`Simulation3DMagneticField`). Note that here, :math:`\mathbf{s_m}` is an
integrated quantity.
The problem performs the elimination so that we are solving the system
for \\\(\\\mathbf{e},\\\mathbf{b},\\\mathbf{j} \\\) or
\\\(\\\mathbf{h}\\\)
"""
fieldsPair = FieldsFDEM
mu, muMap, muDeriv = props.Invertible("Magnetic Permeability (H/m)", default=mu_0)
mui, muiMap, muiDeriv = props.Invertible("Inverse Magnetic Permeability (m/H)")
props.Reciprocal(mu, mui)
forward_only = properties.Boolean(
"If True, A-inverse not stored at each frequency in forward simulation",
default=False,
)
survey = properties.Instance("a survey object", Survey, required=True)
# @profile
def fields(self, m=None):
"""
Solve the forward problem for the fields.
:param numpy.ndarray m: inversion model (nP,)
:rtype: numpy.ndarray
:return f: forward solution
"""
if m is not None:
self.model = m
try:
self.Ainv
except AttributeError:
if self.verbose:
print("num_frequencies =", self.survey.num_frequencies)
self.Ainv = [None for i in range(self.survey.num_frequencies)]
if self.Ainv[0] is not None:
for i in range(self.survey.num_frequencies):
self.Ainv[i].clean()
if self.verbose:
print("Cleaning Ainv")
f = self.fieldsPair(self)
for nf, freq in enumerate(self.survey.frequencies):
A = self.getA(freq)
rhs = self.getRHS(freq)
self.Ainv[nf] = self.solver(A, **self.solver_opts)
u = self.Ainv[nf] * rhs
Srcs = self.survey.get_sources_by_frequency(freq)
f[Srcs, self._solutionType] = u
if self.forward_only:
if self.verbose:
print("Fields simulated for frequency {}".format(nf))
self.Ainv[nf].clean()
return f
# @profile
def Jvec(self, m, v, f=None):
"""
Sensitivity times a vector.
:param numpy.ndarray m: inversion model (nP,)
:param numpy.ndarray v: vector which we take sensitivity product with
(nP,)
:param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM u: fields object
:rtype: numpy.ndarray
:return: Jv (ndata,)
"""
if f is None:
f = self.fields(m)
self.model = m
# Jv = Data(self.survey)
Jv = []
for nf, freq in enumerate(self.survey.frequencies):
for src in self.survey.get_sources_by_frequency(freq):
u_src = f[src, self._solutionType]
dA_dm_v = self.getADeriv(freq, u_src, v, adjoint=False)
dRHS_dm_v = self.getRHSDeriv(freq, src, v)
du_dm_v = self.Ainv[nf] * (-dA_dm_v + dRHS_dm_v)
for rx in src.receiver_list:
Jv.append(rx.evalDeriv(src, self.mesh, f, du_dm_v=du_dm_v, v=v))
return np.hstack(Jv)
# @profile
def Jtvec(self, m, v, f=None):
"""
Sensitivity transpose times a vector
:param numpy.ndarray m: inversion model (nP,)
:param numpy.ndarray v: vector which we take adjoint product with (nP,)
:param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM u: fields object
:rtype: numpy.ndarray
:return: Jv (ndata,)
"""
if f is None:
f = self.fields(m)
self.model = m
# Ensure v is a data object.
if not isinstance(v, Data):
v = Data(self.survey, v)
Jtv = np.zeros(m.size)
for nf, freq in enumerate(self.survey.frequencies):
for src in self.survey.get_sources_by_frequency(freq):
u_src = f[src, self._solutionType]
df_duT_sum = 0
df_dmT_sum = 0
for rx in src.receiver_list:
df_duT, df_dmT = rx.evalDeriv(
src, self.mesh, f, v=v[src, rx], adjoint=True
)
if not isinstance(df_duT, Zero):
df_duT_sum += df_duT
if not isinstance(df_dmT, Zero):
df_dmT_sum += df_dmT
ATinvdf_duT = self.Ainv[nf] * df_duT_sum
dA_dmT = self.getADeriv(freq, u_src, ATinvdf_duT, adjoint=True)
dRHS_dmT = self.getRHSDeriv(freq, src, ATinvdf_duT, adjoint=True)
du_dmT = -dA_dmT + dRHS_dmT
df_dmT_sum += du_dmT
Jtv += np.real(df_dmT_sum)
return mkvc(Jtv)
# @profile
def getSourceTerm(self, freq):
"""
Evaluates the sources for a given frequency and puts them in matrix
form
:param float freq: Frequency
:rtype: tuple
:return: (s_m, s_e) (nE or nF, nSrc)
"""
Srcs = self.survey.get_sources_by_frequency(freq)
if self._formulation == "EB":
s_m = np.zeros((self.mesh.nF, len(Srcs)), dtype=complex)
s_e = np.zeros((self.mesh.nE, len(Srcs)), dtype=complex)
elif self._formulation == "HJ":
s_m = np.zeros((self.mesh.nE, len(Srcs)), dtype=complex)
s_e = np.zeros((self.mesh.nF, len(Srcs)), dtype=complex)
for i, src in enumerate(Srcs):
smi, sei = src.eval(self)
s_m[:, i] = s_m[:, i] + smi
s_e[:, i] = s_e[:, i] + sei
return s_m, s_e
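# Hedged sketch of a dot-product (adjoint) check for the Jvec/Jtvec pair defined in
# BaseFDEMSimulation above: for random v and w, w.T (J v) should match v.T (J.T w)
# up to solver tolerance. "sim" is assumed to be an already-constructed simulation
# (any of the classes below) and "m0" a model vector of the right size; neither is
# defined in this module.
def dot_product_check(sim, m0, rtol=1e-6, seed=0):
    rng = np.random.default_rng(seed)
    f = sim.fields(m0)
    v = rng.standard_normal(m0.size)
    w = rng.standard_normal(sim.survey.nD)
    lhs = w.dot(sim.Jvec(m0, v, f=f))
    rhs = v.dot(sim.Jtvec(m0, w, f=f))
    passed = abs(lhs - rhs) <= rtol * 0.5 * (abs(lhs) + abs(rhs))
    print("w.T J v =", lhs, " v.T J.T w =", rhs, " passed =", passed)
    return passed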
###############################################################################
# E-B Formulation #
###############################################################################
class Simulation3DElectricField(BaseFDEMSimulation):
"""
By eliminating the magnetic flux density using
.. math ::
\mathbf{b} = \\frac{1}{i \omega}\\left(-\mathbf{C} \mathbf{e} +
\mathbf{s_m}\\right)
we can write Maxwell's equations as a second order system in
\\\(\\\mathbf{e}\\\) only:
.. math ::
\\left(\mathbf{C}^{\\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{C} +
i \omega \mathbf{M^e_{\sigma}} \\right)\mathbf{e} =
\mathbf{C}^{\\top} \mathbf{M_{\mu^{-1}}^f}\mathbf{s_m}
- i\omega\mathbf{M^e}\mathbf{s_e}
which we solve for :math:`\mathbf{e}`.
:param discretize.base.BaseMesh mesh: mesh
"""
_solutionType = "eSolution"
_formulation = "EB"
fieldsPair = Fields3DElectricField
def __init__(self, mesh, **kwargs):
super(Simulation3DElectricField, self).__init__(mesh, **kwargs)
def getA(self, freq):
"""
System matrix
.. math ::
\mathbf{A} = \mathbf{C}^{\\top} \mathbf{M_{\mu^{-1}}^f} \mathbf{C}
+ i \omega \mathbf{M^e_{\sigma}}
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MfMui = self.MfMui
MeSigma = self.MeSigma
C = self.mesh.edgeCurl
return C.T * MfMui * C + 1j * omega(freq) * MeSigma
# def getADeriv(self, freq, u, v, adjoint=False):
# return
def getADeriv_sigma(self, freq, u, v, adjoint=False):
"""
Product of the derivative of our system matrix with respect to the
conductivity model and a vector
.. math ::
\\frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}_{\\sigma}} =
i \omega \\frac{d \mathbf{M^e_{\sigma}}(\mathbf{u})\mathbf{v} }{d\mathbf{m}}
:param float freq: frequency
:param numpy.ndarray u: solution vector (nE,)
:param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for
adjoint
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative of the system matrix times a vector (nP,) or
adjoint (nD,)
"""
dMe_dsig_v = self.MeSigmaDeriv(u, v, adjoint)
return 1j * omega(freq) * dMe_dsig_v
def getADeriv_mui(self, freq, u, v, adjoint=False):
"""
Product of the derivative of the system matrix with respect to the
permeability model and a vector.
.. math ::
\\frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}_{\\mu^{-1}}} =
\mathbf{C}^{\top} \\frac{d \mathbf{M^f_{\\mu^{-1}}}\mathbf{v}}{d\mathbf{m}}
"""
C = self.mesh.edgeCurl
if adjoint:
return self.MfMuiDeriv(C * u).T * (C * v)
return C.T * (self.MfMuiDeriv(C * u) * v)
def getADeriv(self, freq, u, v, adjoint=False):
return self.getADeriv_sigma(freq, u, v, adjoint) + self.getADeriv_mui(
freq, u, v, adjoint
)
def getRHS(self, freq):
"""
Right hand side for the system
.. math ::
\mathbf{RHS} = \mathbf{C}^{\\top}
\mathbf{M_{\mu^{-1}}^f}\mathbf{s_m} -
i\omega\mathbf{M_e}\mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray
:return: RHS (nE, nSrc)
"""
s_m, s_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MfMui = self.MfMui
return C.T * (MfMui * s_m) - 1j * omega(freq) * s_e
def getRHSDeriv(self, freq, src, v, adjoint=False):
"""
Derivative of the Right-hand side with respect to the model. This
includes calls to derivatives in the sources
"""
C = self.mesh.edgeCurl
MfMui = self.MfMui
s_m, s_e = self.getSourceTerm(freq)
s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint)
MfMuiDeriv = self.MfMuiDeriv(s_m)
if adjoint:
return (
s_mDeriv(MfMui * (C * v))
+ MfMuiDeriv.T * (C * v)
- 1j * omega(freq) * s_eDeriv(v)
)
return C.T * (MfMui * s_mDeriv(v) + MfMuiDeriv * v) - 1j * omega(
freq
) * s_eDeriv(v)
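# Hedged, standalone illustration of the system-matrix pattern assembled in getA()
# above: a weighted curl-curl term plus i*omega times an edge mass matrix. The tiny
# random operators below stand in for the mesh's edgeCurl and mass matrices; only
# the algebraic structure is meaningful, not the values.
def toy_eb_system_matrix(n_edges=8, n_faces=12, freq=100.0, seed=0):
    rng = np.random.default_rng(seed)
    C = sp.random(n_faces, n_edges, density=0.3, random_state=seed, format="csr")
    MfMui = sp.diags(rng.uniform(1.0, 2.0, n_faces))
    MeSigma = sp.diags(rng.uniform(0.01, 1.0, n_edges))
    # Same shape of expression as getA: C.T * MfMui * C + 1j * omega(freq) * MeSigma
    return (C.T @ MfMui @ C + 1j * omega(freq) * MeSigma).tocsr()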
class Simulation3DMagneticFluxDensity(BaseFDEMSimulation):
"""
We eliminate :math:`\mathbf{e}` using
.. math ::
\mathbf{e} = \mathbf{M^e_{\sigma}}^{-1} \\left(\mathbf{C}^{\\top}
\mathbf{M_{\mu^{-1}}^f} \mathbf{b} - \mathbf{s_e}\\right)
and solve for :math:`\mathbf{b}` using:
.. math ::
\\left(\mathbf{C} \mathbf{M^e_{\sigma}}^{-1} \mathbf{C}^{\\top}
\mathbf{M_{\mu^{-1}}^f} + i \omega \\right)\mathbf{b} = \mathbf{s_m} +
\mathbf{C} \mathbf{M^e_{\sigma}}^{-1}\mathbf{M^e}\mathbf{s_e}
.. note ::
The inverse problem will not work with full anisotropy
:param discretize.base.BaseMesh mesh: mesh
"""
_solutionType = "bSolution"
_formulation = "EB"
fieldsPair = Fields3DMagneticFluxDensity
def __init__(self, mesh, **kwargs):
super(Simulation3DMagneticFluxDensity, self).__init__(mesh, **kwargs)
def getA(self, freq):
"""
System matrix
.. math ::
\mathbf{A} = \mathbf{C} \mathbf{M^e_{\sigma}}^{-1}
\mathbf{C}^{\\top} \mathbf{M_{\mu^{-1}}^f} + i \omega
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MfMui = self.MfMui
MeSigmaI = self.MeSigmaI
C = self.mesh.edgeCurl
iomega = 1j * omega(freq) * sp.eye(self.mesh.nF)
A = C * (MeSigmaI * (C.T * MfMui)) + iomega
if self._makeASymmetric is True:
return MfMui.T * A
return A
def getADeriv_sigma(self, freq, u, v, adjoint=False):
"""
Product of the derivative of our system matrix with respect to the
model and a vector
.. math ::
\\frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}} =
\mathbf{C} \\frac{\mathbf{M^e_{\sigma}} \mathbf{v}}{d\mathbf{m}}
:param float freq: frequency
:param numpy.ndarray u: solution vector (nF,)
:param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for
adjoint
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative of the system matrix times a vector (nP,) or
adjoint (nD,)
"""
MfMui = self.MfMui
C = self.mesh.edgeCurl
MeSigmaIDeriv = self.MeSigmaIDeriv
vec = C.T * (MfMui * u)
if adjoint:
return MeSigmaIDeriv(vec, C.T * v, adjoint)
return C * MeSigmaIDeriv(vec, v, adjoint)
# if adjoint:
# return MeSigmaIDeriv.T * (C.T * v)
# return C * (MeSigmaIDeriv * v)
def getADeriv_mui(self, freq, u, v, adjoint=False):
MfMui = self.MfMui
MfMuiDeriv = self.MfMuiDeriv(u)
MeSigmaI = self.MeSigmaI
C = self.mesh.edgeCurl
if adjoint:
return MfMuiDeriv.T * (C * (MeSigmaI.T * (C.T * v)))
return C * (MeSigmaI * (C.T * (MfMuiDeriv * v)))
def getADeriv(self, freq, u, v, adjoint=False):
if adjoint is True and self._makeASymmetric:
v = self.MfMui * v
ADeriv = self.getADeriv_sigma(freq, u, v, adjoint) + self.getADeriv_mui(
freq, u, v, adjoint
)
if adjoint is False and self._makeASymmetric:
return self.MfMui.T * ADeriv
return ADeriv
def getRHS(self, freq):
"""
Right hand side for the system
.. math ::
\mathbf{RHS} = \mathbf{s_m} +
\mathbf{C} \mathbf{M^e_{\sigma}}^{-1}\mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray
:return: RHS (nE, nSrc)
"""
s_m, s_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MeSigmaI = self.MeSigmaI
RHS = s_m + C * (MeSigmaI * s_e)
if self._makeASymmetric is True:
MfMui = self.MfMui
return MfMui.T * RHS
return RHS
def getRHSDeriv(self, freq, src, v, adjoint=False):
"""
Derivative of the right hand side with respect to the model
:param float freq: frequency
:param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of rhs deriv with a vector
"""
C = self.mesh.edgeCurl
s_m, s_e = src.eval(self)
MfMui = self.MfMui
if self._makeASymmetric and adjoint:
v = self.MfMui * v
# MeSigmaIDeriv = self.MeSigmaIDeriv(s_e)
s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint)
if not adjoint:
# RHSderiv = C * (MeSigmaIDeriv * v)
RHSderiv = C * self.MeSigmaIDeriv(s_e, v, adjoint)
SrcDeriv = s_mDeriv(v) + C * (self.MeSigmaI * s_eDeriv(v))
elif adjoint:
# RHSderiv = MeSigmaIDeriv.T * (C.T * v)
RHSderiv = self.MeSigmaIDeriv(s_e, C.T * v, adjoint)
SrcDeriv = s_mDeriv(v) + s_eDeriv(self.MeSigmaI.T * (C.T * v))
if self._makeASymmetric is True and not adjoint:
return MfMui.T * (SrcDeriv + RHSderiv)
return RHSderiv + SrcDeriv
###############################################################################
# H-J Formulation #
###############################################################################
class Simulation3DCurrentDensity(BaseFDEMSimulation):
"""
We eliminate \\\(\\\mathbf{h}\\\) using
.. math ::
\mathbf{h} = \\frac{1}{i \omega} \mathbf{M_{\mu}^e}^{-1}
\\left(-\mathbf{C}^{\\top} \mathbf{M_{\\rho}^f} \mathbf{j} +
\mathbf{M^e} \mathbf{s_m} \\right)
and solve for \\\(\\\mathbf{j}\\\) using
.. math ::
\\left(\mathbf{C} \mathbf{M_{\mu}^e}^{-1} \mathbf{C}^{\\top}
\mathbf{M_{\\rho}^f} + i \omega\\right)\mathbf{j} =
\mathbf{C} \mathbf{M_{\mu}^e}^{-1} \mathbf{M^e} \mathbf{s_m} -
i\omega\mathbf{s_e}
.. note::
This implementation does not yet work with full anisotropy!!
:param discretize.base.BaseMesh mesh: mesh
"""
_solutionType = "jSolution"
_formulation = "HJ"
fieldsPair = Fields3DCurrentDensity
def __init__(self, mesh, **kwargs):
super(Simulation3DCurrentDensity, self).__init__(mesh, **kwargs)
def getA(self, freq):
"""
System matrix
.. math ::
\\mathbf{A} = \\mathbf{C} \\mathbf{M^e_{\\mu^{-1}}}
\\mathbf{C}^{\\top} \\mathbf{M^f_{\\sigma^{-1}}} + i\\omega
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MeMuI = self.MeMuI
MfRho = self.MfRho
C = self.mesh.edgeCurl
iomega = 1j * omega(freq) * sp.eye(self.mesh.nF)
A = C * MeMuI * C.T * MfRho + iomega
if self._makeASymmetric is True:
return MfRho.T * A
return A
def getADeriv_rho(self, freq, u, v, adjoint=False):
"""
Product of the derivative of our system matrix with respect to the
model and a vector
In this case, we assume that electrical conductivity, :math:`\sigma`
is the physical property of interest (i.e. :math:`\sigma` =
model.transform). Then we want
.. math ::
\\frac{\mathbf{A(\sigma)} \mathbf{v}}{d \mathbf{m}} =
\mathbf{C} \mathbf{M^e_{mu^{-1}}} \mathbf{C^{\\top}}
\\frac{d \mathbf{M^f_{\sigma^{-1}}}\mathbf{v} }{d \mathbf{m}}
:param float freq: frequency
:param numpy.ndarray u: solution vector (nF,)
:param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for
adjoint
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative of the system matrix times a vector (nP,) or
adjoint (nD,)
"""
MeMuI = self.MeMuI
MfRho = self.MfRho
C = self.mesh.edgeCurl
if adjoint:
vec = C * (MeMuI.T * (C.T * v))
return self.MfRhoDeriv(u, vec, adjoint)
return C * (MeMuI * (C.T * (self.MfRhoDeriv(u, v, adjoint))))
# MfRhoDeriv = self.MfRhoDeriv(u)
# if adjoint:
# return MfRhoDeriv.T * (C * (MeMuI.T * (C.T * v)))
# return C * (MeMuI * (C.T * (MfRhoDeriv * v)))
def getADeriv_mu(self, freq, u, v, adjoint=False):
C = self.mesh.edgeCurl
MfRho = self.MfRho
MeMuIDeriv = self.MeMuIDeriv(C.T * (MfRho * u))
if adjoint is True:
# if self._makeASymmetric:
# v = MfRho * v
return MeMuIDeriv.T * (C.T * v)
Aderiv = C * (MeMuIDeriv * v)
# if self._makeASymmetric:
# Aderiv = MfRho.T * Aderiv
return Aderiv
def getADeriv(self, freq, u, v, adjoint=False):
if adjoint and self._makeASymmetric:
v = self.MfRho * v
ADeriv = self.getADeriv_rho(freq, u, v, adjoint) + self.getADeriv_mu(
freq, u, v, adjoint
)
if not adjoint and self._makeASymmetric:
return self.MfRho.T * ADeriv
return ADeriv
def getRHS(self, freq):
"""
Right hand side for the system
.. math ::
\mathbf{RHS} = \mathbf{C} \mathbf{M_{\mu}^e}^{-1}\mathbf{s_m}
- i\omega \mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray
:return: RHS (nE, nSrc)
"""
s_m, s_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MeMuI = self.MeMuI
RHS = C * (MeMuI * s_m) - 1j * omega(freq) * s_e
if self._makeASymmetric is True:
MfRho = self.MfRho
return MfRho.T * RHS
return RHS
def getRHSDeriv(self, freq, src, v, adjoint=False):
"""
Derivative of the right hand side with respect to the model
:param float freq: frequency
:param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of rhs deriv with a vector
"""
# RHS = C * (MeMuI * s_m) - 1j * omega(freq) * s_e
# if self._makeASymmetric is True:
# MfRho = self.MfRho
# return MfRho.T*RHS
C = self.mesh.edgeCurl
MeMuI = self.MeMuI
MeMuIDeriv = self.MeMuIDeriv
s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint)
s_m, _ = self.getSourceTerm(freq)
if adjoint:
if self._makeASymmetric:
MfRho = self.MfRho
v = MfRho * v
CTv = C.T * v
return (
s_mDeriv(MeMuI.T * CTv)
+ MeMuIDeriv(s_m).T * CTv
- 1j * omega(freq) * s_eDeriv(v)
)
else:
RHSDeriv = C * (MeMuI * s_mDeriv(v) + MeMuIDeriv(s_m) * v) - 1j * omega(
freq
) * s_eDeriv(v)
if self._makeASymmetric:
MfRho = self.MfRho
return MfRho.T * RHSDeriv
return RHSDeriv
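# Hedged companion to the dot-product check above: a first-order Taylor test for
# Jvec. For a small step h, |dpred(m0 + h*dm) - dpred(m0) - h*J(m0) dm| should
# shrink roughly like h**2 while the uncorrected residual shrinks like h. Again,
# "sim" and "m0" are assumed to exist outside this module.
def taylor_check(sim, m0, dm=None, steps=(1e-1, 1e-2, 1e-3)):
    if dm is None:
        dm = 0.1 * np.random.default_rng(1).standard_normal(m0.size)
    f0 = sim.fields(m0)
    d0 = sim.dpred(m0, f=f0)
    Jdm = sim.Jvec(m0, dm, f=f0)
    for h in steps:
        dh = sim.dpred(m0 + h * dm)
        print("h = %.0e  |O(h)| = %.3e  |O(h^2)| = %.3e"
              % (h, np.linalg.norm(dh - d0), np.linalg.norm(dh - d0 - h * Jdm)))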
class Simulation3DMagneticField(BaseFDEMSimulation):
"""
We eliminate \\\(\\\mathbf{j}\\\) using
.. math ::
\mathbf{j} = \mathbf{C} \mathbf{h} - \mathbf{s_e}
and solve for \\\(\\\mathbf{h}\\\) using
.. math ::
\\left(\mathbf{C}^{\\top} \mathbf{M_{\\rho}^f} \mathbf{C} +
i \omega \mathbf{M_{\mu}^e}\\right) \mathbf{h} = \mathbf{M^e}
\mathbf{s_m} + \mathbf{C}^{\\top} \mathbf{M_{\\rho}^f} \mathbf{s_e}
:param discretize.base.BaseMesh mesh: mesh
"""
_solutionType = "hSolution"
_formulation = "HJ"
fieldsPair = Fields3DMagneticField
def __init__(self, mesh, **kwargs):
super(Simulation3DMagneticField, self).__init__(mesh, **kwargs)
def getA(self, freq):
"""
System matrix
.. math::
\mathbf{A} = \mathbf{C}^{\\top} \mathbf{M_{\\rho}^f} \mathbf{C} +
i \omega \mathbf{M_{\mu}^e}
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MeMu = self.MeMu
MfRho = self.MfRho
C = self.mesh.edgeCurl
return C.T * (MfRho * C) + 1j * omega(freq) * MeMu
def getADeriv_rho(self, freq, u, v, adjoint=False):
"""
Product of the derivative of our system matrix with respect to the
model and a vector
.. math::
\\frac{\mathbf{A}(\mathbf{m}) \mathbf{v}}{d \mathbf{m}} =
\mathbf{C}^{\\top}\\frac{d \mathbf{M^f_{\\rho}}\mathbf{v}}
{d\mathbf{m}}
:param float freq: frequency
:param numpy.ndarray u: solution vector (nE,)
:param numpy.ndarray v: vector to take prodct with (nP,) or (nD,) for
adjoint
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative of the system matrix times a vector (nP,) or
adjoint (nD,)
"""
MeMu = self.MeMu
C = self.mesh.edgeCurl
if adjoint:
return self.MfRhoDeriv(C * u, C * v, adjoint)
return C.T * self.MfRhoDeriv(C * u, v, adjoint)
# MfRhoDeriv = self.MfRhoDeriv(C*u)
# if adjoint:
# return MfRhoDeriv.T * (C * v)
# return C.T * (MfRhoDeriv * v)
def getADeriv_mu(self, freq, u, v, adjoint=False):
MeMuDeriv = self.MeMuDeriv(u)
if adjoint is True:
return 1j * omega(freq) * (MeMuDeriv.T * v)
return 1j * omega(freq) * (MeMuDeriv * v)
def getADeriv(self, freq, u, v, adjoint=False):
return self.getADeriv_rho(freq, u, v, adjoint) + self.getADeriv_mu(
freq, u, v, adjoint
)
def getRHS(self, freq):
"""
Right hand side for the system
.. math ::
\mathbf{RHS} = \mathbf{M^e} \mathbf{s_m} + \mathbf{C}^{\\top}
\mathbf{M_{\\rho}^f} \mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray
:return: RHS (nE, nSrc)
"""
s_m, s_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MfRho = self.MfRho
return s_m + C.T * (MfRho * s_e)
def getRHSDeriv(self, freq, src, v, adjoint=False):
"""
Derivative of the right hand side with respect to the model
:param float freq: frequency
:param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM src: FDEM source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of rhs deriv with a vector
"""
_, s_e = src.eval(self)
C = self.mesh.edgeCurl
MfRho = self.MfRho
# MfRhoDeriv = self.MfRhoDeriv(s_e)
# if not adjoint:
# RHSDeriv = C.T * (MfRhoDeriv * v)
# elif adjoint:
# RHSDeriv = MfRhoDeriv.T * (C * v)
if not adjoint:
RHSDeriv = C.T * (self.MfRhoDeriv(s_e, v, adjoint))
elif adjoint:
RHSDeriv = self.MfRhoDeriv(s_e, C * v, adjoint)
s_mDeriv, s_eDeriv = src.evalDeriv(self, adjoint=adjoint)
return RHSDeriv + s_mDeriv(v) + C.T * (MfRho * s_eDeriv(v))
############
# Deprecated
############
@deprecate_class(removal_version="0.16.0", error=True)
class Problem3D_e(Simulation3DElectricField):
pass
@deprecate_class(removal_version="0.16.0", error=True)
class Problem3D_b(Simulation3DMagneticFluxDensity):
pass
@deprecate_class(removal_version="0.16.0", error=True)
class Problem3D_h(Simulation3DMagneticField):
pass
@deprecate_class(removal_version="0.16.0", error=True)
class Problem3D_j(Simulation3DCurrentDensity):
pass
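# Hedged usage sketch for this module: given a discretize mesh, a conductivity
# vector and a frequency-domain Survey assembled elsewhere (from sources and
# receivers), build the E-field simulation defined above and predict data.
# "maps.IdentityMap" follows the usual SimPEG convention but is an assumption
# here, not something this module imports; the simulation's default solver is used.
def run_efield_example(mesh, survey, sigma):
    from SimPEG import maps  # assumed available alongside this module
    sim = Simulation3DElectricField(
        mesh,
        survey=survey,
        sigmaMap=maps.IdentityMap(nP=mesh.nC),
    )
    f = sim.fields(sigma)
    dpred = sim.dpred(sigma, f=f)
    return f, dpred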
|
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .provider import (TargetThread, ThreadProvider)
from .common import (read_c_string, HandlerModeThread)
from ..debug.context import DebugContext
from ..coresight.cortex_m import CORE_REGISTER
from pyOCD.pyDAPAccess import DAPAccess
import logging
IS_RUNNING_OFFSET = 0x54
ALL_OBJECTS_THREADS_OFFSET = 0
THREAD_STACK_POINTER_OFFSET = 0
THREAD_EXTENDED_FRAME_OFFSET = 4
THREAD_NAME_OFFSET = 8
THREAD_STACK_BOTTOM_OFFSET = 12
THREAD_PRIORITY_OFFSET = 16
THREAD_STATE_OFFSET = 17
THREAD_CREATED_NODE_OFFSET = 36
LIST_NODE_NEXT_OFFSET = 0
LIST_NODE_OBJ_OFFSET = 8
# Create a logger for this module.
log = logging.getLogger("argon")
class TargetList(object):
def __init__(self, context, ptr):
self._context = context
self._list = ptr
def __iter__(self):
next = 0
head = self._context.read32(self._list)
node = head
is_valid = head != 0
while is_valid and next != head:
try:
# Read the object from the node.
obj = self._context.read32(node + LIST_NODE_OBJ_OFFSET)
yield obj
next = self._context.read32(node + LIST_NODE_NEXT_OFFSET)
node = next
except DAPAccess.TransferError:
log.warning("TransferError while reading list elements (list=0x%08x, node=0x%08x), terminating list", self._list, node)
is_valid = False
## @brief Debug context that reads a suspended Argon thread's registers from its stacked frame.
class ArgonThreadContext(DebugContext):
# SP is handled specially, so it is not in these dicts.
CORE_REGISTER_OFFSETS = {
# Software stacked
4: 0, # r4
5: 4, # r5
6: 8, # r6
7: 12, # r7
8: 16, # r8
9: 20, # r9
10: 24, # r10
11: 28, # r11
# Hardware stacked
0: 32, # r0
1: 36, # r1
2: 40, # r2
3: 44, # r3
12: 48, # r12
14: 52, # lr
15: 56, # pc
16: 60, # xpsr
}
FPU_EXTENDED_REGISTER_OFFSETS = {
# Software stacked
4: 0, # r4
5: 4, # r5
6: 8, # r6
7: 12, # r7
8: 16, # r8
9: 20, # r9
10: 24, # r10
11: 28, # r11
0x50: 32, # s16
0x51: 36, # s17
0x52: 40, # s18
0x53: 44, # s19
0x54: 48, # s20
0x55: 52, # s21
0x56: 56, # s22
0x57: 60, # s23
0x58: 64, # s24
0x59: 68, # s25
0x5a: 72, # s26
0x5b: 76, # s27
0x5c: 80, # s28
0x5d: 84, # s29
0x5e: 88, # s30
0x5f: 92, # s31
# Hardware stacked
0: 96, # r0
1: 100, # r1
2: 104, # r2
3: 108, # r3
12: 112, # r12
14: 116, # lr
15: 120, # pc
16: 124, # xpsr
0x40: 128, # s0
0x41: 132, # s1
0x42: 136, # s2
0x43: 140, # s3
0x44: 144, # s4
0x45: 148, # s5
0x46: 152, # s6
0x47: 156, # s7
0x48: 160, # s8
0x49: 164, # s9
0x4a: 168, # s10
0x4b: 172, # s11
0x4c: 176, # s12
0x4d: 180, # s13
0x4e: 184, # s14
0x4f: 188, # s15
33: 192, # fpscr
# (reserved word: 196)
}
# Registers that are not available on the stack for exceptions.
EXCEPTION_UNAVAILABLE_REGS = (4, 5, 6, 7, 8, 9, 10, 11)
def __init__(self, parentContext, thread):
super(ArgonThreadContext, self).__init__(parentContext.core)
self._parent = parentContext
self._thread = thread
def readCoreRegistersRaw(self, reg_list):
reg_list = [self.registerNameToIndex(reg) for reg in reg_list]
reg_vals = []
inException = self._get_ipsr() > 0
isCurrent = self._thread.is_current
# If this is the current thread and we're not in an exception, just read the live registers.
if isCurrent and not inException:
return self._parent.readCoreRegistersRaw(reg_list)
sp = self._thread.get_stack_pointer()
# Determine which register offset table to use and the offsets past the saved state.
realSpOffset = 0x40
realSpExceptionOffset = 0x20
table = self.CORE_REGISTER_OFFSETS
if self._thread.has_extended_frame:
table = self.FPU_EXTENDED_REGISTER_OFFSETS
realSpOffset = 0xc8
realSpExceptionOffset = 0x68
for reg in reg_list:
# Check for regs we can't access.
if isCurrent and inException:
if reg in self.EXCEPTION_UNAVAILABLE_REGS:
reg_vals.append(0)
continue
if reg == 18 or reg == 13: # PSP
log.debug("psp = 0x%08x", sp + realSpExceptionOffset)
reg_vals.append(sp + realSpExceptionOffset)
continue
# Must handle stack pointer specially.
if reg == 13:
reg_vals.append(sp + realSpOffset)
continue
# Look up offset for this register on the stack.
spOffset = table.get(reg, None)
if spOffset is None:
reg_vals.append(self._parent.readCoreRegisterRaw(reg))
continue
if isCurrent and inException:
spOffset -= realSpExceptionOffset #0x20
try:
reg_vals.append(self._parent.read32(sp + spOffset))
except DAPAccess.TransferError:
reg_vals.append(0)
return reg_vals
def _get_ipsr(self):
return self._parent.readCoreRegister('xpsr') & 0xff
def writeCoreRegistersRaw(self, reg_list, data_list):
self._parent.writeCoreRegistersRaw(reg_list, data_list)
## @brief An Argon thread on the target.
class ArgonThread(TargetThread):
UNKNOWN = 0
SUSPENDED = 1
READY = 2
RUNNING = 3
BLOCKED = 4
SLEEPING = 5
DONE = 6
STATE_NAMES = {
UNKNOWN : "Unknown",
SUSPENDED : "Suspended",
READY : "Ready",
RUNNING : "Running",
BLOCKED : "Blocked",
SLEEPING : "Sleeping",
DONE : "Done",
}
def __init__(self, targetContext, provider, base):
super(ArgonThread, self).__init__()
self._target_context = targetContext
self._provider = provider
self._base = base
self._thread_context = ArgonThreadContext(self._target_context, self)
self._has_fpu = self._thread_context.core.has_fpu
self._priority = 0
self._state = self.UNKNOWN
self._name = "?"
try:
self.update_info()
ptr = self._target_context.read32(self._base + THREAD_NAME_OFFSET)
self._name = read_c_string(self._target_context, ptr)
except DAPAccess.TransferError:
log.debug("Transfer error while reading thread info")
def get_stack_pointer(self):
sp = 0
if self.is_current:
# Read live process stack.
sp = self._target_context.readCoreRegister('psp')
else:
# Get stack pointer saved in thread struct.
try:
sp = self._target_context.read32(self._base + THREAD_STACK_POINTER_OFFSET)
except DAPAccess.TransferError:
log.debug("Transfer error while reading thread's stack pointer @ 0x%08x", self._base + THREAD_STACK_POINTER_OFFSET)
return sp
def update_info(self):
try:
self._priority = self._target_context.read8(self._base + THREAD_PRIORITY_OFFSET)
self._state = self._target_context.read8(self._base + THREAD_STATE_OFFSET)
if self._state > self.DONE:
self._state = self.UNKNOWN
except DAPAccess.TransferError:
log.debug("Transfer error while reading thread info")
@property
def state(self):
return self._state
@property
def priority(self):
return self._priority
@property
def unique_id(self):
return self._base
@property
def name(self):
return self._name
@property
def description(self):
return "%s; Priority %d" % (self.STATE_NAMES[self.state], self.priority)
@property
def is_current(self):
return self._provider.get_actual_current_thread_id() == self.unique_id
@property
def context(self):
return self._thread_context
@property
def has_extended_frame(self):
if not self._has_fpu:
return False
try:
flag = self._target_context.read8(self._base + THREAD_EXTENDED_FRAME_OFFSET)
return flag != 0
except DAPAccess.TransferError:
log.debug("Transfer error while reading thread's extended frame flag @ 0x%08x", self._base + THREAD_EXTENDED_FRAME_OFFSET)
return False
def __str__(self):
return "<ArgonThread@0x%08x id=%x name=%s>" % (id(self), self.unique_id, self.name)
def __repr__(self):
return str(self)
## @brief Thread provider for the Argon RTOS.
class ArgonThreadProvider(ThreadProvider):
def __init__(self, target):
super(ArgonThreadProvider, self).__init__(target)
self.g_ar = None
self.g_ar_objects = None
self._all_threads = None
self._threads = {}
def init(self, symbolProvider):
self.g_ar = symbolProvider.get_symbol_value("g_ar")
if self.g_ar is None:
return False
log.debug("Argon: g_ar = 0x%08x", self.g_ar)
self.g_ar_objects = symbolProvider.get_symbol_value("g_ar_objects")
if self.g_ar_objects is None:
return False
log.debug("Argon: g_ar_objects = 0x%08x", self.g_ar_objects)
self._all_threads = self.g_ar_objects + ALL_OBJECTS_THREADS_OFFSET
return True
def _build_thread_list(self):
allThreads = TargetList(self._target_context, self._all_threads)
newThreads = {}
for threadBase in allThreads:
try:
# Reuse existing thread objects if possible.
if threadBase in self._threads:
t = self._threads[threadBase]
# Ask the thread object to update its state and priority.
t.update_info()
else:
t = ArgonThread(self._target_context, self, threadBase)
log.debug("Thread 0x%08x (%s)", threadBase, t.name)
newThreads[t.unique_id] = t
except DAPAccess.TransferError:
log.debug("TransferError while examining thread 0x%08x", threadBase)
# Create fake handler mode thread.
if self.get_ipsr() > 0:
log.debug("creating handler mode thread")
t = HandlerModeThread(self._target_context, self)
newThreads[t.unique_id] = t
self._threads = newThreads
def get_threads(self):
if not self.is_enabled:
return []
self.update_threads()
return self._threads.values()
def get_thread(self, threadId):
if not self.is_enabled:
return None
self.update_threads()
return self._threads.get(threadId, None)
@property
def is_enabled(self):
return self.g_ar is not None and self.get_is_running()
@property
def current_thread(self):
if not self.is_enabled:
return None
self.update_threads()
id = self.get_current_thread_id()
try:
return self._threads[id]
except KeyError:
log.debug("key error getting current thread id=%x", id)
log.debug("self._threads = %s", repr(self._threads))
return None
def is_valid_thread_id(self, threadId):
if not self.is_enabled:
return False
self.update_threads()
return threadId in self._threads
def get_current_thread_id(self):
if not self.is_enabled:
return None
if self.get_ipsr() > 0:
return 2
return self.get_actual_current_thread_id()
def get_actual_current_thread_id(self):
if not self.is_enabled:
return None
return self._target_context.read32(self.g_ar)
def get_is_running(self):
if self.g_ar is None:
return False
flag = self._target_context.read8(self.g_ar + IS_RUNNING_OFFSET)
return flag != 0
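# Hedged illustration of how the offset tables in ArgonThreadContext map onto a
# suspended thread's stacked frame: each saved register lives at a fixed offset
# from the thread's saved stack pointer. This mirrors the per-register lookup in
# readCoreRegistersRaw; "context" is assumed to expose a read32(address) method.
def read_stacked_register(context, stack_pointer, reg_index, extended_frame=False):
    table = (ArgonThreadContext.FPU_EXTENDED_REGISTER_OFFSETS if extended_frame
             else ArgonThreadContext.CORE_REGISTER_OFFSETS)
    offset = table.get(reg_index)
    if offset is None:
        return None  # register is not part of the stacked frame (e.g. SP itself)
    return context.read32(stack_pointer + offset)
# For example, read_stacked_register(ctx, sp, 15) would return the stacked PC.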
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage operator."""
import warnings
from typing import TYPE_CHECKING, Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
WILDCARD = '*'
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToGCSOperator(BaseOperator):
"""
Copies objects from a bucket to another, with renaming if requested.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToGCSOperator`
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:type source_bucket: str
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:type source_object: str
:param source_objects: A list of source name of the objects to copy in the Google cloud
storage bucket. (templated)
:type source_objects: List[str]
:param destination_bucket: The destination Google Cloud Storage bucket
where the object should be. If the destination_bucket is None, it defaults
to source_bucket. (templated)
:type destination_bucket: str
:param destination_object: The destination name of the object in the
destination Google Cloud Storage bucket. (templated)
If a wildcard is supplied in the source_object argument, this is the
prefix that will be prepended to the final destination objects' paths.
Note that the source path's part before the wildcard will be removed;
if it needs to be retained it should be appended to destination_object.
For example, with prefix ``foo/*`` and destination_object ``blah/``, the
file ``foo/baz`` will be copied to ``blah/baz``; to retain the prefix write
the destination_object as e.g. ``blah/foo``, in which case the copied file
will be named ``blah/foo/baz``.
The same thing applies to source objects inside source_objects.
:type destination_object: str
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:type move_object: bool
:param replace: Whether you want to replace existing destination files or not.
:type replace: bool
:param delimiter: This is used to restrict the result to only the 'files' in a given 'folder'.
If source_objects = ['foo/bah/'] and delimiter = '.avro', then only the 'files' in the
folder 'foo/bah/' with '.avro' delimiter will be copied to the destination object.
:type delimiter: str
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param last_modified_time: When specified, the objects will be copied or moved
only if they were modified after last_modified_time.
If tzinfo has not been set, UTC will be assumed.
:type last_modified_time: datetime.datetime
:param maximum_modified_time: When specified, the objects will be copied or moved
only if they were modified before maximum_modified_time.
If tzinfo has not been set, UTC will be assumed.
:type maximum_modified_time: datetime.datetime
:param is_older_than: When specified, the objects will be copied if they are older
than the specified time in seconds.
:type is_older_than: int
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with the
first account in the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:Example:
The following Operator would copy a single file named
``sales/sales-2017/january.avro`` in the ``data`` bucket to the file named
``copied_sales/2017/january-backup.avro`` in the ``data_backup`` bucket ::
copy_single_file = GCSToGCSOperator(
task_id='copy_single_file',
source_bucket='data',
source_objects=['sales/sales-2017/january.avro'],
destination_bucket='data_backup',
destination_object='copied_sales/2017/january-backup.avro',
gcp_conn_id=google_cloud_conn_id
)
The following Operator would copy all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
``copied_sales/2017`` folder in the ``data_backup`` bucket. ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_objects=['sales/sales-2017'],
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
delimiter='.avro',
gcp_conn_id=google_cloud_conn_id
)
Or ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
gcp_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
same folder in the ``data_backup`` bucket, deleting the original files in the
process. ::
move_files = GCSToGCSOperator(
task_id='move_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
move_object=True,
gcp_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2019``
and ``sales/sales-2020`` folders in the ``data`` bucket to the same folders in the
``data_backup`` bucket, deleting the original files in the process. ::
move_files = GCSToGCSOperator(
task_id='move_files',
source_bucket='data',
source_objects=['sales/sales-2019/*.avro', 'sales/sales-2020'],
destination_bucket='data_backup',
delimiter='.avro',
move_object=True,
gcp_conn_id=google_cloud_conn_id
)
"""
template_fields: Sequence[str] = (
'source_bucket',
'source_object',
'source_objects',
'destination_bucket',
'destination_object',
'delimiter',
'impersonation_chain',
)
ui_color = '#f0eee4'
def __init__(
self,
*,
source_bucket,
source_object=None,
source_objects=None,
destination_bucket=None,
destination_object=None,
delimiter=None,
move_object=False,
replace=True,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
last_modified_time=None,
maximum_modified_time=None,
is_older_than=None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
):
super().__init__(**kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=3,
)
gcp_conn_id = google_cloud_storage_conn_id
self.source_bucket = source_bucket
self.source_object = source_object
self.source_objects = source_objects
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.delimiter = delimiter
self.move_object = move_object
self.replace = replace
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.last_modified_time = last_modified_time
self.maximum_modified_time = maximum_modified_time
self.is_older_than = is_older_than
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
if self.source_objects and self.source_object:
error_msg = (
f"You can either set source_object parameter or source_objects parameter but not both. "
f"Found source_object={self.source_object} and source_objects={self.source_objects}"
)
raise AirflowException(error_msg)
if not self.source_object and not self.source_objects:
error_msg = "You must set source_object parameter or source_objects parameter. None set"
raise AirflowException(error_msg)
if self.source_objects and not all(isinstance(item, str) for item in self.source_objects):
raise AirflowException('At least, one of the `objects` in the `source_objects` is not a string')
# If source_object is set, wrap it in a list and use it as source_objects
if self.source_object:
self.source_objects = [self.source_object]
if self.destination_bucket is None:
self.log.warning(
'destination_bucket is None. Defaulting it to source_bucket (%s)', self.source_bucket
)
self.destination_bucket = self.source_bucket
# An empty source_objects list means copy everything: use a single empty prefix
if len(self.source_objects) == 0:
self.source_objects = ['']
# Raise an exception if the empty string '' appears more than once in source_objects, to avoid copying everything twice
if self.source_objects.count('') > 1:
raise AirflowException("You can't have two empty strings inside source_object")
# Iterate over the source_objects and do the copy
for prefix in self.source_objects:
# Check if prefix contains wildcard
if WILDCARD in prefix:
self._copy_source_with_wildcard(hook=hook, prefix=prefix)
# Now search with prefix using provided delimiter if any
else:
self._copy_source_without_wildcard(hook=hook, prefix=prefix)
def _copy_source_without_wildcard(self, hook, prefix):
"""
For source_objects with no wildcard, this operator first lists
all files under the given prefix, using the provided delimiter if any. It then
copies each listed file to destination_object, renaming the source
file accordingly.
Example 1:
The following Operator would copy all the files from the ``a/`` folder
(i.e. a/a.csv, a/b.csv, a/c.csv) in the ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.csv, b/b.csv, b/c.csv) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_objects=['a/'],
destination_bucket='data_backup',
destination_object='b/',
gcp_conn_id=google_cloud_conn_id
)
Example 2:
The following Operator would copy all the Avro files from the ``a/`` folder
(i.e. a/a.avro, a/b.avro, a/c.avro) in the ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.avro, b/b.avro, b/c.avro) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_objects=['a/'],
destination_bucket='data_backup',
destination_object='b/',
delimiter='.avro',
gcp_conn_id=google_cloud_conn_id
)
"""
objects = hook.list(self.source_bucket, prefix=prefix, delimiter=self.delimiter)
# If objects is empty and we have prefix, let's check if prefix is a blob
# and copy directly
if len(objects) == 0 and prefix:
if hook.exists(self.source_bucket, prefix):
self._copy_single_object(
hook=hook, source_object=prefix, destination_object=self.destination_object
)
for source_obj in objects:
if self.destination_object is None:
destination_object = source_obj
else:
destination_object = source_obj.replace(prefix, self.destination_object, 1)
self._copy_single_object(
hook=hook, source_object=source_obj, destination_object=destination_object
)
def _copy_source_with_wildcard(self, hook, prefix):
total_wildcards = prefix.count(WILDCARD)
if total_wildcards > 1:
error_msg = (
"Only one wildcard '*' is allowed in source_object parameter. "
f"Found {total_wildcards} in {prefix}."
)
raise AirflowException(error_msg)
self.log.info('Delimiter ignored because wildcard is in prefix')
prefix_, delimiter = prefix.split(WILDCARD, 1)
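# e.g. 'sales/sales-2017/*.avro' splits into prefix_='sales/sales-2017/' and
# delimiter='.avro'; the listing below uses these as prefix and delimiter.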
objects = hook.list(self.source_bucket, prefix=prefix_, delimiter=delimiter)
if not self.replace:
# If we are not replacing, list all files in the Destination GCS bucket
# and only keep those files which are present in
# Source GCS bucket and not in Destination GCS bucket
if self.destination_object is None:
existing_objects = hook.list(self.destination_bucket, prefix=prefix_, delimiter=delimiter)
else:
self.log.info("Replaced destination_object with source_object prefix.")
destination_objects = hook.list(
self.destination_bucket,
prefix=self.destination_object,
delimiter=delimiter,
)
existing_objects = [
dest_object.replace(self.destination_object, prefix_, 1)
for dest_object in destination_objects
]
objects = set(objects) - set(existing_objects)
if len(objects) > 0:
self.log.info('%s files are going to be synced: %s.', len(objects), objects)
else:
self.log.info('There are no new files to sync. Have a nice day!')
for source_object in objects:
if self.destination_object is None:
destination_object = source_object
else:
destination_object = source_object.replace(prefix_, self.destination_object, 1)
self._copy_single_object(
hook=hook, source_object=source_object, destination_object=destination_object
)
def _copy_single_object(self, hook, source_object, destination_object):
if self.is_older_than:
# Here we check if the given object is older than the given time.
# If is_older_than is given, last_modified_time and maximum_modified_time are ignored.
if hook.is_older_than(self.source_bucket, source_object, self.is_older_than):
self.log.info("Object is older than %s seconds ago", self.is_older_than)
else:
self.log.debug("Object is not older than %s seconds ago", self.is_older_than)
return
elif self.last_modified_time and self.maximum_modified_time:
# check to see if object was modified between last_modified_time and
# maximum_modified_time
if hook.is_updated_between(
self.source_bucket, source_object, self.last_modified_time, self.maximum_modified_time
):
self.log.info(
"Object has been modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
else:
self.log.debug(
"Object was not modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
return
elif self.last_modified_time is not None:
# Check to see if object was modified after last_modified_time
if hook.is_updated_after(self.source_bucket, source_object, self.last_modified_time):
self.log.info("Object has been modified after %s ", self.last_modified_time)
else:
self.log.debug("Object was not modified after %s ", self.last_modified_time)
return
elif self.maximum_modified_time is not None:
# Check to see if object was modified before maximum_modified_time
if hook.is_updated_before(self.source_bucket, source_object, self.maximum_modified_time):
self.log.info("Object has been modified before %s ", self.maximum_modified_time)
else:
self.log.debug("Object was not modified before %s ", self.maximum_modified_time)
return
self.log.info(
'Executing copy of gs://%s/%s to gs://%s/%s',
self.source_bucket,
source_object,
self.destination_bucket,
destination_object,
)
hook.rewrite(self.source_bucket, source_object, self.destination_bucket, destination_object)
if self.move_object:
hook.delete(self.source_bucket, source_object)
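# Illustrative sketch (not part of the operator above): both copy paths rename a
# source object by replacing the first occurrence of the source prefix with
# destination_object. The helper name below is hypothetical and only
# demonstrates that behaviour.
def _example_destination_name(source_object, prefix, destination_object):
    # Mirrors: source_object.replace(prefix, destination_object, 1)
    if destination_object is None:
        return source_object
    return source_object.replace(prefix, destination_object, 1)
# e.g. _example_destination_name('sales/sales-2017/january.avro',
#                                'sales/sales-2017/', 'copied_sales/2017/')
# returns 'copied_sales/2017/january.avro'.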
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib
from tensorflow.contrib.mixed_precision.python import loss_scale_optimizer as lso
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent as gd
class LossScaleOptimizerTest(test.TestCase):
def _build_graph(self, lr, init_val, loss_scale_opt_fn=None):
x = variable_scope.get_variable(
"x", initializer=init_val, dtype=dtypes.float32)
c1 = constant_op.constant(1e4, dtype=dtypes.float16)
c2 = constant_op.constant(1e-4, dtype=dtypes.float16)
c3 = constant_op.constant(1e-4, dtype=dtypes.float16)
if context.executing_eagerly():
loss = lambda: math_ops.cast(x, dtypes.float16) * c1 * c2 * c3
else:
loss = math_ops.cast(x, dtypes.float16) * c1 * c2 * c3
opt = gd.GradientDescentOptimizer(lr)
if loss_scale_opt_fn:
opt = loss_scale_opt_fn(opt)
return x, loss, opt
@test_util.run_in_graph_and_eager_modes
def test_float16_underflow_without_loss_scale(self):
lr = 1
init_val = 1.
x, loss, opt = self._build_graph(lr, init_val)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt.minimize(loss, var_list=[x]))
# Symbolic grad is c1 * c2 * c3 = 1e-4 and actual grad is 0, since in
# backprop, c2 * c3 underflows in fp16 range. So variable isn't updated.
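# (In IEEE fp16 the smallest positive subnormal is 2**-24 ~= 5.96e-8, so the
# intermediate product c2 * c3 = 1e-8 flushes to zero.)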
expected_update = 0
symbolic_update = 1e-4 * lr
self.assertAllClose(
init_val - expected_update,
self.evaluate(x),
rtol=0,
atol=min(symbolic_update, 1e-6))
@test_util.run_in_graph_and_eager_modes
def test_float16_with_loss_scale(self):
lr = 1.
init_val = 1.
def loss_scale_opt_fn(opt):
return lso.LossScaleOptimizer(opt, lsm_lib.FixedLossScaleManager(1e4))
x, loss, opt = self._build_graph(lr, init_val, loss_scale_opt_fn)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt.minimize(loss, var_list=[x]))
# Symbolic grad is c1 * c2 * c3 = 1e-4 and actual grad is the same, due to
# up-scaled loss before backprop starts.
expected_update = 1.e-4 * lr
self.assertAllClose(
init_val - expected_update,
self.evaluate(x),
rtol=0,
atol=min(expected_update, 1e-6))
@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_with_loss_scale(self):
lr = 1
init_val = 1.
def loss_scale_opt_fn(opt):
return lso.LossScaleOptimizer(opt, lsm_lib.FixedLossScaleManager(1e4))
x, loss, opt = self._build_graph(lr, init_val, loss_scale_opt_fn)
grads_and_vars = opt.compute_gradients(loss, var_list=[x])
self.assertEqual(len(grads_and_vars), 1)
self.evaluate(variables.global_variables_initializer())
g_v = self.evaluate(grads_and_vars[0][0])
self.assertAllClose(g_v, 1e-4)
self.assertIs(grads_and_vars[0][1], x)
# Gradients aren't applied.
self.assertAllClose(init_val, self.evaluate(x), rtol=0, atol=1e-6)
@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_without_loss_scale(self):
lr = 1
init_val = 1.
x, loss, opt = self._build_graph(lr, init_val)
grads_and_vars = opt.compute_gradients(loss, var_list=[x])
self.assertEqual(len(grads_and_vars), 1)
self.evaluate(variables.global_variables_initializer())
g_v = self.evaluate(grads_and_vars[0][0])
self.assertAllClose(g_v, 0)
@test_util.run_in_graph_and_eager_modes
def test_apply_gradients(self):
x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
itr = dataset.make_one_shot_iterator()
lr = 1
opt = gd.GradientDescentOptimizer(lr)
lsm = lsm_lib.FixedLossScaleManager(1.e4)
opt = lso.LossScaleOptimizer(opt, lsm)
train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
if not context.executing_eagerly():
train_op = train_fn()
expected_output = [1, 1, 1 - 0.1]
actual_output = []
self.evaluate(variables.global_variables_initializer())
for _ in range(3):
# nan or inf is not applied.
if context.executing_eagerly():
train_fn()
else:
self.evaluate(train_op)
actual_output.append(self.evaluate(x))
self.assertAllClose(expected_output, actual_output)
@test_util.run_in_graph_and_eager_modes
def test_apply_gradients_loss_scale_is_updated(self):
class SimpleLossScaleManager(lsm_lib.LossScaleManager):
"""A simple loss scale manager for easier testing.
It increments the loss scale by 1 if grads are finite, and decreases the
loss scale by 1 otherwise.
"""
def __init__(self, loss_scale):
self._loss_scale = variable_scope.variable(
name="loss_scale",
initial_value=loss_scale,
dtype=dtypes.float32,
trainable=False)
def get_loss_scale(self):
return self._loss_scale
def update_loss_scale(self, if_finite_grads):
return control_flow_ops.cond(
if_finite_grads, lambda: state_ops.assign_add(self._loss_scale, 1),
lambda: state_ops.assign_sub(self._loss_scale, 1))
x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
itr = dataset.make_one_shot_iterator()
lr = 1
init_loss_scale = 8
opt = gd.GradientDescentOptimizer(lr)
lsm = SimpleLossScaleManager(init_loss_scale)
opt = lso.LossScaleOptimizer(opt, lsm)
train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
if not context.executing_eagerly():
train_op = train_fn()
self.evaluate(variables.global_variables_initializer())
expected_loss_scale = [
init_loss_scale - 1, init_loss_scale - 2, init_loss_scale - 2 + 1
]
expected_output = [1, 1, 1 - 0.1]
actual_output = []
for i in range(3):
# nan or inf is not applied.
if context.executing_eagerly():
train_fn()
else:
self.evaluate(train_op)
actual_output.append(self.evaluate(x))
self.assertAllClose(expected_loss_scale[i],
self.evaluate(lsm._loss_scale))
self.assertAllClose(expected_output, actual_output)
if __name__ == "__main__":
test.main()
|
|
"""===========================
Pipeline Splicing
===========================
Overview
========
This pipeline enables differential exon usage testing through the
implementation of
* rMATS-turbo
* DEXSeq
rMATS is a computational tool to detect differential alternative splicing
events from RNA-Seq data. The statistical model of MATS calculates the P-value
and false discovery rate for the hypothesis that the difference in the isoform
ratio of a gene between two conditions exceeds a user-defined threshold. From the
RNA-Seq data, MATS can automatically detect and analyze alternative splicing
events corresponding to all major types of alternative splicing patterns.
MATS handles replicate RNA-Seq data from both paired and unpaired study designs.
DEXSeq is a bioconductor R package to detect differential exon usage between
conditions in RNA-Seq experiments. Relative exon usage is defined as
(number of reads from exon)/(number of reads from its gene). It uses a similar
model to DESeq to estimate dispersion parameters prior to differential
testing.
Principal targets
-----------------
full
compute all functions
Optional targets
----------------
permute
repeat rMATS after permuting sample group labels
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general
information on how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
CGATReport report requires a :file:`conf.py` and optionally a
:file:`cgatreport.ini` file (see :ref:`PipelineReporting`).
Default configuration files can be generated by executing:
python <srcdir>/pipeline_splicing.py config
Input files
-----------
".bam" files generated using STAR or Tophat2. Other mappers
may also work. Bam indexation is not required.
Design_files ("*.design.tsv") are used to specify sample variates. The
minimal design file is shown below, where include specifies if the
sample should be included in the analysis, group specifies the sample
group and pair specifies whether the sample is paired. Note that multiple
design files may be included, for example so that multiple models can
be fitted to different subsets of the data.
(tab-separated values)
sample include group pair
WT-1-1 1 WT 0
WT-1-2 1 WT 0
Mutant-1-1 1 Mutant 0
Mutant-1-2 1 Mutant 0
The pipeline can only handle comparisons between two conditions with
replicates. If further comparisons are needed, further design files
should be used.
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
Requirements:
* samtools
* DEXSeq
* rMATS-turbo
* pysam
* HTSeqCounts
Pipeline output
===============
For each experiment, the output from rMATS is placed in the results.dir
folder. Each experiment is found in a subdirectory named designfilename.dir
rMATS output is further described here:
http://rnaseq-mats.sourceforge.net/user_guide.htm
Glossary
========
.. glossary::
Code
====
"""
from ruffus import *
import sys
import os
import glob
import sqlite3
import pandas as pd
from rpy2.robjects import r as R
import CGAT.BamTools as BamTools
import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGATPipelines.PipelineTracks as PipelineTracks
import CGATPipelines.PipelineSplicing as PipelineSplicing
###################################################################
###################################################################
###################################################################
# Load options and annotations
###################################################################
# load options from the config file
PARAMS = P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
# add configuration values from associated pipelines
PARAMS = P.PARAMS
PARAMS.update(P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_genesets.py",
prefix="annotations_",
update_interface=True,
restrict_interface=True)) # add config values from associated pipelines
# The DEXSeq R directory contains important python helper functions
PYTHONSCRIPTSDIR = R('''
f = function(){
pythonScriptsDir = system.file("python_scripts", package="DEXSeq")
}
f()''').tostring()
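# e.g. this resolves to <R library>/DEXSeq/python_scripts, which contains
# dexseq_prepare_annotation.py and dexseq_count.py used by the tasks below.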
###################################################################
###################################################################
###################################################################
# Utility functions
###################################################################
def connect():
'''Connect to database (sqlite by default)
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database_name"])
statement = '''ATTACH DATABASE '%s' as annotations''' % (
PARAMS["annotations_database"])
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
class MySample(PipelineTracks.Sample):
attributes = tuple(PARAMS["attributes"].split(","))
TRACKS = PipelineTracks.Tracks(MySample).loadFromDirectory(
glob.glob("*.bam"), "(\S+).bam")
Sample = PipelineTracks.AutoSample
DESIGNS = PipelineTracks.Tracks(Sample).loadFromDirectory(
glob.glob("*.design.tsv"), "(\S+).design.tsv")
###################################################################
###################################################################
###################################################################
# DEXSeq workflow
###################################################################
@mkdir("results.dir")
@files(PARAMS["annotations_interface_geneset_all_gtf"],
"geneset_flat.gff")
def buildGff(infile, outfile):
'''Creates a gff for DEXSeq
This takes the gtf and flattens it to an exon based input
required by DEXSeq. The required python script is provided by DEXSeq
and uses HTSeqCounts.
Parameters
----------
infile : string
Input filename in :term:`gtf` format
outfile : string
A :term:`gff` file for use in DEXSeq
annotations_interface_geneset_all_gtf : string
:term:`PARAMS`. Filename of :term:`gtf` file containing
all ensembl annotations
'''
tmpgff = P.getTempFilename(".")
statement = "gunzip -c %(infile)s > %(tmpgff)s"
P.run()
ps = PYTHONSCRIPTSDIR
statement = '''python %(ps)s/dexseq_prepare_annotation.py
%(tmpgff)s %(outfile)s'''
P.run()
os.unlink(tmpgff)
@mkdir("counts.dir")
@transform(glob.glob("*.bam"),
regex("(\S+).bam"),
add_inputs(buildGff),
r"counts.dir/\1.txt")
def countDEXSeq(infiles, outfile):
'''create counts for DEXSeq
Counts bam reads against exon features in the flattened gtf.
The required python script is provided by DEXSeq
and uses HTSeqCounts.
Parameters
----------
infile[0]: string
:term:`bam` file input
infile[1]: string
:term:`gff` output from buildGff function
outfile : string
A :term:`txt` file containing results
DEXSeq_strandedness : string
:term:`PARAMS`. Specifies strandedness, options
are 'yes', 'no' and 'reverse'
'''
infile, gfffile = infiles
ps = PYTHONSCRIPTSDIR
if BamTools.isPaired(infile):
paired = "yes"
else:
paired = "no"
strandedness = PARAMS["DEXSeq_strandedness"]
statement = '''python %(ps)s/dexseq_count.py
-p %(paired)s
-s %(strandedness)s
-r pos
-f bam %(gfffile)s %(infile)s %(outfile)s'''
P.run()
@collate(countDEXSeq,
regex("counts.dir/([^.]+)\.txt"),
r"summarycounts.tsv")
def aggregateExonCounts(infiles, outfile):
''' Build a matrix of counts with exons and tracks dimensions.
Uses `combine_tables.py` to combine all the `txt` files output from
countDEXSeq into a single :term:`tsv` file named
"summarycounts.tsv". A `.log` file is also produced.
Parameters
---------
infiles : list
a list of `txt` files from counts.dir that were the
output from dexseq_count.py
outfile : string
a filename denoting the file containing a matrix of counts with exons
as rows and tracks as the columns - this is a `tsv` file '''
infiles = " ".join(infiles)
statement = '''python %(scriptsdir)s/combine_tables.py
--columns=1
--take=2
--use-file-prefix
--regex-filename='([^.]+)\.txt'
--no-titles
--log=%(outfile)s.log
%(infiles)s
> %(outfile)s '''
P.run()
@follows(aggregateExonCounts)
@mkdir("results.dir/DEXSeq")
@subdivide(["%s.design.tsv" % x.asFile().lower() for x in DESIGNS],
regex("(\S+).design.tsv"),
r"results.dir/DEXSeq/\1_results.tsv")
def runDEXSeq(infile, outfile):
''' run DEXSeq command
DEXSeq is run using the counts2table from the
CGAT code collection. Output is standardised to
correspond to differential gene expression output
from DESeq2 or Sleuth.
Will currently only test 2 groups.
Parameters
---------
infile : string
filename and path of design file
outfile : string
a filename denoting the file containing a standard results
output with full results of all tested exons
DEXSeq_model_% : string
DEXSeq_contrast_% : string
DEXSeq_refgroup_% : string
:term:`PARAMS`. Specifies model, contrast and reference
group for DEXSeq analysis
'''
outdir = os.path.dirname(outfile)
countsdir = "counts.dir/"
gfffile = os.path.abspath("geneset_flat.gff")
dexseq_fdr = 0.05
design = infile.split('.')[0]
model = PARAMS["DEXSeq_model_%s" % design]
contrast = PARAMS["DEXSeq_contrast_%s" % design]
refgroup = PARAMS["DEXSeq_refgroup_%s" % design]
statement = '''
python %%(scriptsdir)s/counts2table.py
--design-tsv-file=%(infile)s
--output-filename-pattern=%(outdir)s/%(design)s
--log=%(outdir)s/%(design)s_DEXSeq.log
--method=dexseq
--fdr=%(dexseq_fdr)s
--model=%(model)s
--dexseq-counts-dir=%(countsdir)s
--contrast=%(contrast)s
-r %(refgroup)s
--dexseq-flattened-file=%(gfffile)s
> %(outfile)s;
''' % locals()
P.run()
###################################################################
###################################################################
###################################################################
# rMATS workflow
###################################################################
@mkdir("results.dir/rMATS")
@subdivide(["%s.design.tsv" % x.asFile().lower() for x in DESIGNS],
regex("(\S+).design.tsv"),
add_inputs(PARAMS["annotations_interface_geneset_all_gtf"]),
[r"results.dir/rMATS/\1.dir/%s.MATS.JC.txt" % x for x in ["SE", "A5SS", "A3SS", "MXE", "RI"]])
def runMATS(infile, outfiles):
'''run rMATS-turbo
Runs rMATS command.
Parameters
---------
infiles[0] : string
filename and path of design file
infiles[1] : string
filename and path of :term:`gtf` file
outfile : list
a list of filenames denoting the file containing a standard results
output with full results for all five tested differential exon
usage conditions.
MATS_libtype : string
:term:`PARAMS`. Specifies library type. Can be "fr-firstrand",
"fr-secondstrand" or "fr-unstranded"
'''
design, gtffile = infile
strand = PARAMS["MATS_libtype"]
outdir = os.path.dirname(outfiles[0])
if not os.path.exists(outdir):
os.makedirs(outdir)
PipelineSplicing.runRMATS(gtffile=gtffile, designfile=design,
pvalue=PARAMS["MATS_cutoff"],
strand=strand, outdir=outdir)
@follows(runMATS)
@transform(runMATS,
regex("results.dir/rMATS/(\S+).dir/(\S+).MATS.JC.txt"),
r"results.dir/rMATS/rMATS_\1_\2_JC.load")
def loadMATS(infile, outfile):
'''load RMATS results into relational database
Loads rMATS results into relational database.
Continues if table empty.
Parameters
----------
infile: term:`tsv` file containing one type of rMATS results.
outfile: .load file
'''
try:
P.load(infile, outfile)
except:
P.touch(outfile)
@collate(runMATS,
regex("results.dir/rMATS/(\S+).dir/\S+.MATS.JC.txt"),
r"results.dir/rMATS/rMATS_\1_results.summary")
def collateMATS(infiles, outfile):
'''collates summary from all events
Collates number of events below FDR threshold from all
five events into simple table
Parameters
----------
infiles: list
list of results files from rMATS
MATS_fdr : string
:term:`PARAMS`. User specified threshold for result counting
outfile: string
summary file containing number of results below FDR threshold
'''
indir = os.path.dirname(infiles[1])
collate = []
with open(indir + "/b1.txt", "r") as f:
collate.append(f.readline())
with open(indir + "/b2.txt", "r") as f:
collate.append(f.readline())
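# rMATS event types: SE = skipped exon, A5SS / A3SS = alternative 5' / 3'
# splice site, MXE = mutually exclusive exons, RI = retained intron.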
for event in ["SE", "A5SS", "A3SS", "MXE", "RI"]:
temp = pd.read_csv("%s/%s.MATS.JC.txt" %
(indir, event), sep='\t')
collate.append(str(len(temp[temp['FDR'] < PARAMS['MATS_fdr']])))
with open(outfile, "w") as f:
f.write("Group1\tGroup2\tSE\tA5SS\tA3SS\tMXE\tRI\n")
f.write('\t'.join(collate))
@transform(collateMATS,
suffix(".summary"),
".load")
def loadCollateMATS(infile, outfile):
'''load rMATS summary into relational database
Loads rMATS summary results into relational database.
Parameters
----------
infile: file containing summary table of rMATS results
outfile: .load file
'''
P.load(infile, outfile)
@active_if(PARAMS["permute"] == 1)
@subdivide(["%s.design.tsv" % x.asFile().lower() for x in DESIGNS],
regex("(\S+).design.tsv"),
r"results.dir/rMATS/\1.dir/permutations/run*.dir/init",
r"results.dir/rMATS/\1.dir/permutations")
def permuteMATS(infile, outfiles, outdir):
'''creates directories for permutation testing
Creates directories for permutation testing and leaves dummy
init file in directory (for timestamping)
Only becomes active if :term:`PARAMS` permute is set to 1
Parameters
----------
infile: string
name and path to design
outfile: list
list of unknown length, capturing all permutations
retrospectively
outdir: string
directory to generate permutations in
permutations : string
:term:`PARAMS`. number of directories to be generated
'''
if not os.path.exists(outdir):
os.makedirs(outdir)
for i in range(0, PARAMS["permutations"]):
if not os.path.exists("%s/run%i.dir" % (outdir, i)):
os.makedirs("%s/run%i.dir" % (outdir, i))
P.touch("%s/run%i.dir/init" % (outdir, i))
@transform(permuteMATS,
regex("results.dir/rMATS/(\S+).dir/permutations/(\S+).dir/init"),
add_inputs(PARAMS["annotations_interface_geneset_all_gtf"]),
r"results.dir/rMATS/\1.dir/permutations/\2.dir/result.tsv",
r"\1.design.tsv")
def runPermuteMATS(infiles, outfile, design):
'''run rMATS-turbo permutation testing
Runs rMATS command on permutations and then collates results into
small summary table for each permutation
Parameters
---------
infiles[0] : string
filename and path of design file
infiles[1] : string
filename and path of :term:`gtf` file
outfile : :term:`tsv` file
file containing summary results meeting the user-specified FDR
threshold
design : string
name and path of design file
MATS_libtype : string
:term:`PARAMS`. Specifies library type. Can be "fr-firstrand",
"fr-secondstrand" or "fr-unstranded"
MATS_fdr : string
:term:`PARAMS`. User specified threshold for result counting
'''
init, gtffile = infiles
directory = os.path.dirname(init)
strand = PARAMS["MATS_libtype"]
outdir = os.path.dirname(outfile)
if not os.path.exists(outdir):
os.makedirs(outdir)
PipelineSplicing.runRMATS(gtffile=gtffile, designfile=design,
pvalue=PARAMS["MATS_cutoff"],
strand=strand, outdir=directory, permute=1)
collate = []
with open(os.path.dirname(init) + "/b1.txt", "r") as f:
collate.append(f.readline())
with open(os.path.dirname(init) + "/b2.txt", "r") as f:
collate.append(f.readline())
for event in ["SE", "A5SS", "A3SS", "MXE", "RI"]:
temp = pd.read_csv("%s/%s.MATS.JC.txt" %
(os.path.dirname(outfile), event), sep='\t')
collate.append(str(len(temp[temp['FDR'] < PARAMS['MATS_fdr']])))
with open(outfile, "w") as f:
f.write("Group1\tGroup2\tSE\tA5SS\tA3SS\tMXE\tRI\n")
f.write('\t'.join(collate))
@collate(runPermuteMATS,
regex("results.dir/rMATS/(\S+).dir/permutations/\S+.dir/result.tsv"),
r"results.dir/rMATS/rMATS_\1_permutations.summary")
def collatePermuteMATS(infiles, outfile):
'''collates summary table of all permutations
Collates number of events below FDR threshold from all
permutation runs.
Parameters
----------
infiles: list
list of rMATS result summaries from all permutation runs
outfile: string
summary file containing a table with all permutation run
results
'''
collate = []
for infile in infiles:
collate.append(pd.read_csv(infile, sep='\t'))
pd.concat(collate).to_csv(outfile, sep='\t', index=0)
@transform(collatePermuteMATS,
suffix(".summary"),
".load")
def loadPermuteMATS(infile, outfile):
'''load rMATS permutation results
Loads rMATS permutation summary results into relational database.
Parameters
----------
infile: file containing summary table of rMATS permutation results
outfile: .load file
'''
P.load(infile, outfile)
@mkdir("results.dir/sashimi")
@transform(runMATS,
regex("results.dir/rMATS/(\S+).dir/(\S+).MATS.JC.txt"),
add_inputs(r"\1.design.tsv"),
r"results.dir/sashimi/\1.dir/\2")
def runSashimi(infiles, outfile):
'''draws sashimi plots
Draws Sashimi plots (pdf files) for all results below FDR threshold
from all five rMATS events
Parameters
----------
infiles: list
list of results files from rMATS
MATS_fdr : string
:term:`PARAMS`. User specified threshold for result drawing
outfile: string
output directory into which the Sashimi plots (pdf files) are written
'''
infile, design = infiles
fdr = PARAMS["MATS_fdr"]
if not os.path.exists(outfile):
os.makedirs(outfile)
PipelineSplicing.rmats2sashimi(infile, design, fdr, outfile)
###################################################################
###################################################################
###################################################################
# Pipeline management
###################################################################
@follows(loadMATS,
loadCollateMATS,
loadPermuteMATS,
runSashimi,
runDEXSeq)
def full():
pass
@follows(mkdir("report"))
def build_report():
'''build report from scratch.
Any existing report will be overwritten.
Currently report function is not active for this pipeline
'''
E.info("starting report build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.
This will update a report with any changes inside the report
document or code. Note that updates to the data will not cause
relevant sections to be updated. Use the cgatreport-clean utility
first.
'''
E.info("updating report")
P.run_report(clean=False)
@follows(update_report)
def publish_report():
'''publish report in the CGAT downloads directory.'''
E.info("publishing report")
P.publish_report()
def main(argv=None):
if argv is None:
argv = sys.argv
P.main(argv)
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
|
import os.path
import sys
import cStringIO
import xml.etree.cElementTree as ET
import HTMLParser
import json
from collections import defaultdict
from jinja2 import Template
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from dummy_data_layout import LAYOUT_SCHEMA
from service_api import service_gateway_get
from config import PORTAL_ROOT, UI_MODE, STATIC_ASSETS
from random import randint
DEFINED_VIEWS = [
'2163152', # Facepage
'2163153', # Status
'2163154', # Related
'2163156', # Dashboard
'2163157', # Command
'2163158', # Direct Command
]
class LayoutApi(object):
@staticmethod
def get_new_layout_schema():
layout_schema = service_gateway_get('directory', 'get_ui_specs', params={'user_id': 'tboteler'})
return layout_schema
# Brute force method to quickly experiment with different rendering strategies
# with CSS rules, etc. Optimizations/refactoring will be underway soon.
@staticmethod
def process_layout(layout_schema=None, interactions=None):
# Load the template and find 'body' for appending the generated elements
env = Environment()
env.loader = FileSystemLoader(PORTAL_ROOT+'/templates')
tmpl_unparsed = env.get_template('ion_ux.html').render(static_assets=STATIC_ASSETS)
tmpl = ET.fromstring(tmpl_unparsed.encode('utf-8'))
body_elmt = tmpl.find('body')
# Fetch the layout schema
layout_schema = LayoutApi.get_new_layout_schema()
# Track resource types, metadata and widgets without processed sub-attributes
resource_types = []
metadata_processed = []
exclude_sub_attributes = ['table_ooi', 'chart_ooi']
attribute_levels = ['level-zero', 'level-one', 'level-two', 'level-three', 'level-four', 'level-five', 'level-six']
# --------------------------------------------------------------------------
# VIEWS
# --------------------------------------------------------------------------
# Loop through defined views and build <script> templates with the following hierarchy:
# view -> groups -> blocks -> attributes -> sub-attributes.
for view_id in DEFINED_VIEWS:
view = layout_schema['spec']['elements'][view_id]
script_elmt = _make_element(body_elmt, 'script', id=view_id, type='text/template')
# heading_elmt = _make_element(script_elmt, 'div', css='row-fluid heading')
v00_elmt = _make_element(script_elmt, 'div', css='v00 heading')
content_elmt = _make_element(script_elmt, 'div', css='row-fluid')
v01_elmt = _make_element(content_elmt, 'div', css='v01 span3')
v02_elmt = _make_element(content_elmt, 'div', css='v02 span9')
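# Layout positions: V00 is the full-width heading row, V01 the narrow left
# column (span3) and V02 the main content column (span9).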
# --------------------------------------------------------------------------
# GROUPS
# --------------------------------------------------------------------------
# Track groups on per view basis
groups = {}
# Loop through groups
for gr_idx, gr_element in enumerate(view['embed']):
group_elid = gr_element['elid']
group_link_id = group_elid + str(randint(0,1000))
group_position = gr_element['pos']
group = layout_schema['spec']['elements'][group_elid]
# Set the parent element for the group
if group_position == 'V00':
parent_elmt = v00_elmt
elif group_position == 'V01':
parent_elmt = v01_elmt
else:
parent_elmt = v02_elmt
# LABEL OVERRIDES
if gr_element.has_key('olabel'):
#print 'group label override:', group['label'], '->', gr_element['olabel'], group_elid
group_label = gr_element['olabel']
else:
group_label = group['label']
# CHECK FOR TITLE BAR (V00), creates tabs for V01 and V02 groups
if group_position == 'V00':
group_elmt = parent_elmt
else:
if not group_position in groups.keys():
group_container_elmt = _make_element(parent_elmt, 'div', id=group_elid, css='group')
group_ul_elmt = _make_element(group_container_elmt, 'ul', css='nav nav-tabs')
group_block_container_elmt = _make_element(group_container_elmt, 'div', css='tab-content')
groups.update({group_position: {'ul_elmt': group_ul_elmt,'group_container_elmt': group_container_elmt ,'group_block_container_elmt': group_block_container_elmt}})
else:
group_ul_elmt = groups[group_position]['ul_elmt']
group_block_container_elmt = groups[group_position]['group_block_container_elmt']
# <li>, <a> and group element
group_li_elmt = _make_element(group_ul_elmt, 'li', css='')
group_a_elmt = _make_element(group_li_elmt, 'a', href="#%s" % group_link_id, data_toggle='tab', content=group_label)
group_elmt = _make_element(group_block_container_elmt, 'div', id=group_link_id, css='tab-pane row-fluid')
# --------------------------------------------------------------------------
# BLOCKS
# --------------------------------------------------------------------------
# Loop through blocks
for bl_element in group['embed']:
block_elid = bl_element['elid']
block_position = bl_element['pos']
block = layout_schema['spec']['elements'][block_elid]
block_widget_id = block['wid']
block_widget = layout_schema['spec']['widgets'][block_widget_id]
block_widget_type = block_widget['name']
block_res_type = block['ie']['ie_name'] if block.has_key('ie') else ''
if not block_res_type in resource_types:
resource_types.append(block_res_type)
# Set li class based on block_res_type
if group_position != 'V00':
li_css_class = group_li_elmt.get('class')
if not block_res_type in li_css_class:
li_css_class += ' %s' % block_res_type
group_li_elmt.attrib['class'] = li_css_class
# LABEL OVERRIDES
if bl_element.has_key('olabel'):
#print 'block label override:', block['label'], '->', bl_element['olabel'], block_elid
block_label = bl_element['olabel']
else:
block_label = block['label']
block_css_class = block_res_type
# if not block_res_type in block_css_class:
# block_css_class += ' %s' % block_res_type
# BLOCK LAYOUT
if block['embed']:
for at_element in block['embed']:
attribute = layout_schema['spec']['elements'][at_element['elid']]
attribute_widget_type = layout_schema['spec']['widgets'][attribute['wid']]['name']
wide_container = True if attribute_widget_type in ('table_ooi', 'chart_ooi') else False
if wide_container:
block_container = _make_element(group_elmt, 'div', css='row-fluid')
block_elmt = _make_element(block_container, 'div', style="display:none;", id=block_elid)
block_css_class += ' span12'
else:
block_elmt = _make_element(group_elmt, 'div', style="display:none;", id=block_elid)
block_css_class += ' block'
# Greater than V01
if group_position not in ('V00','V01'):
block_css_class += ' span3'
# CHECK FOR TITLE BAR (V00)
elif group_position == 'V00':
block_css_class += ' row-fluid'
block_elmt.attrib['class'] = block_css_class
# SET GROUP HEADINGS
if group_position != 'V00':
# Hide table headers for now.
if not attribute_widget_type == 'table_ooi':
block_h3_elmt = _make_element(block_elmt, 'h3', content=block_label)
if group_position == 'V00':
block_container_elmt = block_elmt
left_elmt = _make_element(block_container_elmt, 'div', css='span6 heading-left')
right_elmt = _make_element(block_container_elmt, 'div', css='span6 heading-right')
else:
block_container_elmt = _make_element(block_elmt, 'div')
# Attributes
for at_element in block['embed']:
attribute_elid = at_element['elid']
attribute_position = at_element['pos']
attribute_data_path = at_element['dpath']
attribute_level = at_element['olevel']
attribute_css = attribute_levels[int(attribute_level)] if attribute_level else ''
attribute = layout_schema['spec']['elements'][attribute_elid]
attribute_widget_id = attribute['wid']
attribute_widget_type = layout_schema['spec']['widgets'][attribute_widget_id]['name']
# LABEL OVERRIDES
if at_element.has_key('olabel'):
#print 'attribute label override:', attribute['label'], '->', at_element['olabel'], attribute_elid
attribute_label = at_element['olabel']
else:
attribute_label = attribute['label']
if attribute_widget_type == 'image_ooi':
image_class = layout_schema['spec']['graphics'][attribute['gfx']]['name']
attribute_css += ' %s %s' % (attribute_widget_type, image_class)
else:
attribute_css += ' %s' % attribute_widget_type
# CHECK FOR TITLE BAR
if attribute_widget_type not in ('table_ooi', 'chart_ooi') and group_position != 'V00':
block_container_elmt.set('class', 'content-wrapper')
attribute_options = {
'id': attribute_elid,
'data-position': attribute_position,
'data-path': attribute_data_path,
'data-level': attribute_level,
'data-label': attribute_label,
'css': attribute_css
}
if group_position == 'V00':
if attribute_position == 'B01' or attribute_position == 'B02':
attribute_elmt = _make_element(left_elmt, 'div', **attribute_options)
else:
attribute_elmt = _make_element(right_elmt, 'div', **attribute_options)
else:
attribute_elmt = _make_element(block_container_elmt, 'div', **attribute_options)
# FOR INTEGRATION
# if UI_MODE == 'DEVELOPMENT':
# attribute_elmt.text = 'Attribute: %s (%s) (%s) (%s) (%s)' % (attribute['label'], attribute['name'], attribute_elid, attribute_widget_type, attribute_position)
# Generate metadata for nested elements, ex. tables and attribute groups
if attribute_widget_type in ('table_ooi', 'attribute_group_ooi') and attribute_elid not in metadata_processed:
metadata_processed.append(attribute_elid)
metadata = []
for embedded_attribute in attribute['embed']:
embedded_object = layout_schema['spec']['elements'][embedded_attribute['elid']]
embedded_widget_type = layout_schema['spec']['widgets'][embedded_attribute['wid']]['name']
# LABEL OVERRIDE
if embedded_attribute.has_key('olabel'):
#print 'sub-attribute label override:', embedded_object['label'], '->', embedded_attribute['olabel'], attribute_elid
embedded_object_label = embedded_attribute['olabel']
else:
embedded_object_label = embedded_object['label']
embedded_info_level = embedded_attribute['olevel']
if embedded_info_level:
embedded_info_level_index = int(embedded_info_level)
metadata_items = [embedded_widget_type, embedded_object_label, embedded_attribute['dpath'], embedded_attribute['pos'], embedded_info_level, attribute_levels[embedded_info_level_index]]
if attribute_widget_type == 'attribute_group_ooi':
meta_elmt_id = 'ATTRIBUTE_GROUP_' + attribute_elid
metadata_items.append(embedded_attribute['elid'])
metadata_items.append(embedded_attribute['dpath'])
elif attribute_widget_type == 'table_ooi':
meta_elmt_id = 'TABLE_' + attribute_elid
metadata.append(metadata_items)
# Append metadata to body as a JSON script
meta_elmt = ET.SubElement(body_elmt, 'script')
meta_elmt.set('id', meta_elmt_id)
meta_elmt.text = "var %s=%s" % (meta_elmt_id, json.dumps(metadata))
layout_elmt = ET.SubElement(body_elmt, 'script')
layout_elmt.set('id', 'layout')
layout_elmt.text = "var LAYOUT=%s;" % json.dumps(layout_schema)
resource_types_elmt = ET.SubElement(body_elmt, 'script')
resource_types_elmt.set('id', 'resource_types')
resource_types_elmt.text = "var RESOURCE_TYPES=%s" % json.dumps(resource_types)
init_script_elmt = ET.Element('script')
init_script_elmt.set('type', 'text/javascript')
init_script_elmt.text = "$(function(){initialize_app();});"
body_elmt.append(init_script_elmt)
tmpl = ET.tostring(tmpl)
tmpl = '<!DOCTYPE html>\n' + tmpl
h = HTMLParser.HTMLParser()
return h.unescape(tmpl)
def _make_element(parent_elmt, elmt_type, **kwargs):
elmt = ET.SubElement(parent_elmt, elmt_type)
for (key, value) in kwargs.items():
if key == 'css':
elmt.set('class', value)
elif key.startswith('data'):
elmt.set(key.replace('_','-'), value)
elif key == 'content':
elmt.text = value
else:
elmt.set(key, value)
return elmt
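# e.g. _make_element(parent, 'div', css='tab-pane row-fluid', data_toggle='tab')
# produces <div class="tab-pane row-fluid" data-toggle="tab"/> under `parent`.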
|
|
"""Download and install structured genome data and aligner index files.
Downloads prepared FASTA files, indexes for aligners like BWA, Bowtie and
novoalign, and other genome data for use in automated pipelines. Specify the
genomes and aligners to use in an input biodata.yaml configuration file.
The main targets are fabric functions:
- install_data -- Install biological data from scratch, including indexing genomes.
- install_data_s3 -- Install biological data, downloading pre-computed indexes from S3.
- upload_s3 -- Upload created indexes to biodata S3 bucket.
"""
import os
import operator
import socket
import subprocess
from math import log
from fabric.api import *
from fabric.contrib.files import *
from fabric.context_managers import path
try:
import yaml
except ImportError:
yaml = None
try:
import boto
except ImportError:
boto = None
from cloudbio.biodata import galaxy, ggd
from cloudbio.biodata.dbsnp import download_dbsnp
from cloudbio.biodata.rnaseq import download_transcripts
from cloudbio.custom import shared
from cloudbio.fabutils import quiet
import multiprocessing as mp
# -- Configuration for genomes to download and prepare
class _DownloadHelper:
def __init__(self):
self.config = {}
def ucsc_name(self):
return None
def _exists(self, fname, seq_dir):
"""Check if a file exists in either download or final destination.
"""
return env.safe_exists(fname) or env.safe_exists(os.path.join(seq_dir, fname))
class UCSCGenome(_DownloadHelper):
def __init__(self, genome_name, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "UCSC"
self._name = genome_name
self.dl_name = dl_name if dl_name is not None else genome_name
self._url = "ftp://hgdownload.cse.ucsc.edu/goldenPath/%s/bigZips" % \
genome_name
def ucsc_name(self):
return self._name
def _karyotype_sort(self, xs):
"""Sort reads in karyotypic order to work with GATK's defaults.
"""
def karyotype_keyfn(x):
base = os.path.splitext(os.path.basename(x))[0]
if base.startswith("chr"):
base = base[3:]
parts = base.split("_")
try:
parts[0] = int(parts[0])
except ValueError:
pass
# unplaced at the very end
if parts[0] == "Un":
parts.insert(0, "z")
# mitochondrial special case -- after X/Y
elif parts[0] in ["M", "MT"]:
parts.insert(0, "x")
# sort random and extra chromosomes after M
elif len(parts) > 1:
parts.insert(0, "y")
return parts
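# Resulting order (with Python 2's int-before-str comparison): numbered
# chromosomes, then X and Y, then M/MT, then *_random contigs, then
# unplaced (chrUn) contigs.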
return sorted(xs, key=karyotype_keyfn)
def _split_multifasta(self, fasta_file):
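# Split a multi-record FASTA into one <record>.fa file per sequence in the
# same directory, returning the new file names in input order.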
chrom = ""
file_handle = None
file_names = []
out_dir = os.path.dirname(fasta_file)
with open(fasta_file) as in_handle:
for line in in_handle:
if line.startswith(">"):
chrom = line.split(">")[1].strip()
file_handle.close() if file_handle else None
file_names.append(chrom + ".fa")
file_handle = open(os.path.join(out_dir, chrom + ".fa"), "w")
file_handle.write(line)
else:
file_handle.write(line)
file_handle.close()
return file_names
def download(self, seq_dir):
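# Fetch the UCSC archive, unpack it, split a single multi-FASTA into
# per-chromosome files if needed, sort the chromosomes karyotypically and
# concatenate them into <genome>.fa inside the seq_prep staging directory.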
zipped_file = None
genome_file = "%s.fa" % self._name
if not self._exists(genome_file, seq_dir):
prep_dir = "seq_prep"
env.safe_run("mkdir -p %s" % prep_dir)
with cd(prep_dir):
zipped_file = self._download_zip(seq_dir)
if zipped_file.endswith(".tar.gz"):
env.safe_run("tar -xzpf %s" % zipped_file)
elif zipped_file.endswith(".zip"):
env.safe_run("unzip %s" % zipped_file)
elif zipped_file.endswith(".gz"):
if not env.safe_exists("out.fa"):
env.safe_run("gunzip -c %s > out.fa" % zipped_file)
else:
raise ValueError("Do not know how to handle: %s" % zipped_file)
tmp_file = genome_file.replace(".fa", ".txt")
result = env.safe_run_output("find `pwd` -name '*.fa'")
result = [x.strip() for x in result.split("\n")]
if len(result) == 1:
orig_result = result[0]
result = self._split_multifasta(result[0])
env.safe_run("rm %s" % orig_result)
result = self._karyotype_sort(result)
env.safe_run("rm -f inputs.txt")
for fname in result:
with quiet():
env.safe_run("echo '%s' >> inputs.txt" % fname)
env.safe_run("cat `cat inputs.txt` > %s" % (tmp_file))
for fname in result:
with quiet():
env.safe_run("rm -f %s" % fname)
env.safe_run("mv %s %s" % (tmp_file, genome_file))
zipped_file = os.path.join(prep_dir, zipped_file)
genome_file = os.path.join(prep_dir, genome_file)
return genome_file, [zipped_file]
def _download_zip(self, seq_dir):
for zipped_file in ["chromFa.tar.gz", "%s.fa.gz" % self._name,
"chromFa.zip"]:
if not self._exists(zipped_file, seq_dir):
result = shared._remote_fetch(env, "%s/%s" % (self._url, zipped_file), allow_fail=True)
if result:
break
else:
break
return zipped_file
class NCBIRest(_DownloadHelper):
"""Retrieve files using the TogoWS REST server pointed at NCBI.
"""
def __init__(self, name, refs, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "NCBI"
self._name = name
self._refs = refs
self.dl_name = dl_name if dl_name is not None else name
self._base_url = "http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta"
def download(self, seq_dir):
genome_file = "%s.fa" % self._name
if not self._exists(genome_file, seq_dir):
for ref in self._refs:
shared._remote_fetch(env, self._base_url % ref)
env.safe_run("ls -l")
env.safe_sed('%s.fasta' % ref, '^>.*$', '>%s' % ref, '1')
tmp_file = genome_file.replace(".fa", ".txt")
env.safe_run("cat *.fasta > %s" % tmp_file)
env.safe_run("rm -f *.fasta")
env.safe_run("rm -f *.bak")
env.safe_run("mv %s %s" % (tmp_file, genome_file))
return genome_file, []
class VectorBase(_DownloadHelper):
"""Retrieve genomes from VectorBase) """
def __init__(self, name, genus, species, strain, release, assembly_types):
_DownloadHelper.__init__(self)
self._name = name
self.data_source = "VectorBase"
self._base_url = ("http://www.vectorbase.org/sites/default/files/ftp/"
"downloads/")
_base_file = ("{genus}-{species}-{strain}_{assembly}"
"_{release}.fa.gz")
self._to_get = []
for assembly in assembly_types:
self._to_get.append(_base_file.format(**locals()))
def download(self, seq_dir):
print os.getcwd()
genome_file = "%s.fa" % self._name
for fn in self._to_get:
url = self._base_url + fn
if not self._exists(fn, seq_dir):
shared._remote_fetch(env, url)
env.safe_run("gunzip -c %s >> %s" % (fn, genome_file))
return genome_file, []
class EnsemblGenome(_DownloadHelper):
"""Retrieve genome FASTA files from Ensembl.
ftp://ftp.ensemblgenomes.org/pub/plants/release-22/fasta/
arabidopsis_thaliana/dna/Arabidopsis_thaliana.TAIR10.22.dna.toplevel.fa.gz
ftp://ftp.ensembl.org/pub/release-75/fasta/
caenorhabditis_elegans/dna/Caenorhabditis_elegans.WBcel235.75.dna.toplevel.fa.gz
ftp://ftp.ensemblgenomes.org/pub/bacteria/release-23/bacteria/fasta/
bacteria_17_collection/pseudomonas_aeruginosa_ucbpp_pa14/dna/
Pseudomonas_aeruginosa_ucbpp_pa14.GCA_000014625.1.23.dna.toplevel.fa.gz
"""
def __init__(self, ensembl_section, release, organism, name, subsection=None):
_DownloadHelper.__init__(self)
self.data_source = "Ensembl"
if ensembl_section == "standard":
url = "ftp://ftp.ensembl.org/pub/"
else:
url = "ftp://ftp.ensemblgenomes.org/pub/%s/" % ensembl_section
url += "release-%s/fasta/" % release
if subsection:
url += "%s/" % subsection
url += "%s/dna/" % organism.lower()
self._url = url
if ensembl_section == "standard":
self._get_file = "%s.%s.dna.toplevel.fa.gz" % (organism, name)
else:
self._get_file = "%s.%s.%s.dna.toplevel.fa.gz" % (organism, name, release)
self._name = name
self.dl_name = name
def download(self, seq_dir):
genome_file = "%s.fa" % self._name
if not self._exists(self._get_file, seq_dir):
shared._remote_fetch(env, "%s%s" % (self._url, self._get_file))
if not self._exists(genome_file, seq_dir):
env.safe_run("gunzip -c %s > %s" % (self._get_file, genome_file))
return genome_file, [self._get_file]
class BroadGenome(_DownloadHelper):
"""Retrieve genomes organized and sorted by Broad for use with GATK.
Uses the UCSC-name compatible versions of the GATK bundles.
"""
def __init__(self, name, bundle_version, target_fasta, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "UCSC"
self._name = name
self.dl_name = dl_name if dl_name is not None else name
self._target = target_fasta
self._ftp_url = "ftp://gsapubftp-anonymous:@ftp.broadinstitute.org/bundle/" + \
"{ver}/{org}/".format(ver=bundle_version, org=self.dl_name)
def download(self, seq_dir):
org_file = "%s.fa" % self._name
if not self._exists(org_file, seq_dir):
shared._remote_fetch(env, "%s%s.gz" % (self._ftp_url, self._target))
env.safe_run("gunzip %s.gz" % self._target)
env.safe_run("mv %s %s" % (self._target, org_file))
return org_file, []
class GGDGenome:
"""Genome with download specified via a GGD recipe.
"""
def __init__(self, name):
self._name = name
BROAD_BUNDLE_VERSION = "2.8"
DBSNP_VERSION = "138"
GENOMES_SUPPORTED = [
("phiX174", "phix", NCBIRest("phix", ["NC_001422.1"])),
("Scerevisiae", "sacCer3", UCSCGenome("sacCer3")),
("Mmusculus", "mm10", UCSCGenome("mm10")),
("Mmusculus", "mm9", UCSCGenome("mm9")),
("Mmusculus", "mm8", UCSCGenome("mm8")),
("Hsapiens", "hg18", BroadGenome("hg18", BROAD_BUNDLE_VERSION,
"Homo_sapiens_assembly18.fasta")),
("Hsapiens", "hg19", BroadGenome("hg19", BROAD_BUNDLE_VERSION,
"ucsc.hg19.fasta")),
("Hsapiens", "GRCh37", BroadGenome("GRCh37", BROAD_BUNDLE_VERSION,
"human_g1k_v37.fasta", "b37")),
("Hsapiens", "hg38", GGDGenome("hg38")),
("Hsapiens", "hg38-noalt", GGDGenome("hg38-noalt")),
("Rnorvegicus", "rn6", UCSCGenome("rn6")),
("Rnorvegicus", "rn5", UCSCGenome("rn5")),
("Rnorvegicus", "rn4", UCSCGenome("rn4")),
("Xtropicalis", "xenTro3", UCSCGenome("xenTro3")),
("Athaliana", "TAIR10", EnsemblGenome("plants", "26",
"Arabidopsis_thaliana", "TAIR10")),
("Dmelanogaster", "dm3", UCSCGenome("dm3")),
("Celegans", "WBcel235", EnsemblGenome("standard", "80",
"Caenorhabditis_elegans", "WBcel235")),
("Mtuberculosis_H37Rv", "mycoTube_H37RV", NCBIRest("mycoTube_H37RV",
["NC_000962"])),
("Msmegmatis", "92", NCBIRest("92", ["NC_008596.1"])),
("Paeruginosa_UCBPP-PA14", "pseudomonas_aeruginosa_ucbpp_pa14",
EnsemblGenome("bacteria", "26", "Pseudomonas_aeruginosa_ucbpp_pa14",
"GCA_000014625.1", "bacteria_17_collection")),
("Ecoli", "eschColi_K12", NCBIRest("eschColi_K12", ["U00096.2"])),
("Amellifera_Honeybee", "apiMel3", UCSCGenome("apiMel3")),
("Cfamiliaris_Dog", "canFam3", UCSCGenome("canFam3")),
("Cfamiliaris_Dog", "canFam2", UCSCGenome("canFam2")),
("Drerio_Zebrafish", "Zv9", EnsemblGenome("standard", "80", "Danio_rerio", "Zv9")),
("Drerio_Zebrafish", "GRCz10", EnsemblGenome("standard", "81", "Danio_rerio", "GRCz10")),
("Ecaballus_Horse", "equCab2", UCSCGenome("equCab2")),
("Fcatus_Cat", "felCat3", UCSCGenome("felCat3")),
("Ggallus_Chicken", "galGal4", UCSCGenome("galGal4")),
("Tguttata_Zebra_finch", "taeGut1", UCSCGenome("taeGut1")),
("Aalbimanus", "AalbS1", VectorBase("AalbS1", "Anopheles",
"albimanus", "STECLA",
"AalbS1", ["SCAFFOLDS"])),
("Agambiae", "AgamP3", VectorBase("AgamP3", "Anopheles",
"gambiae", "PEST",
"AgamP3", ["CHROMOSOMES"])),]
GENOME_INDEXES_SUPPORTED = ["bowtie", "bowtie2", "bwa", "maq", "novoalign", "novoalign-cs",
"ucsc", "mosaik", "snap", "star", "rtg"]
DEFAULT_GENOME_INDEXES = ["seq"]
# -- Fabric instructions
def _check_version():
version = env.version
if int(version.split(".")[0]) < 1:
raise NotImplementedError("Please install fabric version 1 or better")
def install_data(config_source, approaches=None):
"""Main entry point for installing useful biological data.
"""
PREP_FNS = {"s3": _download_s3_index,
"ggd": _install_with_ggd,
"raw": _prep_raw_index}
if approaches is None: approaches = ["raw"]
ready_approaches = []
for approach in approaches:
ready_approaches.append((approach, PREP_FNS[approach]))
_check_version()
# Append a potentially custom system install path to PATH so tools are found
with path(os.path.join(env.system_install, 'bin')):
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes = [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes] + genome_indexes
_make_genome_directories(env, genomes)
download_transcripts(genomes, env)
_prep_genomes(env, genomes, genome_indexes, ready_approaches)
_install_additional_data(genomes, genome_indexes, config)
def install_data_s3(config_source):
"""Install data using pre-existing genomes present on Amazon s3.
"""
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
_make_genome_directories(env, genomes)
download_transcripts(genomes, env)
_download_genomes(genomes, genome_indexes)
_install_additional_data(genomes, genome_indexes, config)
def install_data_rsync(config_source):
"""Install data using pre-existing genomes from Galaxy rsync servers.
"""
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
# Galaxy stores FASTAs in ucsc format and generates on the fly
if "ucsc" not in genome_indexes:
genome_indexes.append("ucsc")
genome_dir = _make_genome_dir()
galaxy.rsync_genomes(genome_dir, genomes, genome_indexes)
def upload_s3(config_source):
"""Upload prepared genome files by identifier to Amazon s3 buckets.
"""
if boto is None:
raise ImportError("install boto to upload to Amazon s3")
if env.host != "localhost" and not env.host.startswith(socket.gethostname()):
raise ValueError("Need to run S3 upload on a local machine")
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
_data_ngs_genomes(genomes, genome_indexes)
_upload_genomes(genomes, genome_indexes)
def _install_additional_data(genomes, genome_indexes, config):
download_dbsnp(genomes, BROAD_BUNDLE_VERSION, DBSNP_VERSION)
for custom in (config.get("custom") or []):
_prep_custom_genome(custom, genomes, genome_indexes, env)
if config.get("install_liftover", False):
lift_over_genomes = [g.ucsc_name() for (_, _, g) in genomes if g.ucsc_name()]
_data_liftover(lift_over_genomes)
if config.get("install_uniref", False):
_data_uniref()
def _get_genomes(config_source):
if isinstance(config_source, dict):
config = config_source
else:
if yaml is None:
raise ImportError("install yaml to read configuration from %s" % config_source)
with open(config_source) as in_handle:
config = yaml.load(in_handle)
genomes = []
genomes_config = config["genomes"] or []
env.logger.info("List of genomes to get (from the config file at '{0}'): {1}"
.format(config_source, ', '.join(g.get('name', g["dbkey"]) for g in genomes_config)))
for g in genomes_config:
ginfo = None
for info in GENOMES_SUPPORTED:
if info[1] == g["dbkey"]:
ginfo = info
break
assert ginfo is not None, "Did not find download info for %s" % g["dbkey"]
name, gid, manager = ginfo
manager.config = g
genomes.append((name, gid, manager))
indexes = config["genome_indexes"] or []
if "seq" in indexes:
indexes.remove("seq")
indexes.insert(0, "seq")
return genomes, indexes, config
# ## Decorators and context managers
def _if_installed(pname):
"""Run if the given program name is installed.
"""
def argcatcher(func):
def decorator(*args, **kwargs):
if not shared._executable_not_on_path(pname):
return func(*args, **kwargs)
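            # If pname is not on PATH the condition above is false and the call
            # implicitly returns None, turning the wrapped indexing step into a no-op.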
return decorator
return argcatcher
# ## Generic preparation functions
def _make_genome_dir():
genome_dir = os.path.join(env.data_files, "genomes")
if not env.safe_exists(genome_dir):
with settings(warn_only=True):
result = env.safe_run_output("mkdir -p %s" % genome_dir)
else:
result = None
if result is not None and result.failed:
env.safe_sudo("mkdir -p %s" % genome_dir)
env.safe_sudo("chown -R %s %s" % (env.user, genome_dir))
return genome_dir
def _make_genome_directories(env, genomes):
genome_dir = _make_genome_dir()
for (orgname, gid, manager) in genomes:
org_dir = os.path.join(genome_dir, orgname, gid)
if not env.safe_exists(org_dir):
env.safe_run('mkdir -p %s' % org_dir)
def _prep_genomes(env, genomes, genome_indexes, retrieve_fns):
"""Prepare genomes with the given indexes, supporting multiple retrieval methods.
"""
genome_dir = _make_genome_dir()
for (orgname, gid, manager) in genomes:
org_dir = os.path.join(genome_dir, orgname, gid)
if not env.safe_exists(org_dir):
env.safe_run('mkdir -p %s' % org_dir)
ggd_recipes = manager.config.get("annotations", []) + manager.config.get("validation", [])
for idx in genome_indexes + ggd_recipes:
with cd(org_dir):
if not env.safe_exists(idx):
finished = False
for method, retrieve_fn in retrieve_fns:
try:
retrieve_fn(env, manager, gid, idx)
finished = True
break
except KeyboardInterrupt:
raise
except:
# Fail on incorrect GGD recipes
if idx in ggd_recipes and method == "ggd":
raise
else:
env.logger.info("Genome preparation method {0} failed, trying next".format(method))
if not finished:
raise IOError("Could not prepare index {0} for {1} by any method".format(idx, gid))
ref_file = os.path.join(org_dir, "seq", "%s.fa" % gid)
if not env.safe_exists(ref_file):
ref_file = os.path.join(org_dir, "seq", "%s.fa" % manager._name)
assert env.safe_exists(ref_file), ref_file
cur_indexes = manager.config.get("indexes", genome_indexes)
_index_to_galaxy(org_dir, ref_file, gid, cur_indexes, manager.config)
# ## Genomes index for next-gen sequencing tools
def _get_ref_seq(env, manager):
"""Check for or retrieve the reference sequence.
"""
seq_dir = os.path.join(env.cwd, "seq")
ref_file = os.path.join(seq_dir, "%s.fa" % manager._name)
if not env.safe_exists(ref_file):
ref_file, base_zips = manager.download(seq_dir)
ref_file = _move_seq_files(ref_file, base_zips, seq_dir)
return ref_file
def _prep_raw_index(env, manager, gid, idx):
"""Prepare genome from raw downloads and indexes.
"""
env.logger.info("Preparing genome {0} with index {1}".format(gid, idx))
ref_file = _get_ref_seq(env, manager)
get_index_fn(idx)(ref_file)
def _data_ngs_genomes(genomes, genome_indexes):
"""Download and create index files for next generation genomes.
"""
genome_dir = _make_genome_dir()
for organism, genome, manager in genomes:
cur_dir = os.path.join(genome_dir, organism, genome)
env.logger.info("Processing genome {0} and putting it to {1}"\
.format(organism, cur_dir))
if not env.safe_exists(cur_dir):
env.safe_run('mkdir -p %s' % cur_dir)
with cd(cur_dir):
if hasattr(env, "remove_old_genomes") and env.remove_old_genomes:
_clean_genome_directory()
seq_dir = 'seq'
ref_file, base_zips = manager.download(seq_dir)
ref_file = _move_seq_files(ref_file, base_zips, seq_dir)
cur_indexes = manager.config.get("indexes", genome_indexes)
_index_to_galaxy(cur_dir, ref_file, genome, cur_indexes, manager.config)
def _index_to_galaxy(work_dir, ref_file, gid, genome_indexes, config):
"""Index sequence files and update associated Galaxy loc files.
"""
indexes = {}
with cd(work_dir):
for idx in genome_indexes:
index_file = get_index_fn(idx)(ref_file)
if index_file:
indexes[idx] = os.path.join(work_dir, index_file)
galaxy.prep_locs(gid, indexes, config)
class CustomMaskManager:
"""Create a custom genome based on masking an existing genome.
"""
def __init__(self, custom, config):
assert custom.has_key("mask")
self._custom = custom
self.config = config
def download(self, seq_dir):
base_seq = os.path.join(os.pardir, self._custom["base"],
"seq", "{0}.fa".format(self._custom["base"]))
assert env.safe_exists(base_seq)
mask_file = os.path.basename(self._custom["mask"])
        ready_mask = "{0}-complement{1}".format(*os.path.splitext(mask_file))
out_fasta = "{0}.fa".format(self._custom["dbkey"])
if not env.safe_exists(os.path.join(seq_dir, out_fasta)):
if not env.safe_exists(mask_file):
shared._remote_fetch(env, self._custom["mask"])
if not env.safe_exists(ready_mask):
env.safe_run("bedtools complement -i {i} -g {g}.fai > {o}".format(
i=mask_file, g=base_seq, o=ready_mask))
if not env.safe_exists(out_fasta):
env.safe_run("bedtools maskfasta -fi {fi} -bed {bed} -fo {fo}".format(
fi=base_seq, bed=ready_mask, fo=out_fasta))
return out_fasta, [mask_file, ready_mask]
def _prep_custom_genome(custom, genomes, genome_indexes, env):
"""Prepare a custom genome derived from existing genome.
Allows creation of masked genomes for specific purposes.
"""
cur_org = None
cur_manager = None
for org, gid, manager in genomes:
if gid == custom["base"]:
cur_org = org
cur_manager = manager
break
assert cur_org is not None
_data_ngs_genomes([[cur_org, custom["dbkey"],
CustomMaskManager(custom, cur_manager.config)]],
genome_indexes)
def _clean_genome_directory():
"""Remove any existing sequence information in the current directory.
"""
for dirname in GENOME_INDEXES_SUPPORTED + DEFAULT_GENOME_INDEXES:
if env.safe_exists(dirname):
env.safe_run("rm -rf %s" % dirname)
def _move_seq_files(ref_file, base_zips, seq_dir):
if not env.safe_exists(seq_dir):
env.safe_run('mkdir %s' % seq_dir)
for move_file in [ref_file] + base_zips:
if env.safe_exists(move_file):
env.safe_run("mv %s %s" % (move_file, seq_dir))
path, fname = os.path.split(ref_file)
moved_ref = os.path.join(path, seq_dir, fname)
assert env.safe_exists(moved_ref), moved_ref
return moved_ref
# ## Indexing for specific aligners
def _index_w_command(dir_name, command, ref_file, pre=None, post=None, ext=None):
"""Low level function to do the indexing and paths with an index command.
"""
index_name = os.path.splitext(os.path.basename(ref_file))[0]
if ext is not None: index_name += ext
full_ref_path = os.path.join(os.pardir, ref_file)
if not env.safe_exists(dir_name):
env.safe_run("mkdir %s" % dir_name)
with cd(dir_name):
if pre:
full_ref_path = pre(full_ref_path)
env.safe_run(command.format(ref_file=full_ref_path, index_name=index_name))
if post:
post(full_ref_path)
return os.path.join(dir_name, index_name)
@_if_installed("faToTwoBit")
def _index_twobit(ref_file):
"""Index reference files using 2bit for random access.
"""
dir_name = "ucsc"
cmd = "faToTwoBit {ref_file} {index_name}"
return _index_w_command(dir_name, cmd, ref_file)
def _index_bowtie(ref_file):
dir_name = "bowtie"
cmd = "bowtie-build -f {ref_file} {index_name}"
return _index_w_command(dir_name, cmd, ref_file)
def _index_bowtie2(ref_file):
dir_name = "bowtie2"
cmd = "bowtie2-build {ref_file} {index_name}"
out_suffix = _index_w_command(dir_name, cmd, ref_file)
bowtie_link = os.path.normpath(os.path.join(os.path.dirname(ref_file), os.path.pardir,
out_suffix + ".fa"))
relative_ref_file = os.path.relpath(ref_file, os.path.dirname(bowtie_link))
if not env.safe_exists(bowtie_link):
env.safe_run("ln -sf %s %s" % (relative_ref_file, bowtie_link))
return out_suffix
def _index_bwa(ref_file):
dir_name = "bwa"
local_ref = os.path.split(ref_file)[-1]
if not env.safe_exists(dir_name):
env.safe_run("mkdir %s" % dir_name)
with cd(dir_name):
env.safe_run("ln -sf %s" % os.path.join(os.pardir, ref_file))
with settings(warn_only=True):
result = env.safe_run("bwa index -a bwtsw %s" % local_ref)
# work around a bug in bwa indexing for small files
if result.failed:
env.safe_run("bwa index %s" % local_ref)
env.safe_run("rm -f %s" % local_ref)
return os.path.join(dir_name, local_ref)
def _index_maq(ref_file):
dir_name = "maq"
cmd = "maq fasta2bfa {ref_file} {index_name}"
def link_local(ref_file):
local = os.path.basename(ref_file)
env.safe_run("ln -sf {0} {1}".format(ref_file, local))
return local
def rm_local(local_file):
env.safe_run("rm -f {0}".format(local_file))
return _index_w_command(dir_name, cmd, ref_file, pre=link_local, post=rm_local)
@_if_installed("novoindex")
def _index_novoalign(ref_file):
dir_name = "novoalign"
cmd = "novoindex {index_name} {ref_file}"
return _index_w_command(dir_name, cmd, ref_file)
@_if_installed("novoalignCS")
def _index_novoalign_cs(ref_file):
dir_name = "novoalign_cs"
cmd = "novoindex -c {index_name} {ref_file}"
return _index_w_command(dir_name, cmd, ref_file)
def _index_sam(ref_file):
(ref_dir, local_file) = os.path.split(ref_file)
with cd(ref_dir):
if not env.safe_exists("%s.fai" % local_file):
env.safe_run("samtools faidx %s" % local_file)
galaxy.index_picard(ref_file)
return ref_file
def _index_star(ref_file):
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = os.path.join(ref_dir, os.pardir, "rnaseq", "ref-transcripts.gtf")
if not os.path.exists(gtf_file):
print "%s not found, skipping creating the STAR index." % (gtf_file)
return None
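    # Scale STAR's --genomeSAindexNbases with genome size (approximated here by
    # the FASTA file size): roughly log2(genome length) / 2 - 2, capped at the
    # default of 14, so small references do not get an oversized suffix-array index.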
GenomeLength = os.path.getsize(ref_file)
Nbases = int(round(min(14, log(GenomeLength, 2)/2 - 2), 0))
dir_name = os.path.normpath(os.path.join(ref_dir, os.pardir, "star"))
cpu = mp.cpu_count()
cmd = ("STAR --genomeDir %s --genomeFastaFiles {ref_file} "
"--runThreadN %s "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile %s --genomeSAindexNbases %s" % (dir_name, str(cpu), gtf_file, Nbases))
return _index_w_command(dir_name, cmd, ref_file)
def _index_snap(ref_file):
"""Snap indexing is computationally expensive. Ask for all cores and need 64Gb of memory.
"""
dir_name = "snap"
index_name = os.path.splitext(os.path.basename(ref_file))[0]
org_arg = "-hg19" if index_name in ["hg19", "GRCh37"] else ""
cmd = "snap index {ref_file} {dir_name} -bSpace {org_arg}"
if not env.safe_exists(os.path.join(dir_name, "GenomeIndex")):
env.safe_run(cmd.format(**locals()))
return dir_name
def _index_rtg(ref_file):
"""Perform indexing for use with Real Time Genomics tools.
https://github.com/RealTimeGenomics/rtg-tools
"""
dir_name = "rtg"
index_name = "%s.sdf" % os.path.splitext(os.path.basename(ref_file))[0]
if not env.safe_exists(os.path.join(dir_name, index_name, "done")):
cmd = "rtg format -o {dir_name}/{index_name} {ref_file}"
env.safe_run(cmd.format(**locals()))
return dir_name
@_if_installed("MosaikJump")
def _index_mosaik(ref_file):
hash_size = 15
dir_name = "mosaik"
cmd = "MosaikBuild -fr {ref_file} -oa {index_name}"
def create_jumpdb(ref_file):
jmp_base = os.path.splitext(os.path.basename(ref_file))[0]
dat_file = "{0}.dat".format(jmp_base)
if not env.safe_exists("{0}_keys.jmp".format(jmp_base)):
cmd = "export MOSAIK_TMP=`pwd` && MosaikJump -hs {hash_size} -ia {ref_file} -out {index_name}".format(
hash_size=hash_size, ref_file=dat_file, index_name=jmp_base)
env.safe_run(cmd)
return _index_w_command(dir_name, cmd, ref_file,
post=create_jumpdb, ext=".dat")
# -- Retrieve using GGD recipes
def _install_with_ggd(env, manager, gid, recipe):
assert env.hosts == ["localhost"], "GGD recipes only work for local runs"
recipe_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, "ggd-recipes"))
recipe_file = os.path.join(recipe_dir, gid, "%s.yaml" % recipe)
if os.path.exists(recipe_file):
ggd.install_recipe(env.cwd, recipe_file)
else:
raise NotImplementedError("GGD recipe not available for %s %s" % (gid, recipe))
# -- Genome upload and download to Amazon s3 buckets
def _download_s3_index(env, manager, gid, idx):
env.logger.info("Downloading genome from s3: {0} {1}".format(gid, idx))
url = "https://s3.amazonaws.com/biodata/genomes/%s-%s.tar.xz" % (gid, idx)
out_file = shared._remote_fetch(env, url)
env.safe_run("xz -dc %s | tar -xvpf -" % out_file)
env.safe_run("rm -f %s" % out_file)
def _download_genomes(genomes, genome_indexes):
"""Download a group of genomes from Amazon s3 bucket.
"""
genome_dir = _make_genome_dir()
for (orgname, gid, manager) in genomes:
org_dir = os.path.join(genome_dir, orgname, gid)
if not env.safe_exists(org_dir):
env.safe_run('mkdir -p %s' % org_dir)
for idx in genome_indexes:
with cd(org_dir):
if not env.safe_exists(idx):
_download_s3_index(env, manager, gid, idx)
ref_file = os.path.join(org_dir, "seq", "%s.fa" % gid)
if not env.safe_exists(ref_file):
ref_file = os.path.join(org_dir, "seq", "%s.fa" % manager._name)
assert env.safe_exists(ref_file), ref_file
cur_indexes = manager.config.get("indexes", genome_indexes)
_index_to_galaxy(org_dir, ref_file, gid, cur_indexes, manager.config)
def _upload_genomes(genomes, genome_indexes):
"""Upload our configured genomes to Amazon s3 bucket.
"""
conn = boto.connect_s3()
bucket = conn.create_bucket("biodata")
genome_dir = os.path.join(env.data_files, "genomes")
for (orgname, gid, _) in genomes:
cur_dir = os.path.join(genome_dir, orgname, gid)
_clean_directory(cur_dir, gid)
for idx in genome_indexes:
idx_dir = os.path.join(cur_dir, idx)
tarball = _tar_directory(idx_dir, "%s-%s" % (gid, idx))
_upload_to_s3(tarball, bucket)
bucket.make_public()
def _upload_to_s3(tarball, bucket):
"""Upload the genome tarball to s3.
"""
upload_script = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
"utils", "s3_multipart_upload.py")
s3_key_name = os.path.join("genomes", os.path.basename(tarball))
if not bucket.get_key(s3_key_name):
gb_size = int(run("du -sm %s" % tarball).split()[0]) / 1000.0
print "Uploading %s %.1fGb" % (s3_key_name, gb_size)
cl = ["python", upload_script, tarball, bucket.name, s3_key_name, "--public"]
subprocess.check_call(cl)
def _tar_directory(dir, tar_name):
"""Create a tarball of the directory.
"""
base_dir, tar_dir = os.path.split(dir)
tarball = os.path.join(base_dir, "%s.tar.xz" % tar_name)
if not env.safe_exists(tarball):
with cd(base_dir):
env.safe_run("tar -cvpf - %s | xz -zc - > %s" % (tar_dir,
os.path.basename(tarball)))
return tarball
def _clean_directory(dir, gid):
"""Clean duplicate files from directories before tar and upload.
"""
# get rid of softlinks
bowtie_ln = os.path.join(dir, "bowtie", "%s.fa" % gid)
maq_ln = os.path.join(dir, "maq", "%s.fa" % gid)
for to_remove in [bowtie_ln, maq_ln]:
if env.safe_exists(to_remove):
env.safe_run("rm -f %s" % to_remove)
# remove any downloaded original sequence files
remove_exts = ["*.gz", "*.zip"]
with cd(os.path.join(dir, "seq")):
for rext in remove_exts:
fnames = env.safe_run("find . -name '%s'" % rext)
for fname in (f.strip() for f in fnames.split("\n") if f.strip()):
env.safe_run("rm -f %s" % fname)
# == Liftover files
def _data_liftover(lift_over_genomes):
"""Download chain files for running liftOver.
Does not install liftOver binaries automatically.
"""
lo_dir = os.path.join(env.data_files, "liftOver")
if not env.safe_exists(lo_dir):
env.safe_run("mkdir %s" % lo_dir)
lo_base_url = "ftp://hgdownload.cse.ucsc.edu/goldenPath/%s/liftOver/%s"
lo_base_file = "%sTo%s.over.chain.gz"
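    # e.g. g1="hg19", g2="mm10" yields "hg19ToMm10.over.chain.gz"; the second
    # genome name is capitalized below before formatting.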
for g1 in lift_over_genomes:
for g2 in [g for g in lift_over_genomes if g != g1]:
g2u = g2[0].upper() + g2[1:]
cur_file = lo_base_file % (g1, g2u)
non_zip = os.path.splitext(cur_file)[0]
worked = False
with cd(lo_dir):
if not env.safe_exists(non_zip):
result = shared._remote_fetch(env, "%s" % (lo_base_url % (g1, cur_file)), allow_fail=True)
# Lift over back and forths don't always exist
# Only move forward if we found the file
if result:
worked = True
env.safe_run("gunzip %s" % result)
if worked:
ref_parts = [g1, g2, os.path.join(lo_dir, non_zip)]
galaxy.update_loc_file("liftOver.loc", ref_parts)
# == UniRef
def _data_uniref():
"""Retrieve and index UniRef databases for protein searches.
http://www.ebi.ac.uk/uniref/
These are currently indexed for FASTA searches. Are other indexes desired?
Should this be separated out and organized by program like genome data?
This should also check the release note and automatically download and
replace older versions.
"""
site = "ftp://ftp.uniprot.org"
base_url = site + "/pub/databases/uniprot/" \
"current_release/uniref/%s/%s"
for uniref_db in ["uniref50", "uniref90", "uniref100"]:
work_dir = os.path.join(env.data_files, "uniref", uniref_db)
if not env.safe_exists(work_dir):
env.safe_run("mkdir -p %s" % work_dir)
base_work_url = base_url % (uniref_db, uniref_db)
fasta_url = base_work_url + ".fasta.gz"
base_file = os.path.splitext(os.path.basename(fasta_url))[0]
with cd(work_dir):
if not env.safe_exists(base_file):
out_file = shared._remote_fetch(env, fasta_url)
env.safe_run("gunzip %s" % out_file)
shared._remote_fetch(env, base_work_url + ".release_note")
_index_blast_db(work_dir, base_file, "prot")
def _index_blast_db(work_dir, base_file, db_type):
"""Index a database using blast+ for similary searching.
"""
    type_to_ext = dict(prot=("phr", "pal"), nucl=("nhr", "nal"))
db_name = os.path.splitext(base_file)[0]
with cd(work_dir):
        if not any(env.safe_exists("%s.%s" % (db_name, ext))
                   for ext in type_to_ext[db_type]):
env.safe_run("makeblastdb -in %s -dbtype %s -out %s" %
(base_file, db_type, db_name))
def get_index_fn(index):
"""
return the index function for an index, if it is missing return a function
that is a no-op
"""
return INDEX_FNS.get(index, lambda x: None)
INDEX_FNS = {
"seq" : _index_sam,
"bwa" : _index_bwa,
"bowtie": _index_bowtie,
"bowtie2": _index_bowtie2,
"maq": _index_maq,
"mosaik": _index_mosaik,
"novoalign": _index_novoalign,
"novoalign_cs": _index_novoalign_cs,
"ucsc": _index_twobit,
"star": _index_star,
"snap": _index_snap,
"rtg": _index_rtg,
}
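# Hypothetical usage sketch of the dispatch table above: get_index_fn returns the
# registered indexer for a known name and a no-op for anything else, so callers can
# iterate over arbitrary index names without special-casing unsupported ones.
#
#     get_index_fn("bwa")("seq/hg19.fa")        # runs _index_bwa on the reference
#     get_index_fn("not-an-aligner")("x.fa")    # no-op, returns None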
|
|
import threading
import shared
import sqlite3
import time
import shutil # used for moving the messages.dat file
import sys
import os
from debug import logger
from namecoin import ensureNamecoinOptions
import random
import string
import tr  # translate helpers (used below as tr.translateText)
# This thread exists because SQLite connections cannot safely be shared across
# threads: other threads submit queries to it through a queue and it puts the
# results back on a separate return queue, rather than guarding a shared
# connection with locks.
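# A minimal sketch of the calling convention (an assumption based on the generic
# branch of the loop at the bottom of run()): put the SQL statement on
# sqlSubmitQueue, then its parameters, then read the rows from sqlReturnQueue.
#
#     shared.sqlSubmitQueue.put('''SELECT msgid FROM inbox WHERE read=0''')
#     shared.sqlSubmitQueue.put('')          # parameters ('' when there are none)
#     rows = shared.sqlReturnQueue.get()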
class sqlThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
self.conn = sqlite3.connect(shared.appdata + 'messages.dat')
self.conn.text_factory = str
self.cur = self.conn.cursor()
try:
self.cur.execute(
'''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, encodingtype int, read bool, UNIQUE(msgid) ON CONFLICT REPLACE)''' )
self.cur.execute(
'''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, pubkeyretrynumber integer, msgretrynumber integer, folder text, encodingtype int)''' )
self.cur.execute(
'''CREATE TABLE subscriptions (label text, address text, enabled bool)''' )
self.cur.execute(
'''CREATE TABLE addressbook (label text, address text)''' )
self.cur.execute(
'''CREATE TABLE blacklist (label text, address text, enabled bool)''' )
self.cur.execute(
'''CREATE TABLE whitelist (label text, address text, enabled bool)''' )
"""
Explanation of what is in the pubkeys table:
The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
transmitdata /was/ literally the data that was included in the Bitmessage pubkey message when it arrived,
            except for the 24 byte protocol header - i.e. it started with the POW nonce. Since protocol v3, to maintain
            backwards compatibility, the on-disk data format stays the same even though the wire format has changed.
            time is the time that the pubkey was broadcast on the network, the same as with every other type of Bitmessage object.
            usedpersonally is set to "yes" if we have used the key personally. This keeps us from deleting it because we may want to
            reply to a message in the future. This field is not a bool because we may need more flexibility in the future and it doesn't
            take up much more space anyway.
"""
self.cur.execute(
'''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''' )
self.cur.execute(
'''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' )
self.cur.execute(
'''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
self.cur.execute(
'''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''' )
self.cur.execute( '''INSERT INTO settings VALUES('version','7')''')
self.cur.execute( '''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
int(time.time()),))
self.cur.execute(
'''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
self.conn.commit()
logger.info('Created messages database file')
except Exception as err:
if str(err) == 'table inbox already exists':
logger.debug('Database file already exists.')
else:
sys.stderr.write(
                    'ERROR trying to create database file (messages.dat). Error message: %s\n' % str(err))
os._exit(0)
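        # The rest of run() performs incremental, in-place migrations: keys.dat's
        # 'settingsversion' is stepped from 1 up to 10 and the 'version' row in the
        # messages.dat settings table from 1 up to 7, one version at a time, so
        # older installs are upgraded automatically on startup.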
if shared.config.getint('bitmessagesettings', 'settingsversion') == 1:
shared.config.set('bitmessagesettings', 'settingsversion', '2')
# If the settings version is equal to 2 or 3 then the
# sqlThread will modify the pubkeys table and change
# the settings version to 4.
shared.config.set('bitmessagesettings', 'socksproxytype', 'none')
shared.config.set('bitmessagesettings', 'sockshostname', 'localhost')
shared.config.set('bitmessagesettings', 'socksport', '9050')
shared.config.set('bitmessagesettings', 'socksauthentication', 'false')
shared.config.set('bitmessagesettings', 'socksusername', '')
shared.config.set('bitmessagesettings', 'sockspassword', '')
shared.config.set('bitmessagesettings', 'sockslisten', 'false')
shared.config.set('bitmessagesettings', 'keysencrypted', 'false')
shared.config.set('bitmessagesettings', 'messagesencrypted', 'false')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# People running earlier versions of PyBitmessage do not have the
# usedpersonally field in their pubkeys table. Let's add it.
if shared.config.getint('bitmessagesettings', 'settingsversion') == 2:
item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' '''
parameters = ''
self.cur.execute(item, parameters)
self.conn.commit()
shared.config.set('bitmessagesettings', 'settingsversion', '3')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# People running earlier versions of PyBitmessage do not have the
# encodingtype field in their inbox and sent tables or the read field
# in the inbox table. Let's add them.
if shared.config.getint('bitmessagesettings', 'settingsversion') == 3:
item = '''ALTER TABLE inbox ADD encodingtype int DEFAULT '2' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''ALTER TABLE inbox ADD read bool DEFAULT '1' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''ALTER TABLE sent ADD encodingtype int DEFAULT '2' '''
parameters = ''
self.cur.execute(item, parameters)
self.conn.commit()
shared.config.set('bitmessagesettings', 'settingsversion', '4')
if shared.config.getint('bitmessagesettings', 'settingsversion') == 4:
shared.config.set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(
shared.networkDefaultProofOfWorkNonceTrialsPerByte))
shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
shared.networkDefaultPayloadLengthExtraBytes))
shared.config.set('bitmessagesettings', 'settingsversion', '5')
if shared.config.getint('bitmessagesettings', 'settingsversion') == 5:
shared.config.set(
'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0')
shared.config.set(
'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', '0')
shared.config.set('bitmessagesettings', 'settingsversion', '6')
# From now on, let us keep a 'version' embedded in the messages.dat
# file so that when we make changes to the database, the database
# version we are on can stay embedded in the messages.dat file. Let us
# check to see if the settings table exists yet.
item = '''SELECT name FROM sqlite_master WHERE type='table' AND name='settings';'''
parameters = ''
self.cur.execute(item, parameters)
if self.cur.fetchall() == []:
# The settings table doesn't exist. We need to make it.
logger.debug('In messages.dat database, creating new \'settings\' table.')
self.cur.execute(
'''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''' )
self.cur.execute( '''INSERT INTO settings VALUES('version','1')''')
self.cur.execute( '''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
int(time.time()),))
logger.debug('In messages.dat database, removing an obsolete field from the pubkeys table.')
self.cur.execute(
'''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''')
self.cur.execute(
'''INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;''')
self.cur.execute( '''DROP TABLE pubkeys''')
self.cur.execute(
'''CREATE TABLE pubkeys (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''' )
self.cur.execute(
'''INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;''')
self.cur.execute( '''DROP TABLE pubkeys_backup;''')
logger.debug('Deleting all pubkeys from inventory. They will be redownloaded and then saved with the correct times.')
self.cur.execute(
'''delete from inventory where objecttype = 'pubkey';''')
logger.debug('replacing Bitmessage announcements mailing list with a new one.')
self.cur.execute(
'''delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' ''')
self.cur.execute(
'''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
            logger.debug('Committing.')
self.conn.commit()
            logger.debug('Vacuuming messages.dat. You might notice that the file size gets much smaller.')
self.cur.execute( ''' VACUUM ''')
# After code refactoring, the possible status values for sent messages
# have changed.
self.cur.execute(
'''update sent set status='doingmsgpow' where status='doingpow' ''')
self.cur.execute(
'''update sent set status='msgsent' where status='sentmessage' ''')
self.cur.execute(
'''update sent set status='doingpubkeypow' where status='findingpubkey' ''')
self.cur.execute(
'''update sent set status='broadcastqueued' where status='broadcastpending' ''')
self.conn.commit()
if not shared.config.has_option('bitmessagesettings', 'sockslisten'):
shared.config.set('bitmessagesettings', 'sockslisten', 'false')
ensureNamecoinOptions()
"""# Add a new column to the inventory table to store the first 20 bytes of encrypted messages to support Android app
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
if int(self.cur.fetchall()[0][0]) == 1:
print 'upgrading database'
item = '''ALTER TABLE inventory ADD first20bytesofencryptedmessage blob DEFAULT '' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''update settings set value=? WHERE key='version';'''
parameters = (2,)
self.cur.execute(item, parameters)"""
# Let's get rid of the first20bytesofencryptedmessage field in the inventory table.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
if int(self.cur.fetchall()[0][0]) == 2:
logger.debug('In messages.dat database, removing an obsolete field from the inventory table.')
self.cur.execute(
'''CREATE TEMPORARY TABLE inventory_backup(hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''')
self.cur.execute(
'''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime FROM inventory;''')
self.cur.execute( '''DROP TABLE inventory''')
self.cur.execute(
'''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''' )
self.cur.execute(
'''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime FROM inventory_backup;''')
self.cur.execute( '''DROP TABLE inventory_backup;''')
item = '''update settings set value=? WHERE key='version';'''
parameters = (3,)
self.cur.execute(item, parameters)
# Add a new column to the inventory table to store tags.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 1 or currentVersion == 3:
logger.debug('In messages.dat database, adding tag field to the inventory table.')
item = '''ALTER TABLE inventory ADD tag blob DEFAULT '' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''update settings set value=? WHERE key='version';'''
parameters = (4,)
self.cur.execute(item, parameters)
if not shared.config.has_option('bitmessagesettings', 'userlocale'):
shared.config.set('bitmessagesettings', 'userlocale', 'system')
if not shared.config.has_option('bitmessagesettings', 'sendoutgoingconnections'):
shared.config.set('bitmessagesettings', 'sendoutgoingconnections', 'True')
# Raise the default required difficulty from 1 to 2
# With the change to protocol v3, this is obsolete.
if shared.config.getint('bitmessagesettings', 'settingsversion') == 6:
"""if int(shared.config.get('bitmessagesettings','defaultnoncetrialsperbyte')) == shared.networkDefaultProofOfWorkNonceTrialsPerByte:
shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
"""
shared.config.set('bitmessagesettings', 'settingsversion', '7')
# Add a new column to the pubkeys table to store the address version.
# We're going to trash all of our pubkeys and let them be redownloaded.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 4:
self.cur.execute( '''DROP TABLE pubkeys''')
self.cur.execute(
'''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''' )
self.cur.execute(
'''delete from inventory where objecttype = 'pubkey';''')
item = '''update settings set value=? WHERE key='version';'''
parameters = (5,)
self.cur.execute(item, parameters)
if not shared.config.has_option('bitmessagesettings', 'useidenticons'):
shared.config.set('bitmessagesettings', 'useidenticons', 'True')
if not shared.config.has_option('bitmessagesettings', 'identiconsuffix'): # acts as a salt
shared.config.set('bitmessagesettings', 'identiconsuffix', ''.join(random.choice("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") for x in range(12))) # a twelve character pseudo-password to salt the identicons
#Add settings to support no longer resending messages after a certain period of time even if we never get an ack
if shared.config.getint('bitmessagesettings', 'settingsversion') == 7:
shared.config.set(
'bitmessagesettings', 'stopresendingafterxdays', '')
shared.config.set(
'bitmessagesettings', 'stopresendingafterxmonths', '')
shared.config.set('bitmessagesettings', 'settingsversion', '8')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# Add a new table: objectprocessorqueue with which to hold objects
# that have yet to be processed if the user shuts down Bitmessage.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 5:
self.cur.execute( '''DROP TABLE knownnodes''')
self.cur.execute(
'''CREATE TABLE objectprocessorqueue (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
item = '''update settings set value=? WHERE key='version';'''
parameters = (6,)
self.cur.execute(item, parameters)
# changes related to protocol v3
# In table inventory and objectprocessorqueue, objecttype is now an integer (it was a human-friendly string previously)
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 6:
logger.debug('In messages.dat database, dropping and recreating the inventory table.')
self.cur.execute( '''DROP TABLE inventory''')
self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' )
self.cur.execute( '''DROP TABLE objectprocessorqueue''')
self.cur.execute( '''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
item = '''update settings set value=? WHERE key='version';'''
parameters = (7,)
self.cur.execute(item, parameters)
logger.debug('Finished dropping and recreating the inventory table.')
# With the change to protocol version 3, reset the user-settable difficulties to 1
if shared.config.getint('bitmessagesettings', 'settingsversion') == 8:
shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte))
shared.config.set('bitmessagesettings','defaultpayloadlengthextrabytes', str(shared.networkDefaultPayloadLengthExtraBytes))
previousTotalDifficulty = int(shared.config.getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / 320
previousSmallMessageDifficulty = int(shared.config.getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / 14000
shared.config.set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(previousTotalDifficulty * 1000))
shared.config.set('bitmessagesettings','maxacceptablepayloadlengthextrabytes', str(previousSmallMessageDifficulty * 1000))
shared.config.set('bitmessagesettings', 'settingsversion', '9')
# Adjust the required POW values for each of this user's addresses to conform to protocol v3 norms.
if shared.config.getint('bitmessagesettings', 'settingsversion') == 9:
for addressInKeysFile in shared.config.sections():
try:
previousTotalDifficulty = float(shared.config.getint(addressInKeysFile, 'noncetrialsperbyte')) / 320
previousSmallMessageDifficulty = float(shared.config.getint(addressInKeysFile, 'payloadlengthextrabytes')) / 14000
if previousTotalDifficulty <= 2:
previousTotalDifficulty = 1
if previousSmallMessageDifficulty < 1:
previousSmallMessageDifficulty = 1
shared.config.set(addressInKeysFile,'noncetrialsperbyte', str(int(previousTotalDifficulty * 1000)))
shared.config.set(addressInKeysFile,'payloadlengthextrabytes', str(int(previousSmallMessageDifficulty * 1000)))
except:
continue
shared.config.set('bitmessagesettings', 'maxdownloadrate', '0')
shared.config.set('bitmessagesettings', 'maxuploadrate', '0')
shared.config.set('bitmessagesettings', 'settingsversion', '10')
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# Are you hoping to add a new option to the keys.dat file of existing
# Bitmessage users or modify the SQLite database? Add it right above this line!
try:
testpayload = '\x00\x00'
t = ('1234', 1, testpayload, '12345678', 'no')
self.cur.execute( '''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t)
self.conn.commit()
self.cur.execute(
'''SELECT transmitdata FROM pubkeys WHERE hash='1234' ''')
queryreturn = self.cur.fetchall()
for row in queryreturn:
transmitdata, = row
self.cur.execute('''DELETE FROM pubkeys WHERE hash='1234' ''')
self.conn.commit()
if transmitdata == '':
logger.fatal('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n')
logger.fatal('PyBitmessage will now exit very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n')
os._exit(0)
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal('(While null value test) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
else:
return
else:
logger.error(err)
        # Let us check to see the last time we vacuumed the messages.dat file.
# If it has been more than a month let's do it now.
item = '''SELECT value FROM settings WHERE key='lastvacuumtime';'''
parameters = ''
self.cur.execute(item, parameters)
queryreturn = self.cur.fetchall()
for row in queryreturn:
value, = row
if int(value) < int(time.time()) - 2592000:
logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...')
try:
self.cur.execute( ''' VACUUM ''')
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal('(While VACUUM) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
else:
return
item = '''update settings set value=? WHERE key='lastvacuumtime';'''
parameters = (int(time.time()),)
self.cur.execute(item, parameters)
while True:
item = shared.sqlSubmitQueue.get()
if item == 'commit':
try:
self.conn.commit()
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal('(While committing) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
else:
return
elif item == 'exit':
self.conn.close()
logger.info('sqlThread exiting gracefully.')
return
elif item == 'movemessagstoprog':
logger.debug('the sqlThread is moving the messages.dat file to the local program directory.')
try:
self.conn.commit()
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal('(while movemessagstoprog) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
else:
return
self.conn.close()
shutil.move(
shared.lookupAppdataFolder() + 'messages.dat', 'messages.dat')
self.conn = sqlite3.connect('messages.dat')
self.conn.text_factory = str
self.cur = self.conn.cursor()
elif item == 'movemessagstoappdata':
logger.debug('the sqlThread is moving the messages.dat file to the Appdata folder.')
try:
self.conn.commit()
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal('(while movemessagstoappdata) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
else:
return
self.conn.close()
shutil.move(
'messages.dat', shared.lookupAppdataFolder() + 'messages.dat')
self.conn = sqlite3.connect(shared.appdata + 'messages.dat')
self.conn.text_factory = str
self.cur = self.conn.cursor()
elif item == 'deleteandvacuume':
self.cur.execute('''delete from inbox where folder='trash' ''')
self.cur.execute('''delete from sent where folder='trash' ''')
self.conn.commit()
try:
self.cur.execute( ''' VACUUM ''')
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal('(while deleteandvacuume) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
else:
return
else:
parameters = shared.sqlSubmitQueue.get()
# print 'item', item
# print 'parameters', parameters
try:
self.cur.execute(item, parameters)
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal('(while cur.execute) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
else:
return
else:
logger.fatal('Major error occurred when trying to execute a SQL statement within the sqlThread. Please tell Atheros about this error message or post it in the forum! Error occurred while trying to execute statement: "%s" Here are the parameters; you might want to censor this data with asterisks (***) as it can contain private information: %s. Here is the actual error message thrown by the sqlThread: %s', str(item), str(repr(parameters)), str(err))
logger.fatal('This program shall now abruptly exit!')
os._exit(0)
shared.sqlReturnQueue.put(self.cur.fetchall())
# shared.sqlSubmitQueue.task_done()
|
|
import unittest
from chainer import testing, Variable, grad, cuda
import numpy as np
from chainer.functions import as_strided
from chainer.functions.array.as_strided import _stride_array
def _broadcast_to(xp, x, shape):
if hasattr(xp, 'broadcast_to'):
return xp.broadcast_to(x, shape)
else:
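        # Older NumPy/CuPy versions lack broadcast_to; broadcasting x against an
        # empty dummy array of the target shape produces the same broadcast view.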
dummy = xp.empty(shape)
return xp.broadcast_arrays(x, dummy)[0]
@testing.parameterize(
{'dtype': np.float16},
{'dtype': np.float32},
{'dtype': np.float64},
{'dtype': np.int16},
{'dtype': np.int32},
{'dtype': np.int64}
)
class TestStrideArray(unittest.TestCase):
def check_flip(self, xp):
x = xp.arange(4, dtype=self.dtype)
y = _stride_array(x, (4,), (-1,), 3) # [3, 2, 1, 0]
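        # _stride_array takes the base array, shape, strides and a storage offset:
        # a stride of -1 starting at offset 3 walks arange(4) backwards.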
y_expected = x[::-1]
testing.assert_allclose(y, y_expected)
def test_flip_cpu(self):
self.check_flip(np)
@testing.attr.gpu
def test_flip_gpu(self):
self.check_flip(cuda.cupy)
def check_broadcast(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
y = _stride_array(x, (2, 3, 4), (0, 4, 1), 0)
y_expected = _broadcast_to(xp, x, (2, 3, 4))
testing.assert_allclose(y, y_expected)
def test_broadcast_cpu(self):
self.check_broadcast(np)
@testing.attr.gpu
def test_broadcast_gpu(self):
self.check_broadcast(cuda.cupy)
def check_unstride(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
y = _stride_array(x, (12,), (1,), 0)
y_expected = xp.arange(12, dtype=self.dtype)
testing.assert_allclose(y, y_expected)
def test_unstride_cpu(self):
self.check_unstride(np)
@testing.attr.gpu
def test_unstride_gpu(self):
self.check_unstride(cuda.cupy)
def check_general_stride(self, xp):
x = xp.arange(8, dtype=self.dtype)
y = _stride_array(x, (3, 3), (-1, 2), 3)
y_expected = xp.array(
[[3, 5, 7],
[2, 4, 6],
[1, 3, 5]],
dtype=self.dtype
)
testing.assert_allclose(y, y_expected)
def test_general_stride_cpu(self):
self.check_general_stride(np)
@testing.attr.gpu
def test_general_stride_gpu(self):
self.check_general_stride(cuda.cupy)
def check_invalid_negative_index(self, xp):
x = xp.arange(8, dtype=self.dtype)
with self.assertRaises(ValueError):
_stride_array(x, (3, 3), (-1, 2), 1)
def test_invalid_negative_index_cpu(self):
self.check_invalid_negative_index(np)
@testing.attr.gpu
def test_invalid_negative_index_gpu(self):
self.check_invalid_negative_index(cuda.cupy)
@testing.parameterize(
{'dtype': np.float16},
{'dtype': np.float32},
{'dtype': np.float64},
{'dtype': np.int16},
{'dtype': np.int32},
{'dtype': np.int64}
)
class TestAsStridedForward(unittest.TestCase):
def check_flip_forward(self, xp):
x = xp.arange(4, dtype=self.dtype)
v = Variable(x)
y = as_strided(v, (4,), (-1,), 3)
y_expected = x[::-1]
testing.assert_allclose(y.array, y_expected)
def test_flip_forward_cpu(self):
self.check_flip_forward(np)
@testing.attr.gpu
def test_flip_forward_gpu(self):
self.check_flip_forward(cuda.cupy)
def check_broadcast_forward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
v = Variable(x)
y = as_strided(v, (2, 3, 4), (0, 4, 1), 0)
y_expected = _broadcast_to(xp, x, (2, 3, 4))
testing.assert_allclose(y.array, y_expected)
def test_broadcast_forward_cpu(self):
self.check_broadcast_forward(np)
@testing.attr.gpu
def test_broadcast_forward_gpu(self):
self.check_broadcast_forward(cuda.cupy)
def check_unstride_forward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
v = Variable(x)
y = as_strided(v, (12,), (1,), 0)
y_expected = xp.arange(12, dtype=self.dtype)
testing.assert_allclose(y.array, y_expected)
def test_unstride_forward_cpu(self):
self.check_unstride_forward(np)
@testing.attr.gpu
def test_unstride_forward_gpu(self):
self.check_unstride_forward(cuda.cupy)
def check_general_stride(self, xp):
x = _stride_array(xp.arange(8, dtype=self.dtype), (3, 3), (-1, 2), 3)
# [[3., 5., 7.], [2., 4., 6.], [1., 3., 5.]]
v = Variable(x)
y = as_strided(v, (3, 3), (1, 2), 0)
        # [[0., 2., 4.], [1., 3., 5.], [2., 4., 6.]]
y_expected = _stride_array(xp.arange(8, dtype=self.dtype),
(3, 3), (1, 2), 0)
assert (y.array == y_expected).all()
def test_general_stride_forward_cpu(self):
self.check_general_stride(np)
@testing.attr.gpu
def test_general_stride_forward_gpu(self):
self.check_general_stride(cuda.cupy)
@testing.parameterize(
{'dtype': np.float16},
{'dtype': np.float32},
{'dtype': np.float64}
)
class TestAsStridedBackward(unittest.TestCase):
def check_flip_backward(self, xp):
x = xp.arange(4, dtype=self.dtype)
v = Variable(x)
y = as_strided(v, (4,), (-1,), 3)
y.grad = xp.ones((4,), dtype=self.dtype)
gx, = grad((y,), (v,))
testing.assert_allclose(gx.array, xp.ones((4,), dtype=self.dtype))
def test_flip_backward_cpu(self):
self.check_flip_backward(np)
@testing.attr.gpu
def test_flip_backward_gpu(self):
self.check_flip_backward(cuda.cupy)
def check_broadcast_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
v = Variable(x)
y = as_strided(v, (2, 3, 4), (0, 4, 1), 0)
y.grad = xp.ones((2, 3, 4), dtype=self.dtype)
gx, = grad((y,), (v,))
testing.assert_allclose(gx.array,
xp.ones(x.shape, dtype=self.dtype) * 2)
def test_broadcast_backward_cpu(self):
self.check_broadcast_backward(np)
@testing.attr.gpu
def test_broadcast_backward_gpu(self):
self.check_broadcast_backward(cuda.cupy)
def check_unstride_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
v = Variable(x)
y = as_strided(v, (12,), (1,), 0)
y.grad = xp.ones((12,), dtype=self.dtype)
gx, = grad((y,), (v,))
testing.assert_allclose(gx.array, xp.ones(x.shape, dtype=self.dtype))
def test_unstride_backward_cpu(self):
self.check_unstride_backward(np)
@testing.attr.gpu
def test_unstride_backward_gpu(self):
self.check_unstride_backward(cuda.cupy)
def check_general_stride_backward(self, xp):
x = _stride_array(xp.arange(8, dtype=self.dtype), (3, 3), (-1, 2), 3)
# [[3., 5., 7.], [2., 4., 6.], [1., 3., 5.]]
v = Variable(x)
y = as_strided(v, (3, 3), (1, 2), 0)
        # [[0., 2., 4.], [1., 3., 5.], [2., 4., 6.]]
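        # x is itself an overlapping strided view of arange(8), so several cells of
        # x alias the same underlying element; the fractional values in the expected
        # gradient below reflect how as_strided's backward spreads gradient over
        # those aliased positions.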
y.grad = xp.ones(y.shape, dtype=self.dtype)
gx, = grad((y,), (v,))
testing.assert_allclose(gx.array,
xp.array([
[0.5, 0.5, 0.],
[2., 2., 1.],
[1., 0.5, 0.5]
], dtype=self.dtype)
)
def test_general_stride_backward_cpu(self):
self.check_general_stride_backward(np)
@testing.attr.gpu
def test_general_stride_backward_gpu(self):
self.check_general_stride_backward(cuda.cupy)
@testing.parameterize(
{'dtype': np.int16},
{'dtype': np.int32},
{'dtype': np.int64}
)
class TestAsStridedBackwardInvalidType(unittest.TestCase):
def check_flip_backward(self, xp):
x = xp.arange(4, dtype=self.dtype)
v = Variable(x)
y = as_strided(v, (4,), (-1,), 3)
y.grad = xp.ones((4,), dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = grad((y,), (v,))
def test_flip_backward_cpu(self):
self.check_flip_backward(np)
@testing.attr.gpu
def test_flip_backward_gpu(self):
self.check_flip_backward(cuda.cupy)
def check_broadcast_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
v = Variable(x)
y = as_strided(v, (2, 3, 4), (0, 4, 1), 0)
y.grad = xp.ones((2, 3, 4), dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = grad((y,), (v,))
def test_broadcast_backward_cpu(self):
self.check_broadcast_backward(np)
@testing.attr.gpu
def test_broadcast_backward_gpu(self):
self.check_broadcast_backward(cuda.cupy)
def check_unstride_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
v = Variable(x)
y = as_strided(v, (12,), (1,), 0)
y.grad = xp.ones((12,), dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = grad((y,), (v,))
def test_unstride_backward_cpu(self):
self.check_unstride_backward(np)
@testing.attr.gpu
def test_unstride_backward_gpu(self):
self.check_unstride_backward(cuda.cupy)
def check_general_stride_backward(self, xp):
x = _stride_array(xp.arange(8, dtype=self.dtype), (3, 3), (-1, 2), 3)
# [[3., 5., 7.], [2., 4., 6.], [1., 3., 5.]]
v = Variable(x)
y = as_strided(v, (3, 3), (1, 2), 0)
# [[0., 2., 4.], [1., 3., 5.,], [2., 4., 6.]]
y.grad = xp.ones(y.shape, dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = grad((y,), (v,))
def test_general_stride_backward_cpu(self):
self.check_general_stride_backward(np)
@testing.attr.gpu
def test_general_stride_backward_gpu(self):
self.check_general_stride_backward(cuda.cupy)
testing.run_module(__name__, __file__)
|
|
from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import os
from operator import attrgetter
import sys
from docker.errors import APIError
from .container import Container, get_container_name
from .progress_stream import stream_output, StreamOutputError
log = logging.getLogger(__name__)
DOCKER_CONFIG_KEYS = [
'cap_add',
'cap_drop',
'cpu_shares',
'command',
'detach',
'dns',
'dns_search',
'domainname',
'entrypoint',
'env_file',
'environment',
'hostname',
'image',
'mem_limit',
'net',
'ports',
'privileged',
'restart',
'stdin_open',
'tty',
'user',
'volumes',
'volumes_from',
'working_dir',
]
DOCKER_CONFIG_HINTS = {
'cpu_share' : 'cpu_shares',
'link' : 'links',
'port' : 'ports',
'privilege' : 'privileged',
'priviliged': 'privileged',
'privilige' : 'privileged',
'volume' : 'volumes',
'workdir' : 'working_dir',
}
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'dns',
'dns_search',
'env_file',
'net',
'privileged',
'restart',
]
VALID_NAME_CHARS = '[a-zA-Z0-9]'
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class CannotBeScaledError(Exception):
pass
class ConfigError(ValueError):
pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
ServiceName = namedtuple('ServiceName', 'project service number')
class Service(object):
def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, **options):
if not re.match('^%s+$' % VALID_NAME_CHARS, name):
raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built or use an existing image, not both.' % name)
for filename in get_env_files(options):
if not os.path.exists(filename):
raise ConfigError("Couldn't find env file for service %s: %s" % (name, filename))
supported_options = DOCKER_CONFIG_KEYS + ['build', 'expose',
'external_links']
for k in options:
if k not in supported_options:
msg = "Unsupported config option for %s service: '%s'" % (name, k)
if k in DOCKER_CONFIG_HINTS:
msg += " (did you mean '%s'?)" % DOCKER_CONFIG_HINTS[k]
raise ConfigError(msg)
self.name = name
self.client = client
self.project = project
self.links = links or []
self.external_links = external_links or []
self.volumes_from = volumes_from or []
self.options = options
def containers(self, stopped=False, one_off=False):
return [Container.from_ps(self.client, container)
for container in self.client.containers(all=stopped)
if self.has_container(container, one_off=one_off)]
def has_container(self, container, one_off=False):
"""Return True if `container` was created to fulfill this service."""
name = get_container_name(container)
if not name or not is_valid_name(name, one_off):
return False
project, name, _number = parse_name(name)
return project == self.project and name == self.name
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
for container in self.client.containers():
if not self.has_container(container):
continue
_, _, container_number = parse_name(get_container_name(container))
if container_number == number:
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s..." % c.name)
c.stop(**options)
def kill(self, **options):
for c in self.containers():
log.info("Killing %s..." % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s..." % c.name)
c.restart(**options)
def scale(self, desired_num):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if not self.can_be_scaled():
raise CannotBeScaledError()
# Create enough containers
containers = self.containers(stopped=True)
while len(containers) < desired_num:
log.info("Creating %s..." % self._next_container_name(containers))
containers.append(self.create_container(detach=True))
running_containers = []
stopped_containers = []
for c in containers:
if c.is_running:
running_containers.append(c)
else:
stopped_containers.append(c)
running_containers.sort(key=lambda c: c.number)
stopped_containers.sort(key=lambda c: c.number)
# Stop containers
while len(running_containers) > desired_num:
c = running_containers.pop()
log.info("Stopping %s..." % c.name)
c.stop(timeout=1)
stopped_containers.append(c)
# Start containers
while len(running_containers) < desired_num:
c = stopped_containers.pop(0)
log.info("Starting %s..." % c.name)
self.start_container(c)
running_containers.append(c)
self.remove_stopped()
def remove_stopped(self, **options):
for c in self.containers(stopped=True):
if not c.is_running:
log.info("Removing %s..." % c.name)
c.remove(**options)
def create_container(self,
one_off=False,
insecure_registry=False,
do_build=True,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
container_options = self._get_container_create_options(
override_options,
one_off=one_off)
if (do_build and
self.can_be_built() and
not self.client.images(name=self.full_name)):
self.build()
try:
return Container.create(self.client, **container_options)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
log.info('Pulling image %s...' % container_options['image'])
output = self.client.pull(
container_options['image'],
stream=True,
insecure_registry=insecure_registry
)
stream_output(output, sys.stdout)
return Container.create(self.client, **container_options)
raise
def recreate_containers(self, insecure_registry=False, do_build=True, **override_options):
"""
If a container for this service doesn't exist, create and start one. If there are
any, stop them, create+start new ones, and remove the old containers.
"""
containers = self.containers(stopped=True)
if not containers:
log.info("Creating %s..." % self._next_container_name(containers))
container = self.create_container(
insecure_registry=insecure_registry,
do_build=do_build,
**override_options)
self.start_container(container)
return [(None, container)]
else:
tuples = []
for c in containers:
log.info("Recreating %s..." % c.name)
tuples.append(self.recreate_container(c, insecure_registry=insecure_registry, **override_options))
return tuples
def recreate_container(self, container, **override_options):
"""Recreate a container. An intermediate container is created so that
the new container has the same name, while still supporting
`volumes-from` the original container.
"""
try:
container.stop()
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
intermediate_container = Container.create(
self.client,
image=container.image,
entrypoint=['/bin/echo'],
command=[],
detach=True,
)
intermediate_container.start(volumes_from=container.id)
intermediate_container.wait()
container.remove()
options = dict(override_options)
new_container = self.create_container(do_build=False, **options)
self.start_container(new_container, intermediate_container=intermediate_container)
intermediate_container.remove()
return (intermediate_container, new_container)
def start_container_if_stopped(self, container, **options):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container, **options)
def start_container(self, container, intermediate_container=None, **override_options):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in options.get('volumes') or []
if ':' in volume)
privileged = options.get('privileged', False)
net = options.get('net', 'bridge')
dns = options.get('dns', None)
dns_search = options.get('dns_search', None)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
restart = parse_restart_spec(options.get('restart', None))
container.start(
links=self._get_links(link_to_self=options.get('one_off', False)),
port_bindings=port_bindings,
binds=volume_bindings,
volumes_from=self._get_volumes_from(intermediate_container),
privileged=privileged,
network_mode=net,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
)
return container
def start_or_create_containers(
self,
insecure_registry=False,
detach=False,
do_build=True):
containers = self.containers(stopped=True)
if not containers:
log.info("Creating %s..." % self._next_container_name(containers))
new_container = self.create_container(
insecure_registry=insecure_registry,
detach=detach,
do_build=do_build,
)
return [self.start_container(new_container)]
else:
return [self.start_container_if_stopped(c) for c in containers]
def get_linked_names(self):
return [s.name for (s, _) in self.links]
def _next_container_name(self, all_containers, one_off=False):
bits = [self.project, self.name]
if one_off:
bits.append('run')
return '_'.join(bits + [str(self._next_container_number(all_containers))])
def _next_container_number(self, all_containers):
numbers = [parse_name(c.name).number for c in all_containers]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.external_links:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self, intermediate_container=None):
volumes_from = []
for volume_source in self.volumes_from:
if isinstance(volume_source, Service):
containers = volume_source.containers(stopped=True)
if not containers:
volumes_from.append(volume_source.create_container().id)
else:
volumes_from.extend(map(attrgetter('id'), containers))
elif isinstance(volume_source, Container):
volumes_from.append(volume_source.id)
if intermediate_container:
volumes_from.append(intermediate_container.id)
return volumes_from
def _get_container_create_options(self, override_options, one_off=False):
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
container_options['name'] = self._next_container_name(
self.containers(stopped=True, one_off=one_off),
one_off)
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port in all_ports:
port = str(port)
if ':' in port:
port = port.split(':')[-1]
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(container_options)
if self.can_be_built():
container_options['image'] = self.full_name
else:
container_options['image'] = self._get_image_name(container_options['image'])
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
return container_options
def _get_image_name(self, image):
repo, tag = parse_repository_tag(image)
if tag == "":
tag = "latest"
return '%s:%s' % (repo, tag)
def build(self, no_cache=False):
log.info('Building %s...' % self.name)
build_output = self.client.build(
self.options['build'],
tag=self.full_name,
stream=True,
rm=True,
nocache=no_cache,
)
try:
all_events = stream_output(build_output, sys.stdout)
        except StreamOutputError as e:
raise BuildError(self, unicode(e))
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def can_be_scaled(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return False
return True
def pull(self, insecure_registry=False):
if 'image' in self.options:
image_name = self._get_image_name(self.options['image'])
log.info('Pulling %s (%s)...' % (self.name, image_name))
self.client.pull(
image_name,
insecure_registry=insecure_registry
)
NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')
def is_valid_name(name, one_off=False):
match = NAME_RE.match(name)
if match is None:
return False
if one_off:
return match.group(3) == 'run_'
else:
return match.group(3) is None
def parse_name(name):
match = NAME_RE.match(name)
(project, service_name, _, suffix) = match.groups()
return ServiceName(project, service_name, int(suffix))
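# A minimal usage sketch of the container-name helpers above (an illustrative
# addition, not part of the original module): container names follow the
# "<project>_<service>_[run_]<number>" pattern matched by NAME_RE.
def _example_container_names():  # illustrative only
    assert is_valid_name('myproject_web_1')
    assert not is_valid_name('myproject_web_run_1')
    assert is_valid_name('myproject_web_run_1', one_off=True)
    assert parse_name('myproject_web_1') == ServiceName('myproject', 'web', 1)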
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
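# Illustrative sketch (not part of the original module) of how restart specs
# from the config are parsed into the restart_policy dict passed to the
# Docker client when starting a container.
def _example_parse_restart_spec():  # illustrative only
    assert parse_restart_spec(None) is None
    assert parse_restart_spec('always') == {'Name': 'always', 'MaximumRetryCount': 0}
    assert parse_restart_spec('on-failure:5') == {
        'Name': 'on-failure', 'MaximumRetryCount': 5}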
def parse_volume_spec(volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
return VolumeSpec(None, parts[0], 'rw')
if len(parts) == 2:
parts.append('rw')
external, internal, mode = parts
if mode not in ('rw', 'ro'):
raise ConfigError("Volume %s has invalid mode (%s), should be "
"one of: rw, ro." % (volume_config, mode))
return VolumeSpec(external, internal, mode)
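# Illustrative sketch (not part of the original module) of volume string
# parsing into the VolumeSpec namedtuple defined above.
def _example_parse_volume_spec():  # illustrative only
    assert parse_volume_spec('/data') == VolumeSpec(None, '/data', 'rw')
    assert parse_volume_spec('/host:/data') == VolumeSpec('/host', '/data', 'rw')
    assert parse_volume_spec('/host:/data:ro') == VolumeSpec('/host', '/data', 'ro')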
def parse_repository_tag(s):
if ":" not in s:
return s, ""
repo, tag = s.rsplit(":", 1)
if "/" in tag:
return s, ""
return repo, tag
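# Illustrative sketch (not part of the original module): splitting an image
# reference into repository and tag, without mistaking a registry port for a tag.
def _example_parse_repository_tag():  # illustrative only
    assert parse_repository_tag('ubuntu') == ('ubuntu', '')
    assert parse_repository_tag('ubuntu:14.04') == ('ubuntu', '14.04')
    assert parse_repository_tag('localhost:5000/ubuntu') == ('localhost:5000/ubuntu', '')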
def build_volume_binding(volume_spec):
internal = {'bind': volume_spec.internal, 'ro': volume_spec.mode == 'ro'}
external = os.path.expanduser(volume_spec.external)
return os.path.abspath(os.path.expandvars(external)), internal
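# Illustrative sketch (not part of the original module): turning a VolumeSpec
# into the (host_path, bind_config) pair used for the 'binds' argument when
# starting a container.
def _example_build_volume_binding():  # illustrative only
    assert build_volume_binding(VolumeSpec('/host/data', '/data', 'ro')) == (
        '/host/data', {'bind': '/data', 'ro': True})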
def build_port_bindings(ports):
port_bindings = {}
for port in ports:
internal_port, external = split_port(port)
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
return port_bindings
def split_port(port):
parts = str(port).split(':')
if not 1 <= len(parts) <= 3:
raise ConfigError('Invalid port "%s", should be '
'[[remote_ip:]remote_port:]port[/protocol]' % port)
if len(parts) == 1:
internal_port, = parts
return internal_port, None
if len(parts) == 2:
external_port, internal_port = parts
return internal_port, external_port
external_ip, external_port, internal_port = parts
return internal_port, (external_ip, external_port or None)
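# Illustrative sketch (not part of the original module): how port strings from
# the config become the port_bindings mapping used when starting a container.
def _example_port_parsing():  # illustrative only
    assert split_port('8000') == ('8000', None)
    assert split_port('80:8000') == ('8000', '80')
    assert split_port('127.0.0.1:80:8000') == ('8000', ('127.0.0.1', '80'))
    assert build_port_bindings(['80:8000', '443:8443']) == {
        '8000': ['80'], '8443': ['443']}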
def get_env_files(options):
env_files = options.get('env_file', [])
if not isinstance(env_files, list):
env_files = [env_files]
return env_files
def merge_environment(options):
env = {}
for f in get_env_files(options):
env.update(env_vars_from_file(f))
if 'environment' in options:
if isinstance(options['environment'], list):
env.update(dict(split_env(e) for e in options['environment']))
else:
env.update(options['environment'])
return dict(resolve_env(k, v) for k, v in env.iteritems())
def split_env(env):
if '=' in env:
return env.split('=', 1)
else:
return env, None
def resolve_env(key, val):
if val is not None:
return key, val
elif key in os.environ:
return key, os.environ[key]
else:
return key, ''
def env_vars_from_file(filename):
"""
    Read in a line-delimited file of environment variables.
"""
env = {}
for line in open(filename, 'r'):
line = line.strip()
if line and not line.startswith('#'):
k, v = split_env(line)
env[k] = v
return env
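# Illustrative sketch (not part of the original module) of environment merging:
# values from env_file(s) are overridden by the 'environment' option, and
# entries given without a value are resolved from the current os.environ.
# 'ILLUSTRATIVE_VAR' is a placeholder name used only for this example.
def _example_merge_environment():  # illustrative only
    os.environ['ILLUSTRATIVE_VAR'] = 'from-shell'
    merged = merge_environment({'environment': ['FOO=bar', 'ILLUSTRATIVE_VAR']})
    assert merged == {'FOO': 'bar', 'ILLUSTRATIVE_VAR': 'from-shell'}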
|
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtCore, QtGui
from core import debug
from core.thumbnails import ThumbnailCache
from core.collection.search import SearchCompiler, SearchParseError
from core.db.locator import FileLocator, DBLocator
from core.system import default_connections_file
from core.external_connection import ExtConnectionList
from db import VistrailsDBException
from db.services.io import test_db_connection
from db.services.query import runLogQuery, runWorkflowQuery
from gui.theme import CurrentTheme
from gui.open_db_window import QDBConnectionList, QConnectionDBSetupWindow
from gui.vistrails_palette import QVistrailsPaletteInterface
class QExplorerWindow(QtGui.QWidget, QVistrailsPaletteInterface):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.splitter = QtGui.QSplitter()
self.splitter.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.splitter)
self.connectionList = QDBConnectionList(self)
dbGrid = QtGui.QGridLayout(self)
dbGrid.setMargin(0)
dbGrid.setSpacing(0)
dbGrid.addWidget(self.connectionList, 2, 1, QtCore.Qt.AlignLeft)
self.addAct = QtGui.QAction("Add Database", self)
self.removeAct = QtGui.QAction("Remove Database", self)
self.addButton = QtGui.QToolButton()
self.addButton.setToolTip("Create a new database connection")
self.addButton.setDefaultAction(self.addAct)
self.addButton.setAutoRaise(True)
self.removeButton = QtGui.QToolButton()
self.removeButton.setToolTip("Remove the selected connection from list")
self.removeButton.setDefaultAction(self.removeAct)
self.removeButton.setAutoRaise(True)
self.removeButton.setEnabled(False)
panelButtonsLayout = QtGui.QHBoxLayout()
panelButtonsLayout.setMargin(0)
panelButtonsLayout.setSpacing(0)
panelButtonsLayout.addWidget(self.addButton)
panelButtonsLayout.addWidget(self.removeButton)
dbGrid.addLayout(panelButtonsLayout, 1, 1, QtCore.Qt.AlignLeft)
dbWidget = QDBWidget(parent, self.connectionList)
dbWidget.setLayout(dbGrid)
self.splitter.addWidget(dbWidget)
self.connect(self.addAct,
QtCore.SIGNAL('triggered()'),
self.showConnConfig)
self.connect(self.removeAct,
QtCore.SIGNAL('triggered()'),
self.connectionList.removeConnection)
self.connect(self.connectionList,
QtCore.SIGNAL('itemSelectionChanged()'),
self.updateEditButtons)
self.connect(self.connectionList,
QtCore.SIGNAL('itemSelectionChanged()'),
self.checkConnection)
self.tabView = QtGui.QTabWidget()
self.tabView.setContentsMargins(0, 0, 0, 0)
self.splitter.addWidget(self.tabView)
# self.workflowSearch = WorkflowSearchWidget(self.connectionList)
# self.tabView.addTab(self.workflowSearch, "Search for Workflows")
self.executionSearch = ExecutionSearchWidget(self.connectionList)
self.tabView.addTab(self.executionSearch, "Search for Workflow Executions")
self.setLayout(self.layout)
self.setWindowTitle('Provenance Browser')
self.resize(QtCore.QSize(800, 600))
# self.workflowSearch.setup_results()
self.executionSearch.setup_results()
def showConnConfig(self, *args, **keywords):
return showConnConfig(self.connectionList, *args, **keywords)
def updateEditButtons(self):
"""updateEditButtons() -> None
        It will enable/disable the connection buttons according to the
        selection.
        """
        # self.workflowSearch is not created (its widget is commented out in
        # __init__ above), so only the execution search results are refreshed.
        # self.workflowSearch.setup_results()
self.executionSearch.setup_results()
id = self.connectionList.getCurrentItemId()
if id != -1:
self.removeButton.setEnabled(True)
else:
self.removeButton.setEnabled(False)
def checkConnection(self):
checkConnection(self.connectionList)
def getConnectionInfo(self, id):
return getConnectionInfo(self.connectionList, id)
def showConnConfig(connectionList, *args, **keywords):
"""showConnConfig(*args, **keywords) -> None
shows a window to configure the connection. The valid keywords
are defined in QConnectionDBSetupWindow.__init__()
"""
dialog = QConnectionDBSetupWindow(**keywords)
if dialog.exec_() == QtGui.QDialog.Accepted:
config = {'id': int(dialog.id),
'name': str(dialog.nameEdt.text()),
'host': str(dialog.hostEdt.text()),
'port': int(dialog.portEdt.value()),
'user': str(dialog.userEdt.text()),
'passwd': str(dialog.passwdEdt.text()),
'db': str(dialog.databaseEdt.text())}
id = connectionList.setConnectionInfo(**config)
connectionList.setCurrentId(id)
return True
else:
return False
def getConnectionInfo(connectionList, id):
"""getConnectionInfo(connectionList: QDBConnectionList, id: int) -> dict
Returns info of ExtConnection """
    conn = connectionList.get_connection(id)
    if conn is not None:
        key = str(conn.id) + "." + conn.name + "." + conn.host
        passwd = DBLocator.keyChain.get_key(key)
        config = {'id': conn.id,
                  'name': conn.name,
                  'host': conn.host,
                  'port': conn.port,
                  'user': conn.user,
                  'passwd': passwd,
                  'db': conn.database}
    else:
        config = None
    return config
def checkConnection(connectionList):
"""checkConnection() -> None
    It will check whether the connection works or whether a password is necessary.
"""
conn_id = connectionList.getCurrentItemId()
if conn_id != -1:
conn = connectionList.get_connection(conn_id)
config = getConnectionInfo(connectionList, conn_id)
if config != None:
try:
config_name = config['name']
del config['name']
config_id = config['id']
del config['id']
test_db_connection(config)
except VistrailsDBException:
# assume connection is wrong
config['name'] = config_name
config['id'] = config_id
config["create"] = False
showConnConfig(connectionList, **config)
class QDBWidget(QtGui.QWidget):
""" Custom widget for handling the showConnConfig """
def __init__(self, parent, connectionList):
QtGui.QWidget.__init__(self, parent)
self.connectionList = connectionList
def showConnConfig(self, *args, **keywords):
return showConnConfig(self.connectionList, *args, **keywords)
class ExecutionSearchWidget(QtGui.QSplitter):
def __init__(self, connectionList):
QtGui.QSplitter.__init__(self)
self.connectionList = connectionList
self.conn = None
self.config = None
self.offset = 0
self.limit = 50
self.numRows = None
self.vistrail = None
self.version = None
self.fromTime = None
self.toTime = None
self.user = None
self.thumbs = None
self.completed = None
self.modules = []
self.setOrientation(QtCore.Qt.Vertical)
self.searchLayout = QtGui.QGridLayout()
self.vistrailEditCheckBox = QtGui.QCheckBox()
self.vistrailEditCheckBox.setToolTip('Check to enable this search option')
self.vistrailEdit = QtGui.QLineEdit()
self.searchLayout.addWidget(self.vistrailEditCheckBox, 0,0)
self.searchLayout.addWidget(QtGui.QLabel('Vistrail:'), 0,1)
self.searchLayout.addWidget(self.vistrailEdit, 0,2)
self.versionEditCheckBox = QtGui.QCheckBox()
self.versionEditCheckBox.setToolTip('Check to enable this search option')
self.versionEdit = QtGui.QLineEdit()
self.searchLayout.addWidget(self.versionEditCheckBox, 0,3)
self.searchLayout.addWidget(QtGui.QLabel('Version:'), 0,4)
self.searchLayout.addWidget(self.versionEdit, 0,5)
self.fromTimeEditCheckBox = QtGui.QCheckBox()
self.fromTimeEditCheckBox.setToolTip('Check to enable this search option')
self.fromTimeEdit = QtGui.QDateTimeEdit(QtCore.QDateTime.currentDateTime().addDays(-1))
self.fromTimeEdit.setDisplayFormat('yyyy-MM-d H:mm:ss')
self.fromTimeEdit.setCalendarPopup(True)
self.searchLayout.addWidget(self.fromTimeEditCheckBox, 1,0)
self.searchLayout.addWidget(QtGui.QLabel('From time:'), 1,1)
self.searchLayout.addWidget(self.fromTimeEdit, 1,2)
self.toTimeEditCheckBox = QtGui.QCheckBox()
self.toTimeEditCheckBox.setToolTip('Check to enable this search option')
self.toTimeEdit = QtGui.QDateTimeEdit(QtCore.QDateTime.currentDateTime())
self.toTimeEdit.setDisplayFormat('yyyy-MM-d H:mm:ss')
self.toTimeEdit.setCalendarPopup(True)
self.searchLayout.addWidget(self.toTimeEditCheckBox, 1,3)
self.searchLayout.addWidget(QtGui.QLabel('To time:'), 1,4)
self.searchLayout.addWidget(self.toTimeEdit, 1,5)
self.userEditCheckBox = QtGui.QCheckBox()
self.userEditCheckBox.setToolTip('Check to enable this search option')
self.userEdit = QtGui.QLineEdit()
self.searchLayout.addWidget(self.userEditCheckBox, 2,0)
self.searchLayout.addWidget(QtGui.QLabel('User:'), 2,1)
self.searchLayout.addWidget(self.userEdit, 2,2)
self.completedEditCheckBox = QtGui.QCheckBox()
self.completedEditCheckBox.setToolTip('Check to enable this search option')
self.completedEdit = QtGui.QComboBox()
self.completedEdit.addItems(['Yes', 'No', 'Error'])
self.searchLayout.addWidget(self.completedEditCheckBox, 2,3)
self.searchLayout.addWidget(QtGui.QLabel('Completed:'), 2,4)
self.searchLayout.addWidget(self.completedEdit, 2,5)
self.moduleEditCheckBox = QtGui.QCheckBox()
self.moduleEditCheckBox.setToolTip('Check to enable this search option')
self.moduleEdit = QtGui.QLineEdit()
self.moduleEdit.setToolTip('Add module names separated by ,\nResult type can be specified by using: ModuleName:Yes/No/Error')
self.searchLayout.addWidget(self.moduleEditCheckBox, 3,0)
self.searchLayout.addWidget(QtGui.QLabel('Modules:'), 3,1)
self.searchLayout.addWidget(self.moduleEdit, 3,2)
self.thumbsCheckBox = QtGui.QCheckBox()
self.thumbsCheckBox.setToolTip('Check to view result thumbnails (may be slow)')
self.searchLayout.addWidget(self.thumbsCheckBox, 3,3)
self.searchLayout.addWidget(QtGui.QLabel('View thumbs'), 3,4)
self.searchButton = QtGui.QPushButton("Search")
self.searchButton.setStatusTip("Search the database for executions")
self.searchLayout.addWidget(self.searchButton, 3, 5)
self.searchWidget = QtGui.QWidget()
self.searchWidget.setLayout(self.searchLayout)
self.addWidget(self.searchWidget)
self.itemView = QtGui.QTreeWidget(self.parent())
self.addWidget(self.itemView)
statusGrid = QtGui.QGridLayout()
statusGrid.setMargin(0)
statusGrid.setSpacing(0)
statusWidget = QtGui.QWidget()
statusWidget.setLayout(statusGrid)
self.addWidget(statusWidget)
self.setStretchFactor(0, 0)
self.setStretchFactor(1, 1)
self.setStretchFactor(2, 0)
statusLayout = QtGui.QHBoxLayout()
statusLayout.setSpacing(5)
statusGrid.addLayout(statusLayout, 2, 1, QtCore.Qt.AlignLeft)
self.prevButton = QtGui.QPushButton("Previous")
self.prevButton.setStatusTip("Show previous results")
self.prevButton.hide()
statusLayout.addWidget(self.prevButton)
self.nextButton = QtGui.QPushButton("Next")
self.nextButton.setStatusTip("Show next results")
self.nextButton.hide()
statusLayout.addWidget(self.nextButton)
self.statusText = QtGui.QLabel('No query specified')
statusLayout.addWidget(self.statusText)
self.connect(self.searchButton,
QtCore.SIGNAL('clicked()'),
self.newQuery)
self.connect(self.prevButton,
QtCore.SIGNAL('clicked()'),
self.gotoPrevious)
self.connect(self.nextButton,
QtCore.SIGNAL('clicked()'),
self.gotoNext)
self.connect(self.itemView,
QtCore.SIGNAL('itemDoubleClicked(QTreeWidgetItem *,int)'),
self.showItem)
def newQuery(self):
self.offset = 0
self.vistrail = None
if self.vistrailEditCheckBox.isChecked():
self.vistrail = str(self.vistrailEdit.text()).strip()
self.version = None
if self.versionEditCheckBox.isChecked():
self.version = str(self.versionEdit.text()).strip()
self.fromTime = None
if self.fromTimeEditCheckBox.isChecked():
self.fromTime = str(
self.fromTimeEdit.dateTime().toString('yyyy-MM-d H:mm:ss'))
self.toTime = None
if self.toTimeEditCheckBox.isChecked():
self.toTime = str(
self.toTimeEdit.dateTime().toString('yyyy-MM-d H:mm:ss'))
self.user = None
if self.userEditCheckBox.isChecked():
self.user = str(self.userEdit.text()).strip()
self.completed = None
if self.completedEditCheckBox.isChecked():
self.completed = str(self.completedEdit.currentText()).strip()
self.modules = []
if self.moduleEditCheckBox.isChecked():
# create list of [moduleType, completed] pairs
modules = str(self.moduleEdit.text()).strip()
for k in [i.strip() for i in modules.split(',')]:
v = k.split(':')
if len(v)>1:
self.modules.append((v[0].strip(), v[1].strip()))
else:
self.modules.append((v[0].strip(), None))
self.thumbs = self.thumbsCheckBox.isChecked()
conn_id = self.connectionList.getCurrentItemId()
self.conn = self.connectionList.get_connection(conn_id)
self.config = getConnectionInfo(self.connectionList, conn_id)
self.searchDatabase()
def searchDatabase(self):
self.statusText.setText("Running query...")
self.repaint()
# create connection
conn = self.conn
config = self.config
if conn.dbtype == 'MySQL':
            # removing extra keyword arguments for MySQLdb
config_name = config['name']
del config['name']
config_id = config['id']
del config['id']
wf_exec_list = runLogQuery(config,
vistrail=self.vistrail, version=self.version,
fromTime=self.fromTime, toTime=self.toTime,
user=self.user, completed=self.completed,
offset=self.offset, limit=self.limit,
modules=self.modules, thumbs=self.thumbs)
if 0 == self.offset:
wf_exec_list, self.numRows = wf_exec_list
if conn.dbtype == 'MySQL':
config['name'] = config_name
config['id'] = config_id
self.setup_results(wf_exec_list)
self.updateStatus()
def gotoPrevious(self):
self.offset = max(self.offset - self.limit, 0)
self.searchDatabase()
def gotoNext(self):
self.offset = min(self.offset + self.limit, self.numRows)
self.searchDatabase()
def updateStatus(self):
if self.offset > 0:
self.prevButton.show()
else:
self.prevButton.hide()
last = self.offset + self.limit
if last < self.numRows:
self.nextButton.show()
else:
self.nextButton.hide()
last = self.numRows
if self.numRows:
self.statusText.setText("Showing %s-%s out of %s results" %
(self.offset+1, last, self.numRows))
else:
self.statusText.setText("No matching results found")
def setup_results(self, wf_exec_list=[]):
self.itemView.clear()
self.itemView.setIconSize(QtCore.QSize(32,32))
columns = ['Vistrail', 'Version', 'Start time', 'End time', 'Completed']
self.itemView.setColumnCount(len(columns))
self.itemView.setHeaderLabels(columns)
self.itemView.setSortingEnabled(True)
for wf_exec in wf_exec_list:
item = QExecutionItem(wf_exec)
self.itemView.addTopLevelItem(item)
self.itemView.header().setResizeMode(4, QtGui.QHeaderView.ResizeToContents)
self.itemView.header().setResizeMode(3, QtGui.QHeaderView.ResizeToContents)
self.itemView.header().setResizeMode(2, QtGui.QHeaderView.ResizeToContents)
self.itemView.header().setResizeMode(1, QtGui.QHeaderView.Interactive)
self.itemView.header().setResizeMode(0, QtGui.QHeaderView.Interactive)
self.itemView.header().resizeSections(QtGui.QHeaderView.Stretch)
conn_id = self.connectionList.getCurrentItemId()
if conn_id < 0:
self.statusText.setText("Select a database")
def showItem(self, item, col):
(v_name, v_id, log_id, v_version, version_name, e_id,
ts_start, ts_end, user, completed, thumb) = item.wf_exec
config = self.config
locator = \
DBLocator(config['host'],
config['port'],
config['db'],
config['user'],
config['passwd'],
config['name'],
obj_id=v_id,
obj_type='vistrail',
workflow_exec=ts_start,
connection_id=config.get('id', None))
#print "url:", locator.to_url()
import gui.application
app = gui.application.get_vistrails_application()
open_vistrail = app.builderWindow.open_vistrail_without_prompt
workflow_exec = locator.kwargs.get('workflow_exec', None)
args = {}
if workflow_exec:
args['workflow_exec'] = workflow_exec
args['version'] = version_name if version_name else v_version
open_vistrail(locator, **args)
class QExecutionItem(QtGui.QTreeWidgetItem):
def __init__(self, wf_exec, parent=None):
(v_name, v_id, log_id, v_version, version_name, e_id,
ts_start, ts_end, user, completed, thumb) = wf_exec
version = version_name if version_name else v_version
completed = {'-1':'Error', '0':'No', '1':'Yes'}.get(str(completed), 'Unknown')
labels = (str(v_name), str(version),
str(ts_start), str(ts_end), str(completed))
QtGui.QTreeWidgetItem.__init__(self, labels)
self.wf_exec = wf_exec
self.setToolTip(0, 'vistrail:%s version:%s log:%s wf_exec:%s user:%s' %
(v_id, v_version, log_id, e_id, user))
if thumb:
pixmap = QtGui.QPixmap()
pixmap.loadFromData(thumb)
self.setIcon(1, QtGui.QIcon(pixmap))
tooltip = """<img border=0 src="data:image/png;base64,%s">""" % thumb.encode('base64')
self.setToolTip(1, tooltip)
def __lt__(self, other):
sort_col = self.treeWidget().sortColumn()
if sort_col in set([1]):
try:
return int(self.text(sort_col)) < int(other.text(sort_col))
except ValueError:
pass
return QtGui.QTreeWidgetItem.__lt__(self, other)
class WorkflowSearchWidget(QtGui.QSplitter):
def __init__(self, connectionList):
QtGui.QSplitter.__init__(self)
self.connectionList = connectionList
self.conn = None
self.config = None
self.offset = 0
self.limit = 50
self.numRows = None
self.vistrail = None
self.version = None
self.fromTime = None
self.toTime = None
self.user = None
self.thumbs = None
self.modules = []
self.setOrientation(QtCore.Qt.Vertical)
self.searchLayout = QtGui.QGridLayout()
self.vistrailEditCheckBox = QtGui.QCheckBox()
self.vistrailEditCheckBox.setToolTip('Check to enable this search option')
self.vistrailEdit = QtGui.QLineEdit()
self.searchLayout.addWidget(self.vistrailEditCheckBox, 0,0)
self.searchLayout.addWidget(QtGui.QLabel('Vistrail:'), 0,1)
self.searchLayout.addWidget(self.vistrailEdit, 0,2)
self.versionEditCheckBox = QtGui.QCheckBox()
self.versionEditCheckBox.setToolTip('Check to enable this search option')
self.versionEdit = QtGui.QLineEdit()
self.searchLayout.addWidget(self.versionEditCheckBox, 0,3)
self.searchLayout.addWidget(QtGui.QLabel('Version:'), 0,4)
self.searchLayout.addWidget(self.versionEdit, 0,5)
self.fromTimeEditCheckBox = QtGui.QCheckBox()
self.fromTimeEditCheckBox.setToolTip('Check to enable this search option')
self.fromTimeEdit = QtGui.QDateTimeEdit(QtCore.QDateTime.currentDateTime().addDays(-1))
self.fromTimeEdit.setDisplayFormat('yyyy-MM-d H:mm:ss')
self.fromTimeEdit.setCalendarPopup(True)
self.searchLayout.addWidget(self.fromTimeEditCheckBox, 1,0)
self.searchLayout.addWidget(QtGui.QLabel('From time:'), 1,1)
self.searchLayout.addWidget(self.fromTimeEdit, 1,2)
self.toTimeEditCheckBox = QtGui.QCheckBox()
self.toTimeEditCheckBox.setToolTip('Check to enable this search option')
self.toTimeEdit = QtGui.QDateTimeEdit(QtCore.QDateTime.currentDateTime())
self.toTimeEdit.setDisplayFormat('yyyy-MM-d H:mm:ss')
self.toTimeEdit.setCalendarPopup(True)
self.searchLayout.addWidget(self.toTimeEditCheckBox, 1,3)
self.searchLayout.addWidget(QtGui.QLabel('To time:'), 1,4)
self.searchLayout.addWidget(self.toTimeEdit, 1,5)
self.userEditCheckBox = QtGui.QCheckBox()
self.userEditCheckBox.setToolTip('Check to enable this search option')
self.userEdit = QtGui.QLineEdit()
self.searchLayout.addWidget(self.userEditCheckBox, 2,0)
self.searchLayout.addWidget(QtGui.QLabel('User:'), 2,1)
self.searchLayout.addWidget(self.userEdit, 2,2)
self.moduleEditCheckBox = QtGui.QCheckBox()
self.moduleEditCheckBox.setToolTip('Check to enable this search option')
self.moduleEdit = QtGui.QLineEdit()
self.moduleEdit.setToolTip('Add module names separated by ,\nConnected modules can be specified by using: ModuleA->ModuleB')
self.searchLayout.addWidget(self.moduleEditCheckBox, 3,0)
self.searchLayout.addWidget(QtGui.QLabel('Modules:'), 3,1)
self.searchLayout.addWidget(self.moduleEdit, 3,2)
self.thumbsCheckBox = QtGui.QCheckBox()
self.thumbsCheckBox.setToolTip('Check to view result thumbnails (may be slow)')
self.searchLayout.addWidget(self.thumbsCheckBox, 3,3)
self.searchLayout.addWidget(QtGui.QLabel('View thumbs'), 3,4)
self.searchButton = QtGui.QPushButton("Search")
self.searchButton.setStatusTip("Search the database for executions")
self.searchLayout.addWidget(self.searchButton, 3, 5)
self.searchWidget = QtGui.QWidget()
self.searchWidget.setLayout(self.searchLayout)
self.addWidget(self.searchWidget)
self.itemView = QtGui.QTreeWidget(self.parent())
self.addWidget(self.itemView)
statusGrid = QtGui.QGridLayout()
statusGrid.setMargin(0)
statusGrid.setSpacing(0)
statusWidget = QtGui.QWidget()
statusWidget.setLayout(statusGrid)
self.addWidget(statusWidget)
self.setStretchFactor(0, 0)
self.setStretchFactor(1, 1)
self.setStretchFactor(2, 0)
statusLayout = QtGui.QHBoxLayout()
statusLayout.setSpacing(5)
statusGrid.addLayout(statusLayout, 2, 1, QtCore.Qt.AlignLeft)
self.prevButton = QtGui.QPushButton("Previous")
self.prevButton.setStatusTip("Show previous results")
self.prevButton.hide()
statusLayout.addWidget(self.prevButton)
self.nextButton = QtGui.QPushButton("Next")
self.nextButton.setStatusTip("Show next results")
self.nextButton.hide()
statusLayout.addWidget(self.nextButton)
self.statusText = QtGui.QLabel('No query specified')
statusLayout.addWidget(self.statusText)
self.connect(self.searchButton,
QtCore.SIGNAL('clicked()'),
self.newQuery)
self.connect(self.prevButton,
QtCore.SIGNAL('clicked()'),
self.gotoPrevious)
self.connect(self.nextButton,
QtCore.SIGNAL('clicked()'),
self.gotoNext)
self.connect(self.itemView,
QtCore.SIGNAL('itemDoubleClicked(QTreeWidgetItem *,int)'),
self.showItem)
def newQuery(self):
self.offset = 0
self.vistrail = None
if self.vistrailEditCheckBox.isChecked():
self.vistrail = str(self.vistrailEdit.text()).strip()
self.version = None
if self.versionEditCheckBox.isChecked():
self.version = str(self.versionEdit.text()).strip()
self.fromTime = None
if self.fromTimeEditCheckBox.isChecked():
self.fromTime = str(
self.fromTimeEdit.dateTime().toString('yyyy-MM-d H:mm:ss'))
self.toTime = None
if self.toTimeEditCheckBox.isChecked():
self.toTime = str(
self.toTimeEdit.dateTime().toString('yyyy-MM-d H:mm:ss'))
self.user = None
if self.userEditCheckBox.isChecked():
self.user = str(self.userEdit.text()).strip()
self.modules = []
if self.moduleEditCheckBox.isChecked():
# create list of [moduleType, connected to previous] pairs
groups = str(self.moduleEdit.text()).strip()
groups = [i.strip() for i in groups.split(',')]
for group in [i.split('->') for i in groups]:
if len(group):
module = group.pop(0).strip()
if len(module):
self.modules.append((module, False))
while len(group):
module = group.pop(0).strip()
if len(module):
self.modules.append((module, True))
self.thumbs = self.thumbsCheckBox.isChecked()
conn_id = self.connectionList.getCurrentItemId()
self.conn = self.connectionList.get_connection(conn_id)
self.config = getConnectionInfo(self.connectionList, conn_id)
self.searchDatabase()
def searchDatabase(self):
self.statusText.setText("Running query...")
self.repaint()
# create connection
conn = self.conn
config = self.config
if conn.dbtype == 'MySQL':
            # removing extra keyword arguments for MySQLdb
config_name = config['name']
del config['name']
config_id = config['id']
del config['id']
workflow_list = runWorkflowQuery(config,
vistrail=self.vistrail, version=self.version,
fromTime=self.fromTime, toTime=self.toTime,
user=self.user,
offset=self.offset, limit=self.limit,
modules=self.modules,
thumbs=self.thumbs)
if 0 == self.offset:
workflow_list, self.numRows = workflow_list
if conn.dbtype == 'MySQL':
config['name'] = config_name
config['id'] = config_id
self.setup_results(workflow_list)
self.updateStatus()
def gotoPrevious(self):
self.offset = max(self.offset - self.limit, 0)
self.searchDatabase()
def gotoNext(self):
self.offset = min(self.offset + self.limit, self.numRows)
self.searchDatabase()
def updateStatus(self):
if self.offset > 0:
self.prevButton.show()
else:
self.prevButton.hide()
last = self.offset + self.limit
if last < self.numRows:
self.nextButton.show()
else:
self.nextButton.hide()
last = self.numRows
if self.numRows:
self.statusText.setText("Showing %s-%s out of %s results" %
(self.offset+1, last, self.numRows))
else:
self.statusText.setText("No matching results found")
def setup_results(self, workflow_list=[]):
self.itemView.clear()
self.itemView.setIconSize(QtCore.QSize(32,32))
columns = ['Vistrail', 'Version', 'Time', 'User']
self.itemView.setColumnCount(len(columns))
self.itemView.setHeaderLabels(columns)
self.itemView.setSortingEnabled(True)
for workflow in workflow_list:
item = QWorkflowItem(workflow)
self.itemView.addTopLevelItem(item)
self.itemView.header().setResizeMode(3, QtGui.QHeaderView.ResizeToContents)
self.itemView.header().setResizeMode(2, QtGui.QHeaderView.ResizeToContents)
self.itemView.header().setResizeMode(1, QtGui.QHeaderView.Interactive)
self.itemView.header().setResizeMode(0, QtGui.QHeaderView.Interactive)
self.itemView.header().resizeSections(QtGui.QHeaderView.Stretch)
conn_id = self.connectionList.getCurrentItemId()
if conn_id < 0:
self.statusText.setText("Select a database")
def showItem(self, item, col):
(v_name, v_id, v_version, version_name, time, user, thumb) = \
item.workflow
config = self.config
locator = \
DBLocator(config['host'],
config['port'],
config['db'],
config['user'],
config['passwd'],
config['name'],
obj_id=v_id,
obj_type='vistrail',
connection_id=config.get('id', None))
#print "url:", locator.to_url()
import gui.application
app = gui.application.get_vistrails_application()
open_vistrail = app.builderWindow.open_vistrail_without_prompt
args = {}
args['version'] = version_name if version_name else v_version
#print "args", args
open_vistrail(locator, **args)
class QWorkflowItem(QtGui.QTreeWidgetItem):
def __init__(self, workflow, parent=None):
(v_name, v_id, v_version, version_name, time, user, thumb) = workflow
version = version_name if version_name else v_version
labels = (str(v_name), str(version), str(time), str(user))
QtGui.QTreeWidgetItem.__init__(self, labels)
self.workflow = workflow
self.setToolTip(0, 'vistrail:%s version:%s' % (v_id, v_version))
if thumb:
pixmap = QtGui.QPixmap()
pixmap.loadFromData(thumb)
self.setIcon(1, QtGui.QIcon(pixmap))
tooltip = """<img border=0 src="data:image/png;base64,%s">""" % thumb.encode('base64')
self.setToolTip(1, tooltip)
def __lt__(self, other):
sort_col = self.treeWidget().sortColumn()
if sort_col in set([1]):
try:
return int(self.text(sort_col)) < int(other.text(sort_col))
except ValueError:
pass
return QtGui.QTreeWidgetItem.__lt__(self, other)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cloud Endpoints API for Content Addressable Storage."""
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from components import auth
from . import impl
# This is used by endpoints indirectly.
package = 'cipd'
class HashAlgo(messages.Enum):
SHA1 = 1
class FetchResponse(messages.Message):
class Status(messages.Enum):
# File is available for fetch.
SUCCESS = 1
# No such file uploaded.
NOT_FOUND = 2
# Some non-transient error happened.
ERROR = 3
# Status of the operation.
status = messages.EnumField(Status, 1, required=True)
# For SUCCESS status, a signed URL to fetch the file from.
fetch_url = messages.StringField(2, required=False)
# For ERROR status, an error message.
error_message = messages.StringField(3, required=False)
class BeginUploadResponse(messages.Message):
class Status(messages.Enum):
# New upload session has started.
SUCCESS = 1
# Such file is already uploaded to the store.
ALREADY_UPLOADED = 2
# Some non-transient error happened.
ERROR = 3
# Status of this operation, defines what other fields to expect.
status = messages.EnumField(Status, 1, required=True)
# For SUCCESS status, a unique identifier of the upload operation.
upload_session_id = messages.StringField(2, required=False)
# For SUCCESS status, URL to PUT file body to via resumable upload protocol.
upload_url = messages.StringField(3, required=False)
# For ERROR status, an error message.
error_message = messages.StringField(4, required=False)
class FinishUploadResponse(messages.Message):
class Status(messages.Enum):
# Upload session never existed or already expired.
MISSING = impl.UploadSession.STATUS_MISSING
# Client is still uploading the file.
UPLOADING = impl.UploadSession.STATUS_UPLOADING
# Server is verifying the hash of the uploaded file.
VERIFYING = impl.UploadSession.STATUS_VERIFYING
# The file is in the store and visible by all clients. Final state.
PUBLISHED = impl.UploadSession.STATUS_PUBLISHED
# Some non-transient error happened.
ERROR = impl.UploadSession.STATUS_ERROR
# Status of the upload operation.
status = messages.EnumField(Status, 1, required=True)
# For ERROR status, an error message.
error_message = messages.StringField(2, required=False)
# int status -> Enum status.
_UPLOAD_STATUS_MAPPING = {
getattr(impl.UploadSession, k): getattr(
FinishUploadResponse.Status, k[len('STATUS_'):])
for k in dir(impl.UploadSession) if k.startswith('STATUS_')
}
# HashAlgo enum field -> hash algo name. Also asserts that all algorithms
# specified in impl.SUPPORTED_HASH_ALGOS are in the HashAlgo enum.
_HASH_ALGO_MAPPING = {
getattr(HashAlgo, k): k for k in impl.SUPPORTED_HASH_ALGOS
}
@auth.endpoints_api(
name='cas',
version='v1',
title='Content Addressable Storage API')
class CASServiceApi(remote.Service):
"""Content addressable storage API."""
# Endpoints require use of ResourceContainer if parameters are passed via URL.
ITEM_RESOURCE_CONTAINER = endpoints.ResourceContainer(
message_types.VoidMessage,
# Hash algorithm used to identify file contents, e.g. 'SHA1'.
hash_algo = messages.EnumField(HashAlgo, 1, required=True),
# Hex hash digest of a file client wants to upload.
file_hash = messages.StringField(2, required=True))
@auth.endpoints_method(
ITEM_RESOURCE_CONTAINER,
FetchResponse,
path='fetch/{hash_algo}/{file_hash}',
http_method='GET',
name='fetch')
@auth.require(auth.is_admin)
def fetch(self, request):
"""Returns a signed URL that can be used to fetch an object."""
def error(msg):
return FetchResponse(status=FetchResponse.Status.ERROR, error_message=msg)
hash_algo = _HASH_ALGO_MAPPING[request.hash_algo]
if not impl.is_valid_hash_digest(hash_algo, request.file_hash):
return error('Invalid hash digest format')
service = impl.get_cas_service()
if service is None or not service.is_fetch_configured():
raise endpoints.InternalServerErrorException('Service is not configured')
url = service.generate_fetch_url(hash_algo, request.file_hash)
return FetchResponse(status=FetchResponse.Status.SUCCESS, fetch_url=url)
@auth.endpoints_method(
ITEM_RESOURCE_CONTAINER,
BeginUploadResponse,
path='upload/{hash_algo}/{file_hash}',
http_method='POST',
name='beginUpload')
@auth.require(auth.is_admin)
def begin_upload(self, request):
"""Initiates an upload operation if file is missing.
    Once initiated, the client is responsible for uploading the file to the
    temporary location (returned as 'upload_url') and finalizing the upload
    with a call to 'finishUpload'.
    If the file is already in the store, returns ALREADY_UPLOADED status.
    This method is not intended to be used directly by most clients (only by
    admins in case some files have to be injected into the CAS store directly).
    Use PackageRepositoryApi.register_package instead to initiate an upload of
    a package and get upload_url and upload_session_id.
"""
def error(msg):
return BeginUploadResponse(
status=BeginUploadResponse.Status.ERROR,
error_message=msg)
hash_algo = _HASH_ALGO_MAPPING[request.hash_algo]
if not impl.is_valid_hash_digest(hash_algo, request.file_hash):
return error('Invalid hash digest format')
service = impl.get_cas_service()
if service is None:
raise endpoints.InternalServerErrorException('Service is not configured')
if service.is_object_present(hash_algo, request.file_hash):
return BeginUploadResponse(
status=BeginUploadResponse.Status.ALREADY_UPLOADED)
upload_session, upload_session_id = service.create_upload_session(
hash_algo,
request.file_hash,
auth.get_current_identity())
return BeginUploadResponse(
status=BeginUploadResponse.Status.SUCCESS,
upload_session_id=upload_session_id,
upload_url=upload_session.upload_url)
# Endpoints require use of ResourceContainer if parameters are passed via URL.
FINISH_UPLOAD_RESOURCE_CONTAINER = endpoints.ResourceContainer(
message_types.VoidMessage,
# Upload operation ID as returned by beginUpload.
upload_session_id = messages.StringField(1, required=True))
@auth.endpoints_method(
FINISH_UPLOAD_RESOURCE_CONTAINER,
FinishUploadResponse,
path='finalize/{upload_session_id}',
http_method='POST',
name='finishUpload')
@auth.require(lambda: not auth.get_current_identity().is_anonymous)
def finish_upload(self, request):
"""Finishes pending upload or queries its status.
Client should finalize Google Storage upload session first. Once GS upload
is finalized and 'finishUpload' is called, the server starts hash
    verification. The uploading client will get a 'VERIFYING' status response
    and can continue polling this method until the server returns 'PUBLISHED'
    status.
upload_session_id implicitly authorizes the request.
"""
service = impl.get_cas_service()
if service is None:
raise endpoints.InternalServerErrorException('Service is not configured')
    # Verify the signature of upload_session_id and grab the session. Broken
    # or expired signatures are treated the same way as missing upload
    # sessions: no need to provide more hints to a malicious caller.
upload_session = service.fetch_upload_session(
request.upload_session_id, auth.get_current_identity())
if upload_session is None:
return FinishUploadResponse(status=FinishUploadResponse.Status.MISSING)
# Start object verification task if necessary, returns updated copy of
# |upload_session| entity.
upload_session = service.maybe_finish_upload(upload_session)
response = FinishUploadResponse(
status=_UPLOAD_STATUS_MAPPING[upload_session.status])
if upload_session.status == impl.UploadSession.STATUS_ERROR:
response.error_message = upload_session.error_message or 'Unknown error'
return response
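# Illustrative client-side sketch of the upload flow documented above. This is
# an assumption added for illustration only and is not part of this module:
# it assumes the API is mounted under the default Cloud Endpoints prefix
# ('/_ah/api/cas/v1'), that the 'requests' library is available, and it omits
# the authentication headers that auth.require would demand in practice.
def _example_upload_flow(base_url, sha1_digest, file_body):
  import time
  import requests  # assumed third-party dependency of this illustrative client
  # Step 1: start an upload session (or stop early if the file already exists).
  begin = requests.post('%s/upload/SHA1/%s' % (base_url, sha1_digest)).json()
  if begin['status'] == 'ALREADY_UPLOADED':
    return 'PUBLISHED'
  # Step 2: upload the file body to the temporary Google Storage location.
  requests.put(begin['upload_url'], data=file_body)
  # Step 3: finalize, then poll until hash verification finishes.
  while True:
    done = requests.post(
        '%s/finalize/%s' % (base_url, begin['upload_session_id'])).json()
    if done['status'] not in ('UPLOADING', 'VERIFYING'):
      return done['status']
    time.sleep(1)  # a real client would use exponential backoff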
|
|
#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# unittest requires method names starting in 'test'
#pylint: disable-msg=C6409
"""Unit tests for download.py."""
__author__ = 'dgentry@google.com (Denton Gentry)'
import datetime
import shutil
import tempfile
import time
import unittest
import google3
import core
import download
import persistobj
mock_http_clients = []
mock_http_downloads = []
mock_installers = []
mock_downloads = []
class MockHttpClient(object):
def __init__(self, io_loop=None):
self.did_fetch = False
self.request = None
self.callback = None
mock_http_clients.append(self)
def fetch(self, request, callback):
self.did_fetch = True
self.request = request
self.callback = callback
class MockIoloop(object):
def __init__(self):
self.timeout = None
self.callback = None
def add_timeout(self, timeout, callback, monotonic=None):
self.timeout = timeout
self.callback = callback
class MockHttpDownload(object):
def __init__(self, url, username=None, password=None,
download_complete_cb=None, download_dir=None, ioloop=None):
self.url = url
self.username = username
self.password = password
self.download_complete_cb = download_complete_cb
self.download_dir = download_dir
self.ioloop = ioloop
self.did_fetch = False
mock_http_downloads.append(self)
def fetch(self):
self.did_fetch = True
class MockInstaller(object):
def __init__(self, filename):
self.filename = filename
self.did_install = False
self.did_reboot = False
self.file_type = None
    self.target_filename = None
self.install_callback = None
mock_installers.append(self)
def install(self, file_type, target_filename, callback):
self.did_install = True
self.file_type = file_type
self.target_filename = target_filename
self.install_callback = callback
return True
def reboot(self):
self.did_reboot = True
class MockTransferComplete(object):
def __init__(self):
self.transfer_complete_called = False
self.dl = None
self.command_key = None
self.faultcode = None
self.faultstring = None
self.starttime = None
self.endtime = None
def SendTransferComplete(self, dl, command_key, faultcode, faultstring,
starttime, endtime, event_code):
self.transfer_complete_called = True
self.dl = dl
self.command_key = command_key
self.faultcode = faultcode
self.faultstring = faultstring
self.starttime = starttime
self.endtime = endtime
self.event_code = event_code
class MockFile(object):
def __init__(self, name):
self.name = name
def _Delta(t):
return datetime.timedelta(seconds=t)
class DownloadTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
download.INSTALLER = MockInstaller
self.done_command_key = None
self.old_time = time.time
del mock_installers[:]
del mock_http_downloads[:]
download.DOWNLOAD_CLIENT['http'] = MockHttpDownload
download.DOWNLOAD_CLIENT['https'] = MockHttpDownload
def tearDown(self):
time.time = self.old_time
shutil.rmtree(self.tmpdir)
del mock_installers[:]
del mock_http_clients[:]
def mockTime(self):
return 123456.0
def QCheckBoring(self, dl, args):
"""Check get_queue_state() fields which don't change, and return qstate."""
q = dl.get_queue_state()
self.assertEqual(q.CommandKey, args['command_key'])
self.assertTrue(q.IsDownload)
self.assertEqual(q.FileType, args['file_type'])
self.assertEqual(q.FileSize, args['file_size'])
self.assertEqual(q.TargetFileName, args['target_filename'])
return q.State
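  # The tests below walk download.Download through its full lifecycle: wait for
  # delay_seconds, HTTP fetch, install, optional reboot, then send Transfer
  # Complete. The numeric queue states checked via QCheckBoring are
  # 1 = Not Yet Started, 2 = In Process, 3 = Cleaning Up (matching the inline
  # comments on the assertions).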
def testSuccess(self):
ioloop = MockIoloop()
cmpl = MockTransferComplete()
time.time = self.mockTime
kwargs = dict(command_key='testCommandKey',
file_type='testFileType',
url='http://example.com/foo',
username='testUsername',
password='testPassword',
file_size=1000,
target_filename='testTargetFilename',
delay_seconds=99)
stateobj = persistobj.PersistentObject(objdir=self.tmpdir,
rootname='testObj',
filename=None, **kwargs)
dl = download.Download(stateobj=stateobj,
transfer_complete_cb=cmpl.SendTransferComplete,
ioloop=ioloop)
self.assertEqual(self.QCheckBoring(dl, kwargs), 1) # 1: Not Yet Started
# Step 1: Wait delay_seconds
dl.do_start()
self.assertEqual(ioloop.timeout, _Delta(kwargs['delay_seconds']))
self.assertEqual(self.QCheckBoring(dl, kwargs), 1) # 1: Not Yet Started
# Step 2: HTTP Download
dl.timer_callback()
self.assertEqual(len(mock_http_downloads), 1)
http = mock_http_downloads[0]
self.assertEqual(http.url, kwargs['url'])
self.assertEqual(http.username, kwargs['username'])
self.assertEqual(http.password, kwargs['password'])
self.assertTrue(http.download_complete_cb)
self.assertTrue(http.did_fetch)
self.assertEqual(self.QCheckBoring(dl, kwargs), 2) # 2: In process
# Step 3: Install
dlfile = MockFile('/path/to/downloaded/file')
http.download_complete_cb(0, '', dlfile)
self.assertEqual(len(mock_installers), 1)
inst = mock_installers[0]
self.assertTrue(inst.did_install)
self.assertEqual(inst.file_type, kwargs['file_type'])
self.assertEqual(inst.target_filename, kwargs['target_filename'])
self.assertEqual(inst.filename, dlfile.name)
self.assertFalse(inst.did_reboot)
self.assertEqual(self.QCheckBoring(dl, kwargs), 2) # 2: In process
# Step 4: Reboot
inst.install_callback(0, '', must_reboot=True)
self.assertTrue(inst.did_reboot)
self.assertEqual(self.QCheckBoring(dl, kwargs), 2) # 2: In process
# Step 5: Send Transfer Complete
dl.reboot_callback(0, '')
self.assertTrue(cmpl.transfer_complete_called)
self.assertEqual(cmpl.command_key, kwargs['command_key'])
self.assertEqual(cmpl.faultcode, 0)
self.assertEqual(cmpl.faultstring, '')
self.assertEqual(cmpl.starttime, self.mockTime())
self.assertEqual(cmpl.endtime, self.mockTime())
self.assertEqual(cmpl.event_code, 'M Download')
self.assertEqual(self.QCheckBoring(dl, kwargs), 3) # 3: Cleaning up
# Step 6: Wait for Transfer Complete Response
self.assertFalse(dl.cleanup())
self.assertEqual(self.QCheckBoring(dl, kwargs), 3) # 3: Cleaning up
def testDownloadFailed(self):
ioloop = MockIoloop()
cmpl = MockTransferComplete()
time.time = self.mockTime
kwargs = dict(command_key='testCommandKey',
url='http://example.com/foo',
delay_seconds=1)
stateobj = persistobj.PersistentObject(objdir=self.tmpdir,
rootname='testObj',
filename=None, **kwargs)
dl = download.Download(stateobj=stateobj,
transfer_complete_cb=cmpl.SendTransferComplete,
ioloop=ioloop)
# Step 1: Wait delay_seconds
dl.do_start()
self.assertEqual(ioloop.timeout, _Delta(kwargs['delay_seconds']))
# Step 2: HTTP Download
dl.timer_callback()
self.assertEqual(len(mock_http_downloads), 1)
http = mock_http_downloads[0]
self.assertEqual(http.url, kwargs['url'])
# Step 3: Download fails
http.download_complete_cb(100, 'TestDownloadError', None)
self.assertEqual(len(mock_installers), 0)
self.assertTrue(cmpl.transfer_complete_called)
self.assertEqual(cmpl.command_key, kwargs['command_key'])
self.assertEqual(cmpl.faultcode, 100)
self.assertEqual(cmpl.faultstring, 'TestDownloadError')
self.assertEqual(cmpl.starttime, 0.0)
self.assertEqual(cmpl.endtime, 0.0)
self.assertEqual(cmpl.event_code, 'M Download')
def testInstallFailed(self):
ioloop = MockIoloop()
cmpl = MockTransferComplete()
time.time = self.mockTime
kwargs = dict(command_key='testCommandKey',
url='http://example.com/foo',
delay_seconds=1)
stateobj = persistobj.PersistentObject(objdir=self.tmpdir,
rootname='testObj',
filename=None, **kwargs)
dl = download.Download(stateobj=stateobj,
transfer_complete_cb=cmpl.SendTransferComplete,
ioloop=ioloop)
# Step 1: Wait delay_seconds
dl.do_start()
self.assertEqual(ioloop.timeout, _Delta(kwargs['delay_seconds']))
# Step 2: HTTP Download
dl.timer_callback()
self.assertEqual(len(mock_http_downloads), 1)
http = mock_http_downloads[0]
self.assertEqual(http.url, kwargs['url'])
# Step 3: Install
dlfile = MockFile('/path/to/downloaded/file')
http.download_complete_cb(0, '', dlfile)
self.assertEqual(len(mock_installers), 1)
inst = mock_installers[0]
self.assertTrue(inst.did_install)
self.assertEqual(inst.filename, dlfile.name)
self.assertFalse(inst.did_reboot)
# Step 4: Install Failed
inst.install_callback(101, 'TestInstallError', must_reboot=False)
self.assertTrue(cmpl.transfer_complete_called)
self.assertEqual(cmpl.command_key, kwargs['command_key'])
self.assertEqual(cmpl.faultcode, 101)
self.assertEqual(cmpl.faultstring, 'TestInstallError')
self.assertEqual(cmpl.starttime, 0.0)
self.assertEqual(cmpl.endtime, 0.0)
self.assertEqual(cmpl.event_code, 'M Download')
def testInstallNoReboot(self):
ioloop = MockIoloop()
cmpl = MockTransferComplete()
time.time = self.mockTime
kwargs = dict(command_key='testCommandKey',
url='http://example.com/foo',
delay_seconds=1)
stateobj = persistobj.PersistentObject(objdir=self.tmpdir,
rootname='testObj',
filename=None, **kwargs)
dl = download.Download(stateobj=stateobj,
transfer_complete_cb=cmpl.SendTransferComplete,
ioloop=ioloop)
# Step 1: Wait delay_seconds
dl.do_start()
self.assertEqual(ioloop.timeout, _Delta(kwargs['delay_seconds']))
# Step 2: HTTP Download
dl.timer_callback()
self.assertEqual(len(mock_http_downloads), 1)
http = mock_http_downloads[0]
self.assertEqual(http.url, kwargs['url'])
# Step 3: Install
dlfile = MockFile('/path/to/downloaded/file')
http.download_complete_cb(0, '', dlfile)
self.assertEqual(len(mock_installers), 1)
inst = mock_installers[0]
self.assertTrue(inst.did_install)
self.assertEqual(inst.filename, dlfile.name)
self.assertFalse(inst.did_reboot)
# Step 4: Install Succeeded, no reboot
inst.install_callback(0, '', must_reboot=False)
self.assertTrue(cmpl.transfer_complete_called)
self.assertEqual(cmpl.command_key, kwargs['command_key'])
self.assertEqual(cmpl.faultcode, 0)
self.assertEqual(cmpl.faultstring, '')
self.assertEqual(cmpl.starttime, self.mockTime())
self.assertEqual(cmpl.endtime, self.mockTime())
self.assertEqual(cmpl.event_code, 'M Download')
def testCancelRefused(self):
ioloop = MockIoloop()
cmpl = MockTransferComplete()
kwargs = dict(command_key='testCommandKey',
url='http://example.com/foo')
stateobj = persistobj.PersistentObject(objdir=self.tmpdir,
rootname='testObj',
filename=None, **kwargs)
dl = download.Download(stateobj=stateobj,
transfer_complete_cb=cmpl.SendTransferComplete,
ioloop=ioloop)
dl.do_start() # Step 1: Wait delay_seconds
dl.timer_callback() # Step 2: HTTP Download
dl.download_complete_callback(0, None, None) # Step 3: Install
self.assertTrue(dl.cleanup())
dl.installer_callback(0, None, must_reboot=True) # Step 4: Reboot
self.assertTrue(dl.cleanup())
dl.reboot_callback(0, '') # Step 5: Rebooted
self.assertFalse(dl.cleanup())
def testCommandKey(self):
kwargs = dict(command_key='testCommandKey')
stateobj = persistobj.PersistentObject(objdir=self.tmpdir,
rootname='testObj',
filename=None, **kwargs)
dl = download.Download(stateobj=stateobj, transfer_complete_cb=None)
self.assertEqual(dl.CommandKey(), kwargs['command_key'])
kwargs = dict()
stateobj = persistobj.PersistentObject(objdir=self.tmpdir,
rootname='testObj',
filename=None, **kwargs)
dl = download.Download(stateobj=stateobj, transfer_complete_cb=None)
self.assertEqual(dl.CommandKey(), None)
class MockDownloadObj(object):
def __init__(self, stateobj, transfer_complete_cb, done_cb=None,
download_dir=None, ioloop=None):
self.stateobj = stateobj
self.transfer_complete_cb = transfer_complete_cb
self.done_cb = done_cb
self.download_dir = download_dir
self.ioloop = ioloop
self.do_start_called = False
self.immediate_complete_called = False
self.faultcode = None
self.faultstring = None
self.reboot_callback_called = False
mock_downloads.append(self)
def do_start(self):
self.do_start_called = True
def do_immediate_complete(self, faultcode, faultstring):
self.immediate_complete_called = True
self.faultcode = faultcode
self.faultstring = faultstring
def reboot_callback(self, faultcode, faultstring):
self.reboot_callback_called = True
def get_queue_state(self):
return 'This_is_not_a_real_queue_state.'
class DownloadManagerTest(unittest.TestCase):
def setUp(self):
self.old_DOWNLOADOBJ = download.DOWNLOADOBJ
download.DOWNLOADOBJ = MockDownloadObj
self.tmpdir = tempfile.mkdtemp()
del mock_downloads[:]
def tearDown(self):
download.DOWNLOADOBJ = self.old_DOWNLOADOBJ
shutil.rmtree(self.tmpdir)
del mock_downloads[:]
def allocTestDM(self):
dm = download.DownloadManager()
dm.SetDirectories(self.tmpdir, self.tmpdir)
cmpl = MockTransferComplete()
dm.send_transfer_complete = cmpl.SendTransferComplete
return (dm, cmpl)
def testSimpleDownload(self):
(dm, _) = self.allocTestDM()
args = {'command_key': 'TestCommandKey',
'file_type': 'TestFileType',
'url': 'http://example.com/',
'username': 'TestUser',
'password': 'TestPassword',
'file_size': 99,
'target_filename': 'TestFilename',
'delay_seconds': 30}
(code, start, end) = dm.NewDownload(**args)
self.assertEqual(code, 1)
self.assertEqual(start, 0.0)
self.assertEqual(end, 0.0)
self.assertEqual(len(mock_downloads), 1)
dl = mock_downloads[0]
self.assertEqual(dl.stateobj.command_key, args['command_key'])
self.assertEqual(dl.stateobj.file_type, args['file_type'])
self.assertEqual(dl.stateobj.url, args['url'])
self.assertEqual(dl.stateobj.username, args['username'])
self.assertEqual(dl.stateobj.password, args['password'])
self.assertEqual(dl.stateobj.file_size, args['file_size'])
self.assertEqual(dl.stateobj.target_filename, args['target_filename'])
self.assertEqual(dl.stateobj.delay_seconds, args['delay_seconds'])
def testReadonlyConfigDir(self):
(dm, _) = self.allocTestDM()
dm.SetDirectories(config_dir='/user/nonexist', download_dir=self.tmpdir)
args = {'command_key': 'TestCommandKey',
'file_type': 'TestFileType',
'url': 'http://example.com/',
'username': 'TestUser',
'password': 'TestPassword',
'file_size': 99,
'target_filename': 'TestFilename',
'delay_seconds': 30}
(code, start, end) = dm.NewDownload(**args)
self.assertEqual(code, 1)
self.assertEqual(start, 0.0)
self.assertEqual(end, 0.0)
self.assertEqual(len(mock_downloads), 1)
dl = mock_downloads[0]
self.assertEqual(dl.stateobj.command_key, args['command_key'])
self.assertEqual(dl.stateobj.file_type, args['file_type'])
self.assertEqual(dl.stateobj.url, args['url'])
self.assertEqual(dl.stateobj.username, args['username'])
self.assertEqual(dl.stateobj.password, args['password'])
self.assertEqual(dl.stateobj.file_size, args['file_size'])
self.assertEqual(dl.stateobj.target_filename, args['target_filename'])
self.assertEqual(dl.stateobj.delay_seconds, args['delay_seconds'])
def testMaxDownloads(self):
(dm, _) = self.allocTestDM()
maxdl = download.DownloadManager.MAXDOWNLOADS
for i in range(maxdl):
args = {'command_key': 'TestCommandKey' + str(i),
'url': 'http://example.com/'}
(code, start, end) = dm.NewDownload(**args)
self.assertEqual(code, 1)
self.assertEqual(start, 0.0)
self.assertEqual(end, 0.0)
self.assertEqual(len(mock_downloads), maxdl)
self.assertRaises(core.ResourcesExceededError, dm.NewDownload, **args)
def testBadUrlScheme(self):
(dm, _) = self.allocTestDM()
args = {'command_key': 'TestCommandKey',
'url': 'invalid://bad.url/'}
self.assertRaises(core.FileTransferProtocolError, dm.NewDownload, **args)
def testRestoreMultiple(self):
(dm, _) = self.allocTestDM()
numdl = 4
for i in range(numdl):
args = {'command_key': 'TestCommandKey' + str(i),
'file_type': 'TestFileType',
'url': 'http://example.com/',
'username': 'TestUser',
'password': 'TestPassword',
'file_size': 99,
'target_filename': 'TestFilename',
'delay_seconds': 30}
persistobj.PersistentObject(objdir=dm.config_dir,
rootname=download.DNLDROOTNAME,
filename=None, **args)
dm.RestoreDownloads()
self.assertEqual(len(mock_downloads), numdl)
for i in range(numdl):
dl = mock_downloads[i]
self.assertFalse(dl.do_start_called)
self.assertFalse(dl.immediate_complete_called)
self.assertTrue(dl.reboot_callback_called)
def testRestoreNoCommandKey(self):
(dm, _) = self.allocTestDM()
args = {'delay_seconds': 30}
persistobj.PersistentObject(objdir=dm.config_dir,
rootname=download.DNLDROOTNAME,
filename=None, **args)
dm.RestoreDownloads()
self.assertEqual(len(mock_downloads), 0)
def testRestoreReboots(self):
(dm, _) = self.allocTestDM()
expected = set()
numrb = 3
for i in range(numrb):
key = u'TestCommandKey' + str(i)
args = {'command_key': key}
persistobj.PersistentObject(objdir=dm.config_dir,
rootname=download.BOOTROOTNAME,
filename=None, **args)
expected.add(('M Reboot', key))
# Plus an invalid object
args = {'foo': 'bar'}
persistobj.PersistentObject(objdir=dm.config_dir,
rootname=download.BOOTROOTNAME,
filename=None, **args)
reboots = set(dm.RestoreReboots())
self.assertEqual(reboots, expected)
def testGetAllQueuedTransfers(self):
(dm, _) = self.allocTestDM()
numdl = 1
for i in range(numdl):
args = {'command_key': 'TestCommandKey' + str(i),
'file_type': 'TestFileType',
'url': 'http://example.com/',
'username': 'TestUser',
'password': 'TestPassword',
'file_size': 99,
'target_filename': 'TestFilename',
'delay_seconds': 30}
dm.NewDownload(**args)
transfers = dm.GetAllQueuedTransfers()
self.assertEqual(len(transfers), numdl)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
import selectors
import socket
import loopfunction
import logging
import queue
import maxthreads
class Log:
INDENTATION = 4
def __init__(self, *args_):
self.do = {'errors': False,
'enter': False,
'exit': False,
'args': False}
for i in args_:
if i not in self.do and i != 'all':
print('ERROR:' + i)
raise ValueError('{} is not a valid variable'.format(i))
for i in self.do.keys():
if i in args_ or 'all' in args_:
self.do[i] = True
def __call__(self, f):
def wrapped_f(*args, **kwargs):
if self.do['enter']:
logging.debug(self._indent_string(
'function {} called with\n'.format(f.__name__) +
'args={}\n'.format(args) +
'kwargs={}'.format(kwargs), self.INDENTATION))
            try:
                result = f(*args, **kwargs)
            except Exception:
                if self.do['errors']:
                    logging.error(self._indent_string(
                        'function {} was called with\n'.format(f.__name__) +
                        'args={}\n'.format(args) +
                        'kwargs={}\n'.format(kwargs) +
                        'and exited with error:\n' +
                        '-'*50 + '\n' +
                        logging.traceback.format_exc() +
                        '-'*50 + '\n', self.INDENTATION))
                raise
            else:
                if self.do['exit']:
                    logging.debug('function {} exited normally'.format(f.__name__))
                return result  # propagate the wrapped function's return value
        return wrapped_f
@staticmethod
def _indent_string(string, indentation):
return (' '*indentation).join(string.splitlines(True))
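# A minimal usage sketch (not part of the original module): the Log decorator is
# applied to a plain function and logs entry, exit and errors, assuming logging
# has been configured by the caller. The helper and the decorated function below
# are invented purely for illustration and are never called by SocketServer.
def _log_decorator_example():
    @Log('all')
    def add(a, b):
        return a + b
    result = add(1, 2)  # entry and exit are logged; exceptions would be logged and re-raised
    return result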
class SocketServer:
@Log('errors')
def __init__(self,
port=1234,
host=socket.gethostbyname(socket.gethostname()),
queue_size=1000,
block_time=2,
selector=selectors.EpollSelector,
handle_readable=lambda client: True,
handle_incoming=lambda client, address: True,
max_subthreads=-1):
self.port = port
self.host = host
self.queue_size = queue_size
self.block_time = block_time
self.selector = selector
self.handle_readable = handle_readable
self.handle_incoming = handle_incoming
self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server_socket.setblocking(False)
self._accept_selector = selector()
self._recv_selector = selector()
self._accept_selector.register(self._server_socket, selectors.EVENT_READ)
self._loop_objects = (
loopfunction.Loop(target=self._mainthread_accept_clients,
on_start=lambda: logging.debug('Thread started: Accept clients'),
on_stop=lambda: logging.debug('Thread stopped: Accept clients')),
loopfunction.Loop(target=self._mainthread_poll_readable,
on_start=lambda: logging.debug('Thread started: Poll for readable clients'),
on_stop=lambda: logging.debug('Thread stopped: Poll for readable clients')),
)
self._threads_limiter = maxthreads.MaxThreads(max_subthreads)
self.clients = {}
@Log('errors')
def _mainthread_accept_clients(self):
"""Accepts new clients and sends them to the to _handle_accepted within a subthread
"""
try:
if self._accept_selector.select(timeout=self.block_time):
client = self._server_socket.accept()
logging.info('Client connected: {}'.format(client[1]))
self._threads_limiter.start_thread(target=self._subthread_handle_accepted,
args=(client,))
except socket.error:
pass
@Log('errors')
def _mainthread_poll_readable(self):
"""Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
"""
events = self._recv_selector.select(self.block_time)
for key, mask in events:
if mask == selectors.EVENT_READ:
self._recv_selector.unregister(key.fileobj)
self._threads_limiter.start_thread(target=self._subthread_handle_readable,
args=(key.fileobj,))
@Log('errors')
def _subthread_handle_accepted(self, client):
"""Gets accepted clients from the queue object and sets up the client socket.
The client can then be found in the clients dictionary with the socket object
as the key.
"""
conn, addr = client
if self.handle_incoming(conn, addr):
logging.info('Accepted connection from client: {}'.format(addr))
conn.setblocking(False)
self.clients[conn] = addr
self.register(conn)
else:
logging.info('Refused connection from client: {}'.format(addr))
self.disconnect(conn)
@Log('errors')
def _subthread_handle_readable(self, conn):
"""Handles readable client sockets. Calls the user modified handle_readable with
the client socket as the only variable. If the handle_readable function returns
true the client is again registered to the selector object otherwise the client
is disconnected.
"""
if self.handle_readable(conn):
self.register(conn)
else:
self.disconnect(conn)
@Log('all')
def start(self):
logging.info('Binding server socket to {}:{}'.format(self.host, self.port))
self._server_socket.bind((self.host, self.port))
self._server_socket.listen(self.queue_size)
logging.info('Server socket now listening (queue_size={})'.format(self.queue_size))
logging.info('Starting main threads...')
for loop_obj in self._loop_objects:
loop_obj.start()
logging.info('Main threads started')
@Log('all')
def stop(self):
logging.info('Closing all ({}) connections...'.format(len(self.clients)))
self.disconnect(self.clients)
logging.info('Stopping main threads...')
for loop_obj in self._loop_objects:
loop_obj.send_stop_signal(silent=True)
for loop_obj in self._loop_objects:
loop_obj.stop(silent=True)
logging.info('Shutting down server socket...')
self._server_socket.shutdown(socket.SHUT_RDWR)
logging.info('Closing server socket...')
self._server_socket.close()
@Log('errors')
def register(self, client, silent=False):
try:
self._recv_selector.register(client, selectors.EVENT_READ)
except KeyError:
if not silent:
logging.error(
'Tried to register an already registered client: {}'.format(self.clients[client]))
raise KeyError('Client already registered')
@Log('errors')
def unregister(self, client, silent=False):
try:
self._recv_selector.unregister(client)
except KeyError:
if not silent:
logging.error(
'Tried to unregister a client that is not registered: {}'.format(self.clients[client]))
            raise KeyError('Client not registered')
@Log('errors')
def disconnect(self, client, how=socket.SHUT_RDWR):
if hasattr(client, '__iter__'):
if client == self.clients:
client = self.clients.copy()
for i in client:
self.disconnect(i, how)
else:
self.unregister(client, True)
address = 'Could not find address'
try:
address = client.getpeername()
                client.shutdown(how)
except socket.error:
pass
client.close()
try:
address = self.clients[client]
del self.clients[client]
except KeyError:
pass
logging.info('Client disconnected: {}'.format(address))
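# A minimal usage sketch (not part of the original module): an echo-style server
# built on SocketServer. The callbacks, port and helper name below are invented
# for illustration; loopfunction and maxthreads must be installed for start()
# to work, and the caller is responsible for eventually calling server.stop().
def _socketserver_echo_example():
    def handle_incoming(client, address):
        return True  # accept every connection
    def handle_readable(client):
        data = client.recv(4096)
        if not data:
            return False  # empty read means the peer closed; drop the client
        client.sendall(data)  # echo the payload back
        return True  # keep the client registered for further reads
    server = SocketServer(port=1234,
                          handle_incoming=handle_incoming,
                          handle_readable=handle_readable)
    server.start()
    return server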
|
|
# Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import re
from debian.debian_support import version_compare
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from debile.master.utils import emit
from debile.master.changes import Changes, ChangesFileException
from debile.master.reprepro import Repo, RepoSourceAlreadyRegistered, RepoPackageNotFound
from debile.master.orm import (Person, Builder, Suite, Component, Arch, Group,
GroupSuite, Source, Deb, Job,
create_source, create_jobs)
def process_changes(default_group, config, session, path):
try:
changes = Changes(path)
except Exception as e:
print('SKIP: Error loading changes file {tag} - ({exception}: {args})'.format(
tag=path,
exception=type(e),
args=e.args))
return
try:
changes.validate()
except Exception as e:
print('SKIP: Invalid changes file {tag} ({exception}: {args})'.format(
tag=path,
exception=type(e),
args=e.args))
return
try:
fingerprint = changes.validate_signature(config['keyrings']['pgp'])
except ChangesFileException as e:
return reject_changes(session, changes, "invalid-signature: " + e.message)
group = changes.get('X-Debile-Group', default_group)
try:
group = session.query(Group).filter_by(name=group).one()
except MultipleResultsFound:
return reject_changes(session, changes, "internal-error")
except NoResultFound:
return reject_changes(session, changes, "invalid-group")
# Sourceful Uploads
if changes.is_source_only_upload():
try:
user = session.query(Person).filter_by(pgp=fingerprint).one()
except NoResultFound:
return reject_changes(session, changes, "invalid-user")
return accept_source_changes(default_group, config, session, changes, user)
# Binary Uploads
if changes.is_binary_only_upload():
try:
builder = session.query(Builder).filter_by(pgp=fingerprint).one()
except NoResultFound:
return reject_changes(session, changes, "invalid-builder")
return accept_binary_changes(default_group, config, session, changes, builder)
return reject_changes(session, changes, "mixed-upload")
def reject_changes(session, changes, tag):
session.rollback()
print "REJECT: {source} because {tag}".format(
tag=tag, source=changes.get_package_name())
emit('reject', 'source', {
"tag": tag,
"source": changes.get_package_name(),
})
for fp in [changes.get_changes_file()] + changes.get_files():
os.unlink(fp)
# Note this in the log.
def accept_source_changes(default_group, config, session, changes, user):
group = changes.get('X-Debile-Group', default_group)
suite = changes['Distribution']
try:
group_suite = session.query(GroupSuite).join(GroupSuite.group).join(GroupSuite.suite).filter(
Group.name == group,
Suite.name == suite,
).one()
except MultipleResultsFound:
return reject_changes(session, changes, "internal-error")
except NoResultFound:
return reject_changes(session, changes, "invalid-suite-for-group")
dsc = changes.get_dsc_obj()
if dsc['Source'] != changes['Source']:
return reject_changes(session, changes, "dsc-does-not-march-changes")
if dsc['Version'] != changes['Version']:
return reject_changes(session, changes, "dsc-does-not-march-changes")
try:
source = session.query(Source).filter(
Source.name == dsc['Source'],
Source.version == dsc['Version'],
GroupSuite.group == group_suite.group,
).one()
return reject_changes(session, changes, "source-already-in-group")
except MultipleResultsFound:
return reject_changes(session, changes, "internal-error")
except NoResultFound:
pass
oldsources = session.query(Source).filter(
Source.group_suite == group_suite,
Source.name == dsc['Source'],
)
for oldsource in oldsources:
if version_compare(oldsource.version, dsc['Version']) > 0:
return reject_changes(session, changes, "newer-source-already-in-suite")
# Drop any old jobs that are still pending.
for oldsource in oldsources:
for job in oldsource.jobs:
if (not any(job.results) and not any(job.built_binaries)):
session.delete(job)
elif job.failed is None:
job.failed = True
if not any(oldsource.jobs):
session.delete(oldsource)
component = session.query(Component).filter_by(name="main").one()
if 'Build-Architecture-Indep' in dsc:
valid_affinities = dsc['Build-Architecture-Indep']
elif 'X-Build-Architecture-Indep' in dsc:
valid_affinities = dsc['X-Build-Architecture-Indep']
elif 'X-Arch-Indep-Build-Arch' in dsc:
valid_affinities = dsc['X-Arch-Indep-Build-Arch']
else:
valid_affinities = "any"
with session.no_autoflush:
source = create_source(dsc, group_suite, component, user,
config["affinity_preference"], valid_affinities)
create_jobs(source)
session.add(source)
    # We have a changes file in order. Let's roll.
repo = Repo(group_suite.group.repo_path)
repo.add_changes(changes)
try:
(source.directory, source.dsc_filename) = repo.find_dsc(source)
except RepoPackageNotFound:
return reject_changes(session, changes, "reprepo-package-not-found")
emit('accept', 'source', source.debilize())
# OK. It's safely in the database and repo. Let's cleanup.
for fp in [changes.get_changes_file()] + changes.get_files():
os.unlink(fp)
def accept_binary_changes(default_group, config, session, changes, builder):
# OK. We'll relate this back to a build job.
job = changes.get('X-Debile-Job', None)
if job is None:
return reject_changes(session, changes, "no-job")
job = session.query(Job).get(job)
source = job.source
if changes.get('Source') != source.name:
return reject_changes(session, changes, "binary-source-name-mismatch")
if changes.get("Version") != source.version:
return reject_changes(
session, changes, "binary-source-version-mismatch")
if changes.get('X-Debile-Group', default_group) != source.group.name:
return reject_changes(session, changes, "binary-source-group-mismatch")
if changes.get('Distribution') != source.suite.name:
return reject_changes(session, changes, "binary-source-suite-mismatch")
if builder != job.builder:
return reject_changes(session, changes, "wrong-builder")
anames = changes.get("Architecture").split(None)
arches = session.query(Arch).filter(Arch.name.in_(anames)).all()
binaries = {}
for arch in arches:
if arch.name not in [job.arch.name, "all"]:
return reject_changes(session, changes, "wrong-architecture")
binaries[arch.name] = job.new_binary(arch)
if not binaries:
return reject_changes(session, changes, "no-architecture")
session.add_all(binaries.values())
PATH = re.compile("^/pool/.*/")
    ARCH = re.compile(r".+_(?P<arch>[^_]+)\.u?deb$")
for entry in changes.get('Files'):
directory = source.directory
if '/' in entry['section']:
component, section = entry['section'].split('/', 1)
directory = PATH.sub("/pool/%s/" % component, directory)
arch = ARCH.match(entry['name']).groupdict().get('arch')
if arch not in binaries:
return reject_changes(session, changes, "bad-architecture-of-file")
deb = Deb(binary=binaries[arch], directory=directory, filename=entry['name'])
session.add(deb)
# OK. Let's make sure we can add this.
try:
repo = Repo(job.group.repo_path)
repo.add_changes(changes)
except RepoSourceAlreadyRegistered:
return reject_changes(session, changes, 'stupid-source-thing')
for binary in binaries.values():
emit('accept', 'binary', binary.debilize())
# OK. It's safely in the database and repo. Let's cleanup.
for fp in [changes.get_changes_file()] + changes.get_files():
os.unlink(fp)
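# A minimal sketch (not part of debile) of how the architecture is recovered from
# a .deb entry name in accept_binary_changes() above. The file name is invented
# and the pattern is a raw-string copy of the ARCH regex used there; the helper
# exists only for illustration.
def _arch_regex_example():
    arch_re = re.compile(r".+_(?P<arch>[^_]+)\.u?deb$")
    match = arch_re.match("hello_2.10-2_amd64.deb")
    return match.groupdict().get("arch")  # -> "amd64"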
|
|
"""Get ride details and liveboard details for NMBS (Belgian railway)."""
import logging
from pyrail import iRail
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
TIME_MINUTES,
)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "NMBS"
DEFAULT_ICON = "mdi:train"
DEFAULT_ICON_ALERT = "mdi:alert-octagon"
CONF_STATION_FROM = "station_from"
CONF_STATION_TO = "station_to"
CONF_STATION_LIVE = "station_live"
CONF_EXCLUDE_VIAS = "exclude_vias"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATION_FROM): cv.string,
vol.Required(CONF_STATION_TO): cv.string,
vol.Optional(CONF_STATION_LIVE): cv.string,
vol.Optional(CONF_EXCLUDE_VIAS, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
}
)
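# A minimal sketch (not taken from the Home Assistant documentation) of a mapping
# with the keys PLATFORM_SCHEMA above accepts. The station names are invented
# placeholders and this helper is never used by the integration itself.
def _example_platform_config():
    return {
        CONF_STATION_FROM: "Brussel-Zuid/Bruxelles-Midi",
        CONF_STATION_TO: "Antwerpen-Centraal",
        CONF_STATION_LIVE: "Brussel-Zuid/Bruxelles-Midi",  # optional: adds a liveboard sensor
        CONF_EXCLUDE_VIAS: False,
        CONF_NAME: DEFAULT_NAME,
        CONF_SHOW_ON_MAP: False,
    }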
def get_time_until(departure_time=None):
"""Calculate the time between now and a train's departure time."""
if departure_time is None:
return 0
delta = dt_util.utc_from_timestamp(int(departure_time)) - dt_util.now()
return round(delta.total_seconds() / 60)
def get_delay_in_minutes(delay=0):
"""Get the delay in minutes from a delay in seconds."""
return round(int(delay) / 60)
def get_ride_duration(departure_time, arrival_time, delay=0):
"""Calculate the total travel time in minutes."""
duration = dt_util.utc_from_timestamp(
int(arrival_time)
) - dt_util.utc_from_timestamp(int(departure_time))
duration_time = int(round(duration.total_seconds() / 60))
return duration_time + get_delay_in_minutes(delay)
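# A minimal sketch (not part of the integration) of the helper math above,
# assuming the iRail API reports times as epoch seconds and delays in seconds;
# the timestamps below are invented and the helper is for illustration only.
def _duration_helpers_example():
    departure, arrival, delay = 1600000000, 1600002700, 120  # 45 min ride, 2 min delay
    assert get_delay_in_minutes(delay) == 2
    assert get_ride_duration(departure, arrival, delay) == 47  # 45 + 2 minutes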
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NMBS sensor with iRail API."""
api_client = iRail()
name = config[CONF_NAME]
show_on_map = config[CONF_SHOW_ON_MAP]
station_from = config[CONF_STATION_FROM]
station_to = config[CONF_STATION_TO]
station_live = config.get(CONF_STATION_LIVE)
excl_vias = config[CONF_EXCLUDE_VIAS]
sensors = [
NMBSSensor(api_client, name, show_on_map, station_from, station_to, excl_vias)
]
if station_live is not None:
sensors.append(
NMBSLiveBoard(api_client, station_live, station_from, station_to)
)
add_entities(sensors, True)
class NMBSLiveBoard(SensorEntity):
"""Get the next train from a station's liveboard."""
def __init__(self, api_client, live_station, station_from, station_to):
"""Initialize the sensor for getting liveboard data."""
self._station = live_station
self._api_client = api_client
self._station_from = station_from
self._station_to = station_to
self._attrs = {}
self._state = None
@property
def name(self):
"""Return the sensor default name."""
return f"NMBS Live ({self._station})"
@property
def unique_id(self):
"""Return a unique ID."""
unique_id = f"{self._station}_{self._station_from}_{self._station_to}"
return f"nmbs_live_{unique_id}"
@property
def icon(self):
"""Return the default icon or an alert icon if delays."""
if self._attrs and int(self._attrs["delay"]) > 0:
return DEFAULT_ICON_ALERT
return DEFAULT_ICON
@property
def state(self):
"""Return sensor state."""
return self._state
@property
def extra_state_attributes(self):
"""Return the sensor attributes if data is available."""
if self._state is None or not self._attrs:
return None
delay = get_delay_in_minutes(self._attrs["delay"])
departure = get_time_until(self._attrs["time"])
attrs = {
"departure": f"In {departure} minutes",
"departure_minutes": departure,
"extra_train": int(self._attrs["isExtra"]) > 0,
"vehicle_id": self._attrs["vehicle"],
"monitored_station": self._station,
ATTR_ATTRIBUTION: "https://api.irail.be/",
}
if delay > 0:
attrs["delay"] = f"{delay} minutes"
attrs["delay_minutes"] = delay
return attrs
def update(self):
"""Set the state equal to the next departure."""
liveboard = self._api_client.get_liveboard(self._station)
if liveboard is None or not liveboard["departures"]:
return
next_departure = liveboard["departures"]["departure"][0]
self._attrs = next_departure
self._state = (
f"Track {next_departure['platform']} - {next_departure['station']}"
)
class NMBSSensor(SensorEntity):
"""Get the the total travel time for a given connection."""
def __init__(
self, api_client, name, show_on_map, station_from, station_to, excl_vias
):
"""Initialize the NMBS connection sensor."""
self._name = name
self._show_on_map = show_on_map
self._api_client = api_client
self._station_from = station_from
self._station_to = station_to
self._excl_vias = excl_vias
self._attrs = {}
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TIME_MINUTES
@property
def icon(self):
"""Return the sensor default icon or an alert icon if any delay."""
if self._attrs:
delay = get_delay_in_minutes(self._attrs["departure"]["delay"])
if delay > 0:
return "mdi:alert-octagon"
return "mdi:train"
@property
def extra_state_attributes(self):
"""Return sensor attributes if data is available."""
if self._state is None or not self._attrs:
return None
delay = get_delay_in_minutes(self._attrs["departure"]["delay"])
departure = get_time_until(self._attrs["departure"]["time"])
attrs = {
"departure": f"In {departure} minutes",
"departure_minutes": departure,
"destination": self._station_to,
"direction": self._attrs["departure"]["direction"]["name"],
"platform_arriving": self._attrs["arrival"]["platform"],
"platform_departing": self._attrs["departure"]["platform"],
"vehicle_id": self._attrs["departure"]["vehicle"],
ATTR_ATTRIBUTION: "https://api.irail.be/",
}
if self._show_on_map and self.station_coordinates:
attrs[ATTR_LATITUDE] = self.station_coordinates[0]
attrs[ATTR_LONGITUDE] = self.station_coordinates[1]
if self.is_via_connection and not self._excl_vias:
via = self._attrs["vias"]["via"][0]
attrs["via"] = via["station"]
attrs["via_arrival_platform"] = via["arrival"]["platform"]
attrs["via_transfer_platform"] = via["departure"]["platform"]
attrs["via_transfer_time"] = get_delay_in_minutes(
via["timeBetween"]
) + get_delay_in_minutes(via["departure"]["delay"])
if delay > 0:
attrs["delay"] = f"{delay} minutes"
attrs["delay_minutes"] = delay
return attrs
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def station_coordinates(self):
"""Get the lat, long coordinates for station."""
if self._state is None or not self._attrs:
return []
latitude = float(self._attrs["departure"]["stationinfo"]["locationY"])
longitude = float(self._attrs["departure"]["stationinfo"]["locationX"])
return [latitude, longitude]
@property
def is_via_connection(self):
"""Return whether the connection goes through another station."""
if not self._attrs:
return False
return "vias" in self._attrs and int(self._attrs["vias"]["number"]) > 0
def update(self):
"""Set the state to the duration of a connection."""
connections = self._api_client.get_connections(
self._station_from, self._station_to
)
if connections is None or not connections["connection"]:
return
if int(connections["connection"][0]["departure"]["left"]) > 0:
next_connection = connections["connection"][1]
else:
next_connection = connections["connection"][0]
self._attrs = next_connection
if self._excl_vias and self.is_via_connection:
            _LOGGER.debug(
                "Skipping update of NMBSSensor because this connection is a via"
            )
return
duration = get_ride_duration(
next_connection["departure"]["time"],
next_connection["arrival"]["time"],
next_connection["departure"]["delay"],
)
self._state = duration
|
|
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from lxml import etree
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
import six
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit.virt import fakelibosinfo
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import host
from nova.virt.libvirt import vif
CONF = cfg.CONF
class LibvirtVifTestCase(test.NoDBTestCase):
gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
routes=None,
dhcp_server='191.168.1.1')
gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gateway_bridge_6,
ips=None,
routes=None)
network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface='eth0',
vlan=99, mtu=9000)
vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_BRIDGE,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface=None,
vlan=99, mtu=1000)
network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface=None,
vlan=99)
vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'ovs_hybrid_plug': True,
'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid=None)
vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid='aaa')
vif_ivs_filter_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_filter_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={
'port_filter': True,
'ovs_hybrid_plug': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0',
vlan=99)
vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBH,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_PROFILEID:
'MyPortProfile'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: '100'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hostdev_physical = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HOSTDEV,
vnic_type=
network_model.VNIC_TYPE_DIRECT_PHYSICAL,
ovs_interfaceid=None,
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb_macvtap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_MACVTAP,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: '100'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBG,
ovs_interfaceid=None,
qbg_params=network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff"))
network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4],
interface='eth0')
network_vrouter = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0')
vif_vrouter = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_vrouter,
type=network_model.VIF_TYPE_VROUTER,
devname='tap-xxx-yyy-zzz')
vif_ib_hostdev = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_IB_HOSTDEV,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: '100'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_midonet,
type=network_model.VIF_TYPE_MIDONET,
devname='tap-xxx-yyy-zzz')
vif_tap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
type=network_model.VIF_TYPE_TAP,
devname='tap-xxx-yyy-zzz')
vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_IOVISOR,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
vif_vhostuser = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details = {network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
'/tmp/vif-xxx-yyy-zzz'}
)
vif_vhostuser_fp = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
devname='tap-xxx-yyy-zzz',
details = {network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
'/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True},
)
vif_vhostuser_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details = {network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
'/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
ovs_interfaceid='aaa-bbb-ccc', mtu=1500
)
vif_vhostuser_ovs_fp = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details = {network_model.VIF_DETAILS_VHOSTUSER_MODE: 'server',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
'/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True,
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc'
)
vif_vhostuser_ovs_fp_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details = {'ovs_hybrid_plug': True,
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'server',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
'/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True,
network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc'
)
vif_vhostuser_no_path = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details = {network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client'})
vif_macvtap_vlan = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={network_model.VIF_DETAILS_VLAN: '1',
network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0.1',
network_model.VIF_DETAILS_MACVTAP_MODE: 'vepa'})
vif_macvtap_flat = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_MODE: 'bridge'})
vif_macvtap_exception = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP)
instance = objects.Instance(id=1,
uuid='f0000000-0000-0000-0000-000000000001',
project_id=723)
bandwidth = {
'quota:vif_inbound_peak': '200',
'quota:vif_outbound_peak': '20',
'quota:vif_inbound_average': '100',
'quota:vif_outbound_average': '10',
'quota:vif_inbound_burst': '300',
'quota:vif_outbound_burst': '30'
}
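    # These quota:vif_* extra specs are attached to the test flavor in
    # _get_instance_xml() and asserted against the generated libvirt
    # <bandwidth> element in _test_model_qemu().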
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.flags(allow_same_net_traffic=True)
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def _get_node(self, xml):
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
return ret[0]
def _assertMacEquals(self, node, vif):
mac = node.find("mac").get("address")
self.assertEqual(mac, vif['address'])
def _assertTypeEquals(self, node, type, attr, source, br_want,
prefix=None):
self.assertEqual(node.get("type"), type)
br_name = node.find(attr).get(source)
if prefix is None:
self.assertEqual(br_name, br_want)
else:
self.assertTrue(br_name.startswith(prefix))
def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
br_want=None, size=0, prefix=None):
ret = node.findall("filterref")
self.assertEqual(len(ret), size)
self._assertTypeEquals(node, type, attr, source, br_want,
prefix)
self._assertMacEquals(node, vif)
def _assertModel(self, xml, model_want=None, driver_want=None):
node = self._get_node(xml)
if model_want is None:
ret = node.findall("model")
self.assertEqual(len(ret), 0)
else:
model = node.find("model").get("type")
self.assertEqual(model, model_want)
if driver_want is None:
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
else:
driver = node.find("driver").get("name")
self.assertEqual(driver, driver_want)
def _assertTypeAndPciEquals(self, node, type, vif):
self.assertEqual(node.get("type"), type)
self._assertPciEqual(node, vif, type="pci")
def _assertPciEqual(self, node, vif, type=None):
address = node.find("source").find("address")
if type:
addr_type = address.get("type")
self.assertEqual(type, addr_type)
pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
'domain': address.get("domain")[2:],
'bus': address.get("bus")[2:],
'slot': address.get("slot")[2:],
'func': address.get("function")[2:]}
pci_slot_want = vif['profile']['pci_slot']
self.assertEqual(pci_slot, pci_slot_want)
def _get_conf(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
return conf
def _get_instance_xml(self, driver, vif, image_meta=None, flavor=None):
if flavor is None:
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=1,
root_gb=0,
ephemeral_gb=0,
swap=0,
extra_specs=dict(self.bandwidth),
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
conf = self._get_conf()
hostimpl = host.Host("qemu:///system")
nic = driver.get_config(self.instance, vif, image_meta,
flavor, CONF.libvirt.virt_type,
hostimpl)
conf.add_device(nic)
return conf.to_xml()
def test_virtio_multiqueue(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=4,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta, flavor)
node = self._get_node(xml)
driver = node.find("driver").get("name")
self.assertEqual(driver, 'vhost')
queues = node.find("driver").get("queues")
self.assertEqual(queues, '4')
def test_multiple_nics(self):
conf = self._get_conf()
# Tests multiple nic configuration and that target_dev is
# set for each
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(use_virtio_for_bridges=False,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_model_kvm(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_kvm_qemu_custom(self):
for virt in ('kvm', 'qemu'):
self.flags(use_virtio_for_bridges=True,
virt_type=virt,
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
supported = (network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN)
for model in supported:
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': model}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta)
self._assertModel(xml, model)
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config')
def test_model_with_osinfo(self, mock_set):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.osinfo.libosinfo',
fakelibosinfo))
d = vif.LibvirtGenericVIFDriver()
image_meta = {'properties': {'os_name': 'fedora22'}}
image_meta = objects.ImageMeta.from_dict(image_meta)
d.get_base_config(None, self.vif_bridge, image_meta,
None, 'kvm')
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
'virtio', None, None)
def _test_model_qemu(self, *vif_objs, **kw):
libvirt_version = kw.get('libvirt_version')
self.flags(use_virtio_for_bridges=True,
virt_type='qemu',
group='libvirt')
for vif_obj in vif_objs:
d = vif.LibvirtGenericVIFDriver()
if libvirt_version is not None:
d.libvirt_version = libvirt_version
xml = self._get_instance_xml(d, vif_obj)
doc = etree.fromstring(xml)
bandwidth = doc.find('./devices/interface/bandwidth')
self.assertNotEqual(bandwidth, None)
inbound = bandwidth.find('inbound')
self.assertEqual(inbound.get("average"),
self.bandwidth['quota:vif_inbound_average'])
self.assertEqual(inbound.get("peak"),
self.bandwidth['quota:vif_inbound_peak'])
self.assertEqual(inbound.get("burst"),
self.bandwidth['quota:vif_inbound_burst'])
outbound = bandwidth.find('outbound')
self.assertEqual(outbound.get("average"),
self.bandwidth['quota:vif_outbound_average'])
self.assertEqual(outbound.get("peak"),
self.bandwidth['quota:vif_outbound_peak'])
self.assertEqual(outbound.get("burst"),
self.bandwidth['quota:vif_outbound_burst'])
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
def test_model_qemu_no_firewall(self):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_ovs,
)
def test_model_qemu_iptables(self):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_ovs,
self.vif_ivs,
self.vif_8021qbg,
self.vif_iovisor
)
def test_model_xen(self):
self.flags(use_virtio_for_bridges=True,
virt_type='xen',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_generic_driver_none(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.vif_none)
def _check_bridge_driver(self, d, vif, br_want):
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_bridge, br_want, 1)
def test_generic_driver_bridge(self):
d = vif.LibvirtGenericVIFDriver()
self._check_bridge_driver(d,
self.vif_bridge,
self.vif_bridge['network']['bridge'])
def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, prefix=dev_prefix)
script = node.find("script").get("path")
self.assertEqual(script, "")
def test_unplug_ivs_ethernet(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
delete.side_effect = processutils.ProcessExecutionError
d.unplug(self.instance, self.vif_ivs)
def _test_plug_ovs_hybrid(self, ipv6_exists):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy', 1000)],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1])],
'create_ovs_vif_port': [mock.call(
'br0', 'qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'f0000000-0000-0000-0000-000000000001',
1000)]
}
# The disable_ipv6 call needs to be added in the middle, if required
if ipv6_exists:
calls['execute'].extend([
mock.call('tee', ('/proc/sys/net/ipv6/conf'
'/qbrvif-xxx-yyy/disable_ipv6'),
process_input='1', run_as_root=True,
check_exit_code=[0, 1])])
calls['execute'].extend([
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True)])
with test.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ovs_vif_port'),
mock.patch.object(os.path, 'exists', return_value=ipv6_exists)
) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port,
path_exists):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
def test_plug_ovs_hybrid_ipv6(self):
self._test_plug_ovs_hybrid(ipv6_exists=True)
def test_plug_ovs_hybrid_no_ipv6(self):
self._test_plug_ovs_hybrid(ipv6_exists=False)
def test_unplug_ovs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
}
with test.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=True),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ovs_vif_port')
) as (device_exists, execute, delete_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
execute.assert_has_calls(calls['execute'])
delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
@mock.patch.object(utils, 'execute')
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
@mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
def _test_hw_veb_op(self, op, vlan, mock_get_vf_num, mock_get_ifname,
mock_execute):
mock_get_ifname.side_effect = ['eth1', 'eth13']
exit_code = [0, 2, 254]
port_state = 'up' if vlan > 0 else 'down'
calls = {
'get_ifname':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'],
pf_interface=True),
mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'get_vf_num':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'execute': [mock.call('ip', 'link', 'set', 'eth1',
'vf', 1, 'mac',
self.vif_hw_veb_macvtap['address'],
'vlan', vlan,
run_as_root=True,
check_exit_code=exit_code),
mock.call('ip', 'link', 'set',
'eth13', port_state,
run_as_root=True,
check_exit_code=exit_code)]
}
op(self.instance, self.vif_hw_veb_macvtap)
mock_get_ifname.assert_has_calls(calls['get_ifname'])
mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
mock_execute.assert_has_calls(calls['execute'])
def test_plug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(
d.plug,
self.vif_hw_veb_macvtap['details'][network_model.VIF_DETAILS_VLAN])
def test_unplug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(d.unplug, 0)
def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
}
with test.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(linux_net, 'delete_ovs_vif_port')
) as (device_exists, delete_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
def test_plug_ivs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy', None)],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('tee', ('/proc/sys/net/ipv6/conf'
'/qbrvif-xxx-yyy/disable_ipv6'),
process_input='1', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True)],
'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'f0000000-0000-0000-0000-000000000001')]
}
with test.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ivs_vif_port'),
mock.patch.object(os.path, 'exists', return_value=True)
) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port,
path_exists):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_ivs)
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
def test_unplug_ivs_hybrid(self):
calls = {
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
}
with test.nested(
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ivs_vif_port')
) as (execute, delete_ivs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_ivs)
execute.assert_has_calls(calls['execute'])
delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug(self.instance, self.vif_ivs)
def test_unplug_iovisor(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug(self.instance, self.vif_ivs)
@mock.patch('nova.network.linux_net.device_exists')
def test_plug_iovisor(self, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.plug(self.instance, self.vif_ivs)
def test_unplug_vrouter_with_details(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
d.unplug(self.instance, self.vif_vrouter)
execute.assert_called_once_with(
'vrouter-port-control',
'--oper=delete --uuid=vif-xxx-yyy-zzz',
run_as_root=True)
def test_plug_vrouter_with_details(self):
d = vif.LibvirtGenericVIFDriver()
instance = mock.Mock()
instance.name = 'instance-name'
instance.uuid = '46a4308b-e75a-4f90-a34a-650c86ca18b2'
instance.project_id = 'b168ea26fa0c49c1a84e1566d9565fa5'
instance.display_name = 'instance1'
with mock.patch.object(utils, 'execute') as execute:
d.plug(instance, self.vif_vrouter)
execute.assert_has_calls([
mock.call('ip', 'tuntap', 'add', 'tap-xxx-yyy-zzz', 'mode',
'tap', run_as_root=True, check_exit_code=[0, 2, 254]),
mock.call('ip', 'link', 'set', 'tap-xxx-yyy-zzz', 'up',
run_as_root=True, check_exit_code=[0, 2, 254]),
mock.call('vrouter-port-control',
'--oper=add --uuid=vif-xxx-yyy-zzz '
'--instance_uuid=46a4308b-e75a-4f90-a34a-650c86ca18b2 '
'--vn_uuid=network-id-xxx-yyy-zzz '
'--vm_project_uuid=b168ea26fa0c49c1a84e1566d9565fa5 '
'--ip_address=0.0.0.0 '
'--ipv6_address=None '
'--vm_name=instance1 '
'--mac=ca:fe:de:ad:be:ef '
'--tap_name=tap-xxx-yyy-zzz '
'--port_type=NovaVMPort '
'--tx_vlan_id=-1 '
'--rx_vlan_id=-1', run_as_root=True)])
def test_ivs_ethernet_driver(self):
d = vif.LibvirtGenericVIFDriver()
self._check_ivs_ethernet_driver(d,
self.vif_ivs,
"tap")
def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
vif, vif['devname'])
def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, "br0")
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
def test_generic_ovs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ovs['ovs_interfaceid']
self._check_ovs_virtualport_driver(d,
self.vif_ovs,
want_iface_id)
def test_generic_ivs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ivs['ovs_interfaceid']
self._check_ivs_virtualport_driver(d,
self.vif_ivs,
want_iface_id)
def test_ivs_plug_with_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs, br_want, 1)
def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs_filter_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_filter_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs_filter_hybrid, br_want, 0)
def test_ivs_plug_with_port_filter_hybrid_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_ivs_filter_direct['devname']
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_filter_direct)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs_filter_direct, br_want, 0)
def test_hybrid_plug_without_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ovs_hybrid, br_want, 0)
def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
self.vif_ovs_filter_cap, br_want)
def _check_neutron_hybrid_driver(self, d, vif, br_want):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, br_want, 1)
def test_generic_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ovs,
br_want)
def test_ivs_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ivs,
br_want)
def test_ib_hostdev_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ib_hostdev)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_ib_hostdev)
def test_midonet_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_midonet)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_midonet, br_want)
def test_tap_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_tap['devname']
xml = self._get_instance_xml(d, self.vif_tap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_tap, br_want)
@mock.patch('nova.network.linux_net.device_exists')
def test_plug_tap(self, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_tap)
def test_unplug_tap(self):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_tap)
def test_generic_8021qbh_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbh)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
self._assertMacEquals(node, self.vif_8021qbh)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
details = self.vif_8021qbh["details"]
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
details[network_model.VIF_DETAILS_PROFILEID])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_hw_veb_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
self._assertMacEquals(node, self.vif_hw_veb)
vlan = node.find("vlan").find("tag").get("id")
vlan_want = self.vif_hw_veb["details"]["vlan"]
self.assertEqual(vlan, vlan_want)
def test_hostdev_physical_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hostdev_physical)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
def test_hw_veb_driver_macvtap(self, mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_hw_veb_macvtap)
vlan = node.find("vlan")
self.assertIsNone(vlan)
def test_driver_macvtap_vlan(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_vlan)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0.1")
self._assertTypeEquals(node, "direct", "source",
"mode", "vepa")
self._assertMacEquals(node, self.vif_macvtap_vlan)
def test_driver_macvtap_flat(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_flat)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0")
self._assertTypeEquals(node, "direct", "source",
"mode", "bridge")
self._assertMacEquals(node, self.vif_macvtap_flat)
def test_driver_macvtap_exception(self):
d = vif.LibvirtGenericVIFDriver()
e = self.assertRaises(exception.VifDetailsMissingMacvtapParameters,
self._get_instance_xml,
d,
self.vif_macvtap_exception)
self.assertIn('macvtap_source', six.text_type(e))
self.assertIn('macvtap_mode', six.text_type(e))
self.assertIn('physical_interface', six.text_type(e))
@mock.patch.object(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan')
def test_macvtap_plug_vlan(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_vlan)
ensure_vlan_mock.assert_called_once_with('1', 'eth0',
interface='eth0.1')
@mock.patch.object(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan')
def test_macvtap_plug_flat(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_flat)
self.assertFalse(ensure_vlan_mock.called)
def test_generic_iovisor_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_ivs['devname']
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, br_want)
def test_generic_8021qbg_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbg)
node = self._get_node(xml)
self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
self._assertMacEquals(node, self.vif_8021qbg)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbg")
manager_id_found = False
type_id_found = False
typeversion_id_found = False
instance_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.vif_8021qbg['qbg_params']
manager_id = p_elem.get("managerid", None)
type_id = p_elem.get("typeid", None)
typeversion_id = p_elem.get("typeidversion", None)
instance_id = p_elem.get("instanceid", None)
if manager_id:
self.assertEqual(manager_id,
wantparams['managerid'])
manager_id_found = True
if type_id:
self.assertEqual(type_id,
wantparams['typeid'])
type_id_found = True
if typeversion_id:
self.assertEqual(typeversion_id,
wantparams['typeidversion'])
typeversion_id_found = True
if instance_id:
self.assertEqual(instance_id,
wantparams['instanceid'])
instance_id_found = True
self.assertTrue(manager_id_found)
self.assertTrue(type_id_found)
self.assertTrue(typeversion_id_found)
self.assertTrue(instance_id_found)
def test_vhostuser_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_vhostuser)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/vif-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_vhostuser_no_queues(self):
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
xml = self._get_instance_xml(d, self.vif_vhostuser, image_meta)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertMacEquals(node, self.vif_vhostuser)
driver = node.find("driver")
        self.assertIsNone(driver)
def test_vhostuser_driver_no_path(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.VifDetailsMissingVhostuserSockPath,
self._get_instance_xml,
d,
self.vif_vhostuser_no_path)
def test_vhostuser_driver_ovs(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_vhostuser_ovs)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser_ovs)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
@mock.patch.object(linux_net, 'create_fp_dev')
def test_vhostuser_fp_plug(self, mock_create_fp_dev):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_vhostuser_fp)
mock_create_fp_dev.assert_has_calls(
[mock.call('tap-xxx-yyy-zzz', '/tmp/usv-xxx-yyy-zzz', 'client')])
@mock.patch.object(linux_net, 'delete_fp_dev')
def test_vhostuser_fp_unplug(self, mock_delete_fp_dev):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_vhostuser_fp)
mock_delete_fp_dev.assert_has_calls([mock.call('tap-xxx-yyy-zzz')])
def test_vhostuser_ovs_plug(self):
calls = {
'create_ovs_vif_port': [
mock.call(
'br0', 'usv-xxx-yyy-zzz',
'aaa-bbb-ccc', 'ca:fe:de:ad:be:ef',
'f0000000-0000-0000-0000-000000000001', 9000,
interface_type=network_model.OVS_VHOSTUSER_INTERFACE_TYPE
)]
}
with mock.patch.object(linux_net,
'create_ovs_vif_port') as create_ovs_vif_port:
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_vhostuser_ovs)
create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
def test_vhostuser_ovs_unplug(self):
calls = {
'delete_ovs_vif_port': [mock.call('br0', 'usv-xxx-yyy-zzz')]
}
with mock.patch.object(linux_net,
'delete_ovs_vif_port') as delete_port:
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_vhostuser_ovs)
delete_port.assert_has_calls(calls['delete_ovs_vif_port'])
def test_vhostuser_ovs_fp_plug(self):
calls = {
'create_fp_dev': [mock.call('tap-xxx-yyy-zzz',
'/tmp/usv-xxx-yyy-zzz',
'client')],
'create_ovs_vif_port': [mock.call(
'br0', 'tap-xxx-yyy-zzz',
'aaa-bbb-ccc', 'ca:fe:de:ad:be:ef',
'f0000000-0000-0000-0000-000000000001',
9000)]
}
with test.nested(
mock.patch.object(linux_net, 'create_fp_dev'),
mock.patch.object(linux_net, 'create_ovs_vif_port'),
) as (create_fp_dev, create_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.plug_vhostuser(self.instance, self.vif_vhostuser_ovs_fp)
create_fp_dev.assert_has_calls(calls['create_fp_dev'])
create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
def test_vhostuser_ovs_fp_unplug(self):
calls = {
'delete_ovs_vif_port': [mock.call('br0', 'tap-xxx-yyy-zzz',
False)],
'delete_fp_dev': [mock.call('tap-xxx-yyy-zzz')],
}
with test.nested(
mock.patch.object(linux_net, 'delete_ovs_vif_port'),
mock.patch.object(linux_net, 'delete_fp_dev')
) as (delete_ovs_port, delete_fp_dev):
d = vif.LibvirtGenericVIFDriver()
d.unplug_vhostuser(None, self.vif_vhostuser_ovs_fp)
delete_ovs_port.assert_has_calls(calls['delete_ovs_vif_port'])
delete_fp_dev.assert_has_calls(calls['delete_fp_dev'])
def test_vhostuser_ovs_fp_hybrid_plug(self):
calls = {
'create_fp_dev': [mock.call('tap-xxx-yyy-zzz',
'/tmp/usv-xxx-yyy-zzz',
'client')],
'device_exists': [mock.call('tap-xxx-yyy-zzz'),
mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy', 9000)],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'tap-xxx-yyy-zzz', run_as_root=True)],
'create_ovs_vif_port': [mock.call(
'br0', 'qvovif-xxx-yyy',
'aaa-bbb-ccc', 'ca:fe:de:ad:be:ef',
'f0000000-0000-0000-0000-000000000001',
9000)]
}
with test.nested(
mock.patch.object(linux_net, 'create_fp_dev'),
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ovs_vif_port')
) as (create_fp_dev, device_exists, execute, _create_veth_pair,
create_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.plug_vhostuser(self.instance, self.vif_vhostuser_ovs_fp_hybrid)
create_fp_dev.assert_has_calls(calls['create_fp_dev'])
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
def test_vhostuser_ovs_fp_hybrid_unplug(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')],
'delete_fp_dev': [mock.call('tap-xxx-yyy-zzz')]
}
with test.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=True),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ovs_vif_port'),
mock.patch.object(linux_net, 'delete_fp_dev')
) as (device_exists, execute, delete_ovs_vif_port, delete_fp_dev):
d = vif.LibvirtGenericVIFDriver()
d.unplug_vhostuser(None, self.vif_vhostuser_ovs_fp_hybrid)
device_exists.assert_has_calls(calls['device_exists'])
execute.assert_has_calls(calls['execute'])
delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
delete_fp_dev.assert_has_calls(calls['delete_fp_dev'])
|
|
#!/usr/bin/env python
import os
import rlp
from ethereum import utils
from ethereum.utils import to_string
from ethereum.utils import is_string
import copy
from rlp.utils import decode_hex, encode_hex, ascii_chr, str_to_bytes
import sys
bin_to_nibbles_cache = {}
hti = {}
for i, c in enumerate(b'0123456789abcdef'):
hti[c] = i
def bin_to_nibbles(s):
"""convert string s to nibbles (half-bytes)
>>> bin_to_nibbles("")
[]
>>> bin_to_nibbles("h")
[6, 8]
>>> bin_to_nibbles("he")
[6, 8, 6, 5]
>>> bin_to_nibbles("hello")
[6, 8, 6, 5, 6, 12, 6, 12, 6, 15]
"""
return [hti[c] for c in encode_hex(s)]
def nibbles_to_bin(nibbles):
if any(x > 15 or x < 0 for x in nibbles):
raise Exception("nibbles can only be [0,..15]")
if len(nibbles) % 2:
raise Exception("nibbles must be of even numbers")
res = b''
for i in range(0, len(nibbles), 2):
res += ascii_chr(16 * nibbles[i] + nibbles[i + 1])
return res
NIBBLE_TERMINATOR = 16
RECORDING = 1
NONE = 0
VERIFYING = -1
ZERO_ENCODED = utils.encode_int(0)
proving = False
class ProofConstructor():
def __init__(self):
self.mode = []
self.nodes = []
self.exempt = []
def push(self, mode, nodes=[]):
global proving
proving = True
self.mode.append(mode)
self.exempt.append(set())
if mode == VERIFYING:
self.nodes.append(set([rlp.encode(x) for x in nodes]))
else:
self.nodes.append(set())
def pop(self):
global proving
self.mode.pop()
self.nodes.pop()
self.exempt.pop()
if not self.mode:
proving = False
def get_nodelist(self):
return list(map(rlp.decode, list(self.nodes[-1])))
def get_nodes(self):
return self.nodes[-1]
def add_node(self, node):
node = rlp.encode(node)
if node not in self.exempt[-1]:
self.nodes[-1].add(node)
def add_exempt(self, node):
self.exempt[-1].add(rlp.encode(node))
def get_mode(self):
return self.mode[-1]
proof = ProofConstructor()
class InvalidSPVProof(Exception):
pass
def with_terminator(nibbles):
nibbles = nibbles[:]
if not nibbles or nibbles[-1] != NIBBLE_TERMINATOR:
nibbles.append(NIBBLE_TERMINATOR)
return nibbles
def without_terminator(nibbles):
nibbles = nibbles[:]
if nibbles and nibbles[-1] == NIBBLE_TERMINATOR:
del nibbles[-1]
return nibbles
def adapt_terminator(nibbles, has_terminator):
if has_terminator:
return with_terminator(nibbles)
else:
return without_terminator(nibbles)
def pack_nibbles(nibbles):
"""pack nibbles to binary
:param nibbles: a nibbles sequence. may have a terminator
"""
if nibbles[-1:] == [NIBBLE_TERMINATOR]:
flags = 2
nibbles = nibbles[:-1]
else:
flags = 0
oddlen = len(nibbles) % 2
flags |= oddlen # set lowest bit if odd number of nibbles
if oddlen:
nibbles = [flags] + nibbles
else:
nibbles = [flags, 0] + nibbles
o = b''
for i in range(0, len(nibbles), 2):
o += ascii_chr(16 * nibbles[i] + nibbles[i + 1])
return o
def unpack_to_nibbles(bindata):
"""unpack packed binary data to nibbles
:param bindata: binary packed from nibbles
:return: nibbles sequence, may have a terminator
"""
o = bin_to_nibbles(bindata)
flags = o[0]
if flags & 2:
o.append(NIBBLE_TERMINATOR)
if flags & 1 == 1:
o = o[1:]
else:
o = o[2:]
return o
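# A minimal sketch of the hex-prefix encoding implemented by pack_nibbles /
# unpack_to_nibbles (values worked out from the flag logic above):
#
#   pack_nibbles([6, 8])                     == b'\x00\x68'   # even length, no terminator
#   pack_nibbles([6, 8, NIBBLE_TERMINATOR])  == b'\x20\x68'   # even length, with terminator
#   pack_nibbles([6, NIBBLE_TERMINATOR])     == b'\x36'       # odd length, with terminator
#   unpack_to_nibbles(b'\x20\x68')           == [6, 8, 16]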
def starts_with(full, part):
    ''' test whether the items in ``part`` are
    the leading items of ``full``
'''
if len(full) < len(part):
return False
return full[:len(part)] == part
(
NODE_TYPE_BLANK,
NODE_TYPE_LEAF,
NODE_TYPE_EXTENSION,
NODE_TYPE_BRANCH
) = tuple(range(4))
def is_key_value_type(node_type):
return node_type in [NODE_TYPE_LEAF,
NODE_TYPE_EXTENSION]
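# For reference, the four node shapes used below (child references are either
# a 32-byte hash stored in the db or, for nodes whose rlp encoding is shorter
# than 32 bytes, the node itself inlined):
#   blank:     b''
#   leaf:      [hex-prefix key (with terminator), value]
#   extension: [hex-prefix key (no terminator), child reference]
#   branch:    [child_0, ..., child_15, value]   # 17 items, indexed by nibble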
BLANK_NODE = b''
BLANK_ROOT = utils.sha3rlp(b'')
DEATH_ROW_OFFSET = 2**62
def transient_trie_exception(*args):
raise Exception("Transient trie")
class Trie(object):
def __init__(self, db, root_hash=BLANK_ROOT, transient=False):
        '''it also presents a dictionary-like interface
        :param db: key value database
        :param root_hash: blank or hash of the root node; a node has the form
            [key, value] or [v0,v1..v15,v]
'''
self.db = db # Pass in a database object directly
self.transient = transient
if self.transient:
self.update = self.get = self.delete = transient_trie_exception
self.set_root_hash(root_hash)
self.death_row_timeout = 5000
self.nodes_for_death_row = []
self.journal = []
# def __init__(self, dbfile, root_hash=BLANK_ROOT):
# '''it also present a dictionary like interface
# :param dbfile: key value database
# :root: blank or trie node in form of [key, value] or [v0,v1..v15,v]
# '''
# if isinstance(dbfile, str):
# dbfile = os.path.abspath(dbfile)
# self.db = DB(dbfile)
# else:
# self.db = dbfile # Pass in a database object directly
# self.set_root_hash(root_hash)
# For SPV proof production/verification purposes
def spv_grabbing(self, node):
global proving
if not proving:
pass
elif proof.get_mode() == RECORDING:
proof.add_node(copy.copy(node))
# print('recording %s' % encode_hex(utils.sha3(rlp.encode(node))))
elif proof.get_mode() == VERIFYING:
# print('verifying %s' % encode_hex(utils.sha3(rlp.encode(node))))
if rlp.encode(node) not in proof.get_nodes():
raise InvalidSPVProof("Proof invalid!")
def spv_storing(self, node):
global proving
if not proving:
pass
elif proof.get_mode() == RECORDING:
proof.add_exempt(copy.copy(node))
elif proof.get_mode() == VERIFYING:
proof.add_node(copy.copy(node))
@property
def root_hash(self):
'''always empty or a 32 bytes string
'''
return self.get_root_hash()
def get_root_hash(self):
if self.transient:
return self.transient_root_hash
if self.root_node == BLANK_NODE:
return BLANK_ROOT
assert isinstance(self.root_node, list)
val = rlp.encode(self.root_node)
key = utils.sha3(val)
self.spv_grabbing(self.root_node)
return key
def replace_root_hash(self, old_node, new_node):
# sys.stderr.write('rrh %r %r\n' % (old_node, new_node))
self._delete_node_storage(old_node, is_root=True)
self._encode_node(new_node, is_root=True)
self.root_node = new_node
# sys.stderr.write('nrh: %s\n' % self.root_hash.encode('hex'))
@root_hash.setter
def root_hash(self, value):
self.set_root_hash(value)
def set_root_hash(self, root_hash):
assert is_string(root_hash)
assert len(root_hash) in [0, 32]
if self.transient:
self.transient_root_hash = root_hash
return
if root_hash == BLANK_ROOT:
self.root_node = BLANK_NODE
return
# print repr(root_hash)
self.root_node = self._decode_to_node(root_hash)
# dummy to increase reference count
# self._encode_node(self.root_node)
def all_nodes(self, node=None):
proof.push(RECORDING)
self.get_root_hash()
self.to_dict()
o = proof.get_nodelist()
proof.pop()
return list(o)
# if node is None:
# node = self.root_node
# node_type = self._get_node_type(node)
# o = 1 if len(rlp.encode(node)) >= 32 else 0
# if node_type == NODE_TYPE_BRANCH:
# for item in node[:16]:
# o += self.total_node_count(self._decode_to_node(item))
# elif is_key_value_type(node_type):
# if node_type == NODE_TYPE_EXTENSION:
# o += self.total_node_count(self._decode_to_node(node[1]))
# return o
def clear(self):
''' clear all tree data
'''
self._delete_child_storage(self.root_node)
self._delete_node_storage(self.root_node)
self.root_node = BLANK_NODE
def _delete_child_storage(self, node):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BRANCH:
for item in node[:16]:
self._delete_child_storage(self._decode_to_node(item))
elif is_key_value_type(node_type):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_EXTENSION:
self._delete_child_storage(self._decode_to_node(node[1]))
def _encode_node(self, node, is_root=False):
if node == BLANK_NODE:
return BLANK_NODE
# assert isinstance(node, list)
rlpnode = rlp.encode(node)
if len(rlpnode) < 32 and not is_root:
return node
hashkey = utils.sha3(rlpnode)
self.db.inc_refcount(hashkey, rlpnode)
return hashkey
def _decode_to_node(self, encoded):
if encoded == BLANK_NODE:
return BLANK_NODE
if isinstance(encoded, list):
return encoded
o = rlp.decode(self.db.get(encoded))
self.spv_grabbing(o)
return o
def _get_node_type(self, node):
        ''' get node type
:param node: node in form of list, or BLANK_NODE
:return: node type
'''
if node == BLANK_NODE:
return NODE_TYPE_BLANK
if len(node) == 2:
nibbles = unpack_to_nibbles(node[0])
has_terminator = (nibbles and nibbles[-1] == NIBBLE_TERMINATOR)
return NODE_TYPE_LEAF if has_terminator\
else NODE_TYPE_EXTENSION
if len(node) == 17:
return NODE_TYPE_BRANCH
def _get(self, node, key):
""" get value inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
:return:
BLANK_NODE if does not exist, otherwise value or hash
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return BLANK_NODE
if node_type == NODE_TYPE_BRANCH:
# already reach the expected node
if not key:
return node[-1]
sub_node = self._decode_to_node(node[key[0]])
return self._get(sub_node, key[1:])
# key value node
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
return node[1] if key == curr_key else BLANK_NODE
if node_type == NODE_TYPE_EXTENSION:
# traverse child nodes
if starts_with(key, curr_key):
sub_node = self._decode_to_node(node[1])
return self._get(sub_node, key[len(curr_key):])
else:
return BLANK_NODE
def _update(self, node, key, value):
# sys.stderr.write('u\n')
""" update item inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
.. note:: key may be []
:param value: value string
:return: new node
        if this node is changed to a new node, its parent will take
        responsibility for *storing* the new node and deleting the old
        node storage
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
o = [pack_nibbles(with_terminator(key)), value]
self._encode_node(o)
return o
elif node_type == NODE_TYPE_BRANCH:
if not key:
node[-1] = value
else:
new_node = self._update_and_delete_storage(
self._decode_to_node(node[key[0]]),
key[1:], value)
node[key[0]] = self._encode_node(new_node)
self._delete_node_storage(new_node)
self._encode_node(node)
return node
elif is_key_value_type(node_type):
return self._update_kv_node(node, key, value)
def _update_and_delete_storage(self, node, key, value):
# sys.stderr.write('uds_start %r\n' % node)
old_node = copy.deepcopy(node)
new_node = self._update(node, key, value)
# sys.stderr.write('uds_mid %r\n' % old_node)
self._delete_node_storage(old_node)
# sys.stderr.write('uds_end %r\n' % old_node)
return new_node
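    # A summary of _update_kv_node below (descriptive only): it splits on the
    # longest common prefix between the node's stored key and the new key.
    #   * both remainders empty -> replace the value (leaf) or recurse into the
    #     child (extension)
    #   * only the stored key is consumed -> recurse (extension) or grow a
    #     branch node holding the old value in its value slot (leaf)
    #   * otherwise -> build a branch node at the divergence point; any shared
    #     prefix is kept in a wrapping extension node.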
def _update_kv_node(self, node, key, value):
node_type = self._get_node_type(node)
curr_key = without_terminator(unpack_to_nibbles(node[0]))
is_inner = node_type == NODE_TYPE_EXTENSION
# sys.stderr.write('ukv %r %r\n' % (key, value))
# find longest common prefix
prefix_length = 0
for i in range(min(len(curr_key), len(key))):
if key[i] != curr_key[i]:
break
prefix_length = i + 1
# sys.stderr.write('pl: %d\n' % prefix_length)
remain_key = key[prefix_length:]
remain_curr_key = curr_key[prefix_length:]
new_node_encoded = False
if remain_key == [] == remain_curr_key:
# sys.stderr.write('1111\n')
if not is_inner:
o = [node[0], value]
self._encode_node(o)
return o
new_node = self._update_and_delete_storage(
self._decode_to_node(node[1]), remain_key, value)
new_node_encoded = True
elif remain_curr_key == []:
if is_inner:
# sys.stderr.write('22221\n')
new_node = self._update_and_delete_storage(
self._decode_to_node(node[1]), remain_key, value)
new_node_encoded = True
# sys.stderr.write('22221e\n')
else:
# sys.stderr.write('22222\n')
new_node = [BLANK_NODE] * 17
new_node[-1] = node[1]
new_node[remain_key[0]] = self._encode_node([
pack_nibbles(with_terminator(remain_key[1:])),
value
])
else:
# sys.stderr.write('3333\n')
new_node = [BLANK_NODE] * 17
if len(remain_curr_key) == 1 and is_inner:
new_node[remain_curr_key[0]] = node[1]
else:
new_node[remain_curr_key[0]] = self._encode_node([
pack_nibbles(
adapt_terminator(remain_curr_key[1:], not is_inner)
),
node[1]
])
if remain_key == []:
new_node[-1] = value
else:
new_node[remain_key[0]] = self._encode_node([
pack_nibbles(with_terminator(remain_key[1:])), value
])
if prefix_length:
# sys.stderr.write('444441: %d\n' % prefix_length)
# create node for key prefix
o = [pack_nibbles(curr_key[:prefix_length]),
self._encode_node(new_node)]
if new_node_encoded:
self._delete_node_storage(new_node)
self._encode_node(o)
return o
else:
# sys.stderr.write('444442: %d\n' % prefix_length)
if not new_node_encoded:
self._encode_node(new_node)
return new_node
def _getany(self, node, reverse=False, path=[]):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return None
if node_type == NODE_TYPE_BRANCH:
if node[16]:
return [16]
scan_range = list(range(16))
if reverse:
scan_range.reverse()
for i in scan_range:
o = self._getany(self._decode_to_node(node[i]), path=path + [i])
if o:
return [i] + o
return None
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
return curr_key
if node_type == NODE_TYPE_EXTENSION:
curr_key = without_terminator(unpack_to_nibbles(node[0]))
sub_node = self._decode_to_node(node[1])
return self._getany(sub_node, path=path + curr_key)
def _iter(self, node, key, reverse=False, path=[]):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return None
elif node_type == NODE_TYPE_BRANCH:
if len(key):
sub_node = self._decode_to_node(node[key[0]])
o = self._iter(sub_node, key[1:], reverse, path + [key[0]])
if o:
return [key[0]] + o
if reverse:
scan_range = list(range(key[0] if len(key) else 0))
else:
scan_range = list(range(key[0] + 1 if len(key) else 0, 16))
for i in scan_range:
sub_node = self._decode_to_node(node[i])
o = self._getany(sub_node, reverse, path + [i])
if o:
return [i] + o
if reverse and node[16]:
return [16]
return None
descend_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
if reverse:
return descend_key if descend_key < key else None
else:
return descend_key if descend_key > key else None
if node_type == NODE_TYPE_EXTENSION:
# traverse child nodes
sub_node = self._decode_to_node(node[1])
sub_key = key[len(descend_key):]
if starts_with(key, descend_key):
o = self._iter(sub_node, sub_key, reverse, path + descend_key)
            elif descend_key > key[:len(descend_key)] and not reverse:
                # _getany takes (node, reverse, path); passing sub_key as an
                # extra positional argument would raise a TypeError here
                o = self._getany(sub_node, False, path + descend_key)
            elif descend_key < key[:len(descend_key)] and reverse:
                o = self._getany(sub_node, True, path + descend_key)
else:
o = None
return descend_key + o if o else None
def next(self, key):
key = bin_to_nibbles(key)
o = self._iter(self.root_node, key)
return nibbles_to_bin(o) if o else None
def prev(self, key):
key = bin_to_nibbles(key)
o = self._iter(self.root_node, key, reverse=True)
return nibbles_to_bin(o) if o else None
def _delete_node_storage(self, node, is_root=False):
'''delete storage
:param node: node in form of list, or BLANK_NODE
'''
if node == BLANK_NODE:
return
# assert isinstance(node, list)
encoded = rlp.encode(node)
if len(encoded) < 32 and not is_root:
return
"""
===== FIXME ====
in the current trie implementation two nodes can share identical subtrees
thus we can not safely delete nodes for now
"""
hashkey = utils.sha3(encoded)
self.db.dec_refcount(hashkey)
def _delete(self, node, key):
""" update item inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
.. note:: key may be []
:return: new node
        if this node is changed to a new node, its parent will take
        responsibility for *storing* the new node and deleting the old
        node storage
"""
# sys.stderr.write('del\n')
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return BLANK_NODE
if node_type == NODE_TYPE_BRANCH:
return self._delete_branch_node(node, key)
if is_key_value_type(node_type):
return self._delete_kv_node(node, key)
def _normalize_branch_node(self, node):
# sys.stderr.write('nbn\n')
'''node should have only one item changed
'''
not_blank_items_count = sum(1 for x in range(17) if node[x])
assert not_blank_items_count >= 1
if not_blank_items_count > 1:
self._encode_node(node)
return node
# now only one item is not blank
not_blank_index = [i for i, item in enumerate(node) if item][0]
# the value item is not blank
if not_blank_index == 16:
o = [pack_nibbles(with_terminator([])), node[16]]
self._encode_node(o)
return o
# normal item is not blank
sub_node = self._decode_to_node(node[not_blank_index])
sub_node_type = self._get_node_type(sub_node)
if is_key_value_type(sub_node_type):
            # collapse the sub node into this node; the new node keeps the
            # sub node's terminator, and the value does not change
self._delete_node_storage(sub_node)
new_key = [not_blank_index] + \
unpack_to_nibbles(sub_node[0])
o = [pack_nibbles(new_key), sub_node[1]]
self._encode_node(o)
return o
if sub_node_type == NODE_TYPE_BRANCH:
o = [pack_nibbles([not_blank_index]),
node[not_blank_index]]
self._encode_node(o)
return o
assert False
def _delete_and_delete_storage(self, node, key):
# sys.stderr.write('dds_start %r\n' % node)
old_node = copy.deepcopy(node)
new_node = self._delete(node, key)
# sys.stderr.write('dds_mid %r\n' % old_node)
self._delete_node_storage(old_node)
# sys.stderr.write('dds_end %r %r\n' % (old_node, new_node))
return new_node
def _delete_branch_node(self, node, key):
# sys.stderr.write('dbn\n')
# already reach the expected node
if not key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
o = self._delete_and_delete_storage(
self._decode_to_node(node[key[0]]), key[1:])
encoded_new_sub_node = self._encode_node(o)
self._delete_node_storage(o)
# sys.stderr.write('dbn2\n')
# if encoded_new_sub_nod == node[key[0]]:
# return node
node[key[0]] = encoded_new_sub_node
if encoded_new_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
self._encode_node(node)
return node
def _delete_kv_node(self, node, key):
# sys.stderr.write('dkv\n')
node_type = self._get_node_type(node)
assert is_key_value_type(node_type)
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if not starts_with(key, curr_key):
# key not found
self._encode_node(node)
return node
if node_type == NODE_TYPE_LEAF:
if key == curr_key:
return BLANK_NODE
else:
self._encode_node(node)
return node
# for inner key value type
new_sub_node = self._delete_and_delete_storage(
self._decode_to_node(node[1]), key[len(curr_key):])
# sys.stderr.write('nsn: %r %r\n' % (node, new_sub_node))
# if self._encode_node(new_sub_node) == node[1]:
# return node
# new sub node is BLANK_NODE
if new_sub_node == BLANK_NODE:
return BLANK_NODE
assert isinstance(new_sub_node, list)
# new sub node not blank, not value and has changed
new_sub_node_type = self._get_node_type(new_sub_node)
if is_key_value_type(new_sub_node_type):
# sys.stderr.write('nsn1\n')
            # collapse the sub node into this node; the new node keeps the
            # sub node's terminator, and the value does not change
new_key = curr_key + unpack_to_nibbles(new_sub_node[0])
o = [pack_nibbles(new_key), new_sub_node[1]]
self._delete_node_storage(new_sub_node)
self._encode_node(o)
return o
if new_sub_node_type == NODE_TYPE_BRANCH:
# sys.stderr.write('nsn2\n')
o = [pack_nibbles(curr_key), self._encode_node(new_sub_node)]
self._delete_node_storage(new_sub_node)
self._encode_node(o)
return o
# should be no more cases
assert False
def delete(self, key):
'''
:param key: a string with length of [0, 32]
'''
if not is_string(key):
raise Exception("Key must be string")
if len(key) > 32:
raise Exception("Max key length is 32")
old_root = copy.deepcopy(self.root_node)
self.root_node = self._delete_and_delete_storage(
self.root_node,
bin_to_nibbles(to_string(key)))
self.replace_root_hash(old_root, self.root_node)
def clear_all(self, node=None):
if node is None:
node = self.root_node
self._delete_node_storage(node)
if node == BLANK_NODE:
return
node_type = self._get_node_type(node)
self._delete_node_storage(node)
if is_key_value_type(node_type):
value_is_node = node_type == NODE_TYPE_EXTENSION
if value_is_node:
self.clear_all(self._decode_to_node(node[1]))
elif node_type == NODE_TYPE_BRANCH:
for i in range(16):
self.clear_all(self._decode_to_node(node[i]))
def _get_size(self, node):
'''Get counts of (key, value) stored in this and the descendant nodes
:param node: node in form of list, or BLANK_NODE
'''
if node == BLANK_NODE:
return 0
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
value_is_node = node_type == NODE_TYPE_EXTENSION
if value_is_node:
return self._get_size(self._decode_to_node(node[1]))
else:
return 1
elif node_type == NODE_TYPE_BRANCH:
sizes = [self._get_size(self._decode_to_node(node[x]))
for x in range(16)]
sizes = sizes + [1 if node[-1] else 0]
return sum(sizes)
def _to_dict(self, node):
'''convert (key, value) stored in this and the descendant nodes
to dict items.
:param node: node in form of list, or BLANK_NODE
.. note::
Here key is in full form, rather than key of the individual node
'''
if node == BLANK_NODE:
return {}
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
nibbles = without_terminator(unpack_to_nibbles(node[0]))
key = b'+'.join([to_string(x) for x in nibbles])
if node_type == NODE_TYPE_EXTENSION:
sub_dict = self._to_dict(self._decode_to_node(node[1]))
else:
sub_dict = {to_string(NIBBLE_TERMINATOR): node[1]}
# prepend key of this node to the keys of children
res = {}
for sub_key, sub_value in sub_dict.items():
full_key = (key + b'+' + sub_key).strip(b'+')
res[full_key] = sub_value
return res
elif node_type == NODE_TYPE_BRANCH:
res = {}
for i in range(16):
sub_dict = self._to_dict(self._decode_to_node(node[i]))
for sub_key, sub_value in sub_dict.items():
full_key = (str_to_bytes(str(i)) + b'+' + sub_key).strip(b'+')
res[full_key] = sub_value
if node[16]:
res[to_string(NIBBLE_TERMINATOR)] = node[-1]
return res
def to_dict(self):
d = self._to_dict(self.root_node)
res = {}
for key_str, value in d.items():
if key_str:
nibbles = [int(x) for x in key_str.split(b'+')]
else:
nibbles = []
key = nibbles_to_bin(without_terminator(nibbles))
res[key] = value
return res
def get(self, key):
return self._get(self.root_node, bin_to_nibbles(to_string(key)))
def __len__(self):
return self._get_size(self.root_node)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.update(key, value)
def __delitem__(self, key):
return self.delete(key)
def __iter__(self):
return iter(self.to_dict())
def __contains__(self, key):
return self.get(key) != BLANK_NODE
def update(self, key, value):
'''
:param key: a string
        :param value: a string
'''
if not is_string(key):
raise Exception("Key must be string")
# if len(key) > 32:
# raise Exception("Max key length is 32")
if not is_string(value):
raise Exception("Value must be string")
# if value == '':
# return self.delete(key)
old_root = copy.deepcopy(self.root_node)
self.root_node = self._update_and_delete_storage(
self.root_node,
bin_to_nibbles(to_string(key)),
value)
self.replace_root_hash(old_root, self.root_node)
def root_hash_valid(self):
if self.root_hash == BLANK_ROOT:
return True
return self.root_hash in self.db
def produce_spv_proof(self, key):
proof.push(RECORDING)
self.get(key)
o = proof.get_nodelist()
proof.pop()
return o
def verify_spv_proof(root, key, nodes):
    # ``nodes`` is the list of proof nodes; it must not be named ``proof``,
    # which would shadow the module-level ProofConstructor used below.
    from ethereum import db
    proof.push(VERIFYING, nodes)
    t = Trie(db.EphemDB())
    for i, node in enumerate(nodes):
        R = rlp.encode(node)
        H = utils.sha3(R)
        t.db.put(H, R)
    try:
        t.root_hash = root
        t.get(key)
        proof.pop()
        return True
    except Exception as e:
        print(e)
        proof.pop()
        return False
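# Sketch of an SPV proof round trip (assumes ethereum.db.EphemDB as the backing
# store; keys and values are illustrative only):
#
#   t = Trie(db.EphemDB())
#   t.update(b'dog', b'puppy')
#   nodes = t.produce_spv_proof(b'dog')
#   assert verify_spv_proof(t.root_hash, b'dog', nodes)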
if __name__ == "__main__":
from . import db
_db = db.DB(sys.argv[2])
def encode_node(nd):
if is_string(nd):
return encode_hex(nd)
else:
return encode_hex(rlp.encode(nd))
if len(sys.argv) >= 2:
if sys.argv[1] == 'insert':
t = Trie(_db, decode_hex(sys.argv[3]))
t.update(sys.argv[4], sys.argv[5])
print(encode_node(t.root_hash))
elif sys.argv[1] == 'get':
t = Trie(_db, decode_hex(sys.argv[3]))
print(t.get(sys.argv[4]))
|
|
"""
This is a placeholder for code that uses the SQLAlchemy ORM. It contains
helper functions that should make it easier to query the database.
An example of how to use this is shown in an IPython notebook:
https://github.com/transientskp/notebooks/blob/master/transients.ipynb
"""
from sqlalchemy.orm import aliased
from sqlalchemy.sql import func
from tkp.db.model import (Assocxtrsource, Extractedsource, Image, Newsource,
Runningcatalog)
def _last_assoc_timestamps(session, dataset):
"""
Get the timestamps of the latest assocxtrc per runningcatalog and band.
    We can't get the assoc IDs directly, because they are unique and can't
    be put in the GROUP BY. You can get the eventual assoc IDs by joining
    this query again with the assoc table (see the _last_assoc_per_band func).
    args:
        session (session): A SQLAlchemy session
        dataset (Dataset): A SQLAlchemy dataset model
returns: a SQLAlchemy subquery containing runcat id, timestamp, band id
"""
a = aliased(Assocxtrsource, name='a_timestamps')
e = aliased(Extractedsource, name='e_timestamps')
r = aliased(Runningcatalog, name='r_timestamps')
i = aliased(Image, name='i_timestamps')
return session.query(r.id.label('runcat'),
func.max(i.taustart_ts).label('max_time'),
i.band_id.label('band')
). \
select_from(r). \
join(a, r.id == a.runcat_id). \
join(e, a.xtrsrc_id == e.id). \
join(i, i.id == e.image_id). \
group_by(r.id, i.band_id). \
filter(i.dataset == dataset). \
subquery(name='last_assoc_timestamps')
def _last_assoc_per_band(session, dataset):
"""
    Get the IDs of the latest assocxtrc per runningcatalog and band.
    Very similar to _last_assoc_timestamps, but returns the IDs.
    args:
        session: SQLAlchemy session object
dataset: tkp.db.model.dataset object
returns: SQLAlchemy subquery
"""
l = _last_assoc_timestamps(session, dataset)
a = aliased(Assocxtrsource, name='a_laids')
e = aliased(Extractedsource, name='e_laids')
i = aliased(Image, name='i_laids')
return session.query(a.id.label('assoc_id'), l.c.max_time,
l.c.band, l.c.runcat). \
select_from(l). \
join(a, a.runcat_id == l.c.runcat). \
join(e, a.xtrsrc_id == e.id). \
join(i, (i.id == e.image_id) & (i.taustart_ts == l.c.max_time)). \
subquery(name='last_assoc_per_band')
def _last_ts_fmax(session, dataset):
"""
Select peak flux per runcat at last timestep (over all bands)
args:
        session: SQLAlchemy session object
dataset: tkp.db.model.dataset object
returns: SQLAlchemy subquery
"""
a = aliased(Assocxtrsource, name='a_lt')
e = aliased(Extractedsource, name='e_lt')
subquery = _last_assoc_per_band(session, dataset)
return session.query(a.runcat_id.label('runcat_id'),
func.max(e.f_int).label('max_flux')
). \
select_from(subquery). \
join(a, a.id == subquery.c.assoc_id). \
join(e, a.xtrsrc_id == e.id). \
group_by(a.runcat_id). \
subquery(name='last_ts_fmax')
def _newsrc_trigger(session, dataset):
"""
    Grab newsource/trigger details where possible
    args:
        session: SQLAlchemy session object
        dataset: tkp.db.model.dataset object
returns: SQLAlchemy subquery
"""
newsource = aliased(Newsource, name='n_ntr')
e = aliased(Extractedsource, name='e_ntr')
i = aliased(Image, name='i_ntr')
return session.query(
newsource.id,
newsource.runcat_id.label('rc_id'),
(e.f_int / i.rms_min).label('sigma_rms_min'),
(e.f_int / i.rms_max).label('sigma_rms_max')
). \
select_from(newsource). \
join(e, e.id == newsource.trigger_xtrsrc_id). \
join(i, i.id == newsource.previous_limits_image_id). \
filter(i.dataset == dataset). \
subquery(name='newsrc_trigger')
def _combined(session, dataset):
"""
args:
session (Session): SQLAlchemy session
        dataset (Dataset): Dataset model object
    returns: a SQLAlchemy subquery
"""
runcat = aliased(Runningcatalog, name='r')
match_assoc = aliased(Assocxtrsource, name='match_assoc')
match_ex = aliased(Extractedsource, name='match_ex')
match_img = aliased(Image, name='match_img')
agg_img = aliased(Image, name='agg_img')
agg_assoc = aliased(Assocxtrsource, name='agg_assoc')
agg_ex = aliased(Extractedsource, name='agg_ex')
newsrc_trigger_query = _newsrc_trigger(session, dataset)
last_ts_fmax_query = _last_ts_fmax(session, dataset)
return session.query(
runcat.id,
runcat.wm_ra.label('ra'),
runcat.wm_decl.label('decl'),
runcat.wm_uncertainty_ew,
runcat.wm_uncertainty_ns,
runcat.xtrsrc_id,
runcat.dataset_id.label('dataset_id'),
runcat.datapoints,
match_assoc.v_int,
match_assoc.eta_int,
match_img.band_id,
newsrc_trigger_query.c.id.label('newsource'),
newsrc_trigger_query.c.sigma_rms_max.label('sigma_rms_max'),
newsrc_trigger_query.c.sigma_rms_min.label('sigma_rms_min'),
func.max(agg_ex.f_int).label('lightcurve_max'),
func.avg(agg_ex.f_int).label('lightcurve_avg'),
func.median(agg_ex.f_int).label('lightcurve_median')
). \
select_from(last_ts_fmax_query). \
join(match_assoc, match_assoc.runcat_id == last_ts_fmax_query.c.runcat_id). \
join(match_ex,
(match_assoc.xtrsrc_id == match_ex.id) &
(match_ex.f_int == last_ts_fmax_query.c.max_flux)). \
join(runcat, runcat.id == last_ts_fmax_query.c.runcat_id). \
join(match_img, match_ex.image_id == match_img.id). \
outerjoin(newsrc_trigger_query, newsrc_trigger_query.c.rc_id == runcat.id). \
join(agg_assoc, runcat.id == agg_assoc.runcat_id). \
join(agg_ex, agg_assoc.xtrsrc_id == agg_ex.id). \
join(agg_img,
(agg_ex.image_id == agg_img.id) & (agg_img.band_id == match_img.band_id)). \
group_by(runcat.id,
runcat.wm_ra,
runcat.wm_decl,
runcat.wm_uncertainty_ew,
runcat.wm_uncertainty_ns,
runcat.xtrsrc_id,
runcat.dataset_id,
runcat.datapoints,
match_assoc.v_int,
match_assoc.eta_int,
match_img.band_id,
newsrc_trigger_query.c.id,
newsrc_trigger_query.c.sigma_rms_max,
newsrc_trigger_query.c.sigma_rms_min,
). \
filter(runcat.dataset == dataset). \
subquery()
def transients(session, dataset, ra_range=None, decl_range=None,
v_int_min=None, eta_int_min=None, sigma_rms_min_range=None,
sigma_rms_max_range=None, new_src_only=False):
"""
Calculate sigma_min, sigma_max, v_int, eta_int and the max and avg
values for lightcurves, for all runningcatalogs
    It starts by getting the extracted source from the latest image for a
    runcat. Since there are multiple bands, this is somewhat arbitrary; we pick
    the band with the maximum integrated flux. That gives us v_int and eta_int.
    The flux is then divided by the RMS_max and RMS_min of the previous image
    (stored in newsource.previous_limits_image) to obtain sigma_max and
    sigma_min.
    args:
        session (Session): SQLAlchemy session
        dataset (Dataset): SQLAlchemy dataset object
        ra_range (tuple): 2 element tuple of RA range
        decl_range (tuple): 2 element tuple of declination range
        v_int_min (float): minimum v_int value
        eta_int_min (float): minimum eta_int value
        sigma_rms_min_range (tuple): 2 element tuple of sigma_rms_min range
        sigma_rms_max_range (tuple): 2 element tuple of sigma_rms_max range
        new_src_only (bool): new sources only
returns: a SQLAlchemy query
"""
subquery = _combined(session, dataset=dataset)
query = session.query(subquery)
if ra_range and decl_range:
query = query.filter(subquery.c.ra.between(*ra_range) &
subquery.c.decl.between(*decl_range))
if v_int_min != None:
query = query.filter(subquery.c.v_int >= v_int_min)
if eta_int_min != None:
query = query.filter(subquery.c.eta_int >= eta_int_min)
if sigma_rms_min_range:
query = query.filter(subquery.c.sigma_rms_min.between(*sigma_rms_min_range))
if sigma_rms_max_range:
query = query.filter(subquery.c.sigma_rms_max.between(*sigma_rms_max_range))
if new_src_only:
query = query.filter(subquery.c.newsource != None)
return query
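# Illustrative usage sketch (not part of the original module). It assumes an
# open SQLAlchemy `session` and a `dataset` object are available; the ranges
# and thresholds below are placeholder values chosen only for illustration.
def _example_transients_usage(session, dataset):
    query = transients(session, dataset,
                       ra_range=(120.0, 130.0), decl_range=(-5.0, 5.0),
                       v_int_min=0.1, eta_int_min=3.0,
                       new_src_only=True)
    # Each row exposes the labelled columns of the combined subquery,
    # e.g. ra, decl, v_int, eta_int, sigma_rms_min/max and the lightcurve stats.
    return [(row.ra, row.decl, row.v_int, row.eta_int) for row in query.all()]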
|
|
"""
Here is probably the place to write the docs, since the test-cases
show how the types behave.
Later...
"""
from ctypes import *
from ctypes.test import need_symbol
import sys, unittest
from ctypes.test import xfail
from test.support import impl_detail
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
if sys.platform == "win32":
windll = WinDLL(_ctypes_test.__file__)
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
_fields_ = [("left", c_int), ("top", c_int),
("right", c_int), ("bottom", c_int)]
class FunctionTestCase(unittest.TestCase):
def test_mro(self):
        # in Python 2.3, this raises TypeError: MRO conflict among base classes,
# in Python 2.2 it works.
#
# But in early versions of _ctypes.c, the result of tp_new
# wasn't checked, and it even crashed Python.
# Found by Greg Chapman.
try:
class X(object, Array):
_length_ = 5
_type_ = "i"
except TypeError:
pass
from _ctypes import _Pointer
try:
class X(object, _Pointer):
pass
except TypeError:
pass
from _ctypes import _SimpleCData
try:
class X(object, _SimpleCData):
_type_ = "i"
except TypeError:
pass
try:
class X(object, Structure):
_fields_ = []
except TypeError:
pass
@need_symbol('c_wchar')
def test_wchar_parm(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(1, "x", 3, 4, 5.0, 6.0)
self.assertEqual(result, 139)
self.assertEqual(type(result), int)
@need_symbol('c_wchar')
def test_wchar_result(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_wchar
result = f(0, 0, 0, 0, 0, 0)
self.assertEqual(result, '\x00')
def test_voidresult(self):
f = dll._testfunc_v
f.restype = None
f.argtypes = [c_int, c_int, POINTER(c_int)]
result = c_int()
self.assertEqual(None, f(1, 2, byref(result)))
self.assertEqual(result.value, 3)
def test_intresult(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_int
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), int)
# If we declare the function to return a short,
# is the high part split off?
f.restype = c_short
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(1, 2, 3, 0x10004, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
# You cannot assign character format codes as restype any longer
self.assertRaises(TypeError, setattr, f, "restype", "i")
def test_floatresult(self):
f = dll._testfunc_f_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_float
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_doubleresult(self):
f = dll._testfunc_d_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_double
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
@impl_detail('long double not supported by PyPy', pypy=False)
def test_longdoubleresult(self):
f = dll._testfunc_D_bhilfD
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_longdouble]
f.restype = c_longdouble
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
@need_symbol('c_longlong')
def test_longlongresult(self):
f = dll._testfunc_q_bhilfd
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
f = dll._testfunc_q_bhilfdq
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double, c_longlong]
result = f(1, 2, 3, 4, 5.0, 6.0, 21)
self.assertEqual(result, 42)
def test_stringresult(self):
f = dll._testfunc_p_p
f.argtypes = None
f.restype = c_char_p
result = f(b"123")
self.assertEqual(result, b"123")
result = f(None)
self.assertEqual(result, None)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(pointer(v))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
        # This one works...
result = f(pointer(v))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(p)
self.assertEqual(result.contents.value, 99)
arg = byref(v)
result = f(arg)
self.assertNotEqual(result.contents, v.value)
self.assertRaises(ArgumentError, f, byref(c_short(22)))
# It is dangerous, however, because you don't control the lifetime
# of the pointer:
result = f(byref(c_int(99)))
self.assertNotEqual(result.contents, 99)
def test_errors(self):
f = dll._testfunc_p_p
f.restype = c_int
class X(Structure):
_fields_ = [("y", c_int)]
self.assertRaises(TypeError, f, X()) #cannot convert parameter
################################################################
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(2**18, cb)
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
f.argtypes = None
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
AnotherCallback = WINFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, -10, cb)
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
@need_symbol('c_longlong')
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertIsInstance(value, int)
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, f(1000000000000, cb))
    def test_dll_errors(self):
        # renamed so it does not shadow the earlier test_errors() above
self.assertRaises(AttributeError, getattr, dll, "_xxx_yyy")
self.assertRaises(ValueError, c_int.in_dll, dll, "_xxx_yyy")
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(inp)
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
def test_struct_return_2H_stdcall(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
windll.s_ret_2h_func.restype = S2H
windll.s_ret_2h_func.argtypes = [S2H]
s2h = windll.s_ret_2h_func(S2H(99, 88))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(inp)
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
def test_struct_return_8H_stdcall(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
windll.s_ret_8i_func.restype = S8I
windll.s_ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = windll.s_ret_8i_func(inp)
self.assertEqual(
(s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
@xfail
def test_sf1651235(self):
# see http://www.python.org/sf/1651235
proto = CFUNCTYPE(c_int, RECT, POINT)
def callback(*args):
return 0
callback = proto(callback)
self.assertRaises(ArgumentError, lambda: callback((1, 2, 3, 4), POINT()))
if __name__ == '__main__':
unittest.main()
|
|
# Michelle Deng
# fallDown.py
# Import modules:
import os, sys, random
import pygame
from pygame.locals import *
# Uploads an image file
# I did NOT write this function!
# Credits: http://www.pygame.org/docs/tut/chimp/ChimpLineByLine.html
def load_image(name, colorkey = None):
fullname = os.path.join('', name)
try:
image = pygame.image.load(fullname)
except pygame.error, message:
print "Cannot load image:", name
raise SystemExit, message
image = image.convert()
if colorkey is not None:
        # If colorkey is -1, use the color of the top-left pixel as the
        # transparent color (sets transparency)
        if colorkey == -1:
            colorkey = image.get_at((0,0))
        # RLEACCEL gives better blit performance on non-accelerated displays
image.set_colorkey(colorkey, RLEACCEL)
return image, image.get_rect()
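# Illustrative sketch (not part of the original game): the typical call
# pattern for load_image(), using -1 so the top-left pixel color becomes the
# transparent colorkey. The helper name is hypothetical; the sprite file name
# matches the one used further below.
def exampleLoadSprite():
    image, rect = load_image("crystal_sphere.png", -1)
    return image, rect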
class Screens(object):
def __init__(self):
# Draws screens and buttons
pass
    def draw(self, screen, screenWidth, screenHeight, image):
        # load_image() returns (surface, rect); accept either form
        if isinstance(image, tuple):
            image = image[0]
        image = pygame.transform.smoothscale(image, (screenWidth, screenHeight))
        screen.blit(image, (0,0))
# Draws menu splash screen with buttons
class MenuScreen(Screens):
def __init__(self, screenWidth, screenHeight):
self.menuSplash = load_image("menusplashwtitle.png")
iconMaxWidth = 275
iconMaxHeight = 150
self.buttonPressed = False
self.active = False
self.iconWidth = int(0.4 * screenWidth)
self.iconHeight = int(0.2 * screenHeight)
self.iconSize = (self.iconWidth, self.iconHeight)
self.menuPlay = load_image("menuPlayTxt.png", -1)
self.menuPlay = pygame.Surface(self.iconSize)
self.menuPlayPressed = load_image("menuplay_pressed.png", -1)
self.menuPlayPressed = pygame.Surface(self.iconSize)
self.menuTut = load_image("menututTxt.png", -1)
self.menuTut = pygame.Surface(self.iconSize)
self.menuTutPressed = load_image("menutut_pressed.png", -1)
self.menuTutPressed = pygame.Surface(self.iconSize)
self.menuCreds = load_image("menucredsTxt.png", -1)
self.menuCreds = pygame.Surface(self.iconSize)
self.menuCredsPressed = load_image("menucreds_pressed.png", -1)
self.menuCredsPressed = pygame.Surface(self.iconSize)
if (self.iconWidth > iconMaxWidth):
self.iconWidth = iconMaxWidth
if (self.iconHeight > iconMaxHeight):
self.iconHeight = iconMaxHeight
# Scale icons
self.menuPlay = pygame.transform.smoothscale(self.menuPlay,
self.iconSize)
self.menuTut = pygame.transform.smoothscale(self.menuTut, self.iconSize)
self.menuCreds = pygame.transform.smoothscale(self.menuCreds,
self.iconSize)
# Scale pressed icons
self.menuPlayPressed = pygame.transform.smoothscale(self.menuPlayPressed,
self.iconSize)
self.menuTutPressed = pygame.transform.smoothscale(self.menuTutPressed,
self.iconSize)
self.menuCredsPressed = pygame.transform.smoothscale(self.menuCredsPressed,
self.iconSize)
def draw(self, screen, screenWidth, screenHeight):
pygame.mouse.set_visible(1)
# Draw menu screen
Screens().draw(screen, screenWidth, screenHeight, self.menuSplash)
# Position buttons
playPos = (int(0.5 * screenWidth), int(0.35 * screenHeight))
tutPos = (int(0.5 * screenWidth),
int(0.35 * screenHeight + 1.25 * self.iconHeight))
credsPos = (int(0.5 * screenWidth),
int(0.70 * screenHeight + 0.25 * self.iconHeight))
# Show pressed state when button is clicked
if not(self.buttonPressed):
screen.blit(self.menuPlay, playPos)
screen.blit(self.menuTut, tutPos)
screen.blit(self.menuCreds, credsPos)
else:
screen.blit(self.menuPlayPressed, playPos)
screen.blit(self.menuTutPressed, tutPos)
screen.blit(self.menuCredsPressed, credsPos)
def updateAll(self, screenWidth, screenHeight):
(mouseClickX, mouseClickY) = pygame.mouse.get_pos()
playPos = (int(0.5 * screenWidth), int(0.35 * screenHeight))
tutPos = (int(0.5 * screenWidth),
int(0.35 * screenHeight + 1.25 * self.iconHeight))
credsPos = (int(0.5 * screenWidth),
int(0.70 * screenHeight + 0.25 * self.iconHeight))
# Store icon coordinates (top left & bottom right)
self.playCoords = [playPos, (playPos[0] + self.iconWidth, playPos[1] +
self.iconHeight)]
self.tutCoords = [tutPos, (tutPos[0] + self.iconWidth, tutPos[1] +
self.iconHeight)]
self.credsCoords = [credsPos, (credsPos[0] + self.iconWidth, credsPos[1] +
self.iconHeight)]
# Check if player clicks the icons
if ((mouseClickX < self.playCoords[1][0]) and
(mouseClickX > self.playCoords[0][0])):
if ((mouseClickY < self.playCoords[1][1]) and
(mouseClickY > self.playCoords[0][1])):
self.buttonPressed = True
return
elif ((mouseClickX < self.tutCoords[1][0]) and
(mouseClickX > self.tutCoords[0][0])):
            if ((mouseClickY < self.tutCoords[1][1]) and
                (mouseClickY > self.tutCoords[0][1])):
self.buttonPressed = True
return
elif ((mouseClickX < self.credsCoords[1][0]) and
(mouseClickX > self.credsCoords[0][0])):
            if ((mouseClickY < self.credsCoords[1][1]) and
                (mouseClickY > self.credsCoords[0][1])):
self.buttonPressed = True
return
self.buttonPressed = False
def clicked(self):
return self.buttonPressed
class Paused(Screens):
def __init__(self, screenWidth, screenHeight):
self.pausedSplash = load_image("pausesplash.png", -1)
def draw(self, screen, screenWidth, screenHeight):
Screens().draw(screen, screenWidth, screenHeight,
self.pausedSplash)
class GameOver(Screens):
def __init__(self, screenWidth, screenHeight):
self.ggScreen = load_image("ggsplash.png", -1)
iconMaxWidth = 275
iconMaxHeight = 150
self.iconWidth = int(0.4 * screenWidth)
self.iconHeight = int(0.2 * screenHeight)
self.iconSize = [self.iconWidth, self.iconHeight]
self.buttonPressed = False
self.menuPressed = load_image("menubutt-pressed.png", -1)
self.menuPressed = pygame.Surface(self.iconSize)
self.menuButt = load_image("menubutt.png", -1)
self.menuButt = pygame.Surface(self.iconSize)
self.playPressed = load_image("menuplay_pressed.png", -1)
self.playPressed = pygame.Surface(self.iconSize)
self.playButt = load_image("menuplayTxt.png", -1)
self.playButt = pygame.Surface(self.iconSize)
if (self.iconWidth > iconMaxWidth):
self.iconWidth = iconMaxWidth
if (self.iconHeight > iconMaxHeight):
self.iconHeight = iconMaxHeight
# Scale icons
self.menuButt = pygame.transform.smoothscale(self.menuButt,
self.iconSize)
self.playButt = pygame.transform.smoothscale(self.playButt,
self.iconSize)
# Scale pressed icons
self.menuPressed = pygame.transform.smoothscale(self.menuPressed,
self.iconSize)
self.playPressed = pygame.transform.smoothscale(self.playPressed,
self.iconSize)
def draw(self, screen, screenWidth, screenHeight):
pygame.mouse.set_visible(1)
Screens().draw(screen, screenWidth, screenHeight, self.ggScreen)
# Position buttons
        menuPos = (int(0.25 * screenWidth), int(0.85 * screenHeight))
        playPos = (int(0.75 * screenWidth), int(0.85 * screenHeight))
        # Remember the positions so updateAll() can hit-test the buttons
        self.menuPos = menuPos
        self.playPos = playPos
# Show pressed state when button is clicked
if not(self.buttonPressed):
screen.blit(self.menuButt, menuPos)
screen.blit(self.playButt, playPos)
else:
screen.blit(self.menuPressed, menuPos)
screen.blit(self.playPressed, playPos)
def updateAll(self):
(mouseClickX, mouseClickY) = pygame.mouse.get_pos()
# Store icon coordinates (top left & bottom right)
        self.menuCoords = [self.menuPos, (self.menuPos[0] + self.iconWidth,
                                          self.menuPos[1] + self.iconHeight)]
        self.playCoords = [self.playPos, (self.playPos[0] + self.iconWidth,
                                          self.playPos[1] + self.iconHeight)]
# Check if player clicks the icons
if ((mouseClickX < self.menuCoords[1][0]) and
(mouseClickX > self.menuCoords[0][0])):
if ((mouseClickY < self.menuCoords[1][1]) and
(mouseClickY > self.menuCoords[0][1])):
self.buttonPressed = True
return
elif ((mouseClickX < self.playCoords[1][0]) and
(mouseClickX > self.playCoords[0][0])):
if ((mouseClickY < self.playCoords[1][1]) and
(mouseClickY > self.playCoords[0][1])):
self.buttonPressed = True
return
self.buttonPressed = False
def clicked(self):
return self.buttonPressed
class Tutorial(Screens):
def __init__(self, screenWidth, screenHeight):
self.tutScreen = load_image("tutsplash.png", -1)
iconMaxWidth = 275
iconMaxHeight = 150
self.iconWidth = int(0.5 * screenWidth)
self.iconHeight = int(0.2 * screenHeight)
self.iconSize = (self.iconWidth, self.iconHeight)
self.buttonPressed = False
self.menuPressed = load_image("menubutt-pressed.png", -1)
self.menuPressed = pygame.Surface(self.iconSize)
self.menuButt = load_image("menubutt.png", -1)
self.menuButt = pygame.Surface(self.iconSize)
self.playPressed = load_image("menuplay_pressed.png", -1)
self.playPressed = pygame.Surface(self.iconSize)
self.playButt = load_image("menuplayTxt.png", -1)
self.playButt = pygame.Surface(self.iconSize)
if (self.iconWidth > iconMaxWidth):
self.iconWidth = iconMaxWidth
if (self.iconHeight > iconMaxHeight):
self.iconHeight = iconMaxHeight
# Scale icons
self.menuButt = pygame.transform.smoothscale(self.menuButt,
self.iconSize)
self.playButt = pygame.transform.smoothscale(self.playButt,
self.iconSize)
# Scale pressed icons
self.menuPressed = pygame.transform.smoothscale(self.menuPressed,
self.iconSize)
self.playPressed = pygame.transform.smoothscale(self.playPressed,
self.iconSize)
def draw(self, screen, screenWidth, screenHeight):
pygame.mouse.set_visible(1)
        Screens().draw(screen, screenWidth, screenHeight, self.tutScreen)
# Position buttons
        menuPos = (int(0.25 * screenWidth), int(0.9 * screenHeight))
        playPos = (int(0.75 * screenWidth), int(0.9 * screenHeight))
        # Remember the positions so updateAll() can hit-test the buttons
        self.menuPos = menuPos
        self.playPos = playPos
# Show pressed state when button is clicked
if not(self.buttonPressed):
screen.blit(self.menuButt, menuPos)
screen.blit(self.playButt, playPos)
else:
screen.blit(self.menuPressed, menuPos)
screen.blit(self.playPressed, playPos)
def updateAll(self):
(mouseClickX, mouseClickY) = pygame.mouse.get_pos()
# Store icon coordinates (top left & bottom right)
        self.menuCoords = [self.menuPos, (self.menuPos[0] + self.iconWidth,
                                          self.menuPos[1] + self.iconHeight)]
        self.playCoords = [self.playPos, (self.playPos[0] + self.iconWidth,
                                          self.playPos[1] + self.iconHeight)]
# Check if player clicks the icons
if ((mouseClickX < self.menuCoords[1][0]) and
(mouseClickX > self.menuCoords[0][0])):
if ((mouseClickY < self.menuCoords[1][1]) and
(mouseClickY > self.menuCoords[0][1])):
self.buttonPressed = True
return
elif ((mouseClickX < self.playCoords[1][0]) and
(mouseClickX > self.playCoords[0][0])):
if ((mouseClickY < self.playCoords[1][1]) and
(mouseClickY > self.playCoords[0][1])):
self.buttonPressed = True
return
self.buttonPressed = False
def clicked(self):
return self.buttonPressed
class Creds(Screens):
    def __init__(self, screenWidth, screenHeight):
self.credsScreen = load_image("credssplash.png", -1)
iconMaxWidth = 275
iconMaxHeight = 150
self.buttonPressed = False
self.menuPressed = load_image("menubutt-pressed.png", -1)
self.menuButt = load_image("menubutt.png", -1)
self.iconWidth = int(0.3 * screenWidth)
self.iconHeight = int(0.2 * screenHeight)
self.iconSize = (self.iconWidth, self.iconHeight)
if (self.iconWidth > iconMaxWidth):
self.iconWidth = iconMaxWidth
if (self.iconHeight > iconMaxHeight):
self.iconHeight = iconMaxHeight
# Scale icons
self.menuButt = pygame.transform.smoothscale(self.menuButt,
self.iconSize)
# Scale pressed icons
self.menuPressed = pygame.transform.smoothscale(self.menuPressed,
self.iconSize)
def draw(self, screen, screenWidth, screenHeight):
pygame.mouse.set_visible(1)
Screens().draw(screen, screenWidth, screenHeight, self.credsScreen)
# Position buttons
        menuPos = (int(0.50 * screenWidth), int(0.9 * screenHeight))
        # Remember the position so update() can hit-test the button
        self.menuPos = menuPos
# Show pressed state when button is clicked
if not(self.buttonPressed):
screen.blit(self.menuButt, menuPos)
else:
screen.blit(self.menuPressed, menuPos)
def update(self):
(mouseClickX, mouseClickY) = pygame.mouse.get_pos()
# Store icon coordinates (top left & bottom right)
        self.menuCoords = [self.menuPos, (self.menuPos[0] + self.iconWidth,
                                          self.menuPos[1] + self.iconHeight)]
# Check if player clicks the icons
if ((mouseClickX < self.menuCoords[1][0]) and
(mouseClickX > self.menuCoords[0][0])):
if ((mouseClickY < self.menuCoords[1][1]) and
(mouseClickY > self.menuCoords[0][1])):
self.buttonPressed = True
return
self.buttonPressed = False
def clicked(self):
return self.buttonPressed
# Create the ball
class Ball(pygame.sprite.Sprite):
def __init__(self):
# Call Sprite initializer
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("crystal_sphere.png", -1)
self.speedx = 19
self.speedy = 0
self.area = self.image.get_rect()
self.width = self.area.width
self.height = self.area.height
self.loBound = self.rect.y + self.height
self.gravity = 25
self.wasColliding = False
# Moving the ball
def moveLeft(self):
if (self.rect.x != 0):
self.rect = self.rect.move(-self.speedx, self.speedy)
else:
print "no move left"
def moveRight(self, data):
margin = 3
if (self.rect.x + self.width + margin < data.width):
self.rect = self.rect.move(self.speedx, self.speedy)
else:
print "no move right"
def ballStepsColl(self, data):
# make sure ball still moves along step
margin = 7
isColliding = False
for step in data.stepsList:
if (len(step.rect.collidelistall([self])) > 0):
isColliding = True
if (isColliding):
#self.handleCollision(data)
self.rect = self.rect.move(0, -data.speedy)
if not self.wasColliding:
data.bounceSound.play()
data.bounceSound.set_volume(0.45)
self.wasColliding = True
elif ((self.rect.y + margin) > (data.height - self.height)):
pass
else:
self.wasColliding = False
self.rect = self.rect.move(0, self.gravity)
def handleCollision(self, data):
# right clip
# left clip
# top clip
# bottom clip
# correct for collisions
for step in data.stepsList:
bottomClip = (data.stepHeight - (self.height/2))
print "bottom clip: ", bottomClip/4
if (self.rect.bottom > step.rect.y):
self.rect.bottom -= bottomClip/4
else:
pass
# # Rotate an image around its center
# # I did NOT write this function!
# # Credits: http://stackoverflow.com/questions/4183208/how-do-i-rotate-an-image-around-its-center-using-pygame
# def rot_center(image, angle):
# orig_rect = image.get_rect()
# rot_image = pygame.transform.rotate(image, angle)
# rot_rect = orig_rect.copy()
# rot_rect.center = rot_image.get_rect().center
# rot_image = rot_image.subsurface(rot_rect).copy()
# return rot_image
class Steps(pygame.sprite.Sprite):
def __init__(self, width, height, color):
pygame.sprite.Sprite.__init__(self)
self.height = height
self.image = pygame.Surface([width, self.height])
self.rect = self.image.get_rect()
self.image.fill(color)
#############
# RED STEPS #
#############
def oneHoleRed(data):
red = data.redColor
randHolePos = random.randint(data.spaceX, data.width - data.spaceX)
leftStepWidth = randHolePos - data.spaceX/2
data.leftStep = Steps(leftStepWidth, data.stepHeight, red)
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = data.width - (randHolePos + data.spaceX/2)
data.rightStep = Steps(rightStepWidth, data.stepHeight, red)
# Sets each step on the right side of the screen
data.rightStep.rect.x = data.width - rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
def twoHolesRed(data):
red = data.redColor
leftStepWidth = random.randint(0, (data.width/2 - 2*data.spaceX))
data.leftStep = Steps(leftStepWidth, data.stepHeight, red)
# Position the step on the left edge of the screen
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = random.randint((data.width/2 + 2*data.spaceX), data.width)
data.rightStep = Steps(rightStepWidth, data.stepHeight, red)
# Position the step on the right edge of the screen
data.rightStep.rect.x = rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
spaceWidth = data.rightStep.rect.x - leftStepWidth
midStepWidth = spaceWidth - 2*data.spaceX
data.midStep = Steps(midStepWidth, data.stepHeight, red)
data.midStep.rect.x = leftStepWidth + data.spaceX
data.midStep.rect.y = data.height
data.stepsList.add(data.midStep)
def createRandRedStep(data):
data.ballWidth = 40
# The width of each step needs to be random
data.stepHeight = 20
(data.speedx, data.speedy) = (0, 5)
# Minimum distance between two steps on top of each other
data.spaceY = int(data.ballWidth * 1.75)
data.spaceX = int(data.ballWidth * 1.25)
# There should be at most 3 steps in each row
numHoles = random.randint(1, 2)
if (numHoles == 1):
oneHoleRed(data)
else:
twoHolesRed(data)
def updateRedSteps(data):
for step in data.stepsList:
step.rect.y -= data.speedy
if (step.rect.y + data.stepHeight == 0):
data.stepsList.remove(step)
def trySpawnNewRedStep(data):
data.lowest += data.speedy
data.lowest %= data.spaceY
if (data.lowest == 0):
createRandRedStep(data)
#############
# ORANGE #
#############
def oneHoleOrange(data):
orange = data.orangeColor
randHolePos = random.randint(data.spaceX, data.width - data.spaceX)
leftStepWidth = randHolePos - data.spaceX/2
data.leftStep = Steps(leftStepWidth, data.stepHeight, orange)
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = data.width - (randHolePos + data.spaceX/2)
data.rightStep = Steps(rightStepWidth, data.stepHeight, orange)
# Sets each step on the right side of the screen
data.rightStep.rect.x = data.width - rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
def twoHolesOrange(data):
orange = data.orangeColor
leftStepWidth = random.randint(0, (data.width/2 - 2*data.spaceX))
data.leftStep = Steps(leftStepWidth, data.stepHeight, orange)
# Position the step on the left edge of the screen
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = random.randint((data.width/2 + 2*data.spaceX), data.width)
data.rightStep = Steps(rightStepWidth, data.stepHeight, orange)
# Position the step on the right edge of the screen
data.rightStep.rect.x = rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
spaceWidth = data.rightStep.rect.x - leftStepWidth
midStepWidth = spaceWidth - 2*data.spaceX
data.midStep = Steps(midStepWidth, data.stepHeight, orange)
data.midStep.rect.x = leftStepWidth + data.spaceX
data.midStep.rect.y = data.height
data.stepsList.add(data.midStep)
def createRandOrangeStep(data):
data.ballWidth = 40
# The width of each step needs to be random
data.stepHeight = 20
(data.speedx, data.speedy) = (0, 5)
# Minimum distance between two steps on top of each other
data.spaceY = int(data.ballWidth * 1.75)
data.spaceX = int(data.ballWidth * 1.25)
# There should at most be 3 steps in each row
numHoles = random.randint(1, 2)
if (numHoles == 1):
oneHoleOrange(data)
else:
twoHolesOrange(data)
def updateOrangeSteps(data):
for step in data.stepsList:
step.rect.y -= data.speedy
if (step.rect.y + data.stepHeight == 0):
data.stepsList.remove(step)
def trySpawnNewOrangeStep(data):
data.lowest += data.speedy
data.lowest %= data.spaceY
if (data.lowest == 0):
createRandOrangeStep(data)
#############
# YELLOW #
#############
def oneHoleYell(data):
yellow = data.yellowColor
randHolePos = random.randint(data.spaceX, data.width - data.spaceX)
leftStepWidth = randHolePos - data.spaceX/2
data.leftStep = Steps(leftStepWidth, data.stepHeight, yellow)
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = data.width - (randHolePos + data.spaceX/2)
data.rightStep = Steps(rightStepWidth, data.stepHeight, yellow)
# Sets each step on the right side of the screen
data.rightStep.rect.x = data.width - rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
def twoHolesYell(data):
yellow = data.yellowColor
leftStepWidth = random.randint(0, (data.width/2 - 2*data.spaceX))
data.leftStep = Steps(leftStepWidth, data.stepHeight, yellow)
# Position the step on the left edge of the screen
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = random.randint((data.width/2 + 2*data.spaceX), data.width)
data.rightStep = Steps(rightStepWidth, data.stepHeight, yellow)
# Position the step on the right edge of the screen
data.rightStep.rect.x = rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
spaceWidth = data.rightStep.rect.x - leftStepWidth
midStepWidth = spaceWidth - 2*data.spaceX
data.midStep = Steps(midStepWidth, data.stepHeight, yellow)
data.midStep.rect.x = leftStepWidth + data.spaceX
data.midStep.rect.y = data.height
data.stepsList.add(data.midStep)
def createRandYellowStep(data):
data.ballWidth = 40
# The width of each step needs to be random
data.stepHeight = 20
(data.speedx, data.speedy) = (0, 5)
# Minimum distance between two steps on top of each other
data.spaceY = int(data.ballWidth * 1.75)
data.spaceX = int(data.ballWidth * 1.25)
# There should at most be 3 steps in each row
numHoles = random.randint(1, 2)
if (numHoles == 1):
oneHoleYell(data)
else:
twoHolesYell(data)
def updateYellowSteps(data):
for step in data.stepsList:
step.rect.y -= data.speedy
if (step.rect.y + data.stepHeight == 0):
data.stepsList.remove(step)
def trySpawnNewYellowStep(data):
data.lowest += data.speedy
data.lowest %= data.spaceY
if (data.lowest == 0):
createRandYellowStep(data)
#############
# GREEN #
#############
def oneHoleGreen(data):
green = data.greenColor
randHolePos = random.randint(data.spaceX, data.width - data.spaceX)
leftStepWidth = randHolePos - data.spaceX/2
data.leftStep = Steps(leftStepWidth, data.stepHeight, green)
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = data.width - (randHolePos + data.spaceX/2)
data.rightStep = Steps(rightStepWidth, data.stepHeight, green)
# Sets each step on the right side of the screen
data.rightStep.rect.x = data.width - rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
def twoHolesGreen(data):
green = data.greenColor
leftStepWidth = random.randint(0, (data.width/2 - 2*data.spaceX))
data.leftStep = Steps(leftStepWidth, data.stepHeight, green)
# Position the step on the left edge of the screen
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = random.randint((data.width/2 + 2*data.spaceX), data.width)
data.rightStep = Steps(rightStepWidth, data.stepHeight, green)
# Position the step on the right edge of the screen
data.rightStep.rect.x = rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
spaceWidth = data.rightStep.rect.x - leftStepWidth
midStepWidth = spaceWidth - 2*data.spaceX
data.midStep = Steps(midStepWidth, data.stepHeight, green)
data.midStep.rect.x = leftStepWidth + data.spaceX
data.midStep.rect.y = data.height
data.stepsList.add(data.midStep)
def createRandGreenStep(data):
data.ballWidth = 40
# The width of each step needs to be random
data.stepHeight = 20
(data.speedx, data.speedy) = (0, 5)
# Minimum distance between two steps on top of each other
data.spaceY = int(data.ballWidth * 1.75)
data.spaceX = int(data.ballWidth * 1.25)
# There should at most be 3 steps in each row
numHoles = random.randint(1, 2)
if (numHoles == 1):
oneHoleGreen(data)
else:
twoHolesGreen(data)
def updateGreenSteps(data):
for step in data.stepsList:
step.rect.y -= data.speedy
if (step.rect.y + data.stepHeight == 0):
data.stepsList.remove(step)
def trySpawnNewGreenStep(data):
data.lowest += data.speedy
data.lowest %= data.spaceY
if (data.lowest == 0):
createRandGreenStep(data)
#############
# BLUE #
#############
def oneHoleBlue(data):
blue = data.blueColor
randHolePos = random.randint(data.spaceX, data.width - data.spaceX)
leftStepWidth = randHolePos - data.spaceX/2
data.leftStep = Steps(leftStepWidth, data.stepHeight, blue)
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = data.width - (randHolePos + data.spaceX/2)
data.rightStep = Steps(rightStepWidth, data.stepHeight, blue)
# Sets each step on the right side of the screen
data.rightStep.rect.x = data.width - rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
def twoHolesBlue(data):
blue = data.blueColor
leftStepWidth = random.randint(0, (data.width/2 - 2*data.spaceX))
data.leftStep = Steps(leftStepWidth, data.stepHeight, blue)
# Position the step on the left edge of the screen
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = random.randint((data.width/2 + 2*data.spaceX), data.width)
data.rightStep = Steps(rightStepWidth, data.stepHeight, blue)
# Position the step on the right edge of the screen
data.rightStep.rect.x = rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
spaceWidth = data.rightStep.rect.x - leftStepWidth
midStepWidth = spaceWidth - 2*data.spaceX
data.midStep = Steps(midStepWidth, data.stepHeight, blue)
data.midStep.rect.x = leftStepWidth + data.spaceX
data.midStep.rect.y = data.height
data.stepsList.add(data.midStep)
def createRandBlueStep(data):
data.ballWidth = 40
# The width of each step needs to be random
data.stepHeight = 20
(data.speedx, data.speedy) = (0, 5)
# Minimum distance between two steps on top of each other
data.spaceY = int(data.ballWidth * 1.75)
data.spaceX = int(data.ballWidth * 1.25)
# There should at most be 3 steps in each row
numHoles = random.randint(1, 2)
if (numHoles == 1):
oneHoleBlue(data)
else:
twoHolesBlue(data)
def updateBlueSteps(data):
for step in data.stepsList:
step.rect.y -= data.speedy
if (step.rect.y + data.stepHeight == 0):
data.stepsList.remove(step)
def trySpawnNewBlueStep(data):
data.lowest += data.speedy
data.lowest %= data.spaceY
if (data.lowest == 0):
createRandBlueStep(data)
#############
# INDIGO #
#############
def oneHoleIndigo(data):
indigo = data.indigoColor
randHolePos = random.randint(data.spaceX, data.width - data.spaceX)
leftStepWidth = randHolePos - data.spaceX/2
data.leftStep = Steps(leftStepWidth, data.stepHeight, indigo)
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = data.width - (randHolePos + data.spaceX/2)
data.rightStep = Steps(rightStepWidth, data.stepHeight, indigo)
# Sets each step on the right side of the screen
data.rightStep.rect.x = data.width - rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
def twoHolesIndigo(data):
indigo = data.indigoColor
leftStepWidth = random.randint(0, (data.width/2 - 2*data.spaceX))
data.leftStep = Steps(leftStepWidth, data.stepHeight, indigo)
# Position the step on the left edge of the screen
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = random.randint((data.width/2 + 2*data.spaceX), data.width)
data.rightStep = Steps(rightStepWidth, data.stepHeight, indigo)
# Position the step on the right edge of the screen
data.rightStep.rect.x = rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
spaceWidth = data.rightStep.rect.x - leftStepWidth
midStepWidth = spaceWidth - 2*data.spaceX
data.midStep = Steps(midStepWidth, data.stepHeight, indigo)
data.midStep.rect.x = leftStepWidth + data.spaceX
data.midStep.rect.y = data.height
data.stepsList.add(data.midStep)
def createRandIndyStep(data):
data.ballWidth = 40
# The width of each step needs to be random
data.stepHeight = 20
(data.speedx, data.speedy) = (0, 5)
# Minimum distance between two steps on top of each other
data.spaceY = int(data.ballWidth * 1.75)
data.spaceX = int(data.ballWidth * 1.25)
# There should at most be 3 steps in each row
numHoles = random.randint(1, 2)
if (numHoles == 1):
oneHoleIndigo(data)
else:
twoHolesIndigo(data)
def updateIndySteps(data):
for step in data.stepsList:
step.rect.y -= data.speedy
if (step.rect.y + data.stepHeight == 0):
data.stepsList.remove(step)
def trySpawnNewIndyStep(data):
data.lowest += data.speedy
data.lowest %= data.spaceY
if (data.lowest == 0):
createRandIndyStep(data)
#############
# VIOLET #
#############
def oneHoleViolet(data):
violet = data.violetColor
randHolePos = random.randint(data.spaceX, data.width - data.spaceX)
leftStepWidth = randHolePos - data.spaceX/2
data.leftStep = Steps(leftStepWidth, data.stepHeight, violet)
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = data.width - (randHolePos + data.spaceX/2)
data.rightStep = Steps(rightStepWidth, data.stepHeight, violet)
# Sets each step on the right side of the screen
data.rightStep.rect.x = data.width - rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
def twoHolesViolet(data):
violet = data.violetColor
leftStepWidth = random.randint(0, (data.width/2 - 2*data.spaceX))
data.leftStep = Steps(leftStepWidth, data.stepHeight, violet)
# Position the step on the left edge of the screen
data.leftStep.rect.x = 0
data.leftStep.rect.y = data.height
data.stepsList.add(data.leftStep)
rightStepWidth = random.randint((data.width/2 + 2*data.spaceX), data.width)
data.rightStep = Steps(rightStepWidth, data.stepHeight, violet)
# Position the step on the right edge of the screen
data.rightStep.rect.x = rightStepWidth
data.rightStep.rect.y = data.height
data.stepsList.add(data.rightStep)
spaceWidth = data.rightStep.rect.x - leftStepWidth
midStepWidth = spaceWidth - 2*data.spaceX
data.midStep = Steps(midStepWidth, data.stepHeight, violet)
data.midStep.rect.x = leftStepWidth + data.spaceX
data.midStep.rect.y = data.height
data.stepsList.add(data.midStep)
def createRandVioletStep(data):
data.ballWidth = 40
# The width of each step needs to be random
data.stepHeight = 20
(data.speedx, data.speedy) = (0, 5)
# Minimum distance between two steps on top of each other
data.spaceY = int(data.ballWidth * 1.75)
data.spaceX = int(data.ballWidth * 1.25)
# There should at most be 3 steps in each row
numHoles = random.randint(1, 2)
if (numHoles == 1):
oneHoleViolet(data)
else:
twoHolesViolet(data)
def updateVioletSteps(data):
for step in data.stepsList:
step.rect.y -= data.speedy
if (step.rect.y + data.stepHeight == 0):
data.stepsList.remove(step)
def trySpawnNewVioletStep(data):
data.lowest += data.speedy
data.lowest %= data.spaceY
if (data.lowest == 0):
createRandVioletStep(data)
def changeColor(data):
if (data.level == 1):
updateRedSteps(data)
trySpawnNewRedStep(data)
elif (data.level == 2):
updateOrangeSteps(data)
trySpawnNewOrangeStep(data)
elif (data.level == 3):
updateYellowSteps(data)
trySpawnNewYellowStep(data)
elif (data.level == 4):
updateGreenSteps(data)
trySpawnNewGreenStep(data)
elif (data.level == 5):
updateBlueSteps(data)
trySpawnNewBlueStep(data)
elif(data.level == 6):
updateIndySteps(data)
trySpawnNewIndyStep(data)
else:
updateVioletSteps(data)
trySpawnNewVioletStep(data)
def changeLevel(data):
data.timeElapsed += data.clock.tick(data.FPS)
if (data.timeElapsed > data.timeForLevelChange):
print "new level!"
data.FPS += 18
data.level += 1
data.timeElapsed = 0
def enteredNewLevel(data):
fontHeight = 150
levelFont = pygame.font.Font("Font/Level_Score.ttf", fontHeight)
label = levelFont.render("New Level!", 1, data.whiteText)
#data.textRenderList.add(label)
if (changeLevel(data)):
data.screen.blit(label, (data.width/2, data.height/2))
# if (data.score == data.score + 5):
# data.textRenderList.remove(label)
print "new level draw!"
###############
## POWER UPS ##
###############
# freeze screen for 5s (3rd most common)
# +5 points (most common)
# bomb (bad) (least common)
# increase ball speed for 5s (2nd most common)
# also have to determine how often a powerup is placed on the screen
# if random.randint(1, 4) == 1 then decide on powerup
class Powerup(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("powerup.png", -1)
self.area = self.image.get_rect()
self.width = self.area.width
self.height = self.area.height
self.randNum = random.randint(1, 10)
# randomly generate a loc to place powerup; has to be on a step
# 1, 2, 3 --> +5 points
# 4, 5, 6 --> increase ball speed
# 7, 8 --> freeze
# 9, 10 --> kill enemy (only appear if there's an enemy on screen??)
def genLocation(self, data):
# randomly generate a location to place the powerup
self.randx = random.randint(0, data.width)
self.randy = random.randint(0, data.height)
pass
def update(self, data, position):
# draw powerup to screen
# make sure it's sitting on a step
data.screen.blit(self.image, position)
for step in data.stepsList:
            if pygame.sprite.collide_rect(self, step):
self.rect = self.rect.move(0, -data.speedy)
if ((self.rect.y + self.height) == 0):
data.spritesList.remove(self)
# as long as it doesn't overlap with step we are good
# if touching a step move with step
pass
def remove(self):
# if ball collides with powerup, remove from screen
pass
class addPoints(Powerup):
def whenToSpawn(self):
if (self.randNum == 1) or (self.randNum == 2) or (self.randNum == 3):
# self.draw(somewhere), screen.blit?
pass
def plusFive(self, data):
# +5 appears on screen then disappears
data.score += 5
class freezeScreen(Powerup):
# def __init__(self):
# self.timeToWait = 5000
# self.
def whenToSpawn(self):
if (self.randNum == 7) or (self.randNum == 8):
pass
def freezeScreen(self):
# Freezes the steps for 5 seconds
pass
class incBallSpeed(Powerup):
def whenToSpawn(self):
if (self.randNum == 4) or (self.randNum == 5) or (self.randNum == 6):
# screen.blit(somewhere)
pass
    def increase(self, data):
# Increases the speed of the ball for 5 seconds
data.ball.speedx += 5
# for 5 seconds (5000ms)
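# Illustrative sketch (not part of the original game): one possible way to map
# the random number described in the comments above onto a powerup class.
# The helper name and the None fallback are hypothetical.
def choosePowerupType(randNum):
    # 1, 2, 3 --> +5 points (most common)
    if randNum in (1, 2, 3):
        return addPoints
    # 4, 5, 6 --> increase ball speed (2nd most common)
    elif randNum in (4, 5, 6):
        return incBallSpeed
    # 7, 8 --> freeze screen (3rd most common)
    elif randNum in (7, 8):
        return freezeScreen
    # 9, 10 --> kill enemy (least common; no class implemented yet)
    return None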
def drawPause(data):
fontHeight = 105
pauseFont = pygame.font.Font("Font/theme_font.TTF", fontHeight)
label = pauseFont.render("Paused!", 1, data.whiteText)
#data.textRenderList.add(label)
data.screen.blit(label, (data.width/6, data.height/3))
def mousePressed(event, data):
print "Mouse Pressed"
if (data.activateMenu):
pass
redrawAll(data)
def keyPressed(event, data):
if (event.key == pygame.K_LEFT):
if not data.paused:
data.ball.moveLeft()
elif (event.key == pygame.K_RIGHT):
if not data.paused:
data.ball.moveRight(data)
elif (event.key == pygame.K_p):
data.paused = not data.paused
# Takes player to pause screen
drawPause(data)
pygame.display.update()
elif (event.key == pygame.K_r):
data.themeSound.stop()
init(data)
def drawGameOver(data):
fontHeight = 69
scoreFont = pygame.font.Font("Font/theme_font.TTF", fontHeight)
label = scoreFont.render("Game Over!", 1, data.whiteText)
data.screen.blit(label, (data.width*.1, data.height*.2))
def drawFinalScoreText(data):
scoreFontHeight = 45
scoreFont = pygame.font.Font("Font/Level_Score.ttf", scoreFontHeight)
label = scoreFont.render("Final Score:", 1, data.whiteText)
data.screen.blit(label, (data.width/4, data.height*.45))
def gameOver(data):
if (data.ball.rect.top <= 0):
data.ballDiesSound.play()
win(data)
if (data.level == 8):
win(data)
# Check for win
def win(data):
data.mode = "Done"
# win screen, etc.
data.screen.fill((0, 0, 0))
drawGameOver(data)
drawFinalScoreText(data)
fontHeight = 35
scoreFont = pygame.font.Font("Font/Level_Score.ttf", fontHeight)
label = scoreFont.render("%d" % (data.score), 1, data.whiteText)
data.screen.blit(label, (data.width*.45, data.height*.55))
pygame.display.update()
while True:
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
data.mode = "Done"
# if (data.mode == "Done"):
# if (event.key == pygame.K_r):
# data.themeSound.stop()
# init(data)
def drawScore(data):
fontHeight = 25
scoreFont = pygame.font.Font("Font/Level_Score.ttf", fontHeight)
label = scoreFont.render("Score: %d" % (data.score), 1, data.whiteText)
data.screen.blit(label, (0, 0))
def updateScore(data):
data.timeElapsedScore += data.clock.tick(data.FPS)
if (data.timeElapsedScore > data.timeToChangeScore):
data.score += 1
data.timeElapsedScore = 0
def drawLevel(data):
fontHeight = 25
levelFont = pygame.font.Font("Font/Level_Score.ttf", fontHeight)
label = levelFont.render("Level: %d" %(data.level), 1, data.whiteText)
#data.textRenderList.add(label)
data.screen.blit(label, (data.width*.8, 0))
def timerFired(data):
redrawAll(data)
data.clock.tick(data.FPS)
data.mousePos = pygame.mouse.get_pos()
data.ball.ballStepsColl(data)
changeLevel(data)
updateScore(data)
gameOver(data)
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
data.mode = "Done"
elif (event.type == pygame.MOUSEBUTTONDOWN):
mousePressed(event, data)
elif (event.type == pygame.KEYDOWN):
keyPressed(event, data)
def redrawAll(data):
data.ballSprite.update()
data.screen.blit(data.background, (0, 0))
data.ballSprite.draw(data.screen)
data.stepsList.draw(data.screen)
drawScore(data)
drawLevel(data)
enteredNewLevel(data)
changeColor(data)
# if (data.activateMenu) and (not data.gameActive):
# data.menuScreen.draw(data.screen, data.width, data.height)
# data.menuScreen.updateAll(data.width, data.height)
pygame.display.update()
pygame.display.flip()
def initSounds(data):
data.themeSound = pygame.mixer.Sound("Sounds/fallDown.wav")
data.bounceSound = pygame.mixer.Sound("Sounds/fx/ballbounce.wav")
data.chachingSound = pygame.mixer.Sound("Sounds/fx/chaching.wav")
data.freezeSound = pygame.mixer.Sound("Sounds/fx/freeze.wav")
data.ballDiesSound = pygame.mixer.Sound("Sounds/fx/squish.wav")
data.ballSpeedsUpSound = pygame.mixer.Sound("Sounds/fx/zoom.wav")
# Loops through theme song forever
data.themeSound.play(-1)
def initSteps(data):
# Creating a sprites group for all the steps
data.stepsList = pygame.sprite.Group()
data.lowest = 0
data.whiteText = (255, 255, 255)
data.redColor = (255, 0, 60)
data.orangeColor = (255, 96, 0)
data.yellowColor = (255, 192, 0)
data.greenColor = (52, 224, 125)
data.blueColor = (0, 124, 229)
data.indigoColor = (41, 60, 240)
data.violetColor = (111, 57, 234)
createRandRedStep(data)
updateRedSteps(data)
def initBall(data):
data.ball = Ball()
data.ballSprite = pygame.sprite.RenderPlain(data.ball)
data.ball.moveRight(data)
def initPowerups(data):
data.powerupsList = pygame.sprite.Group()
def initTimes(data):
data.timeElapsed = 0
data.timeElapsedScore = 0
data.timeForLevelChange = 18250
data.timeToChangeScore = 200
def initBackground(data):
data.screen = pygame.display.get_surface()
# data.background = load_image("menusplash.png", -1)
# data.background = pygame.Surface(data.screenSize)
# data.background = pygame.transform.smoothscale(data.background, data.screenSize)
data.background = pygame.Surface(data.screen.get_size())
data.background = data.background.convert()
data.background.fill((0, 0, 0))
def init(data):
data.mode = "Running"
# Frames per second
data.FPS = 30
data.score = 0
data.level = 1
# Hides or shows the cursor by taking in a bool
pygame.mouse.set_visible(0)
data.paused = False
data.activateMenu = True
data.gameActive = False
data.menuScreen = MenuScreen(data.width, data.height)
initTimes(data)
initSteps(data)
initBall(data)
initPowerups(data)
initSounds(data)
initBackground(data)
def run():
pygame.init()
class Struct: pass
data = Struct()
# Initialize screen
(data.width, data.height) = (350, 500)
data.screenSize = (data.width, data.height)
data.screen = pygame.display.set_mode(data.screenSize)
pygame.display.set_caption("FallDown!")
# Initialize clock
data.clock = pygame.time.Clock()
init(data)
timerFired(data)
while (data.mode != "Done"):
data.gameActive = True
if not data.paused:
timerFired(data)
for event in pygame.event.get():
#data.menuScreen.updateAll()
if (event.type == pygame.QUIT):
pygame.quit()
data.gameActive = False
data.mode = "Done"
elif (event.type == pygame.MOUSEBUTTONDOWN):
mousePressed(event, data)
elif (event.type == pygame.KEYDOWN):
keyPressed(event, data)
run()
|
|
import sys
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.cache import caches
from django.db import models
from django.forms.models import fields_for_model
from django.template import RequestContext, loader
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import FeinCMSInline
from feincms.models import Base as FeinCMSBase
from leonardo.utils.memoized import widget_memoized
from leonardo.utils.templates import find_all_templates, template_choices
from ..processors.config import ContextConfig
from ..const import *
from ..widgets.const import ENTER_EFFECT_CHOICES, WIDGET_COLOR_SCHEME_CHOICES
from ..widgets.forms import WIDGETS, WidgetForm
from ..widgets.mixins import ContentProxyWidgetMixin, ListWidgetMixin
try:
from django.contrib.contenttypes import generic
except ImportError:
    # Django >= 1.9 moved generic relations to contenttypes.fields; keep the
    # `generic` name so the GenericForeignKey reference below keeps working
    from django.contrib.contenttypes import fields as generic
class WidgetInline(FeinCMSInline):
form = WidgetForm
template = 'admin/leonardo/widget_inline.html'
def formfield_for_dbfield(self, db_field, request, **kwargs):
widget = self.model.get_widget_for_field(db_field.name)
if widget:
kwargs['widget'] = widget
return db_field.formfield(**kwargs)
return super(WidgetInline, self).formfield_for_dbfield(
db_field, request=request, **kwargs)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "base_theme":
queryset = WidgetBaseTheme.objects.all()
kwargs["queryset"] = queryset.exclude(name__startswith="_")
kwargs["initial"] = queryset.first()
if db_field.name == "content_theme":
queryset = WidgetContentTheme.objects.filter(
widget_class=self.model.__name__)
kwargs["queryset"] = queryset.exclude(name__startswith="_")
kwargs["initial"] = queryset.first()
form_field = super(WidgetInline, self).formfield_for_foreignkey(
db_field, request, **kwargs)
# bootstrap field
form_field.widget.attrs['class'] = 'form-control'
return form_field
def __init__(self, *args, **kwargs):
super(WidgetInline, self).__init__(*args, **kwargs)
self.fieldsets = [
(None, {
'fields': [
list(self.model.fields())
],
}),
(_('Theme'), {
'fields': [
('label', 'base_theme', 'content_theme',
'prerendered_content',
'layout', 'align', 'enabled', 'color_scheme'),
],
}),
(_('Effects'), {
'fields': [
('enter_effect_style', 'enter_effect_duration',
'enter_effect_delay', 'enter_effect_offset',
'enter_effect_iteration', 'enabled',),
],
}),
]
@python_2_unicode_compatible
class WidgetDimension(models.Model):
widget_type = models.ForeignKey(ContentType)
widget_id = models.PositiveIntegerField()
widget_object = generic.GenericForeignKey('widget_type', 'widget_id')
size = models.CharField(
verbose_name="Size", max_length=20, choices=DISPLAY_SIZE_CHOICES, default='md')
width = models.IntegerField(verbose_name=_("Width"),
choices=COLUMN_CHOICES, default=DEFAULT_WIDTH)
height = models.IntegerField(verbose_name=_("Height"),
choices=ROW_CHOICES, default=0)
offset = models.IntegerField(verbose_name=_("Offset"),
choices=COLUMN_CHOICES, default=0)
@cached_property
def classes(self):
classes = []
classes.append('col-{}-{}'.format(self.size, self.width))
if self.height != 0:
classes.append('row-{}-{}'.format(self.size, self.height))
classes.append('col-{}-offset-{}'.format(self.size, self.offset))
return ' '.join(classes)
def __str__(self):
return smart_text("{0} - {1}".format(self.widget_type, self.classes))
class Meta:
verbose_name = _("Widget dimension")
verbose_name_plural = _("Widget dimensions")
app_label = "web"
@python_2_unicode_compatible
class WidgetContentTheme(models.Model):
name = models.CharField(
verbose_name=_("Name"), max_length=255, null=True, blank=True)
label = models.CharField(
verbose_name=_("Title"), max_length=255, null=True, blank=True)
template = models.ForeignKey(
'dbtemplates.Template', verbose_name=_('Content template'),
related_name='content_templates', limit_choices_to={'name__startswith': "widget/"})
style = models.TextField(verbose_name=_('Content style'), blank=True)
widget_class = models.CharField(
verbose_name=_('Widget class'), max_length=255)
def __str__(self):
return self.label or smart_text(self._meta.verbose_name + ' %s' % self.pk)
class Meta:
verbose_name = _("Widget content theme")
verbose_name_plural = _("Widget content themes")
app_label = "web"
@python_2_unicode_compatible
class WidgetBaseTheme(models.Model):
name = models.CharField(
verbose_name=_("Name"), max_length=255, null=True, blank=True)
label = models.CharField(
verbose_name=_("Title"), max_length=255, null=True, blank=True)
template = models.ForeignKey(
'dbtemplates.Template', verbose_name=_('Base template'),
related_name='base_templates', limit_choices_to={'name__startswith': "base/widget/"})
style = models.TextField(verbose_name=_('Base style'), blank=True)
def __str__(self):
return self.label or smart_text(self._meta.verbose_name + ' %s' % self.pk)
class Meta:
verbose_name = _("Widget base theme")
verbose_name_plural = _("Widget base themes")
app_label = "web"
@python_2_unicode_compatible
class Widget(FeinCMSBase):
feincms_item_editor_inline = WidgetInline
enabled = models.NullBooleanField(
verbose_name=_('Is visible?'), default=True)
label = models.CharField(
verbose_name=_("Title"), max_length=255, null=True, blank=True)
base_theme = models.ForeignKey(
WidgetBaseTheme, verbose_name=_('Base theme'),
related_name="%(app_label)s_%(class)s_related")
content_theme = models.ForeignKey(
WidgetContentTheme, verbose_name=_('Content theme'),
related_name="%(app_label)s_%(class)s_related")
layout = models.CharField(
verbose_name=_("Layout"), max_length=25,
default='inline', choices=WIDGET_LAYOUT_CHOICES)
align = models.CharField(
verbose_name=_("Alignment"), max_length=25,
default='left', choices=WIDGET_ALIGN_CHOICES)
vertical_align = models.CharField(
verbose_name=_("Vertical Alignment"), max_length=25,
default='top', choices=VERTICAL_ALIGN_CHOICES)
# TODO: rename this to widget_classes
prerendered_content = models.TextField(
verbose_name=_('prerendered content'), blank=True)
# common attributes
enter_effect_style = models.CharField(
verbose_name=_("Enter effect style"),
max_length=25, default='disabled', choices=ENTER_EFFECT_CHOICES)
enter_effect_duration = models.PositiveIntegerField(verbose_name=_(
'Enter Effect Duration'), null=True, blank=True)
enter_effect_delay = models.PositiveIntegerField(null=True, blank=True)
enter_effect_offset = models.PositiveIntegerField(null=True, blank=True)
enter_effect_iteration = models.PositiveIntegerField(null=True, blank=True)
color_scheme = models.CharField(
verbose_name=_("Color scheme"),
max_length=25, default='default', choices=WIDGET_COLOR_SCHEME_CHOICES)
def save(self, created=True, *args, **kwargs):
self.created = False
if self.pk is None and created:
self.created = True
# for CT Inventory we need flush cache
if hasattr(self.parent, 'flush_ct_inventory'):
self.parent.flush_ct_inventory()
super(Widget, self).save(*args, **kwargs)
if not self.dimensions.exists() and self.created:
WidgetDimension(**{
'widget_id': self.pk,
'widget_type': self.content_type,
'size': 'xs'
}).save()
self.purge_from_cache()
        # mark this widget as needing a view update;
        # this flag is handled by leonardo_channels.widgets.reciever
self.update_view = True
def delete(self, *args, **kwargs):
region = self.region
parent = self.parent
        # this is required for flushing inherited content;
        # it is important to do this before the widget is deleted,
        # because delete triggers a render before the cache is flushed
if hasattr(parent, 'flush_ct_inventory'):
parent.flush_ct_inventory()
super(Widget, self).delete(*args, **kwargs)
        for d in self.dimensions:
            d.delete()
# sort widgets in region
widgets = getattr(parent.content, region)
widgets.sort(key=lambda w: w.ordering)
for i, w in enumerate(widgets):
w.ordering = i
w.update_view = False
w.save()
# this is page specific
if hasattr(parent, 'invalidate_cache'):
parent.invalidate_cache()
class Meta:
abstract = True
verbose_name = _("Abstract widget")
verbose_name_plural = _("Abstract widgets")
app_label = "web"
def __str__(self):
return self.label or smart_text(
'%s<pk=%s, parent=%s<pk=%s, %s>, region=%s,'
' ordering=%d>') % (
self.__class__.__name__,
self.pk,
self.parent.__class__.__name__,
self.parent.pk,
self.parent,
self.region,
self.ordering)
@cached_property
def get_ct_name(self):
"""returns content type name with app label
"""
return ".".join([self._meta.app_label, self._meta.model_name])
@cached_property
def content_type(self):
return ContentType.objects.get_for_model(self)
def get_template_name(self):
return self.content_theme.template
@cached_property
def get_template(self):
return self.content_theme.template
def _template_xml_name(self):
template = 'default'
return 'widget/%s/%s.xml' % (self.widget_name, template)
template_xml_name = property(_template_xml_name)
@property
def widget_name(self):
return self.__class__.__name__.lower().replace('widget', '')
@cached_property
def get_base_template(self):
return self.base_theme.template
@cached_property
def widget_label(self):
return self._meta.verbose_name
@cached_property
def template_source(self):
template = loader.get_template(self.content_theme.template)
return template
def render(self, **kwargs):
return self.render_content(kwargs)
@widget_memoized
def render_with_cache(self, options):
"""proxy for render_content with memoized
this method provide best performence for complicated
widget content like a context navigation
"""
return self.render_content(options)
def get_template_data(self, request):
'''returns a dictionary of extra template data
override this method to provide additional context data
'''
return {}
def get_context_data(self, request):
'''returns initial context'''
context = RequestContext(request, {
'widget': self,
'base_template': self.get_base_template,
'request': request,
'LEONARDO_CONFIG': ContextConfig(request)
})
context.push(self.get_template_data(request))
return context
def render_content(self, options):
'''returns rendered widget and handle error during rendering'''
request = options.get('request', {})
context = self.get_context_data(request)
try:
rendered_content = self.template_source.render(context)
except Exception as e:
if settings.DEBUG:
exc_info = sys.exc_info()
six.reraise(*exc_info)
rendered_content = self.render_error(context, e)
return rendered_content
def render_error(self, context, exception):
'''returns the widget rendered with the error
note: perhaps the error should not be rendered to the page when DEBUG is off
'''
context.push({'error': str(exception)})
return render_to_string("widget/error.html", context)
def handle_exception(self, request, exc):
"""Handle exception and returns rendered error template
"""
return self.render_error(self.get_context_data(request), exc)
def render_response(self, context={}):
'''just render to string shortcut for less imports'''
return render_to_string(self.get_template_name, context)
@cached_property
def model_cls(self):
return self.__class__.__name__
@cached_property
def dimensions(self):
return WidgetDimension.objects.filter(
widget_id=self.pk,
widget_type=ContentType.objects.get_for_model(self))
@cached_property
def get_dimension_classes(self):
"""agreggate all css classes
"""
classes = []
for d in self.dimensions:
# do not duplicate same classes
for cls in d.classes.split(' '):
if cls not in classes:
classes.append(cls)
return classes
@cached_property
def render_content_classes(self):
"""agreggate all content classes
ordered from abstract to concrete instance
"""
classes = [
'leonardo-content',
'template-%s' % self.content_theme.name,
'%s-content-%s' % (self.widget_name, self.content_theme.name),
'%s-content' % self.fe_identifier,
]
if self.vertical_align == "middle":
classes.append("centered")
return " ".join(classes)
@cached_property
def render_base_classes(self):
"""agreggate all wrapper classes
ordered from abstract to concrete instance
"""
classes = self.get_dimension_classes
classes.append('leonardo-widget')
classes.append('text-%s' % self.align)
classes.append('%s-base-%s' % (self.widget_name, self.base_theme.name))
classes.append('%s-base' % (self.fe_identifier))
# trigger widget auto-reload
if getattr(self, 'auto_reload', False):
classes.append('auto-reload')
# special vertical align
if self.vertical_align == 'middle':
classes.append("valignContainer")
# specific widget classes without overhead
if hasattr(self, 'classes') and isinstance(self.classes, str):
classes += self.classes.split(' ')
# TODO: add or rename this field to widget_classes
if self.prerendered_content:
classes.append(self.prerendered_content)
return " ".join(classes)
@classmethod
def get_widget_for_field(cls, name):
'''returns the form widget declared for ``name`` in ``cls.widgets``
a callable may be declared in place of a widget instance; it is
resolved and cached on first access
'''
if hasattr(cls, 'widgets') and name in cls.widgets:
widget = cls.widgets[name]
if callable(widget):
widget = widget()
# save for later
if widget:
cls.widgets[name] = widget
return widget
return
@classmethod
def init_widgets(cls):
'''initialize all declared form widgets, resolving callables to instances
'''
if hasattr(cls, 'widgets'):
for field, widget in cls.widgets.items():
if callable(widget):
widget = widget()
if widget:
cls.widgets[field] = widget
@classmethod
def templates(cls, choices=False, suffix=True):
"""returns widget templates located in ``templates/widget/widgetname``
"""
widget_name = cls.__name__.lower().replace('widget', '')
pattern = 'widget/{0}/'.format(widget_name)
res = find_all_templates('{0}*'.format(pattern))
if choices:
return template_choices(res, suffix=suffix)
return res
@classmethod
def fields(cls):
widget_fields = [
f.name for f in Widget._meta.fields]
return fields_for_model(
cls, exclude=widget_fields,
widgets=WIDGETS)
@property
def next_ordering(self):
"""return order for creating in content region
"""
if self.parent:
return len(getattr(self.parent.content, self.region, [])) + 1
else:
return 0
@cached_property
def fe_identifier(self):
"""
Returns an identifier which is understood by the frontend
editing javascript code. (It is used to find the URL which
should be used to load the form for every given block of
content.)
"""
meta = self.__class__._meta
return '%s-%s-%s-%s' % (
meta.app_label,
meta.model_name,
self.parent_id,
self.id,
)
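# Illustrative note (hypothetical values): a widget of model ``htmltextwidget``
# in app ``web`` with parent_id 10 and id 42 yields the frontend identifier
# "web-htmltextwidget-10-42".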
@classmethod
def get_widget_icon(cls):
return getattr(cls, 'icon', 'fa fa-plus')
# CACHE TOOLS
@cached_property
def cache(self):
'''default cache'''
return caches['default']
@cached_property
def cache_key(self):
'''default key for html content'''
return 'widget.%s.html' % self.fe_identifier
@cached_property
def cache_keys(self):
'''Returns all cache keys which would be
flushed after save
'''
return [self.cache_key]
@cached_property
def widget_cache_timeout(self):
'''allow widget to set custom cache timeout'''
return getattr(self, 'cache_timeout', settings.LEONARDO_CACHE_TIMEOUT)
def is_cached(self, request):
'''returns True if the widget content will be cached
defaults to False and is driven by the ``leonardo_cache`` property
'''
if request.frontend_editing:
return False
return getattr(self, 'leonardo_cache', False)
def purge_from_cache(self):
'''Purge widget content from cache'''
self.cache.delete_many(self.cache_keys)
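# A hedged opt-in sketch (hypothetical subclass, not part of this module):
# ``is_cached`` and ``widget_cache_timeout`` above read ``leonardo_cache`` and
# ``cache_timeout`` from the concrete widget class, e.g.:
#     class NewsWidget(Widget):
#         leonardo_cache = True    # opt into caching (see is_cached)
#         cache_timeout = 60 * 15  # overrides LEONARDO_CACHE_TIMEOUT
#         class Meta:
#             abstract = True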
class ListWidget(Widget, ListWidgetMixin):
"""Base class for object list widget
"""
class Meta:
abstract = True
|
|
# coding: utf-8
# In[1]:
import tsyganenko as tsyg
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from spacepy import coordinates as coord
import spacepy.time as spt
from spacepy.time import Ticktock
import datetime as dt
from mpl_toolkits.mplot3d import Axes3D
import sys
earth_radius_ax = 1.5*6371 #km
# the year data lives outside this repo so it does not bloat the GitHub repo
pathname = '../../data-se3-path-planner/yearData/batch2015/'
# pathname = '../../data-se3-path-planner/yearData/batch2019/'
# pathname = '../../batch2019/'
sys.path.append(pathname)
# this is now a function; it will be copy-pasted into the tsyganenko
# library and called from there in a notebook for presentation purposes
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
# months = ['Jan', 'Jul', 'Dec']
inclinations = [i for i in range(55,90,5)] #8
# inclinations = inclinations[::-1]
# months = months[::-1]
# In[2]:
def getColorMap(filename):
"""
reads a single GMAT results CSV and returns the cusp-crossing
count used to build the colormap
"""
df = pd.read_csv(filename)
# df = pd.read_csv(pathname+'65_year.csv')
# df = pd.read_csv(pathname+'Jan65.csv')
# df = pd.read_csv(pathname+'Jan80.csv')
# df = pd.read_csv(pathname+'Jul65.csv')
# df = pd.read_csv(pathname+'Jul90.csv')
GMAT_MJD_OFFSET = 29999.5
t = df['DefaultSC.A1ModJulian'] + GMAT_MJD_OFFSET
x = df['DefaultSC.gse.X']
y = df['DefaultSC.gse.Y']
z = df['DefaultSC.gse.Z']
spacecraft = coord.Coords([[i,j,k] for i,j,k in zip(x,y,z)], 'GSE', 'car')
spacecraft.ticks = Ticktock(t,'MJD')
# originally SM
spacecraft = spacecraft.convert('SM','car')
points = 10000
# this figure validates what I already expected
# fig = plt.figure()
# ax = fig.add_subplot(111,projection='3d')
# ax.plot(spacecraft.x[:points],spacecraft.y[:points],spacecraft.z[:points])
# plt.title('SM Orbit')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
# after looking at a couple of orbits from the GSE point of view,
# an orbit with zero inclination WRT the earth's equator can still be
# inclined WRT the ecliptic, which makes these orbits confusing to read
# In[3]:
# goal, plot PHI on the same plot
xc,yc,zc = tsyg.orbitalCuspLocation(spacecraft,t)
# originally 'SM'
cusp_location = coord.Coords([[i,j,k] for i,j,k in zip(xc,yc,zc)], 'SM', 'sph') # changed
cusp_location.ticks = Ticktock(t,'MJD')
# cusp_location = cusp_location.convert('SM','car')
# fig = plt.figure()
# ax = fig.add_subplot(111,projection='3d')
# if I just want to :points
# ax.plot(spacecraft.x[:points],spacecraft.y[:points],spacecraft.z[:points])
# ax.plot(cusp_location.x[:points], cusp_location.y[:points],cusp_location.z[:points])
# if I want EVERYTHING
# ax.plot(spacecraft.x,spacecraft.y, spacecraft.z)
# ax.scatter(cusp_location.x, cusp_location.y,cusp_location.z)
# plt.title('SM Orbit and Corresponding Cusp Location')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# ax.set_xlim3d(-earth_radius_ax, earth_radius_ax)
# ax.set_ylim3d(-earth_radius_ax, earth_radius_ax)
# ax.set_zlim3d(-earth_radius_ax, earth_radius_ax)
# plt.show()
# plt.plot(cusp_location.x,cusp_location.y)
# plt.show()
# In[4]:
# plt.plot(spacecraft.x,spacecraft.z)
# plt.plot(cusp_location.x,cusp_location.z)
# plt.xlim([-0.5*earth_radius_ax, earth_radius_ax])
# plt.ylim([-0.5*earth_radius_ax, earth_radius_ax])
# plt.xlabel('x')
# plt.ylabel('z')
# plt.title('xz plane of the cusp model')
# plt.show()
# In[5]:
# the working configuration is 'SM'
spacecraft_sph = spacecraft.convert('GSM','sph')
cusp_location_sph = cusp_location.convert('GSM','sph')
# In[6]:
# making the plots
points = 10000# len(spacecraft_sph.ticks.MJD)
lowBound = 0# 156000
highBound = points# 166000
# plt.plot(spacecraft_sph.ticks.MJD[lowBound:highBound],spacecraft_sph.lati[lowBound:highBound],label='sc')
# i was doing 90 - cusp location?
# plt.plot(cusp_location_sph.ticks.MJD[lowBound:highBound],90-cusp_location_sph.lati[lowBound:highBound],label='cusp')
# plt.legend()
# plt.xlabel('mjd ticks')
# plt.ylabel('sm latitude')
# plt.title('mjd ticks vs sm latitude (cusp and spacecraft)')
# plt.show()
# plt.plot(spacecraft_sph.ticks.MJD[lowBound:highBound], spacecraft_sph.long[lowBound:highBound],label='sc')
# plt.plot(cusp_location_sph.ticks.MJD[lowBound:highBound],cusp_location_sph.long[lowBound:highBound],label='cusp')
# plt.show()
# modlat = 90 - cusp_location_sph.lati
modlat = cusp_location_sph.lati
print("LATITUDE IN CUSP LOCATION OBJECT",modlat)
# In[7]:
# count it up
count = []
c = 0
for satlat,cusplat, satlon,cusplon in zip(spacecraft_sph.lati, modlat, spacecraft_sph.long, cusp_location_sph.long):
# 0 < cusplon < 180: a check like this is needed to ensure we only look at the dayside
# bear in mind that these bounds ONLY WORK in earth-sun-line-centered coordinate systems
if abs(satlat - cusplat)<=2 and abs(satlon-cusplon)<=2: #earlier using 4 and 4
# right now i'm using +/- 2 deg for the latitude,
# and +/- 2 deg for the longitude
c+=1
count.append(c)
else:
count.append(c)
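# a vectorized alternative sketch (assuming .lati/.long are numpy arrays),
# equivalent to the loop above:
# hit = (np.abs(spacecraft_sph.lati - modlat) <= 2) & \
#       (np.abs(spacecraft_sph.long - cusp_location_sph.long) <= 2)
# count = np.cumsum(hit)
# c = int(count[-1]) if len(count) else 0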
# plt.plot(spacecraft_sph.ticks.MJD, count)
# plt.xlabel('MJD tick')
# plt.ylabel('cusp crossings')
# plt.title('Cusp Crossings vs. MJD ticks')
#plt.xlim([58700, 58800])
# plt.show()
print("final crossings count = ",c)
# mean latitude of the cusp
print("mean sm lat of cusp", 90 - sum(cusp_location_sph.lati)/len(cusp_location_sph.lati))
print("mean sm lon of cusp", sum(cusp_location_sph.long)/len(cusp_location_sph.long))
# In[8]:
# let's sanity-check the psi (dipole tilt) function
r = 1.127
psi = tsyg.getTilt(t)
psi = np.asarray(psi)
phic = tsyg.getPhi_c(r,psi)
# plt.plot(phic)
# plt.title('plot of phi_c for troubleshooting')
# plt.show()
# show the date in UTC
print("UTC date", spacecraft_sph.ticks.UTC)
return c
cma2 =[[getColorMap(pathname+month+str(inclination)+'_results.csv') for month in months ] for inclination in inclinations]
if __name__ == "__main__":
# cdict = {'red': ((0.0, 0.0, 0.0),
# (0.5, 1.0, 0.7),
# (1.0, 1.0, 1.0)),
# 'green': ((0.0, 0.0, 0.0),
# (0.5, 1.0, 0.0),
# (1.0, 1.0, 1.0)),
# 'blue': ((0.0, 0.0, 0.0),
# (0.5, 1.0, 0.0),
# (1.0, 0.5, 1.0))}
cdict = {'red': ((0.0, 0.0, 0.0),
(0.1, 0.5, 0.5),
(0.2, 0.0, 0.0),
(0.4, 0.2, 0.2),
(0.6, 0.0, 0.0),
(0.8, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green':((0.0, 0.0, 0.0),
(0.1, 0.0, 0.0),
(0.2, 0.0, 0.0),
(0.4, 1.0, 1.0),
(0.6, 1.0, 1.0),
(0.8, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0),
(0.1, 0.5, 0.5),
(0.2, 1.0, 1.0),
(0.4, 1.0, 1.0),
(0.6, 0.0, 0.0),
(0.8, 0.0, 0.0),
(1.0, 0.0, 0.0))}
my_cmap = colors.LinearSegmentedColormap('my_colormap',cdict,256)
plt.pcolor(cma2,cmap=my_cmap)
plt.colorbar()
plt.xlabel('Start Month')
# y_labels = [str(i) for i in range(0,90,5)] #8
# plt.yticks(inclinations,str(inclinations))
plt.ylabel('Inclinations')
plt.title('Cusp Crossings Analysis 2015')
plt.show()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet backbone model definition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from REDACTED.mask_rcnn import spatial_transform
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-4
def batch_norm_relu(inputs,
is_training_bn,
relu=True,
init_zero=False,
data_format='channels_last',
name=None):
"""Performs a batch normalization followed by a ReLU.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training_bn: `bool` for whether the model is training.
relu: `bool` if False, omits the ReLU operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
if data_format == 'channels_first':
axis = 1
else:
axis = 3
inputs = tf.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON,
center=True,
scale=True,
training=is_training_bn,
fused=True,
gamma_initializer=gamma_initializer,
name=name)
if relu:
inputs = tf.nn.relu(inputs)
return inputs
def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
data_format='channels_last'):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
"""
if strides > 1:
inputs = spatial_transform.fixed_padding(inputs, kernel_size,
data_format=data_format)
return tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)
def residual_block(inputs,
filters,
is_training_bn,
strides,
use_projection=False,
data_format='channels_last'):
"""Standard building block for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training_bn: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
shortcut = inputs
if use_projection:
# Projection shortcut in first layer to match filters and strides
shortcut = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=1,
strides=strides,
data_format=data_format)
shortcut = batch_norm_relu(
shortcut, is_training_bn, relu=False, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
data_format=data_format)
inputs = batch_norm_relu(
inputs,
is_training_bn,
relu=False,
init_zero=True,
data_format=data_format)
return tf.nn.relu(inputs + shortcut)
def bottleneck_block(inputs,
filters,
is_training_bn,
strides,
use_projection=False,
data_format='channels_last'):
"""Bottleneck block variant for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training_bn: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
shortcut = inputs
if use_projection:
# Projection shortcut only in first block within a group. Bottleneck blocks
# end with 4 times the number of filters.
filters_out = 4 * filters
shortcut = conv2d_fixed_padding(
inputs=inputs,
filters=filters_out,
kernel_size=1,
strides=strides,
data_format=data_format)
shortcut = batch_norm_relu(
shortcut, is_training_bn, relu=False, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=1,
strides=strides,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=4 * filters,
kernel_size=1,
strides=1,
data_format=data_format)
inputs = batch_norm_relu(
inputs,
is_training_bn,
relu=False,
init_zero=True,
data_format=data_format)
return tf.nn.relu(inputs + shortcut)
def block_group(inputs,
filters,
block_fn,
blocks,
strides,
is_training_bn,
name,
data_format='channels_last'):
"""Creates one group of blocks for the ResNet model.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
block_fn: `function` for the block to use within the model
blocks: `int` number of blocks contained in the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
is_training_bn: `bool` for whether the model is training.
name: `str` name for the Tensor output of the block layer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
is_training_bn,
strides,
use_projection=True,
data_format=data_format)
for _ in range(1, blocks):
inputs = block_fn(
inputs, filters, is_training_bn, 1, data_format=data_format)
return tf.identity(inputs, name)
def transform_space_to_depth_kernel(kernel, dtype, block_size=2):
"""Transforms the convolution kernel for space-to-depth computation.
This function transforms the kernel for space-to-depth convolution. For
example, the kernel size is [7, 7, 3, 64] (conv0 in ResNet), and the
block_size is 2. First the kernel is padded with (top and left) zeros to
[8, 8, 3, 64]. Then, it is transformed to [4, 4, 12, 64] and cast to the
`dtype`.
Args:
kernel: A tensor with a shape of [height, width, in_depth, out_depth].
dtype: The type of the input of the convolution kernel. The kernel will be
cast to this type.
block_size: An `int` to indicate the block size in space-to-depth
transform.
Returns:
A transformed kernel that has the same type as `dtype`. The shape is
[height // block_size, width // block_size, in_depth * (block_size ** 2),
out_depth].
"""
def _round_up(num, multiple):
remainder = num % multiple
if remainder == 0:
return num
else:
return num + multiple - remainder
h, w, in_d, out_d = kernel.get_shape().as_list()
pad_h = _round_up(h, block_size) - h
pad_w = _round_up(w, block_size) - w
kernel = tf.pad(
kernel, paddings=tf.constant([[pad_h, 0], [pad_w, 0], [0, 0,], [0, 0]]),
mode='CONSTANT', constant_values=0.)
kernel = tf.reshape(kernel, [(h + pad_h) // block_size, block_size,
(w + pad_w) // block_size, block_size,
in_d, out_d])
kernel = tf.transpose(kernel, [0, 2, 1, 3, 4, 5])
kernel = tf.reshape(kernel, [(h + pad_h) // block_size,
(w + pad_w) // block_size,
in_d * (block_size ** 2), out_d])
kernel = tf.cast(kernel, dtype)
return kernel
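# Shape walkthrough for the docstring example (block_size=2): a [7, 7, 3, 64]
# conv0 kernel is zero-padded on top/left to [8, 8, 3, 64], reshaped to
# [4, 2, 4, 2, 3, 64], transposed to [4, 4, 2, 2, 3, 64] and finally reshaped
# to [4, 4, 12, 64] before being cast to `dtype`.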
def conv0_space_to_depth(inputs, filters, kernel_size, strides,
data_format='channels_last',
space_to_depth_block_size=2):
"""Uses space-to-depth convolution for conv0.
This function replaces the first convolution (conv0) in ResNet with
space-to-depth transformation. It creates a convolution kernel, whose
dimension and name are the same as those of conv0. The `inputs` is an image
tensor that already has the space-to-depth transform.
Args:
inputs: `Tensor` of size `[batch, height_in, width_in, channels]`.
filters: An `int` number of filters in the convolution.
kernel_size: An `int` size of the kernel to be used in the convolution.
strides: An `int` strides of the convolution.
data_format: A `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
space_to_depth_block_size: An `int` indicates the block size of
space-to-depth convolution for conv0. Specific to ResNet, this currently
supports only block_size=2.
Returns:
A `Tensor` with the same type as `inputs`.
Raises:
ValueError if `space_to_depth_block_size` is not 2.
"""
if space_to_depth_block_size != 2:
raise ValueError('Space-to-depth does not support block_size (%d).' %
space_to_depth_block_size)
conv0 = tf.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
data_format=data_format,
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer())
# Use the image size without space-to-depth transform as the input of conv0.
# This allows the kernel size to be the same as the original conv0 such that
# the model is able to load the pre-trained ResNet checkpoint.
batch_size, h, w, c = inputs.get_shape().as_list()
conv0.build([batch_size,
h * space_to_depth_block_size,
w * space_to_depth_block_size,
c // (space_to_depth_block_size ** 2)])
kernel = conv0.weights[0]
kernel = transform_space_to_depth_kernel(
kernel, inputs.dtype, block_size=space_to_depth_block_size)
inputs = spatial_transform.space_to_depth_fixed_padding(
inputs, kernel_size, data_format, space_to_depth_block_size)
return tf.nn.conv2d(
input=inputs, filter=kernel, strides=[1, 1, 1, 1], padding='VALID',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW',
name='conv2d/Conv2D')
def resnet_v1_generator(block_fn, layers, data_format='channels_last',
conv0_kernel_size=7,
space_to_depth_block_size=0):
"""Generator of ResNet v1 model with classification layers removed.
Our actual ResNet network. We return the outputs of c2, c3, c4, c5.
N.B. batch norm is always run with trained parameters, as we use very small
batches when training the object layers.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
conv0_kernel_size: an integer of the kernel size of the first convolution.
space_to_depth_block_size: an integer indicates the block size of
space-to-depth convolution for conv0. `0` means use the original conv2d
in ResNet.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training_bn=False):
"""Creation of the model graph."""
if space_to_depth_block_size != 0:
# conv0 uses space-to-depth transform for TPU performance.
inputs = conv0_space_to_depth(
inputs=inputs,
filters=64,
kernel_size=conv0_kernel_size,
strides=2,
data_format=data_format,
space_to_depth_block_size=space_to_depth_block_size)
else:
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=64,
kernel_size=conv0_kernel_size,
strides=2,
data_format=data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)
inputs = tf.layers.max_pooling2d(
inputs=inputs,
pool_size=3,
strides=2,
padding='SAME',
data_format=data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
blocks=layers[0],
strides=1,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group1',
data_format=data_format)
c3 = block_group(
inputs=c2,
filters=128,
blocks=layers[1],
strides=2,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group2',
data_format=data_format)
c4 = block_group(
inputs=c3,
filters=256,
blocks=layers[2],
strides=2,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group3',
data_format=data_format)
c5 = block_group(
inputs=c4,
filters=512,
blocks=layers[3],
strides=2,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group4',
data_format=data_format)
return c2, c3, c4, c5
return model
def resnet_v1(resnet_depth, conv0_kernel_size, conv0_space_to_depth_block_size,
data_format='channels_last'):
"""Returns the ResNet model for a given size and number of output classes."""
model_params = {
18: {'block': residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
raise ValueError('Not a valid resnet_depth:', resnet_depth)
params = model_params[resnet_depth]
return resnet_v1_generator(
params['block'], params['layers'], data_format, conv0_kernel_size,
conv0_space_to_depth_block_size)
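if __name__ == '__main__':
  # A minimal usage sketch (not part of the original file), assuming the
  # spatial_transform dependency above is importable: build a ResNet-50
  # backbone without space-to-depth and inspect the feature map shapes.
  tf.disable_eager_execution()
  resnet_fn = resnet_v1(resnet_depth=50, conv0_kernel_size=7,
                        conv0_space_to_depth_block_size=0)
  images = tf.placeholder(tf.float32, [8, 224, 224, 3])
  c2, c3, c4, c5 = resnet_fn(images, is_training_bn=False)
  # Expected spatial sizes for 224x224 inputs: 56, 28, 14, 7.
  print([t.shape.as_list() for t in (c2, c3, c4, c5)])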
|
|
"""Representation of Data
.. contents:: :local:
Before loading data, you need to define it. A single subclass of
DataSet represents a database relation in Python code. Think of the class as a
table, each inner class as a row, and each attribute per row as a column value.
For example::
>>> from fixture import DataSet
>>> class Authors(DataSet):
... class frank_herbert:
... first_name = "Frank"
... last_name = "Herbert"
The inner class ``frank_herbert`` defines a row with the columns ``first_name``
and ``last_name``. The name ``frank_herbert`` is an identifier that you can use
later on, when you want to refer to this specific row.
The main goal will be to load this data into something useful, like a database.
But notice that the ``id`` values aren't defined in the DataSet. This is because
the database will most likely create an ``id`` for you when you insert the row
(however, if you need to specify a specific ``id`` number, you are free to do
so). How you create a DataSet will be influenced by how the underlying data object saves data.
Inheriting DataSet rows
~~~~~~~~~~~~~~~~~~~~~~~
Since a row is just a Python class, you can inherit from a row to morph its values, i.e.::
>>> class Authors(DataSet):
... class frank_herbert:
... first_name = "Frank"
... last_name = "Herbert"
... class brian_herbert(frank_herbert):
... first_name = "Brian"
This is useful for adhering to the DRY principle (Don't Repeat Yourself) as well
as for `testing edge cases`_.
.. note::
The primary key value will not be inherited from a row. See
`Customizing a DataSet`_ if you need to set the name of a DataSet's primary
key to something other than ``id``.
Referencing foreign DataSet classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When defining rows in a DataSet that reference foreign keys, you need to mimic how your data object wants to save such a reference. If your data object wants to save foreign keys as objects (not ID numbers) then you can simply reference another row in a DataSet as if it were an object.::
>>> class Books(DataSet):
... class dune:
... title = "Dune"
... author = Authors.frank_herbert
... class sudanna:
... title = "Sudanna Sudanna"
... author = Authors.brian_herbert
During data loading, the reference to DataSet ``Authors.brian_herbert`` will be replaced with the actual stored object used to load that row into the database. This will work as expected for one-to-many relationships, i.e.::
>>> class Books(DataSet):
... class two_worlds:
... title = "Man of Two Worlds"
... authors = [Authors.frank_herbert, Authors.brian_herbert]
However, in some cases you may need to reference an attribute that does not have a value until it is loaded, like a serial ID column. (Note that this is not supported by the `sqlalchemy`_ data layer when using sessions.) To facilitate this, each inner class of a DataSet gets decorated with a special method, ``ref()``,
that can be used to reference a column value before it exists, i.e.::
>>> class Books(DataSet):
... class dune:
... title = "Dune"
... author_id = Authors.frank_herbert.ref('id')
... class sudanna:
... title = "Sudanna Sudanna"
... author_id = Authors.brian_herbert.ref('id')
This sets the ``author_id`` to the ``id`` of another row in ``Author``, as if it
were a foreign key. But notice that the ``id`` attribute wasn't explicitly
defined by the ``Authors`` data set. When the ``id`` attribute is accessed later
on, its value is fetched from the actual row inserted.
Customizing a DataSet
~~~~~~~~~~~~~~~~~~~~~
A DataSet can be customized by defining a special inner class named ``Meta``.
See the `DataSet.Meta`_ API for more info.
.. _DataSet.Meta: ../apidocs/fixture.dataset.DataSet.Meta.html
.. _testing edge cases: http://brian.pontarelli.com/2006/12/04/the-importance-of-edge-case-testing/
.. api_only::
The fixture.dataset module
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import sys, types
from fixture.util import ObjRegistry
__all__ = ['DataSet']
class DataContainer(object):
"""contains data accessible by attribute and/or key.
for all internally used attributes, use the inner class Meta.
On instances, use self.meta instead.
"""
_reserved_attr = ('meta', 'Meta', 'ref', 'get')
class Meta:
data = None
keys = None
def __init__(self, data=None, keys=None):
lazy_meta(self)
if not data:
data = {}
self.meta.data = data
if not keys:
keys = []
self.meta.keys = keys
def __contains__(self, name):
return name in self.meta.keys
def __getitem__(self, key):
return self.meta.data[key]
def __getattribute__(self, name):
# it is necessary to completely override __getattr__
# so that class attributes don't interfere
if name.startswith('_') or name in self._reserved_attr:
return object.__getattribute__(self, name)
try:
return self.meta.data[name]
except KeyError:
raise AttributeError("%s has no attribute '%s'" % (self, name))
def __repr__(self):
if hasattr(self, 'meta'):
keys = self.meta.keys
else:
keys = None
return "<%s at %s with keys %s>" % (
self.__class__.__name__,
hex(id(self)), keys)
def get(self, k, default=None):
return self.meta.data.get(k, default)
def _setdata(self, key, value):
if key not in self.meta.data:
self.meta.keys.append(key)
self.meta.data[key] = value
class Ref(object):
"""A reference to a row in a DataSet class.
This allows a DataSet to reference an id column of a "foreign key" DataSet
before it exists.
Ref is a Descriptor containing a deferred value to an attribute of a data
object (like an instance of a SQLAlchemy mapped class). It provides the
DataSet a way to cloak the fact that "id" is an attribute only populated
after said data object is saved to the database. In other words, the
DataSet doesn't know or care when it has been loaded or not. It thinks it
is referencing "id" all the same. The timing of when id is accessed is
handled by the LoadableFixture.
"""
class Value(object):
"""A reference to a value in a row of a DataSet class."""
def __init__(self, ref, attr_name):
self.attr_name = attr_name
self.ref = ref
def __repr__(self):
return "<%s.%s for %s.%s.%s (%s)>" % (
Ref.__name__, self.__class__.__name__,
self.ref.dataset_class.__name__, self.ref.key, self.attr_name,
(self.ref.dataset_obj is None and 'not yet loaded' or 'loaded'))
def __get__(self, obj, type=None):
if obj is None:
# self was assigned to a class object
return self
else:
# self was assigned to an instance
if self.ref.dataset_obj is None:
raise RuntimeError(
"Cannot access %s, referenced %s %s has not "
"been loaded yet" % (
self, DataSet.__name__, self.ref.dataset_class))
obj = self.ref.dataset_obj.meta._stored_objects.get_object(
self.ref.key)
return getattr(obj, self.attr_name)
# raise ValueError("called __get__(%s, %s)" % (obj, type))
def __init__(self, dataset_class, row):
self.dataset_class = dataset_class
self.dataset_obj = None
self.row = row
# i.e. the name of the row class...
self.key = self.row.__name__
def __call__(self, ref_name):
return self.Value(self, ref_name)
def __repr__(self):
return "<%s to %s.%s at %s>" % (
self.__class__.__name__, self.dataset_class.__name__,
self.row.__name__, hex(id(self)))
def is_row_class(attr):
attr_type = type(attr)
return ((attr_type==types.ClassType or attr_type==type) and
attr.__name__ != 'Meta' and
not issubclass(attr, DataContainer.Meta))
class DataType(type):
"""meta class for creating DataSet classes."""
default_primary_key = ['id']
def __init__(cls, name, bases, cls_attr):
super(DataType, cls).__init__(name, bases, dict)
if 'Meta' in cls_attr and hasattr(cls_attr['Meta'], 'primary_key'):
cls_attr['_primary_key'] = cls_attr['Meta'].primary_key
else:
cls_attr['_primary_key'] = cls.default_primary_key
# just like dir(), we should do this in alpha order :
## NOTE: dropping support for <2.4 here...
for name in sorted(cls_attr.keys()):
attr = cls_attr[name]
if is_row_class(attr):
cls.decorate_row(attr, name, bases, cls_attr)
del cls_attr['_primary_key']
def decorate_row(cls, row, name, bases, cls_attr):
# store a backref to the container dataset
row._dataset = cls
# bind a ref method
row.ref = Ref(cls, row)
# fix inherited primary keys
names_to_uninherit = []
for name in dir(row):
if name in cls_attr['_primary_key']:
if name not in row.__dict__:
# then this was an inherited value, so we need to nullify it
# without 1) disturbing the other inherited values and 2)
# disturbing the inherited class. is this nuts?
names_to_uninherit.append(name)
bases_to_replace = []
if names_to_uninherit:
base_pos = 0
for c in row.__bases__:
for name in names_to_uninherit:
if name in c.__dict__:
bases_to_replace.append((c, base_pos))
# just need to detect one attribute...
break
base_pos += 1
new_bases = [b for b in row.__bases__]
for base_c, base_pos in bases_to_replace:
# this may not work if the row's base was a new-style class
new_base = types.ClassType(
base_c.__name__, base_c.__bases__,
dict([(k, getattr(base_c, k)) for k in dir(base_c) \
if not k.startswith('_') and \
k not in names_to_uninherit]))
new_bases[base_pos] = new_base
if new_bases:
row.__bases__ = tuple(new_bases)
def is_rowlike(candidate):
"""returns True if candidate is *like* a DataRow.
Not to be confused with issubclass(candidate, DataRow).
A regular or new-style class is row-like because DataSet objects allow any
type of class to declare a row of data
"""
return hasattr(candidate, '_dataset') and type(candidate._dataset) in (
DataType, DataSet)
class DataRow(object):
"""a DataSet row, values accessible by attibute or key."""
_reserved_attr = ('columns',)
def __init__(self, dataset):
object.__setattr__(self, '_dataset', dataset)
# i.e. the name of the row class...
object.__setattr__(self, '_key', self.__class__.__name__)
def __getitem__(self, item):
return getattr(self, item)
def __getattr__(self, name):
# an undefined data attribute was referenced,
# let's look for it in the stored object.
# an example of this would be an ID, which was
# created only after load
if name.startswith('_'):
return object.__getattribute__(self, name)
obj = self._dataset.meta._stored_objects.get_object(self._key)
return getattr(obj, name)
@classmethod
def columns(self):
for k in dir(self):
if k.startswith('_') or k in self._reserved_attr:
continue
yield k
class DataSetStore(list):
"""keeps track of actual objects stored in a dataset."""
def __init__(self, dataset):
list.__init__(self)
self.dataset = dataset
self._ds_key_map = {}
def get_object(self, key):
"""returns the object at this key.
In this example...
>>> class EventData(DataSet):
... class click:
... id=1
...the key is "click." The object returned would be an adapter for
EventData, probably an Event object
"""
try:
return self[ self._ds_key_map[key] ]
except (IndexError, KeyError):
etype, val, tb = sys.exc_info()
raise etype("row '%s' hasn't been loaded for %s (loaded: %s)" % (
key, self.dataset, self)), None, tb
def store(self, key, obj):
self.append(obj)
pos = len(self)-1
self._ds_key_map[key] = pos
dataset_registry = ObjRegistry()
class DataSet(DataContainer):
"""defines data to be loaded
a loader will typically want to load a dataset into a
single storage medium. I.E. a table in a database.
Note that rows are always classes until the dataset instance has been
loaded::
>>> class Flowers(DataSet):
... class violets:
... color = 'blue'
... class roses:
... color = 'red'
...
>>> f = Flowers()
>>> f.violets.color
'blue'
Row values can also be inherited from other rows, just as normal inheritance
works in Python. See the primary_key Meta attribute above for how
inheritance works on primary keys::
>>> class Recipes(DataSet):
... class chowder:
... is_soup = True
... name = "Clam Chowder"
... class tomato_bisque(chowder):
... name = "Tomato Bisque"
...
>>> r = Recipes()
>>> r.chowder.is_soup
True
>>> r.tomato_bisque.is_soup
True
Keyword Arguments
-----------------
- default_refclass
- a SuperSet to use if None has already been specified in Meta
Special inner Meta class
------------------------
See DataSet.Meta for details
"""
__metaclass__ = DataType
_reserved_attr = DataContainer._reserved_attr + ('data', 'shared_instance')
ref = None
class Meta(DataContainer.Meta):
"""configures a DataSet class.
The inner class Meta is used to configure a DataSet . The following are
acknowledged attributes:
storable
an object that should be used to store this DataSet. If omitted the
loader's style object will look for a storable object in its env,
using storable_name
storable_name
the name of the storable object that the loader should fetch from
its env to load this DataSet with. If omitted, the loader's style
object will try to guess the storable_name based on its env and the
name of the DataSet class
primary_key
this is a list of names that should be acknowledged as primary keys
in a DataSet. The default is simply ['id'].
"""
row = DataRow
storable = None
storable_name = None
storage_medium = None
primary_key = [k for k in DataType.default_primary_key]
references = []
_stored_objects = None
_built = False
def __init__(self, default_refclass=None, default_meta=None):
DataContainer.__init__(self)
# we want the convenience of not having to
# inherit DataSet.Meta. hmmm ...
if not default_meta:
default_meta = DataSet.Meta
if not isinstance(self.meta, default_meta):
defaults = default_meta()
for name in dir(defaults):
if not hasattr(self.meta, name):
setattr(self.meta, name, getattr(defaults, name))
self.meta._stored_objects = DataSetStore(self)
# dereference from class ...
try:
cl_attr = getattr(self.Meta, 'references')
except AttributeError:
cl_attr = []
setattr(self.meta, 'references', [c for c in cl_attr])
if not default_refclass:
default_refclass = SuperSet
def mkref():
clean_refs = []
for ds in iter(self.meta.references):
if ds is type(self):
# whoops: skip a reference to this DataSet class itself
continue
clean_refs.append(ds)
self.meta.references = clean_refs
return default_refclass(*[
ds.shared_instance(default_refclass=default_refclass)
for ds in iter(self.meta.references)
])
# data def style classes, so they have refs before data is walked
if len(self.meta.references) > 0:
self.ref = mkref()
for key, data in self.data():
if key in self:
raise ValueError(
"data() cannot redeclare key '%s' "
"(this is already an attribute)" % key)
if isinstance(data, dict):
# make a new class object for the row data
# so that a loaded dataset can instantiate this...
data = type(key, (self.meta.row,), data)
self._setdata(key, data)
if not self.ref:
# type style classes, since refs were discovered above
self.ref = mkref()
def __iter__(self):
for key in self.meta.keys:
yield (key, getattr(self, key))
def data(self):
"""returns iterable key/dict pairs.
You would only need to override this if you have a DataSet that will
break unless it is ordered very specifically. Since class-style DataSet
objects are just classes with attributes, its rows will be loaded in
alphabetical order. The alternative is to define a DataSet as follows.
However, note that this is not as functional as a class-style DataSet::
>>> class Birds(DataSet):
... def data(self):
... return (
... ('blue_jay', dict(
... name="Blue Jay")),
... ('crow', dict(
... name="Crow")),)
...
>>> b = Birds()
>>> b.blue_jay.name
'Blue Jay'
>>> b.crow.name
'Crow'
"""
if self.meta._built:
for k,v in self:
yield (k,v)
def public_dir(obj):
for name in dir(obj):
if name.startswith("_"):
continue
yield name
def add_ref_from_rowlike(rowlike):
if rowlike._dataset not in self.meta.references:
self.meta.references.append(rowlike._dataset)
empty = True
for name in public_dir(self.__class__):
val = getattr(self.__class__, name)
if not is_row_class(val):
continue
empty = False
key = name
row_class = val
row = {}
for col_name in public_dir(row_class):
col_val = getattr(row_class, col_name)
if isinstance(col_val, Ref):
# the .ref attribute
continue
elif type(col_val) in (types.ListType, types.TupleType):
for c in col_val:
if is_rowlike(c):
add_ref_from_rowlike(c)
else:
raise TypeError(
"multi-value columns can only contain "
"rowlike objects, not %s of type %s" % (
col_val, type(col_val)))
elif is_rowlike(col_val):
add_ref_from_rowlike(col_val)
elif isinstance(col_val, Ref.Value):
ref = col_val.ref
if ref.dataset_class not in self.meta.references:
# store the reference:
self.meta.references.append(ref.dataset_class)
row[col_name] = col_val
yield (key, row)
if empty:
raise ValueError("cannot create an empty DataSet")
self.meta._built = True
@classmethod
def shared_instance(cls, **kw):
# fixme: default_refclass might be in **kw. But only a loader can set a
# refclass. hmm
if cls in dataset_registry:
dataset = dataset_registry[cls]
else:
dataset = cls(**kw)
dataset_registry.register(dataset)
return dataset
class DataSetContainer(object):
"""yields datasets when itered over."""
class Meta:
datasets = None
dataset_keys = None
def __init__(self):
lazy_meta(self)
self.meta.datasets = {}
self.meta.dataset_keys = []
self.meta._cache = ObjRegistry()
def __iter__(self):
for k in self.meta.dataset_keys:
yield self.meta.datasets[k]
def _dataset_to_key(self, dataset):
return dataset.__class__.__name__
def _setdataset(self, dataset, key=None, isref=False):
# due to reference resolution we might get colliding data sets...
if dataset in self.meta._cache:
return False
if key is None:
key = self._dataset_to_key(dataset)
if not isref:
# refs are not yielded
self.meta.dataset_keys.append(key)
self.meta.datasets[key] = dataset
self.meta._cache.register(dataset)
return True
class SuperSet(DataContainer, DataSetContainer):
"""a set of data sets.
each attribute/key is a DataSet.
"""
class Meta(DataContainer.Meta, DataSetContainer.Meta):
pass
def __init__(self, *datasets):
DataContainer.__init__(self)
DataSetContainer.__init__(self)
self._store_datasets(datasets)
def _store_datasets(self, datasets):
for d in datasets:
k = self._dataset_to_key(d)
self._setdata(k, d)
self._setdataset(d, key=k)
for ref_d in d.ref:
k = self._dataset_to_key(ref_d)
self._setdata(k, ref_d)
self._setdataset(ref_d, key=k, isref=True)
class MergedSuperSet(SuperSet):
"""a collection of data sets.
all attributes of all data sets are merged together so that they are
accessible in this class, independent of dataset. duplicate attribute
names are not allowed
"""
class Meta(SuperSet.Meta):
pass
def __init__(self, *datasets):
lazy_meta(self)
self.meta.keys_to_datasets = {}
SuperSet.__init__(self, *datasets)
def _setdataset(self, dataset, key=None, isref=False):
if SuperSet._setdataset(self, dataset, key=key, isref=isref):
for k,row in dataset:
if k in self.meta.keys_to_datasets:
raise ValueError(
"cannot add key '%s' for %s because it was "
"already added by %s" % (
k, dataset, self.meta.keys_to_datasets[k]))
# need an instance here, if it's a class...
if not isinstance(row, DataRow):
row = row(dataset)
self._setdata(k, row)
self.meta.keys_to_datasets[k] = dataset
def _store_datasets(self, datasets):
for dataset in datasets:
self._setdataset(dataset)
for d in dataset.ref:
self._setdataset(d, isref=True)
def lazy_meta(obj):
if not hasattr(obj, 'meta'):
setattr(obj, 'meta', obj.Meta())
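def _merged_superset_sketch():
    """A hedged usage sketch (not part of the original module): MergedSuperSet
    makes every row of every DataSet reachable by its bare key.
    """
    class Flowers(DataSet):
        class violets:
            color = 'blue'
    class Trees(DataSet):
        class oak:
            height = 'tall'
    merged = MergedSuperSet(Flowers(), Trees())
    assert merged.violets.color == 'blue'
    assert merged.oak.height == 'tall'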
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import itertools
import os
import stat
import netaddr
from oslo.config import cfg
from neutron.agent.linux import external_process
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.i18n import _LW
from neutron.openstack.common import log as logging
VALID_STATES = ['MASTER', 'BACKUP']
VALID_NOTIFY_STATES = ['master', 'backup', 'fault']
VALID_AUTH_TYPES = ['AH', 'PASS']
HA_DEFAULT_PRIORITY = 50
PRIMARY_VIP_RANGE_SIZE = 24
# TODO(amuller): Use L3 agent constant when new constants module is introduced.
FIP_LL_SUBNET = '169.254.30.0/23'
LOG = logging.getLogger(__name__)
def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE):
"""Get a free IP range, from parent_range, of the specified size.
:param parent_range: String representing an IP range. E.g: '169.254.0.0/16'
:param excluded_ranges: A list of strings to be excluded from parent_range
:param size: What should be the size of the range returned?
:return: A string representing an IP range
"""
free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges)
for cidr in free_cidrs.iter_cidrs():
if cidr.prefixlen <= size:
return '%s/%s' % (cidr.network, size)
raise ValueError(_('Network of size %(size)s, from IP range '
'%(parent_range)s excluding IP ranges '
'%(excluded_ranges)s was not found.') %
{'size': size,
'parent_range': parent_range,
'excluded_ranges': excluded_ranges})
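# Illustrative example (hypothetical values), relying on netaddr IPSet
# semantics: get_free_range('169.254.0.0/16', ['169.254.0.0/24'], size=24)
# would return the first free /24, i.e. '169.254.1.0/24'.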
class InvalidInstanceStateException(exceptions.NeutronException):
message = (_('Invalid instance state: %%(state)s, valid states are: '
'%(valid_states)s') %
{'valid_states': ', '.join(VALID_STATES)})
class InvalidNotifyStateException(exceptions.NeutronException):
message = (_('Invalid notify state: %%(state)s, valid states are: '
'%(valid_notify_states)s') %
{'valid_notify_states': ', '.join(VALID_NOTIFY_STATES)})
class InvalidAuthenticationTypeExecption(exceptions.NeutronException):
message = (_('Invalid authentication type: %%(auth_type)s, '
'valid types are: %(valid_auth_types)s') %
{'valid_auth_types': ', '.join(VALID_AUTH_TYPES)})
class KeepalivedVipAddress(object):
"""A virtual address entry of a keepalived configuration."""
def __init__(self, ip_address, interface_name, scope=None):
self.ip_address = ip_address
self.interface_name = interface_name
self.scope = scope
def build_config(self):
result = '%s dev %s' % (self.ip_address, self.interface_name)
if self.scope:
result += ' scope %s' % self.scope
return result
class KeepalivedVirtualRoute(object):
"""A virtual route entry of a keepalived configuration."""
def __init__(self, destination, nexthop, interface_name=None):
self.destination = destination
self.nexthop = nexthop
self.interface_name = interface_name
def build_config(self):
output = '%s via %s' % (self.destination, self.nexthop)
if self.interface_name:
output += ' dev %s' % self.interface_name
return output
class KeepalivedGroup(object):
"""Group section of a keepalived configuration."""
def __init__(self, ha_vr_id):
self.ha_vr_id = ha_vr_id
self.name = 'VG_%s' % ha_vr_id
self.instance_names = set()
self.notifiers = []
def add_instance(self, instance):
self.instance_names.add(instance.name)
def set_notify(self, state, path):
if state not in VALID_NOTIFY_STATES:
raise InvalidNotifyStateException(state=state)
self.notifiers.append((state, path))
def build_config(self):
return itertools.chain(['vrrp_sync_group %s {' % self.name,
' group {'],
(' %s' % i for i in self.instance_names),
[' }'],
(' notify_%s "%s"' % (state, path)
for state, path in self.notifiers),
['}'])
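# Illustrative sketch (hypothetical values): a KeepalivedGroup(1) holding one
# instance named VR_1 with a 'master' notifier builds roughly
#     vrrp_sync_group VG_1 {
#         group {
#             VR_1
#         }
#         notify_master "/path/to/notify_master.sh"
#     }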
class KeepalivedInstance(object):
"""Instance section of a keepalived configuration."""
def __init__(self, state, interface, vrouter_id, ha_cidr,
priority=HA_DEFAULT_PRIORITY, advert_int=None,
mcast_src_ip=None, nopreempt=False):
self.name = 'VR_%s' % vrouter_id
if state not in VALID_STATES:
raise InvalidInstanceStateException(state=state)
self.state = state
self.interface = interface
self.vrouter_id = vrouter_id
self.priority = priority
self.nopreempt = nopreempt
self.advert_int = advert_int
self.mcast_src_ip = mcast_src_ip
self.track_interfaces = []
self.vips = []
self.virtual_routes = []
self.authentication = None
metadata_cidr = '169.254.169.254/32'
self.primary_vip_range = get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=[metadata_cidr,
FIP_LL_SUBNET,
ha_cidr],
size=PRIMARY_VIP_RANGE_SIZE)
def set_authentication(self, auth_type, password):
if auth_type not in VALID_AUTH_TYPES:
raise InvalidAuthenticationTypeExecption(auth_type=auth_type)
self.authentication = (auth_type, password)
def add_vip(self, ip_cidr, interface_name, scope):
self.vips.append(KeepalivedVipAddress(ip_cidr, interface_name, scope))
def remove_vips_vroutes_by_interface(self, interface_name):
self.vips = [vip for vip in self.vips
if vip.interface_name != interface_name]
self.virtual_routes = [vroute for vroute in self.virtual_routes
if vroute.interface_name != interface_name]
def remove_vip_by_ip_address(self, ip_address):
self.vips = [vip for vip in self.vips
if vip.ip_address != ip_address]
def get_existing_vip_ip_addresses(self, interface_name):
return [vip.ip_address for vip in self.vips
if vip.interface_name == interface_name]
def _build_track_interface_config(self):
return itertools.chain(
[' track_interface {'],
(' %s' % i for i in self.track_interfaces),
[' }'])
def _generate_primary_vip(self):
"""Return an address in the primary_vip_range CIDR, with the router's
VRID in the host section.
For example, if primary_vip_range is 169.254.0.0/24, and this router's
VRID is 5, the result is 169.254.0.5. Using the VRID assures that
the primary VIP is consistent amongst HA router instances on different
nodes.
"""
ip = (netaddr.IPNetwork(self.primary_vip_range).network +
self.vrouter_id)
return netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE))
def _build_vips_config(self):
# NOTE(amuller): The primary VIP must be consistent in order to avoid
# keepalived bugs. Changing the VIP in the 'virtual_ipaddress' and
# SIGHUP'ing keepalived can remove virtual routers, including the
# router's default gateway.
# We solve this by never changing the VIP in the virtual_ipaddress
# section, herein known as the primary VIP.
# The only interface known to exist for HA routers is the HA interface
# (self.interface). We generate an IP on that device and use it as the
# primary VIP. The other VIPs (Internal interfaces IPs, the external
# interface IP and floating IPs) are placed in the
# virtual_ipaddress_excluded section.
primary = KeepalivedVipAddress(str(self._generate_primary_vip()),
self.interface)
vips_result = [' virtual_ipaddress {',
' %s' % primary.build_config(),
' }']
if self.vips:
vips_result.extend(
itertools.chain([' virtual_ipaddress_excluded {'],
(' %s' % vip.build_config()
for vip in
sorted(self.vips,
key=lambda vip: vip.ip_address)),
[' }']))
return vips_result
def _build_virtual_routes_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.virtual_routes),
[' }'])
def build_config(self):
config = ['vrrp_instance %s {' % self.name,
' state %s' % self.state,
' interface %s' % self.interface,
' virtual_router_id %s' % self.vrouter_id,
' priority %s' % self.priority]
if self.nopreempt:
config.append(' nopreempt')
if self.advert_int:
config.append(' advert_int %s' % self.advert_int)
if self.authentication:
auth_type, password = self.authentication
authentication = [' authentication {',
' auth_type %s' % auth_type,
' auth_pass %s' % password,
' }']
config.extend(authentication)
if self.mcast_src_ip:
config.append(' mcast_src_ip %s' % self.mcast_src_ip)
if self.track_interfaces:
config.extend(self._build_track_interface_config())
config.extend(self._build_vips_config())
if self.virtual_routes:
config.extend(self._build_virtual_routes_config())
config.append('}')
return config
class KeepalivedConf(object):
"""A keepalived configuration."""
def __init__(self):
self.reset()
def reset(self):
self.groups = {}
self.instances = {}
def add_group(self, group):
self.groups[group.ha_vr_id] = group
def get_group(self, ha_vr_id):
return self.groups.get(ha_vr_id)
def add_instance(self, instance):
self.instances[instance.vrouter_id] = instance
def get_instance(self, vrouter_id):
return self.instances.get(vrouter_id)
def build_config(self):
config = []
for group in self.groups.values():
config.extend(group.build_config())
for instance in self.instances.values():
config.extend(instance.build_config())
return config
def get_config_str(self):
"""Generates and returns the keepalived configuration.
:return: Keepalived configuration string.
"""
return '\n'.join(self.build_config())
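# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming `instance` is a fully populated VRRP instance object of the class
# whose methods appear above (its constructor lies outside this excerpt):
#
#     conf = KeepalivedConf()
#     conf.add_instance(instance)      # keyed by instance.vrouter_id
#     text = conf.get_config_str()     # newline-joined build_config() lines
#
# The resulting text is a 'vrrp_instance <name> { ... }' block ready to be
# written to keepalived.conf.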
class KeepalivedNotifierMixin(object):
def _get_notifier_path(self, state):
return self._get_full_config_file_path('notify_%s.sh' % state)
def _write_notify_script(self, state, script):
name = self._get_notifier_path(state)
utils.replace_file(name, script)
st = os.stat(name)
os.chmod(name, st.st_mode | stat.S_IEXEC)
return name
def _prepend_shebang(self, script):
return '#!/usr/bin/env bash\n%s' % script
def _append_state(self, script, state):
state_path = self._get_full_config_file_path('state')
return '%s\necho -n %s > %s' % (script, state, state_path)
def add_notifier(self, script, state, ha_vr_id):
"""Add a master, backup or fault notifier.
These notifiers are executed when keepalived invokes a state
transition. Write a notifier to disk and add it to the
configuration.
"""
script_with_prefix = self._prepend_shebang(' '.join(script))
full_script = self._append_state(script_with_prefix, state)
self._write_notify_script(state, full_script)
group = self.config.get_group(ha_vr_id)
group.set_notify(state, self._get_notifier_path(state))
def get_conf_dir(self):
confs_dir = os.path.abspath(os.path.normpath(self.conf_path))
conf_dir = os.path.join(confs_dir, self.resource_id)
return conf_dir
def _get_full_config_file_path(self, filename, ensure_conf_dir=True):
conf_dir = self.get_conf_dir()
if ensure_conf_dir and not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, filename)
class KeepalivedManager(KeepalivedNotifierMixin):
"""Wrapper for keepalived.
    This wrapper writes keepalived configuration files and starts/restarts
    the keepalived process.
"""
def __init__(self, resource_id, config, conf_path='/tmp',
namespace=None, root_helper=None):
self.resource_id = resource_id
self.config = config
self.namespace = namespace
self.root_helper = root_helper
self.conf_path = conf_path
self.conf = cfg.CONF
self.process = None
self.spawned = False
def _output_config_file(self):
config_str = self.config.get_config_str()
config_path = self._get_full_config_file_path('keepalived.conf')
utils.replace_file(config_path, config_str)
return config_path
def get_conf_on_disk(self):
config_path = self._get_full_config_file_path('keepalived.conf')
try:
with open(config_path) as conf:
return conf.read()
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
raise
def spawn(self):
config_path = self._output_config_file()
self.process = self.get_process(self.conf,
self.resource_id,
self.root_helper,
self.namespace,
self.conf_path)
def callback(pid_file):
cmd = ['keepalived', '-P',
'-f', config_path,
'-p', pid_file,
'-r', '%s-vrrp' % pid_file]
return cmd
self.process.enable(callback, reload_cfg=True)
self.spawned = True
LOG.debug('Keepalived spawned with config %s', config_path)
def spawn_or_restart(self):
if self.process:
self.restart()
else:
self.spawn()
def restart(self):
if self.process.active:
self._output_config_file()
self.process.reload_cfg()
else:
LOG.warn(_LW('A previous instance of keepalived seems to be dead, '
'unable to restart it, a new instance will be '
'spawned'))
self.process.disable()
self.spawn()
def disable(self):
if self.process:
self.process.disable(sig='15')
self.spawned = False
def revive(self):
if self.spawned and not self.process.active:
self.restart()
@classmethod
def get_process(cls, conf, resource_id, root_helper, namespace, conf_path):
return external_process.ProcessManager(
conf,
resource_id,
root_helper,
namespace,
pids_path=conf_path)
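# --- Usage sketch (illustrative, not part of the original module) ---
# A typical lifecycle, assuming `conf` is a populated KeepalivedConf:
#
#     manager = KeepalivedManager('router-uuid', conf,
#                                 conf_path='/var/lib/neutron/ha_confs',
#                                 namespace='qrouter-...', root_helper=None)
#     manager.spawn_or_restart()   # writes keepalived.conf, then spawns
#                                  # keepalived or asks ProcessManager to
#                                  # reload its configuration
#     manager.disable()            # stops keepalived with signal 15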
|
|
# -*- coding: utf-8 -*-
"""Fake Windows Registry objects implementation."""
import calendar
import construct
from plaso import dependencies
from plaso.dfwinreg import definitions
from plaso.dfwinreg import errors
from plaso.dfwinreg import interface
dependencies.CheckModuleVersion(u'construct')
# TODO: give this class a place of its own when dfwinreg is split off.
class Filetime(object):
"""Class that implements a FILETIME timestamp.
  The FILETIME timestamp is a 64-bit integer that contains the number
  of 100-nanosecond intervals since 1601-01-01 00:00:00.
Do not confuse this with the FILETIME structure that consists of
2 x 32-bit integers and is presumed to be unsigned.
Attributes:
timestamp: the FILETIME timestamp.
"""
# The difference between Jan 1, 1601 and Jan 1, 1970 in seconds.
_FILETIME_TO_POSIX_BASE = 11644473600L
_INT64_MAX = (1 << 63L) - 1
def __init__(self, timestamp=None):
"""Initializes the FILETIME object.
Args:
timestamp: optional FILETIME timestamp.
"""
super(Filetime, self).__init__()
self.timestamp = timestamp
def CopyFromString(self, time_string):
"""Copies a FILETIME from a string containing a date and time value.
Args:
time_string: A string containing a date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the
seconds fraction can be either 3 or 6 digits. The time
of day, seconds fraction and timezone offset are optional.
The default timezone is UTC.
Raises:
ValueError: if the time string is invalid or not supported.
"""
if not time_string:
raise ValueError(u'Invalid time string.')
time_string_length = len(time_string)
# The time string should at least contain 'YYYY-MM-DD'.
if (time_string_length < 10 or time_string[4] != u'-' or
time_string[7] != u'-'):
raise ValueError(u'Invalid time string.')
    # If a time of day is specified, the time string should at least
    # contain 'YYYY-MM-DD hh:mm:ss'.
if (time_string_length > 10 and (
time_string_length < 19 or time_string[10] != u' ' or
time_string[13] != u':' or time_string[16] != u':')):
raise ValueError(u'Invalid time string.')
try:
year = int(time_string[0:4], 10)
except ValueError:
raise ValueError(u'Unable to parse year.')
try:
month = int(time_string[5:7], 10)
except ValueError:
raise ValueError(u'Unable to parse month.')
if month not in range(1, 13):
raise ValueError(u'Month value out of bounds.')
try:
day_of_month = int(time_string[8:10], 10)
except ValueError:
raise ValueError(u'Unable to parse day of month.')
if day_of_month not in range(1, 32):
raise ValueError(u'Day of month value out of bounds.')
hours = 0
minutes = 0
seconds = 0
if time_string_length > 10:
try:
hours = int(time_string[11:13], 10)
except ValueError:
raise ValueError(u'Unable to parse hours.')
if hours not in range(0, 24):
raise ValueError(u'Hours value out of bounds.')
try:
minutes = int(time_string[14:16], 10)
except ValueError:
raise ValueError(u'Unable to parse minutes.')
if minutes not in range(0, 60):
raise ValueError(u'Minutes value out of bounds.')
try:
seconds = int(time_string[17:19], 10)
except ValueError:
        raise ValueError(u'Unable to parse seconds.')
if seconds not in range(0, 60):
raise ValueError(u'Seconds value out of bounds.')
micro_seconds = 0
timezone_offset = 0
if time_string_length > 19:
if time_string[19] != u'.':
timezone_index = 19
else:
for timezone_index in range(19, time_string_length):
if time_string[timezone_index] in [u'+', u'-']:
break
          # The calculations that follow rely on the timezone index pointing
          # beyond the string in case no timezone offset was defined.
if timezone_index == time_string_length - 1:
timezone_index += 1
if timezone_index > 19:
fraction_of_seconds_length = timezone_index - 20
if fraction_of_seconds_length not in [3, 6]:
raise ValueError(u'Invalid time string.')
try:
micro_seconds = int(time_string[20:timezone_index], 10)
except ValueError:
raise ValueError(u'Unable to parse fraction of seconds.')
if fraction_of_seconds_length == 3:
micro_seconds *= 1000
if timezone_index < time_string_length:
if (time_string_length - timezone_index != 6 or
time_string[timezone_index + 3] != u':'):
raise ValueError(u'Invalid time string.')
try:
timezone_offset = int(time_string[
timezone_index + 1:timezone_index + 3])
except ValueError:
raise ValueError(u'Unable to parse timezone hours offset.')
if timezone_offset not in range(0, 24):
raise ValueError(u'Timezone hours offset value out of bounds.')
# Note that when the sign of the timezone offset is negative
# the difference needs to be added. We do so by flipping the sign.
if time_string[timezone_index] == u'-':
timezone_offset *= 60
else:
timezone_offset *= -60
try:
timezone_offset += int(time_string[
timezone_index + 4:timezone_index + 6])
except ValueError:
raise ValueError(u'Unable to parse timezone minutes offset.')
timezone_offset *= 60
self.timestamp = int(calendar.timegm((
year, month, day_of_month, hours, minutes, seconds)))
self.timestamp += timezone_offset + self._FILETIME_TO_POSIX_BASE
self.timestamp = (self.timestamp * 1000000) + micro_seconds
self.timestamp *= 10
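# --- Worked example (illustrative, not part of the original module) ---
# FILETIME counts 100-nanosecond intervals since 1601-01-01 00:00:00 UTC.
# For the string u'2013-08-01 15:25:28' the steps above give:
#
#     calendar.timegm((2013, 8, 1, 15, 25, 28))  ->           1375370728
#     + _FILETIME_TO_POSIX_BASE (11644473600)    ->          13019844328
#     * 1000000 (to microseconds)                ->  13019844328000000
#     * 10 (to 100ns intervals)                  -> 130198443280000000
#
#     filetime = Filetime()
#     filetime.CopyFromString(u'2013-08-01 15:25:28')
#     assert filetime.timestamp == 130198443280000000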
class FakeWinRegistryKey(interface.WinRegistryKey):
"""Fake implementation of a Windows Registry key."""
def __init__(
self, name, key_path=u'', last_written_time=None, offset=0, subkeys=None,
values=None):
"""Initializes a Windows Registry key object.
    Subkeys and values with duplicate names are silently ignored.
Args:
name: the name of the Windows Registry key.
key_path: optional Windows Registry key path.
last_written_time: optional last written time (contains
a FILETIME timestamp).
offset: optional offset of the key within the Windows Registry file.
subkeys: optional list of subkeys (instances of FakeWinRegistryKey).
values: optional list of values (instances of FakeWinRegistryValue).
"""
super(FakeWinRegistryKey, self).__init__(key_path=key_path)
self._last_written_time = last_written_time
self._name = name
self._offset = offset
self._subkeys = {}
self._values = {}
if subkeys:
for registry_key in subkeys:
name = registry_key.name.upper()
if name in self._subkeys:
continue
self._subkeys[name] = registry_key
registry_key._key_path = self._JoinKeyPath([
self._key_path, registry_key.name])
if values:
for registry_value in values:
name = registry_value.name.upper()
if name in self._values:
continue
self._values[name] = registry_value
@property
def last_written_time(self):
"""The last written time of the key (contains a FILETIME timestamp)."""
return self._last_written_time
@property
def name(self):
"""The name of the key."""
return self._name
@property
def number_of_subkeys(self):
"""The number of subkeys within the key."""
    return len(self._subkeys)
@property
def number_of_values(self):
"""The number of values within the key."""
return len(self._values)
@property
def offset(self):
"""The offset of the key within the Windows Registry file."""
return self._offset
def AddSubkey(self, registry_key):
"""Adds a subkey.
Args:
registry_key: the Windows Registry subkey (instance of
FakeWinRegistryKey).
Raises:
KeyError: if the subkey already exists.
"""
name = registry_key.name.upper()
if name in self._subkeys:
raise KeyError(
u'Subkey: {0:s} already exists.'.format(registry_key.name))
self._subkeys[name] = registry_key
registry_key._key_path = self._JoinKeyPath([
self._key_path, registry_key.name])
def AddValue(self, registry_value):
"""Adds a value.
Args:
registry_value: the Windows Registry value (instance of
FakeWinRegistryValue).
Raises:
KeyError: if the value already exists.
"""
name = registry_value.name.upper()
if name in self._values:
raise KeyError(
u'Value: {0:s} already exists.'.format(registry_value.name))
self._values[name] = registry_value
def GetSubkeyByName(self, name):
"""Retrieves a subkey by name.
Args:
name: The name of the subkey.
Returns:
The Windows Registry subkey (instances of WinRegistryKey) or
None if not found.
"""
return self._subkeys.get(name.upper(), None)
def GetSubkeys(self):
"""Retrieves all subkeys within the key.
Yields:
Windows Registry key objects (instances of WinRegistryKey) that represent
the subkeys stored within the key.
"""
for registry_key in iter(self._subkeys.values()):
yield registry_key
def GetValueByName(self, name):
"""Retrieves a value by name.
    Value names are not unique; the first matching value is returned.
Args:
name: Name of the value or an empty string for the default value.
Returns:
A Windows Registry value object (instance of WinRegistryValue) if
a corresponding value was found or None if not.
"""
return self._values.get(name.upper(), None)
def GetValues(self):
"""Retrieves all values within the key.
Yields:
Windows Registry value objects (instances of WinRegistryValue) that
represent the values stored within the key.
"""
for registry_value in iter(self._values.values()):
yield registry_value
class FakeWinRegistryValue(interface.WinRegistryValue):
"""Fake implementation of a Windows Registry value."""
_INT32_BIG_ENDIAN = construct.SBInt32(u'value')
_INT32_LITTLE_ENDIAN = construct.SLInt32(u'value')
_INT64_LITTLE_ENDIAN = construct.SLInt64(u'value')
def __init__(self, name, data=b'', data_type=0, offset=0):
"""Initializes a Windows Registry value object.
Args:
name: the name of the Windows Registry value.
data: optional binary string containing the value data.
data_type: optional integer containing the value data type.
offset: optional offset of the value within the Windows Registry file.
"""
super(FakeWinRegistryValue, self).__init__()
self._data = data
self._data_type = data_type
self._data_size = len(data)
self._name = name
self._offset = offset
@property
def data(self):
"""The value data as a native Python object.
Raises:
WinRegistryValueError: if the value data cannot be read.
"""
if not self._data:
return None
if self._data_type in self._STRING_VALUE_TYPES:
try:
return self._data.decode(u'utf-16-le')
except UnicodeError as exception:
raise errors.WinRegistryValueError(
u'Unable to read data from value: {0:s} with error: {1:s}'.format(
self._name, exception))
elif (self._data_type == definitions.REG_DWORD and
self._data_size == 4):
return self._INT32_LITTLE_ENDIAN.parse(self._data)
elif (self._data_type == definitions.REG_DWORD_BIG_ENDIAN and
self._data_size == 4):
return self._INT32_BIG_ENDIAN.parse(self._data)
elif (self._data_type == definitions.REG_QWORD and
self._data_size == 8):
return self._INT64_LITTLE_ENDIAN.parse(self._data)
elif self._data_type == definitions.REG_MULTI_SZ:
try:
utf16_string = self._data.decode(u'utf-16-le')
return filter(None, utf16_string.split(u'\x00'))
except UnicodeError as exception:
raise errors.WinRegistryValueError(
u'Unable to read data from value: {0:s} with error: {1:s}'.format(
self._name, exception))
return self._data
@property
def data_type(self):
"""Numeric value that contains the data type."""
return self._data_type
@property
def name(self):
"""The name of the value."""
return self._name
@property
def offset(self):
"""The offset of the value within the Windows Registry file."""
    return self._offset
@property
def raw_data(self):
"""The value data as a byte string."""
return self._data
class FakeWinRegistryFile(interface.WinRegistryFile):
"""Fake implementation of a Windows Registry file."""
def __init__(self):
"""Initializes the Windows Registry file."""
super(FakeWinRegistryFile, self).__init__()
self._root_key = None
def AddKeyByPath(self, key_path, registry_key):
"""Adds a Windows Registry for a specific key path.
Args:
key_path: the Windows Registry key path to add the key.
registry_key: the Windows Registry key (instance of FakeWinRegistryKey).
Returns:
A boolean containing True if successful or False if not.
"""
if not key_path.startswith(self._KEY_PATH_SEPARATOR):
return False
if not self._root_key:
self._root_key = FakeWinRegistryKey(u'')
path_segments = self._SplitKeyPath(key_path)
parent_key = self._root_key
    for path_segment in path_segments:
      subkey = parent_key.GetSubkeyByName(path_segment)
      if not subkey:
        subkey = FakeWinRegistryKey(path_segment)
        parent_key.AddSubkey(subkey)
      parent_key = subkey
    parent_key.AddSubkey(registry_key)
    return True
def Close(self):
"""Closes the Windows Registry file."""
return
def GetKeyByPath(self, key_path):
"""Retrieves the key for a specific path.
Args:
key_path: the Windows Registry key path.
Returns:
A Registry key (instance of WinRegistryKey) or None if not available.
"""
if not key_path.startswith(self._KEY_PATH_SEPARATOR):
return
path_segments = self._SplitKeyPath(key_path)
registry_key = self._root_key
for path_segment in path_segments:
if not registry_key:
return
registry_key = registry_key.GetSubkeyByName(path_segment)
return registry_key
def GetRootKey(self, key_path_prefix=u''):
"""Retrieves the root key.
Args:
key_path_prefix: optional Windows Registry key path prefix.
Returns:
The Windows Registry root key (instance of WinRegistryKey) or
None if not available.
"""
# TODO: handle key_path_prefix.
return self._root_key
def Open(self, unused_file_object):
"""Opens the Windows Registry file using a file-like object.
Args:
file_object: the file-like object.
Returns:
A boolean containing True if successful or False if not.
"""
return True
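# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming the key path separator is the backslash used by Windows Registry
# paths:
#
#     registry_file = FakeWinRegistryFile()
#     run_key = FakeWinRegistryKey(
#         u'Run', values=[FakeWinRegistryValue(u'Updater')])
#     registry_file.AddKeyByPath(
#         u'\\Microsoft\\Windows\\CurrentVersion', run_key)
#
#     key = registry_file.GetKeyByPath(
#         u'\\Microsoft\\Windows\\CurrentVersion\\Run')
#     value = key.GetValueByName(u'Updater')
#
# Intermediate keys are created on demand by AddKeyByPath; key paths must
# start with the key path separator.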
|
|
"""
MZ
Component manager: run run_em.py from this script. This script manages
components and decides what fits need to be done with run_em.
It reads in the data, splits components and prepares input data for
run_em.py.
It also decides which model is best.
"""
import warnings
warnings.filterwarnings("ignore")
print('fastfit: all warnings suppressed.')
import numpy as np
import os
import sys
import logging
from distutils.dir_util import mkpath
import random
#~ import uuid
#~ import subprocess
from multiprocessing import cpu_count
sys.path.insert(0, os.path.abspath('..'))
#~ from chronostar import expectmax2 as expectmax
#~ from chronostar import expectmax # replaced by C modules
from chronostar import readparam
from chronostar import tabletool
from chronostar import component
from chronostar.component import SphereComponent
from chronostar import utils
from chronostar import default_pars
from chronostar import traceorbitC
import run_em
try:
from chronostar._overall_likelihood import get_overall_lnlikelihood_for_fixed_memb_probs
except ImportError:
print("C IMPLEMENTATION OF overall_likelihood NOT IMPORTED")
USE_C_IMPLEMENTATION = False
TODO = True # NOW WHAT?
# SPLIT COMPONENTS HERE:
def build_init_comps(prev_comps=None, split_comp_ix=0, memb_probs=None,
Component=None, data_dict=None, split_group_method='age'):
"""
Given a list of converged components from a N component fit, generate
a list of N+1 components with which to initialise an EM run.
    This is done by taking the target component, `prev_comps[split_comp_ix]`,
    and replacing it in the list of comps with two components, one with a
    lower and one with a higher age.
Parameters
----------
prev_comps : [N] list of Component objects
List of components from the N component fit
split_comp_ix : int
The index of component which is to be split into two
    prev_med_and_spans : [ncomps,npars,3] np.array
        The median and spans of the component parameters from the previous fit
Return
------
init_comps: [N+1] list of Component objects
Side effects
------------
Updates self.fit_pars['init_comps'] with a [N+1] list of Component
objects
Edit history
------------
2020-11-14 TC: replaced explicit check for emcee vs Nelder-mead when
trying to use prev_med_and_spans. This enables emcee runs to continue
on from Nelder-mead runs, and hopefully generalises this section to
be agnostic of optimisation method
"""
target_comp = prev_comps[split_comp_ix]
# Decompose and replace the ith component with two new components
# by using the 16th and 84th percentile ages from previous run
if split_group_method=='age':
age = target_comp.get_age()
lo_age = 0.8*age
hi_age = 1.2*age
split_comps = target_comp.split_group_age(lo_age=lo_age,
hi_age=hi_age)
elif split_group_method=='spatial':
split_comps = target_comp.split_group_spatial(data_dict,
memb_probs[:,split_comp_ix])
init_comps = list(prev_comps)
init_comps.pop(split_comp_ix)
init_comps.insert(split_comp_ix, split_comps[1])
init_comps.insert(split_comp_ix, split_comps[0])
return init_comps
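# --- Illustrative note (not part of the original script) ---
# With split_group_method='age', a target component of age 10 Myr is replaced
# by two components initialised at 0.8 * 10 = 8 Myr and 1.2 * 10 = 12 Myr.
# For example, prev_comps == [A, B, C] with split_comp_ix == 1 yields
# init_comps == [A, B_low, B_high, C].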
def calc_bic(data, ncomps, lnlike, memb_probs=None,
Component=SphereComponent):
"""Calculates the Bayesian Information Criterion
A simple metric to judge whether added components are worthwhile.
The number of 'data points' is the expected star membership count.
This way the BIC is (mostly) independent of the overall data set,
if most of those stars are not likely members of the component fit.
Parameters
----------
data: dict
See fit_many_comps
ncomps: int
Number of components used in fit
lnlike: float
the overall log likelihood of the fit
memb_probs: [nstars,ncomps {+1}] float array_like
See fit_many_comps
Component:
See fit_many_comps
Returns
-------
bic: float
A log likelihood score, scaled by number of free parameters. A
lower BIC indicates a better fit. Differences of <4 are minor
improvements.
"""
# 2020/11/15 TC: removed this...
# if memb_probs is not None:
# nstars = np.sum(memb_probs[:, :ncomps])
# else:
nstars = len(data['means'])
ncomp_pars = len(Component.PARAMETER_FORMAT)
n = nstars * 6 # 6 for phase space origin
k = ncomps * (ncomp_pars) # parameters for each component model
# -1 for age, +1 for amplitude
return np.log(n)*k - 2 * lnlike
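# --- Worked example (illustrative, not part of the original script) ---
# For nstars = 100 stars, n = 600 "data points" (6 phase-space dimensions per
# star). With ncomps = 2 and, say, 9 parameters per component (the actual
# number comes from Component.PARAMETER_FORMAT), k = 18 and
#
#     bic = ln(600) * 18 - 2 * lnlike  ~  115.1 - 2 * lnlike
#
# so a lower BIC (higher lnlike and/or fewer parameters) indicates a better
# model.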
def calc_score(data_dict, comps, memb_probs, Component,
use_box_background=False):
"""
Calculate global score of fit for comparison with future fits with different
component counts
Parameters
----------
:param comps:
:param memb_probs:
:return:
TODO: Establish relevance of bg_ln_ols
"""
ncomps = len(comps)
# python
#~ lnlike = expectmax.get_overall_lnlikelihood(data_dict, comps,
#~ old_memb_probs=memb_probs, use_box_background=use_box_background)
#~ print('lnlikeP', lnlike)
#~ lnpost = expectmax.get_overall_lnlikelihood(data_dict, comps,
#~ old_memb_probs=memb_probs, use_box_background=use_box_background,
#~ inc_posterior=True)
#~ bic = expectmax.calc_bic(data_dict, ncomps, lnlike,
#~ memb_probs=memb_probs, Component=Component)
####################################################################
#### DATA FOR C MODULES ############################################
####################################################################
gr_mns, gr_covs = traceorbitC.get_gr_mns_covs_now(comps)
st_mns = data_dict['means']
st_covs = data_dict['covs']
bg_lnols = data_dict['bg_lnols']
    # For some reason, bg_ols in C only work this way now. They worked
    # before from data_dict... A mystery! data_dict now produces values
    # +/-1e+240 or similar.
filename_tmp = 'bgols_tmp.dat'
np.savetxt(filename_tmp, bg_lnols)
bg_lnols = np.loadtxt(filename_tmp)
print('run_em: bg_lnols read from a txt file!')
lnlike = get_overall_lnlikelihood_for_fixed_memb_probs(
st_mns, st_covs, gr_mns, gr_covs, bg_lnols, memb_probs) # TODO background
#~ print('lnlikeC', lnlike)
# use lnlikeC
bic = calc_bic(data_dict, ncomps, lnlike, memb_probs=memb_probs,
Component=Component)
#~ lnpost = expectmax.get_overall_lnlikelihood(data_dict, comps,
#~ old_memb_probs=memb_probs, use_box_background=use_box_background,
#~ inc_posterior=True)
lnpost=np.nan # don't need it
# 2020/11/16 TC: handling the case for a bad bic.
# This comes up for the initial 1 component fit with box background
# because I haven't thought of a general way to initialise memberships
# that doesn't yield 0 background members.
if np.isnan(bic):
logging.info('Warning, bic was NaN')
bic = np.inf
return {'bic': bic, 'lnlike': lnlike, 'lnpost': lnpost}
def log_score_comparison(prev, new):
"""
Purely a logging helper function.
Log BIC comparisons.
Parameters
----------
prev: dict
        A dictionary of scores from the previous run with the following entries
- bic: the Bayesian Information Criterion
- lnlike : the log likelihood
- lnpost : the log posterior
new: dict
        A dictionary of scores from the new run, with identical entries as
`prev`
Result
------
None
"""
if new['bic'] < prev['bic']:
logging.info("Extra component has improved BIC...")
logging.info(
"New BIC: {} < Old BIC: {}".format(new['bic'], prev['bic']))
else:
logging.info("Extra component has worsened BIC...")
logging.info(
"New BIC: {} > Old BIC: {}".format(new['bic'], prev['bic']))
logging.info("lnlike: {} | {}".format(new['lnlike'], prev['lnlike']))
logging.info("lnpost: {} | {}".format(new['lnpost'], prev['lnpost']))
def write_results_to_file(prev_result, prev_score, pars):
"""
Write final results of the fit to the files
TODO: write fits file with id and memberships
TODO: ascii file with components today
"""
logging.info("... saving previous fit as best fit to data")
#### Components ####################################################
filename_final_comps = os.path.join(pars['folder_destination'],
pars['filename_final_components'])
# npy
Component.store_raw_components(filename_final_comps,
prev_result['comps'])
# Ascii
filename_final_comps_ascii = filename_final_comps.replace('.npy',
'.txt')
Component.store_components_ascii(filename_final_comps_ascii,
prev_result['comps'], overwrite=pars['overwrite_prev_run'])
# Fits
tabcomps = Component.convert_components_array_into_astropy_table(prev_result['comps'])
filename_final_comps_fits = filename_final_comps.replace('.npy',
'.fits')
tabcomps.write(filename_final_comps_fits,
overwrite=pars['overwrite_prev_run'])
#### Memberships ###################################################
filename_final_memberships = os.path.join(pars['folder_destination'],
pars['filename_final_memberships'])
# npy
np.save(filename_final_memberships, prev_result['memb_probs'])
# Fits
filename_final_memberships_fits = filename_final_memberships.replace(
'.npy', '.fits')
try:
tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(
pars['data_table'],
prev_result['memb_probs'],
prev_result['comps'],
filename_final_memberships_fits,
get_background_overlaps=True,
stellar_id_colname=pars['stellar_id_colname'],
overwrite_fits=pars['overwrite_prev_run'])
    except Exception:
        logging.info("[WARNING] Couldn't write membership .fits file. Check column id.")
#### Likelihood and BIC ############################################
filename_final_likelihood_and_bic =\
os.path.join(pars['folder_destination'],
pars['filename_final_lnprob_and_bic'])
np.save(filename_final_likelihood_and_bic, prev_score)
#### Final log #####################################################
log_final_log(prev_result, prev_score, pars)
def iter_end_log(best_split_ix, prev_result, new_result): # TODO
"""
This is not working. chr(ord(A))...
"""
logging.info("Selected {} as best decomposition".format(
chr(ord('A') + best_split_ix)))
logging.info(
"Turned\n{}".format(prev_result['comps'][best_split_ix].get_pars()))
logging.info('with {} members'.format(
prev_result['memb_probs'].sum(axis=0)[best_split_ix]))
logging.info("into\n{}\n&\n{}".format(
new_result['comps'][best_split_ix].get_pars(),
new_result['comps'][best_split_ix + 1].get_pars(),
))
logging.info('with {} and {} members'.format(
new_result['memb_probs'].sum(axis=0)[best_split_ix],
new_result['memb_probs'].sum(axis=0)[best_split_ix + 1],
))
logging.info("for an overall membership breakdown\n{}".format(
new_result['memb_probs'].sum(axis=0)
))
def log_final_log(prev_result, prev_score, pars):
logging.info('Final best fits:')
[logging.info(c.get_pars()) for c in prev_result['comps']]
logging.info('Final age med and span:')
logging.info('PRINT TODO')
#~ if pars['optimisation_method']=='emcee':
#~ [logging.info(row[-1]) for row in prev_result['med_and_spans']]
logging.info('Membership distribution: {}'.format(
prev_result['memb_probs'].sum(axis=0)))
logging.info('Final membership:')
logging.info('\n{}'.format(np.round(prev_result['memb_probs'] * 100)))
logging.info('Final lnlikelihood: {}'.format(prev_score['lnlike']))
logging.info('Final lnposterior: {}'.format(prev_score['lnpost']))
logging.info('Final BIC: {}'.format(prev_score['bic']))
logging.info('#########################')
logging.info('### END #################')
logging.info('#########################')
########################################################################
#### PARAMETERS ########################################################
########################################################################
filename_user_pars = sys.argv[1]
user_pars = readparam.readParam(filename_user_pars)
pars = default_pars.pars
pars.update(user_pars)
pars['filename_pars_log'] = os.path.join(pars['folder_destination'],
pars['par_log_file'])
# Log fit parameters
readparam.log_used_pars(pars, default_pars=default_pars.pars)
if pars['component'].lower() == 'sphere':
Component = component.SphereComponent
elif pars['component'].lower() == 'ellip':
Component = component.EllipComponent
########################################################################
#### INPUT DATA ########################################################
########################################################################
# Data prep should already have been completed, so we simply build
# the dictionary of arrays from the astropy table
data_dict = tabletool.build_data_dict_from_table(pars['data_table'],
historical=pars['historical_colnames'])
########################################################################
#### OUTPUT DESTINATION ################################################
########################################################################
# Folder: destination with results
# If path exists, make a new results_directory with a random int
if os.path.exists(pars['folder_destination']):
folder_destination = os.path.join(pars['folder_destination'],
str(random.randint(0, 1000)))
pars['folder_destination'] = folder_destination
mkpath(pars['folder_destination'])
# tmp folder to store tmp files
folder_tmp = os.path.join(pars['folder_destination'], pars['folder_tmp'])
mkpath(folder_tmp)
########################################################################
#### LOGGING ###########################################################
########################################################################
# Logging filename
filename_log = os.path.join(pars['folder_destination'],
pars['filename_log'])
logging.basicConfig(filename=filename_log, level=logging.INFO)
# Make some logs
utils.log_message(msg='Component count cap set to {}'.format(
pars['max_comp_count']), symbol='+', surround=True)
utils.log_message(msg='Iteration count cap set to {}'.format(
pars['max_em_iterations']), symbol='+', surround=True)
########################################################################
#### INITIAL COMPONENTS, MEMBERSHIPS AND NCOMPS ########################
########################################################################
ncomps = 1
# Initial components
filename_init_comps = pars['filename_init_comps']
if filename_init_comps is not None and os.path.exists(filename_init_comps):
init_comps = Component.load_raw_components(filename_init_comps)
ncomps = len(init_comps)
print('Managed to load in %d init_comps from file'%ncomps)
else:
init_comps = None
# Initial membership probabilities
filename_init_memb_probs = pars['filename_init_memb_probs']
if filename_init_memb_probs is not None and os.path.exists(filename_init_memb_probs):
init_memb_probs = np.load(filename_init_memb_probs)
print('Managed to load in %d init_memb_probs from file'%len(init_memb_probs))
else:
init_memb_probs = None
# Check if ncomps and init_memb_probs.shape[1] match!!!!
if init_comps is not None and init_memb_probs is not None:
assert len(init_comps)==init_memb_probs.shape[1]-1
# What happens if they are not? [background component...]
if init_comps is None and init_memb_probs is not None:
ncomps = init_memb_probs.shape[1]-1 # remove background
init_comps = None
print('ncomps: %d'%ncomps)
########################################################################
#### ESTABLISHING INITIAL FIT ##########################################
########################################################################
"""
Handle the special case of the very first run: whether starting with
ncomps=1 or initialised with more components/membership probabilities,
Chronostar needs to build and fit these before continuing with further
splits, either by fitting one component (the default) or by using
`init_comps` to initialise the EM fit.
"""
utils.log_message('Beginning Chronostar run', symbol='_', surround=True)
utils.log_message(msg='FITTING {} COMPONENT'.format(ncomps),
symbol='*', surround=True)
pars_tmp = pars
pars_tmp['ncomps'] = ncomps
#~ pars_tmp['split_label'] = 'initial'
pars_tmp['split_label'] = ''
prev_result = run_em.run_expectmax_simple(pars_tmp, data_dict=data_dict,
init_comps=init_comps, init_memb_probs=init_memb_probs)
prev_score = calc_score(data_dict, prev_result['comps'],
prev_result['memb_probs'], Component,
use_box_background=pars['use_box_background'])
print('prev_score')
print(prev_score)
ncomps += 1
########################################################################
#### EXPLORE EXTRA COMPONENT BY DECOMPOSITION ##########################
########################################################################
"""
Calculate global score of fit for comparison with future fits with
different component counts
Begin iterative loop, each time trialing the incorporation of a new
component
`prev_result` tracks the previous fit, which is taken to be the best fit
so far
As new fits are acquired, we call them `new_result`.
The new fits are compared against the previous fit, and if determined to
be an improvement, they are taken as the best fit, and are renamed to
`prev_result`
"""
global_bics = []
while ncomps <= pars['max_comp_count']:
utils.log_message(msg='FITTING {} COMPONENT'.format(ncomps),
symbol='*', surround=True)
####################################################################
#### COMPUTE ALL SPLITS FOR A MODEL WITH NCOMPS ####################
####################################################################
all_results = []
all_scores = []
# Iteratively try subdividing each previous component
# target_comp is the component we will split into two.
# This will make a total of ncomps (the target comp split into 2,
# plus the remaining components from prev_result['comps']
for i, target_comp in enumerate(prev_result['comps']):
################################################################
#### INITIALISE ################################################
################################################################
#~ print(pars['init_comps'])
#~ ncomps = len(pars['init_comps'])
# Folders for splits are named S1... rather than letters alone
split_label = 'S%d'%(i+1)
# OUTPUT FOLDER
folder_split = os.path.join(pars['folder_destination'],
str(ncomps), split_label)
utils.log_message(msg='Subdividing stage {}'.format(split_label),
symbol='+', surround=True)
mkpath(folder_split)
#### PREPARE INITIAL COMPONENTS BY SPLITTING THEM ##############
init_comps = build_init_comps(prev_comps=prev_result['comps'],
split_comp_ix=i, memb_probs=prev_result['memb_probs'],
Component=Component, data_dict=data_dict,
split_group_method=pars['split_group_method'])
# Save components to the file so EM algorithm can read them
filename_comps_split = os.path.join(folder_tmp,
'init_comps_%d_%s.npy'%(ncomps, split_label))
Component.store_raw_components(filename_comps_split, init_comps)
#### PREPARE EM PARS FILE FOR THIS SPLIT #######################
pars_tmp = pars
pars_tmp['ncomps'] = ncomps
pars_tmp['split_label'] = split_label
pars_tmp['filename_init_comps'] = filename_comps_split
################################################################
#### FIT: EM ALGORITHM #########################################
################################################################
result = run_em.run_expectmax_simple(pars_tmp,
data_dict=data_dict, init_comps=init_comps)
################################################################
#### STORE RESULTS #############################################
################################################################
all_results.append(result)
score = calc_score(data_dict, result['comps'],
result['memb_probs'], Component,
use_box_background=pars['use_box_background'])
all_scores.append(score)
print('score')
print(score)
logging.info(
'Decomposition {} finished with \nBIC: {}\nlnlike: {}\n'
'lnpost: {}'.format(split_label, all_scores[-1]['bic'],
all_scores[-1]['lnlike'], all_scores[-1]['lnpost']))
####################################################################
#### ALL SPLITS DONE. FIND THE BEST ONE ############################
####################################################################
# Identify all the improving splits
all_bics = np.array([score['bic'] for score in all_scores])
best_split_ix = np.nanargmin(all_bics)
new_result = all_results[best_split_ix]
new_score = all_scores[best_split_ix]
#~ self.iter_end_log(best_split_ix, prev_result=prev_result,
#~ new_result=new_result)
####################################################################
#### CONVERGENCE: DO WE HAVE THE MODEL WITH OPTIMAL NUMBER OF ######
#### COMPONENTS OR DO WE NEED TO INTRODUCE ANOTHER COMPONENT? ######
####################################################################
# Check if the fit has improved
log_score_comparison(new=new_score, prev=prev_score)
print('scores in all possible splits')
for s in all_scores:
print(s)
print('')
print('all BICs so far')
print(all_bics)
print('SCORE COMPARISON FOR CONVERGENCE', new_score['bic'],
prev_score['bic'], 'Does new BIC improve the model?', new_score['bic'] < prev_score['bic'])
#### NOT CONVERGED YET, CONTINUE WITH SPLITTING ####################
if new_score['bic'] < prev_score['bic']:
print('Not converged. Continue')
prev_score = new_score
prev_result = new_result
ncomps += 1
global_bics.append(new_score['bic'])
utils.log_message(
msg="Commencing {} component fit on {}{}".format(
ncomps, ncomps - 1,
chr(ord('A') + best_split_ix)), symbol='+'
)
#### CONVERGED. WRITE RESULTS AND EXIT #############################
else:
print('CONVERGED. EXIT THE LOOP')
print('global bics')
print(global_bics)
print('last BIC', new_score['bic'])
# WRITING THE FINAL RESULTS INTO FILES
# SAVE prev_result rather than new_result because prev_result
# is optimal while new_result has worsened the score.
write_results_to_file(prev_result, prev_score, pars)
#~ fig=plt.figure()
#~ ax=fig.add_subplot(111)
#~ ax.plot(range(len(all_bics)), all_bics)
#~ plt.savefig('all_bics.png')
# Terminate the loop
break
logging.info("Best fit:\n{}".format(
[group.get_pars() for group in prev_result['comps']]))
# FINAL LOGGING
if ncomps >= pars['max_comp_count']:
utils.log_message(msg='REACHED MAX COMP LIMIT', symbol='+',
surround=True)
utils.log_message(msg='END', symbol='+', surround=True)
utils.log_message(msg='####################', symbol='+', surround=True)
print('END')
########################################################################
# END
|
|
"""
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
# Try Python 2 first, otherwise load from Python 3
try:
    from StringIO import StringIO
    import cPickle as pickle
    import urllib2 as urllib
    from urllib2 import HTTPError, URLError
except ImportError:
    from io import StringIO
    import pickle
    import urllib.request
    import urllib.error
    import urllib.parse
    from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec (code, global_vars, local_vars)
try:
    # Python 2 built-in
    basestring
except NameError:
    basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
            resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
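# --- Worked example (illustrative, not part of the original script) ---
# Given a heavily trimmed, hypothetical search index string:
#
#     si = 'Search.setIndex({objects:{"sklearn.svm":{"SVC":[3,0]}},'
#     si += 'filenames:["modules/generated/sklearn.svm.SVC"]})'
#     filenames, objects = parse_sphinx_searchindex(si)
#     # filenames == ['modules/generated/sklearn.svm.SVC']
#     # objects   == {'sklearn.svm': {'SVC': [3, 0]}}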
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(list(value.keys())))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in list(value.keys()):
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
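# --- Usage sketch (illustrative, not part of the original script) ---
# Resolving the documentation URL for a code object found in an example
# (the doc_url and cobj contents below are hypothetical):
#
#     resolver = SphinxDocLinkResolver('http://scikit-learn.org/stable')
#     cobj = {'name': 'SVC', 'module': 'sklearn.svm.classes',
#             'module_short': 'sklearn.svm'}
#     link = resolver.resolve(cobj, this_url='.../auto_examples/plot_foo.html')
#     # -> something like '.../modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC'
#     #    or None if the object cannot be found via searchindex.js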
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
width: 0px;
overflow: hidden;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring + 1 + start_row, erow + 1 + start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
""" % (ref_name, snippet))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
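# Hedged usage sketch (illustrative only, not called by the build): how
# make_thumbnail above is meant to be driven. The paths are hypothetical.
def _example_make_thumbnail():
    src = 'auto_examples/images/plot_example_001.png'  # assumed existing figure
    dst = 'auto_examples/images/thumb/plot_example.png'
    # Scale the figure to fit inside 400x280 while keeping its aspect ratio,
    # then paste it centered on a white canvas of exactly that size.
    make_thumbnail(src, dst, 400, 280)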
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec ('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
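# Hedged example (illustrative only): get_short_module_name walks the dotted
# module path from the right and keeps the shortest prefix that still exposes
# the object. On most numpy versions, for instance,
#
#     get_short_module_name('numpy.core.fromnumeric', 'mean')  ->  'numpy'
#
# because ``from numpy import mean`` succeeds, so links can point at the
# shorter, documented location.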
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First cd into the original example dir, so that any file
# created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
exec(compile(open(os.path.basename(src_file)).read(), os.path.basename(src_file), 'exec'), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_mngr.num)
plt.savefig(image_path % fig_mngr.num)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`,
# which is within `auto_examples/../images/thumb` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all, as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in list(example_code_obj.values())
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in list(example_code_obj.items()):
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.>)' + # don't follow '.' or '>'
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print(e.args)
print('[done]')
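# Minimal sketch (illustrative only, not used by the builder): the longest-
# match-first substitution embed_code_links performs on highlighted HTML.
# The str_repl mapping below is hand-written for the example; in the real run
# it is built from the pickled code objects and the doc resolvers above.
def _example_link_substitution():
    import re as _re
    name_html = ('<span class="n">np</span><span class="o">.</span>'
                 '<span class="n">mean</span>')
    str_repl = {name_html: '<a href="generated/numpy.mean.html">%s</a>' % name_html}
    names = sorted(str_repl, key=len, reverse=True)
    expr = _re.compile('|'.join(_re.escape(name) for name in names))
    line = 'm = %s(x)' % name_html
    return expr.sub(lambda match: str_repl[match.group()], line)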
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
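# Hedged note (illustrative, not part of the original file): this module is a
# Sphinx extension, so it is expected to be listed in the documentation's
# conf.py, e.g. ``extensions = [..., 'gen_rst']`` (the module name here is an
# assumption), alongside the ``plot_gallery`` config value registered by
# setup() above.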
def setup_module():
# HACK: Stop nosetests running setup() above
pass
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
import codecs
from contextlib import contextmanager
import locale
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import lxml.html
from nose.plugins.skip import SkipTest
from nikola import main
import nikola
import nikola.plugins.command
import nikola.plugins.command.init
@contextmanager
def cd(path):
old_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(old_dir)
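# Hedged usage sketch (illustrative only, mirrors how the tests below use it):
# cd() switches into the given directory for the duration of the block and
# then switches back, e.g.
#
#     with cd(self.target_dir):
#         main.main(["build"])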
class EmptyBuildTest(unittest.TestCase):
"""Basic integration testcase."""
dataname = None
def setUp(self):
"""Setup a demo site."""
self.tmpdir = tempfile.mkdtemp()
self.target_dir = os.path.join(self.tmpdir, "target")
self.init_command = nikola.plugins.command.init.CommandInit()
self.fill_site()
self.patch_site()
self.build()
def fill_site(self):
"""Add any needed initial content."""
self.init_command.create_empty_site(self.target_dir)
self.init_command.create_configuration(self.target_dir)
if self.dataname:
src = os.path.join(os.path.dirname(__file__), 'data',
self.dataname)
for root, dirs, files in os.walk(src):
for src_name in files:
rel_dir = os.path.relpath(root, src)
dst_file = os.path.join(self.target_dir, rel_dir, src_name)
src_file = os.path.join(root, src_name)
shutil.copy2(src_file, dst_file)
def patch_site(self):
"""Make any modifications you need to the site."""
def build(self):
"""Build the site."""
with cd(self.target_dir):
main.main(["build"])
def tearDown(self):
"""Remove the demo site."""
shutil.rmtree(self.tmpdir)
# Fixes Issue #438
try:
del sys.modules['conf']
except KeyError:
pass
def test_build(self):
"""Ensure the build did something."""
index_path = os.path.join(
self.target_dir, "output", "archive.html")
self.assertTrue(os.path.isfile(index_path))
class DemoBuildTest(EmptyBuildTest):
"""Test that a default build of --demo works."""
def fill_site(self):
"""Fill the site with demo content."""
self.init_command.copy_sample_site(self.target_dir)
self.init_command.create_configuration(self.target_dir)
# File for Issue #374 (empty post text)
with codecs.open(os.path.join(self.target_dir, 'posts', 'empty.txt'), "wb+", "utf8") as outf:
outf.write(
".. title: foobar\n"
".. slug: foobar\n"
".. date: 2013/03/06 19:08:15\n"
)
def test_index_in_sitemap(self):
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertTrue('<loc>http://getnikola.com/</loc>' in sitemap_data)
class RepeatedPostsSetting(DemoBuildTest):
"""Duplicate POSTS, should not read each post twice, which causes conflicts."""
def patch_site(self):
"""Set the SITE_URL to have a path"""
conf_path = os.path.join(self.target_dir, "conf.py")
with codecs.open(conf_path, "ab", "utf8") as outf:
outf.write('\nPOSTS = (("posts/*.txt", "posts", "post.tmpl"),("posts/*.txt", "posts", "post.tmpl"))\n')
class FuturePostTest(EmptyBuildTest):
"""Test a site with future posts."""
def fill_site(self):
import datetime
from nikola.utils import current_time
self.init_command.copy_sample_site(self.target_dir)
self.init_command.create_configuration(self.target_dir)
# Change COMMENT_SYSTEM_ID to not wait for 5 seconds
with codecs.open(os.path.join(self.target_dir, 'conf.py'), "ab+", "utf8") as outf:
outf.write('\nCOMMENT_SYSTEM_ID = "nikolatest"\n')
with codecs.open(os.path.join(self.target_dir, 'posts', 'empty1.txt'), "wb+", "utf8") as outf:
outf.write(
".. title: foo\n"
".. slug: foo\n"
".. date: %s\n" % (current_time() + datetime.timedelta(-1)).strftime('%Y/%m/%d %T')
)
with codecs.open(os.path.join(self.target_dir, 'posts', 'empty2.txt'), "wb+", "utf8") as outf:
outf.write(
".. title: bar\n"
".. slug: bar\n"
".. date: %s\n" % (current_time() + datetime.timedelta(1)).strftime('%Y/%m/%d %T')
)
def test_future_post(self):
""" Ensure that the future post is not present in the index and sitemap."""
index_path = os.path.join(self.target_dir, "output", "index.html")
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
foo_path = os.path.join(self.target_dir, "output", "posts", "foo.html")
bar_path = os.path.join(self.target_dir, "output", "posts", "bar.html")
self.assertTrue(os.path.isfile(index_path))
self.assertTrue(os.path.isfile(foo_path))
self.assertTrue(os.path.isfile(bar_path))
index_data = codecs.open(index_path, "r", "utf8").read()
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertTrue('foo.html' in index_data)
self.assertFalse('bar.html' in index_data)
self.assertTrue('foo.html' in sitemap_data)
self.assertFalse('bar.html' in sitemap_data)
# Run deploy command to see if future post is deleted
with cd(self.target_dir):
main.main(["deploy"])
self.assertTrue(os.path.isfile(index_path))
self.assertTrue(os.path.isfile(foo_path))
self.assertFalse(os.path.isfile(bar_path))
class TranslatedBuildTest(EmptyBuildTest):
"""Test a site with translated content."""
dataname = "translated_titles"
def __init__(self, *a, **kw):
super(TranslatedBuildTest, self).__init__(*a, **kw)
try:
locale.setlocale(locale.LC_ALL, ("es", "utf8"))
except Exception:
raise SkipTest
def test_translated_titles(self):
"""Check that translated title is picked up."""
en_file = os.path.join(self.target_dir, "output", "stories", "1.html")
es_file = os.path.join(self.target_dir, "output", "es", "stories", "1.html")
# Files should be created
self.assertTrue(os.path.isfile(en_file))
self.assertTrue(os.path.isfile(es_file))
# And now let's check the titles
with codecs.open(en_file, 'r', 'utf8') as inf:
doc = lxml.html.parse(inf)
self.assertEqual(doc.find('//title').text, 'Foo | Demo Site')
with codecs.open(es_file, 'r', 'utf8') as inf:
doc = lxml.html.parse(inf)
self.assertEqual(doc.find('//title').text, 'Bar | Demo Site')
class RelativeLinkTest(DemoBuildTest):
"""Check that SITE_URL with a path doesn't break links."""
def patch_site(self):
"""Set the SITE_URL to have a path"""
conf_path = os.path.join(self.target_dir, "conf.py")
with codecs.open(conf_path, "rb", "utf-8") as inf:
data = inf.read()
data = data.replace('SITE_URL = "http://getnikola.com/"',
'SITE_URL = "http://getnikola.com/foo/bar/"')
with codecs.open(conf_path, "wb+", "utf8") as outf:
outf.write(data)
def test_relative_links(self):
"""Check that the links in output/index.html are correct"""
test_path = os.path.join(self.target_dir, "output", "index.html")
flag = False
with open(test_path, "rb") as inf:
data = inf.read()
for _, _, url, _ in lxml.html.iterlinks(data):
# Just need to be sure this one is ok
if url.endswith("css"):
self.assertFalse(url.startswith(".."))
flag = True
# But I also need to be sure it is there!
self.assertTrue(flag)
def test_index_in_sitemap(self):
"""Test that the correct path is in sitemap, and not the wrong one."""
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertFalse('<loc>http://getnikola.com/</loc>' in sitemap_data)
self.assertTrue('<loc>http://getnikola.com/foo/bar/</loc>' in sitemap_data)
class TestCheck(DemoBuildTest):
"""The demo build should pass 'nikola check'"""
def test_check_links(self):
with cd(self.target_dir):
p = subprocess.call("nikola check -l", shell=True)
self.assertEqual(p, 0)
def test_check_files(self):
with cd(self.target_dir):
p = subprocess.call("nikola check -f", shell=True)
self.assertEqual(p, 0)
class TestCheckFailure(DemoBuildTest):
"""The demo build should pass 'nikola check'"""
def test_check_links_fail(self):
with cd(self.target_dir):
os.unlink(os.path.join("output", "archive.html"))
p = subprocess.call("nikola check -l", shell=True)
self.assertEqual(p, 1)
def test_check_files_fail(self):
with cd(self.target_dir):
with codecs.open(os.path.join("output", "foobar"), "wb+", "utf8") as outf:
outf.write("foo")
p = subprocess.call("nikola check -f", shell=True)
self.assertEqual(p, 1)
class RelativeLinkTest2(DemoBuildTest):
"""Check that dropping stories to the root doesn't break links."""
def patch_site(self):
"""Set the SITE_URL to have a path"""
conf_path = os.path.join(self.target_dir, "conf.py")
with codecs.open(conf_path, "rb", "utf-8") as inf:
data = inf.read()
data = data.replace('("stories/*.txt", "stories", "story.tmpl"),',
'("stories/*.txt", "", "story.tmpl"),')
data = data.replace('("stories/*.rst", "stories", "story.tmpl"),',
'("stories/*.rst", "", "story.tmpl"),')
data = data.replace('# INDEX_PATH = ""',
'INDEX_PATH = "blog"')
with codecs.open(conf_path, "wb+", "utf8") as outf:
outf.write(data)
outf.flush()
def test_relative_links(self):
"""Check that the links in a story are correct"""
test_path = os.path.join(self.target_dir, "output", "about-nikola.html")
flag = False
with open(test_path, "rb") as inf:
data = inf.read()
for _, _, url, _ in lxml.html.iterlinks(data):
# Just need to be sure this one is ok
if url.endswith("css"):
self.assertFalse(url.startswith(".."))
flag = True
# But I also need to be sure it is there!
self.assertTrue(flag)
def test_index_in_sitemap(self):
"""Test that the correct path is in sitemap, and not the wrong one."""
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertFalse('<loc>http://getnikola.com/</loc>' in sitemap_data)
self.assertTrue('<loc>http://getnikola.com/blog/</loc>' in sitemap_data)
if __name__ == "__main__":
unittest.main()
|
|
"""
adbwp.message
~~~~~~~~~~~~~
Object representation of a message.
"""
import collections
import typing
from . import consts, enums, exceptions, header, hints, payload
__all__ = ['Message', 'new', 'from_header', 'connect', 'auth_signature', 'auth_rsa_public_key',
'open', 'ready', 'write', 'close']
#: Mapping of the :class:`~adbwp.enums.Command` int value to an :class:`~int` that represents
#: the maximum size of the data payload for the message.
MAX_DATA_LENGTH_BY_COMMAND = collections.defaultdict(lambda: consts.MAXDATA, {
enums.Command.CNXN.value: consts.CONNECT_AUTH_MAXDATA,
enums.Command.AUTH.value: consts.CONNECT_AUTH_MAXDATA
})
class Message(typing.NamedTuple('Message', [('header', header.Header), # pylint: disable=inherit-non-class
('data', hints.Buffer)])):
"""
Represents an entire ADB protocol message.
A message consists of a 24-byte header followed by an optional data payload.
"""
def new(command: hints.Command, arg0: hints.Int = 0, arg1: hints.Int = 0, data: hints.Buffer = b'') -> Message:
"""
Create a new :class:`~adbwp.message.Message` instance with optional default values.
:param command: Command identifier
:type command: :class:`~adbwp.enums.Command` or :class:`~int`
:param arg0: (Optional) First argument of the command
:type arg0: :class:`~int`
:param arg1: (Optional) Second argument of the command
:type arg1: :class:`~int`
:param data: (Optional) Message payload
:type data: :class:`~bytes`, :class:`~bytearray`, :class:`~str`, or :class:`~memoryview`
:return: Message instance from given values
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.MAXDATA`
"""
data = payload.as_bytes(data)
return from_header(header.new(command, arg0, arg1, len(data), payload.checksum(data), header.magic(command)), data)
def from_header(header: header.Header, data: hints.Buffer = b'') -> Message: # pylint: disable=redefined-outer-name
"""
Create a new :class:`~adbwp.message.Message` instance from an existing :class:`~adbwp.header.Header`.
:param header: Message header
:type header: :class:`~adbwp.header.Header`
:param data: (Optional) Message payload
:type data: :class:`~bytes`, :class:`~bytearray`, :class:`~str`, or :class:`~memoryview`
:return: Message instance from given values
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.MAXDATA`
:raises ChecksumError: When data payload checksum doesn't match header checksum
"""
data = payload.as_bytes(data)
max_data_length = MAX_DATA_LENGTH_BY_COMMAND[header.command]
if len(data) > max_data_length:
raise ValueError('Data length for {} message cannot be more than {}'.format(header.command, max_data_length))
checksum = payload.checksum(data)
if header.data_checksum != checksum:
raise exceptions.ChecksumError('Expected data checksum {}; got {}'.format(header.data_checksum, checksum))
return Message(header, data)
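# Hedged sketch (illustrative only): a round trip through the constructors
# above. new() fills in the length, checksum and magic header fields, and
# from_header() re-validates them, so rebuilding from the parts yields an
# equal message.
def _example_round_trip() -> Message:
    original = new(enums.Command.WRTE, arg0=1, arg1=2, data=b'hello')
    rebuilt = from_header(original.header, original.data)
    assert rebuilt == original
    return rebuilt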
def connect(serial: hints.Str, banner: hints.Str, system_type: hints.SystemType = enums.SystemType.HOST) -> Message:
"""
Create a :class:`~adbwp.message.Message` instance that represents a connect message.
:param serial: Unique identifier
:type serial: :class:`~str`
:param banner: Human readable version/identifier string
:type banner: :class:`~str`
:param system_type: System type creating the message
:type system_type: :class:`~adbwp.enums.SystemType` or :class:`~str`
:return: Message used to connect to a remote system
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.CONNECT_AUTH_MAXDATA`
"""
system_identity_string = payload.system_identity_string(system_type, serial, banner)
return new(enums.Command.CNXN, consts.VERSION, consts.CONNECT_AUTH_MAXDATA, system_identity_string)
def auth_signature(signature: hints.Bytes) -> Message:
"""
Create a :class:`~adbwp.message.Message` instance that represents a signature
authentication message.
:param signature: Signed data payload
:type signature: :class:`~bytes`
:return: Message used to verify key pair
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.CONNECT_AUTH_MAXDATA`
"""
return new(enums.Command.AUTH, enums.AuthType.SIGNATURE, 0, signature)
def auth_rsa_public_key(public_key: hints.Bytes) -> Message:
"""
Create a :class:`~adbwp.message.Message` instance that represents an RSA public key
authentication message.
:param public_key: Public key for remote system to conditionally accept
:type public_key: :class:`~bytes`
:return: Message used to share public key
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.CONNECT_AUTH_MAXDATA`
"""
return new(enums.Command.AUTH, enums.AuthType.RSAPUBLICKEY, 0, payload.null_terminate(public_key))
def open(local_id: hints.Int, destination: hints.Str) -> Message: # pylint: disable=redefined-builtin
"""
Create a :class:`~adbwp.message.Message` instance that represents an open message.
:param local_id: Stream id on remote system to connect with
:type local_id: :class:`~int`
:param destination: Stream destination
:type destination: :class:`~str`
:return: Message used to open a stream by id on a remote system
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When local id is zero
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.MAXDATA`
"""
if not local_id:
raise ValueError('Local id cannot be zero')
return new(enums.Command.OPEN, local_id, 0, payload.null_terminate(destination))
def ready(local_id: hints.Int, remote_id: hints.Int) -> Message:
"""
Create a :class:`~adbwp.message.Message` instance that represents a ready message.
:param local_id: Identifier for the stream on the local end
:type local_id: :class:`~int`
:param remote_id: Identifier for the stream on the remote system
:type remote_id: :class:`~int`
:return: Message used to inform remote system it's ready for write messages
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When local id is zero
:raises ValueError: When remote id is zero
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.MAXDATA`
"""
if not local_id:
raise ValueError('Local id cannot be zero')
if not remote_id:
raise ValueError('Remote id cannot be zero')
return new(enums.Command.OKAY, local_id, remote_id)
def write(local_id: hints.Int, remote_id: hints.Int, data: hints.Buffer) -> Message:
"""
Create a :class:`~adbwp.message.Message` instance that represents a write message.
:param local_id: Identifier for the stream on the local end
:type local_id: :class:`~int`
:param remote_id: Identifier for the stream on the remote system
:type remote_id: :class:`~int`
:param data: Data payload sent to the stream
:type data: :class:`~bytes`, :class:`~bytearray`, :class:`~str`, or :class:`~memoryview`
:return: Message used to write data to remote stream
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When data payload is empty
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.MAXDATA`
"""
if not data:
raise ValueError('Data cannot be empty')
return new(enums.Command.WRTE, local_id, remote_id, data)
def close(local_id: hints.Int, remote_id: hints.Int) -> Message:
"""
Create a :class:`~adbwp.message.Message` instance that represents a close message.
:param local_id: Identifier for the stream on the local end
:type local_id: :class:`~int`
:param remote_id: Identifier for the stream on the remote system
:type remote_id: :class:`~int`
:return: Message used to inform the remote system of stream closing
:rtype: :class:`~adbwp.message.Message`
:raises ValueError: When remote id is zero
:raises ValueError: When data payload is greater than :attr:`~adbwp.consts.MAXDATA`
"""
if not remote_id:
raise ValueError('Remote id cannot be zero')
return new(enums.Command.CLSE, local_id, remote_id)
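# Hedged sketch (illustrative only): the kinds of messages built during one
# stream exchange using the helpers in this module. The stream ids and the
# destination string are made up for the example.
def _example_stream_exchange():
    return [
        open(local_id=1, destination='shell:'),        # request a stream
        ready(local_id=1, remote_id=7),                # acknowledge the peer's id
        write(local_id=1, remote_id=7, data=b'ls\n'),  # send data on the stream
        close(local_id=1, remote_id=7),                # tear the stream down
    ]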
|
|
import numpy as np
def iterate(fn, x0, T, args=()):
"""Iterate fn T times, starting at x0.
Parameters
----------
fn : function
A function whose first argument is x
x0 : float
Initial condition/seed
T : int
The number of iterations to calculate
args : tuple, optional
Extra arguments to `fn`.
Returns
-------
orbit : list
The orbit/itinerary
Examples
--------
>>> # The fixed points
>>> iterate(lambda x: (2.5*x)*(1-x), 0.6, 20)
>>> iterate(lambda x: (2.5*x)*(1-x), 0, 20)
>>>
>>> # Between 0-1, 0.6 is stable fixed
>>> iterate(lambda x: (2.5*x)*(1-x), 0.0001, 20)
>>> iterate(lambda x: (2.5*x)*(1-x), 0.7, 20)
>>>
>>> # Above 1, or below 0 is unstable
>>> iterate(lambda x: (2.5*x)*(1-x), 1.1, 20)
>>> iterate(lambda x: (2.5*x)*(1-x), -.1, 20)
>>>
>>> # Some assertions confirming the above facts
>>> assert iterate(lambda x: (2.5*x)*(1-x), 0.6, 20)[0] == 0.6
>>> assert abs(iterate(lambda x: (2.5*x)*(1-x), 0.0001, 20)[-1] - 0.6) < 1e-3
>>> assert abs(iterate(lambda x: (2.5*x)*(1-x), 0.99, 20)[-1] - 0.6) < 1e-3
>>> assert iterate(lambda x: (2.5*x)*(1-x), 0, 20)[0] == 0
>>> assert abs(iterate(lambda x: (2.5*x)*(1-x), 1.1, 20)[-1]) > 100000000000
>>> assert abs(iterate(lambda x: (2.5*x)*(1-x), -.1, 20)[-1]) > 100000000000
>>>
>>> # Confirm length of returned orbit
>>> assert len(iterate(lambda x: (2.5*x)*(1-x), 0.6, 20)) == 20
"""
# Initialize the orbit with x0
orbit = [x0, ]
# Iterate until t == T
for t in range(1, int(T)):
xt = fn(orbit[t - 1], *args)
orbit.append(xt)
return orbit
def fixed_point(fn, x0, args=(), xtol=1e-8, maxiter=500):
"""
----
THIS CODE BORROWED FROM SCIPY:
`http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fixed_point.html#scipy.optimize.fixed_point`
I didn't want a full scipy dependency; instead this requires only
numpy.
NOTE: For consistency with other functions in this module,
x0 must be a float, not array-like as in the scipy version.
Use a list comprehension to search many seeds.
----
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed-point of the function: i.e. where ``func(x0) == x0``.
Parameters
----------
fn : function
A function whose first argument is x
x0 : float
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
Notes
-----
Uses Steffensen's Method using Aitken's ``Del^2`` convergence acceleration.
See Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
...    return np.sqrt(c1/(x+c2))
>>> c1 = 10
>>> c2 = 2
>>> optimize.fixed_point(func, 1.2, args=(c1,c2))
1.6542491578567586
"""
p0 = x0
for iter in range(maxiter):
p1 = fn(p0, *args)
p2 = fn(p1, *args)
d = p2 - 2.0 * p1 + p0
if d == 0.0:
return p2
else:
p = p0 - (p1 - p0)*(p1 - p0) / d
if p0 == 0:
relerr = p
else:
relerr = (p - p0)/p0
if np.abs(relerr) < xtol:
return p
p0 = p
msg = "Failed to converge after {0} iterations, value is {0}".format(
maxiter, p)
raise RuntimeError(msg)
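# Hedged example (illustrative only): the logistic map f(x) = 2.5*x*(1-x) has
# its stable fixed point at 1 - 1/2.5 = 0.6, which fixed_point recovers from a
# nearby seed.
def _example_fixed_point():
    fn = lambda x: (2.5 * x) * (1 - x)
    xfix = fixed_point(fn, 0.2)
    assert abs(xfix - 0.6) < 1e-8
    return xfix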
def is_stable(fn, xfix, ep, args=(), xtol=1e-4, maxiter=500):
"""Is fn stable on the (plus, minus) sides of xfix?
Parameters
----------
fn : function
A function whose first argument is x
xfix : float
The fixed point
ep : float
The neighborhood to search for stability (ep > 0)
args : tuple, optional
Extra arguments to `fn`.
xtol : float, optional
Convergence tolerance, defaults to 1e-04.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
"""
if ep < 0:
raise ValueError("ep must be positive")
xps = []
xms = []
search_range = np.arange(0.01, ep, ep/10)
for ep_x in search_range:
xp = iterate(fn, xfix + ep_x, maxiter, args)[-1]
xm = iterate(fn, xfix - ep_x, maxiter, args)[-1]  # save the last x
xps.append(xp)
xms.append(xm)
p = np.all(np.abs(np.asarray(xps) - xfix) < xtol)
m = np.all(np.abs(np.asarray(xms) - xfix) < xtol)
return (p, m)
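# Hedged usage sketch (illustrative only): probe both sides of the fixed point
# of the logistic map with r = 2.5; both directions converge back to 0.6, so
# both flags should come back True.
def _example_is_stable():
    fn = lambda x: (2.5 * x) * (1 - x)
    return is_stable(fn, 0.6, ep=0.1)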
def is_oscillator(fn, x0, args=(), xtol=1e-4, maxiter=500, use=0.10):
"""Does fn converge to an oscillatory pattern, and what is the period?
NOTE: I made this up on the fly, no idea how reliable this simplistic
method of detecting orbits would be in practice. User beware.
Parameters
----------
fn : function
A function whose first argument is x
x0 : float
Initial condition/seed
args : tuple, optional
Extra arguments to `fn`.
xtol : float, optional
Convergence tolerance, defaults to 1e-04.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
use : float, (0-1)
The fraction of maxiter used to check for oscillatory activity.
Examples
--------
>>> is_oscillator(partial(lambda r, x: (r*x)*(1-x), 3.838), .1)
(True, 3)
>>> # A fixed point is a period 1 oscillator
>>> is_oscillator(partial(lambda r, x: (r*x)*(1-x), 2.1), .1)
(True, 1)
"""
x0 = float(x0)
if (use > 1) or (use < 0):
raise ValueError("use must be between 0-1")
xts = np.asarray(iterate(fn, x0, maxiter, args))[-int(maxiter * use):]
maxperiod = int(np.floor(xts.shape[0]/2.0))
for i in range(1, maxperiod):
if np.abs(xts[0] - xts[i]) < xtol:
return (True, i)
return (False, 0)
if __name__ == '__main__':
from functools import partial
print("Testing is_oscillator()...")
assert (is_oscillator(partial(lambda r, x: (r*x)*(1-x), 3.838), .1)) == (True, 3)
assert (is_oscillator(partial(lambda r, x: (r*x)*(1-x), 4.0), .1)) == (False, 0)
assert (is_oscillator(partial(lambda r, x: (r*x)*(1-x), 2.1), .1)) == (True, 1)
print("Done")
|
|
#!/usr/bin/env python
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##############################################################################
from opencenter import backends
import opencenter.webapp.ast
import opencenter.db.api
class NodeBackend(backends.Backend):
def __init__(self):
super(NodeBackend, self).__init__(__file__)
def additional_constraints(self, api, node_id, action, ns):
if action == 'set_fact':
addl_constraints = []
if not 'key' in ns:
raise ValueError('no key in ns')
key = ns['key']
# see what backend this key is in...
for name, obj in backends.backend_objects.iteritems():
if key in obj.facts:
# we can only solve for settable facts. If the
# fact is not settable, then there is likely (HAS TO BE!)
# a primitive to set this somewhere else. Probably in
# the same backend.
fact_info = obj.facts[key]
if fact_info['settable'] is not True:
return None
addl_constraints.append('"%s" in facts.backends' % name)
# if fact_info['converge']:
# addl_constraints.append('facts.converged = true')
return addl_constraints
return None
if action == 'unapply_fact':
if not 'key' in ns:
raise ValueError('no key in ns')
key = ns['key']
addl_constraints = []
for name, obj in backends.backend_objects.iteritems():
if key in obj.facts:
# we can only solve for settable facts. If the
# fact is not settable, then there is likely (HAS TO BE!)
# a primitive to set this somewhere else. Probably in
# the same backend.
addl_constraints.append('"%s" in facts.backends' % name)
fact_info = obj.facts[key]
if fact_info['converge'] is True:
addl_constraints.append('facts.converged = true')
return addl_constraints
return None
if action == 'apply_fact':
if not 'key' in ns:
raise ValueError('no key in ns')
key = ns['key']
addl_constraints = []
# see what backend this key is in...
for name, obj in backends.backend_objects.iteritems():
if key in obj.facts:
# we can only solve for settable facts. If the
# fact is not settable, then there is likely (HAS TO BE!)
# a primitive to set this somewhere else. Probably in
# the same backend.
addl_constraints.append('"%s" in facts.backends' % name)
fact_info = obj.facts[key]
if fact_info['converge'] is True:
addl_constraints.append('facts.converged = true')
return addl_constraints
return None
if action == 'add_backend':
if ns['backend'] == 'node':
return []
if opencenter.backends.primitive_by_name(
'%s.add_backend' % ns['backend']) is None:
return []
else:
return None
if action == 'set_parent':
new_constraints = []
if not 'parent' in ns:
raise ValueError('no parent set')
parent = api._model_get_by_id('nodes', ns['parent'])
if 'container' in parent['facts'].get('backends', {}):
# should already be the case, but...
new_constraints.append('"node" in facts.backends')
existing_node = api._model_get_by_id('nodes', node_id)
if existing_node.get('attrs', {}).get('locked', False):
# Node is locked, set_parent not allowed.
return None
if parent.get('attrs', {}).get('locked', False):
# parent is locked
return None
ephemeral_api = opencenter.db.api.ephemeral_api_from_api(api)
opencenter.webapp.ast.apply_expression(existing_node,
'facts.parent_id :='
'"%s"' %
parent['id'],
ephemeral_api)
proposed_node = ephemeral_api._model_get_by_id('nodes',
node_id)
self.logger.debug('Setting parent would change %s to %s' %
(existing_node, proposed_node))
# this should be much smarter. losing vs gaining,
# and add/remove facts as appropriate.
all_keys = set(existing_node['facts'].keys() +
proposed_node['facts'].keys())
changed_facts = []
for key in all_keys:
if existing_node['facts'].get(key, None) != \
proposed_node['facts'].get(key, None):
changed_facts.append(key)
self.logger.debug('Changed facts: %s' % changed_facts)
required_facts = []
for key in changed_facts:
action = 'unapply_fact'
value = None
if key in proposed_node['facts']:
action = 'unapply_fact'
value = proposed_node['facts'][key]
# run this through the fact discovery
new_fact_reqs = self.additional_constraints(
api, node_id, action, {'key': key,
'value': value})
if new_fact_reqs is None:
self.logger.debug('Impossible to satisfy %s->%s' %
(key, value))
return None
for fact in new_fact_reqs:
if not fact in new_constraints:
new_constraints.append(fact)
self.logger.debug('Required facts: %s' % required_facts)
return new_constraints
# parent_facts = api._model_get_by_id('nodes', parent)
# determine what facts are going to be inherited,
# so we can run the 'set_fact' (apply fact?)
# node differ?
# inherited_facts = parent['facts'].get('inherited', {})
# return ['facts.%s="%s"' % (k, v)
# for k, v in inherited_facts.items()]
else:
# cannot set_parent to something that isn't a container
return None
return []
def set_parent(self, state_data, api, node_id, **kwargs):
reply_data = {}
current = api._model_get_first_by_query(
'facts', 'node_id = %s and key="parent_id"' % node_id)
# it's possible we have unparented things
current_parent = 1
if current:
current_parent = current['value']
if current:
reply_data['rollback'] = {'primitive': 'node.set_parent',
'ns': {'parent': current_parent}}
parent = kwargs['parent']
opencenter.webapp.ast.apply_expression(node_id,
'facts.parent_id := %s'
% parent,
api)
return self._ok(data=reply_data)
def apply_fact(self, state_data, api, node_id, **kwargs):
node = api.node_get_by_id(node_id)
key, value = kwargs['key'], kwargs['value']
curval = node['facts'].get(key, None)
if key == "chef_environment" and (curval is not None
or curval != value):
return self._fail()
self.logger.debug("Applying (vs. setting) fact %s->%s" %
(key, value))
# something should be done here.
return self._ok()
def del_fact(self, state_data, api, node_id, **kwargs):
"""
delete an existing fact.
required kwargs:
key: key of fact to delete
"""
if not 'key' in kwargs:
return self._fail(msg='need "key" kwarg')
old_fact = None
old_fact = api._model_query(
'facts', 'node_id=%d and key="%s"' % (int(node_id),
kwargs['key']))
if old_fact is None:
return self._ok() # no rollback necessary
old_fact = old_fact[0]
api._model_delete_by_id('facts', old_fact['id'])
reply_data = {
'rollback': {
'primitive': 'node.set_fact',
'ns': {
'key': old_fact['key'],
'value': old_fact['value']}}}
return self._ok(data=reply_data)
def set_fact(self, state_data, api, node_id, **kwargs):
reply_data = {}
key, value = kwargs['key'], kwargs['value']
# if the fact exists, update it, else create it.
oldkeys = api._model_query('facts', 'node_id=%s and key=%s' %
(node_id, key))
_by_key = dict([[x['key'], x['value']] for x in oldkeys])
if key in _by_key and _by_key[key] == value:
# we dont' need to set the value, merely apply it -- no rollback
return self.apply_fact(state_data, api, node_id, **kwargs)
elif key in _by_key:
reply_data['rollback'] = {'primitive': 'node.set_fact',
'ns': {'key': key,
'value': _by_key[key]}}
else: # key not in _by_key
reply_data['rollback'] = {'primitive': 'node.del_fact',
'ns': {'key': key}}
if len(oldkeys) > 0:
# update
api._model_update_by_id('facts', {'id': oldkeys[0]['id'],
'value': value})
else:
api._model_create('facts', {'node_id': node_id,
'key': key,
'value': value})
return self._ok(data=reply_data)
def del_attr(self, state_data, api, node_id, **kwargs):
"""
delete an existing node attribute
required kwargs:
key: key of attr to delete
"""
if not 'key' in kwargs:
return self._fail(msg='need "key" kwarg')
old_attr = None
old_attr = api._model_query(
'attrs', 'node_id=%d and key="%s"' % (int(node_id),
kwargs['key']))
if old_attr is None:
return self._ok()
old_attr = old_attr[0]
api._model_delete_by_id('attrs', old_attr['id'])
reply_data = {
'rollback': {
'primitive': 'node.set_attr',
'ns': {
'key': old_attr['key'],
'value': old_attr['value']}}}
return self._ok(data=reply_data)
def set_attr(self, state_data, api, node_id, **kwargs):
reply_data = {}
key, value = kwargs['key'], kwargs['value']
oldkeys = api._model_query('facts', 'node_id=%s and key=%s' %
(node_id, key))
_by_key = dict([[x['key'], x['value']] for x in oldkeys])
if key in _by_key:
reply_data['rollback'] = {'primitive': 'node.set_attr',
'ns': {'key': key,
'value': _by_key[key]}}
else:
reply_data['rollback'] = {'primitive': 'node.del_attr',
'ns': {'key': key}}
api._model_create('attrs', {"node_id": node_id,
'key': key,
'value': value})
return self._ok(data=reply_data)
def add_backend(self, state_data, api, node_id, **kwargs):
reply_data = {}
self.logger.debug('adding backend %s', kwargs['backend'])
old_node = api._model_get_by_id('nodes', node_id)
old_backend = old_node['facts'].get('backends', [])
reply_data['rollback'] = {'primitive': 'node.set_fact',
'ns': {'key': 'backends',
'value': old_backend}}
opencenter.webapp.ast.apply_expression(
node_id, 'facts.backends := union(facts.backends, "%s")' %
kwargs['backend'], api)
return self._ok(data=reply_data)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfgan.python.features.virtual_batchnorm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as contrib_variables_lib
from tensorflow.contrib.gan.python.features.python import virtual_batchnorm_impl as virtual_batchnorm
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VirtualBatchnormTest(test.TestCase):
def test_syntax(self):
reference_batch = array_ops.zeros([5, 3, 16, 9, 15])
vbn = virtual_batchnorm.VBN(reference_batch, batch_axis=1)
vbn(array_ops.ones([5, 7, 16, 9, 15]))
def test_no_broadcast_needed(self):
"""When `axis` and `batch_axis` are at the end, no broadcast is needed."""
reference_batch = array_ops.zeros([5, 3, 16, 9, 15])
minibatch = array_ops.zeros([5, 3, 16, 3, 15])
vbn = virtual_batchnorm.VBN(reference_batch, axis=-1, batch_axis=-2)
vbn(minibatch)
def test_statistics(self):
"""Check that `_statistics` gives the same result as `nn.moments`."""
random_seed.set_random_seed(1234)
tensors = random_ops.random_normal([4, 5, 7, 3])
for axes in [(3), (0, 2), (1, 2, 3)]:
vb_mean, mean_sq = virtual_batchnorm._statistics(tensors, axes)
mom_mean, mom_var = nn.moments(tensors, axes)
vb_var = mean_sq - math_ops.square(vb_mean)
with self.cached_session(use_gpu=True) as sess:
vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
vb_mean, vb_var, mom_mean, mom_var])
self.assertAllClose(mom_mean_np, vb_mean_np)
self.assertAllClose(mom_var_np, vb_var_np)
def test_virtual_statistics(self):
"""Check that `_virtual_statistics` gives same result as `nn.moments`."""
random_seed.set_random_seed(1234)
batch_axis = 0
partial_batch = random_ops.random_normal([4, 5, 7, 3])
single_example = random_ops.random_normal([1, 5, 7, 3])
full_batch = array_ops.concat([partial_batch, single_example], axis=0)
for reduction_axis in range(1, 4):
# Get `nn.moments` on the full batch.
reduction_axes = list(range(4))
del reduction_axes[reduction_axis]
mom_mean, mom_variance = nn.moments(full_batch, reduction_axes)
# Get virtual batch statistics.
vb_reduction_axes = list(range(4))
del vb_reduction_axes[reduction_axis]
del vb_reduction_axes[batch_axis]
vbn = virtual_batchnorm.VBN(partial_batch, reduction_axis)
vb_mean, mean_sq = vbn._virtual_statistics(
single_example, vb_reduction_axes)
vb_variance = mean_sq - math_ops.square(vb_mean)
# Remove singleton batch dim for easy comparisons.
vb_mean = array_ops.squeeze(vb_mean, batch_axis)
vb_variance = array_ops.squeeze(vb_variance, batch_axis)
with self.cached_session(use_gpu=True) as sess:
vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
vb_mean, vb_variance, mom_mean, mom_variance])
self.assertAllClose(mom_mean_np, vb_mean_np)
self.assertAllClose(mom_var_np, vb_var_np)
def test_reference_batch_normalization(self):
"""Check that batch norm from VBN agrees with opensource implementation."""
random_seed.set_random_seed(1234)
batch = random_ops.random_normal([6, 5, 7, 3, 3])
for axis in range(5):
# Get `layers` batchnorm result.
bn_normalized = normalization.batch_normalization(
batch, axis, training=True)
# Get VBN's batch normalization on reference batch.
batch_axis = 0 if axis != 0 else 1  # axis and batch_axis can't be the same
vbn = virtual_batchnorm.VBN(batch, axis, batch_axis=batch_axis)
vbn_normalized = vbn.reference_batch_normalization()
with self.cached_session(use_gpu=True) as sess:
variables_lib.global_variables_initializer().run()
bn_normalized_np, vbn_normalized_np = sess.run(
[bn_normalized, vbn_normalized])
self.assertAllClose(bn_normalized_np, vbn_normalized_np)
def test_same_as_batchnorm(self):
"""Check that batch norm on set X is the same as ref of X / y on `y`."""
random_seed.set_random_seed(1234)
num_examples = 4
examples = [random_ops.random_normal([5, 7, 3]) for _ in
range(num_examples)]
# Get the result of the opensource batch normalization.
batch_normalized = normalization.batch_normalization(
array_ops.stack(examples), training=True)
for i in range(num_examples):
examples_except_i = array_ops.stack(examples[:i] + examples[i+1:])
# Get the result of VBN's batch normalization.
vbn = virtual_batchnorm.VBN(examples_except_i)
vb_normed = array_ops.squeeze(
vbn(array_ops.expand_dims(examples[i], [0])), [0])
with self.cached_session(use_gpu=True) as sess:
variables_lib.global_variables_initializer().run()
bn_np, vb_np = sess.run([batch_normalized, vb_normed])
self.assertAllClose(bn_np[i, ...], vb_np)
def test_minibatch_independent(self):
"""Test that virtual batch normalized examples are independent.
Unlike batch normalization, virtual batch normalization has the property
that the virtual batch normalized value of an example is independent of the
other examples in the minibatch. In this test, we verify this property.
"""
random_seed.set_random_seed(1234)
# These can be random, but must be the same for all session calls.
reference_batch = constant_op.constant(
np.random.normal(size=[4, 7, 3]), dtype=dtypes.float32)
fixed_example = constant_op.constant(np.random.normal(size=[7, 3]),
dtype=dtypes.float32)
# Get the VBN object and the virtual batch normalized value for
# `fixed_example`.
vbn = virtual_batchnorm.VBN(reference_batch)
vbn_fixed_example = array_ops.squeeze(
vbn(array_ops.expand_dims(fixed_example, 0)), 0)
with self.session(use_gpu=True):
variables_lib.global_variables_initializer().run()
vbn_fixed_example_np = vbn_fixed_example.eval()
# Check that the value is the same for different minibatches, and different
# sized minibatches.
for minibatch_size in range(1, 6):
examples = [random_ops.random_normal([7, 3]) for _ in
range(minibatch_size)]
minibatch = array_ops.stack([fixed_example] + examples)
vbn_minibatch = vbn(minibatch)
cur_vbn_fixed_example = vbn_minibatch[0, ...]
with self.cached_session(use_gpu=True):
variables_lib.global_variables_initializer().run()
cur_vbn_fixed_example_np = cur_vbn_fixed_example.eval()
self.assertAllClose(vbn_fixed_example_np, cur_vbn_fixed_example_np)
def test_variable_reuse(self):
"""Test that variable scopes work and inference on a real-ish case."""
tensor1_ref = array_ops.zeros([6, 5, 7, 3, 3])
tensor1_examples = array_ops.zeros([4, 5, 7, 3, 3])
tensor2_ref = array_ops.zeros([4, 2, 3])
tensor2_examples = array_ops.zeros([2, 2, 3])
with variable_scope.variable_scope('dummy_scope', reuse=True):
with self.assertRaisesRegexp(
ValueError, 'does not exist, or was not created with '
'tf.get_variable()'):
virtual_batchnorm.VBN(tensor1_ref)
vbn1 = virtual_batchnorm.VBN(tensor1_ref, name='vbn1')
vbn2 = virtual_batchnorm.VBN(tensor2_ref, name='vbn2')
# Fetch reference and examples after virtual batch normalization. Also
# fetch in variable reuse case.
to_fetch = []
to_fetch.append(vbn1.reference_batch_normalization())
to_fetch.append(vbn2.reference_batch_normalization())
to_fetch.append(vbn1(tensor1_examples))
to_fetch.append(vbn2(tensor2_examples))
variable_scope.get_variable_scope().reuse_variables()
to_fetch.append(vbn1.reference_batch_normalization())
to_fetch.append(vbn2.reference_batch_normalization())
to_fetch.append(vbn1(tensor1_examples))
to_fetch.append(vbn2(tensor2_examples))
self.assertEqual(4, len(contrib_variables_lib.get_variables()))
with self.session(use_gpu=True) as sess:
variables_lib.global_variables_initializer().run()
sess.run(to_fetch)
def test_invalid_input(self):
# Reference batch has unknown dimensions.
with self.assertRaisesRegexp(
ValueError, '`reference_batch` has unknown dimensions.'):
virtual_batchnorm.VBN(array_ops.placeholder(dtypes.float32), name='vbn1')
# Axis too negative.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), axis=-3, name='vbn2')
# Axis too large.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), axis=2, name='vbn3')
# Batch axis too negative.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), name='vbn4', batch_axis=-3)
# Batch axis too large.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), name='vbn5', batch_axis=2)
# Axis and batch axis are the same.
with self.assertRaisesRegexp(
ValueError, '`axis` and `batch_axis` cannot be the same.'):
virtual_batchnorm.VBN(array_ops.zeros(
[1, 2]), axis=1, name='vbn6', batch_axis=1)
# Reference Tensor and example Tensor have incompatible shapes.
tensor_ref = array_ops.zeros([5, 2, 3])
tensor_examples = array_ops.zeros([3, 2, 3])
vbn = virtual_batchnorm.VBN(tensor_ref, name='vbn7', batch_axis=1)
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
vbn(tensor_examples)
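# Hedged numpy sketch (illustrative only, not TensorFlow code): the moment
# combination that test_virtual_statistics exercises above. The statistics of
# "reference batch plus one example" can be rebuilt from the reference batch's
# mean and mean-of-squares, which is the core idea of virtual batch norm.
def _numpy_virtual_moments_sketch():
    ref = np.random.normal(size=(4, 5))   # reference batch
    x = np.random.normal(size=(5,))       # single new example
    n = ref.shape[0]
    mean = (n * ref.mean(axis=0) + x) / (n + 1)
    mean_sq = (n * (ref ** 2).mean(axis=0) + x ** 2) / (n + 1)
    var = mean_sq - mean ** 2
    full = np.concatenate([ref, x[None, :]], axis=0)
    assert np.allclose(mean, full.mean(axis=0))
    assert np.allclose(var, full.var(axis=0))
    return mean, var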
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python3
import os
import glob
import json
import copy
import timeit
from collections import OrderedDict as odict
from ruamel import yaml as yaml
from ruamel.yaml.comments import CommentedMap as CommentedMap
from . import util
from . import conf
from .build_flags import BuildFlags
from .build_item import BuildItem
from .build_type import BuildType
from .system import System
from .architecture import Architecture
from .compiler import Compiler
from .variant import Variant
from .build import Build
from .combination_rules import CombinationRules
from .cmake import getcachevars
from . import cmake
from . import err
from .util import path_exists as _pexists
from .util import logdbg as dbg
# -----------------------------------------------------------------------------
def _getdir(attr_name, default, kwargs, cwd):
d = kwargs.get(attr_name)
if d is None:
d = os.path.join(cwd, default)
else:
if not os.path.isabs(d):
d = os.path.join(cwd, d)
d = util.abspath(d)
return d
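# Hedged example (illustrative only): with cwd == /work/proj and no explicit
# build dir in kwargs, _getdir('build_dir', 'build', kwargs, cwd) resolves to
# /work/proj/build; a relative value such as 'out' resolves to /work/proj/out,
# and an absolute value is returned as given (after abspath normalization).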
# -----------------------------------------------------------------------------
class Project:
def __init__(self, **kwargs):
#
self.kwargs = kwargs
self.num_jobs = kwargs.get('jobs')
self.targets = kwargs.get('target')
self.continue_on_fail = kwargs.get('continue')
#
cwd = util.abspath(os.getcwd())
pdir = kwargs.get('proj_dir')
dbg("cwd:", cwd)
dbg("proj_dir:", pdir)
if pdir is None:
raise err.ProjDirNotFound(None)
if pdir == ".":
pdir = cwd
pdir = util.abspath(pdir)
dbg("proj_dir, abs:", pdir)
#
if not _pexists(pdir):
raise err.ProjDirNotFound(pdir)
#
self.cmakelists = os.path.join(pdir, "CMakeLists.txt")
cmakecache = None
if _pexists(self.cmakelists):
dbg("found CMakeLists.txt:", self.cmakelists)
self.build_dir = _getdir('build_dir', 'build', kwargs, cwd)
self.install_dir = _getdir('install_dir', 'install', kwargs, cwd)
self.root_dir = pdir
elif _pexists(pdir, "CMakeCache.txt"):
cmakecache = os.path.join(pdir, "CMakeCache.txt")
dbg("found CMakeCache.txt:", cmakecache)
ch = cmake.CMakeCache(pdir)
self.build_dir = os.path.dirname(pdir)
self.install_dir = os.path.dirname(ch['CMAKE_INSTALL_PREFIX'].val)
self.root_dir = ch['CMAKE_HOME_DIRECTORY'].val
self.cmakelists = os.path.join(self.root_dir, "CMakeLists.txt")
#
self.root_dir = os.path.realpath(self.root_dir)
self.build_dir = os.path.realpath(self.build_dir)
self.install_dir = os.path.realpath(self.install_dir)
self.cmakelists = os.path.realpath(self.cmakelists)
#
dbg("root_dir:", self.root_dir)
dbg("build_dir:", self.build_dir)
dbg("install_dir:", self.install_dir)
dbg("CMakeLists.txt:", self.cmakelists)
#
if not _pexists(self.cmakelists):
raise err.CMakeListsNotFound(pdir)
#
if cmakecache is not None:
self._init_with_build_dir(os.path.dirname(cmakecache), **kwargs)
elif cmake.hascache(self.root_dir):
self._init_with_build_dir(self.root_dir, **kwargs)
elif kwargs.get('glob'):
self._init_with_glob(**kwargs)
else:
self.load_configs()
self._init_with_build_items(**kwargs)
def _init_with_build_dir(self, pdir, **kwargs):
build = Build.deserialize(pdir)
self.builds = [build]
def _init_with_glob(self, **kwargs):
g = kwargs.get('glob')
self.builds = []
for pattern in g:
bp = os.path.join(self.build_dir, pattern)
li = glob.glob(bp)
for b in li:
build = Build.deserialize(b)
self.builds.append(build)
def _init_with_build_items(self, **kwargs):
s, a, c, t, v = __class__.get_build_items(**kwargs)
#
cr = CombinationRules(kwargs.get('combination_rules', []))
combs = cr.valid_combinations(s, a, c, t, v)
dbg("combinations:", combs)
self.combination_rules = cr
#
self.builds = []
for comb in combs:
dbg("adding build from combination:", comb)
            self.add_build(*comb)  # comb == (system, arch, compiler, build_type, variant)
#
self.systems = s
self.architectures = a
self.compilers = c
self.build_types = t
self.variants = v
#
# add new build params as needed to deal with adjusted builds
def _addnew(b, name):
a = getattr(b, name)
ali = getattr(self, name + 's')
if not [elm for elm in ali if str(elm) == str(a)]:
ali.append(a)
for b in self.builds:
if not b.adjusted:
continue
_addnew(b, 'system')
_addnew(b, 'architecture')
_addnew(b, 'build_type')
_addnew(b, 'compiler')
_addnew(b, 'variant')
@staticmethod
def get_build_items(**kwargs):
d = odict()
for c, cls in (
('systems', System),
('architectures', Architecture),
('compilers', Compiler),
('build_types', BuildType),
('variants', Variant)):
d[c] = (cls, kwargs.get(c))
coll = BuildItem.create(d)
s = coll['systems']
a = coll['architectures']
c = coll['compilers']
t = coll['build_types']
v = coll['variants']
return s, a, c, t, v
def load_configs(self):
seq = [os.path.join(d, "cmany.yml") for d in (
conf.CONF_DIR, conf.USER_DIR, self.root_dir)]
if self.kwargs.get('no_default_config'):
seq = []
for f in self.kwargs.get('config_file', []):
ff = f
if not os.path.isabs(ff):
ff = os.path.join(self.root_dir, ff)
if not os.path.exists(ff):
raise err.ConfigFileNotFound(ff)
            seq.append(ff)  # append the resolved path that was just checked
self.configs = conf.Configs.load_seq(seq)
def save_configs(self):
# c = Configs()
pass
def create_proj(self):
yml = CommentedMap()
yml['project'] = CommentedMap()
#
def _add(name):
items = getattr(self, name)
#if BuildItem.trivial_item(items):
# yml['project'][name] = "_default_"
#elif BuildItem.no_flags_in_collection(items):
if BuildItem.no_flags_in_collection(items):
out = []
for s in items:
out.append(s.name)
yml['project'][name] = out
else:
out = []
for s in items:
cm = CommentedMap()
cm[s.name] = CommentedMap()
s.save_config(cm[s.name])
out.append(cm)
yml['project'][name] = out
#
_add('systems')
_add('architectures')
_add('compilers')
_add('build_types')
_add('variants')
txt = yaml.round_trip_dump(yml)
fn = self.kwargs['output_file']
if not os.path.isabs(fn):
fn = os.path.join(self.root_dir, fn)
with open(fn, "w") as f:
f.write(txt)
def add_build(self, system, arch, compiler, build_type, variant):
# duplicate the build items, as they may be mutated due
# to translation of their flags for the compiler
def _dup_item(item):
i = copy.deepcopy(item)
i.flags.resolve_flag_aliases(compiler, aliases=self.configs.flag_aliases)
return i
s = _dup_item(system)
a = _dup_item(arch)
t = _dup_item(build_type)
c = _dup_item(compiler)
v = _dup_item(variant)
#
f = BuildFlags('all_builds', **self.kwargs)
f.resolve_flag_aliases(compiler, aliases=self.configs.flag_aliases)
#
# create the build
dbg("adding build:", s, a, t, c, v, f)
b = Build(self.root_dir, self.build_dir, self.install_dir,
s, a, t, c, v, f,
self.num_jobs, dict(self.kwargs))
#
# When a build is created, its parameters may have been adjusted
# because of an incompatible generator specification.
# So drop this build if an equal one already exists
if b.adjusted and self.exists(b):
return False # a similar build already exists
#
# finally, this.
self.builds.append(b)
return True # build successfully added
def exists(self, build):
for b in self.builds:
if str(b.tag) == str(build.tag):
return True
return False
def select(self, **kwargs):
out = [b for b in self.builds]
def _h(kw, attr):
            nonlocal out  # filter the enclosing `out` list, not a module-level global
g = kwargs.get(kw)
if g is not None:
lo = []
for b in out:
if str(getattr(b, attr)) == str(g):
lo.append(b)
out = lo
_h("sys", "system")
_h("arch", "architecture")
_h("compiler", "compiler")
_h("build_type", "build_type")
_h("variant", "variant")
return out
def create_tree(self, **restrict_to):
builds = self.select(**restrict_to)
for b in builds:
b.create_dir()
b.create_preload_file()
# print(b, ":", d)
def configure(self, **restrict_to):
if not os.path.exists(self.build_dir):
os.makedirs(self.build_dir)
self._execute(Build.configure, "Configure", silent=False, **restrict_to)
def reconfigure(self, **restrict_to):
if not os.path.exists(self.build_dir):
os.makedirs(self.build_dir)
self._execute(Build.reconfigure, "Reconfigure", silent=False, **restrict_to)
def export_compile_commands(self, **restrict_to):
if not os.path.exists(self.build_dir):
os.makedirs(self.build_dir)
self._execute(Build.export_compile_commands, "Export compile commands", silent=False, **restrict_to)
def build(self, **restrict_to):
def do_build(build):
build.build(self.targets)
self._execute(do_build, "Build", silent=False, **restrict_to)
def rebuild(self, **restrict_to):
def do_rebuild(build):
build.rebuild(self.targets)
self._execute(do_rebuild, "Rebuild", silent=False, **restrict_to)
def clean(self, **restrict_to):
self._execute(Build.clean, "Clean", silent=False, **restrict_to)
def install(self, **restrict_to):
self._execute(Build.install, "Install", silent=False, **restrict_to)
def reinstall(self, **restrict_to):
self._execute(Build.reinstall, "Reinstall", silent=False, **restrict_to)
def run_cmd(self, cmd, **subprocess_args):
def run_it(build):
build.run_custom_cmd(cmd, **subprocess_args)
self._execute(run_it, "Run cmd", silent=False)
def export_vs(self):
confs = []
for b in self.builds:
confs.append(b.json_data())
jd = odict([('configurations', confs)])
with open(self.configfile, 'w') as f:
json.dump(jd, f, indent=2)
def show_vars(self, varlist):
varv = odict()
pat = os.path.join(self.build_dir, '*', 'CMakeCache.txt')
g = glob.glob(pat)
md = 0
mv = 0
for p in g:
d = os.path.dirname(p)
b = os.path.basename(d)
md = max(md, len(b))
vars = getcachevars(d, varlist)
for k, v in vars.items():
sk = str(k)
if not varv.get(sk):
varv[sk] = odict()
varv[sk][b] = v
mv = max(mv, len(sk))
#
fmt = "{:" + str(mv) + "}[{:" + str(md) + "}]={}"
for var, sysvalues in varv.items():
for s, v in sysvalues.items():
print(fmt.format(var, s, v))
def show_build_names(self):
for b in self.builds:
print(b)
def show_build_dirs(self):
for b in self.builds:
print(b.builddir)
def show_builds(self):
for b in self.builds:
b.show_properties()
def show_targets(self):
for t in self.builds[0].get_targets():
print(t)
def _execute(self, fn, msg, silent, **restrict_to):
builds = self.select(**restrict_to)
failed = odict()
durations = odict()
num = len(builds)
if not silent:
if num == 0:
print("no builds selected")
if num == 0:
return
def nt(*args, **kwargs): # notice
if silent: return
util.lognotice(*args, **kwargs)
def dn(*args, **kwargs): # done
if silent: return
util.logdone(*args, **kwargs)
def er(*args, **kwargs): # error
if silent: return
util.logerr(*args, **kwargs)
#
if num > 1:
nt("")
nt("===============================================")
nt(msg + ": start", num, "builds:")
for b in builds:
nt(b)
nt("===============================================")
#
for i, b in enumerate(builds):
if i > 0:
nt("\n")
nt("-----------------------------------------------")
if num > 1:
nt(msg + ": build #{} of {}:".format(i + 1, num), b)
else:
nt(msg, b)
nt("-----------------------------------------------")
#
t = timeit.default_timer()
try:
# this is where it happens
fn(b) # <-- here
word, logger = "finished", dn
# exceptions thrown from builds inherit this type
except err.BuildError as e:
word, logger = "failed", er
util.logerr(f"{b} failed! {e}")
failed[b] = e
if not self.continue_on_fail:
raise
t = timeit.default_timer() - t
hrt = util.human_readable_time(t)
durations[b] = (t, hrt)
if num > 1:
ip1 = i + 1
info = f"{word} build #{ip1} of {num} ({hrt})"
else:
info = f"{word} building ({hrt})"
logger(msg + ": " + info + ":", b)
#
nt("-----------------------------------------------")
if num > 1:
if failed:
dn(msg + ": processed", num, "builds: (with failures)")
else:
dn(msg + ": finished", num, "builds:")
tot = 0.
for _, (d, _) in durations.items():
tot += d
for b in builds:
dur, hrt = durations[b]
times = "({}, {:.3f}%, {:.3f}x avg)".format(
hrt, dur / tot * 100., dur / (tot / float(num))
)
fail = failed.get(b)
if fail:
er(b, times, "[FAIL]!!!", fail)
else:
dn(b, times)
if failed:
msg = "{}/{} builds failed ({:.1f}%)!"
er(msg.format(len(failed), num, float(len(failed)) / num * 100.0))
else:
dn(f"all {num} builds succeeded!")
dn("total time:", util.human_readable_time(tot))
nt("===============================================")
if failed:
raise Exception(failed)
|
|
from __future__ import division  # so the Borgatti branch of effective_size() uses true division under Python 2
import networkx as nx
from collections import defaultdict
import csv
import numpy as np
# NOTE (assumption): effective_size() below relies on `normalized_mutual_weight`,
# which NetworkX defines in networkx.algorithms.structuralholes.
from networkx.algorithms.structuralholes import normalized_mutual_weight
import matplotlib as mpl
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 16}
mpl.rc('font', **font)
def effective_size(G, nodes=None, weight=None):
"""Returns the effective size of all nodes in the graph ``G``.
The *effective size* of a node's ego network is based on the concept
of redundancy. A person's ego network has redundancy to the extent
that her contacts are connected to each other as well. The
    nonredundant part of a person's relationships is the effective
size of her ego network [1]_. Formally, the effective size of a
node `u`, denoted `e(u)`, is defined by
.. math::
e(u) = \sum_{v \in N(u) \setminus \{u\}}
\left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right)
where `N(u)` is the set of neighbors of `u` and :math:`p_{uw}` is the
normalized mutual weight of the (directed or undirected) edges
joining `u` and `v`, for each vertex `u` and `v` [1]_. And :math:`m_{vw}`
is the mutual weight of `v` and `w` divided by `v` highest mutual
weight with any of its neighbors. The *mutual weight* of `u` and `v`
is the sum of the weights of edges joining them (edge weights are
assumed to be one if the graph is unweighted).
For the case of unweighted and undirected graphs, Borgatti proposed
a simplified formula to compute effective size [2]_
.. math::
e(u) = n - \frac{2t}{n}
where `t` is the number of ties in the ego network (not including
ties to ego) and `n` is the number of nodes (excluding ego).
Parameters
----------
G : NetworkX graph
The graph containing ``v``. Directed graphs are treated like
undirected graphs when computing neighbors of ``v``.
nodes : container, optional
Container of nodes in the graph ``G``.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
dict
        Dictionary with nodes as keys and the effective size of the node as values.
Notes
-----
    Burt also defined the related concept of *efficiency* of a node's ego
    network, which is its effective size divided by the degree of that
    node [1]_. So you can easily compute efficiency:
>>> G = nx.DiGraph()
>>> G.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)])
>>> esize = nx.effective_size(G)
    >>> efficiency = {n: v / G.degree(n) for n, v in esize.items()}
See also
--------
constraint
References
----------
.. [1] Burt, Ronald S.
*Structural Holes: The Social Structure of Competition.*
Cambridge: Harvard University Press, 1995.
.. [2] Borgatti, S.
"Structural Holes: Unpacking Burt's Redundancy Measures"
CONNECTIONS 20(1):35-38.
http://www.analytictech.com/connections/v20(1)/holes.htm
"""
def redundancy(G, u, v, weight=None):
nmw = normalized_mutual_weight
r = sum(nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight)
for w in set(nx.all_neighbors(G, u)))
return 1 - r
effective_size = {}
if nodes is None:
nodes = G
# Use Borgatti's simplified formula for unweighted and undirected graphs
if not G.is_directed() and weight is None:
for v in G:
# Effective size is not defined for isolated nodes
if len(G[v]) == 0:
effective_size[v] = float('nan')
continue
E = nx.ego_graph(G, v, center=False, undirected=True)
effective_size[v] = len(E) - (2 * E.size()) / len(E)
else:
for v in G:
# Effective size is not defined for isolated nodes
if len(G[v]) == 0:
effective_size[v] = float('nan')
continue
effective_size[v] = sum(redundancy(G, v, u, weight)
for u in set(nx.all_neighbors(G, v)))
return effective_size
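# Minimal usage sketch for effective_size() (illustrative only; this tiny graph is
# made up and is not part of the analysis below). With true division, Borgatti's
# formula gives n - 2t/n for node 'a': 3 neighbours, 1 tie among them.
#   >>> H = nx.Graph([('a', 'b'), ('a', 'c'), ('b', 'c'), ('a', 'd')])
#   >>> effective_size(H)['a']
#   # -> roughly 2.33, i.e. 3 - 2*1/3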
## agg backend is used so the figure can be rendered to a file without a display
mpl.use('agg')
doc_dict = defaultdict(set)
doc_year_dict = defaultdict(set)
fileIn = 'author_combinations.csv'
with open(fileIn,'rb') as csvfile:
reader = csv.reader(csvfile,delimiter=';')
#reader = csv.DictReader(csvfile,delimiter=';')
for row in reader:
doc_id = row[0]
a1 = row[2]
a2 = row[3]
year = row[4]
#doc_id,title,a1,a2,year = line.split(';')
doc_dict[doc_id].add(a1)
doc_dict[doc_id].add(a2)
doc_year_dict[year].add(doc_id)
a1_first_name = a1.split(' ')[0]
a2_first_name = a2.split(' ')[0]
gender_dict = {}
with open('gender.csv','rb') as csvfile:
reader = csv.reader(csvfile,delimiter=',')
#reader = csv.DictReader(csvfile,delimiter=';')
for row in reader:
first_name = row[3]
gender = row[2]
gender_dict[first_name] = gender
G = nx.Graph()
for doc in doc_dict.keys():
author_list = list(set(doc_dict[doc])) #make the list without duplicates
for m in author_list:
for n in author_list:
if m not in G.nodes():
m_first = m.split(' ')[0]
try:
m_gender = gender_dict[m_first]
except KeyError:
m_gender = 'none'
print m
G.add_node(m, gender = m_gender)
if n not in G.nodes():
n_first = n.split(' ')[0]
try:
n_gender = gender_dict[n_first]
except KeyError:
n_gender = 'none'
print n
G.add_node(n, gender = n_gender)
if m != n:
if G.has_edge(m,n):
G[m][n]['weight'] += 1
else:
G.add_edge(m,n, weight = 1)
#### measuring node strength ##############
weight_dict = defaultdict(int)
for e in G.edges():
n1,n2 = e
e_weight = G[n1][n2]['weight']
weight_dict[n1] += e_weight
weight_dict[n2] += e_weight
############################################
print nx.attribute_assortativity_coefficient(G,'gender')
node_clustering = nx.clustering(G)
node_degree = nx.degree(G)
btw_list = nx.betweenness_centrality(G)
closeness_list = nx.closeness_centrality(G)
core_list = nx.core_number(G)  # not a good measure
esize = effective_size(G)
rc = nx.rich_club_coefficient(G,normalized=False)
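# Note (assumption): degree 23 is apparently missing from the rich-club dictionary
# returned above, so it is filled in manually to avoid a KeyError in the per-node
# lookup (rc[n_degree]) further below.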
rc[23] = 1.0
print rc
print core_list
male_cc = []
male_deg = []
male_btw = []
male_closeness = []
male_esize = []
male_core = []
male_rc = []
male_strength = []
female_cc = []
female_deg = []
female_btw = []
female_closeness = []
female_esize = []
female_core = []
female_rc = []
female_strength = []
largest_cc = max(nx.connected_components(G), key=len)
count_men = 0
count_women = 0
gender_list = nx.get_node_attributes(G,'gender')
for n in G.nodes():
if n in largest_cc:
print n,'yes'
n_gender = gender_list[n]
n_clustering = node_clustering[n]
n_degree = node_degree[n]
n_btw = btw_list[n]
n_closeness = closeness_list[n]
n_esize = esize[n]
n_core = core_list[n]
n_rc = rc[n_degree] #what is the rich club coefficient for that node degree
n_strength = weight_dict[n]
if n_gender == 'male':
count_men += 1
male_cc.append(n_clustering)
male_deg.append(n_degree)
male_btw.append(n_btw)
male_closeness.append(n_closeness)
male_esize.append(n_esize)
male_core.append(n_core)
male_rc.append(n_rc)
male_strength.append(n_strength)
if n_gender == 'female':
count_women += 1
female_cc.append(n_clustering)
female_deg.append(n_degree)
female_btw.append(n_btw)
female_closeness.append(n_closeness)
female_esize.append(n_esize)
female_core.append(n_core)
female_rc.append(n_rc)
female_strength.append(n_strength)
print count_men , count_women
################ make box plots ##########
import matplotlib.pyplot as plt
data_deg = [female_deg,male_deg]
data_cc = [female_cc,male_cc]
data_btw = [female_btw,male_btw]
data_close= [female_closeness,male_closeness]
data_esize = [female_esize,male_esize]
data_rc = [female_rc,male_rc]
data_strength = [female_strength,male_strength]
#data_core = [female_core,male_core]
# Create a figure instance
colors = ['pink','lightblue']
labels = ['F','M']
#labels_short = ['F','M']
fs = 16
# Create a 2x3 grid of axes and draw one horizontal box plot per metric
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20,8))
bplot1 = axes[0, 0].boxplot(data_deg, labels=labels , patch_artist=True , vert=False , widths=0.5)
axes[0, 0].set_title('degree', fontsize=fs)
#axes[0, 0].set_aspect(8) # or some other float
bplot2 = axes[0, 1].boxplot(data_cc, labels=labels,patch_artist=True , vert=False, widths=0.5)
axes[0, 1].set_title('clustering', fontsize=fs)
#axes[0, 1].set_aspect(10) # or some other float
#axes[0, 1].set_yscale('log')
#axes[0, 1].set_ylim(0,1.2)
bplot3 = axes[0, 2].boxplot(data_btw, labels=labels, patch_artist=True , vert=False, widths=0.5)
axes[0, 2].set_title('betweenness', fontsize=fs)
#axes[0, 2].set_aspect(8) # or some other float
#axes[0, 2].set_yscale('log')
#axes[0, 2].set_ylim(10**(-5),10**(-1))
bplot4 = axes[1, 0].boxplot(data_close, labels=labels, patch_artist=True , vert=False, widths=0.5)
axes[1, 0].set_title('closeness', fontsize=fs)
#axes[1, 0].set_aspect(8) # or some other float
#axes[1, 0].set_yscale('log')
bplot5 = axes[1, 1].boxplot(data_esize, labels=labels,patch_artist=True , vert=False, widths=0.5)
axes[1, 1].set_title('effective size', fontsize=fs)
#axes[1, 1].set_aspect(8) # or some other float
#axes[1, 1].set_yscale('log')
#axes[1,1].set_ylim(10**(-2),25)
bplot6 = axes[1, 2].boxplot(data_strength, labels=labels,patch_artist=True , vert=False, widths=0.5)
axes[1, 2].set_title('strength', fontsize=fs)
#axes[1, 2].set_aspect(8) # or some other float
for bplot in (bplot1, bplot2,bplot3,bplot4,bplot5,bplot6):
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
plt.subplots_adjust(left=0.25)
# Save the figure
fig.savefig('boxplot_horizontal.pdf', bbox_inches='tight')
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mixture distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
import tensorflow as tf
distributions_py = tf.contrib.distributions
def _swap_first_last_axes(array):
rank = len(array.shape)
transpose = [rank - 1] + list(range(0, rank - 1))
return array.transpose(transpose)
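# Illustrative example of _swap_first_last_axes: an array of shape
# (batch..., num_components) is permuted to (num_components, batch...), e.g.
# (2, 3, 5) -> (5, 2, 3), so per-component slices can be zipped against the
# per-component distributions in the tests below.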
@contextlib.contextmanager
def _test_capture_mvndiag_sample_outputs():
"""Use monkey-patching to capture the output of an MVNDiag sample_n."""
data_container = []
true_mvndiag_sample = distributions_py.MultivariateNormalDiag.sample_n
def _capturing_mvndiag_sample(self, n, seed=None, name="sample_n"):
samples = true_mvndiag_sample(self, n=n, seed=seed, name=name)
data_container.append(samples)
return samples
distributions_py.MultivariateNormalDiag.sample_n = _capturing_mvndiag_sample
yield data_container
distributions_py.MultivariateNormalDiag.sample_n = true_mvndiag_sample
@contextlib.contextmanager
def _test_capture_normal_sample_outputs():
"""Use monkey-patching to capture the output of an Normal sample_n."""
data_container = []
true_normal_sample = distributions_py.Normal.sample_n
def _capturing_normal_sample(self, n, seed=None, name="sample_n"):
samples = true_normal_sample(self, n=n, seed=seed, name=name)
data_container.append(samples)
return samples
distributions_py.Normal.sample_n = _capturing_normal_sample
yield data_container
distributions_py.Normal.sample_n = true_normal_sample
def make_univariate_mixture(batch_shape, num_components):
logits = tf.random_uniform(
list(batch_shape) + [num_components], -1, 1, dtype=tf.float32) - 50.
components = [
distributions_py.Normal(
mu=np.float32(np.random.randn(*list(batch_shape))),
sigma=np.float32(10 * np.random.rand(*list(batch_shape))))
for _ in range(num_components)
]
cat = distributions_py.Categorical(logits, dtype=tf.int32)
return distributions_py.Mixture(cat, components)
def make_multivariate_mixture(batch_shape, num_components, event_shape):
logits = tf.random_uniform(
list(batch_shape) + [num_components], -1, 1, dtype=tf.float32) - 50.
components = [
distributions_py.MultivariateNormalDiag(
mu=np.float32(np.random.randn(*list(batch_shape + event_shape))),
diag_stdev=np.float32(10 * np.random.rand(
*list(batch_shape + event_shape))))
for _ in range(num_components)
]
cat = distributions_py.Categorical(logits, dtype=tf.int32)
return distributions_py.Mixture(cat, components)
class MixtureTest(tf.test.TestCase):
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_univariate_mixture(batch_shape, num_components=10)
self.assertAllEqual(batch_shape, dist.get_batch_shape())
self.assertAllEqual(batch_shape, dist.batch_shape().eval())
self.assertAllEqual([], dist.get_event_shape())
self.assertAllEqual([], dist.event_shape().eval())
for event_shape in ([1], [2]):
dist = make_multivariate_mixture(
batch_shape, num_components=10, event_shape=event_shape)
self.assertAllEqual(batch_shape, dist.get_batch_shape())
self.assertAllEqual(batch_shape, dist.batch_shape().eval())
self.assertAllEqual(event_shape, dist.get_event_shape())
self.assertAllEqual(event_shape, dist.event_shape().eval())
def testBrokenShapesStatic(self):
with self.assertRaisesWithPredicateMatch(ValueError,
r"cat.num_classes != len"):
distributions_py.Mixture(
distributions_py.Categorical([0.1, 0.5]), # 2 classes
[distributions_py.Normal(mu=1.0, sigma=2.0)])
with self.assertRaisesWithPredicateMatch(
ValueError, r"\(\) and \(2,\) are not compatible"):
# The value error is raised because the batch shapes of the
# Normals are not equal. One is a scalar, the other is a
# vector of size (2,).
distributions_py.Mixture(
distributions_py.Categorical([-0.5, 0.5]), # scalar batch
[distributions_py.Normal(mu=1.0, sigma=2.0), # scalar dist
distributions_py.Normal(mu=[1.0, 1.0], sigma=[2.0, 2.0])])
with self.assertRaisesWithPredicateMatch(ValueError, r"Could not infer"):
cat_logits = tf.placeholder(shape=[1, None], dtype=tf.float32)
distributions_py.Mixture(
distributions_py.Categorical(cat_logits),
[distributions_py.Normal(mu=[1.0], sigma=[2.0])])
def testBrokenShapesDynamic(self):
with self.test_session():
d0_param = tf.placeholder(dtype=tf.float32)
d1_param = tf.placeholder(dtype=tf.float32)
d = distributions_py.Mixture(
distributions_py.Categorical([0.1, 0.2]),
[distributions_py.Normal(mu=d0_param, sigma=d0_param),
distributions_py.Normal(mu=d1_param, sigma=d1_param)],
validate_args=True)
with self.assertRaisesOpError(r"batch shape must match"):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: [1.0]})
with self.assertRaisesOpError(r"batch shape must match"):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: 1.0})
def testBrokenTypes(self):
with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
distributions_py.Mixture(None, [])
cat = distributions_py.Categorical([0.3, 0.2])
# components must be a list of distributions
with self.assertRaisesWithPredicateMatch(
TypeError, "all .* must be Distribution instances"):
distributions_py.Mixture(cat, [None])
with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
distributions_py.Mixture(
cat,
[distributions_py.Normal(mu=[1.0], sigma=[2.0]),
distributions_py.Normal(mu=[np.float16(1.0)],
sigma=[np.float16(2.0)])])
with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
distributions_py.Mixture(distributions_py.Categorical([0.3, 0.2]), None)
with self.assertRaisesWithPredicateMatch(TypeError,
"either be continuous or not"):
distributions_py.Mixture(
cat,
[distributions_py.Normal(mu=[1.0], sigma=[2.0]),
distributions_py.Bernoulli(dtype=tf.float32, logits=[1.0])])
def testMeanUnivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=2)
mean = dist.mean()
self.assertEqual(batch_shape, mean.get_shape())
cat_probs = tf.nn.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape, mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testMeanMultivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,))
mean = dist.mean()
self.assertEqual(batch_shape + (4,), mean.get_shape())
cat_probs = tf.nn.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape + (4,), mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# Add a new innermost dimension for broadcasting to mvn vector shape
cat_probs_value = [np.expand_dims(c_p, -1) for c_p in cat_probs_value]
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testProbScalarUnivariate(self):
with self.test_session() as sess:
dist = make_univariate_mixture(batch_shape=[], num_components=2)
for x in [np.array(
[1.0, 2.0], dtype=np.float32), np.array(
1.0, dtype=np.float32), np.random.randn(3, 4).astype(np.float32)]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = tf.nn.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
total_prob = sum(
c_p_value * d_p_value
for (c_p_value, d_p_value)
in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbScalarMultivariate(self):
with self.test_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[], num_components=2, event_shape=[3])
for x in [np.array(
[[-1.0, 0.0, 1.0], [0.5, 1.0, -0.3]], dtype=np.float32), np.array(
[-1.0, 0.0, 1.0], dtype=np.float32),
np.random.randn(2, 2, 3).astype(np.float32)]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = tf.nn.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchUnivariate(self):
with self.test_session() as sess:
dist = make_univariate_mixture(batch_shape=[2, 3], num_components=2)
for x in [np.random.randn(2, 3).astype(np.float32),
np.random.randn(4, 2, 3).astype(np.float32)]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = tf.nn.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(
c_p_value * d_p_value
for (c_p_value, d_p_value)
in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchMultivariate(self):
with self.test_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=2, event_shape=[4])
for x in [np.random.randn(2, 3, 4).astype(np.float32),
np.random.randn(4, 2, 3, 4).astype(np.float32)]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = tf.nn.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(
c_p_value * d_p_value for (c_p_value, d_p_value)
in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testSampleScalarBatchUnivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[], num_components=num_components)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample_n(n, seed=123)
self.assertEqual(samples.dtype, tf.float32)
self.assertEqual((4,), samples.get_shape())
cat_samples = dist.cat.sample_n(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4,), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch univariate case: batch_size == 1, rank 1
which_dist_samples = dist_sample_values[c][:size_c]
self.assertAllClose(which_dist_samples, sample_values[which_c])
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
# 5 component mixture.
logits = [-10.0, -5.0, 0.0, 5.0, 10.0]
mus = [-5.0, 0.0, 5.0, 4.0, 20.0]
sigmas = [0.1, 5.0, 3.0, 0.2, 4.0]
with self.test_session():
n = 100
tf.set_random_seed(654321)
components = [distributions_py.Normal(
mu=mu, sigma=sigma) for mu, sigma in zip(mus, sigmas)]
cat = distributions_py.Categorical(logits, dtype=tf.int32, name="cat1")
dist1 = distributions_py.Mixture(cat, components, name="mixture1")
samples1 = dist1.sample_n(n, seed=123456).eval()
tf.set_random_seed(654321)
components2 = [distributions_py.Normal(
mu=mu, sigma=sigma) for mu, sigma in zip(mus, sigmas)]
cat2 = distributions_py.Categorical(logits, dtype=tf.int32, name="cat2")
dist2 = distributions_py.Mixture(cat2, components2, name="mixture2")
samples2 = dist2.sample_n(n, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testSampleScalarBatchMultivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_multivariate_mixture(
batch_shape=[], num_components=num_components, event_shape=[2])
n = 4
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample_n(n, seed=123)
self.assertEqual(samples.dtype, tf.float32)
self.assertEqual((4, 2), samples.get_shape())
cat_samples = dist.cat.sample_n(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch multivariate case: batch_size == 1, rank 2
which_dist_samples = dist_sample_values[c][:size_c, :]
self.assertAllClose(which_dist_samples, sample_values[which_c, :])
def testSampleBatchUnivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[2, 3], num_components=num_components)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample_n(n, seed=123)
self.assertEqual(samples.dtype, tf.float32)
self.assertEqual((4, 2, 3), samples.get_shape())
cat_samples = dist.cat.sample_n(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2, 3), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
# Batch univariate case: batch_size == [2, 3], rank 3
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1])
def testSampleBatchMultivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=num_components, event_shape=[4])
n = 5
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample_n(n, seed=123)
self.assertEqual(samples.dtype, tf.float32)
self.assertEqual((5, 2, 3, 4), samples.get_shape())
cat_samples = dist.cat.sample_n(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((5, 2, 3, 4), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
        # Batch multivariate case: batch_size == [2, 3], rank 4 (multivariate)
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1, :]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1, :])
def testEntropyLowerBoundMultivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,))
entropy_lower_bound = dist.entropy_lower_bound()
self.assertEqual(batch_shape, entropy_lower_bound.get_shape())
cat_probs = tf.nn.softmax(dist.cat.logits)
dist_entropy = [d.entropy() for d in dist.components]
entropy_lower_bound_value, cat_probs_value, dist_entropy_value = (
sess.run([entropy_lower_bound, cat_probs, dist_entropy]))
self.assertEqual(batch_shape, entropy_lower_bound_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# entropy_lower_bound = sum_i pi_i entropy_i
# for i in num_components, batchwise.
true_entropy_lower_bound = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_entropy_value)])
self.assertAllClose(true_entropy_lower_bound, entropy_lower_bound_value)
class MixtureBenchmark(tf.test.Benchmark):
def _runSamplingBenchmark(self, name,
create_distribution, use_gpu, num_components,
batch_size, num_features, sample_size):
config = tf.ConfigProto()
config.allow_soft_placement = True
np.random.seed(127)
with tf.Session(config=config, graph=tf.Graph()) as sess:
tf.set_random_seed(0)
with tf.device("/gpu:0" if use_gpu else "/cpu:0"):
mixture = create_distribution(
num_components=num_components,
batch_size=batch_size,
num_features=num_features)
sample_op = mixture.sample(sample_size).op
sess.run(tf.global_variables_initializer())
reported = self.run_op_benchmark(
sess, sample_op,
min_iters=10,
name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d"
% (name, use_gpu, num_components,
batch_size, num_features, sample_size)))
print("\t".join(["%s", "%d", "%d", "%d", "%d", "%g"])
% (use_gpu, num_components, batch_size,
num_features, sample_size, reported["wall_time"]))
def benchmarkSamplingMVNDiag(self):
print("mvn_diag\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def create_distribution(batch_size, num_components, num_features):
cat = distributions_py.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
tf.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)]
sigmas = [
tf.Variable(np.random.rand(batch_size, num_features))
for _ in range(num_components)]
components = list(
distributions_py.MultivariateNormalDiag(mu=mu, diag_stdev=sigma)
for (mu, sigma) in zip(mus, sigmas))
return distributions_py.Mixture(cat, components)
for use_gpu in False, True:
if use_gpu and not tf.test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_diag", create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
def benchmarkSamplingMVNFull(self):
print("mvn_full\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def psd(x):
"""Construct batch-wise PSD matrices."""
return np.stack([np.dot(np.transpose(z), z) for z in x])
def create_distribution(batch_size, num_components, num_features):
cat = distributions_py.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
tf.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)]
sigmas = [
tf.Variable(
psd(np.random.rand(batch_size, num_features, num_features)))
for _ in range(num_components)]
components = list(
distributions_py.MultivariateNormalFull(mu=mu, sigma=sigma)
for (mu, sigma) in zip(mus, sigmas))
return distributions_py.Mixture(cat, components)
for use_gpu in False, True:
if use_gpu and not tf.test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_full", create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
if __name__ == "__main__":
tf.test.main()
|
|
# -*- coding: utf-8 -*-
import datetime, sys, os
import xlwt
import env
import common
import htmlreport
def generate_result_xls():
wbk = xlwt.Workbook()
style_red = xlwt.easyxf('font: colour red, bold False;')
style_green = xlwt.easyxf('font: colour green, bold False;')
style_bold = xlwt.easyxf('font: colour black, bold True;')
for m in env.EXCEL_REPORT_DATA:
if m.has_key("Name"):
sheet = wbk.add_sheet(m["Name"])
sheet.write(0, 0, 'Test Case Name', style_bold)
sheet.write(0, 1, 'IE', style_bold)
sheet.write(0, 2, 'Firefox', style_bold)
sheet.write(0, 3, 'Chrome', style_bold)
sheet.col(0).width = 256 * 80
sheet.col(1).width = 256 * 20
sheet.col(2).width = 256 * 20
sheet.col(3).width = 256 * 20
i = 1
for case in m["TestCases"]:
sheet.write(i, 0, case["Name"])
if case.has_key("IE"):
if case["IE"] == "Pass":
sheet.write(i, 1, case["IE"], style_green)
if case["IE"] == "Fail":
sheet.write(i, 1, case["IE"], style_red)
if case.has_key("Firefox"):
if case["Firefox"] == "Pass":
sheet.write(i, 2, case["Firefox"], style_green)
if case["Firefox"] == "Fail":
sheet.write(i, 2, case["Firefox"], style_red)
if case.has_key("Chrome"):
if case["Chrome"] == "Pass":
sheet.write(i, 3, case["Chrome"], style_green)
if case["Chrome"] == "Fail":
sheet.write(i, 3, case["Chrome"], style_red)
i = i + 1
wbk.save(common.force_delete_file(os.path.join(env.RESULT_PATH, "result", "result.xls")))
def add_excel_report_data(list_all=[], module_name="TestModule", case_name="TestCase", browser_type="IE", result="Pass"):
for module in list_all:
if module_name == module["Name"]:
for case in module["TestCases"]:
if case_name == case["Name"]:
case[browser_type] = result
return list_all
module["TestCases"].append({"Name": case_name, browser_type: result})
return list_all
list_all.append({"Name": module_name, "TestCases": [{"Name": case_name, browser_type: result}]})
return list_all
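# Illustrative example for add_excel_report_data (hypothetical module/case names):
# successive calls grow the nested structure that generate_result_xls() consumes.
#   data = add_excel_report_data([], "Login", "test_valid_login", "Chrome", "Pass")
#   data = add_excel_report_data(data, "Login", "test_valid_login", "Firefox", "Fail")
#   # data == [{"Name": "Login",
#   #           "TestCases": [{"Name": "test_valid_login",
#   #                          "Chrome": "Pass", "Firefox": "Fail"}]}]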
def start_test(case_name):
env.threadlocal.CASE_NAME = case_name
env.threadlocal.CASE_START_TIME = datetime.datetime.now().replace(microsecond=0)
env.threadlocal.CASE_PASS = True
env.threadlocal.CASE_WARNINGS = 0
write_log(os.path.join("testcase", "%s.log" % (case_name)),
"\n************** Test Case [%s] [%s] ***************\n" %(case_name, env.threadlocal.TESTING_BROWSER))
def start_total_test():
env.threadlocal.CASE_START_TIME = ""
env.threadlocal.CASE_STOP_TIME = ""
env.threadlocal.CASE_NAME = ""
env.threadlocal.CASE_PASS = True
env.threadlocal.CASE_WARNINGS = 0
env.threadlocal.MODULE_NAME = ""
env.threadlocal.BROWSER = None
env.threadlocal.TESTING_BROWSER = ""
env.threadlocal.TESTING_BROWSERS = ""
env.TOTAL_TESTCASE_PASS = 0
env.TOTAL_TESTCASE_FAIL = 0
env.HTMLREPORT_TESTCASES[:] = []
common.delete_file_or_folder(os.path.join(env.RESULT_PATH, "result", "testcase"))
common.delete_file_or_folder(os.path.join(env.RESULT_PATH, "result", "screenshots"))
env.TOTAL_START_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print (">>>>>> [%s] => start testing...... <<<<<<" %
(
env.TOTAL_START_TIME,
)
)
htmlreport.generate_html_report([env.TOTAL_START_TIME, "N/A", "N/A", "N/A", "N/A", "N/A"], [])
def finish_total_test():
env.TOTAL_STOP_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print (">>>>>> [%s] => [%s], duration [%s], case [%s], pass [%s], fail [%s] <<<<<<" %
(
env.TOTAL_START_TIME,
env.TOTAL_STOP_TIME,
datetime.datetime.strptime(env.TOTAL_STOP_TIME, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(env.TOTAL_START_TIME, "%Y-%m-%d %H:%M:%S"),
env.TOTAL_TESTCASE_PASS + env.TOTAL_TESTCASE_FAIL,
env.TOTAL_TESTCASE_PASS,
env.TOTAL_TESTCASE_FAIL,
)
)
print (
">>>>>> [%s] => [%s]" % (env.TOTAL_START_TIME, common.get_version_info())
)
htmlreport.generate_html_report([env.TOTAL_START_TIME, env.TOTAL_STOP_TIME, datetime.datetime.strptime(env.TOTAL_STOP_TIME, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(env.TOTAL_START_TIME, "%Y-%m-%d %H:%M:%S"),
env.TOTAL_TESTCASE_PASS+env.TOTAL_TESTCASE_FAIL, env.TOTAL_TESTCASE_PASS, env.TOTAL_TESTCASE_FAIL],
env.HTMLREPORT_TESTCASES,
countdown=False)
htmlreport.save_current_report_to_repository()
htmlreport.generate_report_history()
htmlreport.generate_html_report([env.TOTAL_START_TIME, env.TOTAL_STOP_TIME, datetime.datetime.strptime(env.TOTAL_STOP_TIME, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(env.TOTAL_START_TIME, "%Y-%m-%d %H:%M:%S"),
env.TOTAL_TESTCASE_PASS+env.TOTAL_TESTCASE_FAIL, env.TOTAL_TESTCASE_PASS, env.TOTAL_TESTCASE_FAIL],
env.HTMLREPORT_TESTCASES,
countdown=True)
env.TOTAL_TESTCASE_PASS = 0
env.TOTAL_TESTCASE_FAIL = 0
env.HTMLREPORT_TESTCASES[:] = []
print ("\n")
def stop_test():
try:
env.THREAD_LOCK.acquire()
env.threadlocal.CASE_STOP_TIME = datetime.datetime.now().replace(microsecond=0)
env.TOTAL_STOP_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if env.threadlocal.CASE_WARNINGS > 0:
warning_message = ", has [%s] warning(s)!" % env.threadlocal.CASE_WARNINGS
else:
warning_message = ""
if env.threadlocal.CASE_PASS == True:
print (u"%s [Pass] => [%s] [%s] [%s] [%s]%s" %(common.stamp_datetime(),
env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
env.threadlocal.MODULE_NAME,
env.threadlocal.CASE_NAME,
env.threadlocal.TESTING_BROWSER,
warning_message
))
env.TOTAL_TESTCASE_PASS = env.TOTAL_TESTCASE_PASS + 1
env.HTMLREPORT_TESTCASES.append(["%s => %s" % (env.threadlocal.CASE_START_TIME.strftime("%m-%d %H:%M:%S"), env.threadlocal.CASE_STOP_TIME.strftime("%m-%d %H:%M:%S")),
'<a href="testcase/%s.log">[%s] - %s</a>' % (env.threadlocal.CASE_NAME, env.threadlocal.MODULE_NAME, env.threadlocal.CASE_NAME),
env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
env.threadlocal.TESTING_BROWSER,
'<td>Pass</td>'
])
else:
print (u"%s [Fail] => [%s] [%s] [%s] [%s]%s :( " %(common.stamp_datetime(),
env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
env.threadlocal.MODULE_NAME,
env.threadlocal.CASE_NAME,
env.threadlocal.TESTING_BROWSER,
warning_message
))
env.TOTAL_TESTCASE_FAIL = env.TOTAL_TESTCASE_FAIL + 1
env.HTMLREPORT_TESTCASES.append(["%s => %s" % (env.threadlocal.CASE_START_TIME.strftime("%m-%d %H:%M:%S"),env.threadlocal.CASE_STOP_TIME.strftime("%m-%d %H:%M:%S")),
'<a href="testcase/%s.log">[%s] - %s</a>' % (env.threadlocal.CASE_NAME, env.threadlocal.MODULE_NAME, env.threadlocal.CASE_NAME),
env.threadlocal.CASE_STOP_TIME - env.threadlocal.CASE_START_TIME,
env.threadlocal.TESTING_BROWSER,
'<td class="tfail"><a href="screenshots/%s">Fail</a></td>' % env.HTMLREPORT_SCREENSHOT_NAME
])
htmlreport.generate_html_report([env.TOTAL_START_TIME, env.TOTAL_STOP_TIME, datetime.datetime.strptime(env.TOTAL_STOP_TIME, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(env.TOTAL_START_TIME, "%Y-%m-%d %H:%M:%S"),
env.TOTAL_TESTCASE_PASS+env.TOTAL_TESTCASE_FAIL, env.TOTAL_TESTCASE_PASS, env.TOTAL_TESTCASE_FAIL],
env.HTMLREPORT_TESTCASES)
env.threadlocal.CASE_PASS = True
env.threadlocal.CASE_WARNINGS = 0
finally:
env.THREAD_LOCK.release()
def step_section(message):
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"\n%s Section: %s\n" %(common.stamp_datetime(), message))
def step_normal(message):
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"%s Step: %s\n" %(common.stamp_datetime(), message))
def step_pass(message):
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"%s Pass: %s\n" %(common.stamp_datetime(), message))
def step_fail(message):
screenshot_name = "Fail__%s__%s__%s.png" % (common.stamp_datetime_coherent(), env.threadlocal.CASE_NAME, env.threadlocal.TESTING_BROWSER)
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"------------ Fail [%s] -------------------\n"%common.stamp_datetime())
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"%s Fail: %s, Check ScreenShot [%s]\n" %(common.stamp_datetime(), message, screenshot_name))
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"------------ Fail [%s] --------------------------------------------\n"%common.stamp_datetime())
try:
save_screen_shot(screenshot_name)
except:
step_normal(str(sys.exc_info()))
env.HTMLREPORT_SCREENSHOT_NAME = screenshot_name
env.threadlocal.CASE_PASS = False
env.EXIT_STATUS = -1
raise AssertionError(message)
def step_warning(message):
screenshot_name = "Warning__%s__%s__%s.png" % (common.stamp_datetime_coherent(), env.threadlocal.CASE_NAME, env.threadlocal.TESTING_BROWSER)
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"------------ Warning [%s] -------------------\n"%common.stamp_datetime())
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"%s Warning: %s, Check ScreenShot [%s]\n" %(common.stamp_datetime(), message, screenshot_name))
write_log(os.path.join("testcase", "%s.log" % (env.threadlocal.CASE_NAME)),
"------------ Warning [%s] --------------------------------------------\n"%common.stamp_datetime())
try:
save_screen_shot(screenshot_name)
except:
step_normal(str(sys.exc_info()))
env.threadlocal.CASE_WARNINGS = env.threadlocal.CASE_WARNINGS + 1
def write_log(relative_path, log_message):
log_path = os.path.join(env.RESULT_PATH, "result", relative_path)
common.mkdirs(os.path.dirname(log_path))
with open(log_path, 'a') as f:
f.write(log_message)
def save_screen_shot(image_name):
image_path = os.path.join(env.RESULT_PATH, "result", "screenshots")
common.mkdirs(image_path)
env.threadlocal.BROWSER.save_screenshot(os.path.join(image_path, image_name))
def handle_error():
if env.threadlocal.CASE_PASS == False:
return
if sys.exc_info()[0] != None:
step_normal(common.exception_error())
screenshot_name = "Fail__%s__%s__%s.png" % (common.stamp_datetime_coherent(), env.threadlocal.CASE_NAME, env.threadlocal.TESTING_BROWSER)
try:
save_screen_shot(screenshot_name)
except:
step_warning(str(sys.exc_info()))
step_normal("Current step screen short [%s]" % (screenshot_name))
env.HTMLREPORT_SCREENSHOT_NAME = screenshot_name
env.threadlocal.CASE_PASS = False
env.EXIT_STATUS = -1
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A collection of ORM sqlalchemy models for Superset"""
import enum
from cron_descriptor import get_description
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from sqlalchemy import (
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Integer,
String,
Table,
Text,
)
from sqlalchemy.orm import backref, relationship
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy_utils import UUIDType
from superset.extensions import security_manager
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.helpers import AuditMixinNullable
from superset.models.slice import Slice
metadata = Model.metadata # pylint: disable=no-member
class ReportScheduleType(str, enum.Enum):
ALERT = "Alert"
REPORT = "Report"
class ReportScheduleValidatorType(str, enum.Enum):
"""Validator types for alerts"""
NOT_NULL = "not null"
OPERATOR = "operator"
class ReportRecipientType(str, enum.Enum):
EMAIL = "Email"
SLACK = "Slack"
class ReportState(str, enum.Enum):
SUCCESS = "Success"
WORKING = "Working"
ERROR = "Error"
NOOP = "Not triggered"
GRACE = "On Grace"
class ReportDataFormat(str, enum.Enum):
VISUALIZATION = "PNG"
DATA = "CSV"
TEXT = "TEXT"
class ReportCreationMethodType(str, enum.Enum):
CHARTS = "charts"
DASHBOARDS = "dashboards"
ALERTS_REPORTS = "alerts_reports"
report_schedule_user = Table(
"report_schedule_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id"), nullable=False),
Column(
"report_schedule_id", Integer, ForeignKey("report_schedule.id"), nullable=False
),
UniqueConstraint("user_id", "report_schedule_id"),
)
class ReportSchedule(Model, AuditMixinNullable):
"""
Report Schedules, supports alerts and reports
"""
__tablename__ = "report_schedule"
__table_args__ = (UniqueConstraint("name", "type"),)
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
name = Column(String(150), nullable=False)
description = Column(Text)
context_markdown = Column(Text)
active = Column(Boolean, default=True, index=True)
crontab = Column(String(1000), nullable=False)
creation_method = Column(
String(255), server_default=ReportCreationMethodType.ALERTS_REPORTS
)
timezone = Column(String(100), default="UTC", nullable=False)
report_format = Column(String(50), default=ReportDataFormat.VISUALIZATION)
sql = Column(Text())
# (Alerts/Reports) M-O to chart
chart_id = Column(Integer, ForeignKey("slices.id"), nullable=True)
chart = relationship(Slice, backref="report_schedules", foreign_keys=[chart_id])
# (Alerts/Reports) M-O to dashboard
dashboard_id = Column(Integer, ForeignKey("dashboards.id"), nullable=True)
dashboard = relationship(
Dashboard, backref="report_schedules", foreign_keys=[dashboard_id]
)
# (Alerts) M-O to database
database_id = Column(Integer, ForeignKey("dbs.id"), nullable=True)
database = relationship(Database, foreign_keys=[database_id])
owners = relationship(security_manager.user_model, secondary=report_schedule_user)
# (Alerts) Stamped last observations
last_eval_dttm = Column(DateTime)
last_state = Column(String(50), default=ReportState.NOOP)
last_value = Column(Float)
last_value_row_json = Column(Text)
# (Alerts) Observed value validation related columns
validator_type = Column(String(100))
validator_config_json = Column(Text, default="{}")
# Log retention
log_retention = Column(Integer, default=90)
# (Alerts) After a success how long to wait for a new trigger (seconds)
grace_period = Column(Integer, default=60 * 60 * 4)
# (Alerts/Reports) Unlock a possible stalled working state
working_timeout = Column(Integer, default=60 * 60 * 1)
# Store the selected dashboard tabs etc.
extra = Column(Text, default="{}")
# (Reports) When generating a screenshot, bypass the cache?
force_screenshot = Column(Boolean, default=False)
def __repr__(self) -> str:
return str(self.name)
@renders("crontab")
def crontab_humanized(self) -> str:
return get_description(self.crontab)
class ReportRecipients(Model, AuditMixinNullable):
"""
    Report Recipients, meant to support multiple notification types, e.g. Slack and email
"""
__tablename__ = "report_recipient"
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
recipient_config_json = Column(Text, default="{}")
report_schedule_id = Column(
Integer, ForeignKey("report_schedule.id"), nullable=False
)
report_schedule = relationship(
ReportSchedule,
backref=backref("recipients", cascade="all,delete,delete-orphan"),
foreign_keys=[report_schedule_id],
)
class ReportExecutionLog(Model): # pylint: disable=too-few-public-methods
"""
    Report Execution Log, holds the result of the report execution with timestamps,
    the last observation and any error messages
"""
__tablename__ = "report_execution_log"
id = Column(Integer, primary_key=True)
uuid = Column(UUIDType(binary=True))
# Timestamps
scheduled_dttm = Column(DateTime, nullable=False)
start_dttm = Column(DateTime)
end_dttm = Column(DateTime)
# (Alerts) Observed values
value = Column(Float)
value_row_json = Column(Text)
state = Column(String(50), nullable=False)
error_message = Column(Text)
report_schedule_id = Column(
Integer, ForeignKey("report_schedule.id"), nullable=False
)
report_schedule = relationship(
ReportSchedule,
backref=backref("logs", cascade="all,delete,delete-orphan"),
foreign_keys=[report_schedule_id],
)
|
|
import numpy as np
import gensim
from keras.preprocessing import sequence
from pyshm import SharedNPArray
import time
import re
import os
MAX_CHILDREN = 10
OUT_PATH = '../../'
DATA_PATH = 'cnntweets/data/rt-data-nlp4jtok/'
W2VBASE_PATH = 'cnntweets/data/emory_w2v/w2v-%d-amazon.gnsm'
# relation_index={'punct': 1,
# 'det': 2,
# 'prep': 3,
# 'nmod': 4,
# 'pobj': 5,
# 'nsubj': 6,
# 'advmod': 7,
# 'conj': 8,
# 'compound': 9,
# 'cc': 10,
# 'dobj': 11,
# 'aux': 12,
# 'poss': 13,
# 'acomp': 14,
# 'advcl': 15,
# 'relcl': 16,
# 'attr': 17,
# 'mark': 18,
# 'ccomp': 19,
# 'xcomp': 20,
# 'neg': 21,
# 'others':22,
# None: 0,
# 'root': 0}
relation_index = {'punct': 1,
'det': 2,
'prep': 3,
'nmod': 4,
'pobj': 5,
'nsubj': 6,
'advmod': 7,
'conj': 8,
'compound': 9,
'cc': 10,
'dobj': 11,
'aux': 12,
'poss': 13,
'acomp': 14,
'advcl': 15,
'others': 16,
None: 0,
}
class ParsedSentence:
def __init__(self, tsv):
self.tsv = tsv
self.n = len(tsv)
def get_token(self, node):
return self.tsv[node][1]
def get_parent_id(self, node):
parent_id = self.tsv[node][5]
return int(parent_id) - 1
def get_parant_token(self, node):
id = self.get_parent_id(node)
if id == -1:
return 'ROOT'
return self.get_token(id)
def get_relationship(self, node):
return self.tsv[node][6]
class TSVReader:
def __init__(self, filename):
self.ins = None
self.empty = False
self.open(filename)
def __exit__(self):
self.close()
def open(self, filename):
self.ins = open(filename)
return self.ins
def close(self):
self.ins.close()
def next_sentence(self):
tsv = []
self.empty = True
for line in self.ins:
self.empty = False
line = line.strip()
if line:
tsv.append(re.compile('\t').split(line))
elif tsv:
return tsv
            else:
                return None
        # A file that does not end with a blank line would otherwise lose its
        # final sentence; return whatever was accumulated once EOF is reached.
        return tsv if tsv else None
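# Hedged usage sketch: mirrors how to_treebase_data below consumes the reader.
# The path is a placeholder and the file is assumed to contain blank-line
# separated TSV sentences, one token per line.
def _example_read_sentences(path='../data/trn.new.nlp'):
    reader = TSVReader(path)
    sentences = []
    while not reader.empty:
        tsv = reader.next_sentence()
        if tsv is None:
            break
        sentences.append(ParsedSentence(tsv))
    reader.close()
    return sentences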
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
    def __exit__(self, type, value, traceback):
        if self.name:
            print('[%s]' % self.name, end=' ')
        print('Elapsed: %s' % (time.time() - self.tstart))
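# Tiny demo of the Timer context manager above; `time` is already imported at
# the top of this file.
def _example_timer():
    with Timer('sleep-demo'):
        time.sleep(0.1)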
def to_treebase_data(target, vocab, pathbase='../data/%s.new.nlp', maxlen = 60, maxchildren = MAX_CHILDREN):
filename = pathbase % target
reader = TSVReader(filename)
num = 0
token_index_list = []
children_list = []
relation_list = []
    def voca_lookup(voca, token):
        # Unknown tokens fall back to embedding row 0.
        if token in voca:
            index = voca[token].index
        else:
            index = 0
        return index
while reader.empty is False:
tsv = reader.next_sentence()
if tsv is None:
break
else:
num += 1
token_list = []
token_index_one_sample_list = []
parsed = ParsedSentence(tsv)
children_dic = {}
relation_name_dic = {}
relation_dic = {}
for i in range(parsed.n):
current_token = parsed.get_token(i)
token_list.append(current_token)
current_voca_index = voca_lookup(vocab, current_token)
token_index_one_sample_list.append(current_voca_index)
                parent_token = parsed.get_parent_token(i)
relation = parsed.get_relationship(i)
if relation not in relation_index:
relation = 'others'
if parent_token in children_dic:
children_dic[parent_token].append(current_token)
relation_name_dic[parent_token].append(relation)
relation_dic[parent_token].append(relation_index[relation])
else:
children_dic[parent_token]=[current_token]
relation_name_dic[parent_token]=[relation]
relation_dic[parent_token]=[relation_index[relation]]
children_one_sample_list = []
relation_one_sample_list = []
relation_name_list = []
for tok in token_list:
current_children = [0] * maxchildren
current_rel = [0] * maxchildren
current_rel_name = [None] * maxchildren
if tok in children_dic:
for idx, c in enumerate(children_dic[tok]):
current_voca_index = voca_lookup(vocab, c)
                        if idx >= maxchildren:
                            continue
current_children[idx] = current_voca_index
current_rel_name[idx] = relation_name_dic[tok][idx]
current_rel[idx] = relation_dic[tok][idx]
c = ' | '.join('{}({},{})'.format(*t) for t in zip(children_dic[tok], relation_name_dic[tok], relation_dic[tok]))
# print '[%s]: %s' % (tok, c)
# else:
# print '[%s]' % tok
children_one_sample_list.append(current_children)
relation_name_list.append(current_rel_name)
relation_one_sample_list.append(current_rel)
# print token_index_one_sample_list
# print children_one_sample_list
# print relation_one_sample_list
padded_token_index_one_sample_list = [0] * (maxlen - len(token_index_one_sample_list)) + token_index_one_sample_list
token_index_list.append(padded_token_index_one_sample_list)
padded_children_one_sample_list = [[0] * maxchildren] * (maxlen - len(children_one_sample_list)) + children_one_sample_list
children_list.append(padded_children_one_sample_list)
padded_relation_one_sample_list = [[0] * maxchildren] * (maxlen - len(relation_one_sample_list)) + relation_one_sample_list
relation_list.append(padded_relation_one_sample_list)
# if num_example == num:
# break
return np.array(token_index_list), np.array(children_list), np.array(relation_list)
# return token_index_list, children_list, relation_list
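# Shape sketch (an assumption based on the padding logic above, not verified
# against the original data): with the default maxlen of 60 and no sentence
# longer than that, each returned array has one row per sentence.
def _example_treebase_shapes(vocab):
    tokens, children, relations = to_treebase_data('trn', vocab)
    # Expected: tokens -> (N, 60); children and relations -> (N, 60, MAX_CHILDREN).
    return tokens.shape, children.shape, relations.shape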
def load_data(vocab, target='trn', pathbase='../data/'):
x_text = [line.split('\t')[2] for line in open(pathbase + target, "r").readlines()]
x = []
for s in x_text:
one_doc = []
for token in s.strip().split(" "):
            try:
                one_doc.append(vocab[token].index)
            except KeyError:
                # Out-of-vocabulary tokens map to the extra index len(vocab).
                one_doc.append(len(vocab))
x.append(one_doc)
y = []
for line in open(pathbase + target, "r").readlines():
senti = line.split('\t')[1]
if senti == 'neutral':
y.append(2)
elif senti == 'positive':
y.append(3)
elif senti == 'very_positive':
y.append(4)
elif senti == 'negative':
y.append(1)
elif senti == 'very_negative':
y.append(0)
return np.array(x), np.array(y)
def get_embedding(dim, base_path=OUT_PATH):
print('Loading w2v...')
if dim==300:
W2VGSIM_DIR = 'cove/glove.840B.300d.w2vformat.gnsm'
emb_model = gensim.models.KeyedVectors.load(base_path+W2VGSIM_DIR, mmap='r')
else:
emb_model = gensim.models.KeyedVectors.load( base_path+W2VBASE_PATH %(dim), mmap='r')
print('creating w2v mat...')
word_index = emb_model.vocab
embedding_matrix = np.zeros((len(word_index) + 1, dim), dtype=np.float32)
for word, i in word_index.items():
embedding_vector = emb_model[word]
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i.index] = embedding_vector
return embedding_matrix, emb_model.vocab
def load_all(dim, maxlen, source='file'):
if source=='shm':
if dim == 50:
max_features = 2665792
embedding = SharedNPArray(shape=(max_features, 50), dtype=np.float32, tag='embedding_%d' % dim,
create=False)
elif dim == 300:
max_features = 2196016
embedding = SharedNPArray(shape=(max_features, 300), dtype=np.float32, tag='embedding_%d' % dim,
create=False)
else: # dim==400
max_features = 2665792
embedding = SharedNPArray(shape=(max_features, 400), dtype=np.float32, tag='embedding_%d' % dim,
create=False)
x_trn = SharedNPArray(shape=(8544, 60), dtype=np.int32, tag='x_trn_%d' % dim, create=False)
y_trn = SharedNPArray(shape=(8544,), dtype=np.int64, tag='y_trn_%d' % dim, create=False)
x_dev = SharedNPArray(shape=(1101, 60), dtype=np.int32, tag='x_dev_%d' % dim, create=False)
y_dev = SharedNPArray(shape=(1101,), dtype=np.int64, tag='y_dev_%d' % dim, create=False)
x_tst = SharedNPArray(shape=(2210, 60), dtype=np.int32, tag='x_tst_%d' % dim, create=False)
y_tst = SharedNPArray(shape=(2210,), dtype=np.int64, tag='y_tst_%d' % dim, create=False)
    else:  # source == 'file'
if dim == 50:
max_features = 2665792
elif dim == 300:
max_features = 2196016
else: # dim==400
max_features = 2665792
embedding, vocab = get_embedding(dim)
(x_trn, y_trn) = load_data(vocab, target='trn')
(x_dev, y_dev) = load_data(vocab, target='dev')
(x_tst, y_tst) = load_data(vocab, target='tst')
x_trn = sequence.pad_sequences(x_trn, maxlen=maxlen)
x_dev = sequence.pad_sequences(x_dev, maxlen=maxlen)
x_tst = sequence.pad_sequences(x_tst, maxlen=maxlen)
return (x_trn, y_trn), (x_dev, y_dev), (x_tst, y_tst), embedding, max_features
def load_tbdata(dim, source='file'):
if source=='shm':
if dim == 50:
embedding = SharedNPArray(shape=(2665792, 50), dtype=np.float32, tag='tb_embedding_%d' % dim, create=False)
else: # dim==400
embedding = SharedNPArray(shape=(2665792, 400), dtype=np.float32, tag='tb_embedding_%d' % dim, create=False)
tokens_etrn = SharedNPArray(shape=(165361, 60), dtype=np.int64, tag='tb_tokens_etrn_%d' % dim, create=False)
children_etrn = SharedNPArray(shape=(165361, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_children_etrn_%d' % dim,
create=False)
rels_etrn = SharedNPArray(shape=(165361, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_relations_etrn_%d' % dim,
create=False)
y_etrn = SharedNPArray(shape=(165361,), dtype=np.int64, tag='tb_y_etrn_%d' % dim, create=False)
tokens_trn = SharedNPArray(shape=(8544, 60), dtype=np.int64, tag='tb_tokens_trn_%d' % dim, create=False)
children_trn = SharedNPArray(shape=(8544, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_children_trn_%d' % dim,
create=False)
rels_trn = SharedNPArray(shape=(8544, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_relations_trn_%d' % dim,
create=False)
y_trn = SharedNPArray(shape=(8544,), dtype=np.int64, tag='tb_y_trn_%d' % dim, create=False)
tokens_dev = SharedNPArray(shape=(1101, 60), dtype=np.int64, tag='tb_tokens_dev_%d' % dim, create=False)
children_dev = SharedNPArray(shape=(1101, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_children_dev_%d' % dim, create=False)
rels_dev = SharedNPArray(shape=(1101, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_relations_dev_%d' % dim, create=False)
y_dev = SharedNPArray(shape=(1101,), dtype=np.int64, tag='tb_y_dev_%d' % dim, create=False)
tokens_tst = SharedNPArray(shape=(2210, 60), dtype=np.int64, tag='tb_tokens_tst_%d' % dim, create=False)
children_tst = SharedNPArray(shape=(2210, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_children_tst_%d' % dim, create=False)
rels_tst = SharedNPArray(shape=(2210, 60, MAX_CHILDREN), dtype=np.int64, tag='tb_relations_tst_%d' % dim, create=False)
y_tst = SharedNPArray(shape=(2210,), dtype=np.int64, tag='tb_y_tst_%d' % dim, create=False)
max_features = 2665792
    else:  # source == 'file'
embedding, vocab = get_embedding(dim)
tokens_etrn, children_etrn, rels_etrn = to_treebase_data('ext_trn', vocab, pathbase='../data/%s.new.nlp')
(_, y_etrn) = load_data(vocab, target='ext_trn', pathbase='../data/')
tokens_trn, children_trn, rels_trn = to_treebase_data('trn', vocab, pathbase='../data/%s.new.nlp')
(_, y_trn) = load_data(vocab, target='trn', pathbase='../data/')
tokens_dev, children_dev, rels_dev = to_treebase_data('dev', vocab, pathbase='../data/%s.new.nlp')
(_, y_dev) = load_data(vocab, target='dev', pathbase='../data/')
tokens_tst, children_tst, rels_tst = to_treebase_data('tst', vocab, pathbase='../data/%s.new.nlp')
(_, y_tst) = load_data(vocab, target='tst', pathbase='../data/')
max_features = 2665792
return (tokens_etrn, children_etrn, rels_etrn, y_etrn), \
(tokens_trn, children_trn, rels_trn, y_trn), \
(tokens_dev, children_dev, rels_dev, y_dev), \
(tokens_tst, children_tst, rels_tst, y_tst), embedding, max_features
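# Hedged end-to-end sketch (not part of the original script): loading from
# files requires the gensim embeddings and data splits referenced by the path
# constants above, so this is illustrative only.
def _example_load_all():
    (x_trn, y_trn), (x_dev, y_dev), (x_tst, y_tst), embedding, max_features = load_all(
        dim=50, maxlen=60, source='file')
    print('train/dev/test sizes: %d/%d/%d, max_features: %d'
          % (len(x_trn), len(x_dev), len(x_tst), max_features))
    return embedding.shape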
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.delete_column('auth_user', 'last_name')
def backwards(self, orm):
# Adding field 'User.last_name'
db.add_column('auth_user', 'last_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 12, 15, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']", 'null': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'),)"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run Config (deprecated, use tf.estimator.RunConfig instead).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as core_run_config
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util.deprecation import deprecated
# A list of the property names in RunConfig that the user is allowed to
# change. They do not affect the execution framework, so when the execution
# framework checks the `uid` of the RunConfig, they should be ignored.
_DEFAULT_UID_WHITE_LIST = [
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
'log_step_count_steps',
]
class Environment(object):
"""DEPRECATED CLASS."""
# For running general distributed training.
CLOUD = 'cloud'
# For running Google-internal distributed training.
GOOGLE = 'google'
# For running on local desktop.
LOCAL = 'local'
class TaskType(object):
"""DEPRECATED CLASS."""
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class ClusterConfig(object):
"""This class specifies the configurations for a distributed run.
THIS CLASS IS DEPRECATED. Use tf.estimator.RunConfig instead.
If you're using an `Estimator`, you should probably use the subclass
RunConfig instead.
"""
def __init__(self, master=None, evaluation_master=None):
"""Constructor.
Sets the properties `cluster_spec`, `is_chief`, `master` (if `None` in the
args), `num_ps_replicas`, `task_id`, and `task_type` based on the
`TF_CONFIG` environment variable, if the pertinent information is
present. The `TF_CONFIG` environment variable is a JSON object with
attributes: `cluster`, `environment`, and `task`.
`cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from
`server_lib.py`, mapping task types (usually one of the TaskType enums) to a
list of task addresses.
`environment` specifies the runtime environment for the job (usually one of
the `Environment` enums). Defaults to `LOCAL`.
`task` has two attributes: `type` and `index`, where `type` can be any of
the task types in `cluster`. When `TF_CONFIG` contains said information, the
following properties are set on this class:
* `task_type` is set to `TF_CONFIG['task']['type']`. Defaults to `None`.
* `task_id` is set to `TF_CONFIG['task']['index']`. Defaults to 0.
* `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}.
* `master` is determined by looking up `task_type` and `task_id` in the
`cluster_spec`. Defaults to ''.
* `num_ps_replicas` is set by counting the number of nodes listed
in the `ps` attribute of `cluster_spec`. Defaults to 0.
* `num_worker_replicas` is set by counting the number of nodes listed
in the `worker` attribute of `cluster_spec`. Defaults to 0.
    * `is_chief` is determined based on `task_type`, `task_id`, and
`environment`.
Example:
```
cluster = {'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'worker', 'index': 1}})
config = ClusterConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 3
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'worker'
assert not config.is_chief
```
Args:
master: TensorFlow master. Defaults to empty string for local.
evaluation_master: The master on which to perform evaluation.
"""
# If not explicitly specified in the constructor and the TF_CONFIG
# environment variable is present, load cluster_spec from TF_CONFIG.
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
# Set task_type and task_id if the TF_CONFIG environment variable is
# present. Otherwise, use the respective default (None / 0).
task_env = config.get('task', {})
self._task_type = task_env.get('type', None)
self._task_id = self.get_task_id()
self._cluster_spec = server_lib.ClusterSpec(config.get('cluster', {}))
self._master = (master if master is not None else
_get_master(self._cluster_spec, self._task_type,
self._task_id) or '')
self._num_ps_replicas = _count_ps(self._cluster_spec) or 0
self._num_worker_replicas = _count_worker(self._cluster_spec) or 0
# Set is_chief.
self._environment = config.get('environment', Environment.LOCAL)
self._is_chief = None
if self._task_type is None:
self._is_chief = (self._task_id == 0)
elif self._environment == Environment.CLOUD:
      # When the TF_CONFIG environment variable is set, is_chief defaults to
      # True when task_type is "master" and task_id is 0.
self._is_chief = (self._task_type == TaskType.MASTER and
self._task_id == 0)
else:
      # Legacy behavior: the worker with task_id == 0 acts as chief.
self._is_chief = (self._task_type == TaskType.WORKER and
self._task_id == 0)
self._evaluation_master = evaluation_master or ''
@property
def cluster_spec(self):
return self._cluster_spec
@property
def environment(self):
return self._environment
@property
def evaluation_master(self):
return self._evaluation_master
@property
def is_chief(self):
return self._is_chief
@property
def master(self):
return self._master
@property
def num_ps_replicas(self):
return self._num_ps_replicas
@property
def num_worker_replicas(self):
return self._num_worker_replicas
@property
def task_id(self):
return self._task_id
@property
def task_type(self):
return self._task_type
@staticmethod
def get_task_id():
"""Returns task index from `TF_CONFIG` environmental variable.
If you have a ClusterConfig instance, you can just access its task_id
    property instead of calling this function and re-parsing the environment
    variable.
Returns:
`TF_CONFIG['task']['index']`. Defaults to 0.
"""
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
task_env = config.get('task', {})
task_index = task_env.get('index')
return int(task_index) if task_index else 0
class RunConfig(ClusterConfig, core_run_config.RunConfig):
"""This class specifies the configurations for an `Estimator` run.
This class is a deprecated implementation of `tf.estimator.RunConfig`
interface.
"""
_USE_DEFAULT = 0
@deprecated(None, 'When switching to tf.estimator.Estimator, use'
' tf.estimator.RunConfig instead.')
def __init__(self,
master=None,
num_cores=0,
log_device_placement=False,
gpu_memory_fraction=1,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_secs=_USE_DEFAULT,
save_checkpoints_steps=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
protocol=None,
evaluation_master='',
model_dir=None,
session_config=None,
session_creation_timeout_secs=7200):
"""Constructor.
The superclass `ClusterConfig` may set properties like `cluster_spec`,
`is_chief`, `master` (if `None` in the args), `num_ps_replicas`, `task_id`,
and `task_type` based on the `TF_CONFIG` environment variable. See
`ClusterConfig` for more details.
N.B.: If `save_checkpoints_steps` or `save_checkpoints_secs` is set,
`keep_checkpoint_max` might need to be adjusted accordingly, especially in
    distributed training. For example, setting `save_checkpoints_secs` to 60
    without adjusting `keep_checkpoint_max` (defaults to 5) leads to a situation
    where checkpoints would be garbage collected after 5 minutes. In distributed
    training, the evaluation job starts asynchronously and might fail to load or
    find the checkpoint due to a race condition.
Args:
master: TensorFlow master. Defaults to empty string for local.
num_cores: Number of cores to be used. If 0, the system picks an
appropriate number (default: 0).
log_device_placement: Log the op placement to devices (default: False).
gpu_memory_fraction: Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
save_checkpoints_secs: Save checkpoints every this many seconds. Can not
be specified with `save_checkpoints_steps`.
save_checkpoints_steps: Save checkpoints every this many steps. Can not be
specified with `save_checkpoints_secs`.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec will be logged during training.
protocol: An optional argument which specifies the protocol used when
starting server. None means default to grpc.
evaluation_master: the master on which to perform evaluation.
model_dir: directory where model parameters, graph etc are saved. If
`None`, will use `model_dir` property in `TF_CONFIG` environment
variable. If both are set, must have same value. If both are `None`, see
`Estimator` about where the model will be saved.
session_config: a ConfigProto used to set session parameters, or None.
Note - using this argument, it is easy to provide settings which break
otherwise perfectly good models. Use with care.
session_creation_timeout_secs: Max time workers should wait for a session
to become available (on initialization or when recovering a session)
with MonitoredTrainingSession. Defaults to 7200 seconds, but users may
want to set a lower value to detect problems with variable / session
(re)-initialization more quickly.
"""
# Neither parent class calls super().__init__(), so here we have to
# manually call their __init__() methods.
ClusterConfig.__init__(
self, master=master, evaluation_master=evaluation_master)
# For too long this code didn't call:
# core_run_config.RunConfig.__init__(self)
# so instead of breaking compatibility with that assumption, we
# just manually initialize these fields:
self._train_distribute = None
self._eval_distribute = None
self._experimental_max_worker_delay_secs = None
self._device_fn = None
gpu_options = config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
self._tf_config = config_pb2.ConfigProto(
log_device_placement=log_device_placement,
inter_op_parallelism_threads=num_cores,
intra_op_parallelism_threads=num_cores,
gpu_options=gpu_options)
self._tf_random_seed = tf_random_seed
self._save_summary_steps = save_summary_steps
self._save_checkpoints_secs = save_checkpoints_secs
self._log_step_count_steps = log_step_count_steps
self._protocol = protocol
self._session_config = session_config
if save_checkpoints_secs == RunConfig._USE_DEFAULT:
if save_checkpoints_steps is None:
self._save_checkpoints_secs = 600
else:
self._save_checkpoints_secs = None
self._save_checkpoints_steps = save_checkpoints_steps
# TODO(weiho): Remove these after ModelFn refactoring, when users can
# create Scaffold and Saver in their model_fn to set these.
self._keep_checkpoint_max = keep_checkpoint_max
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._model_dir = _get_model_dir(model_dir)
self._session_creation_timeout_secs = session_creation_timeout_secs
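# Illustrative note (not part of the original source): per the N.B. in the
# constructor docstring above, a config that checkpoints every 60 seconds
# would typically also raise keep_checkpoint_max, e.g.
#   RunConfig(save_checkpoints_secs=60, keep_checkpoint_max=30)
# (hypothetical values), so checkpoints survive long enough for a slow
# evaluation job to pick them up.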
@experimental
def uid(self, whitelist=None):
"""Generates a 'Unique Identifier' based on all internal fields.
Caller should use the uid string to check `RunConfig` instance integrity
in one session use, but should not rely on the implementation details, which
are subject to change.
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
includes most properties users are allowed to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = _DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
# For class instances without __repr__, some special care is required.
# Otherwise, the object address would be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = collections.OrderedDict(
sorted(ordered_state['_cluster_spec'].as_dict().items(),
key=lambda t: t[0]))
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
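# Example (illustrative only): config.uid(whitelist=['tf_random_seed'])
# excludes `_tf_random_seed` from the returned string, since `whitelist`
# names the properties that should *not* contribute to the uid.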
@property
def model_dir(self):
return self._model_dir
@property
def tf_config(self):
return self._tf_config
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def session_config(self):
return self._session_config
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
@property
def log_step_count_steps(self):
return self._log_step_count_steps
@property
def session_creation_timeout_secs(self):
return self._session_creation_timeout_secs
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0
def _count_worker(cluster_spec):
"""Counts the number of workers in cluster_spec.
Workers with TaskType.WORKER and TaskType.MASTER are included in the return
value.
Args:
cluster_spec: a ClusterSpec instance that describes current deployment.
Returns:
The total number of eligible workers.
If 'cluster_spec' was None, then 0 is returned.
"""
return (len(cluster_spec.as_dict().get('worker', [])) +
len(cluster_spec.as_dict().get('master', []))) if cluster_spec else 0
def _get_master(cluster_spec, task_type, task_id):
"""Returns the appropriate string for the TensorFlow master."""
if not cluster_spec:
return ''
# If there is only one node in the cluster, do things locally.
jobs = cluster_spec.jobs
if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:
return ''
# Lookup the master in cluster_spec using task_type and task_id,
# if possible.
if task_type:
if task_type not in jobs:
raise ValueError(
'%s is not a valid task_type in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_type, cluster_spec))
addresses = cluster_spec.job_tasks(task_type)
if task_id >= len(addresses) or task_id < 0:
raise ValueError(
'%d is not a valid task_id for task_type %s in the '
'cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_id, task_type, cluster_spec))
return 'grpc://' + addresses[task_id]
# For backwards compatibility, we return empty string if task_type was
# not set (task_type did not previously exist).
return ''
def _get_model_dir(model_dir):
"""Returns `model_dir` based on the user-provided `model_dir` or `TF_CONFIG`."""
model_dir_in_tf_config = json.loads(
os.environ.get('TF_CONFIG') or '{}').get('model_dir', None)
if model_dir_in_tf_config is not None:
if model_dir is not None and model_dir_in_tf_config != model_dir:
raise ValueError(
'`model_dir` provided in RunConfig construct, if set, '
'must have the same value as the model_dir in TF_CONFIG. '
'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format(
model_dir, model_dir_in_tf_config))
logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)
return model_dir or model_dir_in_tf_config
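# Illustrative behaviour (not from the original source): with
# TF_CONFIG='{"model_dir": "/tmp/m"}' (hypothetical path), model_dir=None
# returns '/tmp/m', model_dir='/tmp/m' also returns '/tmp/m', and any other
# model_dir value raises ValueError.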
|
|
#!/usr/bin/env python
'''
This module contains helper functions to make plugins simpler to read and write,
centralising common functionality so that it is easy to reuse
'''
import os
import re
import cgi
import logging
from tornado.template import Template
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.lib.exceptions import FrameworkAbortException, PluginAbortException
from framework.lib.general import *
from framework.utils import FileOperations
PLUGIN_OUTPUT = {"type": None, "output": None} # This will be json encoded and stored in db as string
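# Illustrative example of a filled entry (not from the original source), as
# produced by e.g. CommandTable("ls -la") below:
#   {"type": "CommandTable", "output": {"Command": "ls -la"}}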
class PluginHelper(BaseComponent):
COMPONENT_NAME = "plugin_helper"
mNumLinesToShow = 25
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.target = self.get_component("target")
self.url_manager = self.get_component("url_manager")
self.plugin_handler = self.get_component("plugin_handler")
self.reporter = self.get_component("reporter")
self.requester = self.get_component("requester")
self.shell = self.get_component("shell")
self.timer = self.get_component("timer")
# Compile regular expressions only once on init:
self.RobotsAllowRegexp = re.compile("Allow: ([^\n #]+)")
self.RobotsDisallowRegexp = re.compile("Disallow: ([^\n #]+)")
self.RobotsSiteMap = re.compile("Sitemap: ([^\n #]+)")
def MultipleReplace(self, Text, ReplaceDict): # This redundant method is here so that plugins can use it
return MultipleReplace(Text, ReplaceDict)
def CommandTable(self, Command):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "CommandTable"
plugin_output["output"] = {"Command": Command}
return ([plugin_output])
def LinkList(self, LinkListName, Links):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "LinkList"
plugin_output["output"] = {"LinkListName": LinkListName, "Links": Links}
return ([plugin_output])
def ResourceLinkList(self, ResourceListName, ResourceList):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "ResourceLinkList"
plugin_output["output"] = {"ResourceListName": ResourceListName, "ResourceList": ResourceList}
return ([plugin_output])
def TabbedResourceLinkList(self, ResourcesList):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "TabbedResourceLinkList"
plugin_output["output"] = {"ResourcesList": ResourcesList}
return ([plugin_output])
def ListPostProcessing(self, ResourceListName, LinkList, HTMLLinkList):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "ListPostProcessing"
plugin_output["output"] = {
"ResourceListName": ResourceListName,
"LinkList": LinkList,
"HTMLLinkList": HTMLLinkList
}
return ([plugin_output])
def RequestLinkList(self, ResourceListName, ResourceList, PluginInfo):
LinkList = []
for Name, Resource in ResourceList:
Chunks = Resource.split('###POST###')
URL = Chunks[0]
POST = None
Method = 'GET'
if len(Chunks) > 1: # POST
Method = 'POST'
POST = Chunks[1]
Transaction = self.requester.GetTransaction(True, URL, Method, POST)
if Transaction.Found:
RawHTML = Transaction.GetRawResponseBody()
FilteredHTML = self.reporter.sanitize_html(RawHTML)
NotSandboxedPath = self.plugin_handler.DumpOutputFile("NOT_SANDBOXED_%s.html" % Name, FilteredHTML,
PluginInfo)
logging.info("File: NOT_SANDBOXED_%s.html saved to: %s", Name, NotSandboxedPath)
iframe_template = Template("""
<iframe src="{{ NotSandboxedPath }}" sandbox="" security="restricted" frameborder='0'
style="overflow-y:auto; overflow-x:hidden;width:100%;height:100%;" >
Your browser does not support iframes
</iframe>
""")
iframe = iframe_template.generate(NotSandboxedPath=NotSandboxedPath.split('/')[-1])
SandboxedPath = self.plugin_handler.DumpOutputFile("SANDBOXED_%s.html" % Name, iframe, PluginInfo)
logging.info("File: SANDBOXED_%s.html saved to: %s", Name, SandboxedPath)
LinkList.append((Name, SandboxedPath))
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "RequestLinkList"
plugin_output["output"] = {"ResourceListName": ResourceListName, "LinkList": LinkList}
return ([plugin_output])
def VulnerabilitySearchBox(self, SearchStr):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "VulnerabilitySearchBox"
plugin_output["output"] = {"SearchStr": SearchStr}
return ([plugin_output])
def SuggestedCommandBox(self, PluginInfo, CommandCategoryList, Header=''):
plugin_output = dict(PLUGIN_OUTPUT)
PluginOutputDir = self.InitPluginOutputDir(PluginInfo)
plugin_output["type"] = "SuggestedCommandBox"
plugin_output["output"] = {
"PluginOutputDir": PluginOutputDir,
"CommandCategoryList": CommandCategoryList,
"Header": Header
}
return ([plugin_output])
def SetConfigPluginOutputDir(self, PluginInfo):
PluginOutputDir = self.plugin_handler.GetPluginOutputDir(PluginInfo)
# FULL output path for plugins to use
self.target.SetPath('plugin_output_dir', "%s/%s" % (os.getcwd(), PluginOutputDir))
self.shell.RefreshReplacements() # Get dynamic replacement, i.e. plugin-specific output directory
return PluginOutputDir
def InitPluginOutputDir(self, PluginInfo):
PluginOutputDir = self.SetConfigPluginOutputDir(PluginInfo)
FileOperations.create_missing_dirs(PluginOutputDir) # Create output dir so that scripts can cd to it :)
return PluginOutputDir
def RunCommand(self, Command, PluginInfo, PluginOutputDir):
FrameworkAbort = PluginAbort = False
if not PluginOutputDir:
PluginOutputDir = self.InitPluginOutputDir(PluginInfo)
self.timer.start_timer('FormatCommandAndOutput')
ModifiedCommand = self.shell.GetModifiedShellCommand(Command, PluginOutputDir)
try:
RawOutput = self.shell.shell_exec_monitor(ModifiedCommand, PluginInfo)
except PluginAbortException, PartialOutput:
RawOutput = str(PartialOutput.parameter) # Save Partial Output
PluginAbort = True
except FrameworkAbortException, PartialOutput:
RawOutput = str(PartialOutput.parameter) # Save Partial Output
FrameworkAbort = True
TimeStr = self.timer.get_elapsed_time_as_str('FormatCommandAndOutput')
logging.info("Time=%s", TimeStr)
return [ModifiedCommand, FrameworkAbort, PluginAbort, TimeStr, RawOutput, PluginOutputDir]
def GetCommandOutputFileNameAndExtension(self, InputName):
OutputName = InputName
OutputExtension = "txt"
if InputName.split('.')[-1] in ['html']:
OutputName = InputName[0:-5]
OutputExtension = "html"
return [OutputName, OutputExtension]
def EscapeSnippet(self, Snippet, Extension):
if Extension == "html": # HTML
return str(Snippet)
return cgi.escape(str(Snippet)) # Escape snippet to avoid breaking HTML
def CommandDump(self, CommandIntro, OutputIntro, ResourceList, PluginInfo, PreviousOutput):
output_list = []
PluginOutputDir = self.InitPluginOutputDir(PluginInfo)
for Name, Command in ResourceList:
dump_file_name = "%s.txt" % os.path.splitext(Name)[0] # Add txt extension to avoid wrong mimetypes
plugin_output = dict(PLUGIN_OUTPUT)
ModifiedCommand, FrameworkAbort, PluginAbort, TimeStr, RawOutput, PluginOutputDir = self.RunCommand(Command,
PluginInfo, PluginOutputDir)
plugin_output["type"] = "CommandDump"
plugin_output["output"] = {
"Name": self.GetCommandOutputFileNameAndExtension(Name)[0],
"CommandIntro": CommandIntro,
"ModifiedCommand": ModifiedCommand,
"RelativeFilePath": self.plugin_handler.DumpOutputFile(dump_file_name, RawOutput, PluginInfo,
RelativePath=True),
"OutputIntro": OutputIntro,
"TimeStr": TimeStr
}
plugin_output = [plugin_output]
# This command returns URLs for processing
if Name == self.config.FrameworkConfigGet('EXTRACT_URLS_RESERVED_RESOURCE_NAME'):
# The plugin_output output dict will be remade if the resource is of this type
plugin_output = self.LogURLsFromStr(RawOutput)
# TODO: Look below to handle streaming report
if PluginAbort: # Pass partial output to external handler:
raise PluginAbortException(PreviousOutput + plugin_output)
if FrameworkAbort:
raise FrameworkAbortException(PreviousOutput + plugin_output)
output_list += plugin_output
return (output_list)
def LogURLsFromStr(self, RawOutput):
plugin_output = dict(PLUGIN_OUTPUT)
self.timer.start_timer('LogURLsFromStr')
# Extract and classify URLs and store in DB
URLList = self.url_manager.ImportURLs(RawOutput.strip().split("\n"))
NumFound = 0
VisitURLs = False
# TODO: Whether or not active testing will depend on the user profile ;). Have cool ideas for profile names
if True:
VisitURLs = True
# Visit all URLs if not in Cache
for Transaction in self.requester.GetTransactions(True, self.url_manager.GetURLsToVisit()):
if Transaction is not None and Transaction.Found:
NumFound += 1
TimeStr = self.timer.get_elapsed_time_as_str('LogURLsFromStr')
logging.info("Spider/URL scraper time=%s", TimeStr)
plugin_output["type"] = "URLsFromStr"
plugin_output["output"] = {"TimeStr": TimeStr, "VisitURLs": VisitURLs, "URLList": URLList, "NumFound": NumFound}
return ([plugin_output])
def DumpFile(self, Filename, Contents, PluginInfo, LinkName=''):
save_path = self.plugin_handler.DumpOutputFile(Filename, Contents, PluginInfo)
if not LinkName:
LinkName = save_path
logging.info("File: %s saved to: %s", Filename, save_path)
template = Template("""
<a href="{{ Link }}" target="_blank">
{{ LinkName }}
</a>
""")
return [save_path, template.generate(LinkName=LinkName, Link="../../../%s" % save_path)]
def DumpFileGetLink(self, Filename, Contents, PluginInfo, LinkName=''):
return self.DumpFile(Filename, Contents, PluginInfo, LinkName)[1]
def AnalyseRobotsEntries(self, Contents): # Find the entries of each kind and count them
num_lines = len(Contents.split("\n")) # Total number of robots.txt entries
AllowedEntries = list(set(self.RobotsAllowRegexp.findall(Contents))) # list(set()) is to avoid repeated entries
num_allow = len(AllowedEntries) # Number of lines that start with "Allow:"
DisallowedEntries = list(set(self.RobotsDisallowRegexp.findall(Contents)))
num_disallow = len(DisallowedEntries) # Number of lines that start with "Disallow:"
SitemapEntries = list(set(self.RobotsSiteMap.findall(Contents)))
num_sitemap = len(SitemapEntries) # Number of lines that start with "Sitemap:"
RobotsFound = True
if 0 == num_allow and 0 == num_disallow and 0 == num_sitemap:
RobotsFound = False
return [num_lines, AllowedEntries, num_allow, DisallowedEntries, num_disallow, SitemapEntries, num_sitemap,
RobotsFound]
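# Illustrative return value (not from the original source): for a robots.txt
# containing only "Disallow: /admin" (hypothetical content), this returns
#   [1, [], 0, ['/admin'], 1, [], 0, True]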
def ProcessRobots(self, PluginInfo, Contents, LinkStart, LinkEnd, Filename='robots.txt'):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "Robots"
num_lines, AllowedEntries, num_allow, DisallowedEntries, num_disallow, SitemapEntries, num_sitemap, NotStr = \
self.AnalyseRobotsEntries(Contents)
SavePath = self.plugin_handler.DumpOutputFile(Filename, Contents, PluginInfo, True)
TopURL = self.target.Get('top_url')
EntriesList = []
# robots.txt contains some entries, show browsable list! :)
if num_disallow > 0 or num_allow > 0 or num_sitemap > 0:
self.url_manager.AddURLsStart()
for Display, Entries in [['Disallowed Entries', DisallowedEntries], ['Allowed Entries', AllowedEntries],
['Sitemap Entries', SitemapEntries]]:
Links = [] # Initialise category-specific link list
for Entry in Entries:
if 'Sitemap Entries' == Display:
URL = Entry
self.url_manager.AddURL(URL) # Store real links in the DB
Links.append([Entry, Entry]) # Show link in defined format (passive/semi_passive)
else:
URL = TopURL + Entry
self.url_manager.AddURL(URL) # Store real links in the DB
# Show link in defined format (passive/semi_passive)
Links.append([Entry, LinkStart + Entry + LinkEnd])
EntriesList.append((Display, Links))
NumAddedURLs = self.url_manager.AddURLsEnd()
plugin_output["output"] = {
"NotStr": NotStr,
"NumLines": num_lines,
"NumAllow": num_allow,
"NumDisallow": num_disallow,
"NumSitemap": num_sitemap,
"SavePath": SavePath,
"NumAddedURLs": NumAddedURLs,
"EntriesList": EntriesList
}
return ([plugin_output])
def TransactionTable(self, transactions_list):
# Store transaction ids in the output, so that reporter can fetch transactions from db
trans_ids = []
for transaction in transactions_list:
trans_ids.append(transaction.GetID())
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "TransactionTableFromIDs"
plugin_output["output"] = {"TransactionIDs": trans_ids}
return ([plugin_output])
def TransactionTableForURLList(self, UseCache, URLList, Method=None, Data=None):
# Have to make sure that those urls are visited ;), so we
# perform get transactions but don't save the transaction ids etc..
self.requester.GetTransactions(UseCache, URLList, Method, Data)
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "TransactionTableForURLList"
plugin_output["output"] = {"UseCache": UseCache, "URLList": URLList, "Method": Method, "Data": Data}
return ([plugin_output])
def TransactionTableForURL(self, UseCache, URL, Method=None, Data=None):
# Have to make sure that those urls are visited ;),
# so we perform get transactions but don't save the transaction ids
self.requester.GetTransaction(UseCache, URL, method=Method, data=Data)
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "TransactionTableForURL"
plugin_output["output"] = {"UseCache": UseCache, "URL": URL, "Method": Method, "Data": Data}
return ([plugin_output])
def CreateMatchTables(self, Num):
TableList = []
for x in range(0, Num):
TableList.append(self.CreateMatchTable())
return TableList
def HtmlString(self, html_string):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "HtmlString"
plugin_output["output"] = {"String": html_string}
return ([plugin_output])
def FindResponseHeaderMatchesForRegexpName(self, HeaderRegexpName):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "ResponseHeaderMatches"
plugin_output["output"] = {"HeaderRegexpName": HeaderRegexpName}
return ([plugin_output])
def FindResponseHeaderMatchesForRegexpNames(self, HeaderRegexpNamesList):
Results = []
for HeaderRegexpName in HeaderRegexpNamesList:
Results += self.FindResponseHeaderMatchesForRegexpName(HeaderRegexpName)
return Results
def FindResponseBodyMatchesForRegexpName(self, ResponseRegexpName):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "ResponseBodyMatches"
plugin_output["output"] = {"ResponseRegexpName": ResponseRegexpName}
return ([plugin_output])
def FindResponseBodyMatchesForRegexpNames(self, ResponseRegexpNamesList):
Results = []
for ResponseRegexpName in ResponseRegexpNamesList:
Results += self.FindResponseBodyMatchesForRegexpName(ResponseRegexpName)
return Results
def ResearchFingerprintInlog(self):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "FingerprintData"
plugin_output["output"] = {}
return ([plugin_output])
def FindTopTransactionsBySpeed(self, Order="Desc"):
plugin_output = dict(PLUGIN_OUTPUT)
plugin_output["type"] = "TopTransactionsBySpeed"
plugin_output["output"] = {"Order": Order}
return ([plugin_output])
|
|
## Ordinary Least Squares
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
np.random.seed(9876789)
# ## OLS estimation
#
# Artificial data:
nsample = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x**2))
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsample)
# Our model needs an intercept so we add a column of 1s:
X = sm.add_constant(X)
y = np.dot(X, beta) + e
# Inspect data:
print(X[:5])
print(y[:5])
# Fit and summary:
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())
# Quantities of interest can be extracted directly from the fitted model. Type ``dir(results)`` for a full list. Here are some examples:
print('Parameters: ', results.params)
print('R2: ', results.rsquared)
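# One more accessor, shown here for illustration (a standard results
# attribute, not part of the original walkthrough):
print('Confidence intervals:\n', results.conf_int())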
# ## OLS non-linear curve but linear in parameters
#
# We simulate artificial data with a non-linear relationship between x and y:
nsample = 50
sig = 0.5
x = np.linspace(0, 20, nsample)
X = np.column_stack((x, np.sin(x), (x-5)**2, np.ones(nsample)))
beta = [0.5, 0.5, -0.02, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
# Fit and summary:
res = sm.OLS(y, X).fit()
print(res.summary())
# Extract other quantities of interest:
print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('Predicted values: ', res.predict())
# Draw a plot to compare the true relationship to OLS predictions. Confidence intervals around the predictions are built using the ``wls_prediction_std`` command.
prstd, iv_l, iv_u = wls_prediction_std(res)
fig, ax = plt.subplots()
ax.plot(x, y, 'o', label="data")
ax.plot(x, y_true, 'b-', label="True")
ax.plot(x, res.fittedvalues, 'r--.', label="OLS")
ax.plot(x, iv_u, 'r--')
ax.plot(x, iv_l, 'r--')
ax.legend(loc='best');
# ## OLS with dummy variables
#
# We generate some artificial data. There are 3 groups which will be modelled using dummy variables. Group 0 is the omitted/benchmark category.
nsample = 50
groups = np.zeros(nsample, int)
groups[20:40] = 1
groups[40:] = 2
#dummy = (groups[:,None] == np.unique(groups)).astype(float)
dummy = sm.categorical(groups, drop=True)
x = np.linspace(0, 20, nsample)
# drop reference category
X = np.column_stack((x, dummy[:,1:]))
X = sm.add_constant(X, prepend=False)
beta = [1., 3, -3, 10]
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + e
# Inspect the data:
print(X[:5,:])
print(y[:5])
print(groups)
print(dummy[:5,:])
# Fit and summary:
res2 = sm.OLS(y, X).fit()
print(res2.summary())
# Draw a plot to compare the true relationship to OLS predictions:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x, y, 'o', label="Data")
ax.plot(x, y_true, 'b-', label="True")
ax.plot(x, res2.fittedvalues, 'r--.', label="Predicted")
ax.plot(x, iv_u, 'r--')
ax.plot(x, iv_l, 'r--')
ax.legend(loc="best")
# ## Joint hypothesis test
#
# ### F test
#
# We want to test the hypothesis that both coefficients on the dummy variables are equal to zero, that is, $R \times \beta = 0$. An F test leads us to strongly reject the null hypothesis of identical constant in the 3 groups:
R = [[0, 1, 0, 0], [0, 0, 1, 0]]
print(np.array(R))
print(res2.f_test(R))
# You can also use formula-like syntax to test hypotheses
print(res2.f_test("x2 = x3 = 0"))
# ### Small group effects
#
# If we generate artificial data with smaller group effects, the F test can no longer reject the null hypothesis:
beta = [1., 0.3, -0.0, 10]
y_true = np.dot(X, beta)
y = y_true + np.random.normal(size=nsample)
res3 = sm.OLS(y, X).fit()
print(res3.f_test(R))
print(res3.f_test("x2 = x3 = 0"))
# ### Multicollinearity
#
# The Longley dataset is well known to have high multicollinearity. That is, the exogenous predictors are highly correlated. This is problematic because it can affect the stability of our coefficient estimates as we make minor changes to model specification.
from statsmodels.datasets.longley import load_pandas
y = load_pandas().endog
X = load_pandas().exog
X = sm.add_constant(X)
# Fit and summary:
ols_model = sm.OLS(y, X)
ols_results = ols_model.fit()
print(ols_results.summary())
# #### Condition number
#
# One way to assess multicollinearity is to compute the condition number. Values over 20 are worrisome (see Greene 4.9). The first step is to normalize the independent variables to have unit length:
norm_x = X.values
for i, name in enumerate(X):
if name == "const":
continue
norm_x[:,i] = X[name]/np.linalg.norm(X[name])
norm_xtx = np.dot(norm_x.T,norm_x)
# Then, we take the square root of the ratio of the largest to the smallest eigenvalues.
eigs = np.linalg.eigvals(norm_xtx)
condition_number = np.sqrt(eigs.max() / eigs.min())
print(condition_number)
# #### Dropping an observation
#
# Greene also points out that dropping a single observation can have a dramatic effect on the coefficient estimates:
ols_results2 = sm.OLS(y.iloc[:14], X.iloc[:14]).fit()
print("Percentage change %4.2f%%\n"*7 % tuple([i for i in (ols_results2.params - ols_results.params)/ols_results.params*100]))
# We can also look at formal statistics for this such as the DFBETAS -- a standardized measure of how much each coefficient changes when that observation is left out.
infl = ols_results.get_influence()
# In general we may consider DBETAS in absolute value greater than $2/\sqrt{N}$ to be influential observations
print(2./len(X)**.5)
print(infl.summary_frame().filter(regex="dfb"))
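# For illustration (not in the original example): flag observations whose
# DFBETAS exceed the 2/sqrt(N) rule of thumb for any coefficient.
dfbetas = infl.summary_frame().filter(regex="dfb")
threshold = 2. / len(X) ** .5
print(dfbetas[(dfbetas.abs() > threshold).any(axis=1)])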
|
|
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
import os
import yaml
from pkg_resources import resource_filename
PERSIST_SETTINGS = [
'ansible_ssh_user',
'ansible_config',
'ansible_log_path',
'variant',
'variant_version',
'version',
]
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
class OOConfigFileError(Exception):
"""The provided config file path can't be read/written
"""
pass
class OOConfigInvalidHostError(Exception):
""" Host in config is missing both ip and hostname. """
pass
class Host(object):
""" A system we will or have installed OpenShift on. """
def __init__(self, **kwargs):
self.ip = kwargs.get('ip', None)
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
self.preconfigured = kwargs.get('preconfigured', None)
self.new_host = kwargs.get('new_host', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
# Should this host run as an OpenShift node:
self.node = kwargs.get('node', False)
# Should this host run as an HAProxy:
self.master_lb = kwargs.get('master_lb', False)
self.containerized = kwargs.get('containerized', False)
if self.connect_to is None:
raise OOConfigInvalidHostError("You must specify either an ip " \
"or hostname as 'connect_to'")
if self.master is False and self.node is False and self.master_lb is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
def __str__(self):
return self.connect_to
def __repr__(self):
return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
'master', 'node', 'master_lb', 'containerized',
'connect_to', 'preconfigured', 'new_host']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
return d
def is_etcd_member(self, all_hosts):
""" Will this host be a member of a standalone etcd cluster. """
if not self.master:
return False
masters = [host for host in all_hosts if host.master]
if len(masters) > 1:
return True
return False
def is_dedicated_node(self):
""" Will this host be a dedicated node. (not a master) """
return self.node and not self.master
def is_schedulable_node(self, all_hosts):
""" Will this host be a node marked as schedulable. """
if not self.node:
return False
if not self.master:
return True
masters = [host for host in all_hosts if host.master]
nodes = [host for host in all_hosts if host.node]
if len(masters) == len(nodes):
return True
return False
class OOConfig(object):
default_dir = os.path.normpath(
os.environ.get('XDG_CONFIG_HOME',
os.environ['HOME'] + '/.config/') + '/openshift/')
default_file = '/installer.cfg.yml'
def __init__(self, config_path):
if config_path:
self.config_path = os.path.normpath(config_path)
else:
self.config_path = os.path.normpath(self.default_dir +
self.default_file)
self.settings = {}
self._read_config()
self._set_defaults()
def _read_config(self):
self.hosts = []
try:
if os.path.exists(self.config_path):
cfgfile = open(self.config_path, 'r')
self.settings = yaml.safe_load(cfgfile.read())
cfgfile.close()
# Use the presence of a Description as an indicator this is
# a legacy config file:
if 'Description' in self.settings:
self._upgrade_legacy_config()
# Parse the hosts into DTO objects:
if 'hosts' in self.settings:
for host in self.settings['hosts']:
self.hosts.append(Host(**host))
# Watch out for the variant_version coming in as a float:
if 'variant_version' in self.settings:
self.settings['variant_version'] = \
str(self.settings['variant_version'])
except IOError, ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
ferr.strerror))
except yaml.scanner.ScannerError:
raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
def _upgrade_legacy_config(self):
new_hosts = []
remove_settings = ['validated_facts', 'Description', 'Name',
'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
if 'validated_facts' in self.settings:
for key, value in self.settings['validated_facts'].iteritems():
value['connect_to'] = key
if 'masters' in self.settings and key in self.settings['masters']:
value['master'] = True
if 'nodes' in self.settings and key in self.settings['nodes']:
value['node'] = True
new_hosts.append(value)
self.settings['hosts'] = new_hosts
for s in remove_settings:
if s in self.settings:
del self.settings[s]
# A legacy config implies openshift-enterprise 3.0:
self.settings['variant'] = 'openshift-enterprise'
self.settings['variant_version'] = '3.0'
def _set_defaults(self):
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = \
self._default_ansible_inv_dir()
if not os.path.exists(self.settings['ansible_inventory_directory']):
os.makedirs(self.settings['ansible_inventory_directory'])
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
if 'version' not in self.settings:
self.settings['version'] = 'v1'
if 'ansible_callback_facts_yaml' not in self.settings:
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
self.settings['ansible_inventory_directory']
if 'ansible_ssh_user' not in self.settings:
self.settings['ansible_ssh_user'] = ''
self.settings['ansible_inventory_path'] = '{}/hosts'.format(self.settings['ansible_inventory_directory'])
# clean up any empty sets
for setting in self.settings.keys():
if not self.settings[setting]:
self.settings.pop(setting)
def _default_ansible_inv_dir(self):
return os.path.normpath(
os.path.dirname(self.config_path) + "/.ansible")
def calc_missing_facts(self):
"""
Determine which host facts are not defined in the config.
Returns a hash of host to a list of the missing facts.
"""
result = {}
for host in self.hosts:
missing_facts = []
if host.preconfigured:
required_facts = PRECONFIGURED_REQUIRED_FACTS
else:
required_facts = DEFAULT_REQUIRED_FACTS
for required_fact in required_facts:
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
result[host.connect_to] = missing_facts
return result
def save_to_disk(self):
out_file = open(self.config_path, 'w')
out_file.write(self.yaml())
out_file.close()
def persist_settings(self):
p_settings = {}
for setting in PERSIST_SETTINGS:
if setting in self.settings and self.settings[setting]:
p_settings[setting] = self.settings[setting]
p_settings['hosts'] = []
for host in self.hosts:
p_settings['hosts'].append(host.to_dict())
if self.settings['ansible_inventory_directory'] != \
self._default_ansible_inv_dir():
p_settings['ansible_inventory_directory'] = \
self.settings['ansible_inventory_directory']
return p_settings
def yaml(self):
return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
def __str__(self):
return self.yaml()
def get_host(self, name):
for host in self.hosts:
if host.connect_to == name:
return host
return None
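# Illustrative usage sketch (comments only, not part of the library):
#   cfg = OOConfig('/path/to/installer.cfg.yml')  # hypothetical path
#   missing = cfg.calc_missing_facts()            # {connect_to: [missing facts]}
#   cfg.save_to_disk()                            # writes cfg.yaml() back out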
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.summary import event_accumulator as ea
class _EventGenerator(object):
def __init__(self):
self.items = []
def Load(self):
while self.items:
yield self.items.pop(0)
def AddScalar(self, tag, wall_time=0, step=0, value=0):
event = tf.Event(
wall_time=wall_time, step=step,
summary=tf.Summary(
value=[tf.Summary.Value(tag=tag, simple_value=value)]
)
)
self.AddEvent(event)
def AddHistogram(self, tag, wall_time=0, step=0, hmin=1, hmax=2, hnum=3,
hsum=4, hsum_squares=5, hbucket_limit=None, hbucket=None):
histo = tf.HistogramProto(min=hmin, max=hmax, num=hnum, sum=hsum,
sum_squares=hsum_squares,
bucket_limit=hbucket_limit,
bucket=hbucket)
event = tf.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)]))
self.AddEvent(event)
def AddImage(self, tag, wall_time=0, step=0, encoded_image_string='imgstr',
width=150, height=100):
image = tf.Summary.Image(encoded_image_string=encoded_image_string,
width=width, height=height)
event = tf.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(
value=[tf.Summary.Value(tag=tag, image=image)]))
self.AddEvent(event)
def AddEvent(self, event):
self.items.append(event)
class EventAccumulatorTest(tf.test.TestCase):
def assertTagsEqual(self, tags1, tags2):
# Make sure the two dictionaries have the same keys.
self.assertItemsEqual(tags1, tags2)
# Additionally, make sure each key in the dictionary maps to the same value.
for key in tags1:
if isinstance(tags1[key], list):
# We don't care about the order of the values in lists, so we only
# check that both lists contain the same items.
self.assertItemsEqual(tags1[key], tags2[key])
else:
# Make sure the values are equal.
self.assertEqual(tags1[key], tags2[key])
class MockingEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(MockingEventAccumulatorTest, self).setUp()
self.empty = {ea.IMAGES: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: False}
self._real_constructor = ea.EventAccumulator
self._real_generator = ea._GeneratorFromPath
def _FakeAccumulatorConstructor(generator, *args, **kwargs):
ea._GeneratorFromPath = lambda x: generator
return self._real_constructor(generator, *args, **kwargs)
ea.EventAccumulator = _FakeAccumulatorConstructor
def tearDown(self):
ea.EventAccumulator = self._real_constructor
ea._GeneratorFromPath = self._real_generator
def testEmptyAccumulator(self):
gen = _EventGenerator()
x = ea.EventAccumulator(gen)
x.Reload()
self.assertEqual(x.Tags(), self.empty)
def testTags(self):
gen = _EventGenerator()
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(
acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False})
def testReload(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertEqual(acc.Tags(), self.empty)
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
self.assertEqual(acc.Tags(), self.empty)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False})
def testScalars(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
gen.AddScalar('s1', wall_time=1, step=10, value=32)
gen.AddScalar('s2', wall_time=2, step=12, value=64)
acc.Reload()
self.assertEqual(acc.Scalars('s1'), [s1])
self.assertEqual(acc.Scalars('s2'), [s2])
def testHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
val1 = ea.HistogramValue(min=1, max=2, num=3, sum=4, sum_squares=5,
bucket_limit=[1, 2, 3], bucket=[0, 3, 0])
val2 = ea.HistogramValue(min=-2, max=3, num=4, sum=5, sum_squares=6,
bucket_limit=[2, 3, 4], bucket=[1, 3, 0])
hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
gen.AddHistogram('hst1', wall_time=1, step=10, hmin=1, hmax=2, hnum=3,
hsum=4, hsum_squares=5, hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2', wall_time=2, step=12, hmin=-2, hmax=3, hnum=4,
hsum=5, hsum_squares=6, hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
self.assertEqual(acc.Histograms('hst1'), [hst1])
self.assertEqual(acc.Histograms('hst2'), [hst2])
def testCompressedHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram('hst1', wall_time=1, step=10, hmin=1, hmax=2, hnum=3,
hsum=4, hsum_squares=5, hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2', wall_time=2, step=12, hmin=-2, hmax=3, hnum=4,
hsum=5, hsum_squares=6, hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [ea.CompressedHistogramValue(bp, val) for bp, val in [(
0, 1.0), (2500, 1.25), (5000, 1.5), (7500, 1.75), (10000, 2.0)]]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1,
step=10,
compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
# Create the expected values after compressing hst2
expected_vals2 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3), (7500, 2 + 2 / 3
), (10000, 3)]
]
expected_cmphst2 = ea.CompressedHistogramEvent(
wall_time=2,
step=12,
compressed_histogram_values=expected_vals2)
self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])
def testPercentile(self):
def AssertExpectedForBps(bps, expected):
output = acc._Percentile(
bps, bucket_limit, cumsum_weights, histo_min, histo_max, histo_num)
self.assertAlmostEqual(expected, output)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
bucket_limit = [1, 2, 3, 4]
histo_num = 100
## All weights in the first bucket
cumsum_weights = [10000, 10000, 10000, 10000]
histo_min = -1
histo_max = .9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, acc._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, acc._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, acc._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in second bucket
cumsum_weights = [0, 10000, 10000, 10000]
histo_min = 1.1
histo_max = 1.8
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, acc._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, acc._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, acc._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in the last bucket
cumsum_weights = [0, 0, 0, 10000]
histo_min = 3.1
histo_max = 3.6
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, acc._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, acc._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, acc._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between two buckets
cumsum_weights = [0, 4000, 10000, 10000]
histo_min = 1.1
histo_max = 2.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, acc._Remap(2500, 0, 4000, histo_min,
bucket_limit[1]))
AssertExpectedForBps(5000, acc._Remap(5000, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(7500, acc._Remap(7500, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between all buckets
cumsum_weights = [1000, 4000, 8000, 10000]
histo_min = -1
histo_max = 3.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, acc._Remap(2500, 1000, 4000, bucket_limit[0],
bucket_limit[1]))
AssertExpectedForBps(5000, acc._Remap(5000, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(7500, acc._Remap(7500, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(9000, acc._Remap(9000, 8000, 10000, bucket_limit[2],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Most weight in first bucket
cumsum_weights = [9000, 10000, 10000, 10000]
histo_min = -1
histo_max = 1.1
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, acc._Remap(2500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(5000, acc._Remap(5000, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(7500, acc._Remap(7500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(9500, acc._Remap(9500, 9000, 10000, bucket_limit[0],
histo_max))
AssertExpectedForBps(10000, histo_max)
def testImages(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
im1 = ea.ImageEvent(wall_time=1, step=10, encoded_image_string='big',
width=400, height=300)
im2 = ea.ImageEvent(wall_time=2, step=12, encoded_image_string='small',
width=40, height=30)
gen.AddImage('im1', wall_time=1, step=10, encoded_image_string='big',
width=400, height=300)
gen.AddImage('im2', wall_time=2, step=12, encoded_image_string='small',
width=40, height=30)
acc.Reload()
self.assertEqual(acc.Images('im1'), [im1])
self.assertEqual(acc.Images('im2'), [im2])
def testActivation(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
self.assertFalse(acc._activated)
with self.assertRaises(RuntimeError):
acc.Tags()
with self.assertRaises(RuntimeError):
acc.Scalars('s1')
acc.Reload()
self.assertTrue(acc._activated)
acc._activated = False
def testKeyError(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
with self.assertRaises(KeyError):
acc.Scalars('s1')
with self.assertRaises(KeyError):
acc.Scalars('hst1')
with self.assertRaises(KeyError):
acc.Scalars('im1')
with self.assertRaises(KeyError):
acc.Histograms('s1')
with self.assertRaises(KeyError):
acc.Histograms('im1')
with self.assertRaises(KeyError):
acc.Images('s1')
with self.assertRaises(KeyError):
acc.Images('hst1')
def testNonValueEvents(self):
"""Tests that non-value events in the generator don't cause early exits."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=10, value=20)
gen.AddEvent(tf.Event(
wall_time=2, step=20, file_version='nots2'))
gen.AddScalar('s3', wall_time=3, step=100, value=1)
gen.AddHistogram('hst1')
gen.AddImage('im1')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1'],
ea.SCALARS: ['s1', 's3'],
ea.HISTOGRAMS: ['hst1'],
ea.COMPRESSED_HISTOGRAMS: ['hst1'],
ea.GRAPH: False})
def testExpiredDataDiscardedAfterRestart(self):
"""Tests that events are discarded after a restart is detected.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items that are outdated.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
class RealisticEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(RealisticEventAccumulatorTest, self).setUp()
def testScalarsRealistically(self):
"""Test accumulator by writing values and then reading them."""
def FakeScalarSummary(tag, value):
value = tf.Summary.Value(tag=tag, simple_value=value)
summary = tf.Summary(value=[value])
return summary
directory = os.path.join(self.get_temp_dir(), 'values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = tf.train.SummaryWriter(directory, max_queue=100)
graph_def = tf.GraphDef(node=[tf.NodeDef(name='A', op='Mul')])
# Add a graph to the summary writer.
writer.add_graph(graph_def)
# Write a bunch of events using the writer
for i in xrange(30):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i*i)
writer.add_summary(summ_id, i*5)
writer.add_summary(summ_sq, i*5)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: [],
ea.SCALARS: ['id', 'sq'],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True})
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(30, len(id_events))
self.assertEqual(30, len(sq_events))
for i in xrange(30):
self.assertEqual(i*5, id_events[i].step)
self.assertEqual(i*5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i*i, sq_events[i].value)
# Write a few more events to test incremental reloading
for i in xrange(30, 40):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i*i)
writer.add_summary(summ_id, i*5)
writer.add_summary(summ_sq, i*5)
writer.flush()
# Verify we can now see all of the data
acc.Reload()
self.assertEqual(40, len(id_events))
self.assertEqual(40, len(sq_events))
for i in xrange(40):
self.assertEqual(i*5, id_events[i].step)
self.assertEqual(i*5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i*i, sq_events[i].value)
if __name__ == '__main__':
tf.test.main()
|
|
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pip._vendor.pygments.lexers._mapping import LEXERS
from pip._vendor.pygments.modeline import get_filetype_from_buffer
from pip._vendor.pygments.plugin import find_plugin_lexers
from pip._vendor.pygments.util import ClassNotFound, guess_decode
COMPAT = {
'Python3Lexer': 'PythonLexer',
'Python3TracebackLexer': 'PythonTracebackLexer',
}
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches the glob pattern."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
"""
for item in LEXERS.values():
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in LEXERS.values():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def find_lexer_class_by_name(_alias):
"""Lookup a lexer class by alias.
Like `get_lexer_by_name`, but does not instantiate the class.
.. versionadded:: 2.2
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls
raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
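# Example (illustrative): get_lexer_by_name('python', stripall=True) returns a
# configured PythonLexer instance, or raises ClassNotFound for an unknown alias.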
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
"""Load a lexer from a file.
This method expects a file located relative to the current working
directory, which contains a Lexer class. By default, it expects the
Lexer to be named CustomLexer; you can specify your own class name
as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Lexer.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# Retrieve the class `lexername` from that namespace
if lexername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(lexername, filename))
lexer_class = custom_namespace[lexername]
# And finally instantiate it with the options
return lexer_class(**options)
except OSError as err:
raise ClassNotFound('cannot read %s: %s' % (filename, err))
except ClassNotFound:
raise
except Exception as err:
raise ClassNotFound('error when loading custom lexer: %s' % err)
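# Example (illustrative, hypothetical file name): load_lexer_from_file(
# 'my_lexer.py', lexername='MyLexer') execs my_lexer.py and instantiates its
# MyLexer class with the given options.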
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in LEXERS.values():
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus, cls.__name__
return cls.priority + bonus, cls.__name__
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in LEXERS.values():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
yield from find_plugin_lexers()
def guess_lexer_for_filename(_fn, _text, **options):
"""
Lookup all lexers that handle those filenames primary (``filenames``)
or secondary (``alias_filenames``). Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
if not isinstance(_text, str):
inencoding = options.get('inencoding', options.get('encoding'))
if inencoding:
_text = _text.decode(inencoding or 'utf8')
else:
_text, _ = guess_decode(_text)
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
if name in COMPAT:
return getattr(self, COMPAT[name])
raise AttributeError(name)
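# Replace this module in sys.modules with an _automodule instance so that
# attribute access such as `lexers.PythonLexer` lazily imports the submodule
# that defines it (see __getattr__ above).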
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|