gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from typing import Union
from pytest import raises
from graphql import graphql_sync
from graphql.language import parse, print_ast
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumValue,
GraphQLField,
GraphQLFloat,
GraphQLID,
GraphQLInputField,
GraphQLInt,
GraphQLNamedType,
GraphQLSchema,
GraphQLString,
assert_directive,
assert_enum_type,
assert_input_object_type,
assert_interface_type,
assert_object_type,
assert_scalar_type,
assert_union_type,
validate_schema,
)
from graphql.utilities import (
build_schema,
concat_ast,
extend_schema,
print_schema,
)
from ..utils import dedent
# Schema elements that carry a single ``ast_node`` attribute referencing the
# AST definition node they were built from (checked by ``expect_ast_node``).
TypeWithAstNode = Union[
    GraphQLArgument,
    GraphQLEnumValue,
    GraphQLField,
    GraphQLInputField,
    GraphQLNamedType,
    GraphQLSchema,
]

# Schema elements that additionally accumulate ``extension_ast_nodes`` from
# ``extend ...`` definitions (checked by ``expect_extension_ast_nodes``).
TypeWithExtensionAstNodes = Union[
    GraphQLNamedType,
    GraphQLSchema,
]
def expect_extension_ast_nodes(obj: TypeWithExtensionAstNodes, expected: str) -> None:
    """Check that *obj* carries extension AST nodes that print to *expected*.

    The recorded extension nodes are printed back to SDL and joined with
    blank lines before being compared against the expected text.
    """
    assert obj is not None
    nodes = obj.extension_ast_nodes
    assert nodes is not None
    printed_nodes = [print_ast(node) for node in nodes]
    assert "\n\n".join(printed_nodes) == expected
def expect_ast_node(obj: TypeWithAstNode, expected: str) -> None:
    """Check that *obj* has an AST node that prints back to *expected*."""
    assert obj is not None
    ast_node = obj.ast_node
    assert ast_node is not None
    assert print_ast(ast_node) == expected
def expect_schema_changes(
    schema: GraphQLSchema, extended_schema: GraphQLSchema, expected: str
) -> None:
    """Check that extending *schema* added exactly the *expected* definitions.

    Both schemas are printed and re-parsed so the comparison operates on
    normalized SDL text; definitions already present in the original schema
    are filtered out, and only the remainder is compared against *expected*.
    """
    original_definitions = {
        print_ast(definition)
        for definition in parse(print_schema(schema)).definitions
    }
    extended_definitions = [
        print_ast(definition)
        for definition in parse(print_schema(extended_schema)).definitions
    ]
    added_definitions = [
        definition
        for definition in extended_definitions
        if definition not in original_definitions
    ]
    assert "\n\n".join(added_definitions) == expected
def describe_extend_schema():
def returns_the_original_schema_when_there_are_no_type_definitions():
schema = build_schema("type Query")
extended_schema = extend_schema(schema, parse("{ field }"))
assert extended_schema == schema
def can_be_used_for_limited_execution():
schema = build_schema("type Query")
extend_ast = parse(
"""
extend type Query {
newField: String
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
result = graphql_sync(
schema=extended_schema, source="{ newField }", root_value={"newField": 123}
)
assert result == ({"newField": "123"}, None)
def extends_objects_by_adding_new_fields():
schema = build_schema(
'''
type Query {
someObject: SomeObject
}
type SomeObject implements AnotherInterface & SomeInterface {
self: SomeObject
tree: [SomeObject]!
"""Old field description."""
oldField: String
}
interface SomeInterface {
self: SomeInterface
}
interface AnotherInterface {
self: SomeObject
}
'''
)
extension_sdl = dedent(
'''
extend type SomeObject {
"""New field description."""
newField(arg: Boolean): String
}
'''
)
extended_schema = extend_schema(schema, parse(extension_sdl))
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
'''
type SomeObject implements AnotherInterface & SomeInterface {
self: SomeObject
tree: [SomeObject]!
"""Old field description."""
oldField: String
"""New field description."""
newField(arg: Boolean): String
}
'''
),
)
def extends_objects_with_standard_type_fields():
schema = build_schema("type Query")
# Only String and Boolean are used by introspection types
assert schema.get_type("Int") is None
assert schema.get_type("Float") is None
assert schema.get_type("String") is GraphQLString
assert schema.get_type("Boolean") is GraphQLBoolean
assert schema.get_type("ID") is None
extend_ast = parse(
"""
extend type Query {
bool: Boolean
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
assert extended_schema.get_type("Int") is None
assert extended_schema.get_type("Float") is None
assert extended_schema.get_type("String") is GraphQLString
assert extended_schema.get_type("Boolean") is GraphQLBoolean
assert extended_schema.get_type("ID") is None
extend_twice_ast = parse(
"""
extend type Query {
int: Int
float: Float
id: ID
}
"""
)
extended_twice_schema = extend_schema(schema, extend_twice_ast)
assert validate_schema(extended_twice_schema) == []
assert extended_twice_schema.get_type("Int") is GraphQLInt
assert extended_twice_schema.get_type("Float") is GraphQLFloat
assert extended_twice_schema.get_type("String") is GraphQLString
assert extended_twice_schema.get_type("Boolean") is GraphQLBoolean
assert extended_twice_schema.get_type("ID") is GraphQLID
def extends_enums_by_adding_new_values():
schema = build_schema(
'''
type Query {
someEnum(arg: SomeEnum): SomeEnum
}
directive @foo(arg: SomeEnum) on SCHEMA
enum SomeEnum {
"""Old value description."""
OLD_VALUE
}
'''
)
extend_ast = parse(
'''
extend enum SomeEnum {
"""New value description."""
NEW_VALUE
}
'''
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
'''
enum SomeEnum {
"""Old value description."""
OLD_VALUE
"""New value description."""
NEW_VALUE
}
'''
),
)
def extends_unions_by_adding_new_types():
schema = build_schema(
"""
type Query {
someUnion: SomeUnion
}
union SomeUnion = Foo | Biz
type Foo { foo: String }
type Biz { biz: String }
type Bar { bar: String }
"""
)
extend_ast = parse(
"""
extend union SomeUnion = Bar
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
union SomeUnion = Foo | Biz | Bar
"""
),
)
def allows_extension_of_union_by_adding_itself():
schema = build_schema(
"""
union SomeUnion
"""
)
extend_ast = parse(
"""
extend union SomeUnion = SomeUnion
"""
)
# invalid schema cannot be built with Python
with raises(TypeError) as exc_info:
extend_schema(schema, extend_ast)
assert str(exc_info.value) == (
"SomeUnion types must be specified"
" as a collection of GraphQLObjectType instances."
)
def extends_inputs_by_adding_new_fields():
schema = build_schema(
'''
type Query {
someInput(arg: SomeInput): String
}
directive @foo(arg: SomeInput) on SCHEMA
input SomeInput {
"""Old field description."""
oldField: String
}
'''
)
extend_ast = parse(
'''
extend input SomeInput {
"""New field description."""
newField: String
}
'''
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
'''
input SomeInput {
"""Old field description."""
oldField: String
"""New field description."""
newField: String
}
'''
),
)
def extends_scalars_by_adding_new_directives():
schema = build_schema(
"""
type Query {
someScalar(arg: SomeScalar): SomeScalar
}
directive @foo(arg: SomeScalar) on SCALAR
input FooInput {
foo: SomeScalar
}
scalar SomeScalar
"""
)
extension_sdl = dedent(
"""
extend scalar SomeScalar @foo
"""
)
extended_schema = extend_schema(schema, parse(extension_sdl))
some_scalar = assert_scalar_type(extended_schema.get_type("SomeScalar"))
assert validate_schema(extended_schema) == []
expect_extension_ast_nodes(some_scalar, extension_sdl)
def extends_scalars_by_adding_specified_by_directive():
schema = build_schema(
"""
type Query {
foo: Foo
}
scalar Foo
directive @foo on SCALAR
"""
)
extension_sdl = dedent(
"""
extend scalar Foo @foo
extend scalar Foo @specifiedBy(url: "https://example.com/foo_spec")
"""
)
extended_schema = extend_schema(schema, parse(extension_sdl))
foo = assert_scalar_type(extended_schema.get_type("Foo"))
assert foo.specified_by_url == "https://example.com/foo_spec"
assert validate_schema(extended_schema) == []
expect_extension_ast_nodes(foo, extension_sdl)
def correctly_assigns_ast_nodes_to_new_and_extended_types():
schema = build_schema(
"""
type Query
scalar SomeScalar
enum SomeEnum
union SomeUnion
input SomeInput
type SomeObject
interface SomeInterface
directive @foo on SCALAR
"""
)
first_extension_ast = parse(
"""
extend type Query {
newField(testArg: TestInput): TestEnum
}
extend scalar SomeScalar @foo
extend enum SomeEnum {
NEW_VALUE
}
extend union SomeUnion = SomeObject
extend input SomeInput {
newField: String
}
extend interface SomeInterface {
newField: String
}
enum TestEnum {
TEST_VALUE
}
input TestInput {
testInputField: TestEnum
}
"""
)
extended_schema = extend_schema(schema, first_extension_ast)
second_extension_ast = parse(
"""
extend type Query {
oneMoreNewField: TestUnion
}
extend scalar SomeScalar @test
extend enum SomeEnum {
ONE_MORE_NEW_VALUE
}
extend union SomeUnion = TestType
extend input SomeInput {
oneMoreNewField: String
}
extend interface SomeInterface {
oneMoreNewField: String
}
union TestUnion = TestType
interface TestInterface {
interfaceField: String
}
type TestType implements TestInterface {
interfaceField: String
}
directive @test(arg: Int) repeatable on FIELD | SCALAR
"""
)
extended_twice_schema = extend_schema(extended_schema, second_extension_ast)
extend_in_one_go_schema = extend_schema(
schema, concat_ast([first_extension_ast, second_extension_ast])
)
assert print_schema(extend_in_one_go_schema) == print_schema(
extended_twice_schema
)
query = assert_object_type(extended_twice_schema.get_type("Query"))
some_enum = assert_enum_type(extended_twice_schema.get_type("SomeEnum"))
some_union = assert_union_type(extended_twice_schema.get_type("SomeUnion"))
some_scalar = assert_scalar_type(extended_twice_schema.get_type("SomeScalar"))
some_input = assert_input_object_type(
extended_twice_schema.get_type("SomeInput")
)
some_interface = assert_interface_type(
extended_twice_schema.get_type("SomeInterface")
)
test_input = assert_input_object_type(
extended_twice_schema.get_type("TestInput")
)
test_enum = assert_enum_type(extended_twice_schema.get_type("TestEnum"))
test_union = assert_union_type(extended_twice_schema.get_type("TestUnion"))
test_type = assert_object_type(extended_twice_schema.get_type("TestType"))
test_interface = assert_interface_type(
extended_twice_schema.get_type("TestInterface")
)
test_directive = assert_directive(extended_twice_schema.get_directive("test"))
assert test_type.extension_ast_nodes == ()
assert test_enum.extension_ast_nodes == ()
assert test_union.extension_ast_nodes == ()
assert test_input.extension_ast_nodes == ()
assert test_interface.extension_ast_nodes == ()
assert query.extension_ast_nodes
assert len(query.extension_ast_nodes) == 2
assert some_scalar.extension_ast_nodes
assert len(some_scalar.extension_ast_nodes) == 2
assert some_enum.extension_ast_nodes
assert len(some_enum.extension_ast_nodes) == 2
assert some_union.extension_ast_nodes
assert len(some_union.extension_ast_nodes) == 2
assert some_input.extension_ast_nodes
assert len(some_input.extension_ast_nodes) == 2
assert some_interface.extension_ast_nodes
assert len(some_interface.extension_ast_nodes) == 2
assert {
test_input.ast_node,
test_enum.ast_node,
test_union.ast_node,
test_interface.ast_node,
test_type.ast_node,
test_directive.ast_node,
*query.extension_ast_nodes,
*some_scalar.extension_ast_nodes,
*some_enum.extension_ast_nodes,
*some_union.extension_ast_nodes,
*some_input.extension_ast_nodes,
*some_interface.extension_ast_nodes,
} == {*first_extension_ast.definitions, *second_extension_ast.definitions}
new_field = query.fields["newField"]
expect_ast_node(new_field, "newField(testArg: TestInput): TestEnum")
expect_ast_node(new_field.args["testArg"], "testArg: TestInput")
expect_ast_node(query.fields["oneMoreNewField"], "oneMoreNewField: TestUnion")
expect_ast_node(some_enum.values["NEW_VALUE"], "NEW_VALUE")
one_more_new_value = some_enum.values["ONE_MORE_NEW_VALUE"]
expect_ast_node(one_more_new_value, "ONE_MORE_NEW_VALUE")
expect_ast_node(some_input.fields["newField"], "newField: String")
expect_ast_node(some_input.fields["oneMoreNewField"], "oneMoreNewField: String")
expect_ast_node(some_interface.fields["newField"], "newField: String")
expect_ast_node(
some_interface.fields["oneMoreNewField"], "oneMoreNewField: String"
)
expect_ast_node(test_input.fields["testInputField"], "testInputField: TestEnum")
expect_ast_node(test_enum.values["TEST_VALUE"], "TEST_VALUE")
expect_ast_node(
test_interface.fields["interfaceField"], "interfaceField: String"
)
expect_ast_node(test_type.fields["interfaceField"], "interfaceField: String")
expect_ast_node(test_directive.args["arg"], "arg: Int")
def builds_types_with_deprecated_fields_and_values():
schema = GraphQLSchema()
extend_ast = parse(
"""
type SomeObject {
deprecatedField: String @deprecated(reason: "not used anymore")
}
enum SomeEnum {
DEPRECATED_VALUE @deprecated(reason: "do not use")
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
some_type = assert_object_type(extended_schema.get_type("SomeObject"))
deprecated_field = some_type.fields["deprecatedField"]
assert deprecated_field.deprecation_reason == "not used anymore"
some_enum = assert_enum_type(extended_schema.get_type("SomeEnum"))
deprecated_enum = some_enum.values["DEPRECATED_VALUE"]
assert deprecated_enum.deprecation_reason == "do not use"
def extends_objects_with_deprecated_fields():
schema = build_schema("type SomeObject")
extend_ast = parse(
"""
extend type SomeObject {
deprecatedField: String @deprecated(reason: "not used anymore")
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
some_type = assert_object_type(extended_schema.get_type("SomeObject"))
deprecated_field = some_type.fields["deprecatedField"]
assert deprecated_field.deprecation_reason == "not used anymore"
def extend_enums_with_deprecated_values():
schema = build_schema("enum SomeEnum")
extend_ast = parse(
"""
extend enum SomeEnum {
DEPRECATED_VALUE @deprecated(reason: "do not use")
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
some_enum = assert_enum_type(extended_schema.get_type("SomeEnum"))
deprecated_value = some_enum.values["DEPRECATED_VALUE"]
assert deprecated_value.deprecation_reason == "do not use"
def adds_new_unused_types():
schema = build_schema(
"""
type Query {
dummy: String
}
"""
)
extension_sdl = dedent(
"""
type DummyUnionMember {
someField: String
}
enum UnusedEnum {
SOME_VALUE
}
input UnusedInput {
someField: String
}
interface UnusedInterface {
someField: String
}
type UnusedObject {
someField: String
}
union UnusedUnion = DummyUnionMember
"""
)
extended_schema = extend_schema(schema, parse(extension_sdl))
assert validate_schema(extended_schema) == []
expect_schema_changes(schema, extended_schema, extension_sdl)
def extends_objects_by_adding_new_fields_with_arguments():
schema = build_schema(
"""
type SomeObject
type Query {
someObject: SomeObject
}
"""
)
extend_ast = parse(
"""
input NewInputObj {
field1: Int
field2: [Float]
field3: String!
}
extend type SomeObject {
newField(arg1: String, arg2: NewInputObj!): String
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
type SomeObject {
newField(arg1: String, arg2: NewInputObj!): String
}
input NewInputObj {
field1: Int
field2: [Float]
field3: String!
}
"""
),
)
def extends_objects_by_adding_new_fields_with_existing_types():
schema = build_schema(
"""
type Query {
someObject: SomeObject
}
type SomeObject
enum SomeEnum { VALUE }
"""
)
extend_ast = parse(
"""
extend type SomeObject {
newField(arg1: SomeEnum!): SomeEnum
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
type SomeObject {
newField(arg1: SomeEnum!): SomeEnum
}
"""
),
)
def extends_objects_by_adding_implemented_interfaces():
schema = build_schema(
"""
type Query {
someObject: SomeObject
}
type SomeObject {
foo: String
}
interface SomeInterface {
foo: String
}
"""
)
extend_ast = parse(
"""
extend type SomeObject implements SomeInterface
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
type SomeObject implements SomeInterface {
foo: String
}
"""
),
)
def extends_objects_by_including_new_types():
schema = build_schema(
"""
type Query {
someObject: SomeObject
}
type SomeObject {
oldField: String
}
"""
)
new_types_sdl = """
enum NewEnum {
VALUE
}
interface NewInterface {
baz: String
}
type NewObject implements NewInterface {
baz: String
}
scalar NewScalar
union NewUnion = NewObject
"""
extend_ast = parse(
new_types_sdl
+ """
extend type SomeObject {
newObject: NewObject
newInterface: NewInterface
newUnion: NewUnion
newScalar: NewScalar
newEnum: NewEnum
newTree: [SomeObject]!
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
type SomeObject {
oldField: String
newObject: NewObject
newInterface: NewInterface
newUnion: NewUnion
newScalar: NewScalar
newEnum: NewEnum
newTree: [SomeObject]!
}\n"""
+ new_types_sdl
),
)
def extends_objects_by_adding_implemented_new_interfaces():
schema = build_schema(
"""
type Query {
someObject: SomeObject
}
type SomeObject implements OldInterface {
oldField: String
}
interface OldInterface {
oldField: String
}
"""
)
extend_ast = parse(
"""
extend type SomeObject implements NewInterface {
newField: String
}
interface NewInterface {
newField: String
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
type SomeObject implements OldInterface & NewInterface {
oldField: String
newField: String
}
interface NewInterface {
newField: String
}
"""
),
)
def extends_different_types_multiple_times():
schema = build_schema(
"""
type Query {
someScalar: SomeScalar
someObject(someInput: SomeInput): SomeObject
someInterface: SomeInterface
someEnum: SomeEnum
someUnion: SomeUnion
}
scalar SomeScalar
type SomeObject implements SomeInterface {
oldField: String
}
interface SomeInterface {
oldField: String
}
enum SomeEnum {
OLD_VALUE
}
union SomeUnion = SomeObject
input SomeInput {
oldField: String
}
"""
)
new_types_sdl = dedent(
"""
scalar NewScalar
scalar AnotherNewScalar
type NewObject {
foo: String
}
type AnotherNewObject {
foo: String
}
interface NewInterface {
newField: String
}
interface AnotherNewInterface {
anotherNewField: String
}
"""
)
schema_with_new_types = extend_schema(schema, parse(new_types_sdl))
expect_schema_changes(schema, schema_with_new_types, new_types_sdl)
extend_ast = parse(
"""
extend scalar SomeScalar @specifiedBy(url: "http://example.com/foo_spec")
extend type SomeObject implements NewInterface {
newField: String
}
extend type SomeObject implements AnotherNewInterface {
anotherNewField: String
}
extend enum SomeEnum {
NEW_VALUE
}
extend enum SomeEnum {
ANOTHER_NEW_VALUE
}
extend union SomeUnion = NewObject
extend union SomeUnion = AnotherNewObject
extend input SomeInput {
newField: String
}
extend input SomeInput {
anotherNewField: String
}
"""
)
extended_schema = extend_schema(schema_with_new_types, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
scalar SomeScalar @specifiedBy(url: "http://example.com/foo_spec")
type SomeObject implements SomeInterface & NewInterface & AnotherNewInterface {
oldField: String
newField: String
anotherNewField: String
}
enum SomeEnum {
OLD_VALUE
NEW_VALUE
ANOTHER_NEW_VALUE
}
union SomeUnion = SomeObject | NewObject | AnotherNewObject
input SomeInput {
oldField: String
newField: String
anotherNewField: String
}
""" # noqa: E501
)
+ "\n\n"
+ new_types_sdl,
)
def extends_interfaces_by_adding_new_fields():
schema = build_schema(
"""
interface SomeInterface {
oldField: String
}
interface AnotherInterface implements SomeInterface {
oldField: String
}
type SomeObject implements SomeInterface & AnotherInterface {
oldField: String
}
type Query {
someInterface: SomeInterface
}
"""
)
extend_ast = parse(
"""
extend interface SomeInterface {
newField: String
}
extend interface AnotherInterface {
newField: String
}
extend type SomeObject {
newField: String
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
interface SomeInterface {
oldField: String
newField: String
}
interface AnotherInterface implements SomeInterface {
oldField: String
newField: String
}
type SomeObject implements SomeInterface & AnotherInterface {
oldField: String
newField: String
}
"""
),
)
def extends_interfaces_by_adding_new_implemented_interfaces():
schema = build_schema(
"""
interface SomeInterface {
oldField: String
}
interface AnotherInterface implements SomeInterface {
oldField: String
}
type SomeObject implements SomeInterface & AnotherInterface {
oldField: String
}
type Query {
someInterface: SomeInterface
}
"""
)
extend_ast = parse(
"""
interface NewInterface {
newField: String
}
extend interface AnotherInterface implements NewInterface {
newField: String
}
extend type SomeObject implements NewInterface {
newField: String
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
interface AnotherInterface implements SomeInterface & NewInterface {
oldField: String
newField: String
}
type SomeObject implements SomeInterface & AnotherInterface & NewInterface {
oldField: String
newField: String
}
interface NewInterface {
newField: String
}
"""
),
)
def allows_extension_of_interface_with_missing_object_fields():
schema = build_schema(
"""
type Query {
someInterface: SomeInterface
}
type SomeObject implements SomeInterface {
oldField: SomeInterface
}
interface SomeInterface {
oldField: SomeInterface
}
"""
)
extend_ast = parse(
"""
extend interface SomeInterface {
newField: String
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema)
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
interface SomeInterface {
oldField: SomeInterface
newField: String
}
"""
),
)
def extends_interfaces_multiple_times():
schema = build_schema(
"""
type Query {
someInterface: SomeInterface
}
interface SomeInterface {
some: SomeInterface
}
"""
)
extend_ast = parse(
"""
extend interface SomeInterface {
newFieldA: Int
}
extend interface SomeInterface {
newFieldB(test: Boolean): String
}
"""
)
extended_schema = extend_schema(schema, extend_ast)
assert validate_schema(extended_schema) == []
expect_schema_changes(
schema,
extended_schema,
dedent(
"""
interface SomeInterface {
some: SomeInterface
newFieldA: Int
newFieldB(test: Boolean): String
}
"""
),
)
def may_extend_mutations_and_subscriptions():
mutation_schema = build_schema(
"""
type Query {
queryField: String
}
type Mutation {
mutationField: String
}
type Subscription {
subscriptionField: String
}
"""
)
ast = parse(
"""
extend type Query {
newQueryField: Int
}
extend type Mutation {
newMutationField: Int
}
extend type Subscription {
newSubscriptionField: Int
}
"""
)
original_print = print_schema(mutation_schema)
extended_schema = extend_schema(mutation_schema, ast)
assert extended_schema != mutation_schema
assert print_schema(mutation_schema) == original_print
assert print_schema(extended_schema) == dedent(
"""
type Query {
queryField: String
newQueryField: Int
}
type Mutation {
mutationField: String
newMutationField: Int
}
type Subscription {
subscriptionField: String
newSubscriptionField: Int
}
"""
)
def may_extend_directives_with_new_directive():
schema = build_schema(
"""
type Query {
foo: String
}
"""
)
extension_sdl = dedent(
'''
"""New directive."""
directive @new(enable: Boolean!, tag: String) repeatable on QUERY | FIELD
'''
)
extended_schema = extend_schema(schema, parse(extension_sdl))
assert validate_schema(extended_schema) == []
expect_schema_changes(schema, extended_schema, extension_sdl)
def rejects_invalid_sdl():
schema = GraphQLSchema()
extend_ast = parse("extend schema @unknown")
with raises(TypeError) as exc_info:
extend_schema(schema, extend_ast)
assert str(exc_info.value) == "Unknown directive '@unknown'."
def allows_to_disable_sdl_validation():
schema = GraphQLSchema()
extend_ast = parse("extend schema @unknown")
extend_schema(schema, extend_ast, assume_valid=True)
extend_schema(schema, extend_ast, assume_valid_sdl=True)
def throws_on_unknown_types():
schema = GraphQLSchema()
ast = parse(
"""
type Query {
unknown: UnknownType
}
"""
)
with raises(TypeError) as exc_info:
extend_schema(schema, ast, assume_valid_sdl=True)
assert str(exc_info.value).endswith("Unknown type: 'UnknownType'.")
def rejects_invalid_ast():
schema = GraphQLSchema()
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
extend_schema(schema, None) # type: ignore
assert str(exc_info.value) == "Must provide valid Document AST."
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
extend_schema(schema, {}) # type: ignore
assert str(exc_info.value) == "Must provide valid Document AST."
def does_not_allow_replacing_a_default_directive():
schema = GraphQLSchema()
extend_ast = parse(
"""
directive @include(if: Boolean!) on FIELD | FRAGMENT_SPREAD
"""
)
with raises(TypeError) as exc_info:
extend_schema(schema, extend_ast)
assert str(exc_info.value).startswith(
"Directive '@include' already exists in the schema."
" It cannot be redefined."
)
def does_not_allow_replacing_an_existing_enum_value():
schema = build_schema(
"""
enum SomeEnum {
ONE
}
"""
)
extend_ast = parse(
"""
extend enum SomeEnum {
ONE
}
"""
)
with raises(TypeError) as exc_info:
extend_schema(schema, extend_ast)
assert str(exc_info.value).startswith(
"Enum value 'SomeEnum.ONE' already exists in the schema."
" It cannot also be defined in this type extension."
)
def describe_can_add_additional_root_operation_types():
def does_not_automatically_include_common_root_type_names():
schema = GraphQLSchema()
extended_schema = extend_schema(schema, parse("type Mutation"))
assert extended_schema.get_type("Mutation")
assert extended_schema.mutation_type is None
def adds_schema_definition_missing_in_the_original_schema():
schema = build_schema(
"""
directive @foo on SCHEMA
type Foo
"""
)
assert schema.query_type is None
extension_sdl = dedent(
"""
schema @foo {
query: Foo
}
"""
)
extended_schema = extend_schema(schema, parse(extension_sdl))
query_type = assert_object_type(extended_schema.query_type)
assert query_type.name == "Foo"
expect_ast_node(extended_schema, extension_sdl)
def adds_new_root_types_via_schema_extension():
schema = build_schema(
"""
type Query
type MutationRoot
"""
)
extension_sdl = dedent(
"""
extend schema {
mutation: MutationRoot
}
"""
)
extended_schema = extend_schema(schema, parse(extension_sdl))
mutation_type = assert_object_type(extended_schema.mutation_type)
assert mutation_type.name == "MutationRoot"
expect_extension_ast_nodes(extended_schema, extension_sdl)
def adds_directive_via_schema_extension():
schema = build_schema(
"""
type Query
directive @foo on SCHEMA
"""
)
extension_sdl = dedent(
"""
extend schema @foo
"""
)
extended_schema = extend_schema(schema, parse(extension_sdl))
expect_extension_ast_nodes(extended_schema, extension_sdl)
def adds_multiple_new_root_types_via_schema_extension():
schema = build_schema("type Query")
extend_ast = parse(
"""
extend schema {
mutation: Mutation
subscription: Subscription
}
type Mutation
type Subscription
"""
)
extended_schema = extend_schema(schema, extend_ast)
mutation_type = assert_object_type(extended_schema.mutation_type)
assert mutation_type.name == "Mutation"
subscription_type = assert_object_type(extended_schema.subscription_type)
assert subscription_type.name == "Subscription"
def applies_multiple_schema_extensions():
schema = build_schema("type Query")
extend_ast = parse(
"""
extend schema {
mutation: Mutation
}
type Mutation
extend schema {
subscription: Subscription
}
type Subscription
"""
)
extended_schema = extend_schema(schema, extend_ast)
mutation_type = assert_object_type(extended_schema.mutation_type)
assert mutation_type.name == "Mutation"
subscription_type = assert_object_type(extended_schema.subscription_type)
assert subscription_type.name == "Subscription"
def schema_extension_ast_are_available_from_schema_object():
schema = build_schema(
"""
type Query
directive @foo on SCHEMA
"""
)
extend_ast = parse(
"""
extend schema {
mutation: Mutation
}
type Mutation
extend schema {
subscription: Subscription
}
type Subscription
"""
)
extended_schema = extend_schema(schema, extend_ast)
second_extend_ast = parse("extend schema @foo")
extended_twice_schema = extend_schema(extended_schema, second_extend_ast)
expect_extension_ast_nodes(
extended_twice_schema,
dedent(
"""
extend schema {
mutation: Mutation
}
extend schema {
subscription: Subscription
}
extend schema @foo
"""
),
)
| |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
# Generated model modules import each other; presumably these fallbacks exist
# to survive circular imports during package initialization (TODO confirm):
# if the direct import fails, the partially-initialized module is fetched
# from ``sys.modules`` instead.
try:
    from onshape_client.oas.models import bt_assembly_feature_list_response1174_all_of
except ImportError:
    bt_assembly_feature_list_response1174_all_of = sys.modules[
        "onshape_client.oas.models.bt_assembly_feature_list_response1174_all_of"
    ]
try:
    from onshape_client.oas.models import bt_feature_api_base1430
except ImportError:
    bt_feature_api_base1430 = sys.modules[
        "onshape_client.oas.models.bt_feature_api_base1430"
    ]
try:
    from onshape_client.oas.models import bt_feature_state1688
except ImportError:
    bt_feature_state1688 = sys.modules["onshape_client.oas.models.bt_feature_state1688"]
try:
    from onshape_client.oas.models import btm_assembly_feature887
except ImportError:
    btm_assembly_feature887 = sys.modules[
        "onshape_client.oas.models.btm_assembly_feature887"
    ]
class BTAssemblyFeatureListResponse1174(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model declares no enum-restricted and no range/regex-validated
    # attributes, hence both dicts are empty.
    allowed_values = {}

    validations = {}

    # None here means undeclared (additional) properties are not typed.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "feature_states": (
                {str: (bt_feature_state1688.BTFeatureState1688,)},
            ),  # noqa: E501
            "features": (
                [btm_assembly_feature887.BTMAssemblyFeature887],
            ),  # noqa: E501
            "is_complete": (bool,),  # noqa: E501
            "library_version": (int,),  # noqa: E501
            "microversion_skew": (bool,),  # noqa: E501
            "reject_microversion_skew": (bool,),  # noqa: E501
            "serialization_version": (str,),  # noqa: E501
            "source_microversion": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # No discriminator: this model is not polymorphic by field value.
        return None

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "feature_states": "featureStates",  # noqa: E501
        "features": "features",  # noqa: E501
        "is_complete": "isComplete",  # noqa: E501
        "library_version": "libraryVersion",  # noqa: E501
        "microversion_skew": "microversionSkew",  # noqa: E501
        "reject_microversion_skew": "rejectMicroversionSkew",  # noqa: E501
        "serialization_version": "serializationVersion",  # noqa: E501
        "source_microversion": "sourceMicroversion",  # noqa: E501
    }

    # Internal bookkeeping attributes that must never be treated as
    # OpenAPI model properties.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_assembly_feature_list_response1174.BTAssemblyFeatureListResponse1174 - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            bt_type (str): [optional]  # noqa: E501
            feature_states ({str: (bt_feature_state1688.BTFeatureState1688,)}): [optional]  # noqa: E501
            features ([btm_assembly_feature887.BTMAssemblyFeature887]): [optional]  # noqa: E501
            is_complete (bool): [optional]  # noqa: E501
            library_version (int): [optional]  # noqa: E501
            microversion_skew (bool): [optional]  # noqa: E501
            reject_microversion_skew (bool): [optional]  # noqa: E501
            serialization_version (str): [optional]  # noqa: E501
            source_microversion (str): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        # Arguments forwarded unchanged into every composed-model instance.
        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute incoming values across the composed (allOf) models.
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_assembly_feature_list_response1174_all_of.BTAssemblyFeatureListResponse1174AllOf,
                bt_feature_api_base1430.BTFeatureApiBase1430,
            ],
            "oneOf": [],
        }
| |
import collections
import os
import re
from os.path import dirname
from pathlib import Path
def handle_negation(file_path, rules):
    """Evaluate *rules* against *file_path*, honoring negation.

    Mirrors git semantics: the verdict of the LAST rule that matches
    wins, so a later ``!pattern`` can re-include an ignored path.
    Returns False when no rule matches.
    """
    for rule in reversed(rules):
        if rule.match(file_path):
            return not rule.negation
    return False
def parse_gitignore(full_path, base_dir=None):
    """Parse a .gitignore file into a matcher ``callable(path) -> bool``.

    *base_dir* defaults to the directory containing the .gitignore file.
    """
    if base_dir is None:
        base_dir = dirname(full_path)
    base = Path(base_dir).resolve()
    rules = []
    with open(full_path) as ignore_file:
        for line_number, raw_line in enumerate(ignore_file, start=1):
            rule = rule_from_pattern(
                raw_line.rstrip('\n'),
                base_path=base,
                source=(full_path, line_number))
            if rule:
                rules.append(rule)
    if any(r.negation for r in rules):
        # We have negation rules. We can't use a simple "any" to evaluate
        # them, because later rules override earlier rules.
        return lambda file_path: handle_negation(file_path, rules)
    return lambda file_path: any(r.match(file_path) for r in rules)
def rule_from_pattern(pattern, base_path=None, source=None):
    """
    Take a .gitignore match pattern, such as "*.py[cod]" or "**/*.bak",
    and return an IgnoreRule suitable for matching against files and
    directories. Patterns which do not match files, such as comments
    and blank lines, will return None.
    Because git allows for nested .gitignore files, a base_path value
    is required for correct behavior. The base path should be absolute.
    """
    if base_path and base_path != Path(base_path).resolve():
        raise ValueError('base_path must be absolute')
    # Store the exact pattern for our repr and string functions
    orig_pattern = pattern
    # Early returns follow
    # Discard comments and separators
    if pattern.strip() == '' or pattern[0] == '#':
        return
    # Discard anything with more than two consecutive asterisks
    if pattern.find('***') > -1:
        return
    # Strip leading bang before examining double asterisks
    if pattern[0] == '!':
        negation = True
        pattern = pattern[1:]
    else:
        negation = False
    # Bug fix: a bare "!" leaves an empty pattern behind; bail out instead
    # of crashing with IndexError on the pattern[-1] access below.
    if not pattern:
        return
    # Discard anything with invalid double-asterisks -- they can appear
    # at the start or the end, or be surrounded by slashes
    for m in re.finditer(r'\*\*', pattern):
        start_index = m.start()
        if (start_index != 0 and start_index != len(pattern) - 2 and
                (pattern[start_index - 1] != '/' or
                 pattern[start_index + 2] != '/')):
            return
    # Special-casing '/', which doesn't match any files or directories
    if pattern.rstrip() == '/':
        return
    directory_only = pattern[-1] == '/'
    # A slash is a sign that we're tied to the base_path of our rule
    # set.
    anchored = '/' in pattern[:-1]
    if pattern[0] == '/':
        pattern = pattern[1:]
    if pattern and pattern[0] == '*' and len(pattern) >= 2 and pattern[1] == '*':
        pattern = pattern[2:]
        anchored = False
    if pattern and pattern[0] == '/':
        pattern = pattern[1:]
    if pattern and pattern[-1] == '/':
        pattern = pattern[:-1]
    # Bug fix: patterns such as "**" or "**/" reduce to an empty string at
    # this point; previously they raised IndexError. Treat them as rules
    # that match nothing rather than crashing the parser.
    if not pattern:
        return
    # patterns with leading hashes are escaped with a backslash in front, unescape it
    if pattern[0] == '\\' and len(pattern) > 1 and pattern[1] == '#':
        pattern = pattern[1:]
    # trailing spaces are ignored unless they are escaped with a backslash
    i = len(pattern)-1
    striptrailingspaces = True
    while i > 1 and pattern[i] == ' ':
        if pattern[i-1] == '\\':
            # Escaped space: keep it, drop the backslash.
            pattern = pattern[:i-1] + pattern[i:]
            i = i - 1
            striptrailingspaces = False
        else:
            if striptrailingspaces:
                pattern = pattern[:i]
            i = i - 1
    regex = fnmatch_pathname_to_regex(pattern, directory_only)
    if anchored:
        regex = ''.join(['^', regex])
    return IgnoreRule(
        pattern=orig_pattern,
        regex=regex,
        negation=negation,
        directory_only=directory_only,
        anchored=anchored,
        base_path=Path(base_path) if base_path else None,
        source=source
    )
# Matches runs of escaped trailing spaces (helper kept for compatibility).
whitespace_re = re.compile(r'(\\ )+$')

IGNORE_RULE_FIELDS = [
    'pattern', 'regex',  # Basic values
    'negation', 'directory_only', 'anchored',  # Behavior flags
    'base_path',  # Meaningful for gitignore-style behavior
    'source'  # (file, line) tuple for reporting
]


class IgnoreRule(collections.namedtuple('IgnoreRule_', IGNORE_RULE_FIELDS)):
    """One compiled .gitignore rule: a regex plus its behavior flags."""

    def __str__(self):
        return self.pattern

    def __repr__(self):
        return "IgnoreRule('" + self.pattern + "')"

    def match(self, abs_path):
        """Return True when this rule's regex matches *abs_path*."""
        if self.base_path:
            # Anchored rule sets compare against the path relative to the
            # directory that owned the .gitignore file.
            rel_path = str(Path(abs_path).resolve().relative_to(self.base_path))
        else:
            rel_path = str(Path(abs_path))
        # Normalize away a leading './' so relative paths compare cleanly.
        if rel_path.startswith('./'):
            rel_path = rel_path[2:]
        return re.search(self.regex, rel_path) is not None
# Frustratingly, python's fnmatch doesn't provide the FNM_PATHNAME
# option that .gitignore's behavior depends on.
def fnmatch_pathname_to_regex(pattern, directory_only: bool):
    """
    Implements fnmatch style-behavior, as though with FNM_PATHNAME flagged;
    the path separator will not match shell-style '*' and '.' wildcards.
    """
    i, n = 0, len(pattern)

    # Regex-escaped path separator characters for this platform.
    # Bug fix: these are interpolated directly into character classes
    # below, where '|' is a literal -- the previous '|'.join(...) made
    # the classes also (not) match a literal '|' character on platforms
    # with an os.altsep. Plain concatenation is the correct form.
    seps = [re.escape(os.sep)]
    if os.altsep is not None:
        seps.append(re.escape(os.altsep))
    seps_group = '[' + ''.join(seps) + ']'
    nonsep = r'[^{}]'.format(''.join(seps))

    res = []
    while i < n:
        c = pattern[i]
        i += 1
        if c == '*':
            try:
                if pattern[i] == '*':
                    # '**' crosses directory boundaries; an optional
                    # separator follows so '**/x' also matches 'x'.
                    i += 1
                    res.append('.*')
                    if pattern[i] == '/':
                        i += 1
                        res.append(''.join([seps_group, '?']))
                else:
                    res.append(''.join([nonsep, '*']))
            except IndexError:
                # '*' (or '**') at the very end of the pattern.
                res.append(''.join([nonsep, '*']))
        elif c == '?':
            res.append(nonsep)
        elif c == '/':
            res.append(seps_group)
        elif c == '[':
            # Translate a shell character class, honoring '!' negation
            # and a literal ']' appearing first inside the class.
            j = i
            if j < n and pattern[j] == '!':
                j += 1
            if j < n and pattern[j] == ']':
                j += 1
            while j < n and pattern[j] != ']':
                j += 1
            if j >= n:
                # Unterminated class: treat '[' as a literal character.
                res.append('\\[')
            else:
                stuff = pattern[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = ''.join(['^', stuff[1:]])
                elif stuff[0] == '^':
                    stuff = ''.join('\\' + stuff)
                res.append('[{}]'.format(stuff))
        else:
            res.append(re.escape(c))
    res.insert(0, '(?ms)')
    if not directory_only:
        res.append('$')
    return ''.join(res)
| |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This stuff can't live in test/unit/__init__.py due to its swob dependency.
from collections import defaultdict, namedtuple
from hashlib import md5
from swift.common import swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import is_user_meta, \
is_object_transient_sysmeta, resolve_etag_is_at_header
from swift.common.swob import HTTPNotImplemented
from swift.common.utils import split_path
from test.unit import FakeLogger, FakeRing
class LeakTrackingIter(object):
    """Wrap a WSGI app_iter and report close() calls via a callback.

    Lets tests detect response iterators that were never closed.
    """

    def __init__(self, inner_iter, mark_closed, path):
        self.inner_iter = inner_iter
        self.mark_closed = mark_closed
        self.path = path

    def __iter__(self):
        yield from self.inner_iter

    def close(self):
        # Notify the owner (FakeSwift) that this response was closed.
        self.mark_closed(self.path)
# One recorded request made against FakeSwift.
FakeSwiftCall = namedtuple('FakeSwiftCall', ['method', 'path', 'headers'])


class FakeSwift(object):
    """
    A good-enough fake Swift proxy server to use in testing middleware.

    Responses are registered per (method, path) via register() or
    register_responses(); every request made through __call__ is recorded
    and exposed via the calls/headers properties.
    """
    ALLOWED_METHODS = [
        'PUT', 'POST', 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'REPLICATE']

    def __init__(self):
        self._calls = []
        self._unclosed_req_paths = defaultdict(int)
        self.req_method_paths = []
        self.swift_sources = []
        self.txn_ids = []
        self.uploaded = {}
        # mapping of (method, path) --> (response class, headers, body)
        self._responses = {}
        self.logger = FakeLogger('fake-swift')
        self.account_ring = FakeRing()
        self.container_ring = FakeRing()
        self.get_object_ring = lambda policy_index: FakeRing()

    def _find_response(self, method, path):
        """Look up a registered response; list registrations are consumed
        one per call (see register_responses)."""
        resp = self._responses[(method, path)]
        if isinstance(resp, list):
            try:
                resp = resp.pop(0)
            except IndexError:
                raise IndexError("Didn't find any more %r "
                                 "in allowed responses" % (
                                     (method, path),))
        return resp

    def __call__(self, env, start_response):
        method = env['REQUEST_METHOD']
        if method not in self.ALLOWED_METHODS:
            raise HTTPNotImplemented()

        path = env['PATH_INFO']
        _, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
                                       rest_with_last=True)
        if env.get('QUERY_STRING'):
            path += '?' + env['QUERY_STRING']

        if 'swift.authorize' in env:
            resp = env['swift.authorize'](swob.Request(env))
            if resp:
                return resp(env, start_response)

        req = swob.Request(env)
        self.swift_sources.append(env.get('swift.source'))
        self.txn_ids.append(env.get('swift.trans_id'))

        try:
            resp_class, raw_headers, body = self._find_response(method, path)
            headers = HeaderKeyDict(raw_headers)
        except KeyError:
            # Fall back in order: path without query string, GET registered
            # for a HEAD request, or a previously "uploaded" object.
            if (env.get('QUERY_STRING')
                    and (method, env['PATH_INFO']) in self._responses):
                resp_class, raw_headers, body = self._find_response(
                    method, env['PATH_INFO'])
                headers = HeaderKeyDict(raw_headers)
            elif method == 'HEAD' and ('GET', path) in self._responses:
                resp_class, raw_headers, body = self._find_response(
                    'GET', path)
                body = None
                headers = HeaderKeyDict(raw_headers)
            elif method == 'GET' and obj and path in self.uploaded:
                resp_class = swob.HTTPOk
                headers, body = self.uploaded[path]
            else:
                raise KeyError("Didn't find %r in allowed responses" % (
                    (method, path),))

        # simulate object PUT
        if method == 'PUT' and obj:
            put_body = ''.join(iter(env['wsgi.input'].read, ''))
            if 'swift.callback.update_footers' in env:
                footers = HeaderKeyDict()
                env['swift.callback.update_footers'](footers)
                req.headers.update(footers)
            etag = md5(put_body).hexdigest()
            headers.setdefault('Etag', etag)
            headers.setdefault('Content-Length', len(put_body))

            # keep it for subsequent GET requests later
            self.uploaded[path] = (dict(req.headers), put_body)
            if "CONTENT_TYPE" in env:
                self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"]

        # simulate object POST
        elif method == 'POST' and obj:
            metadata, data = self.uploaded.get(path, ({}, None))
            # select items to keep from existing...
            new_metadata = dict(
                (k, v) for k, v in metadata.items()
                if (not is_user_meta('object', k) and not
                    is_object_transient_sysmeta(k)))
            # apply from new
            # Bug fix: "k.lower == 'content-type'" compared the bound
            # method object itself to a string (always False), so a
            # Content-Type sent with the POST was silently dropped.
            new_metadata.update(
                dict((k, v) for k, v in req.headers.items()
                     if (is_user_meta('object', k) or
                         is_object_transient_sysmeta(k) or
                         k.lower() == 'content-type')))
            self.uploaded[path] = new_metadata, data

        # note: tests may assume this copy of req_headers is case insensitive
        # so we deliberately use a HeaderKeyDict
        self._calls.append(
            FakeSwiftCall(method, path, HeaderKeyDict(req.headers)))

        # Apply conditional etag overrides
        conditional_etag = resolve_etag_is_at_header(req, headers)

        # range requests ought to work, hence conditional_response=True
        if isinstance(body, list):
            resp = resp_class(
                req=req, headers=headers, app_iter=body,
                conditional_response=req.method in ('GET', 'HEAD'),
                conditional_etag=conditional_etag)
        else:
            resp = resp_class(
                req=req, headers=headers, body=body,
                conditional_response=req.method in ('GET', 'HEAD'),
                conditional_etag=conditional_etag)
        wsgi_iter = resp(env, start_response)
        self.mark_opened(path)
        return LeakTrackingIter(wsgi_iter, self.mark_closed, path)

    def mark_opened(self, path):
        self._unclosed_req_paths[path] += 1

    def mark_closed(self, path):
        self._unclosed_req_paths[path] -= 1

    @property
    def unclosed_requests(self):
        return {path: count
                for path, count in self._unclosed_req_paths.items()
                if count > 0}

    @property
    def calls(self):
        return [(method, path) for method, path, headers in self._calls]

    @property
    def headers(self):
        return [headers for method, path, headers in self._calls]

    @property
    def calls_with_headers(self):
        return self._calls

    @property
    def call_count(self):
        return len(self._calls)

    def register(self, method, path, response_class, headers, body=''):
        self._responses[(method, path)] = (response_class, headers, body)

    def register_responses(self, method, path, responses):
        self._responses[(method, path)] = list(responses)
class FakeAppThatExcepts(object):
    """WSGI application double that always raises, for error-path tests."""

    MESSAGE = "We take exception to that!"

    def __init__(self, exception_class=Exception):
        # The exception type to raise on every request.
        self.exception_class = exception_class

    def __call__(self, env, start_response):
        raise self.exception_class(self.MESSAGE)
| |
#!/usr/bin/env python
import argparse
import datetime
import io
from nltk.translate import bleu_score
import numpy
import progressbar
import six
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
# Reserved vocabulary ids; load_vocabulary() assigns real words ids >= 2.
UNK = 0  # id of the unknown-word token <UNK>
EOS = 1  # id of the end-of-sequence token <EOS>
def sequence_embed(embed, xs):
    """Embed a list of variable-length id sequences in one batched call.

    The sequences are concatenated, embedded together (one matrix
    multiply instead of one per sequence), then split back into
    per-sequence chunks at the original boundaries.
    """
    section_ends = numpy.cumsum([len(seq) for seq in xs[:-1]])
    flat_embeddings = embed(F.concat(xs, axis=0))
    return F.split_axis(flat_embeddings, section_ends, 0)
class Seq2seq(chainer.Chain):
    """Encoder-decoder translation model: NStepLSTM encoder and decoder
    over embedded word ids, with a linear output projection W."""

    def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
        super(Seq2seq, self).__init__()
        with self.init_scope():
            self.embed_x = L.EmbedID(n_source_vocab, n_units)
            self.embed_y = L.EmbedID(n_target_vocab, n_units)
            self.encoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)
            self.decoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)
            self.W = L.Linear(n_units, n_target_vocab)
        self.n_layers = n_layers
        self.n_units = n_units

    def forward(self, xs, ys):
        """Compute the softmax cross-entropy loss for a minibatch of
        (source, target) id-array pairs; also reports loss and perplexity."""
        # Source sequences are fed reversed into the encoder.
        xs = [x[::-1] for x in xs]

        eos = self.xp.array([EOS], numpy.int32)
        # Decoder input starts with EOS; decoder target ends with EOS.
        ys_in = [F.concat([eos, y], axis=0) for y in ys]
        ys_out = [F.concat([y, eos], axis=0) for y in ys]

        # Both xs and ys_in are lists of arrays.
        exs = sequence_embed(self.embed_x, xs)
        eys = sequence_embed(self.embed_y, ys_in)

        batch = len(xs)
        # None represents a zero vector in an encoder.
        hx, cx, _ = self.encoder(None, None, exs)
        _, _, os = self.decoder(hx, cx, eys)

        # It is faster to concatenate data before calculating loss
        # because only one matrix multiplication is called.
        concat_os = F.concat(os, axis=0)
        concat_ys_out = F.concat(ys_out, axis=0)
        loss = F.sum(F.softmax_cross_entropy(
            self.W(concat_os), concat_ys_out, reduce='no')) / batch

        chainer.report({'loss': loss}, self)
        n_words = concat_ys_out.shape[0]
        perp = self.xp.exp(loss.array * batch / n_words)
        chainer.report({'perp': perp}, self)
        return loss

    def translate(self, xs, max_length=100):
        """Greedily decode translations (argmax at each step, no beam
        search) for the source id arrays *xs*, truncating at EOS."""
        batch = len(xs)
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            xs = [x[::-1] for x in xs]
            exs = sequence_embed(self.embed_x, xs)
            h, c, _ = self.encoder(None, None, exs)
            ys = self.xp.full(batch, EOS, numpy.int32)
            result = []
            for i in range(max_length):
                eys = self.embed_y(ys)
                eys = F.split_axis(eys, batch, 0)
                h, c, ys = self.decoder(h, c, eys)
                cys = F.concat(ys, axis=0)
                wy = self.W(cys)
                ys = self.xp.argmax(wy.array, axis=1).astype(numpy.int32)
                result.append(ys)

        # Using `xp.concatenate(...)` instead of `xp.stack(result)` here to
        # support NumPy 1.9.
        result = chainer.get_device('@numpy').send(
            self.xp.concatenate([x[None, :] for x in result]).T)

        # Remove EOS taggs
        outs = []
        for y in result:
            inds = numpy.argwhere(y == EOS)
            if len(inds) > 0:
                y = y[:inds[0, 0]]
            outs.append(y)
        return outs
@chainer.dataset.converter()
def convert(batch, device):
    """Batch converter: split (source, target) pairs and move them to
    *device*.  Arrays are concatenated before the transfer so only one
    host-to-device copy is issued per side, then split back."""
    def to_device_batch(batch):
        if device is None:
            return batch
        src_xp = chainer.backend.get_array_module(*batch)
        xp = device.xp
        concat = src_xp.concatenate(batch, axis=0)
        sections = list(numpy.cumsum(
            [len(x) for x in batch[:-1]], dtype=numpy.int32))
        concat_dst = device.send(concat)
        batch_dst = xp.split(concat_dst, sections)
        return batch_dst

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
class CalculateBleu(chainer.training.Extension):
    """Trainer extension reporting corpus BLEU of the model on test_data."""

    trigger = 1, 'epoch'
    priority = chainer.training.PRIORITY_WRITER

    def __init__(
            self, model, test_data, key, device, batch=100, max_length=100):
        self.model = model
        self.test_data = test_data
        self.key = key          # report key, e.g. 'validation/main/bleu'
        self.batch = batch      # evaluation minibatch size
        self.device = device
        self.max_length = max_length

    def __call__(self, trainer):
        device = self.device

        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            for i in range(0, len(self.test_data), self.batch):
                sources, targets = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in targets])

                sources = [device.send(x) for x in sources]
                ys = [y.tolist()
                      for y in self.model.translate(sources, self.max_length)]
                hypotheses.extend(ys)

        # method1 smoothing avoids zero scores on short hypotheses.
        bleu = bleu_score.corpus_bleu(
            references, hypotheses,
            smoothing_function=bleu_score.SmoothingFunction().method1)
        chainer.report({self.key: bleu})
def count_lines(path):
    """Return the number of lines in the UTF-8 text file at *path*."""
    with io.open(path, encoding='utf-8') as f:
        return sum(1 for _ in f)
def load_vocabulary(path):
    """Load a vocabulary file (one word per line) into a word -> id dict.

    Ids 0 and 1 are reserved for the special <UNK> and <EOS> tokens, so
    regular words are numbered starting at 2, in file order.
    """
    word_ids = {}
    with io.open(path, encoding='utf-8') as f:
        for index, line in enumerate(f):
            # +2 for UNK and EOS
            word_ids[line.strip()] = index + 2
    word_ids['<UNK>'] = 0
    word_ids['<EOS>'] = 1
    return word_ids
def load_data(vocabulary, path):
    """Read *path* and convert each line into an int32 array of word ids.

    Unknown words map to UNK.  Shows a progress bar sized by a first
    pass that counts the lines.
    """
    n_lines = count_lines(path)
    bar = progressbar.ProgressBar()
    print('loading...: %s' % path)
    data = []
    with io.open(path, encoding='utf-8') as f:
        for line in bar(f, max_value=n_lines):
            token_ids = [vocabulary.get(word, UNK)
                         for word in line.strip().split()]
            data.append(numpy.array(token_ids, numpy.int32))
    return data
def load_data_using_dataset_api(
        src_vocab, src_path, target_vocab, target_path, filter_func):
    """Build a lazily-transformed dataset of (source, target) id arrays.

    Unlike load_data, the raw text stays on disk via TextDataset and each
    line pair is converted to int32 id arrays on access, which keeps host
    memory usage low.  *filter_func* decides which line pairs to keep.
    """

    def _line_to_ids(vocabulary, line):
        # Tokenize on whitespace; unknown words map to UNK.
        return numpy.array(
            [vocabulary.get(word, UNK) for word in line.strip().split()],
            numpy.int32)

    def _transform(example):
        source, target = example
        return (
            _line_to_ids(src_vocab, source),
            _line_to_ids(target_vocab, target)
        )

    text_dataset = chainer.datasets.TextDataset(
        [src_path, target_path],
        encoding='utf-8',
        filter_func=filter_func
    )
    return chainer.datasets.TransformDataset(text_dataset, _transform)
def calculate_unknown_ratio(data):
    """Return the fraction of tokens in *data* that are the UNK id.

    *data* is a list of int id arrays.  Returns 0.0 for an empty or
    all-empty dataset (previously this raised ZeroDivisionError).
    """
    unknown = sum((s == UNK).sum() for s in data)
    total = sum(s.size for s in data)
    return unknown / total if total else 0.0
def main():
    """Train the seq2seq model on a parallel corpus given on the command
    line, optionally evaluating BLEU and sample translations on a
    validation set at regular intervals."""
    parser = argparse.ArgumentParser(description='Chainer example: seq2seq')
    parser.add_argument('SOURCE', help='source sentence list')
    parser.add_argument('TARGET', help='target sentence list')
    parser.add_argument('SOURCE_VOCAB', help='source vocabulary file')
    parser.add_argument('TARGET_VOCAB', help='target vocabulary file')
    parser.add_argument('--validation-source',
                        help='source sentence list for validation')
    parser.add_argument('--validation-target',
                        help='target sentence list for validation')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='number of sentence pairs in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='number of sweeps over the dataset to train')
    parser.add_argument('--resume', '-r', type=str,
                        help='resume the training from snapshot')
    parser.add_argument('--save', '-s', type=str,
                        help='save a snapshot of the training')
    parser.add_argument('--unit', '-u', type=int, default=1024,
                        help='number of units')
    parser.add_argument('--layer', '-l', type=int, default=3,
                        help='number of layers')
    parser.add_argument('--use-dataset-api', default=False,
                        action='store_true',
                        help='use TextDataset API to reduce CPU memory usage')
    parser.add_argument('--min-source-sentence', type=int, default=1,
                        help='minimium length of source sentence')
    parser.add_argument('--max-source-sentence', type=int, default=50,
                        help='maximum length of source sentence')
    parser.add_argument('--min-target-sentence', type=int, default=1,
                        help='minimium length of target sentence')
    parser.add_argument('--max-target-sentence', type=int, default=50,
                        help='maximum length of target sentence')
    parser.add_argument('--log-interval', type=int, default=200,
                        help='number of iteration to show log')
    parser.add_argument('--validation-interval', type=int, default=4000,
                        help='number of iteration to evlauate the model '
                        'with validation dataset')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--out', '-o', default='result',
                        help='directory to output the result')
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    device = chainer.get_device(args.device)

    print('Device: {}'.format(device))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # If the device is a ChainerX CUDA device, use the shared device memory
    # pool between ChainerX and CuPy.
    if device.xp is chainerx and device.device.backend.name == 'cuda':
        # TODO(niboshi): The API is provisional.
        chainerx._cuda.cupy_share_allocator()

    # Load pre-processed dataset
    print('[{}] Loading dataset... (this may take several minutes)'.format(
        datetime.datetime.now()))
    source_ids = load_vocabulary(args.SOURCE_VOCAB)
    target_ids = load_vocabulary(args.TARGET_VOCAB)

    if args.use_dataset_api:
        # By using TextDataset, you can avoid loading whole dataset on memory.
        # This significantly reduces the host memory usage.
        def _filter_func(s, t):
            sl = len(s.strip().split())  # number of words in source line
            tl = len(t.strip().split())  # number of words in target line
            return (
                args.min_source_sentence <= sl <= args.max_source_sentence and
                args.min_target_sentence <= tl <= args.max_target_sentence)

        train_data = load_data_using_dataset_api(
            source_ids, args.SOURCE,
            target_ids, args.TARGET,
            _filter_func,
        )
    else:
        # Load all records on memory.
        train_source = load_data(source_ids, args.SOURCE)
        train_target = load_data(target_ids, args.TARGET)
        assert len(train_source) == len(train_target)

        # Keep only pairs whose lengths fall inside the configured bounds.
        train_data = [
            (s, t)
            for s, t in six.moves.zip(train_source, train_target)
            if (args.min_source_sentence <= len(s) <= args.max_source_sentence
                and
                args.min_target_sentence <= len(t) <= args.max_target_sentence)
        ]
    print('[{}] Dataset loaded.'.format(datetime.datetime.now()))

    if not args.use_dataset_api:
        # Skip printing statistics when using TextDataset API, as it is slow.
        train_source_unknown = calculate_unknown_ratio(
            [s for s, _ in train_data])
        train_target_unknown = calculate_unknown_ratio(
            [t for _, t in train_data])

        print('Source vocabulary size: %d' % len(source_ids))
        print('Target vocabulary size: %d' % len(target_ids))
        print('Train data size: %d' % len(train_data))
        print('Train source unknown ratio: %.2f%%' % (
            train_source_unknown * 100))
        print('Train target unknown ratio: %.2f%%' % (
            train_target_unknown * 100))

    # Inverse vocabularies (id -> word) for printing sample translations.
    target_words = {i: w for w, i in target_ids.items()}
    source_words = {i: w for w, i in source_ids.items()}

    # Set the current device
    device.use()

    # Setup model
    model = Seq2seq(args.layer, len(source_ids), len(target_ids), args.unit)
    model.to_device(device)

    # Setup optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Setup iterator
    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)

    # Setup updater and trainer
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=convert, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(extensions.LogReport(
        trigger=(args.log_interval, 'iteration')))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'main/loss', 'main/perp',
         'validation/main/bleu', 'elapsed_time']),
        trigger=(args.log_interval, 'iteration'))
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.iteration}'),
        trigger=(args.validation_interval, 'iteration'))

    if args.validation_source and args.validation_target:
        test_source = load_data(source_ids, args.validation_source)
        test_target = load_data(target_ids, args.validation_target)
        assert len(test_source) == len(test_target)
        test_data = list(six.moves.zip(test_source, test_target))
        test_data = [(s, t) for s, t in test_data if 0 < len(s) and 0 < len(t)]
        test_source_unknown = calculate_unknown_ratio(
            [s for s, _ in test_data])
        test_target_unknown = calculate_unknown_ratio(
            [t for _, t in test_data])

        print('Validation data: %d' % len(test_data))
        print('Validation source unknown ratio: %.2f%%' %
              (test_source_unknown * 100))
        print('Validation target unknown ratio: %.2f%%' %
              (test_target_unknown * 100))

        @chainer.training.make_extension()
        def translate(trainer):
            # Print one random validation pair plus the model's translation.
            source, target = test_data[numpy.random.choice(len(test_data))]
            result = model.translate([model.xp.array(source)])[0]

            source_sentence = ' '.join([source_words[x] for x in source])
            target_sentence = ' '.join([target_words[y] for y in target])
            result_sentence = ' '.join([target_words[y] for y in result])
            print('# source : ' + source_sentence)
            print('# result : ' + result_sentence)
            print('# expect : ' + target_sentence)

        trainer.extend(
            translate, trigger=(args.validation_interval, 'iteration'))
        trainer.extend(
            CalculateBleu(
                model, test_data, 'validation/main/bleu', device),
            trigger=(args.validation_interval, 'iteration'))

    if args.resume is not None:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    print('start training')
    trainer.run()

    if args.save is not None:
        # Save a snapshot
        chainer.serializers.save_npz(args.save, trainer)


if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
import theano
from collections import OrderedDict
import experiments.nmt
from experiments.nmt import\
RNNEncoderDecoder,\
prototype_phrase_state,\
parse_input
from experiments.nmt.numpy_compat import argpartition
logger = logging.getLogger(__name__)
def parse_output(word2idx, line, eos_id, unk_id, raise_unk=False):
    """Convert a whitespace-tokenized line into an id sequence ending in EOS.

    Returns (seq, words) where seq is an int64 array of length
    len(words) + 1 and words is the token list.  When raise_unk is set,
    any token that maps to unk_id raises an Exception.
    """
    words = line.split()
    seq = numpy.zeros(len(words) + 1, dtype='int64')
    for position, word in enumerate(words):
        token_id = word2idx.get(word, unk_id)
        # Assumes that there are no words with
        # a proper index, but no vector representation.
        # It may crash otherwise.
        if token_id == unk_id and raise_unk:
            raise Exception("Unknown word {}".format(word))
        seq[position] = token_id
    seq[-1] = eos_id
    return seq, words
# From score.py
def pack(seqs, return_lengths=False):
    """Pack variable-length id sequences into padded time-major arrays.

    Returns (x, x_mask) of shape (max_len, num) -- zero-padded ids and a
    1.0/0.0 validity mask -- plus the per-sequence lengths as an array
    when return_lengths is True.
    """
    num = len(seqs)
    # Bug fix: wrap map() in list().  Under Python 3 map() returns a
    # one-shot iterator, so max(lengths) exhausted it and
    # numpy.asarray(lengths) below saw an empty iterator.  list() is a
    # no-op change under Python 2 where map() already returned a list.
    lengths = list(map(len, seqs))
    max_len = max(lengths)
    x = numpy.zeros((num, max_len), dtype="int64")
    x_mask = numpy.zeros((num, max_len), dtype="float32")
    for i, seq in enumerate(seqs):
        x[i, :len(seq)] = seq
        x_mask[i, :len(seq)] = 1.0
    if not return_lengths:
        return x.T, x_mask.T
    else:
        return x.T, x_mask.T, numpy.asarray(lengths)
def update_dicts(indices, d, D, C, full):
    """Bring every word id in *indices* into the active dictionary *d*.

    Invariant: D is the disjoint union of d and C.  A word already in C
    is moved from C into d; a completely new word evicts an arbitrary
    entry of C from both C and D to make room.  Raises RuntimeError when
    d already holds *full* entries and another word arrives.
    """
    for word in indices:
        if word in d:
            continue
        if len(d) == full:
            raise RuntimeError("The dictionary is full")
        if word in D:
            # Word is in C (since D = d UNION C and word not in d):
            # move it out of the candidate pool.
            del C[word]
        else:
            # Brand-new word: evict an arbitrary candidate from C and D.
            evicted, _ = C.popitem()
            del D[evicted]
            D[word] = 0
        d[word] = 0
def compute_alignment(src_seqs, trg_seqs, alignment_fns, batchsize):
    # Compute hard alignments (for each target position, the index of the
    # aligned source position) by summing the soft alignment matrices of an
    # ensemble of models, zeroing the source <eos> column, and taking the
    # argmax over source positions.  Result shape: (trg_len, num_examples).
    # NOTE: Python 2 only (uses xrange); alignment_fns are assumed to be the
    # compiled Theano functions returning (target_len, source_len, batch)
    # score arrays -- confirm against the caller.
    full_x, full_x_mask, full_x_lengths = pack(src_seqs, return_lengths=True)
    full_y, full_y_mask = pack(trg_seqs)
    assert full_x.shape[1] == full_y.shape[1]
    num_models = len(alignment_fns)
    # Accumulator grown batch-by-batch along the example axis.
    full_alignments = numpy.zeros((full_y.shape[0], full_x.shape[0], 0), dtype=numpy.float32)
    for batch_start in xrange(0, full_x.shape[1], batchsize):
        alignments = 0.
        x = full_x[:,batch_start:batch_start+batchsize]
        x_mask = full_x_mask[:,batch_start:batch_start+batchsize]
        x_lengths = full_x_lengths[batch_start:batch_start+batchsize]
        y = full_y[:,batch_start:batch_start+batchsize]
        y_mask = full_y_mask[:,batch_start:batch_start+batchsize]
        for j in xrange(num_models):
            # target_len x source_len x num_examples
            alignments += numpy.asarray(alignment_fns[j](x, y, x_mask, y_mask)[0])
        alignments[:,x_lengths-1,range(x.shape[1])] = 0. # Put source <eos> score to 0.
        full_alignments = numpy.concatenate((full_alignments, alignments), axis=2)
    hard_alignments = numpy.argmax(full_alignments, axis=1) # trg_len x num_examples
    return hard_alignments
def replace_unknown_words(src_word_seqs, trg_seqs, trg_word_seqs, hard_alignments,
        heuristic, mapping, unk_id, new_trans_file, n_best, full_trans_lines=None):
    """Rewrite UNK tokens in translations using hard alignments to the source.

    For each sentence, every target position whose index equals ``unk_id`` is
    replaced according to ``heuristic``:
      0 - copy the aligned source word;
      1 - look the aligned source word up in ``mapping`` (t-table), falling
          back to a copy when absent;
      2 - use the t-table only when the source word starts with a lowercase
          letter, otherwise copy.
    The rewritten sentence (without the trailing <eos>) is written to
    ``new_trans_file``; with ``n_best`` the original
    ``id ||| text ||| score`` framing is preserved.

    Fix: the Python-2-only ``print >>file`` statement and ``xrange`` are
    replaced with ``file.write`` and ``range`` (identical behavior under
    Python 2, and the function now also parses under Python 3); the manual
    space-joining loop is replaced by the equivalent ``' '.join``.
    """
    for i, src_words in enumerate(src_word_seqs):
        trans_words = trg_word_seqs[i]
        trans_seq = trg_seqs[i]
        hard_alignment = hard_alignments[:,i]
        if n_best:
            full_trans_line = full_trans_lines[i]
        new_trans_words = []
        for j in range(len(trans_words) - 1): # -1 : Don't write <eos>
            if trans_seq[j] == unk_id:
                UNK_src = src_words[hard_alignment[j]]
                if heuristic == 0: # Copy (ok when training with large vocabularies on en->fr, en->de)
                    new_trans_words.append(UNK_src)
                elif heuristic == 1:
                    # Use the most likely translation (with t-table). If not found, copy the source word.
                    # Ok for small vocabulary (~30k) models
                    if UNK_src in mapping:
                        new_trans_words.append(mapping[UNK_src])
                    else:
                        new_trans_words.append(UNK_src)
                elif heuristic == 2:
                    # Use t-table if the source word starts with a lowercase letter. Otherwise copy.
                    # Sometimes works better than other heuristics.
                    # NOTE(review): .decode('utf-8') assumes byte strings (Python 2 path only).
                    if UNK_src in mapping and UNK_src.decode('utf-8')[0].islower():
                        new_trans_words.append(mapping[UNK_src])
                    else:
                        new_trans_words.append(UNK_src)
            else:
                new_trans_words.append(trans_words[j])
        to_write = ' '.join(new_trans_words)
        if n_best:
            new_trans_file.write(full_trans_line[0].strip() + ' ||| ' +
                                 to_write + ' ||| ' +
                                 full_trans_line[2].strip() + '\n')
        else:
            new_trans_file.write(to_write + '\n')
def parse_args():
    """Build the command-line interface for UNK replacement and parse it.

    Returns the parsed ``argparse.Namespace``.  ``--state`` and ``--models``
    are mandatory; every other option has a default or may be omitted.
    """
    parser = argparse.ArgumentParser(
        "Replace UNK by original word")
    parser.add_argument("--state", required=True, help="State to use")
    parser.add_argument("--mapping",
                        help="Top1 unigram mapping (Source to target)")
    parser.add_argument("--source", help="File of source sentences")
    parser.add_argument("--trans", help="File of translated sentences")
    parser.add_argument("--new-trans", help="File to save new translations in")
    parser.add_argument("--verbose", action="store_true", default=False,
                        help="Be verbose")
    parser.add_argument("--heuristic", type=int, default=0,
                        help="0: copy, 1: Use dict, 2: Use dict only if lowercase \
Used only if a mapping is given. Default is 0.")
    parser.add_argument("--topn-file", type=str,
                        help="Binarized topn list for each source word (Vocabularies must correspond)")
    parser.add_argument("--num-common", type=int,
                        help="Number of always used common words (inc. <eos>, UNK) \
(With --less-transfer, total number of words)")
    parser.add_argument("--num-ttables", type=int,
                        help="Number of target words taken from the T-tables for each input word")
    parser.add_argument("--change-every", type=int, default=100,
                        help="Change the dicts at each multiple of this number. \
Use -1 to change only if full")
    parser.add_argument("--no-reset", action="store_true", default=False,
                        help="Do not reset the dicts when changing vocabularies")
    parser.add_argument("--batchsize", type=int, default=32,
                        help="(Maximum) batchsize")
    parser.add_argument("--n-best", action="store_true", default=False,
                        help="Trans file is a n-best list, where lines look like \
`20 ||| A sentence . ||| 0.353`")
    parser.add_argument("--models", nargs='+', required=True,
                        help="path to the models")
    parser.add_argument("changes", nargs="?", default="",
                        help="Changes to state")
    return parser.parse_args()
def main():
    """Replace UNK tokens in translated text with source-aligned words.

    Pipeline: load model state and vocabularies, build one alignment
    function per ensemble model, optionally restrict the target softmax to
    a rolling top-n vocabulary (--num-common/--num-ttables/--topn-file),
    then stream the source/translation files in batches, computing hard
    alignments and rewriting unknown target words.
    """
    args = parse_args()
    state = prototype_phrase_state()
    with open(args.state) as src:
        state.update(cPickle.load(src))
    state.update(eval("dict({})".format(args.changes)))
    logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
    # Backfill defaults for options absent from older pickled states.
    if 'rolling_vocab' not in state:
        state['rolling_vocab'] = 0
    if 'save_algo' not in state:
        state['save_algo'] = 0
    if 'save_gs' not in state:
        state['save_gs'] = 0
    if 'save_iter' not in state:
        state['save_iter'] = -1
    if 'var_src_len' not in state:
        state['var_src_len'] = False
    # Rolling-vocabulary mode: load the per-source-word top-n target lists.
    if args.num_common and args.num_ttables and args.topn_file:
        with open(args.topn_file, 'rb') as f:
            topn = cPickle.load(f) # Load dictionary (source word index : list of target word indices)
        for elt in topn:
            topn[elt] = topn[elt][:args.num_ttables] # Take the first args.num_ttables only
    num_models = len(args.models)
    rng = numpy.random.RandomState(state['seed'])
    enc_decs = []
    lm_models = []
    alignment_fns = []
    if args.num_common and args.num_ttables and args.topn_file:
        # Keep full copies of the output embedding / softmax parameters so
        # they can be re-sliced for each rolling vocabulary below.
        original_W_0_dec_approx_embdr = []
        original_W2_dec_deep_softmax = []
        original_b_dec_deep_softmax = []
    for i in xrange(num_models):
        enc_decs.append(RNNEncoderDecoder(state, rng, skip_init=True, compute_alignment=True))
        enc_decs[i].build()
        lm_models.append(enc_decs[i].create_lm_model())
        lm_models[i].load(args.models[i])
        alignment_fns.append(theano.function(inputs=enc_decs[i].inputs, outputs=[enc_decs[i].alignment], name="alignment_fn"))
        if args.num_common and args.num_ttables and args.topn_file:
            original_W_0_dec_approx_embdr.append(lm_models[i].params[lm_models[i].name2pos['W_0_dec_approx_embdr']].get_value())
            original_W2_dec_deep_softmax.append(lm_models[i].params[lm_models[i].name2pos['W2_dec_deep_softmax']].get_value())
            original_b_dec_deep_softmax.append(lm_models[i].params[lm_models[i].name2pos['b_dec_deep_softmax']].get_value())
            # Shrink the (large) shared parameters; they are re-set with the
            # sliced vocabulary at each dictionary change below.
            lm_models[i].params[lm_models[i].name2pos['W_0_dec_approx_embdr']].set_value(numpy.zeros((1,1), dtype=numpy.float32))
            lm_models[i].params[lm_models[i].name2pos['W2_dec_deep_softmax']].set_value(numpy.zeros((1,1), dtype=numpy.float32))
            lm_models[i].params[lm_models[i].name2pos['b_dec_deep_softmax']].set_value(numpy.zeros((1), dtype=numpy.float32))
    if args.mapping:
        with open(args.mapping, 'rb') as f:
            mapping = cPickle.load(f)
        heuristic = args.heuristic
    else:
        # Without a t-table mapping only the copy heuristic makes sense.
        heuristic = 0
        mapping = None
    word2idx_src = cPickle.load(open(state['word_indx'], 'rb'))
    idict_src = cPickle.load(open(state['indx_word'], 'r'))
    word2idx_trg = cPickle.load(open(state['word_indx_trgt'], 'rb'))
    idict_trg = cPickle.load(open(state['indx_word_target'], 'r'))
    word2idx_trg['<eos>'] = state['null_sym_target']
    word2idx_trg[state['oov']] = state['unk_sym_target'] # 'UNK' may be in the vocabulary. Now points to the right index.
    idict_trg[state['null_sym_target']] = '<eos>'
    idict_trg[state['unk_sym_target']] = state['oov']
    # First pass over the source file: build the per-segment rolling
    # dictionaries D_dict[line_number] used to re-slice the softmax later.
    if args.num_common and args.num_ttables and args.topn_file:
        # Use OrderedDict instead of set for reproducibility
        d = OrderedDict() # Up to now
        D = OrderedDict() # Full
        C = OrderedDict() # Allowed to reject
        prev_line = 0
        logger.info("%d" % prev_line)
        D_dict = OrderedDict()
        output = False
        for i in xrange(args.num_common):
            D[i] = 0
            C[i] = 0
        null_unk_indices = [state['null_sym_target'],state['unk_sym_target']]
        update_dicts(null_unk_indices, d, D, C, args.num_common)
        with open(args.source, 'r') as f:
            for i, line in enumerate(f):
                seqin = line.strip()
                seq, _ = parse_input(state, word2idx_src, seqin) # seq is the ndarray of indices
                indices = []
                for elt in seq[:-1]: # Exclude the EOL token
                    if elt != 1: # Exclude OOV (1 will not be a key of topn)
                        indices.extend(topn[elt]) # Add topn best unigram translations for each source word
                update_dicts(indices, d, D, C, args.num_common)
                if (i % args.change_every) == 0 and args.change_every > 0 and i > 0:
                    D_dict[prev_line] = D.copy() # Save dictionary for the lines preceding this one
                    prev_line = i
                    logger.info("%d" % i)
                    output = False
                    d = OrderedDict()
                    if args.no_reset:
                        C = D.copy()
                    else:
                        D = OrderedDict() # Full
                        C = OrderedDict() # Allowed to reject
                        for i in xrange(args.num_common):
                            D[i] = 0
                            C[i] = 0
                    null_unk_indices = [state['null_sym_target'], state['unk_sym_target']]
                    update_dicts(null_unk_indices, d, D, C, args.num_common)
                    update_dicts(indices, d, D, C, args.num_common) # Assumes you cannot fill d with only 1 line
            D_dict[prev_line] = D.copy()
    start_time = time.time()
    # Second pass: stream the translations, batch them per dictionary
    # segment, align and rewrite UNKs.
    if args.source and args.trans and args.new_trans:
        with open(args.source, 'r') as src_file:
            with open(args.trans, 'r') as trans_file:
                with open(args.new_trans, 'w') as new_trans_file:
                    if not (args.num_common and args.num_ttables and args.topn_file):
                        eos_id = state['null_sym_target']
                        unk_id = state['unk_sym_target']
                        new_word2idx_trg = word2idx_trg
                    prev_i = -1
                    if args.n_best:
                        # Peek at the first n-best line to learn the id offset.
                        full_trans_line = trans_file.readline()
                        if full_trans_line == '':
                            raise IOError("File is empty")
                        full_trans_line = full_trans_line.split('|||')
                        n_best_start = int(full_trans_line[0].strip())
                        trans_file.seek(0)
                    while True:
                        if args.n_best:
                            full_trans_line = trans_file.readline()
                            if full_trans_line == '':
                                break
                            full_trans_line = full_trans_line.split('|||')
                            i = int(full_trans_line[0].strip()) - n_best_start
                            trans_line = full_trans_line[1].strip()
                        else:
                            trans_line = trans_file.readline()
                            if trans_line == '':
                                break
                            i = prev_i + 1
                        if i == (prev_i + 1):
                            # First hypothesis for a new source sentence.
                            prev_i = i
                            if (i % args.change_every) == 0 and i > 0:
                                # Segment boundary: process the accumulated batch.
                                hard_alignments = compute_alignment(src_seqs, trg_seqs, alignment_fns, args.batchsize)
                                replace_unknown_words(
                                    src_word_seqs, trg_seqs, trg_word_seqs,
                                    hard_alignments, heuristic, mapping, unk_id,
                                    new_trans_file, args.n_best, full_trans_lines)
                            if (i % 100 == 0) and i > 0:
                                new_trans_file.flush()
                                logger.debug("Current speed is {} per sentence".
                                             format((time.time() - start_time) / i))
                            src_line = src_file.readline()
                            src_seq, src_words = parse_input(state, word2idx_src, src_line.strip())
                            src_words.append('<eos>')
                            if (i % args.change_every) == 0:
                                # Start a fresh batch (and, in rolling-vocab
                                # mode, swap in the segment's dictionary).
                                src_seqs = []
                                src_word_seqs = []
                                trg_seqs = []
                                trg_word_seqs = []
                                full_trans_lines = [] # Only used with n-best lists
                                if args.num_common and args.num_ttables and args.topn_file:
                                    indices = D_dict[i].keys()
                                    eos_id = indices.index(state['null_sym_target']) # Find new eos and unk positions
                                    unk_id = indices.index(state['unk_sym_target'])
                                    for j in xrange(num_models):
                                        # Re-slice embeddings/softmax to the rolling vocabulary.
                                        lm_models[j].params[lm_models[j].name2pos['W_0_dec_approx_embdr']].set_value(original_W_0_dec_approx_embdr[j][indices])
                                        lm_models[j].params[lm_models[j].name2pos['W2_dec_deep_softmax']].set_value(original_W2_dec_deep_softmax[j][:, indices])
                                        lm_models[j].params[lm_models[j].name2pos['b_dec_deep_softmax']].set_value(original_b_dec_deep_softmax[j][indices])
                                    new_word2idx_trg = dict([(idict_trg[index], k) for k, index in enumerate(indices)])
                        elif i != prev_i:
                            # n-best ids must be consecutive or repeated.
                            raise ValueError("prev_i: %d, i: %d" % (prev_i, i))
                        trans_seq, trans_words = parse_output(new_word2idx_trg, trans_line.strip(), eos_id=eos_id, unk_id=unk_id)
                        trans_words.append('<eos>')
                        src_seqs.append(src_seq)
                        src_word_seqs.append(src_words)
                        trg_seqs.append(trans_seq)
                        trg_word_seqs.append(trans_words)
                        if args.n_best:
                            full_trans_lines.append(full_trans_line)
                    # Out of loop
                    # Process the final (possibly partial) batch.
                    hard_alignments = compute_alignment(src_seqs, trg_seqs, alignment_fns, args.batchsize)
                    replace_unknown_words(src_word_seqs, trg_seqs, trg_word_seqs,
                                          hard_alignments, heuristic, mapping, unk_id,
                                          new_trans_file, args.n_best, full_trans_lines)
    else:
        raise NotImplementedError
# Script entry point.
if __name__ == "__main__":
    main()
| |
"""distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
__revision__ = "$Id$"
import sys
import os
import string
from sysconfig import get_python_version
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils import log
from distutils.util import get_platform
class bdist_wininst (Command):
    """Create a self-extracting executable installer for MS Windows.

    Builds the project, installs it into a temporary tree, zips that tree,
    and prepends a precompiled wininst-x.y.exe stub plus an ini-style
    configuration blob to produce the final installer.
    """
    description = "create an executable installer for MS Windows"
    # (long option, short option, help) triples understood by Command.
    user_options = [('bdist-dir=', None,
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('target-version=', None,
                     "require a specific python version" +
                     " on the target system"),
                    ('no-target-compile', 'c',
                     "do not compile .py to .pyc on the target system"),
                    ('no-target-optimize', 'o',
                     "do not compile .py to .pyo (optimized) "
                     "on the target system"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('bitmap=', 'b',
                     "bitmap to use for the installer instead of python-powered logo"),
                    ('title=', 't',
                     "title to display on the installer background instead of default"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('install-script=', None,
                     "basename of installation script to be run after "
                     "installation or before deinstallation"),
                    ('pre-install-script=', None,
                     "Fully qualified filename of a script to be run before "
                     "any files are installed. This script need not be in the "
                     "distribution"),
                    ('user-access-control=', None,
                     "specify Vista's UAC handling - 'none'/default=no "
                     "handling, 'auto'=use UAC if target Python installed for "
                     "all users, 'force'=always use UAC"),
                    ]
    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
                       'skip-build']
    def initialize_options (self):
        """Set every option to its 'not yet specified' default."""
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.no_target_compile = 0
        self.no_target_optimize = 0
        self.target_version = None
        self.dist_dir = None
        self.bitmap = None
        self.title = None
        self.skip_build = None
        self.install_script = None
        self.pre_install_script = None
        self.user_access_control = None
    # initialize_options()
    def finalize_options (self):
        """Resolve defaults from 'bdist' and validate option combinations."""
        self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
        if self.bdist_dir is None:
            if self.skip_build and self.plat_name:
                # If build is skipped and plat_name is overridden, bdist will
                # not see the correct 'plat_name' - so set that up manually.
                bdist = self.distribution.get_command_obj('bdist')
                bdist.plat_name = self.plat_name
                # next the command will be initialized using that name
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wininst')
        if not self.target_version:
            self.target_version = ""
        if not self.skip_build and self.distribution.has_ext_modules():
            # A binary build with extensions only works on the Python
            # version it was compiled for.
            short_version = get_python_version()
            if self.target_version and self.target_version != short_version:
                raise DistutilsOptionError, \
                      "target version can only be %s, or the '--skip-build'" \
                      " option must be specified" % (short_version,)
            self.target_version = short_version
        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'),
                                   )
        if self.install_script:
            # The install script must be one of the distribution's scripts.
            for script in self.distribution.scripts:
                if self.install_script == os.path.basename(script):
                    break
            else:
                raise DistutilsOptionError, \
                      "install_script '%s' not found in scripts" % \
                      self.install_script
    # finalize_options()
    def run (self):
        """Build, pseudo-install, archive, and wrap everything in the exe."""
        if (sys.platform != "win32" and
            (self.distribution.has_ext_modules() or
             self.distribution.has_c_libraries())):
            raise DistutilsPlatformError \
                  ("distribution contains extensions and/or C libraries; "
                   "must be compiled on a Windows 32 platform")
        if not self.skip_build:
            self.run_command('build')
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0
        install.plat_name = self.plat_name
        install_lib = self.reinitialize_command('install_lib')
        # we do not want to include pyc or pyo files
        install_lib.compile = 0
        install_lib.optimize = 0
        if self.distribution.has_ext_modules():
            # If we are building an installer for a Python version other
            # than the one we are currently running, then we need to ensure
            # our build_lib reflects the other Python version rather than ours.
            # Note that for target_version!=sys.version, we must have skipped the
            # build step, so there is no issue with enforcing the build of this
            # version.
            target_version = self.target_version
            if not target_version:
                assert self.skip_build, "Should have already checked this"
                target_version = sys.version[0:3]
            plat_specifier = ".%s-%s" % (self.plat_name, target_version)
            build = self.get_finalized_command('build')
            build.build_lib = os.path.join(build.build_base,
                                           'lib' + plat_specifier)
        # Use a custom scheme for the zip-file, because we have to decide
        # at installation time which scheme to use.
        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
            value = string.upper(key)
            if key == 'headers':
                value = value + '/Include/$dist_name'
            setattr(install,
                    'install_' + key,
                    value)
        log.info("installing to %s", self.bdist_dir)
        install.ensure_finalized()
        # avoid warning of 'install_lib' about installing
        # into a directory not in sys.path
        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
        install.run()
        del sys.path[0]
        # And make an archive relative to the root of the
        # pseudo-installation tree.
        from tempfile import mktemp
        archive_basename = mktemp()
        fullname = self.distribution.get_fullname()
        arcname = self.make_archive(archive_basename, "zip",
                                    root_dir=self.bdist_dir)
        # create an exe containing the zip-file
        self.create_exe(arcname, fullname, self.bitmap)
        if self.distribution.has_ext_modules():
            pyversion = get_python_version()
        else:
            pyversion = 'any'
        self.distribution.dist_files.append(('bdist_wininst', pyversion,
                                             self.get_installer_filename(fullname)))
        # remove the zip-file again
        log.debug("removing temporary file '%s'", arcname)
        os.remove(arcname)
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
    # run()
    def get_inidata (self):
        """Return the ini-style [metadata]/[Setup] text embedded in the exe."""
        # Return data describing the installation.
        lines = []
        metadata = self.distribution.metadata
        # Write the [metadata] section.
        lines.append("[metadata]")
        # 'info' will be displayed in the installer's dialog box,
        # describing the items to be installed.
        info = (metadata.long_description or '') + '\n'
        # Escape newline characters
        def escape(s):
            return string.replace(s, "\n", "\\n")
        for name in ["author", "author_email", "description", "maintainer",
                     "maintainer_email", "name", "url", "version"]:
            data = getattr(metadata, name, "")
            if data:
                info = info + ("\n %s: %s" % \
                               (string.capitalize(name), escape(data)))
                lines.append("%s=%s" % (name, escape(data)))
        # The [setup] section contains entries controlling
        # the installer runtime.
        lines.append("\n[Setup]")
        if self.install_script:
            lines.append("install_script=%s" % self.install_script)
        lines.append("info=%s" % escape(info))
        lines.append("target_compile=%d" % (not self.no_target_compile))
        lines.append("target_optimize=%d" % (not self.no_target_optimize))
        if self.target_version:
            lines.append("target_version=%s" % self.target_version)
        if self.user_access_control:
            lines.append("user_access_control=%s" % self.user_access_control)
        title = self.title or self.distribution.get_fullname()
        lines.append("title=%s" % escape(title))
        import time
        import distutils
        build_info = "Built %s with distutils-%s" % \
                     (time.ctime(time.time()), distutils.__version__)
        lines.append("build_info=%s" % build_info)
        return string.join(lines, "\n")
    # get_inidata()
    def create_exe (self, arcname, fullname, bitmap=None):
        """Write the installer: exe stub + optional bitmap + config + zip."""
        import struct
        self.mkpath(self.dist_dir)
        cfgdata = self.get_inidata()
        installer_name = self.get_installer_filename(fullname)
        self.announce("creating %s" % installer_name)
        if bitmap:
            bitmapdata = open(bitmap, "rb").read()
            bitmaplen = len(bitmapdata)
        else:
            bitmaplen = 0
        file = open(installer_name, "wb")
        file.write(self.get_exe_bytes())
        if bitmap:
            file.write(bitmapdata)
        # Convert cfgdata from unicode to ascii, mbcs encoded
        try:
            unicode
        except NameError:
            pass
        else:
            if isinstance(cfgdata, unicode):
                cfgdata = cfgdata.encode("mbcs")
        # Append the pre-install script
        cfgdata = cfgdata + "\0"
        if self.pre_install_script:
            script_data = open(self.pre_install_script, "r").read()
            cfgdata = cfgdata + script_data + "\n\0"
        else:
            # empty pre-install script
            cfgdata = cfgdata + "\0"
        file.write(cfgdata)
        # The 'magic number' 0x1234567B is used to make sure that the
        # binary layout of 'cfgdata' is what the wininst.exe binary
        # expects. If the layout changes, increment that number, make
        # the corresponding changes to the wininst.exe sources, and
        # recompile them.
        header = struct.pack("<iii",
                             0x1234567B,       # tag
                             len(cfgdata),     # length
                             bitmaplen,        # number of bytes in bitmap
                             )
        file.write(header)
        file.write(open(arcname, "rb").read())
    # create_exe()
    def get_installer_filename(self, fullname):
        """Compute the output .exe path; embeds target version when set."""
        # Factored out to allow overriding in subclasses
        if self.target_version:
            # if we create an installer for a specific python version,
            # it's better to include this in the name
            installer_name = os.path.join(self.dist_dir,
                                          "%s.%s-py%s.exe" %
                                          (fullname, self.plat_name, self.target_version))
        else:
            installer_name = os.path.join(self.dist_dir,
                                          "%s.%s.exe" % (fullname, self.plat_name))
        return installer_name
    # get_installer_filename()
    def get_exe_bytes (self):
        """Return the bytes of the matching precompiled wininst-x.y.exe stub."""
        from distutils.msvccompiler import get_build_version
        # If a target-version other than the current version has been
        # specified, then using the MSVC version from *this* build is no good.
        # Without actually finding and executing the target version and parsing
        # its sys.version, we just hard-code our knowledge of old versions.
        # NOTE: Possible alternative is to allow "--target-version" to
        # specify a Python executable rather than a simple version string.
        # We can then execute this program to obtain any info we need, such
        # as the real sys.version string for the build.
        cur_version = get_python_version()
        if self.target_version and self.target_version != cur_version:
            # If the target version is *later* than us, then we assume they
            # use what we use
            # string compares seem wrong, but are what sysconfig.py itself uses
            if self.target_version > cur_version:
                bv = get_build_version()
            else:
                if self.target_version < "2.4":
                    bv = 6.0
                else:
                    bv = 7.1
        else:
            # for current version - use authoritative check.
            bv = get_build_version()
        # wininst-x.y.exe is in the same directory as this file
        directory = os.path.dirname(__file__)
        # we must use a wininst-x.y.exe built with the same C compiler
        # used for python. XXX What about mingw, borland, and so on?
        # if plat_name starts with "win" but is not "win32"
        # we want to strip "win" and leave the rest (e.g. -amd64)
        # for all other cases, we don't want any suffix
        if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
            sfix = self.plat_name[3:]
        else:
            sfix = ''
        filename = os.path.join(directory, "wininst-%.1f%s.exe" % (bv, sfix))
        f = open(filename, "rb")
        try:
            return f.read()
        finally:
            f.close()
# class bdist_wininst
| |
"""ResNet training script, with some code from
https://github.com/tensorflow/models/tree/master/resnet.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import numpy as np
import ray
import tensorflow as tf
import cifar_input
import resnet_model
# Tensorflow must be at least version 1.2.0 for the example to work.
tf_major = int(tf.__version__.split(".")[0])
tf_minor = int(tf.__version__.split(".")[1])
if (tf_major < 1) or (tf_major == 1 and tf_minor < 2):
    raise Exception("Your Tensorflow version is less than 1.2.0. Please "
                    "update Tensorflow to the latest version.")
# Command-line flags, parsed once at import time into FLAGS.
parser = argparse.ArgumentParser(description="Run the ResNet example.")
parser.add_argument(
    "--dataset",
    default="cifar10",
    type=str,
    help="Dataset to use: cifar10 or cifar100.")
parser.add_argument(
    "--train_data_path",
    default="cifar-10-batches-bin/data_batch*",
    type=str,
    help="Data path for the training data.")
parser.add_argument(
    "--eval_data_path",
    default="cifar-10-batches-bin/test_batch.bin",
    type=str,
    help="Data path for the testing data.")
parser.add_argument(
    "--eval_dir",
    default="/tmp/resnet-model/eval",
    type=str,
    help="Data path for the tensorboard logs.")
parser.add_argument(
    "--eval_batch_count",
    default=50,
    type=int,
    help="Number of batches to evaluate over.")
parser.add_argument(
    "--num_gpus",
    default=0,
    type=int,
    help="Number of GPUs to use for training.")
parser.add_argument(
    "--redis-address",
    default=None,
    type=str,
    help="The Redis address of the cluster.")
FLAGS = parser.parse_args()
# Determines if the actors require a gpu or not.
use_gpu = 1 if int(FLAGS.num_gpus) > 0 else 0
@ray.remote
def get_data(path, size, dataset):
    """Load all preprocessed images and labels via a TensorFlow input queue.

    Runs on CPU only: the GPUs are hidden from this task so data loading
    never reserves one.  Returns the (images, labels) arrays produced by
    a single session run over the cifar input pipeline.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    with tf.device("/cpu:0"):
        data_op = cifar_input.build_data(path, size, dataset)
    # The context manager closes the session once the batch is fetched.
    with tf.Session() as sess:
        images, labels = sess.run(data_op)
    return images, labels
@ray.remote(num_gpus=use_gpu)
class ResNetTrainActor(object):
    """Ray actor holding one ResNet training replica and its TF session."""
    def __init__(self, data, dataset, num_gpus):
        # data: preprocessed (images, labels) for this replica.
        # dataset: "cifar10" or "cifar100" (selects the class count).
        # num_gpus: GPUs available to this actor (0 means CPU-only).
        if num_gpus > 0:
            os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
                [str(i) for i in ray.get_gpu_ids()])
        hps = resnet_model.HParams(
            batch_size=128,
            num_classes=100 if dataset == "cifar100" else 10,
            min_lrn_rate=0.0001,
            lrn_rate=0.1,
            num_residual_units=5,
            use_bottleneck=False,
            weight_decay_rate=0.0002,
            relu_leakiness=0.1,
            optimizer="mom",
            num_gpus=num_gpus)
        # We seed each actor differently so that each actor operates on a
        # different subset of data.
        if num_gpus > 0:
            tf.set_random_seed(ray.get_gpu_ids()[0] + 1)
        else:
            # Only a single actor in this case.
            tf.set_random_seed(1)
        with tf.device("/gpu:0" if num_gpus > 0 else "/cpu:0"):
            # Build the model.
            images, labels = cifar_input.build_input(data, hps.batch_size,
                                                     dataset, False)
            self.model = resnet_model.ResNet(hps, images, labels, "train")
            self.model.build_graph()
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            self.model.variables.set_session(sess)
            init = tf.global_variables_initializer()
            sess.run(init)
            # Number of SGD steps performed per compute_steps() call.
            self.steps = 10
    def compute_steps(self, weights):
        """Set the weights, run self.steps training steps, return new weights."""
        # This method sets the weights in the network, trains the network
        # self.steps times, and returns the new weights.
        self.model.variables.set_weights(weights)
        for i in range(self.steps):
            self.model.variables.sess.run(self.model.train_op)
        return self.model.variables.get_weights()
    def get_weights(self):
        """Return the replica's current weights."""
        # Note that the driver cannot directly access fields of the class,
        # so helper methods must be created.
        return self.model.variables.get_weights()
@ray.remote
class ResNetTestActor(object):
    """Ray actor that evaluates weights and writes TensorBoard summaries."""
    def __init__(self, data, dataset, eval_batch_count, eval_dir):
        # Evaluation always runs on CPU; hide any GPUs from TensorFlow.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        hps = resnet_model.HParams(
            batch_size=100,
            num_classes=100 if dataset == "cifar100" else 10,
            min_lrn_rate=0.0001,
            lrn_rate=0.1,
            num_residual_units=5,
            use_bottleneck=False,
            weight_decay_rate=0.0002,
            relu_leakiness=0.1,
            optimizer="mom",
            num_gpus=0)
        with tf.device("/cpu:0"):
            # Builds the testing network.
            images, labels = cifar_input.build_input(data, hps.batch_size,
                                                     dataset, False)
            self.model = resnet_model.ResNet(hps, images, labels, "eval")
            self.model.build_graph()
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            self.model.variables.set_session(sess)
            init = tf.global_variables_initializer()
            sess.run(init)
            # Initializing parameters for tensorboard.
            self.best_precision = 0.0
            self.eval_batch_count = eval_batch_count
            self.summary_writer = tf.summary.FileWriter(eval_dir, sess.graph)
            # The IP address where tensorboard logs will be on.
            self.ip_addr = ray.services.get_node_ip_address()
    def accuracy(self, weights, train_step):
        """Evaluate `weights` over eval_batch_count batches; return precision."""
        # Sets the weights, computes the accuracy and other metrics
        # over eval_batches, and outputs to tensorboard.
        self.model.variables.set_weights(weights)
        total_prediction, correct_prediction = 0, 0
        model = self.model
        sess = self.model.variables.sess
        for _ in range(self.eval_batch_count):
            summaries, loss, predictions, truth = sess.run(
                [model.summaries, model.cost, model.predictions, model.labels])
            truth = np.argmax(truth, axis=1)
            predictions = np.argmax(predictions, axis=1)
            correct_prediction += np.sum(truth == predictions)
            total_prediction += predictions.shape[0]
        precision = 1.0 * correct_prediction / total_prediction
        self.best_precision = max(precision, self.best_precision)
        # Emit current and best precision plus the model summaries.
        precision_summ = tf.Summary()
        precision_summ.value.add(tag="Precision", simple_value=precision)
        self.summary_writer.add_summary(precision_summ, train_step)
        best_precision_summ = tf.Summary()
        best_precision_summ.value.add(
            tag="Best Precision", simple_value=self.best_precision)
        self.summary_writer.add_summary(best_precision_summ, train_step)
        self.summary_writer.add_summary(summaries, train_step)
        tf.logging.info("loss: %.3f, precision: %.3f, best precision: %.3f" %
                        (loss, precision, self.best_precision))
        self.summary_writer.flush()
        return precision
    def get_ip_addr(self):
        """Return the node IP address hosting the TensorBoard logs."""
        # As above, a helper method must be created to access the field from
        # the driver.
        return self.ip_addr
def train():
    """Driver loop: average weights across train actors, eval periodically."""
    num_gpus = FLAGS.num_gpus
    if FLAGS.redis_address is None:
        ray.init(num_gpus=num_gpus)
    else:
        ray.init(redis_address=FLAGS.redis_address)
    train_data = get_data.remote(FLAGS.train_data_path, 50000, FLAGS.dataset)
    test_data = get_data.remote(FLAGS.eval_data_path, 10000, FLAGS.dataset)
    # Creates an actor for each gpu, or one if only using the cpu. Each actor
    # has access to the dataset.
    if FLAGS.num_gpus > 0:
        train_actors = [
            ResNetTrainActor.remote(train_data, FLAGS.dataset, num_gpus)
            for _ in range(num_gpus)
        ]
    else:
        train_actors = [ResNetTrainActor.remote(train_data, FLAGS.dataset, 0)]
    test_actor = ResNetTestActor.remote(test_data, FLAGS.dataset,
                                        FLAGS.eval_batch_count, FLAGS.eval_dir)
    print("The log files for tensorboard are stored at ip {}.".format(
        ray.get(test_actor.get_ip_addr.remote())))
    step = 0
    weight_id = train_actors[0].get_weights.remote()
    acc_id = test_actor.accuracy.remote(weight_id, step)
    # Correction for dividing the weights by the number of gpus.
    if num_gpus == 0:
        num_gpus = 1
    print("Starting training loop. Use Ctrl-C to exit.")
    try:
        while True:
            # Every actor runs 10 steps from the same weights; the results
            # are averaged into the next weight set.
            all_weights = ray.get([
                actor.compute_steps.remote(weight_id) for actor in train_actors
            ])
            mean_weights = {
                k: (sum(weights[k] for weights in all_weights) / num_gpus)
                for k in all_weights[0]
            }
            weight_id = ray.put(mean_weights)
            step += 10
            if step % 200 == 0:
                # Retrieves the previously computed accuracy and launches a new
                # testing task with the current weights every 200 steps.
                acc = ray.get(acc_id)
                acc_id = test_actor.accuracy.remote(weight_id, step)
                print("Step {}: {:.6f}".format(step - 200, acc))
    except KeyboardInterrupt:
        pass
if __name__ == "__main__":
    # Launch the Ray-based distributed training loop.
    train()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware ESX platform.
**Related Flags**
:vmwareapi_host_ip: IPAddress of VMware ESX server.
:vmwareapi_host_username: Username for connection to VMware ESX Server.
:vmwareapi_host_password: Password for connection to VMware ESX Server.
:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks
(default: 1.0).
:vmwareapi_api_retry_count: The API retry count in case of failure such as
network failures (socket errors etc.)
(default: 10).
"""
import time
from eventlet import event
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi.vmops import VMWareVMOps
# Module logger for this driver.
LOG = logging.getLogger("nova.virt.vmwareapi_conn")
FLAGS = flags.FLAGS
# Connection/behavior flags for the vmwareapi connection type.
# NOTE(review): several help strings below rely on implicit string
# concatenation and are missing a separating space between the joined
# parts (e.g. "...host." + "Required..."); cosmetic only, but worth
# fixing next time the flag help text is touched.
flags.DEFINE_string('vmwareapi_host_ip',
                    None,
                    'URL for connection to VMWare ESX host.'
                    'Required if connection_type is vmwareapi.')
flags.DEFINE_string('vmwareapi_host_username',
                    None,
                    'Username for connection to VMWare ESX host.'
                    'Used only if connection_type is vmwareapi.')
flags.DEFINE_string('vmwareapi_host_password',
                    None,
                    'Password for connection to VMWare ESX host.'
                    'Used only if connection_type is vmwareapi.')
flags.DEFINE_float('vmwareapi_task_poll_interval',
                   5.0,
                   'The interval used for polling of remote tasks '
                   'Used only if connection_type is vmwareapi')
flags.DEFINE_float('vmwareapi_api_retry_count',
                   10,
                   'The number of times we retry on failures, '
                   'e.g., socket error, etc.'
                   'Used only if connection_type is vmwareapi')
flags.DEFINE_string('vmwareapi_vlan_interface',
                    'vmnic0',
                    'Physical ethernet adapter name for vlan networking')
# Seconds to sleep between retried VMware API calls.
TIME_BETWEEN_API_CALL_RETRIES = 2.0
class Failure(Exception):
    """Base exception for remote-task failures reported by the ESX host."""

    def __init__(self, details):
        # Keep the raw failure payload; it may be any printable object.
        self.details = details

    def __str__(self):
        return "%s" % (self.details,)
def get_connection(read_only):
    """Set up and return the ESX host connection.

    :param read_only: unused; accepted for driver-interface compatibility.
    :raises Exception: when any of the required vmwareapi_* flags
        (host ip, username, password) is unset.
    """
    # BUG FIX: the parameter was previously named "_", which shadowed the
    # gettext "_" builtin -- the error path below then tried to *call the
    # argument* and raised a TypeError instead of the intended message.
    host_ip = FLAGS.vmwareapi_host_ip
    host_username = FLAGS.vmwareapi_host_username
    host_password = FLAGS.vmwareapi_host_password
    api_retry_count = FLAGS.vmwareapi_api_retry_count
    if not host_ip or host_username is None or host_password is None:
        # Spaces added between the concatenated fragments so the message
        # no longer reads "host_ip,vmwareapi_host_username".
        raise Exception(_("Must specify vmwareapi_host_ip, "
                          "vmwareapi_host_username "
                          "and vmwareapi_host_password to use "
                          "connection_type=vmwareapi"))
    return VMWareESXConnection(host_ip, host_username, host_password,
                               api_retry_count)
class VMWareESXConnection(driver.ComputeDriver):
    """The ESX host connection object.

    Thin facade over VMWareVMOps: every lifecycle call below delegates to
    self._vmops, which owns the VIM session created in __init__.
    """
    def __init__(self, host_ip, host_username, host_password,
                 api_retry_count, scheme="https"):
        super(VMWareESXConnection, self).__init__()
        # One authenticated VIM session is shared by all vmops calls.
        session = VMWareAPISession(host_ip, host_username, host_password,
                                   api_retry_count, scheme=scheme)
        self._vmops = VMWareVMOps(session)
    def init_host(self, host):
        """Do the initialization that needs to be done."""
        # FIXME(sateesh): implement this
        pass
    def list_instances(self):
        """List VM instances."""
        return self._vmops.list_instances()
    def spawn(self, context, instance, image_meta, network_info,
              block_device_mapping=None):
        """Create VM instance."""
        # NOTE(review): block_device_mapping is accepted but not forwarded.
        self._vmops.spawn(context, instance, image_meta, network_info)
    def snapshot(self, context, instance, name):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, name)
    def reboot(self, instance, network_info, reboot_type):
        """Reboot VM instance."""
        # NOTE(review): reboot_type (soft/hard) is ignored by this driver.
        self._vmops.reboot(instance, network_info)
    def destroy(self, instance, network_info, block_device_info=None):
        """Destroy VM instance."""
        self._vmops.destroy(instance, network_info)
    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)
    def suspend(self, instance):
        """Suspend the specified instance."""
        self._vmops.suspend(instance)
    def resume(self, instance):
        """Resume the suspended VM instance."""
        self._vmops.resume(instance)
    def get_info(self, instance_name):
        """Return info about the VM instance."""
        return self._vmops.get_info(instance_name)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        # NOTE(review): delegates to get_info(), not a dedicated
        # diagnostics call -- presumably a placeholder; confirm.
        return self._vmops.get_info(instance)
    def get_console_output(self, instance):
        """Return snapshot of console."""
        return self._vmops.get_console_output(instance)
    def get_ajax_console(self, instance):
        """Return link to instance's ajax console."""
        return self._vmops.get_ajax_console(instance)
    def attach_volume(self, connection_info, instance_name, mountpoint):
        """Attach volume storage to VM instance (not supported here)."""
        pass
    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage from VM instance (not supported here)."""
        pass
    def get_console_pool_info(self, console_type):
        """Get info about the host on which the VM resides."""
        return {'address': FLAGS.vmwareapi_host_ip,
                'username': FLAGS.vmwareapi_host_username,
                'password': FLAGS.vmwareapi_host_password}
    def update_available_resource(self, ctxt, host):
        """This method is supported only by libvirt."""
        return
    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        pass
    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        pass
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        self._vmops.plug_vifs(instance, network_info)
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        self._vmops.unplug_vifs(instance, network_info)
class VMWareAPISession(object):
    """
    Sets up a session with the ESX host and handles all
    the calls made to the host.

    NOTE: this module is Python 2 code (``except E, e`` syntax).
    """
    def __init__(self, host_ip, host_username, host_password,
                 api_retry_count, scheme="https"):
        self._host_ip = host_ip
        self._host_username = host_username
        self._host_password = host_password
        self.api_retry_count = api_retry_count
        self._scheme = scheme
        self._session_id = None
        self.vim = None
        # Log in eagerly so a bad host/credential fails at construction.
        self._create_session()
    def _get_vim_object(self):
        """Create the VIM Object instance."""
        return vim.Vim(protocol=self._scheme, host=self._host_ip)
    def _create_session(self):
        """Creates a session with the ESX host."""
        # Loops until Login succeeds; any Login failure is re-raised as
        # exception.Error below, so in practice this runs at most once
        # per outcome.
        while True:
            try:
                # Login and setup the session with the ESX host for making
                # API calls
                self.vim = self._get_vim_object()
                session = self.vim.Login(
                               self.vim.get_service_content().sessionManager,
                               userName=self._host_username,
                               password=self._host_password)
                # Terminate the earlier session, if possible ( For the sake of
                # preserving sessions as there is a limit to the number of
                # sessions we can have )
                if self._session_id:
                    try:
                        self.vim.TerminateSession(
                                self.vim.get_service_content().sessionManager,
                                sessionId=[self._session_id])
                    except Exception, excep:
                        # This exception is something we can live with. It is
                        # just an extra caution on our side. The session may
                        # have been cleared. We could have made a call to
                        # SessionIsActive, but that is an overhead because we
                        # anyway would have to call TerminateSession.
                        LOG.debug(excep)
                self._session_id = session.key
                return
            except Exception, excep:
                LOG.critical(_("In vmwareapi:_create_session, "
                               "got this exception: %s") % excep)
                raise exception.Error(excep)
    def __del__(self):
        """Logs-out the session."""
        # Logout to avoid un-necessary increase in session count at the
        # ESX host
        try:
            self.vim.Logout(self.vim.get_service_content().sessionManager)
        except Exception, excep:
            # It is just cautionary on our part to do a logout in del just
            # to ensure that the session is not left active.
            LOG.debug(excep)
    def _is_vim_object(self, module):
        """Check if the module is a VIM Object instance."""
        return isinstance(module, vim.Vim)
    def _call_method(self, module, method, *args, **kwargs):
        """
        Calls a method within the module specified with
        args provided.

        Retries on authentication faults (after re-creating the session)
        and on session-overload faults, up to self.api_retry_count times;
        other exceptions abort immediately and are re-raised.
        """
        args = list(args)
        retry_count = 0
        exc = None
        last_fault_list = []
        while True:
            try:
                if not self._is_vim_object(module):
                    # If it is not the first try, then get the latest
                    # vim object
                    if retry_count > 0:
                        args = args[1:]
                    # Non-VIM helpers (e.g. vim_util) take the vim object
                    # as their first positional argument.
                    args = [self.vim] + args
                retry_count += 1
                # Resolve dotted method paths like "a.b.c" on the module.
                temp_module = module
                for method_elem in method.split("."):
                    temp_module = getattr(temp_module, method_elem)
                return temp_module(*args, **kwargs)
            except error_util.VimFaultException, excep:
                # If it is a Session Fault Exception, it may point
                # to a session gone bad. So we try re-creating a session
                # and then proceeding ahead with the call.
                exc = excep
                if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
                    # Because of the idle session returning an empty
                    # RetrievePropertiesResponse and also the same is returned
                    # when there is say empty answer to the query for
                    # VMs on the host ( as in no VMs on the host), we have no
                    # way to differentiate.
                    # So if the previous response was also am empty response
                    # and after creating a new session, we get the same empty
                    # response, then we are sure of the response being supposed
                    # to be empty.
                    if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
                        return []
                    last_fault_list = excep.fault_list
                    self._create_session()
                else:
                    # No re-trying for errors for API call has gone through
                    # and is the caller's fault. Caller should handle these
                    # errors. e.g, InvalidArgument fault.
                    break
            except error_util.SessionOverLoadException, excep:
                # For exceptions which may come because of session overload,
                # we retry
                exc = excep
            except Exception, excep:
                # If it is a proper exception, say not having furnished
                # proper data in the SOAP call or the retry limit having
                # exceeded, we raise the exception
                exc = excep
                break
            # If retry count has been reached then break and
            # raise the exception
            if retry_count > self.api_retry_count:
                break
            time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
        LOG.critical(_("In vmwareapi:_call_method, "
                       "got this exception: %s") % exc)
        # NOTE(review): bare ``raise`` outside an except block relies on
        # Python 2 keeping the last handled exception in sys.exc_info();
        # under Python 3 this would need ``raise exc``.
        raise
    def _get_vim(self):
        """Gets the VIM object reference, creating the session lazily."""
        if self.vim is None:
            self._create_session()
        return self.vim
    def _wait_for_task(self, instance_uuid, task_ref):
        """
        Return a Deferred that will give the result of the given task.
        The task is polled until it completes.
        """
        done = event.Event()
        loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
                                 done)
        loop.start(FLAGS.vmwareapi_task_poll_interval, now=True)
        # Blocks until _poll_task sends a result or an exception.
        ret_val = done.wait()
        loop.stop()
        return ret_val
    def _poll_task(self, instance_uuid, task_ref, done):
        """
        Poll the given task, and fires the given Deferred if we
        get a result.
        """
        try:
            task_info = self._call_method(vim_util, "get_dynamic_property",
                            task_ref, "Task", "info")
            task_name = task_info.name
            # Audit record; action name truncated to the DB column size.
            action = dict(
                instance_uuid=instance_uuid,
                action=task_name[0:255],
                error=None)
            if task_info.state in ['queued', 'running']:
                # Still in progress: return and let the LoopingCall poll again.
                return
            elif task_info.state == 'success':
                LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
                            "status: success") % locals())
                done.send("success")
            else:
                error_info = str(task_info.error.localizedMessage)
                action["error"] = error_info
                LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
                          "status: error %(error_info)s") % locals())
                done.send_exception(exception.Error(error_info))
            # Recorded for both success and error outcomes (not while running).
            db.instance_action_create(context.get_admin_context(), action)
        except Exception, excep:
            LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
            done.send_exception(excep)
| |
import base64
from winrm.http.transport import HttpPlaintext, HttpKerberos
from datetime import timedelta
import uuid
from isodate.isoduration import duration_isoformat
import xmlwitch
import xml.etree.ElementTree as ET
class WinRMWebService(object):
    """
    This is the main class that does the SOAP request/response logic. There are a few helper classes, but pretty
    much everything comes through here first.
    """
    DEFAULT_TIMEOUT = 'PT60S'          # ISO8601 operation timeout
    DEFAULT_MAX_ENV_SIZE = 153600      # max SOAP envelope size, bytes
    DEFAULT_LOCALE = 'en-US'
    def __init__(self, endpoint, transport='kerberos', username=None, password=None, realm=None, service=None, keytab=None, ca_trust_path=None):
        """
        @param string endpoint: the WinRM webservice endpoint
        @param string transport: transport type, one of 'kerberos' (default), 'ssl', 'plaintext'
        @param string username: username
        @param string password: password
        @param string realm: the Kerberos realm we are authenticating to
        @param string service: the service name, default is HTTP
        @param string keytab: the path to a keytab file if you are using one
        @param string ca_trust_path: Certification Authority trust path
        """
        self.endpoint = endpoint
        self.timeout = WinRMWebService.DEFAULT_TIMEOUT
        self.max_env_sz = WinRMWebService.DEFAULT_MAX_ENV_SIZE
        self.locale = WinRMWebService.DEFAULT_LOCALE
        # NOTE(review): the docstring advertises an 'ssl' transport but only
        # 'plaintext' and 'kerberos' are handled; anything else raises.
        # realm is accepted but never used.
        if transport == 'plaintext':
            self.transport = HttpPlaintext(endpoint, username, password)
        elif transport == 'kerberos':
            self.transport = HttpKerberos(endpoint)
        else:
            raise NotImplementedError()
        self.username = username
        self.password = password
        self.service = service
        self.keytab = keytab
        self.ca_trust_path = ca_trust_path
    def set_timeout(self, seconds):
        """
        Operation timeout, see http://msdn.microsoft.com/en-us/library/ee916629(v=PROT.13).aspx
        @param int seconds: the number of seconds to set the timeout to. It will be converted to an ISO8601 format.
        """
        # in original library there is an alias - op_timeout method
        # NOTE(review): two apparent bugs here -- timedelta(seconds) passes
        # *seconds* as the ``days`` argument (should be
        # timedelta(seconds=seconds)), and the result is returned rather
        # than assigned to self.timeout, so this method never changes the
        # timeout actually used. Confirm before fixing.
        return duration_isoformat(timedelta(seconds))
    def open_shell(self, i_stream='stdin', o_stream='stdout stderr', working_directory=None, env_vars=None, noprofile=False, codepage=437, lifetime=None, idle_timeout=None):
        """
        Create a Shell on the destination host
        @param string i_stream: Which input stream to open. Leave this alone unless you know what you're doing (default: stdin)
        @param string o_stream: Which output stream to open. Leave this alone unless you know what you're doing (default: stdout stderr)
        @param string working_directory: the directory to create the shell in
        @param dict env_vars: environment variables to set for the shell. For instance: {'PATH': '%PATH%;c:/Program Files (x86)/Git/bin/', 'CYGWIN': 'nontsec codepage:utf8'}
        @returns The ShellId from the SOAP response. This is our open shell instance on the remote machine.
        @rtype string
        """
        node = xmlwitch.Builder(version='1.0', encoding='utf-8')
        with node.env__Envelope(**self._namespaces):
            with node.env__Header:
                self._build_soap_header(node)
                self._set_resource_uri_cmd(node)
                self._set_action_create(node)
                with node.w__OptionSet:
                    node.w__Option(str(noprofile).upper(), Name='WINRS_NOPROFILE')
                    node.w__Option(str(codepage), Name='WINRS_CODEPAGE')
            with node.env__Body:
                with node.rsp__Shell:
                    node.rsp__InputStreams(i_stream)
                    node.rsp__OutputStreams(o_stream)
                    # NOTE(review): the elements below use a single
                    # underscore (rsp_...) while namespaced elements
                    # elsewhere use a double underscore (rsp__...), and
                    # rsp_IdleTimeOut/rsp_Environment are *assigned* or
                    # used inconsistently -- verify against xmlwitch's
                    # naming convention; these branches may emit wrong XML.
                    if working_directory:
                        #TODO ensure that rsp:WorkingDirectory should be nested within rsp:Shell
                        node.rsp_WorkingDirectory(working_directory)
                    # TODO: research Lifetime a bit more: http://msdn.microsoft.com/en-us/library/cc251546(v=PROT.13).aspx
                    #if lifetime:
                    #    node.rsp_Lifetime = iso8601_duration.sec_to_dur(lifetime)
                    # TODO: make it so the input is given in milliseconds and converted to xs:duration
                    if idle_timeout:
                        node.rsp_IdleTimeOut = idle_timeout
                    if env_vars:
                        with node.rsp_Environment:
                            for key, value in env_vars.items():
                                node.rsp_Variable(value, Name=key)
        response = self.send_message(str(node))
        root = ET.fromstring(response)
        # First element carrying a Name="ShellId" attribute holds our id.
        return next(node for node in root.findall('.//*') if node.get('Name') == 'ShellId').text
    # Helper methods for building SOAP Headers
    def _build_soap_header(self, node, message_id=uuid.uuid4()):
        # NOTE(review): the default message_id is evaluated ONCE at class
        # definition time, so every call that omits message_id reuses the
        # same UUID; should presumably be message_id=None with a fresh
        # uuid4() per call -- confirm.
        node.a__To(str(self.endpoint))
        with node.a__ReplyTo:
            node.a__Address('http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous', mustUnderstand='true')
        node.w__MaxEnvelopeSize(str(self.max_env_sz), mustUnderstand='true')
        node.a__MessageID('uuid:{0}'.format(message_id))
        node.w__Locale(None, xml__lang=self.locale, mustUnderstand='false')
        node.p__DataLocale(None, xml__lang=self.locale, mustUnderstand='false')
        # TODO: research this a bit http://msdn.microsoft.com/en-us/library/cc251561(v=PROT.13).aspx
        #node.cfg__MaxTimeoutms = 600
        node.w__OperationTimeout(self.timeout)
    def _set_resource_uri_cmd(self, node):
        # Target the Windows Remote Shell (cmd) resource.
        node.w__ResourceURI('http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', mustUnderstand='true')
    def _set_resource_uri_wmi(self, node, namespace='root/cimv2/*'):
        node.w__ResourceURI('http://schemas.microsoft.com/wbem/wsman/1/wmi/{0}'.format(namespace), mustUnderstand='true')
    def _set_action_create(self, node):
        node.a__Action('http://schemas.xmlsoap.org/ws/2004/09/transfer/Create', mustUnderstand='true')
    def _set_action_delete(self, node):
        node.a__Action('http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete', mustUnderstand='true')
    def _set_action_command(self, node):
        node.a__Action('http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command', mustUnderstand='true')
    def _set_action_receive(self, node):
        node.a__Action('http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive', mustUnderstand='true')
    def _set_action_signal(self, node):
        node.a__Action('http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Signal', mustUnderstand='true')
    def _set_action_enumerate(self, node):
        node.a__Action('http://schemas.xmlsoap.org/ws/2004/09/enumeration/Enumerate', mustUnderstand='true')
    def _set_selector_shell_id(self, node, shell_id):
        # Address a previously opened shell by its ShellId selector.
        with node.w__SelectorSet:
            node.w__Selector(shell_id, Name='ShellId')
    @property
    def _namespaces(self):
        # XML namespace prefixes used by the xmlwitch "prefix__tag" names above.
        return {
            'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xmlns:env': 'http://www.w3.org/2003/05/soap-envelope',
            'xmlns:a': 'http://schemas.xmlsoap.org/ws/2004/08/addressing',
            'xmlns:b': 'http://schemas.dmtf.org/wbem/wsman/1/cimbinding.xsd',
            'xmlns:n': 'http://schemas.xmlsoap.org/ws/2004/09/enumeration',
            'xmlns:x': 'http://schemas.xmlsoap.org/ws/2004/09/transfer',
            'xmlns:w': 'http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd',
            'xmlns:p': 'http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd',
            'xmlns:rsp': 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell',
            'xmlns:cfg': 'http://schemas.microsoft.com/wbem/wsman/1/config'
        }
    def send_message(self, message):
        # TODO add message_id vs relates_to checking
        # TODO port error handling code
        return self.transport.send_message(message)
    def close_shell(self, shell_id):
        """
        Close the shell
        @param string shell_id: The shell id on the remote machine. See #open_shell
        @returns This should have more error checking but it just returns true for now.
        @rtype bool
        """
        message_id = uuid.uuid4()
        node = xmlwitch.Builder(version='1.0', encoding='utf-8')
        with node.env__Envelope(**self._namespaces):
            with node.env__Header:
                self._build_soap_header(node, message_id)
                self._set_resource_uri_cmd(node)
                self._set_action_delete(node)
                self._set_selector_shell_id(node, shell_id)
            # SOAP message requires empty env:Body
            with node.env__Body:
                pass
        response = self.send_message(str(node))
        root = ET.fromstring(response)
        relates_to = next(node for node in root.findall('.//*') if node.tag.endswith('RelatesTo')).text
        # TODO change assert into user-friendly exception
        assert uuid.UUID(relates_to.replace('uuid:', '')) == message_id
    def run_command(self, shell_id, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):
        """
        Run a command on a machine with an open shell
        @param string shell_id: The shell id on the remote machine. See #open_shell
        @param string command: The command to run on the remote machine
        @param iterable of string arguments: An array of arguments for this command
        @param bool console_mode_stdin: (default: True)
        @param bool skip_cmd_shell: (default: False)
        @return: The CommandId from the SOAP response. This is the ID we need to query in order to get output.
        @rtype string
        """
        node = xmlwitch.Builder(version='1.0', encoding='utf-8')
        with node.env__Envelope(**self._namespaces):
            with node.env__Header:
                self._build_soap_header(node)
                self._set_resource_uri_cmd(node)
                self._set_action_command(node)
                self._set_selector_shell_id(node, shell_id)
                with node.w__OptionSet:
                    node.w__Option(str(console_mode_stdin).upper(), Name='WINRS_CONSOLEMODE_STDIN')
                    node.w__Option(str(skip_cmd_shell).upper(), Name='WINRS_SKIP_CMD_SHELL')
            with node.env__Body:
                with node.rsp__CommandLine:
                    node.rsp__Command(command)
                    # Arguments are flattened into one space-joined string.
                    if arguments:
                        node.rsp__Arguments(' '.join(arguments))
        response = self.send_message(str(node))
        root = ET.fromstring(response)
        command_id = next(node for node in root.findall('.//*') if node.tag.endswith('CommandId')).text
        return command_id
    def cleanup_command(self, shell_id, command_id):
        """
        Clean-up after a command. @see #run_command
        @param string shell_id: The shell id on the remote machine. See #open_shell
        @param string command_id: The command id on the remote machine. See #run_command
        @returns: This should have more error checking but it just returns true for now.
        @rtype bool
        """
        message_id = uuid.uuid4()
        node = xmlwitch.Builder(version='1.0', encoding='utf-8')
        with node.env__Envelope(**self._namespaces):
            with node.env__Header:
                self._build_soap_header(node, message_id)
                self._set_resource_uri_cmd(node)
                self._set_action_signal(node)
                self._set_selector_shell_id(node, shell_id)
            # Signal the Command references to terminate (close stdout/stderr)
            with node.env__Body:
                with node.rsp__Signal(CommandId=command_id):
                    node.rsp__Code('http://schemas.microsoft.com/wbem/wsman/1/windows/shell/signal/terminate')
        response = self.send_message(str(node))
        root = ET.fromstring(response)
        relates_to = next(node for node in root.findall('.//*') if node.tag.endswith('RelatesTo')).text
        # TODO change assert into user-friendly exception
        assert uuid.UUID(relates_to.replace('uuid:', '')) == message_id
    def get_command_output(self, shell_id, command_id):
        """
        Get the Output of the given shell and command
        @param string shell_id: The shell id on the remote machine. See #open_shell
        @param string command_id: The command id on the remote machine. See #run_command
        #@return [Hash] Returns a Hash with a key :exitcode and :data. Data is an Array of Hashes where the corresponding key
        #   is either :stdout or :stderr. The reason it is in an Array is so we can get the output in the order it occurs on
        #   the console.
        """
        # Keep polling Receive until the server reports CommandState/Done.
        stdout_buffer, stderr_buffer = [], []
        command_done = False
        while not command_done:
            stdout, stderr, return_code, command_done = \
                self._raw_get_command_output(shell_id, command_id)
            stdout_buffer.append(stdout)
            stderr_buffer.append(stderr)
        return "".join(stdout_buffer), "".join(stderr_buffer), return_code
    def _raw_get_command_output(self, shell_id, command_id):
        # One Receive round-trip: returns (stdout, stderr, return_code, done).
        node = xmlwitch.Builder(version='1.0', encoding='utf-8')
        with node.env__Envelope(**self._namespaces):
            with node.env__Header:
                self._build_soap_header(node)
                self._set_resource_uri_cmd(node)
                self._set_action_receive(node)
                self._set_selector_shell_id(node, shell_id)
            with node.env__Body:
                with node.rsp__Receive:
                    node.rsp__DesiredStream('stdout stderr', CommandId=command_id)
        response = self.send_message(str(node))
        root = ET.fromstring(response)
        stream_nodes = [node for node in root.findall('.//*') if node.tag.endswith('Stream')]
        stdout = stderr = ''
        return_code = -1
        for stream_node in stream_nodes:
            if stream_node.text:
                # Stream payloads arrive base64-encoded.
                # NOTE(review): str() on a b64decode result produces
                # "b'...'" under Python 3 -- this code appears to assume
                # Python 2 bytes/str equivalence; confirm.
                if stream_node.attrib['Name'] == 'stdout':
                    stdout += str(base64.b64decode(stream_node.text.encode('ascii')))
                elif stream_node.attrib['Name'] == 'stderr':
                    stderr += str(base64.b64decode(stream_node.text.encode('ascii')))
        # We may need to get additional output if the stream has not finished.
        # The CommandState will change from Running to Done like so:
        # @example
        #   from...
        #   <rsp:CommandState CommandId="..." State="http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Running"/>
        #   to...
        #   <rsp:CommandState CommandId="..." State="http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done">
        #       <rsp:ExitCode>0</rsp:ExitCode>
        #   </rsp:CommandState>
        command_done = len([node for node in root.findall('.//*') if node.get('State', '').endswith('CommandState/Done')]) == 1
        if command_done:
            return_code = int(next(node for node in root.findall('.//*') if node.tag.endswith('ExitCode')).text)
        return stdout, stderr, return_code, command_done
| |
import itertools
from hazelcast.future import combine_futures, ImmediateFuture
from hazelcast.protocol.codec import map_add_entry_listener_codec, map_add_entry_listener_to_key_codec, \
map_add_entry_listener_with_predicate_codec, map_add_entry_listener_to_key_with_predicate_codec, \
map_add_index_codec, map_clear_codec, map_contains_key_codec, map_contains_value_codec, map_delete_codec, \
map_entry_set_codec, map_entries_with_predicate_codec, map_evict_codec, map_evict_all_codec, map_flush_codec, \
map_force_unlock_codec, map_get_codec, map_get_all_codec, map_get_entry_view_codec, map_is_empty_codec, \
map_is_locked_codec, map_key_set_codec, map_key_set_with_predicate_codec, map_load_all_codec, \
map_load_given_keys_codec, map_lock_codec, map_put_codec, map_put_all_codec, map_put_if_absent_codec, \
map_put_transient_codec, map_size_codec, map_remove_codec, map_remove_if_same_codec, \
map_remove_entry_listener_codec, map_replace_codec, map_replace_if_same_codec, map_set_codec, map_try_lock_codec, \
map_try_put_codec, map_try_remove_codec, map_unlock_codec, map_values_codec, map_values_with_predicate_codec, \
map_add_interceptor_codec, map_execute_on_all_keys_codec, map_execute_on_key_codec, map_execute_on_keys_codec, \
map_execute_with_predicate_codec
from hazelcast.proxy.base import Proxy, EntryEvent, EntryEventType, get_entry_listener_flags
from hazelcast.util import check_not_none, thread_id, to_millis
class Map(Proxy):
    def add_entry_listener(self, include_value=False, key=None, predicate=None, added=None, removed=None, updated=None,
                           evicted=None, evict_all=None, clear_all=None, merged=None, expired=None):
        """Register entry-event callbacks on this map.

        Each keyword callback (added, removed, ...) is invoked with an
        EntryEvent when its event type arrives.  *key* and/or *predicate*
        narrow which entries are observed.  Returns the listener
        registration id (pass it to remove_entry_listener).
        """
        # Fold the supplied callbacks into the protocol's listener bit flags.
        flags = get_entry_listener_flags(added=added, removed=removed, updated=updated,
                                         evicted=evicted, evict_all=evict_all, clear_all=clear_all, merged=merged,
                                         expired=expired)
        # Pick the narrowest codec for the key/predicate combination.
        if key and predicate:
            key_data = self._to_data(key)
            predicate_data = self._to_data(predicate)
            request = map_add_entry_listener_to_key_with_predicate_codec.encode_request(self.name, key_data,
                                                                                        predicate_data, include_value,
                                                                                        flags, False)
        elif key and not predicate:
            key_data = self._to_data(key)
            request = map_add_entry_listener_to_key_codec.encode_request(self.name, key_data, include_value, flags,
                                                                         False)
        elif not key and predicate:
            # NOTE(review): rebinds ``predicate`` to its serialized form
            # rather than using a predicate_data local like the branch above.
            predicate = self._to_data(predicate)
            request = map_add_entry_listener_with_predicate_codec.encode_request(self.name, predicate, include_value,
                                                                                 flags, False)
        else:
            request = map_add_entry_listener_codec.encode_request(self.name, include_value, flags, False)
        def handle_event_entry(**_kwargs):
            # Dispatch the decoded event to the matching user callback.
            # NOTE(review): if an event type arrives whose callback was not
            # supplied, this calls None -- presumably the flags above ensure
            # only subscribed types are delivered; confirm.
            event = EntryEvent(self._to_object, **_kwargs)
            if event.event_type == EntryEventType.added:
                added(event)
            elif event.event_type == EntryEventType.removed:
                removed(event)
            elif event.event_type == EntryEventType.updated:
                updated(event)
            elif event.event_type == EntryEventType.evicted:
                evicted(event)
            elif event.event_type == EntryEventType.evict_all:
                evict_all(event)
            elif event.event_type == EntryEventType.clear_all:
                clear_all(event)
            elif event.event_type == EntryEventType.merged:
                merged(event)
            elif event.event_type == EntryEventType.expired:
                expired(event)
        return self._start_listening(request,
                                     lambda m: map_add_entry_listener_codec.handle(m,
                                                                                   handle_event_entry),
                                     lambda r: map_add_entry_listener_codec.decode_response(r)[
                                         'response'])
def add_index(self, attribute, ordered=False):
return self._encode_invoke(map_add_index_codec, attribute=attribute, ordered=ordered)
def add_interceptor(self, interceptor):
return self._encode_invoke(map_add_interceptor_codec, interceptor=self._to_data(interceptor))
def clear(self):
return self._encode_invoke(map_clear_codec)
def contains_key(self, key):
"""
:param key:
:return:
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_contains_key_codec, key_data,
key=key_data, thread_id=thread_id())
def contains_value(self, value):
check_not_none(value, "value can't be None")
value_data = self._to_data(value)
return self._encode_invoke(map_contains_value_codec, value=value_data)
def delete(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_delete_codec, key_data, key=key_data,
thread_id=thread_id())
def entry_set(self, predicate=None):
if predicate:
predicate_data = self._to_data(predicate)
return self._encode_invoke(map_entries_with_predicate_codec, predicate=predicate_data)
else:
return self._encode_invoke(map_entry_set_codec)
def evict(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_evict_codec, key_data, key=key_data,
thread_id=thread_id())
def evict_all(self):
return self._encode_invoke(map_evict_all_codec)
def execute_on_entries(self, entry_processor, predicate=None):
if predicate:
return self._encode_invoke(map_execute_with_predicate_codec, entry_processor=self._to_data(entry_processor),
predicate=self._to_data(predicate))
return self._encode_invoke(map_execute_on_all_keys_codec, entry_processor=self._to_data(entry_processor))
def execute_on_key(self, key, entry_processor):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_execute_on_key_codec, key_data, key=key_data,
entry_processor=self._to_data(entry_processor), thread_id=thread_id())
def execute_on_keys(self, keys, entry_processor):
key_list = []
for key in keys:
check_not_none(key, "key can't be None")
key_list.append(self._to_data(key))
return self._encode_invoke(map_execute_on_keys_codec, entry_processor=self._to_data(entry_processor),
keys=key_list)
def flush(self):
return self._encode_invoke(map_flush_codec)
def force_unlock(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_force_unlock_codec, key_data, key=key_data)
def get(self, key):
"""
:param key:
:return:
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_get_codec, key_data, key=key_data, thread_id=thread_id())
def get_all(self, keys):
check_not_none(keys, "keys can't be None")
if not keys:
return ImmediateFuture({})
partition_service = self._client.partition_service
partition_to_keys = {}
for key in keys:
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
partition_id = partition_service.get_partition_id(key_data)
try:
partition_to_keys[partition_id].append(key_data)
except KeyError:
partition_to_keys[partition_id] = [key_data]
futures = []
for partition_id, key_list in partition_to_keys.iteritems():
future = self._encode_invoke_on_partition(map_get_all_codec, partition_id, keys=key_list)
futures.append(future)
def merge(f):
return dict(itertools.chain(*f.result()))
return combine_futures(*futures).continue_with(merge)
def get_entry_view(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_get_entry_view_codec, key_data, key=key_data,
thread_id=thread_id())
def is_empty(self):
return self._encode_invoke(map_is_empty_codec)
def is_locked(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_is_locked_codec, key_data, key=key_data)
def key_set(self, predicate=None):
if predicate:
predicate_data = self._to_data(predicate)
return self._encode_invoke(map_key_set_with_predicate_codec, predicate=predicate_data)
else:
return self._encode_invoke(map_key_set_codec)
def load_all(self, keys=None, replace_existing_values=True):
if keys:
key_data_list = map(self._to_data, keys)
return self._encode_invoke(map_load_given_keys_codec, keys=key_data_list,
replace_existing_values=replace_existing_values)
else:
return self._encode_invoke(map_load_all_codec,
replace_existing_values=replace_existing_values)
def lock(self, key, ttl=-1):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_lock_codec, key_data, key=key_data, thread_id=thread_id(),
ttl=to_millis(ttl))
def put(self, key, value, ttl=-1):
"""
:param key:
:param value:
:param ttl:
:return:
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_put_codec, key_data, key=key_data, value=value_data,
thread_id=thread_id(),
ttl=to_millis(ttl))
def put_all(self, map):
check_not_none(map, "map can't be None")
if not map:
return ImmediateFuture(None)
partition_service = self._client.partition_service
partition_map = {}
for key, value in map.iteritems():
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
entry = (self._to_data(key), self._to_data(value))
partition_id = partition_service.get_partition_id(entry[0])
try:
partition_map[partition_id].append(entry)
except KeyError:
partition_map[partition_id] = [entry]
futures = []
for partition_id, entry_list in partition_map.iteritems():
future = self._encode_invoke_on_partition(map_put_all_codec, partition_id,
entries=dict(entry_list))
futures.append(future)
return combine_futures(*futures)
def put_if_absent(self, key, value, ttl=-1):
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_put_if_absent_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id(), ttl=to_millis(ttl))
def put_transient(self, key, value, ttl=-1):
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_put_transient_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id(), ttl=to_millis(ttl))
def remove(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_remove_codec, key_data, key=key_data,
thread_id=thread_id())
    def remove_if_same(self, key, value):
        """Invoke the conditional ``removeIfSame`` operation: the entry
        is only removed when it currently maps to *value*.

        :param key: entry key; must not be ``None``.
        :param value: expected current value; must not be ``None``.
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._encode_invoke_on_key(map_remove_if_same_codec, key_data, key=key_data,
                                          value=value_data, thread_id=thread_id())
    def remove_entry_listener(self, registration_id):
        """Deregister a previously registered entry listener.

        :param registration_id: id returned when the listener was added.
        :return: the result of ``_stop_listening`` for this registration.
        """
        return self._stop_listening(registration_id,
                                    lambda i: map_remove_entry_listener_codec.encode_request(self.name, i))
    def replace(self, key, value):
        """Invoke the map ``replace`` operation for *key* on its owning
        member.

        :param key: entry key; must not be ``None``.
        :param value: new value; must not be ``None``.
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._encode_invoke_on_key(map_replace_codec, key_data, key=key_data, value=value_data,
                                          thread_id=thread_id())
    def replace_if_same(self, key, old_value, new_value):
        """Invoke the conditional ``replaceIfSame`` operation: the entry
        is only replaced when it currently maps to *old_value*.

        :param key: entry key; must not be ``None``.
        :param old_value: expected current value; must not be ``None``.
        :param new_value: replacement value; must not be ``None``.
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        check_not_none(old_value, "old_value can't be None")
        check_not_none(new_value, "new_value can't be None")
        key_data = self._to_data(key)
        old_value_data = self._to_data(old_value)
        new_value_data = self._to_data(new_value)
        return self._encode_invoke_on_key(map_replace_if_same_codec, key_data, key=key_data,
                                          test_value=old_value_data,
                                          value=new_value_data, thread_id=thread_id())
    def set(self, key, value, ttl=-1):
        """Serialize *key* and *value* and invoke the map ``set``
        operation on the member owning *key*.

        :param key: entry key; must not be ``None``.
        :param value: entry value; must not be ``None``.
        :param ttl: time-to-live forwarded to the server, converted to
            milliseconds by ``to_millis`` (``-1`` by default).
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._encode_invoke_on_key(map_set_codec, key_data, key=key_data, value=value_data,
                                          thread_id=thread_id(),
                                          ttl=to_millis(ttl))
    def size(self):
        """Invoke the map ``size`` operation.

        :return: the invocation future from ``_encode_invoke``.
        """
        return self._encode_invoke(map_size_codec)
    def try_lock(self, key, ttl=-1, timeout=0):
        """Invoke the map ``tryLock`` operation for *key*.

        :param key: key to lock; must not be ``None``.
        :param ttl: lock lease time, converted to milliseconds
            (``-1`` by default).
        :param timeout: how long to wait for the lock, converted to
            milliseconds (``0`` by default).
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_try_lock_codec, key_data, key=key_data,
                                          thread_id=thread_id(), lease=to_millis(ttl), timeout=to_millis(timeout))
    def try_put(self, key, value, timeout=0):
        """Invoke the map ``tryPut`` operation for *key*.

        :param key: entry key; must not be ``None``.
        :param value: entry value; must not be ``None``.
        :param timeout: operation timeout, converted to milliseconds
            (``0`` by default).
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._encode_invoke_on_key(map_try_put_codec, key_data, key=key_data, value=value_data,
                                          thread_id=thread_id(), timeout=to_millis(timeout))
    def try_remove(self, key, timeout=0):
        """Invoke the map ``tryRemove`` operation for *key*.

        :param key: key to remove; must not be ``None``.
        :param timeout: operation timeout, converted to milliseconds
            (``0`` by default).
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_try_remove_codec, key_data, key=key_data,
                                          thread_id=thread_id(), timeout=to_millis(timeout))
    def unlock(self, key):
        """Invoke the map ``unlock`` operation for *key*.

        :param key: key to unlock; must not be ``None``.
        :return: the invocation future from ``_encode_invoke_on_key``.
        """
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_unlock_codec, key_data, key=key_data,
                                          thread_id=thread_id())
def values(self, predicate=None):
if predicate:
predicate_data = self._to_data(predicate)
return self._encode_invoke(map_values_with_predicate_codec, predicate=predicate_data)
else:
return self._encode_invoke(map_values_codec)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics."""
import math
import time
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
def _get_or_create_eval_step():
  """Gets or creates the eval step `Tensor`.

  Returns:
    A `Tensor` representing a counter for the evaluation step.

  Raises:
    ValueError: If multiple `Tensors` have been added to the
      `tf.GraphKeys.EVAL_STEP` collection.
  """
  collected = ops.get_default_graph().get_collection(ops.GraphKeys.EVAL_STEP)
  if len(collected) > 1:
    raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
  if collected:
    return collected[0]
  # Nothing registered yet: create a local, non-trainable int64 counter
  # and register it in the EVAL_STEP collection.
  return variable_scope.get_variable(
      'eval_step',
      shape=[],
      dtype=dtypes.int64,
      initializer=init_ops.zeros_initializer(),
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
def _get_latest_eval_step_value(update_ops):
  """Gets the eval step `Tensor` value after running `update_ops`.

  Args:
    update_ops: A list of `Tensors` or a dictionary of names to `Tensors`, which
      are run before reading the eval step value.

  Returns:
    A `Tensor` representing the value for the evaluation step.
  """
  if isinstance(update_ops, dict):
    update_ops = list(update_ops.values())
  # Force the update ops to run before the read so the returned value
  # reflects this step's increment.
  with ops.control_dependencies(update_ops):
    return array_ops.identity(_get_or_create_eval_step().read_value())
class _MultiStepStopAfterNEvalsHook(session_run_hook.SessionRunHook):
  """Run hook used by the evaluation routines to run the `eval_ops` N times.

  Multi-step variant: each `run` call executes `steps_per_run` steps, and
  the per-run step budget is re-loaded into the steps-per-run variable so
  the final run does not overshoot `num_evals`.
  """

  def __init__(self, num_evals, steps_per_run=1):
    """Constructs the run hook.

    Args:
      num_evals: The number of evaluations to run for. if set to None, will
        iterate the dataset until all inputs are exhausted.
      steps_per_run: Number of steps executed per run call.
    """
    self._num_evals = num_evals
    self._evals_completed = None
    self._steps_per_run_initial_value = steps_per_run

  def _set_evals_completed_tensor(self, updated_eval_step):
    # Tensor yielding the eval-step counter after the updates have run;
    # wired up by `_evaluate_once` before the session starts.
    self._evals_completed = updated_eval_step

  def begin(self):
    self._steps_per_run_variable = \
        basic_session_run_hooks.get_or_create_steps_per_run_variable()

  def after_create_session(self, session, coord):
    # Update number of steps to run in the first run call
    if self._num_evals is None:
      steps = self._steps_per_run_initial_value
    else:
      # Never schedule more steps than evaluations remaining.
      steps = min(self._steps_per_run_initial_value, self._num_evals)
    self._steps_per_run_variable.load(steps, session=session)

  def before_run(self, run_context):
    # Fetch the completed-evals counter alongside the user's eval ops.
    return session_run_hook.SessionRunArgs(
        {'evals_completed': self._evals_completed})

  def after_run(self, run_context, run_values):
    evals_completed = run_values.results['evals_completed']
    # Update number of steps to run in the next iteration
    if self._num_evals is None:
      steps = self._steps_per_run_initial_value
    else:
      steps = min(self._num_evals - evals_completed,
                  self._steps_per_run_initial_value)
    self._steps_per_run_variable.load(steps, session=run_context.session)
    if self._num_evals is None:
      logging.info('Evaluation [%d]', evals_completed)
    else:
      logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
    if self._num_evals is not None and evals_completed >= self._num_evals:
      run_context.request_stop()
class _StopAfterNEvalsHook(session_run_hook.SessionRunHook):
  """Run hook used by the evaluation routines to run the `eval_ops` N times."""

  def __init__(self, num_evals, log_progress=True):
    """Constructs the run hook.

    Args:
      num_evals: The number of evaluations to run for. if set to None, will
        iterate the dataset until all inputs are exhausted.
      log_progress: Whether to log evaluation progress, defaults to True.
    """
    # The number of evals to run for.
    self._num_evals = num_evals
    self._evals_completed = None
    self._log_progress = log_progress
    # Reduce logging frequency if there are 20 or more evaluations.
    self._log_frequency = (1 if (num_evals is None or num_evals < 20) else
                           math.floor(num_evals / 10.))

  def _set_evals_completed_tensor(self, updated_eval_step):
    # Tensor yielding the eval-step counter after the updates have run;
    # wired up by `_evaluate_once` before the session starts.
    self._evals_completed = updated_eval_step

  def before_run(self, run_context):
    # Fetch the completed-evals counter alongside the user's eval ops.
    return session_run_hook.SessionRunArgs(
        {'evals_completed': self._evals_completed})

  def after_run(self, run_context, run_values):
    evals_completed = run_values.results['evals_completed']
    if self._log_progress:
      if self._num_evals is None:
        logging.info('Evaluation [%d]', evals_completed)
      else:
        # Log only every `_log_frequency` evals, plus the final one.
        if ((evals_completed % self._log_frequency) == 0 or
            (self._num_evals == evals_completed)):
          logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
    if self._num_evals is not None and evals_completed >= self._num_evals:
      run_context.request_stop()
def _evaluate_once(checkpoint_path,
                   master='',
                   scaffold=None,
                   eval_ops=None,
                   feed_dict=None,
                   final_ops=None,
                   final_ops_feed_dict=None,
                   hooks=None,
                   config=None):
  """Evaluates the model at the given checkpoint path.

  During a single evaluation, the `eval_ops` is run until the session is
  interrupted or requested to finish. This is typically requested via a
  `tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
  the requested number of times.

  Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
  `Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
  evaluated a single time after `eval_ops` has finished running and the fetched
  values of `final_ops` are returned. If `final_ops` is left as `None`, then
  `None` is returned.

  One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
  summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
  summaries run immediately after the model checkpoint has been restored.

  Note that `evaluate_once` creates a local variable used to track the number of
  evaluations run via `tf.contrib.training.get_or_create_eval_step`.
  Consequently, if a custom local init op is provided via a `scaffold`, the
  caller should ensure that the local init op also initializes the eval step.

  Args:
    checkpoint_path: The path to a checkpoint to use for evaluation.
    master: The BNS address of the TensorFlow master.
    scaffold: An tf.compat.v1.train.Scaffold instance for initializing variables
      and restoring variables. Note that `scaffold.init_fn` is used by the
      function to restore the checkpoint. If you supply a custom init_fn, then
      it must also take care of restoring the model from its checkpoint.
    eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to
      `Tensors`, which is run until the session is requested to stop, commonly
      done by a `tf.contrib.training.StopAfterNEvalsHook`.
    feed_dict: The feed dictionary to use when executing the `eval_ops`.
    final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
      to `Tensors`.
    final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
    hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside
      the evaluation loop.
    config: An instance of `tf.compat.v1.ConfigProto` that will be used to
      configure the `Session`. If left as `None`, the default will be used.

  Returns:
    The fetched values of `final_ops` or `None` if `final_ops` is `None`.
  """
  eval_step = _get_or_create_eval_step()
  # Prepare the run hooks.
  hooks = list(hooks or [])
  if eval_ops is not None:
    # Piggyback an eval-step increment onto the user's eval_ops so the
    # stop hooks can observe progress.
    if any(isinstance(h, _MultiStepStopAfterNEvalsHook) for h in hooks):
      # Multi-step mode: each run call advances the counter by the
      # current steps-per-run value rather than by one.
      steps_per_run_variable = \
          basic_session_run_hooks.get_or_create_steps_per_run_variable()
      update_eval_step = state_ops.assign_add(
          eval_step,
          math_ops.cast(steps_per_run_variable, dtype=eval_step.dtype),
          use_locking=True)
    else:
      update_eval_step = state_ops.assign_add(eval_step, 1, use_locking=True)

    if isinstance(eval_ops, dict):
      eval_ops['update_eval_step'] = update_eval_step
    elif isinstance(eval_ops, (tuple, list)):
      eval_ops = list(eval_ops) + [update_eval_step]
    else:
      eval_ops = [eval_ops, update_eval_step]

    # Tensor that reads the counter after the updates have run; handed
    # to the stop hooks so they can decide when to request a stop.
    eval_step_value = _get_latest_eval_step_value(eval_ops)

    for h in hooks:
      if isinstance(h, (_StopAfterNEvalsHook, _MultiStepStopAfterNEvalsHook)):
        h._set_evals_completed_tensor(eval_step_value)  # pylint: disable=protected-access

  logging.info('Starting evaluation at ' +
               time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime()))
  start = time.time()
  # Prepare the session creator.
  session_creator = monitored_session.ChiefSessionCreator(
      scaffold=scaffold,
      checkpoint_filename_with_path=checkpoint_path,
      master=master,
      config=config)

  # final_ops are evaluated once, at session end, by this hook.
  final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,
                                                        final_ops_feed_dict)
  hooks.append(final_ops_hook)

  with monitored_session.MonitoredSession(
      session_creator=session_creator, hooks=hooks) as session:
    if eval_ops is not None:
      # Loop until a hook (e.g. _StopAfterNEvalsHook) requests a stop.
      while not session.should_stop():
        session.run(eval_ops, feed_dict)
  logging.info('Inference Time : {:0.5f}s'.format(time.time() - start))

  logging.info('Finished evaluation at ' +
               time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()))
  return final_ops_hook.final_ops_values
| |
"""Heuristic-based algorithm to find the family trees in the titanic data.
Only involves a few parameters, all of which are related to age
(e.g. minimum age for marriage). See the constant defined at the
top for all of them. Neither iterative nor stochastic methods are
used.
First a graph of individuals is constructed where the edges represent
shared last names. This includes any previous names such as maiden names.
Each edge represents a relationship that can be classified as one
of the following:
o Spouse
o Parent/Child
o Sibling
o Extended (e.g. aunt, cousin, or distant relative)
The classification scheme is optimistic, i.e. we only ask
whether or not the relationship is possible. Much of information
can be directly inferred from the given attributes (e.g. two
individuals cannot be siblings if one of them has sibsp==0).
Next we prove spousal relationships. This is fairly easy, especially
as many spouse name pairs are of the form:
West, Mrs. Edwy Arthur (Ada Mary Worth)
West, Mr. Edwy Arthur
We don't require names of this style and can also use age differences,
a required Mrs. title for the female, and other simple heuristics.
The only difficulty arises when one individual could be classified as
married to multiple individuals. There are only a few such situations and
they can all be handled by assigning marriage to the couple in which the
female has the male's first name (e.g. Mrs. Edwy Arthur).
With spousal relationships found it is then straightforward to workout
parent/child relationships. The only ambiguities at this point are
child vs. sibling and they can be resolved by checking for common
parent(s). Lastly, parent/child relationships can be used to work out
sibling relationships. We can then recover the structure of nuclear
families: families in which there is at least one parent and one or more
children.
Outside of the nuclear family structure, we still maintain the
relationship graph which allows for such classifications as:
o siblings traveling together without any parents
o extended relations
o families joined by extended relationships
At the moment there are still some edge cases. In particular, the largest
relationship graph component isn't separated into a family structure.
Additionally, it would be nice to remove or relax the few parameters.
"""
from __future__ import division
import re
from contextlib import contextmanager
from collections import defaultdict
import numpy as np
from data import TitanicDataSet
import graphlib
__all__ = ['construct_family_components',
'find_nuclear_families']
# Parameters
#--------------------------------------------------------
# No on under this age can be consider married
MINIMUM_AGE_FOR_MARRIAGE = 14
# # At most, a married woman can be n years older than husband
# # This rule isn't needed, and therefore have made the
# # value large enough to have no effect.
# MAXIMUM_MARRIED_FEMALE_AGE_ADVANTAGE = 100
# A parent must be at least n years older than their child
MINIMUM_PARENT_AGE_ADVANTAGE = 14
# # For two family graphs, there is an extra child-like individual
# # (possibly a niece or nephew or just distant relative) that can't
# # be discerned from the true children. With this set we classify them
# # as a child.
# ALLOW_ADDITIONAL_CHILDREN = True
# Name parsing
#--------------------------------------------------------
name_rgx = re.compile(r'''
^ # Explicit start
\s*
([^,]+) # Last Name
, \s+
([^.]+) \. # Title
\s+
([^("]+)? # Main name
\s*
(?:
"([^"]+)" # Nick name
)?
\s*
(?: # Other name
\(
([^)]+)
\)
)?
''', re.VERBOSE)
class ParsedName(object):
    '''Structured view of a passenger name: last name, title, main name,
    optional nickname, and optional parenthesized "other" name (e.g. a
    maiden name).
    '''
    def __init__(self, last, title, main, nick, other):
        # When the main name is missing, fall back to the first word of
        # the parenthesized "other" name.
        if not main and other:
            main = other.split(None, 1)[0]
        self.last = last
        self.title = title
        self.main = main
        self.nick = nick
        self.other = other.strip('"') if other else None
    @classmethod
    def create(cls, name):
        '''Parse a raw (case-insensitive) name string into a ParsedName.'''
        match = name_rgx.match(name.lower())
        if match is None:
            raise ValueError('bad name %r' % (name,))
        return cls(*match.groups())
    def iter_last_names(self):
        '''Yield the current last name, then any previous last name taken
        from the final word of the "other" name.
        '''
        yield self.last
        if self.other:
            yield self.other.rsplit(None, 1)[-1]
# Graph data structures
#--------------------------------------------------------
class DotIDMixin(object):
    '''Base class for nodes that are written to dot files
    '''
    # process-wide counter shared by all subclasses
    _dot_id_counter = 0
    @property
    def dot_id(self):
        # Lazily assign a unique string id on first access.
        if not hasattr(self, '_dot_id'):
            DotIDMixin._dot_id_counter += 1
            self._dot_id = str(DotIDMixin._dot_id_counter)
        return self._dot_id
class Person(graphlib.Node, DotIDMixin):
    '''Graph node for one passenger.

    Wraps the raw attribute record (``self.a``) and accumulates
    relationship facts (spouse, parents, children, siblings,
    extendeds) as they are proven.
    '''
    def __init__(self, attributes, survived):
        super(Person, self).__init__()
        # survived flag from the training labels; None for test rows
        self.survived = survived
        self.a = attributes
        self.parsed_name = ParsedName.create(attributes.name)
        # These are filled in as relationships are proven
        self.spouse = None
        self.mother = None
        self.father = None
        self.children = ()
        self.siblings = ()
        self.extendeds = ()
    @property
    def known_parents(self):
        # list of the proven parents (0, 1 or 2 entries)
        a = []
        if self.mother: a.append(self.mother)
        if self.father: a.append(self.father)
        return a
    @property
    def n_known_parents(self):
        s = 0
        if self.mother: s+=1
        if self.father: s+=1
        return s
    @property
    def n_known_children(self):
        return len(self.children)
    @property
    def n_known_siblings(self):
        return len(self.siblings)
    @property
    def n_known_extended(self):
        return len(self.extendeds)
    def __str__(self):
        # NOTE(review): relies on a ``name`` attribute presumably
        # provided by graphlib.Node -- confirm.
        return 'p(%s)' % (self.name,)
    @property
    def adjusted_sibsp(self):
        # sibsp minus the siblings/spouse already proven; 0 means no
        # further sibling or spouse links are possible
        return self.a.sibsp - len(self.siblings) - (1 if self.spouse else 0)
    @property
    def adjusted_parch(self):
        # parch minus the parents/children already proven
        return self.a.parch - self.n_known_parents - self.n_known_children
    def get_edge_to(self, other):
        '''Return the unique edge linking self to *other*; raises
        ValueError when there is not exactly one such edge.
        '''
        es = [e for e in self.edges if e.other(self) == other]
        if len(es) != 1:
            raise ValueError("%d edges to other" % (len(es),))
        return es[0]
    def has_edge_to(self, other):
        return any(1 for e in self.edges if e.other(self) == other)
class CommonLastName(graphlib.Node):
    '''Keeps track of individuals who share a common last name
    '''
    def __init__(self, lastname):
        super(CommonLastName, self).__init__()
        # the shared (lower-cased) last name this node represents
        self.lastname = lastname
class LastNameEdge(graphlib.Edge):
    '''Edge linking a Person to one of their CommonLastName nodes.'''
    def __init__(self, name, person):
        # arguments may arrive in either order; normalize them first
        if not isinstance(name, CommonLastName):
            name,person = person,name
        assert isinstance(name, CommonLastName), 'name %r' % (name,)
        assert isinstance(person, Person), 'person %r' % (person,)
        super(LastNameEdge, self).__init__(name, person)
        self.name = name
        self.person = person
class RelationEdge(graphlib.Edge):
    '''Keeps track of what relationships are possible between
    two linked individuals.

    Flag order used throughout: (spouse, sibling, child, extended).
    '''
    def __init__(self, a, b):
        assert isinstance(a, Person)
        assert isinstance(b, Person)
        super(RelationEdge, self).__init__(a, b)
        self.a = a
        self.b = b
        # start fully optimistic: every relationship type is possible
        self.set_all_possibilities(True)
    def set_all_possibilities(self, value):
        # set every possibility flag to bool(value)
        value = bool(value)
        self.could_be_spouse = value
        self.could_be_sibling = value
        self.could_be_child = value
        self.could_be_extended = value
    @property
    def possibilities(self):
        return (self.could_be_spouse,
                self.could_be_sibling,
                self.could_be_child,
                self.could_be_extended)
    @property
    def n_possibilities(self):
        return sum(self.possibilities)
    @property
    def is_definitive(self):
        # exactly one classification remains possible
        return self.n_possibilities == 1
    @property
    def definitive_spouse(self):
        return self.check_definitive(0)
    @property
    def definitive_sibling(self):
        return self.check_definitive(1)
    @property
    def definitive_child(self):
        return self.check_definitive(2)
    @property
    def definitive_extended(self):
        return self.check_definitive(3)
    def check_definitive(self, i):
        # True when possibility *i* is the single remaining one
        return self.possibilities[i] and self.is_definitive
    @property
    def could_be_close(self):
        # "close" = anything tighter than an extended relationship
        return (self.could_be_spouse or
                self.could_be_sibling or
                self.could_be_child)
class NuclearFamily(DotIDMixin):
    '''A single nuclear family: optional mother/father plus children.'''
    def __init__(self, name='', mother=None, father=None, children=()):
        super(NuclearFamily, self).__init__()
        self.name = name
        self.mother = mother
        self.father = father
        self.children = list(children)
        # when both parents are given they must already be married to
        # each other
        if None not in (mother, father):
            assert mother.spouse is father and father.spouse is mother
        # add more validation
# Relationship construction
#--------------------------------------------------------
class LastNameBuilder(graphlib.GraphBuilder):
    '''Builds the bipartite graph linking people to their last names.'''
    def __init__(self):
        super(LastNameBuilder, self).__init__(node_factory=CommonLastName,
                                              edge_factory=LastNameEdge)
    def add_edge(self, name, person):
        # register the Person object as its own graph node, then link it
        # to the (possibly newly created) CommonLastName node
        self.values_to_nodes[person] = person
        super(LastNameBuilder, self).add_edge(self.get_node(name),
                                              person)
# Sentinel so the default data sets are loaded lazily at call time.
# The old defaults ran TitanicDataSet.get_train()/get_test() once at
# import time, which triggered data loading on import and shared the
# resulting objects across every call.
_DEFAULT_DATASET = object()

def construct_family_components(train=_DEFAULT_DATASET,
                                test=_DEFAULT_DATASET,
                                tune=True):
    '''Entry point for finding relationships.

    Returns a list of graph components (graphlib.Component)
    where the nodes are individuals (Person) and edges are
    relationships (RelationEdge).

    :param train: training data set, or None to skip it; defaults to
        TitanicDataSet.get_train(), loaded lazily.
    :param test: test data set, or None to skip it; defaults to
        TitanicDataSet.get_test(), loaded lazily.
    :param tune: when true, run tune_family_relations on each
        resulting component.
    '''
    if train is _DEFAULT_DATASET:
        train = TitanicDataSet.get_train()
    if test is _DEFAULT_DATASET:
        test = TitanicDataSet.get_test()
    lnb = LastNameBuilder()
    add_last_names(lnb, train)
    add_last_names(lnb, test)
    last_name_graph = lnb.get_graph()
    return [tune_family_relations(f) if tune else f
            for c in last_name_graph.components
            for f in build_relations(c)]
def add_last_names(nb, ds):
    '''Add every person in data set *ds* to last-name builder *nb*,
    linking each person to all of their known last names. A ``None``
    data set is ignored.
    '''
    if ds is None:
        return
    # test rows carry no labels; substitute None per passenger
    labels = ds.survived if ds.is_train else [None] * len(ds)
    for label, attributes in zip(labels, ds.iter_entries()):
        person = Person(attributes, label)
        for last_name in person.parsed_name.iter_last_names():
            nb.add_edge(last_name, person)
def build_relations(c):
    '''Convert one last-name component into person-to-person relation
    graphs, returning the components of the new graph.
    '''
    # Extract people nodes, discarding LastNameNodes, and
    # clear all of the LastNameEdges
    nodes, edges = c.tear_down()
    people = [n for n in nodes if isinstance(n, Person)]
    for p in people:
        del p.edges[::]
    # Group together all of the people who share a last name
    # and meet general affinity qualifications
    gb = graphlib.GraphBuilder(edge_factory=RelationEdge)
    for p in people:
        gb.values_to_nodes[p] = p
    # consider each unordered pair of people exactly once
    for i,a in enumerate(people):
        for b in people[i+1::]:
            if share_name(a, b) and general_affinity(a, b) != 0:
                gb.add_edge(a, b)
    return gb.get_graph().components
def find_nuclear_families(c):
    '''Finds the nuclear families in a graph component.
    Also returns any nodes and edges that are not included
    in families.
    '''
    families = []
    seen = set()
    # every node with proven children roots a family
    for n in c.nodes:
        if n not in seen and n.children:
            families.append(build_family(seen, n))
    # everything not swept into a family is reported separately
    extra_nodes = []
    extra_edges = set()
    for n in c.nodes:
        if n in seen:
            continue
        extra_nodes.append(n)
        for e in n.edges:
            extra_edges.add(e)
    # flag parents that also appear in the leftover relation graph
    for f in families:
        for p in f.mother, f.father:
            if p and set(p.edges) & extra_edges:
                p.write_elsewhere = True
    return families, extra_nodes, extra_edges
def build_family(seen, n):
    '''Build the NuclearFamily rooted at parent node *n*, marking every
    member (parent, spouse, children) as seen.
    '''
    f = NuclearFamily(name=n.parsed_name.last)
    # sex == 0 denotes male throughout this module
    if n.a.sex == 0:
        f.father = n
    else:
        f.mother = n
    seen.add(n)
    if n.spouse:
        if n.spouse.a.sex == 0:
            f.father = n.spouse
        else:
            f.mother = n.spouse
        seen.add(n.spouse)
    f.children = n.children
    for c in n.children:
        seen.add(c)
        if c.children:
            # NOTE(review): the recursive result is discarded, so a
            # child's own nuclear family is only marked seen, never
            # returned to the caller -- confirm this is intentional.
            build_family(seen, c)
    return f
def tune_family_relations(c):
    '''Incrementally prove spouse, parent/child, sibling and extended
    relationships within component *c*, refreshing edge possibilities
    after each stage. Returns *c*.
    '''
    # Use simple heuristics to determine relationship individuals.
    # Throughout this procedure we incrementally prove various relationships
    # exists.
    update_relationship_possibilities(c)
    prove_spouses(c)
    update_relationship_possibilities(c)
    # ensure there is no ambiguity in spouse classification
    for e in c.edges:
        assert (not e.could_be_spouse) or e.definitive_spouse
    for n in c.nodes:
        n_spouse = sum(1 for e in n.edges if e.could_be_spouse)
        assert n_spouse in (0,1)
    prove_parents(c)
    # bail out early when parent/child proving found inconsistencies
    if c.difficult_parent_child:
        return c
    update_relationship_possibilities(c)
    prove_siblings(c)
    update_relationship_possibilities(c)
    prove_extended(c)
    update_relationship_possibilities(c)
    return c
def update_relationship_possibilities(c):
    '''Re-evaluate the possibility flags on every edge of component *c*.'''
    for edge in c.edges:
        update_a_relationship_possibilities(edge)
def update_a_relationship_possibilities(e):
    '''Refresh all four possibility flags on relation edge *e*.'''
    e.could_be_spouse = could_be_spouse(e.a, e.b)
    e.could_be_sibling = could_be_sibling(e.a, e.b)
    # child rule is directional, so try both directions
    c = could_be_child(e.a, e.b)
    if not c:
        c = could_be_child(e.b, e.a)
        if c:
            # orient the edge so e.a is the parent candidate
            e.b, e.a = e.a, e.b
    e.could_be_child = c
    # If no possibility for close relationships, then classify as an extended relationship
    e.could_be_extended = not e.could_be_close
    assert e.n_possibilities >= 1
# Relationship possibilities functions
#--------------------------------------------------------
def could_be_spouse(a, b):
    '''Optimistically decide whether *a* and *b* could be married.
    Returns True unless some attribute rules the marriage out.
    '''
    # already proven that they are spouses
    if a.spouse is not None and a.spouse is b:
        assert b.spouse is a
        return True
    # proven that they aren't spouses
    elif a.spouse or b.spouse:
        return False
    # check if sibsp or sex rules out possibility of them being spouse
    if a.adjusted_sibsp == 0 or b.adjusted_sibsp == 0:
        return False
    if a.a.sex == b.a.sex:
        return False
    # parch should only be off by at most 2, as a couple has the same
    # number of kids, and only their own parents can lead to a difference
    if abs(a.a.parch - b.a.parch) > 2:
        return False
    # check if their names are consistent with them being married
    if a.parsed_name.last != b.parsed_name.last:
        return False
    titles = a.parsed_name.title, b.parsed_name.title
    if 'mrs' not in titles:
        return False
    if 'master' in titles or 'miss' in titles:
        return False
    # check if the woman's main name includes a significant portion of
    # the mans name. (e.g Mrs. Sammuel Herman and Mr. Sammuel Herman)
    m,f = (a,b) if a.a.sex == 0 else (b,a)
    if m.parsed_name.main == f.parsed_name.main:
        return True
    n = largest_common_substring(m.parsed_name.main, f.parsed_name.main)
    if n > 0.5 * len(f.parsed_name.main):
        return True
    # rule out individual under MINIMUM_AGE_FOR_MARRIAGE
    if not (ambiguous_ge(a.a.age, MINIMUM_AGE_FOR_MARRIAGE) and
            ambiguous_ge(b.a.age, MINIMUM_AGE_FOR_MARRIAGE)):
        return False
    # # If we know both ages, rule out women who are 5+ years older than men
    # # this might be an invalid rule as some of entries look like older rich
    # # women married to young men. The cases I've observed are already handled
    # # by the earlier check if the woman's main name includes the main.
    # # This rule was added to help sort out the mother/son vs. mother/husband,
    # # but it may be better to leave those cases ambiguous anyways.
    # if not ambiguous_le_diff(f.a.age, m.a.age, MAXIMUM_MARRIED_FEMALE_AGE_ADVANTAGE):
    #     return False
    # optimistic default
    return True
def could_be_child(parent, child):
    '''Optimistically decide whether *child* could be a child of
    *parent* (directional). Returns True unless ruled out.
    '''
    # check if we've already proven that parent or their spouse is
    # a parent to this child
    if set(child.known_parents) & set(filter(None, [parent, parent.spouse])):
        return True
    if parent.spouse is not None and parent.spouse is child:
        assert child.spouse is parent
        return False
    # check if parch rules out possibility of parent/child relationship
    if parent.adjusted_parch == 0 or child.adjusted_parch == 0:
        return False
    # rule out possibility of parents conceiving a child when under
    # a certain age
    if not ambiguous_ge_diff(parent.a.age, child.a.age, MINIMUM_PARENT_AGE_ADVANTAGE):
        return False
    # ensure they have the same last name
    if parent.parsed_name.last != maiden_name(child):
        return False
    # this rule can help with sibling/child ambiguities if we've
    # already found out they share the same parent
    if has_common_parents(parent, child):
        return False
    # optimistic default
    return True
def could_be_sibling(a, b):
    '''Optimistically decide whether *a* and *b* could be siblings.'''
    # Already proven that they are siblings
    if a in b.siblings:
        assert b in a.siblings
        return True
    # Check if sibsp rules out the possibility of them being siblings.
    # Adjust sibsp if we've proven they have a spouse.
    if a.adjusted_sibsp == 0 or b.adjusted_sibsp == 0:
        return False
    # if one of this individuals is a married women, then her maiden
    # name must match the other individuals last name to be siblings
    if maiden_name(a) != maiden_name(b):
        return False
    # this rule can help with sibling/child ambiguities if we've
    # already found out they share the same parent
    if has_common_parents(a, b):
        return True
    # optimistic default
    return True
# Spouse proving
#--------------------------------------------------------
def prove_spouses(c):
    '''Prove spouse relationships within component *c*.'''
    # In practice it is fairly easy to prove spouse relationship.
    # We only run into problems where one partner could be classified
    # into multiple marriages due to optimistic spouse rule.
    # Here we deal with these ambiguous cases by preferring situations
    # where the females name includes the male name. In practice this
    # seems to easily give us definitive spouse relationships.
    # Find sets of individual joined by the possibility of marriage
    gb = graphlib.GraphBuilder()
    for e in c.edges:
        if e.could_be_spouse:
            assert not e.a.spouse
            assert not e.b.spouse
            gb.add_edge(gb.get_node(e.a), gb.get_node(e.b))
    for cc in gb.get_graph().components:
        # NOTE(review): overwritten every iteration, so only the last
        # component's result survives -- confirm this is intentional.
        c.spouse_collisions = handle_spouse_collisions(cc)
def handle_spouse_collisions(c):
    '''Resolve a set of people joined by possible marriages into actual
    spouse pairs.

    NOTE(review): returns None in the two-person case but True
    otherwise; callers only store the value, but the inconsistency
    looks unintentional -- confirm.
    '''
    assert len(c.nodes) >= 2
    people = [n.value for n in c.nodes]
    males = [n for n in people if n.a.sex == 0]
    females = [n for n in people if n.a.sex == 1]
    # Easy case, only possibility for these 2 people to be married
    if len(people) == 2:
        m, = males
        f, = females
        make_spouse(m, f)
        return
    while males and females:
        # Marry the couple that shares the largest substring in their main names.
        # e.g. Mrs. Sammuel Herman and Mr. Sammuel Herman
        n = np.array([0 if m.parsed_name.last != f.parsed_name.last else
                      largest_common_substring(m.parsed_name.main, f.parsed_name.main)
                      for m in males
                      for f in females])
        i = np.argmax(n)
        # flat index over the male x female grid -> pair of indices
        m_i,f_i = divmod(i, len(females))
        m = males.pop(m_i)
        f = females.pop(f_i)
        make_spouse(m, f)
    return True
def make_spouse(m, f):
    '''Record a proven marriage between *m* and *f*, collapsing their
    connecting edge to the single spouse possibility.
    '''
    assert not m.spouse
    assert not f.spouse
    edge = m.get_edge_to(f)
    assert edge.could_be_spouse
    # spouse becomes the only remaining classification for this edge
    edge.set_all_possibilities(False)
    edge.could_be_spouse = True
    m.spouse = f
    f.spouse = m
# Parent proving
#--------------------------------------------------------
def prove_parents(c):
    '''Work out parent/child relationships for every couple (or single
    parent) in component *c*.
    '''
    checked = set()
    c.difficult_parent_child = False
    for node in c.nodes:
        if node in checked:
            continue
        # a node's spouse is always processed in the same pass
        assert node.spouse not in checked
        parents = filter(None, [node, node.spouse])
        checked.update(parents)
        prove_parents_children(c, parents)
def prove_parents_children(comp, parents):
    '''Find and record the children of *parents* (a couple or a single
    parent). Sets ``comp.difficult_parent_child`` when the evidence is
    inconsistent.
    '''
    assert not any(p.children for p in parents)
    # parch bounds how many children this couple can have
    n_max_children = min(p.adjusted_parch for p in parents)
    if n_max_children == 0:
        return
    # collect every definitive child candidate across both parents
    children = set()
    for p in parents:
        for e in p.edges:
            if not e.definitive_child:
                continue
            other = e.other(p)
            if not child_parent_direction(p, other):
                continue
            #if has_other_possible_parents(other, parents):
            #    continue
            children.add(other)
    if not children:
        return
    if len(children) > n_max_children:
        children = discern_children_by_fare(parents, children, n_max_children)
    errors = False
    if len(children) > n_max_children:
        print 'warning: %d children were found when max was expected to be %d' % (
            len(children), n_max_children)
        # for p in parents:
        #     print p.a.name
        #     print '-'*60
        #     for c in children:
        #         print c.parsed_name.main
        #     print
        errors = True
    # split the couple into mother/father (father may be absent)
    if len(parents) == 1:
        mother = parents[0]
        father = None
    else:
        mother, father = parents
        if mother.a.sex == 0:
            mother,father = father,mother
    # consistency checks against any previously proven parents
    for c in children:
        if c.mother is not None:
            if c.mother != mother:
                print 'warning: inconsistent mother for child'
                errors = True
        if mother is not None and not c.has_edge_to(mother):
            print 'no relationship between mother and child'
            errors = True
        if c.father is not None:
            if c.father != father:
                print 'warning: inconsistent father for child'
                errors = True
        if father is not None and not c.has_edge_to(father):
            print 'no relationship between father and child'
            errors = True
    if errors:
        comp.difficult_parent_child = True
        return
    # record the proven relationships and collapse the edges down to
    # the single child classification
    for c in children:
        c.mother = mother
        c.father = father
        if mother:
            e = c.get_edge_to(mother)
            e.set_all_possibilities(False)
            e.could_be_child = True
            e.a = mother
            e.b = c
        if father:
            e = c.get_edge_to(father)
            e.set_all_possibilities(False)
            e.could_be_child = True
            e.a = father
            e.b = c
    ct = frozenset(children)
    if mother:
        mother.children = ct
    if father:
        father.children = ct
# In practice this function only helps with one specific case
def discern_children_by_fare(parents, children, n_max_children):
    """Filter *children* down to those whose fare matches some parent's fare.

    Family members typically share one ticket (hence one fare), so a fare
    mismatch is evidence against a parent/child link.  ``n_max_children`` is
    accepted for interface compatibility but not used to truncate the result.
    """
    matching = []
    for child in children:
        for parent in parents:
            if np.allclose(child.a.fare, parent.a.fare):
                matching.append(child)
                break
    return matching
def child_parent_direction(parent, child):
    # Heuristically decide whether `parent` is plausibly the parent of
    # `child` (rather than the other way around).  Tries, in order: ages,
    # surname/maiden-name match, spouse status + edge possibility, titles,
    # and finally the parch counts as a last resort.
    if parent.a.age != -1 and child.a.age != -1:
        # Both ages known (-1 marks unknown): parent must simply be older.
        return parent.a.age > child.a.age
    if parent.parsed_name.last != maiden_name(child):
        # A child should carry the parent's surname (maiden name for wives).
        return False
    if parent.spouse:
        # Married candidate: defer to the edge's recorded possibilities.
        try:
            e = parent.get_edge_to(child)
        except ValueError:
            return False
        return e.could_be_child
    if child.parsed_name.title in ('miss','master'):
        # Unmarried/juvenile titles mark the child side of the relation.
        return True
    if (parent.parsed_name.title in ('miss','master') and
        child.parsed_name.title in ('mrs','mr')):
        return False
    print 'difficult child parent direction'
    return parent.a.parch > child.a.parch
def has_other_possible_parents(child, parents):
    """Return True when *child* has a could-be-child edge to a plausible
    parent that is not already in *parents*."""
    candidates = (e.other(child) for e in child.edges if e.could_be_child)
    return any(other not in parents and child_parent_direction(other, child)
               for other in candidates)
# Sibling proving
#--------------------------------------------------------
def prove_siblings(c):
    # By this stage most sibling relationships have already been worked out
    # (spouses and parent/child links are resolved first), so we only need to
    # materialize the symmetric `siblings` sets from definitive edges.
    prove_symmetric(c, 'definitive_sibling', 'siblings')
def prove_symmetric(c, e_attr, col_attr):
acc = defaultdict(list)
for p in c.nodes:
for e in p.edges:
if getattr(e, e_attr):
acc[p].append(e.other(p))
for p in c.nodes:
setattr(p, col_attr, frozenset(acc[p]))
for p in c.nodes:
for o in getattr(p, col_attr):
assert p in getattr(o, col_attr)
# Extended-relationship proving
#--------------------------------------------------------
def prove_extended(c):
    # "Extended family" is symmetric too; materialize it the same way as
    # siblings, into the `extendeds` attribute.
    prove_symmetric(c, 'definitive_extended', 'extendeds')
# Utilities
#--------------------------------------------------------
def share_name(a, b):
    """Return True when *a* and *b* have at least one last name in common."""
    for name_a in a.parsed_name.iter_last_names():
        for name_b in b.parsed_name.iter_last_names():
            if name_a == name_b:
                return True
    return False
def general_affinity(a, b):
    """Weak compatibility test: same embarkation port and passenger class,
    treating unknown (negative) values as wildcards."""
    same_port = ambiguous_equal(a.a.embarked, b.a.embarked)
    same_class = ambiguous_equal(a.a.pclass, b.a.pclass)
    return same_port and same_class
def ambiguous_equal(a, b):
    """Equality where a negative value means "unknown" and matches anything."""
    if a == b:
        return True
    return a < 0 or b < 0
def ambiguous_gt(a, b):
    """Strict a > b; unknown (negative) operands compare as True."""
    if a < 0 or b < 0:
        return True
    return a > b
def ambiguous_ge(a, b):
    """a >= b; unknown (negative) operands compare as True."""
    if a < 0 or b < 0:
        return True
    return a >= b
def ambiguous_lt(a, b):
    """Strict a < b; unknown (negative) operands compare as True."""
    if a < 0 or b < 0:
        return True
    return a < b
def ambiguous_gt_diff(a, b, d):
    """(a - b) > d; unknown (negative) operands compare as True."""
    if a < 0 or b < 0:
        return True
    return a - b > d
def ambiguous_ge_diff(a, b, d):
    """(a - b) >= d; unknown (negative) operands compare as True."""
    if a < 0 or b < 0:
        return True
    return a - b >= d
def ambiguous_le_diff(a, b, d):
    """(a - b) <= d; unknown (negative) operands compare as True."""
    if a < 0 or b < 0:
        return True
    return a - b <= d
def largest_common_substring(a, b):
    """Return the length of the longest common leading run (prefix) of *a* and *b*.

    NOTE(review): despite the name, this routine appears intended to measure
    the shared prefix length.  The previous implementation had two defects:
    it returned the first index at which the prefixes differed (one past the
    true common-prefix length, e.g. ("xb", "yb") -> 1 instead of 0), and it
    raised NameError when either string was empty because the loop variable
    was never bound.
    """
    limit = min(len(a), len(b))
    for i in range(limit):
        if a[i] != b[i]:
            return i
    return limit
def has_common_parents(a, b):
    """Return True when *a* and *b* share at least one known parent."""
    return not set(a.known_parents).isdisjoint(b.known_parents)
def maiden_name(p):
    """Return the surname *p* was born with.

    For a married woman (female, title 'mrs') whose record includes her own
    ("other") name, take the last whitespace-separated token of that name;
    otherwise fall back to the recorded last name.
    """
    name = p.parsed_name
    is_married_woman = p.a.sex == 1 and name.title == 'mrs'
    if is_married_woman and name.other:
        return name.other.rsplit(None, 1)[-1]
    return name.last
# Dot file creation
#--------------------------------------------------------
@contextmanager
def block(fp, name, *args):
    """Write ``<name> <args...> {`` to *fp*, yield to the caller, then close
    the brace -- used to emit nested Graphviz blocks."""
    print >>fp, name, ' '.join(args), '{'
    yield
    print >>fp, '}'
class DotCreator(object):
    """Serializes family-graph components to Graphviz "dot" format."""
    def __init__(self, fp):
        # fp: writable file-like object receiving the dot text.
        self.fp = fp
        self.graph_counter = 0       # used to generate unique digraph names
        self.written_nodes = set()   # guards against emitting a node twice
    def write_components(self, components, individual_digraphs=False, **kwds):
        # Emit either one digraph containing every component, or one digraph
        # per component.  Extra keyword args go to write_component().
        if not individual_digraphs:
            with block(self.fp, 'digraph', self.next_graph_name()):
                for c in components:
                    self.write_component(c, **kwds)
        else:
            for c in components:
                with block(self.fp, 'digraph', self.next_graph_name()):
                    self.write_component(c, **kwds)
    def next_graph_name(self):
        # Sequential graph names: G0, G1, G2, ...
        i = self.graph_counter
        self.graph_counter += 1
        return 'G%d' % (i,)
    def write_component(self, c, show_nuclear_families=True):
        if show_nuclear_families:
            self.write_nuclear_families(c)
            return
        # Fallback: dump raw nodes and edges without family grouping.
        for n in c.nodes:
            self.write_common_node(n)
        for e in c.edges:
            self.write_common_edge(e)
    def write_nuclear_families(self, c):
        # Group the component into nuclear families plus leftovers.
        families, extra_nodes, extra_edges = find_nuclear_families(c)
        for f in families:
            self.write_family(f)
        for n in extra_nodes:
            self.write_common_node(n)
        for e in extra_edges:
            self.write_common_edge(e)
    def write_family(self, f):
        # A family is drawn as a circle node with parents linked above and
        # children linked below; rank=same keeps each group on one row.
        print >>self.fp, '%s [label="%s" shape="circle"]' % (f.dot_id, f.name)
        for p,label in [(f.mother, 'mother'), (f.father, 'father')]:
            if p:
                self.write_common_node(p)
                print >>self.fp, '%s -> %s [label="%s"]' % (p.dot_id, f.dot_id, label)
        print >>self.fp, '{rank=same;',
        for p in f.mother, f.father:
            if p:
                print >>self.fp, p.dot_id,
        print >>self.fp, '}'
        for c in f.children:
            self.write_common_node(c)
            print >>self.fp, '%s -> %s [label="child"]' % (f.dot_id, c.dot_id)
        print >>self.fp, '{rank=same;',
        for c in f.children:
            print >>self.fp, c.dot_id,
        print >>self.fp, '}'
    def write_common_node(self, n):
        # Idempotent: nodes reachable from several families are written once.
        if n in self.written_nodes:
            return
        self.write_node(n,
            label=self.get_node_label(n),
            color=self.get_node_color(n),
            shape=self.get_node_shape(n),
            **self.get_extra_node_attributes(n))
    def get_node_label(self, n):
        # Name plus sibsp/parch/pclass/embarked/cabin, then age and fare.
        return '%s\\ns=%d p=%d c=%d e=%s c=%s\\na=%.1f f=%.1f' % (
            n.a.name, n.a.sibsp, n.a.parch, n.a.pclass, n.a.embarked, n.a.cabin,
            n.a.age, n.a.fare)
    def get_node_color(self, n):
        # Green = survived, red = did not survive, black = unknown.
        return {True:'green', False:'red', None:'black'}[n.survived]
    def get_node_shape(self, n):
        return 'rectangle'
    def get_extra_node_attributes(self, n):
        # Hook for subclasses; no extra attributes by default.
        return {}
    # Class-level toggle: draw edges that are only "extended" relations?
    show_extended = True
    def write_common_edge(self, e):
        if not e.definitive_extended or self.show_extended:
            # Label the edge with every still-possible relation type;
            # extended-only edges are dashed.
            self.write_edge(e, label='/'.join(l for l,v in zip(['spouse','sibling','child','extended'],
                                                               e.possibilities)
                                              if v),
                            style='solid' if not e.definitive_extended else 'dashed')
    def write_node(self, n, **kwds):
        assert n not in self.written_nodes
        print >>self.fp, '%s [%s]' % (n.dot_id, self.make_attributes(kwds))
        self.written_nodes.add(n)
    def write_edge(self, e, **kwds):
        print >>self.fp, '%s -> %s [%s]' % (e.a.dot_id, e.b.dot_id, self.make_attributes(kwds))
    @classmethod
    def make_attributes(cls, kwds):
        # Render a dict as Graphviz "key=value" attribute pairs.
        return ' '.join('%s=%s' % (k, cls.quote_value(k,v))
                        for k,v in kwds.iteritems())
    @staticmethod
    def quote_value(k, v):
        # Graphviz requires quoting for free-text values; escape inner quotes.
        if k in ('label','shape'):
            return '"%s"' % (str(v).replace('"', '\\"'))
        return str(v)
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces for handling BIDS-like neuroimaging structures."""
from collections import defaultdict
from json import dumps, loads
from pathlib import Path
from shutil import copytree, rmtree
from pkg_resources import resource_filename as _pkgres
import re
import nibabel as nb
import numpy as np
from bids.layout import parse_file_entities
from bids.layout.writing import build_path
from bids.utils import listify
from nipype import logging
from nipype.interfaces.base import (
traits,
isdefined,
Undefined,
TraitedSpec,
BaseInterfaceInputSpec,
DynamicTraitedSpec,
File,
Directory,
InputMultiObject,
OutputMultiObject,
Str,
SimpleInterface,
)
from nipype.interfaces.io import add_traits
from templateflow.api import templates as _get_template_list
from ..utils.bids import _init_layout, relative_to_root
from ..utils.images import overwrite_header
from ..utils.misc import splitext as _splitext, _copy_any
# Matches a trailing ".gz" so compression suffixes can be stripped/re-added.
regz = re.compile(r"\.gz$")
# BIDS-Derivatives spec bundled with the package (entities + path patterns).
_pybids_spec = loads(Path(_pkgres("niworkflows", "data/nipreps.json")).read_text())
BIDS_DERIV_ENTITIES = frozenset({e["name"] for e in _pybids_spec["entities"]})
BIDS_DERIV_PATTERNS = tuple(_pybids_spec["default_path_patterns"])
STANDARD_SPACES = _get_template_list()
LOGGER = logging.getLogger("nipype.interface")
def _none():
    """Default factory for ``DEFAULT_DTYPES``: unknown suffixes map to None.

    A module-level function (rather than a lambda) keeps the defaultdict
    picklable.
    """
    return None
# Automatically coerce certain suffixes (DerivativesDataSink)
DEFAULT_DTYPES = defaultdict(
    _none,
    (
        ("mask", "uint8"),
        ("dseg", "int16"),
        ("probseg", "float32"),
        ("boldref", "source"),
    ),
)
class _BIDSBaseInputSpec(BaseInterfaceInputSpec):
    """Inputs shared by the BIDS-querying interfaces below."""

    bids_dir = traits.Either(
        (None, Directory(exists=True)), usedefault=True, desc="optional bids directory"
    )
    bids_validate = traits.Bool(True, usedefault=True, desc="enable BIDS validator")
class _BIDSInfoInputSpec(_BIDSBaseInputSpec):
    """Input specification for :class:`BIDSInfo`."""

    in_file = File(mandatory=True, desc="input file, part of a BIDS tree")
class _BIDSInfoOutputSpec(DynamicTraitedSpec):
    """BIDS entities parsed out of the input filename."""

    subject = traits.Str()
    session = traits.Str()
    task = traits.Str()
    acquisition = traits.Str()
    reconstruction = traits.Str()
    run = traits.Int()
    suffix = traits.Str()
class BIDSInfo(SimpleInterface):
    """
    Extract BIDS entities from a BIDS-conforming path.
    This interface uses only the basename, not the path, to determine the
    subject, session, task, run, acquisition or reconstruction.
    >>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
    >>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz'''
    >>> res = bids_info.run()
    >>> res.outputs
    <BLANKLINE>
    acquisition = <undefined>
    reconstruction = <undefined>
    run = <undefined>
    session = retest
    subject = 01
    suffix = bold
    task = covertverbgeneration
    <BLANKLINE>
    >>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
    >>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_rec-MB_acq-AP_run-1_bold.nii.gz'''
    >>> res = bids_info.run()
    >>> res.outputs
    <BLANKLINE>
    acquisition = AP
    reconstruction = MB
    run = 1
    session = retest
    subject = 01
    suffix = bold
    task = covertverbgeneration
    <BLANKLINE>
    >>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
    >>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_acq-AP_run-01_bold.nii.gz'''
    >>> res = bids_info.run()
    >>> res.outputs
    <BLANKLINE>
    acquisition = AP
    reconstruction = <undefined>
    run = 1
    session = retest
    subject = 01
    suffix = bold
    task = covertverbgeneration
    <BLANKLINE>
    >>> bids_info = BIDSInfo(bids_validate=False)
    >>> bids_info.inputs.in_file = str(
    ...     datadir / 'ds114' / 'sub-01' / 'ses-retest' /
    ...     'func' / 'sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz')
    >>> res = bids_info.run()
    >>> res.outputs
    <BLANKLINE>
    acquisition = <undefined>
    reconstruction = <undefined>
    run = <undefined>
    session = retest
    subject = 01
    suffix = bold
    task = covertverbgeneration
    <BLANKLINE>
    >>> bids_info = BIDSInfo(bids_validate=False)
    >>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz'''
    >>> res = bids_info.run()
    >>> res.outputs
    <BLANKLINE>
    acquisition = <undefined>
    reconstruction = <undefined>
    run = <undefined>
    session = retest
    subject = 01
    suffix = bold
    task = covertverbgeneration
    <BLANKLINE>
    """
    input_spec = _BIDSInfoInputSpec
    output_spec = _BIDSInfoOutputSpec
    def _run_interface(self, runtime):
        bids_dir = self.inputs.bids_dir
        in_file = self.inputs.in_file
        if bids_dir is not None:
            try:
                # Strip the BIDS root so only in-tree path components are parsed.
                in_file = str(Path(in_file).relative_to(bids_dir))
            except ValueError:
                # in_file is not under bids_dir; parse the path as given.
                pass
        params = parse_file_entities(in_file)
        # Expose every declared output, defaulting to <undefined> when the
        # entity is absent from the filename.
        self._results = {
            key: params.get(key, Undefined)
            for key in _BIDSInfoOutputSpec().get().keys()
        }
        return runtime
class _BIDSDataGrabberInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`BIDSDataGrabber`."""

    subject_data = traits.Dict(Str, traits.Any)
    subject_id = Str()
class _BIDSDataGrabberOutputSpec(TraitedSpec):
    """Per-modality file lists collected for one subject."""

    out_dict = traits.Dict(desc="output data structure")
    fmap = OutputMultiObject(desc="output fieldmaps")
    bold = OutputMultiObject(desc="output functional images")
    sbref = OutputMultiObject(desc="output sbrefs")
    t1w = OutputMultiObject(desc="output T1w images")
    roi = OutputMultiObject(desc="output ROI images")
    t2w = OutputMultiObject(desc="output T2w images")
    flair = OutputMultiObject(desc="output FLAIR images")
class BIDSDataGrabber(SimpleInterface):
    """
    Collect files from a BIDS directory structure.
    >>> bids_src = BIDSDataGrabber(anat_only=False)
    >>> bids_src.inputs.subject_data = bids_collect_data(
    ...     str(datadir / 'ds114'), '01', bids_validate=False)[0]
    >>> bids_src.inputs.subject_id = '01'
    >>> res = bids_src.run()
    >>> res.outputs.t1w  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    ['.../ds114/sub-01/ses-retest/anat/sub-01_ses-retest_T1w.nii.gz',
     '.../ds114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz']
    """
    input_spec = _BIDSDataGrabberInputSpec
    output_spec = _BIDSDataGrabberOutputSpec
    _require_funcs = True
    def __init__(self, *args, **kwargs):
        """Extend the construction kwargs with ``anat_only``/``anat_derivatives``.

        ``anat_only`` disables the requirement for functional images;
        ``anat_derivatives`` (precomputed anatomical results) makes T1w
        inputs optional.
        """
        # Default to None so instantiating without ``anat_only`` does not
        # raise KeyError -- the None case is explicitly handled below.
        anat_only = kwargs.pop("anat_only", None)
        anat_derivatives = kwargs.pop("anat_derivatives", None)
        super().__init__(*args, **kwargs)
        if anat_only is not None:
            self._require_funcs = not anat_only
        self._require_t1w = anat_derivatives is None
    def _run_interface(self, runtime):
        bids_dict = self.inputs.subject_data
        self._results["out_dict"] = bids_dict
        # Fan the per-modality lists out to the individual output traits.
        self._results.update(bids_dict)
        if self._require_t1w and not bids_dict["t1w"]:
            raise FileNotFoundError(
                "No T1w images found for subject sub-{}".format(self.inputs.subject_id)
            )
        if self._require_funcs and not bids_dict["bold"]:
            raise FileNotFoundError(
                "No functional images found for subject sub-{}".format(
                    self.inputs.subject_id
                )
            )
        # Missing optional modalities are only logged, not fatal.
        for imtype in ["bold", "t2w", "flair", "fmap", "sbref", "roi"]:
            if not bids_dict[imtype]:
                LOGGER.info(
                    'No "%s" images found for sub-%s', imtype, self.inputs.subject_id
                )
        return runtime
class _DerivativesDataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
    """Input specification for :class:`DerivativesDataSink`."""

    base_directory = traits.Directory(
        desc="Path to the base directory for storing data."
    )
    check_hdr = traits.Bool(True, usedefault=True, desc="fix headers of NIfTI outputs")
    compress = InputMultiObject(
        traits.Either(None, traits.Bool),
        usedefault=True,
        desc="whether ``in_file`` should be compressed (True), uncompressed (False) "
        "or left unmodified (None, default).",
    )
    data_dtype = Str(
        desc="NumPy datatype to coerce NIfTI data to, or `source` to"
        "match the input file dtype"
    )
    dismiss_entities = InputMultiObject(
        traits.Either(None, Str),
        usedefault=True,
        desc="a list entities that will not be propagated from the source file",
    )
    in_file = InputMultiObject(
        File(exists=True), mandatory=True, desc="the object to be saved"
    )
    meta_dict = traits.DictStrAny(desc="an input dictionary containing metadata")
    source_file = InputMultiObject(
        File(exists=False), mandatory=True, desc="the source file(s) to extract entities from")
class _DerivativesDataSinkOutputSpec(TraitedSpec):
    """Output specification for :class:`DerivativesDataSink`."""

    out_file = OutputMultiObject(File(exists=True, desc="written file path"))
    out_meta = OutputMultiObject(File(exists=True, desc="written JSON sidecar path"))
    compression = OutputMultiObject(
        traits.Either(None, traits.Bool),
        desc="whether ``in_file`` should be compressed (True), uncompressed (False) "
        "or left unmodified (None).",
    )
    fixed_hdr = traits.List(traits.Bool, desc="whether derivative header was fixed")
class DerivativesDataSink(SimpleInterface):
    """
    Store derivative files.
    Saves the ``in_file`` into a BIDS-Derivatives folder provided
    by ``base_directory``, given the input reference ``source_file``.
    >>> import tempfile
    >>> tmpdir = Path(tempfile.mkdtemp())
    >>> tmpfile = tmpdir / 'a_temp_file.nii.gz'
    >>> tmpfile.open('w').close()  # "touch" the file
    >>> t1w_source = bids_collect_data(
    ...     str(datadir / 'ds114'), '01', bids_validate=False)[0]['t1w'][0]
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.desc = 'denoised'
    >>> dsink.inputs.compress = False
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_desc-denoised_T1w.nii'
    >>> tmpfile = tmpdir / 'a_temp_file.nii'
    >>> tmpfile.open('w').close()  # "touch" the file
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             allowed_entities=("custom",))
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.custom = 'noise'
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom-noise_T1w.nii'
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             allowed_entities=("custom",))
    >>> dsink.inputs.in_file = [str(tmpfile), str(tmpfile)]
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.custom = [1, 2]
    >>> dsink.inputs.compress = True
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    ['.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom-1_T1w.nii.gz',
     '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom-2_T1w.nii.gz']
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             allowed_entities=("custom1", "custom2"))
    >>> dsink.inputs.in_file = [str(tmpfile)] * 2
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.custom1 = [1, 2]
    >>> dsink.inputs.custom2 = "b"
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    ['.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom1-1_custom2-b_T1w.nii',
     '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom1-2_custom2-b_T1w.nii']
    When multiple source files are passed, only common entities are passed down.
    For example, if two T1w images from different sessions are used to generate
    a single image, the session entity is removed automatically.
    >>> bids_dir = tmpdir / 'bidsroot'
    >>> multi_source = [
    ...     bids_dir / 'sub-02/ses-A/anat/sub-02_ses-A_T1w.nii.gz',
    ...     bids_dir / 'sub-02/ses-B/anat/sub-02_ses-B_T1w.nii.gz']
    >>> for source_file in multi_source:
    ...     source_file.parent.mkdir(parents=True, exist_ok=True)
    ...     _ = source_file.write_text("")
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = list(map(str, multi_source))
    >>> dsink.inputs.desc = 'preproc'
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/anat/sub-02_desc-preproc_T1w.nii'
    If, on the other hand, only one is used, the session is preserved:
    >>> dsink.inputs.source_file = str(multi_source[0])
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-A/anat/sub-02_ses-A_desc-preproc_T1w.nii'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-01_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
desc-preproc_bold.nii'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-1_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> dsink.inputs.RepetitionTime = 0.75
    >>> res = dsink.run()
    >>> res.outputs.out_meta  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
desc-preproc_bold.json'
    >>> Path(res.outputs.out_meta).read_text().splitlines()[1]
    '  "RepetitionTime": 0.75'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-01_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             SkullStripped=True)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> dsink.inputs.space = 'MNI152NLin6Asym'
    >>> dsink.inputs.resolution = '01'
    >>> dsink.inputs.RepetitionTime = 0.75
    >>> res = dsink.run()
    >>> res.outputs.out_meta  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
space-MNI152NLin6Asym_res-01_desc-preproc_bold.json'
    >>> lines = Path(res.outputs.out_meta).read_text().splitlines()
    >>> lines[1]
    '  "RepetitionTime": 0.75,'
    >>> lines[2]
    '  "SkullStripped": true'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-01_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             SkullStripped=True)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> dsink.inputs.resolution = 'native'
    >>> dsink.inputs.space = 'MNI152NLin6Asym'
    >>> dsink.inputs.RepetitionTime = 0.75
    >>> dsink.inputs.meta_dict = {'RepetitionTime': 1.75, 'SkullStripped': False, 'Z': 'val'}
    >>> res = dsink.run()
    >>> res.outputs.out_meta  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
space-MNI152NLin6Asym_desc-preproc_bold.json'
    >>> lines = Path(res.outputs.out_meta).read_text().splitlines()
    >>> lines[1]
    '  "RepetitionTime": 0.75,'
    >>> lines[2]
    '  "SkullStripped": true,'
    >>> lines[3]
    '  "Z": "val"'
    """
    input_spec = _DerivativesDataSinkInputSpec
    output_spec = _DerivativesDataSinkOutputSpec
    out_path_base = "niworkflows"
    _always_run = True
    _allowed_entities = set(BIDS_DERIV_ENTITIES)
    def __init__(self, allowed_entities=None, out_path_base=None, **inputs):
        """Initialize the SimpleInterface and extend inputs with custom entities."""
        self._allowed_entities = set(allowed_entities or []).union(
            self._allowed_entities
        )
        if out_path_base:
            self.out_path_base = out_path_base
        self._metadata = {}
        self._static_traits = self.input_spec.class_editable_traits() + sorted(
            self._allowed_entities
        )
        # Any keyword argument that is not a static trait is stored as
        # sidecar JSON metadata rather than being set on the input spec.
        for dynamic_input in set(inputs) - set(self._static_traits):
            self._metadata[dynamic_input] = inputs.pop(dynamic_input)
        # First regular initialization (constructs InputSpec object)
        super().__init__(**inputs)
        add_traits(self.inputs, self._allowed_entities)
        for k in self._allowed_entities.intersection(list(inputs.keys())):
            # Add additional input fields (self.inputs is an object)
            setattr(self.inputs, k, inputs[k])
    def _run_interface(self, runtime):
        # Ready the output folder
        base_directory = runtime.cwd
        if isdefined(self.inputs.base_directory):
            base_directory = self.inputs.base_directory
        base_directory = Path(base_directory).absolute()
        out_path = base_directory / self.out_path_base
        out_path.mkdir(exist_ok=True, parents=True)
        # Ensure we have a list
        in_file = listify(self.inputs.in_file)
        # Read in the dictionary of metadata
        if isdefined(self.inputs.meta_dict):
            meta = self.inputs.meta_dict
            # inputs passed in construction take priority
            meta.update(self._metadata)
            self._metadata = meta
        # Initialize entities with those from the source file.
        in_entities = [
            parse_file_entities(str(relative_to_root(source_file)))
            for source_file in self.inputs.source_file
        ]
        # Keep only entities on which every source file agrees.
        out_entities = {k: v for k, v in in_entities[0].items()
                        if all(ent.get(k) == v for ent in in_entities[1:])}
        for drop_entity in listify(self.inputs.dismiss_entities or []):
            out_entities.pop(drop_entity, None)
        # Override extension with that of the input file(s)
        out_entities["extension"] = [
            # _splitext does not accept .surf.gii (for instance)
            "".join(Path(orig_file).suffixes).lstrip(".")
            for orig_file in in_file
        ]
        compress = listify(self.inputs.compress) or [None]
        if len(compress) == 1:
            # A single compress value applies to every input file.
            compress = compress * len(in_file)
        for i, ext in enumerate(out_entities["extension"]):
            if compress[i] is not None:
                ext = regz.sub("", ext)
                out_entities["extension"][i] = f"{ext}.gz" if compress[i] else ext
        # Override entities with those set as inputs
        for key in self._allowed_entities:
            value = getattr(self.inputs, key)
            if value is not None and isdefined(value):
                out_entities[key] = value
        # Clean up native resolution with space
        if out_entities.get("resolution") == "native" and out_entities.get("space"):
            out_entities.pop("resolution", None)
        if len(set(out_entities["extension"])) == 1:
            out_entities["extension"] = out_entities["extension"][0]
        # Insert custom (non-BIDS) entities from allowed_entities.
        custom_entities = set(out_entities.keys()) - set(BIDS_DERIV_ENTITIES)
        patterns = BIDS_DERIV_PATTERNS
        if custom_entities:
            # Example: f"{key}-{{{key}}}" -> "task-{task}"
            custom_pat = "_".join(f"{key}-{{{key}}}" for key in sorted(custom_entities))
            patterns = [
                pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix")))
                for pat in patterns
            ]
        # Prepare SimpleInterface outputs object
        self._results["out_file"] = []
        self._results["compression"] = []
        self._results["fixed_hdr"] = [False] * len(in_file)
        dest_files = build_path(out_entities, path_patterns=patterns)
        if not dest_files:
            raise ValueError(f"Could not build path with entities {out_entities}.")
        # Make sure the interpolated values is embedded in a list, and check
        dest_files = listify(dest_files)
        if len(in_file) != len(dest_files):
            raise ValueError(
                f"Input files ({len(in_file)}) not matched "
                f"by interpolated patterns ({len(dest_files)})."
            )
        for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)):
            out_file = out_path / dest_file
            out_file.parent.mkdir(exist_ok=True, parents=True)
            self._results["out_file"].append(str(out_file))
            self._results["compression"].append(_copy_any(orig_file, str(out_file)))
            # CIFTI dtseries carry a .nii suffix but are not volumetric NIfTI.
            is_nifti = out_file.name.endswith(
                (".nii", ".nii.gz")
            ) and not out_file.name.endswith((".dtseries.nii", ".dtseries.nii.gz"))
            data_dtype = self.inputs.data_dtype or DEFAULT_DTYPES[self.inputs.suffix]
            if is_nifti and any((self.inputs.check_hdr, data_dtype)):
                # Do not use mmap; if we need to access the data at all, it will be to
                # rewrite, risking a BusError
                nii = nb.load(out_file, mmap=False)
                if self.inputs.check_hdr:
                    hdr = nii.header
                    curr_units = tuple(
                        [None if u == "unknown" else u for u in hdr.get_xyzt_units()]
                    )
                    curr_codes = (int(hdr["qform_code"]), int(hdr["sform_code"]))
                    # Default to mm, use sec if data type is bold
                    units = (
                        curr_units[0] or "mm",
                        "sec" if out_entities["suffix"] == "bold" else None,
                    )
                    xcodes = (1, 1)  # Derivative in its original scanner space
                    if self.inputs.space:
                        xcodes = (
                            (4, 4) if self.inputs.space in STANDARD_SPACES else (2, 2)
                        )
                    if curr_codes != xcodes or curr_units != units:
                        self._results["fixed_hdr"][i] = True
                        hdr.set_qform(nii.affine, xcodes[0])
                        hdr.set_sform(nii.affine, xcodes[1])
                        hdr.set_xyzt_units(*units)
                        # Rewrite file with new header
                        overwrite_header(nii, out_file)
                if data_dtype == "source":  # match source dtype
                    try:
                        data_dtype = nb.load(self.inputs.source_file[0]).get_data_dtype()
                    except Exception:
                        LOGGER.warning(
                            f"Could not get data type of file {self.inputs.source_file[0]}"
                        )
                        data_dtype = None
                if data_dtype:
                    if self.inputs.check_hdr:
                        # load updated NIfTI
                        nii = nb.load(out_file, mmap=False)
                    data_dtype = np.dtype(data_dtype)
                    orig_dtype = nii.get_data_dtype()
                    if orig_dtype != data_dtype:
                        LOGGER.warning(
                            f"Changing {out_file} dtype from {orig_dtype} to {data_dtype}"
                        )
                        # coerce dataobj to new data dtype
                        if np.issubdtype(data_dtype, np.integer):
                            new_data = np.rint(nii.dataobj).astype(data_dtype)
                        else:
                            new_data = np.asanyarray(nii.dataobj, dtype=data_dtype)
                        # and set header to match
                        nii.set_data_dtype(data_dtype)
                        nii = nii.__class__(new_data, nii.affine, nii.header)
                        nii.to_filename(out_file)
        # Only write a JSON sidecar when exactly one output file was produced.
        if len(self._results["out_file"]) == 1:
            meta_fields = self.inputs.copyable_trait_names()
            self._metadata.update(
                {
                    k: getattr(self.inputs, k)
                    for k in meta_fields
                    if k not in self._static_traits
                }
            )
            if self._metadata:
                out_file = Path(self._results["out_file"][0])
                # 1.3.x hack
                # For dtseries, we have been generating weird non-BIDS JSON files.
                # We can safely keep producing them to avoid breaking derivatives, but
                # only the existing keys should keep going into them.
                if out_file.name.endswith(".dtseries.nii"):
                    legacy_metadata = {}
                    for key in ("grayordinates", "space", "surface", "surface_density", "volume"):
                        if key in self._metadata:
                            legacy_metadata[key] = self._metadata.pop(key)
                    if legacy_metadata:
                        sidecar = out_file.parent / f"{_splitext(str(out_file))[0]}.json"
                        sidecar.write_text(dumps(legacy_metadata, sort_keys=True, indent=2))
                # The future: the extension is the first . and everything after
                sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json"
                sidecar.write_text(dumps(self._metadata, sort_keys=True, indent=2))
                self._results["out_meta"] = str(sidecar)
        return runtime
class _ReadSidecarJSONInputSpec(_BIDSBaseInputSpec):
    """Input specification for :class:`ReadSidecarJSON`."""

    in_file = File(exists=True, mandatory=True, desc="the input nifti file")
class _ReadSidecarJSONOutputSpec(_BIDSInfoOutputSpec):
    """BIDS entities plus the full sidecar metadata dictionary."""

    out_dict = traits.Dict()
class ReadSidecarJSON(SimpleInterface):
    """
    Read JSON sidecar files of a BIDS tree.
    >>> fmap = str(datadir / 'ds054' / 'sub-100185' / 'fmap' /
    ...            'sub-100185_phasediff.nii.gz')
    >>> meta = ReadSidecarJSON(in_file=fmap, bids_dir=str(datadir / 'ds054'),
    ...                        bids_validate=False).run()
    >>> meta.outputs.subject
    '100185'
    >>> meta.outputs.suffix
    'phasediff'
    >>> meta.outputs.out_dict['Manufacturer']
    'SIEMENS'
    >>> meta = ReadSidecarJSON(in_file=fmap, fields=['Manufacturer'],
    ...                        bids_dir=str(datadir / 'ds054'),
    ...                        bids_validate=False).run()
    >>> meta.outputs.out_dict['Manufacturer']
    'SIEMENS'
    >>> meta.outputs.Manufacturer
    'SIEMENS'
    >>> meta.outputs.OtherField  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    AttributeError:
    >>> meta = ReadSidecarJSON(
    ...     in_file=fmap, fields=['MadeUpField'],
    ...     bids_dir=str(datadir / 'ds054'),
    ...     bids_validate=False).run()  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    KeyError:
    >>> meta = ReadSidecarJSON(in_file=fmap, fields=['MadeUpField'],
    ...                        undef_fields=True,
    ...                        bids_dir=str(datadir / 'ds054'),
    ...                        bids_validate=False).run()
    >>> meta.outputs.MadeUpField
    <undefined>
    """
    input_spec = _ReadSidecarJSONInputSpec
    output_spec = _ReadSidecarJSONOutputSpec
    # Cached BIDS layout, reused across runs of this interface.
    layout = None
    _always_run = True
    def __init__(self, fields=None, undef_fields=False, **inputs):
        # fields: extra metadata keys to expose as dynamic outputs.
        # undef_fields: when True, missing fields yield <undefined> instead of
        # raising KeyError at run time.
        super(ReadSidecarJSON, self).__init__(**inputs)
        self._fields = listify(fields or [])
        self._undef_fields = undef_fields
    def _outputs(self):
        # Extend the static output spec with the requested metadata fields.
        base = super(ReadSidecarJSON, self)._outputs()
        if self._fields:
            base = add_traits(base, self._fields)
        return base
    def _run_interface(self, runtime):
        self.layout = self.inputs.bids_dir or self.layout
        self.layout = _init_layout(
            self.inputs.in_file, self.layout, self.inputs.bids_validate
        )
        # Fill in BIDS entities of the output ("*_id")
        output_keys = list(_BIDSInfoOutputSpec().get().keys())
        params = self.layout.parse_file_entities(self.inputs.in_file)
        self._results = {
            key: params.get(key.split("_")[0], Undefined) for key in output_keys
        }
        # Fill in metadata
        metadata = self.layout.get_metadata(self.inputs.in_file)
        self._results["out_dict"] = metadata
        # Set dynamic outputs if fields input is present
        for fname in self._fields:
            if not self._undef_fields and fname not in metadata:
                raise KeyError(
                    'Metadata field "%s" not found for file %s'
                    % (fname, self.inputs.in_file)
                )
            self._results[fname] = metadata.get(fname, Undefined)
        return runtime
class _BIDSFreeSurferDirInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`BIDSFreeSurferDir`."""

    derivatives = Directory(
        exists=True, mandatory=True, desc="BIDS derivatives directory"
    )
    freesurfer_home = Directory(
        exists=True, mandatory=True, desc="FreeSurfer installation directory"
    )
    subjects_dir = traits.Either(
        traits.Str(),
        Directory(),
        default="freesurfer",
        usedefault=True,
        desc="Name of FreeSurfer subjects directory",
    )
    spaces = traits.List(traits.Str, desc="Set of output spaces to prepare")
    overwrite_fsaverage = traits.Bool(
        False, usedefault=True, desc="Overwrite fsaverage directories, if present"
    )
class _BIDSFreeSurferDirOutputSpec(TraitedSpec):
    """Output specification for :class:`BIDSFreeSurferDir`."""

    subjects_dir = traits.Directory(exists=True, desc="FreeSurfer subjects directory")
class BIDSFreeSurferDir(SimpleInterface):
    """
    Prepare a FreeSurfer subjects directory for use in a BIDS context.
    Constructs a subjects directory path, creating if necessary, and copies
    fsaverage subjects (if necessary or forced via ``overwrite_fsaverage``)
    into from the local FreeSurfer distribution.
    If ``subjects_dir`` is an absolute path, then it is returned as the output
    ``subjects_dir``.
    If it is a relative path, it will be resolved relative to the
    ``derivatives`` directory.
    Regardless of the path, if ``fsaverage`` spaces are provided, they will be
    verified to exist, or copied from ``$FREESURFER_HOME/subjects``, if missing.
    The output ``subjects_dir`` is intended to be passed to ``ReconAll`` and
    other FreeSurfer interfaces.
    """
    input_spec = _BIDSFreeSurferDirInputSpec
    output_spec = _BIDSFreeSurferDirOutputSpec
    _always_run = True
    def _run_interface(self, runtime):
        subjects_dir = Path(self.inputs.subjects_dir)
        if not subjects_dir.is_absolute():
            # Relative paths are anchored at the derivatives directory.
            subjects_dir = Path(self.inputs.derivatives) / subjects_dir
        subjects_dir.mkdir(parents=True, exist_ok=True)
        self._results["subjects_dir"] = str(subjects_dir)
        orig_subjects_dir = Path(self.inputs.freesurfer_home) / "subjects"
        # Source is target, so just quit
        if subjects_dir == orig_subjects_dir:
            return runtime
        spaces = list(self.inputs.spaces)
        # Always copy fsaverage, for proper recon-all functionality
        if "fsaverage" not in spaces:
            spaces.append("fsaverage")
        for space in spaces:
            # Skip non-freesurfer spaces and fsnative
            if not space.startswith("fsaverage"):
                continue
            source = orig_subjects_dir / space
            dest = subjects_dir / space
            # Edge case, but give a sensible error
            if not source.exists():
                if dest.exists():
                    continue
                else:
                    raise FileNotFoundError("Expected to find '%s' to copy" % source)
            # Finesse is overrated. Either leave it alone or completely clobber it.
            if dest.exists() and self.inputs.overwrite_fsaverage:
                rmtree(dest)
            if not dest.exists():
                try:
                    copytree(source, dest)
                except FileExistsError:
                    # Benign race: a parallel job finished the copy first.
                    LOGGER.warning(
                        "%s exists; if multiple jobs are running in parallel"
                        ", this can be safely ignored",
                        dest,
                    )
        return runtime
| |
#!/usr/bin/python
#this is a conglomerate of fasta-fixing scripts, now called FISH (FASTA ID SWAPPING HELPER) because lol i can acronym.
##
#last edit abigailc@Actaeon Sept 7 2016
#things this doesn't do: play super nice with accession numbers instead of GI numbers. probably easy to convert, (see that one script that one time), but meh
#do it later.
#todo
#import functions from FEAST - append taxonomy, extract, split, etc
#when you create a Fasta object, initialize it with sequence ID and Data by using either gen_original_lists or blast2fasta
class Fasta:
    """In-memory model of a FASTA file: editable IDs/sequences plus originals.

    Populate via gen_original_lists (parse a .fasta file) or blast2fasta
    (parse BLAST tabular output) before calling any other method.
    """
    def __init__(self, name):
        #all ids and seqs should be stripped of leading and trailing whitespace and have ">" removed for reasons.
        #this is the name of the fasta, it can be anything, i'm not really using it right now.
        self.name = name
        #this is the to-be-modified version of sequence IDs and sequence-Data
        # ALWAYS keep IDS and SEQS the same length. id[1] should ALWAYS correspond to seq[1].
        self.ids = []
        self.seqs = []
        # these are the original SEQids and Sequences. They should never be modified after generation in gen_original_lists or blast_to_fasta
        self.original_ids = []
        self.original_seqs = []
        # derived metadata, filled by gen_species_lists / gen_numbers /
        # SetTaxID / GetTaxonomy respectively
        self.species_names = []
        self.numbers = []
        self.taxid = []
        self.taxonomy = []
def ret_name(self):
return self.name
def gen_original_lists(self, fastaname):
with open(fastaname) as fastafile:
for line in fastafile:
if "\n" == line:
pass
if ">" in line:
#write the previous AA seq
try:
AAseq=AAseq.strip()
self.seqs.append(AAseq)
self.original_seqs.append(AAseq)
except:
pass
#initialize a new AAseq
AAseq = ""
#format the seqID
newline = line.strip()
newline = line.strip(">")
#write the seqID
self.ids.append(newline.strip())
self.original_ids.append(newline.strip())
else:
AAseq = AAseq+line
AAseq=AAseq.strip()
#catch the last AAseq pass
self.seqs.append(AAseq)
self.original_seqs.append(AAseq)
print("Initial sequence and ID lists created. Contains "+str(len(self.ids))+" sequences")
def manual_shorten(self, shorts):
#the list of shorts will be provided like "Bacteria,Bac Eukarya,Euk"
changes = shorts.split()
for item in self.ids:
newline = item
index = self.ids.index(item)
for change in changes:
old, new = change.split(",")
newline = newline.replace(old,new)
self.ids[index] = newline
#done
print("Manual shorten complete")
def gen_numbers(self):
for item in self.ids:
number = re.sub("(.*)(\|)(.*)","\\3", item)
self.numbers.append(number)
def gen_species_lists(self):
self.species_names = []
speclist = []
for item in self.ids:
# item will be "Nostoc_punctiforme_PCC_73102|gi#|186468349" or "Blah|Rank|Nostoc_punctiforme_PCC_73102|gi#|186468349"
# for now, ignores anything that isn't Genus_species.
# for example, ignores strain, ., things with an extra
# word, etc.
taxon = re.sub("([^_]*)([A-Z][a-z]*_[a-z]*)(.*)", "\\2", item)
if "#" in taxon:
print ("TAXON error in gen_species_lists():" + taxon)
speclist.append(taxon)
self.species_names.append(taxon)
return speclist
def common_shorten(self, verbose = False):
#TODO: allow input of manual shorten-pairs, possibly in new function
#put your conversions of common strings to shorten here
inte = 0
for item in self.ids:
newline = item
index = self.ids.index(item)
newline = re.sub("bacteria\|", "bac|", newline)
newline = re.sub("bacteriales\|", "bacl|", newline)
newline = re.sub("bacteriaceae\|", "bacc|", newline)
newline = re.sub("Bacteria\|", "Bac|", newline)
newline = re.sub("Archaea\|", "Arc|", newline)
newline = re.sub("Eukaryota\|", "Euk|", newline)
newline = re.sub("Fungi\|", "Fun|", newline)
newline = re.sub("Viridiplantae\|", "Vir|", newline)
newline = re.sub("Metazoa\|", "Met|", newline)
newline = re.sub("mycetes\|", "myc|", newline)
newline = re.sub("mycetales\|", "mycl|", newline)
newline = re.sub("mycetaceae\|", "mycc|", newline)
newline = re.sub("Methanomassiliicoccaceae\|", "Methmasscoc|", newline)
#newline = re.sub("bacteriales\|", "bacles|", newline)
#newline = re.sub("bacteriales\|", "bacles|", newline)
#newline = re.sub("[+=\.]", "", newline)
newline = re.sub("_enterica_subsp_enterica_serovar", "", newline)
if newline == item:
pass
else:
if verbose is True:
print(item)
print(newline)
inte +=1
self.ids[index] = newline
print("Common shorten complete")
print("Fixed "+str(inte)+" lines")
#this should have successfully modified the self.ids list to contain shortened sequence ids.
def length_check(self, length, verbose):
#needs to pass in a number... charnum
toolong = 0
length = int(length)
print("trying to shorten to length "+str(length))
for item in self.ids:
index = self.ids.index(item)
linelength = len(item)
newline= item
if int(linelength) > int(length):
toolong +=1
#change all 12 to 14 if include \n at end of seqids... for now, we are not considering them.
gi = newline[-12:]
rest = re.sub("([^#]*)(#)(.*)", "\\1", newline)
nogi = rest[:-3]
newl = length-13
#12 instead of 12 to leave space for adding a bar.
newnogi = nogi[:newl]
if newnogi[-1:] == "|":
pass
else:
newnodi = newnogi[:-1]
newline = newnogi+"|"+gi
if verbose == True:
print ("LENGTHERROR: "+item[:length]+" || "+item[length:])
print("Tried to fix: "+newline)
self.ids[index] = newline
#end
print("Length-check complete, "+str(toolong)+" sequences were fixed")
def weird_AA_check(self, verbose = False):
lerr = 0
errletters = []
for item in self.seqs:
#if you want to not remove "-" just add it to list of letters.
listofletters = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'Y']
newseq = ""
#open sequences list
index = self.seqs.index(item)
anerror = "no"
for letter in item:
if letter in listofletters:
pass
elif letter == "\n":
pass
else:
if verbose == True:
if letter == "-":
pass
else:
print("LETTERERROR: "+letter)
anerror = "yes"
errletters.append(letter)
letter = ""
lerr +=1
newseq = newseq+letter
if verb == True:
if anerror == "yes":
print(item)
self.seqs[index] = newseq
if verbose == True:
from collections import Counter
counta = Counter(errletters).most_common()
print("There were "+str(lerr)+" letter errors as so:")
print(type(counta))
for thing in counta:
print(thing)
#end
print("weird aa check done")
def weird_ID_check(self, verb = False):
errors = 0
for item in self.ids:
index = self.ids.index(item)
newitem = re.sub("[\[\]]", "", item)
newitem = re.sub("[:;=,/\+'\.\(\)]", "_", newitem)
newitem = re.sub(" ", "_", newitem)
newitem = re.sub("__", "_", newitem)
if item == newitem:
pass
else:
errors += 1
if verb == True:
print("Replacing:\n"+item+"\n with:\n"+newitem)
self.ids[index] = newitem
if verb == True:
print("there were "+str(errors)+" weird_ID errors")
print("weird id check done")
def duplicates_check(self, verb = False):
listoflines = []
rep = 0
num = 0
for line in self.ids:
index = self.ids.index(line)
if line in listoflines:
num+=1
rep = line+"v"+str(num)
self.ids[index] = rep
listoflines.append(line)
if verb == True:
print ("there were "+str(num)+" duplicate sequences that were numbered")
#done
print("duplicate check done")
def duplicates_remove(self, verb=False):
listoflines = []
to_remove = []
rep = 0
num = 0
for line in self.ids:
index = self.ids.index(line)
#if it is a duplicate...
if line in listoflines:
num+=1
to_remove.append(index)
listoflines.append(line)
for number in to_remove:
self.ids.pop(number)
self.seqs.pop(number)
if verb == True:
print ("there were "+str(num)+" duplicate sequences that were removed")
#done
print("duplicate ID removal done")
    def index_shorted(self, replace):
        """Rebuild each ID from selected taxonomy fields and/or the gi number.

        ``replace`` is either the literal string "gi", an int giving a
        1-based taxonomy depth, or a space-separated combination such as
        "2 3 gi". IDs without a "|gi#" marker get "NA" in place of the gi.

        NOTE(review): when ``replace == "gi"`` and an ID has no "|gi#"
        marker, no CTdict entry is created, so the final lookup loop will
        raise KeyError — confirm intended inputs always carry gi markers.
        """
        #currently does NOT work w/ accession numbers
        #here replace is depth and/or gi num eg "2 3 gi"
        CTdict = {}
        for line in self.ids:
            if "|gi#" in line:
                # split "tax|fields|gi#|12345" into the taxonomy part and gi
                taxgi = re.sub("([^#]*)(\|gi#\|?)([0-9]*)(.*)", "\\1~\\3", line)
                tax, gi = taxgi.split("~")
                taxlist = tax.split("|")
                if replace == "gi":
                    CTdict[line] = gi
                if type(replace) is int:
                    CTdict[line] = taxlist[replace-1]
                if type(replace) is str:
                    listreplace = replace.split()
                    newid = ""
                    for item in listreplace:
                        if item == "gi":
                            newid = newid+"|"+gi
                        else:
                            newid = str(newid)+"|"+str(taxlist[int(item)-1])
                    newid = newid
                    CTdict[line] = newid
                    print(newid)
            else:
                # no gi marker: taxonomy fields only, "NA" stands in for gi
                tax = re.sub("([^#]*)(\|gi#\|?)([0-9]*)(.*)", "\\1", line)
                taxlist = tax.split("|")
                if replace == "gi":
                    pass
                if type(replace) is int:
                    CTdict[line] = taxlist[replace-1]
                if type(replace) is str:
                    listreplace = replace.split()
                    newid = ""
                    f = 1
                    for item in listreplace:
                        f += 1
                        if item == "gi":
                            newid = newid+"|NA"
                        else:
                            newid = str(newid)+"|"+str(taxlist[int(item)-1])
                    # #SPECIFICALLY FOR CURRENT USE_CASE, REMOVE LATER
                    # if f == 2:
                    #     newid = str(newid)+"|"+str(taxlist[int(item)-1])
                    # if f == 3:
                    #     newid = str(newid)+"|"+str(taxlist[int(item)])
                    newid = newid
                    CTdict[line] = newid
                    print(newid)
        # apply the old-id -> new-id mapping positionally
        for line in self.ids:
            index = self.ids.index(line)
            newestid = CTdict[line]
            self.ids[index] = newestid
        print("index check done")
def ten_char(self):
#something
#this one should be done in a seperate loop
CTdict = {}
iteration = 0
for line in self.ids:
iteration +=1
line = line.strip()
## #i have something like
## >Methanococcoides_burtonii|gi|909890
## #i want
## MethBurt00
GenusSpecies = re.sub("([A-Z][a-z]*)(_)([A-Z]*[a-z]*)(.*)", "\\1~\\3", line)
try:
Genus, Species = GenusSpecies.split("~")
g4 = Genus[:4]
try:
s4 = Species[:4]
s3 = Species[:3]
except:
s4 = Species[:2]
s3 = Species[:2]
if iteration < 10:
newid = g4+s4.capitalize()+"0"+str(iteration)
elif iteration > 99:
newid = g4+s3.capitalize()+str(iteration)
else:
newid = g4+s4.capitalize()+str(iteration)
except:
## print(GenusSpecies)
gs8 = GenusSpecies[1:9]
if iteration < 10:
newid = gs8+"0"+str(iteration)
elif iteration > 99:
newid = gs8[:-1]+str(iteration)
else:
newid = gs8+str(iteration)
## print(newid)
CTdict[line] = newid
for line in self.ids:
index = self.ids(line)
newestid = CTdict[line]
self.ids[index] = newestid
print("ten char done")
def mb_version(self):
#shorten seqids to 94 if not already done.
self.length_check(94)
#deal with any duplicates that may have caused
self.duplicates_check()
#remove the # and | characters that MrBayes Hates
for line in self.ids:
if "#" in nline:
nline = re.sub("[#]", "", nline)
if "|" in nline:
nline = re.sub("\|", "_", nline)
#tell you what to do
print("MB version ids created")
print("You should print this too .fasta format, and then convert to nexus however you want")
    def load_info_swap(self, info_file_in):
        """Load an info file of alternating original/new ID lines.

        The file alternates one original ID line with one changed ID line
        (the format written by gen_info). When original_ids is already
        populated, self.ids is rewritten via the mapping; otherwise both
        lists are seeded from the file.

        NOTE(review): in the empty-original_ids branch, the file's first
        line of each pair goes into self.ids and the second into
        self.original_ids — this looks inverted relative to gen_info's
        write order; confirm which orientation callers expect.
        """
        #reads a file of form
        # originalID
        # changedID
        #and generates self.ids from that file.
        # kid/vid track whether the key (original) and value (new) of the
        # current pair have been read yet
        kid = "no"
        vid = "no"
        CTdict = {}
        with open (info_file_in) as old:
            for line in old:
                #first pass: gets key (original ID)
                #second pass: gets value (new ID)
                #if we have no info, get key
                if kid == "no":
                    key = line.strip()
                    kid = "yes"
                    continue
                elif kid == "yes":
                    #if we have key and value, record.
                    if vid == "yes":
                        CTdict[key]=value
                        vid = "no"
                        kid = "no"
                        continue
                    #if we have key but no value, get value.
                    if vid == "no":
                        value = line.strip()
                        vid = "yes"
            #catch the final pass
            CTdict[key]=value
        if self.original_ids == []:
            for thing in CTdict:
                self.ids.append(thing)
                self.original_ids.append(CTdict[thing])
        else:
            for item in self.original_ids:
                index = self.original_ids.index(item)
                newid = CTdict[item]
                self.ids[index] = newid
        print("original ids:")
        print(self.original_ids)
        print("new ids:")
        print(self.ids)
        #done
        #troubleshooting: do not preform this operation after any that change self.ids. this op must be done first, or in a seperate command.
def one_per_genus(self):
#get species list if it doesn't exist
genuslist = []
used_genus = []
indices = []
if self.species_names == []:
self.gen_species_lists()
for item in self.species_names:
try:
gen, spec = item.split("_")
except:
gen = item
genuslist.append(gen)
for gen in genuslist:
if gen in used_genus:
rem_index = genuslist.index(gen)
indices.append(rem_index)
else:
used_genus.append(gen)
for i in sorted(indices, reverse=True):
del self.ids[i]
del self.seqs[i]
print("do not use this method in conjunction with anything other than write-fasta")
def gen_new_fasta(self, new_fasta_name):
#this should print the changed seqids and changed AA sequences to file.
newfasta = new_fasta_name
# print(len(self.original_ids))
# print(len(self.ids))
# print(len(self.original_seqs))
# print(len(self.seqs))
with open (newfasta, "w") as new:
for i in range(len(self.ids)):
new.write(">"+self.ids[i].strip()+"\n")
# print(i) #
#unclear if this needs a "\n" after it... check.#TODO
#print(self.seqs)
#print(type(self.seqs[i]))
new.write(self.seqs[i]+"\n")
print("Finished, your new fasta file is located at "+newfasta)
#done
def extract(self, list_of_keeps):
keep_ids = []
keep_seq = []
success = 0
suc_num = len(list_of_keeps)
for item in list_of_keeps:
item = item.strip()
for thing in self.original_ids:
if thing == item:
keep_ids.append(thing)
index = self.original_ids.index(item)
seq = self.original_seqs[index]
keep_seq.append(seq)
success += 1
if suc_num == success:
print("100% complete extract")
else:
print(str(success)+"out of "+str(suc_num)+" sequences extracted")
self.ids = keep_ids
self.seqs = keep_seq
def swap_in_newick(self, old_newick_name, new_file_name):
#this replaces the tip names in a newick file. sometimes works on nexus files too, but I havent extensively tested it.
newick = old_newick_name
newnewick = new_file_name
with open (newick) as old:
with open (newnewick, "w") as new:
for line in old:
for item in self.original_ids:
index = self.original_ids.index(item)
line = line.replace(item, self.ids[index])
new.write(line)
print("finished, tip-replaced-newick file at: "+newnewick)
#done
def swap_in_nexus(self):
print ("You didn't implement this yet. try using newick replace, it might work")
pass
#something
#to-do, try nexus replace in the meantime, it should work
def gen_info(self, info_file_name):
#writes a file of form
# originalID
# changedID
with open(info_file_name, "w") as inf:
listlength = len(self.original_ids)
if listlength != len(self.ids):
print ("List lengths do not match! FATAL ERROR")
print (self.original_ids)
print (self.ids)
raiseSystemExit
for i in range(listlength):
inf.write(self.original_ids[i])
inf.write(self.ids[i]+"\n")
print("Info file was generated. Named "+info_file_name)
#done
def write_one_seq_per_file(self):
geneflist = []
genenames = []
for i in range(len(self.ids)):
with open("Seq" + str(i), "w") as new:
new.write(">" + self.ids[i]+"\n")
new.write(self.seqs[i]+"\n")
name = re.sub("([^\|]*)(\|)(.*)", "\\1", self.ids[i])
geneflist.append("Seq" + str(i))
genenames.append(name)
return geneflist, genenames
print("one per file generated")
def number_of_sites(self):
return len(self.original_seqs[0])
    def shorten(self):
        """Convert NCBI-style defline IDs to "Species_name|...#|number" form.

        Handles both old "gi|123|ref|ACC|description [Species name]" lines
        and plain "ACC description [Species name]" lines. IDs whose
        bracketed species part cannot be found are reported and left
        unchanged.

        NOTE(review): when neither regex matches, ``num``/``edit1`` keep
        the whole line and the ID passes through lightly sanitized —
        confirm that is acceptable for non-NCBI input.
        """
        print("shortening ids...")
        unk = "no"
        normal = 0
        ucount = 0
        for line in self.ids:
            index = self.ids.index(line)
            # this removes words in brackets that aren't Species_name
            # and then changes NCBI's default naming scheme to be
            #>Species_name|#########
            # and makes a list of all gi nums and all
            # duplicates
            # AAH91460.1 Ribosomal protein L3 [Danio rerio]
            if "gi|" in line:
                number = re.sub("(gi)(\|)([0-9]*)(\|)([A-Za-z]*)(\|)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\3", line)
                num = number.strip()
                edit1 = re.sub("(gi)(\|)([0-9]*)(\|)([A-Za-z]*)(\|)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\8\\2\\1#|", line)
                #get acc number
            else:
                number = re.sub("([^ ]*)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\1", line)
                num = number.strip()
                #get edit | AAH91460.1 Ribosomal protein L3 [Danio rerio]
                edit1 = re.sub("([^ ]*)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\3|", line)
            # a "[" surviving means the bracketed species part was found
            if "[" in edit1:
                unk = "no"
                normal += 1
            else:
                unk = "yes"
            edit2 = re.sub("[\[\]]", "", edit1)
            #for now, leave periods in name due to their necessity in acc numbers (????)
            edit3 = re.sub("[:;\.=,/\+'\(\)]", "_", edit2)
            edit4 = re.sub(" ", "_", edit3)
            edit4 = re.sub("__", "_", edit4)
            edit4 = edit4+num
            if unk == "no":
                self.ids[index] = edit4
            else:
                print("Unknown Species in ID:" + line)
        print("shortened: "+str(normal)+" sequence")
    def blast2fasta(self, blastlist, ENTREZ=False, num=False):
        """Load IDs and sequences from BLAST tabular output.

        Parses outfmt "6 sallseqid salltitles sseq" lines (tab-separated:
        gi list, title list, sequence) into self.ids/seqs and the original
        lists. ``ENTREZ`` (e.g. "Mycobacterium triplex") selects the entry
        matching that taxon from MULTISPECIES records; False takes the
        first. ``num`` limits how many sequences to write (False = all).

        NOTE(review): if no title matches ENTREZ on the first line,
        ``index``/``seqi`` may be unbound — the except branch below
        tolerates up to 5 such misses before giving up on the gene.
        """
        # entrez is used to ensure that sequence saved uses correct TAXON, esp. if sequence is a MULTISPECIES entry.
        # entrex should be somethin like "Mycobacterium triplex"
        #take from MakeSPeciesTree.py version if you want a new sequence for each multispecies thing(!)
        # num is how many sequences to write. for species trees, we almost certainly only want one.
        # for converting full downloaded .fastas, we will want all of them (default = False means to do all of them)
        # Converts blast outfmt "6 sseqid stitle sseq" to original lists if
        # entrez = false
        #... now converting outfmt "6 sallseqid salltitles sseq" to sh fasta with selection of proper gi/acc/taxon
        # this should take format " " blast names and replace them with the proper
        # fasta shit
        ernum = 0
        # we open each file in a unique call to blast2fasta. files should be
        # deleted afterwards.
        bf = open(blastlist, 'r')
        error = 0
        end = "no"
        for line in bf:
            if end == "yes":
                break
            # gi|738518257|ref|WP_036466735.1|;gi|620038207|emb|CDO87046.1| 50S
            # ribosomal protein L15 [Mycobacterium triplex]<>50S ribosomal protein L15
            # [Mycobacterium triplex]
            gis = re.sub("(.*)(\t)(.*])(\t)([A-Z-]*)", "\\1", line)
            names = re.sub("(.*)(\t)(.*])(\t)([A-Z-]*)", "\\3", line)
            seq = re.sub("(.*)(\t)(.*])(\t)([A-Z-]*)", "\\5", line)
            # this removes sequences with no Species_name given, so as to avoid errors
            # downstream
            if "\t" in gis:
                error += 1
                print("ERROR in blast parsing: " + line)
                continue
            else:
                gilist = gis.split(";")
                namelist = names.split("<>")
                if ENTREZ is False:
                    index = 0
                else:
                    ENTREZ = ENTREZ.strip("\"")
                    for item in namelist:
                        if ENTREZ in item:
                            index = namelist.index(item)
            try:
                seqi = gilist[index].strip() + namelist[index].strip()
                #end = "yes"
            except UnboundLocalError:
                error += 1
                print("Name error... might fix")
                if error == 5:
                    print("Serious ENTREZ error:")
                    print(ENTREZ)
                    print(namelist)
                    print("This gene wasn't found in this taxon, skipping")
                    break
                continue
            # goes to next line, abandoning this one
            seqid = re.sub("[ ]", "_", seqi)
            # strips for .fasta format
            seqid = seqid.strip()
            seqid = seqid.strip(">")
            # add the new sequence id to the list.
            self.ids.append(seqid)
            self.original_ids.append(seqid)
            # the new sequence, re-wrapped at 80 characters per line
            slist = []
            count = 0
            newseq = ""
            for letter in seq:
                if count > 79:
                    count = 0
                    newseq = newseq + ("\n")
                newseq = newseq + letter
                count += 1
            self.seqs.append(newseq.strip())
            self.original_seqs.append(newseq.strip())
        print("Blasttofasta id/seq loading complete!")
def SetTaxID(self):
self.taxid = []
for item in self.numbers:
GItoTAXID = "xmllint --xpath '/GBSet/GBSeq/GBSeq_feature-table/GBFeature/GBFeature_quals/GBQualifier[GBQualifier_name=\"db_xref\"]/GBQualifier_value/text()' \"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein&id="+item+"&retmode=xml\""
futuretaxid = subprocess.check_output(GItoTAXID, shell=True)
taxid = re.sub("(taxon:)([0-9]*)(.*)", "\\2", futuretaxid)
self.taxid.append(taxid)
def GetTaxonomy(self):
self.taxonomy = []
if self.taxid == []:
print("You need to generate taxids first.. lets try")
self.SetTaxID()
for item in self.taxid:
taxid = number
ranklist = "superkingdom kingdom phylum class order family"
ranklist = ranklist.split()
for r in ranklist:
TAXIDtoRANKNAME = "xmllint --xpath '/TaxaSet/Taxon/LineageEx/Taxon[Rank=\"" + r + \
"\"]/ScientificName/text()' \"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=taxonomy&id=" + taxid + "\""
try:
rankname = subprocess.check_output(TAXIDtoRANKNAME, shell=True)
except:
rankname = "NA"
rankname = re.sub(" ", "_", rankname)
taxdict = {}
taxdict[r]=rankname
self.taxonomy.append(taxdict)
def AppendTaxonomy(self):
for item in self.ids:
index = self.ids.index(item)
rankdict = self.taxonomy[index]
newitem = rankdict["superkingdom"]+"|"+rankdict["kingdom"]+"|"+rankdict["phylum"]+"|"+rankdict["class"]+"|"+rankdict["order"]+"|"+rankdict["family"]+"|"+item
self.ids[index] = newitem
#TODO:
#add get taxonomy to parser..
#this hasn't been implemented in class fasta, so I am leaving it commented out.. subtrees file might be easily replaced using replace.newick but it might take literally ages... unclear.
# def replace2(replace_file, dict_old_new, charnum, verb):
# print("Recognized subtrees file, using subtrees varient")
# outputlist = []
# rep = 0
# replist = []
# newfilename = replace_file.split(".")
# newfilename = newfilename[0]+str(charnum)+"limit."+newfilename[1]
# with open(replace_file) as old:
# if verb == True:
# print("Opening "+replace_file)
# with open (newfilename, "w") as new:
# for line in old:
# line = line.strip()
# for item in dict_old_new:
# if item[:127] in line:
# if item[:127] in replist:
# pass
# else:
# replist.append(item[:127])
# rep+=1
# ## print(line)
# oldline = line
# line = line.replace(item[:127], dict_old_new[item])
# ## if verb == True:
# ## if len(line) <200:
# ## print oldline
# ## print item
# ## print dict_old_new[item]
# ## print(line)
# ## print("\n")
# ## print("\n")
# new.write(line+"\n")
# print("finished with "+newfilename+"made "+str(rep)+" replacements of "+str(len(replist))+" differnt patterns")
# ## print(replist)
# return newfilename
# def gen_original_lists(self, fastaname):
# def load_info_swap(info_file_in):
# def duplicates_check(verb = False):
# def weird_ID_check(verb = False):
# def weird_AA_check(verbose = False):
# def length_check(length, verbose=False):
# def manual_shorten():
# def common_shorten():
# def mb_version():
# def index_shorted(replace):
# def ten_char():
# #write stuff
# def gen_new_fasta(new_fasta_name):
# def swap_in_nexus():
# def swap_in_newick(old_newick_name, new_file_name):
# def gen_info(info_file_name):
# example $ python FISH_2.py /path/to/my/files -fas input.fasta -wf output.fasta -dr
#to remove duplicates from a thing
if __name__ == "__main__":
    # Command-line driver: parse flags, build one Fasta, apply the
    # requested fix-up / shortening operations in order, then write output.
    print("Running in terminal")
    import sys
    import argparse
    import os
    import re
    parser = argparse.ArgumentParser(description="All")
    #necessary bits
    parser.add_argument("directory", nargs='?', default=os.getcwd(), type=str, help="type name of directory to run in where fasta resides, if not pwd")
    parser.add_argument("-fas", "--fasta", action = "store", default = False, help="type the name of your .fasta file")
    #options to load changes from another file
    parser.add_argument("-i", "--infofile", action = "store", default = False, help="Provide an Info File (as generated by this script previously) to pull original and new sequences from")
    #options# to check,fix,edit,etc the seqs or seqids
    # -length
    # -duplicate
    # -weirdaa
    # -weirdID
    parser.add_argument("-sh", "--shorten", action = "store_true", default=False, help="shortens blast (from online) seqIDs")
    parser.add_argument("-b2f", "--blast2fasta", action = "store_true", default=False, help="Blast+ output -> fasta download format BUGGY")
    parser.add_argument("-l", "--length", action = "store", default=False, help="Provide a max length for your sequenceIDs")
    parser.add_argument("-dn", "--duplicates_number", action = "store_true", help="Flag causes identical seqIDs to be numbered 1 2 3 etc to prevent program confusion")
    parser.add_argument("-dr", "--duplicates_remove", action = "store_true", help="Flag causes identical seqIDs to be removed")
    parser.add_argument("-og", "--one_per_genus", action = "store_true", help = "Flag keeps first sequence per unique genus")
    parser.add_argument("-fid", "--fixID", action = "store_true", help="Flag scans SeqIDs and removes weird characters like += etc")
    parser.add_argument("-faa", "--fixAA", action = "store_true", help="Flag scans Sequences and removes non-standard AA characters like X B &")
    #options to shorten specific words
    # -m manual_shorten
    # -c common_shorten
    parser.add_argument("-c", "--common", action = "store_true", help="Flag causes seqIDs to be shortened in a predefined manner, eg bacteriales->bacles ")
    parser.add_argument("-m", "--manual", default = False, action = "store", help="Provide a list of \"original,new\" things to shorten. eg \"Bacteria,Bac Eukaryota,Euk\"")
    #special shortening methods
    parser.add_argument("-t", "--tenchars", action = "store_true", help="Flag turns sequence IDs into ten character strings")
    parser.add_argument("-ba", "--bayes", action = "store_true", help="Flag turns sequences into form that will work as MrBayes input")
    parser.add_argument("-p", "--piece", default = False, action = "store", help="Provide taxonomy-depth, gi, or combo for shortening eg \"1 3 gi\"")
    #writing methods
    parser.add_argument("-wf", "--writefasta", action = "store", default=False, help="Provide name for new fasta file")
    parser.add_argument("-wn", "--writenewick", action = "store",default=False, help="Provide name of newick, name of newfile eg \"example.newick replaced.newick\"")
    parser.add_argument("-wi", "--writeinformation", action = "store", default=False, help="Provide name for this info_file")
    # -fasta
    # -newick replace
    # -info gen (should this always happen?)
    parser.add_argument("-v", "--verbose", action = "store_true", help="prints more information - for debugging mostly. might not be implemented yet")
    args = parser.parse_args()
    #workflow: do all the things you want to do to change seqID/seq in one step, save the information and .fasta file.
    #then, if desired, use that fasta as base to make ten-char shortened, MBversion, or depth-shortened files, also saving info file so they are reversable.
    #actual work flow
    #change dir if desiredprint(args.fasta)
    try:
        os.chdir(args.directory)
        if args.verbose == True:
            print("moved to dir: "+args.directory)
    except:
        print ("didn't change dir")
    # NOTE(review): this module-level `verb` is also read as a global by
    # weird_AA_check; the script breaks if that method is used as a library
    if args.verbose:
        verb = True
    else:
        verb = False
    #originate the fasta class instance
    MyFasta = Fasta("MyFastaName")
    if args.blast2fasta != False:
        MyFasta.blast2fasta(args.fasta)
    else:
        if args.fasta != False:
            MyFasta.gen_original_lists(args.fasta)
    #this should be done in conjunction w / write fasta or replace newick.
    if args.infofile != False:
        MyFasta.load_info_swap(args.infofile)
    #here are the error-fixing calls
    if args.duplicates_number == True:
        MyFasta.duplicates_check(verb)
    if args.duplicates_remove == True:
        MyFasta.duplicates_remove(verb)
    if args.one_per_genus == True:
        MyFasta.one_per_genus()
    if args.fixID == True:
        MyFasta.weird_ID_check(verb)
    if args.fixAA == True:
        MyFasta.weird_AA_check(verb)
    #shortening calls
    if args.shorten == True:
        MyFasta.shorten()
    if args.common == True:
        MyFasta.common_shorten(verb)
    if args.manual != False:
        MyFasta.manual_shorten(args.manual)
    if args.piece != False:
        MyFasta.index_shorted(args.piece)
    if args.length != False:
        MyFasta.length_check(args.length, verb)
    #these should only be done on their own, not combined w the above. for mrbayes, anything that requires 10 characters.
    if args.bayes == True:
        MyFasta.mb_version()
    if args.tenchars == True:
        MyFasta.ten_char()
    #write stuff
    if args.writefasta != False:
        MyFasta.gen_new_fasta(args.writefasta)
    if args.writenewick != False:
        old, new = args.writenewick.split()
        MyFasta.swap_in_newick(old, new)
    if args.writeinformation != False:
        MyFasta.gen_info(args.writeinformation)
    print("All things finished, exiting...")
# FISH FASTA ID SWAPPING HELPER
#### this is becoming a dedicated tip-name-editing package.
#requires that tips are in format given by FEAST's shorten or shorten-keep-info
#things it can do:
# 1. shorten too long seqids using common shortening phrases, or by removing info from the species-name (usually catches strain info)
# 2. remove weird characters from seqIDS
# 3. remove weird characters from AA sequences
# do this on 1. fasta files 2. nexus files (maybe? unclear) 3. newick files (maybe? unclear)
| |
"""Module responsible for adding generated test cases to a project.
Client of this module should use it through add_test_case_to_project() function.
"""
import os.path
from pythoscope.logger import log
from pythoscope.util import max_by_not_zero, module_path_to_name
from pythoscope.store import Module, TestClass, code_of
from pythoscope.code_trees_manager import CodeTreeNotFound
from pythoscope.astvisitor import find_last_leaf, get_starting_whitespace, \
is_node_of_type, remove_trailing_whitespace
from pythoscope.astbuilder import EmptyCode, Newline, create_import, \
insert_after, insert_before
def add_test_case_to_project(project, test_class, main_snippet=None, force=False):
    """Insert a generated test class into the project's test modules.

    If a class with the same name already exists, new methods are merged
    into it (clashing ones replaced only when `force` is set); otherwise
    the class is appended to the module chosen for it. A missing code
    tree (failed earlier inspection) aborts with a warning.
    """
    existing_test_class = find_test_class_by_name(project, test_class.name)
    try:
        if not existing_test_class:
            module = find_module_for_test_class(project, test_class)
            log.info("Adding generated %s to %s." % (test_class.name, module.subpath))
            ensure_imports(module, test_class.imports)
            add_test_case(module, test_class)
            ensure_main_snippet(module, main_snippet, force)
        else:
            ensure_imports(existing_test_class, test_class.imports)
            merge_test_classes(existing_test_class, test_class, force)
            ensure_main_snippet(existing_test_class.parent, main_snippet, force)
    except CodeTreeNotFound, ex:
        log.warning("Not adding %s to %s, because of a failed inspection." %\
            (test_class.name, ex.module_subpath))
def add_test_case_without_append(test_suite, test_case):
    """Register test_case on test_suite without touching the module AST."""
    test_suite.add_test_case_without_append(test_case)
def add_test_case(test_suite, test_case):
    """Splice test_case's code into a Module or TestClass and register it.

    For a Module the code goes before the main snippet (if present), else
    at the end. For a TestClass the method is inserted inside the class
    suite with whitespace adjusted so indentation stays correct. Any other
    suite type raises TypeError.
    """
    if isinstance(test_suite, Module):
        # If the main_snippet exists we have to put the new test case
        # before it. If it doesn't we put the test case at the end.
        main_snippet = code_of(test_suite, 'main_snippet')
        if main_snippet:
            insert_before(main_snippet, test_case.code)
        else:
            code_of(test_suite).append_child(test_case.code)
    elif isinstance(test_suite, TestClass):
        # Append to the right node, so that indentation level of the
        # new method is good.
        if code_of(test_suite).children and is_node_of_type(code_of(test_suite).children[-1], 'suite'):
            remove_trailing_whitespace(test_case.code)
            suite = code_of(test_suite).children[-1]
            # Prefix the definition with the right amount of whitespace.
            node = find_last_leaf(suite.children[-2])
            ident = get_starting_whitespace(suite)
            # There's no need to have extra newlines.
            if node.prefix.endswith("\n"):
                node.prefix += ident.lstrip("\n")
            else:
                node.prefix += ident
            # Insert before the class contents dedent.
            suite.insert_child(-1, test_case.code)
        else:
            code_of(test_suite).append_child(test_case.code)
    else:
        raise TypeError("Tried to add a test case to %r." % test_suite)
    add_test_case_without_append(test_suite, test_case)
    test_suite.mark_as_changed()
def ensure_main_snippet(module, main_snippet, force=False):
    """Guarantee that the module carries the given main snippet.

    Does nothing when no snippet is supplied. An already-present snippet
    is only replaced when the force flag is set.
    """
    if not main_snippet:
        return
    existing = code_of(module, 'main_snippet')
    if existing and not force:
        return
    if existing:
        existing.replace(main_snippet)
    else:
        code_of(module).append_child(main_snippet)
    module.store_reference('main_snippet', main_snippet)
    module.mark_as_changed()
def ensure_imports(test_suite, imports):
    """Add any of `imports` missing from the module owning `test_suite`.

    `test_suite` may be a TestClass (its parent module is used) or a
    Module itself; anything else raises TypeError. Each missing import is
    inserted after the existing import block and the module is marked as
    changed.
    """
    if isinstance(test_suite, TestClass):
        module = test_suite.parent
    elif isinstance(test_suite, Module):
        module = test_suite
    else:
        raise TypeError("Tried to ensure imports on %r." % test_suite)
    for imp in imports:
        if not module.contains_import(imp):
            insert_after_other_imports(module, create_import(imp))
            module.mark_as_changed()
    test_suite.ensure_imports(imports)
def insert_after_other_imports(module, code):
    """Insert *code* (an import node) right after the module's last import.

    If the module has no imports yet, the import is placed at the very
    top, followed by two newlines separating it from the rest of the code.
    """
    last_import = code_of(module, 'last_import')
    if last_import:
        insert_after(last_import, code)
    else:
        # Add 2 extra newlines separating imports from the code.
        # Insertions are at index 0, so the final order is:
        # code, Newline, Newline, <old module contents>.
        code_of(module).insert_child(0, Newline())
        code_of(module).insert_child(0, Newline())
        code_of(module).insert_child(0, code)
    # Just inserted import becomes the last one.
    module.store_reference('last_import', code)
def find_test_class_by_name(project, name):
    """Return the first test case in *project* named *name*, or None."""
    matches = (tc for tc in project.iter_test_cases() if tc.name == name)
    return next(matches, None)
def merge_test_classes(test_class, other_test_class, force):
    """Merge *other_test_class* into *test_class*.

    New methods are added; methods that already exist are replaced only
    when *force* is true, otherwise they are skipped. Each decision is
    logged.
    """
    for method in other_test_class.test_cases:
        existing_test_method = test_class.find_method_by_name(method.name)
        if not existing_test_method:
            log.info("Adding generated %s to %s in %s." % \
                (method.name, test_class.name, test_class.parent.subpath))
            add_test_case(test_class, method)
        elif force:
            log.info("Replacing %s.%s from %s with generated version." % \
                (test_class.name, existing_test_method.name, test_class.parent.subpath))
            replace_test_case(test_class, existing_test_method, method)
        else:
            log.info("Test case %s.%s already exists in %s, skipping." % \
                (test_class.name, existing_test_method.name, test_class.parent.subpath))
def replace_test_case(test_suite, old_test_case, new_test_case):
    """Replace one test case object with another.

    As a side effect, AST of the new test case will replace part of the AST
    in the old test case parent.

    `Code` attribute of the new test case object will be removed.
    """
    # The easiest way to get the new code inside the AST is to call
    # replace() on the old test case code.
    # It is destructive, but since we're discarding the old test case
    # anyway, it doesn't matter.
    code_of(old_test_case).replace(new_test_case.code)
    test_suite.remove_test_case(old_test_case)
    add_test_case_without_append(test_suite, new_test_case)
    test_suite.mark_as_changed()
def find_module_for_test_class(project, test_class):
    """Find the best place for the new test case to be added. If there is
    no such place in existing test modules, a new one will be created.
    """
    existing = find_test_module(project, test_class)
    if existing:
        return existing
    return create_test_module(project, test_class)
def find_test_module(project, test_class):
    """Find test module that will be good for the given test case.

    Tries each application module associated with the test class, first by
    file-name convention and then by existing test-case count.
    """
    for app_module in test_class.associated_modules:
        candidate = (find_associate_test_module_by_name(project, app_module)
                     or find_associate_test_module_by_test_class(project, app_module))
        if candidate:
            return candidate
    return None
def find_associate_test_module_by_name(project, module):
    """Try to find a test module with name corresponding to the name of
    the application module.

    Returns the matching test module, or None if no module in the project
    lives at one of the conventional test paths for *module*.
    """
    possible_paths = possible_test_module_paths(module, project.new_tests_directory)
    # NOTE: the loop variable used to shadow the `module` parameter; it
    # only worked because possible_paths was computed first. Use a
    # distinct name so the parameter stays intact throughout.
    for candidate in project.get_modules():
        if candidate.subpath in possible_paths:
            return candidate
    return None
def find_associate_test_module_by_test_class(project, module):
    """Try to find a test module with most test cases for the given
    application module.
    """
    def count_test_cases(candidate):
        # Ranking function: how many of this module's test cases does the
        # candidate test module already hold?
        return len(candidate.get_test_cases_for_module(module))
    best = max_by_not_zero(count_test_cases, project.get_modules())
    return best if best else None
def test_module_name_for_test_case(test_case):
    """Come up with a name for a test module which will contain given test case.
    """
    # Assuming the test case has at least one associated module, which indeed
    # is a case in current implementation of generator.
    return module_path_to_test_path(test_case.associated_modules[0].subpath)
def create_test_module(project, test_case):
    """Create a new test module for a given test case.

    The module starts with empty code; its name is derived from the test
    case's first associated application module.
    """
    test_name = test_module_name_for_test_case(test_case)
    return project.create_test_module_from_name(test_name, code=EmptyCode())
def module_path_to_test_path(module):
    """Convert a module locator to a proper test filename.

    E.g. a path whose module name is "foo" maps to "test_foo.py".
    """
    return "test_%s.py" % module_path_to_name(module)
def possible_test_module_names(module):
    """Yield file names a test module for *module* may conventionally have.

    Covers both snake_case and capitalized prefix/suffix conventions, in a
    stable order.
    """
    base = module_path_to_name(module.subpath)
    lower_patterns = ["test_%s", "%s_test", "%sTest", "tests_%s", "%s_tests", "%sTests"]
    capital_patterns = ["test%s", "Test%s", "%sTest", "tests%s", "Tests%s", "%sTests"]
    for pattern in lower_patterns:
        yield "%s.py" % (pattern % base)
    for pattern in capital_patterns:
        yield "%s.py" % (pattern % base.capitalize())
def possible_test_module_paths(module, new_tests_directory):
    """Return possible locations of a test module corresponding to given
    application module.
    """
    test_directories = ["", "test", "tests"]
    if new_tests_directory not in test_directories:
        test_directories.append(new_tests_directory)
    # Same ordering as before: all directories for the first candidate
    # name, then all directories for the second, and so on.
    return [os.path.join(directory, name)
            for name in possible_test_module_names(module)
            for directory in test_directories]
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
import socket
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.java_gateway import do_server_auth
from pyspark.taskcontext import TaskContext
from pyspark.files import SparkFiles
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer, ArrowStreamPandasSerializer
from pyspark.sql.types import to_arrow_type
from pyspark.util import _get_argspec, fail_on_stopiteration
from pyspark import shuffle
# Module-level serializers shared by the worker: pickled Python objects
# (commands, accumulator updates) and UTF-8 strings read from the JVM.
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
    """Write the TIMING_DATA marker followed by the boot, init and finish
    timestamps (converted from seconds to milliseconds) to *outfile*."""
    write_int(SpecialLengths.TIMING_DATA, outfile)
    for timestamp in (boot, init, finish):
        write_long(int(1000 * timestamp), outfile)
def add_path(path):
    """Add *path* to sys.path exactly once.

    The worker process can be reused for many tasks, so the same path may
    be offered repeatedly; skip duplicates.
    """
    if path in sys.path:
        return
    # Insert at position 1 so the added path takes precedence over system
    # packages while sys.path[0] stays first.
    sys.path.insert(1, path)
def read_command(serializer, file):
    """Read a serialized command from *file*.

    Large commands may arrive wrapped in a Broadcast; in that case the
    actual command is loaded from the broadcast's value.
    """
    command = serializer._read_with_length(file)
    if isinstance(command, Broadcast):
        command = serializer.loads(command.value)
    return command
def chain(f, g):
    """Compose two functions: the returned callable applies *f* first,
    then feeds its result to *g*."""
    def composed(*args):
        return g(f(*args))
    return composed
def wrap_udf(f, return_type):
    """Wrap a row-at-a-time UDF, converting its result to Spark's internal
    representation when the return type requires it."""
    if not return_type.needConversion():
        return lambda *a: f(*a)
    to_internal = return_type.toInternal
    return lambda *a: to_internal(f(*a))
def wrap_scalar_pandas_udf(f, return_type):
    """Wrap a scalar pandas UDF, validating that it returns a sized result
    of the same length as its input columns."""
    arrow_return_type = to_arrow_type(return_type)

    def checked(*columns):
        series = f(*columns)
        if not hasattr(series, "__len__"):
            raise TypeError("Return type of the user-defined function should be "
                            "Pandas.Series, but is {}".format(type(series)))
        expected = len(columns[0])
        if len(series) != expected:
            raise RuntimeError("Result vector from pandas_udf was not the required length: "
                               "expected %d, got %d" % (expected, len(series)))
        return series

    return lambda *columns: (checked(*columns), arrow_return_type)
def wrap_grouped_map_pandas_udf(f, return_type, argspec):
    """Wrap a grouped-map pandas UDF.

    The wrapped callable receives the grouping key columns and the data
    columns as lists of pandas Series, concatenates the data into a
    DataFrame and calls the user function with either (df) or (key, df)
    depending on its declared arity.
    """
    def wrapped(key_series, value_series):
        import pandas as pd
        if len(argspec.args) == 1:
            result = f(pd.concat(value_series, axis=1))
        elif len(argspec.args) == 2:
            key = tuple(s[0] for s in key_series)
            result = f(key, pd.concat(value_series, axis=1))
        # NOTE(review): if argspec.args has any other length, `result` is
        # unbound here and a NameError follows — presumably arity is
        # validated before registration; confirm upstream.
        if not isinstance(result, pd.DataFrame):
            raise TypeError("Return type of the user-defined function should be "
                            "pandas.DataFrame, but is {}".format(type(result)))
        if not len(result.columns) == len(return_type):
            raise RuntimeError(
                "Number of columns of the returned pandas.DataFrame "
                "doesn't match specified schema. "
                "Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
        # Pair each returned column with its target Arrow type, in schema order.
        arrow_return_types = (to_arrow_type(field.dataType) for field in return_type)
        return [(result[result.columns[i]], arrow_type)
                for i, arrow_type in enumerate(arrow_return_types)]
    return wrapped
def wrap_grouped_agg_pandas_udf(f, return_type):
    """Wrap a grouped-aggregate pandas UDF: the user function returns one
    scalar per group, which is boxed into a single-element Series."""
    arrow_return_type = to_arrow_type(return_type)

    def as_series(*columns):
        import pandas as pd
        return pd.Series([f(*columns)])

    return lambda *columns: (as_series(*columns), arrow_return_type)
def wrap_window_agg_pandas_udf(f, return_type):
    # This is similar to grouped_agg_pandas_udf, the only difference
    # is that window_agg_pandas_udf needs to repeat the return value
    # to match window length, where grouped_agg_pandas_udf just returns
    # the scalar value.
    arrow_return_type = to_arrow_type(return_type)

    def repeated(*columns):
        import pandas as pd
        scalar = f(*columns)
        return pd.Series([scalar]).repeat(len(columns[0]))

    return lambda *columns: (repeated(*columns), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type):
    """Read one UDF from the stream and wrap it according to *eval_type*.

    Returns (arg_offsets, wrapped_function). The stream layout is: number
    of argument offsets, the offsets themselves, number of chained
    functions, then each (function, return_type) command.
    """
    num_arg = read_int(infile)
    arg_offsets = [read_int(infile) for i in range(num_arg)]
    row_func = None
    # Several functions may be chained together, e.g. g(f(x)).
    for i in range(read_int(infile)):
        f, return_type = read_command(pickleSer, infile)
        if row_func is None:
            row_func = f
        else:
            row_func = chain(row_func, f)
    # make sure StopIteration's raised in the user code are not ignored
    # when they are processed in a for loop, raise them as RuntimeError's instead
    func = fail_on_stopiteration(row_func)
    # the last returnType will be the return type of UDF
    if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
        return arg_offsets, wrap_scalar_pandas_udf(func, return_type)
    elif eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
        argspec = _get_argspec(row_func)  # signature was lost when wrapping it
        return arg_offsets, wrap_grouped_map_pandas_udf(func, return_type, argspec)
    elif eval_type == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
        return arg_offsets, wrap_grouped_agg_pandas_udf(func, return_type)
    elif eval_type == PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF:
        return arg_offsets, wrap_window_agg_pandas_udf(func, return_type)
    elif eval_type == PythonEvalType.SQL_BATCHED_UDF:
        return arg_offsets, wrap_udf(func, return_type)
    else:
        raise ValueError("Unknown eval type: {}".format(eval_type))
def read_udfs(pickleSer, infile, eval_type):
    """Read all UDFs for this task and build the evaluation pipeline.

    Returns (func, profiler, deserializer, serializer). Profiling is not
    supported for UDFs, so profiler is always None.
    """
    num_udfs = read_int(infile)
    udfs = {}
    call_udf = []
    mapper_str = ""
    if eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
        # Create function like this:
        # lambda a: f([a[0]], [a[0], a[1]])
        # We assume there is only one UDF here because grouped map doesn't
        # support combining multiple UDFs.
        assert num_udfs == 1
        # See FlatMapGroupsInPandasExec for how arg_offsets are used to
        # distinguish between grouping attributes and data attributes
        arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type)
        udfs['f'] = udf
        split_offset = arg_offsets[0] + 1
        arg0 = ["a[%d]" % o for o in arg_offsets[1: split_offset]]
        arg1 = ["a[%d]" % o for o in arg_offsets[split_offset:]]
        mapper_str = "lambda a: f([%s], [%s])" % (", ".join(arg0), ", ".join(arg1))
    else:
        # Create function like this:
        # lambda a: (f0(a[0]), f1(a[1], a[2]), f2(a[3]))
        # In the special case of a single UDF this will return a single result rather
        # than a tuple of results; this is the format that the JVM side expects.
        for i in range(num_udfs):
            arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type)
            udfs['f%d' % i] = udf
            args = ["a[%d]" % o for o in arg_offsets]
            call_udf.append("f%d(%s)" % (i, ", ".join(args)))
        mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
    # eval() is acceptable here: mapper_str is assembled locally from
    # trusted, generated fragments, never from user data.
    mapper = eval(mapper_str, udfs)
    func = lambda _, it: map(mapper, it)
    if eval_type in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
                     PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
                     PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
                     PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF):
        # Pandas-based UDFs carry a session timezone for Arrow conversion.
        timezone = utf8_deserializer.loads(infile)
        ser = ArrowStreamPandasSerializer(timezone)
    else:
        ser = BatchedSerializer(PickleSerializer(), 100)
    # profiling is not supported for UDF
    return func, None, ser, ser
def main(infile, outfile):
    """Worker main loop: read one task description from *infile*, execute
    it over the input stream, and write results, timing data and
    accumulator updates back through *outfile*.

    The reads below must happen in exactly the order the JVM writes them.
    """
    try:
        boot_time = time.time()
        split_index = read_int(infile)
        if split_index == -1:  # for unit tests
            sys.exit(-1)
        # Fail fast on a driver/worker Python minor-version mismatch.
        version = utf8_deserializer.loads(infile)
        if version != "%d.%d" % sys.version_info[:2]:
            raise Exception(("Python in worker has different version %s than that in " +
                             "driver %s, PySpark cannot run with different minor versions." +
                             "Please check environment variables PYSPARK_PYTHON and " +
                             "PYSPARK_DRIVER_PYTHON are correctly set.") %
                            ("%d.%d" % sys.version_info[:2], version))
        # initialize global state
        taskContext = TaskContext._getOrCreate()
        taskContext._stageId = read_int(infile)
        taskContext._partitionId = read_int(infile)
        taskContext._attemptNumber = read_int(infile)
        taskContext._taskAttemptId = read_long(infile)
        taskContext._localProperties = dict()
        for i in range(read_int(infile)):
            k = utf8_deserializer.loads(infile)
            v = utf8_deserializer.loads(infile)
            taskContext._localProperties[k] = v
        shuffle.MemoryBytesSpilled = 0
        shuffle.DiskBytesSpilled = 0
        _accumulatorRegistry.clear()
        # fetch name of workdir
        spark_files_dir = utf8_deserializer.loads(infile)
        SparkFiles._root_directory = spark_files_dir
        SparkFiles._is_running_on_worker = True
        # fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
        add_path(spark_files_dir)  # *.py files that were added will be copied here
        num_python_includes = read_int(infile)
        for _ in range(num_python_includes):
            filename = utf8_deserializer.loads(infile)
            add_path(os.path.join(spark_files_dir, filename))
        if sys.version > '3':
            import importlib
            importlib.invalidate_caches()
        # fetch names and values of broadcast variables
        num_broadcast_variables = read_int(infile)
        for _ in range(num_broadcast_variables):
            bid = read_long(infile)
            if bid >= 0:
                path = utf8_deserializer.loads(infile)
                _broadcastRegistry[bid] = Broadcast(path=path)
            else:
                # A negative id means the driver removed this broadcast.
                bid = - bid - 1
                _broadcastRegistry.pop(bid)
        _accumulatorRegistry.clear()
        eval_type = read_int(infile)
        if eval_type == PythonEvalType.NON_UDF:
            func, profiler, deserializer, serializer = read_command(pickleSer, infile)
        else:
            func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
        init_time = time.time()
        def process():
            iterator = deserializer.load_stream(infile)
            serializer.dump_stream(func(split_index, iterator), outfile)
        if profiler:
            profiler.profile(process)
        else:
            process()
    except Exception:
        try:
            write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
            write_with_length(traceback.format_exc().encode("utf-8"), outfile)
        except IOError:
            # JVM close the socket
            pass
        except Exception:
            # Write the error to stderr if it happened while serializing
            print("PySpark worker failed with exception:", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
        sys.exit(-1)
    finish_time = time.time()
    report_times(outfile, boot_time, init_time, finish_time)
    write_long(shuffle.MemoryBytesSpilled, outfile)
    write_long(shuffle.DiskBytesSpilled, outfile)
    # Mark the beginning of the accumulators section of the output
    write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
    write_int(len(_accumulatorRegistry), outfile)
    for (aid, accum) in _accumulatorRegistry.items():
        pickleSer._write_with_length((aid, accum._value), outfile)
    # check end of stream
    if read_int(infile) == SpecialLengths.END_OF_STREAM:
        write_int(SpecialLengths.END_OF_STREAM, outfile)
    else:
        # write a different value to tell JVM to not reuse this worker
        write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
        sys.exit(-1)
if __name__ == '__main__':
    # Read information about how to connect back to the JVM from the environment.
    java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
    auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("127.0.0.1", java_port))
    # One buffered file object is shared for both reading and writing.
    sock_file = sock.makefile("rwb", 65536)
    # Authenticate with the JVM before processing any task data.
    do_server_auth(sock_file, auth_secret)
    main(sock_file, sock_file)
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import django.test
from django.urls import reverse
from oslo_serialization import jsonutils
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.network_topology import views
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
JSON_URL = reverse('horizon:project:network_topology:json')
INDEX_URL = reverse('horizon:project:network_topology:index')
class NetworkTopologyTests(test.TestCase):
    """Tests for the network topology JSON endpoint."""
    trans = views.TranslationHelper()
    @test.create_mocks({api.nova: ('server_list',),
                        api.neutron: ('network_list_for_tenant',
                                      'network_list',
                                      'router_list',
                                      'port_list')})
    def test_json_view(self):
        self._test_json_view()
    @django.test.utils.override_settings(
        OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
    @test.create_mocks({api.nova: ('server_list',),
                        api.neutron: ('network_list_for_tenant',
                                      'port_list')})
    def test_json_view_router_disabled(self):
        self._test_json_view(router_enable=False)
    @django.test.utils.override_settings(CONSOLE_TYPE=None)
    @test.create_mocks({api.nova: ('server_list',),
                        api.neutron: ('network_list_for_tenant',
                                      'network_list',
                                      'router_list',
                                      'port_list')})
    def test_json_view_console_disabled(self):
        self._test_json_view(with_console=False)
    def _test_json_view(self, router_enable=True, with_console=True):
        """Fetch the JSON view and verify the servers/routers/networks/ports
        payload against the test fixtures.

        The self.mock_* attributes are provided by the @test.create_mocks
        decorator on the calling test method.
        """
        self.mock_server_list.return_value = [self.servers.list(), False]
        tenant_networks = [net for net in self.networks.list()
                           if not net['router:external']]
        external_networks = [net for net in self.networks.list()
                             if net['router:external']]
        self.mock_network_list_for_tenant.return_value = tenant_networks
        # router1 : gateway port not in the port list
        # router2 : no gateway port
        # router3 : gateway port included in port list
        routers = self.routers.list() + self.routers_with_rules.list()
        if router_enable:
            self.mock_router_list.return_value = routers
            self.mock_network_list.return_value = external_networks
        self.mock_port_list.return_value = self.ports.list()
        res = self.client.get(JSON_URL)
        self.assertEqual('text/json', res['Content-Type'])
        data = jsonutils.loads(res.content)
        # servers
        expect_server_urls = []
        for server in self.servers.list():
            expect_server = {
                'id': server.id,
                'name': server.name,
                'status': server.status.title(),
                'original_status': server.status,
                'task': None,
                'url': '/project/instances/%s/' % server.id
            }
            if server.status != 'BUILD' and with_console:
                expect_server['console'] = 'auto_console'
            expect_server_urls.append(expect_server)
        self.assertEqual(expect_server_urls, data['servers'])
        # routers
        if router_enable:
            expect_router_urls = [
                {'id': router.id,
                 'external_gateway_info':
                 router.external_gateway_info,
                 'name': router.name,
                 'status': router.status.title(),
                 'original_status': router.status,
                 'url': '/project/routers/%s/' % router.id}
                for router in routers]
            self.assertEqual(expect_router_urls, data['routers'])
        else:
            self.assertFalse(data['routers'])
        # networks
        expect_net_urls = []
        if router_enable:
            expect_net_urls += [{
                'id': net.id,
                'url': '/project/networks/%s/detail' % net.id,
                'name': net.name,
                'router:external': net.router__external,
                'status': net.status.title(),
                'original_status': net.status,
                'subnets': [{
                    'cidr': snet.cidr,
                    'id': snet.id,
                    'url': '/project/networks/subnets/%s/detail' % snet.id}
                    for snet in net.subnets]}
                for net in external_networks]
        expect_net_urls.extend([{
            'id': net.id,
            'url': '/project/networks/%s/detail' % net.id,
            'name': net.name,
            'router:external': net.router__external,
            'status': net.status.title(),
            'allow_delete_subnet': True,
            'original_status': net.status,
            'subnets': [{
                'cidr': subnet.cidr,
                'id': subnet.id,
                'url': '/project/networks/subnets/%s/detail' % subnet.id}
                for subnet in net.subnets]}
            for net in tenant_networks])
        for exp_net in expect_net_urls:
            if exp_net['url'] is None:
                del exp_net['url']
        self.assertEqual(expect_net_urls, data['networks'])
        # External networks are only visible when routers are enabled.
        valid_network_ids = [net.id for net in tenant_networks]
        if router_enable:
            valid_network_ids = [net.id for net in self.networks.list()]
        # ports
        expect_port_urls = [
            {'id': port.id,
             'device_id': port.device_id,
             'device_owner': port.device_owner,
             'fixed_ips': port.fixed_ips,
             'network_id': port.network_id,
             'status': port.status.title(),
             'original_status': port.status,
             'url': '/project/networks/ports/%s/detail' % port.id}
            for port in self.ports.list()
            if port.network_id in valid_network_ids]
        if router_enable:
            # fake port for router1 gateway (router1 on ext_net)
            router1 = routers[0]
            ext_net = external_networks[0]
            expect_port_urls.append(
                {'id': 'gateway%s' % ext_net.id,
                 'device_id': router1.id,
                 'network_id': ext_net.id,
                 'fixed_ips': []})
        self.assertEqual(expect_port_urls, data['ports'])
        self.mock_server_list.assert_called_once_with(
            test.IsHttpRequest())
        self.mock_network_list_for_tenant.assert_called_once_with(
            test.IsHttpRequest(), self.tenant.id,
            include_pre_auto_allocate=False)
        if router_enable:
            self.mock_router_list.assert_called_once_with(
                test.IsHttpRequest(), tenant_id=self.tenant.id)
            self.mock_network_list.assert_called_once_with(
                test.IsHttpRequest(), **{'router:external': True})
        self.mock_port_list.assert_called_once_with(
            test.IsHttpRequest())
class NetworkTopologyCreateTests(test.TestCase):
    """Tests that topology-page create buttons are disabled on quota limits."""
    def _test_new_button_disabled_when_quota_exceeded(
            self, expected_string,
            networks_quota=10, routers_quota=10, instances_quota=10):
        # self.mock_tenant_quota_usages is provided by the
        # @test.create_mocks decorator on the calling test method.
        quota_data = self.quota_usages.first()
        quota_data['network']['available'] = networks_quota
        quota_data['router']['available'] = routers_quota
        quota_data['instances']['available'] = instances_quota
        self.mock_tenant_quota_usages.return_value = quota_data
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/network_topology/index.html')
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The create button is not disabled")
        # Each of the three create buttons checks all three quota targets.
        self.mock_tenant_quota_usages.assert_has_calls([
            mock.call(test.IsHttpRequest(), targets=('instances', )),
            mock.call(test.IsHttpRequest(), targets=('network', )),
            mock.call(test.IsHttpRequest(), targets=('router', )),
        ] * 3)
    @test.create_mocks({quotas: ('tenant_quota_usages',)})
    def test_create_network_button_disabled_when_quota_exceeded(self):
        url = reverse('horizon:project:network_topology:createnetwork')
        classes = 'btn btn-default ajax-modal'
        link_name = "Create Network (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='networks__action_create'>" \
            "<span class='fa fa-plus'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(expected_string,
                                                           networks_quota=0)
    @test.create_mocks({quotas: ('tenant_quota_usages',)})
    def test_create_router_button_disabled_when_quota_exceeded(self):
        url = reverse('horizon:project:network_topology:createrouter')
        classes = 'btn btn-default ajax-modal'
        link_name = "Create Router (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='Routers__action_create'>" \
            "<span class='fa fa-plus'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(expected_string,
                                                           routers_quota=0)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.contrib.rnn.python.ops import rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class RNNCellTest(test.TestCase):
  def testCoupledInputForgetGateLSTMCell(self):
    """Smoke test: CIFG-LSTM output/state match previously recorded values."""
    with self.test_session() as sess:
      num_units = 2
      state_size = num_units * 2
      batch_size = 3
      input_size = 4
      expected_output = np.array(
          [[0.121753, 0.121753],
           [0.103349, 0.103349],
           [0.100178, 0.100178]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.137523, 0.137523, 0.121753, 0.121753],
           [0.105450, 0.105450, 0.103349, 0.103349],
           [0.100742, 0.100742, 0.100178, 0.100178]],
          dtype=np.float32)
      # Constant initializer makes the run deterministic.
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([batch_size, input_size])
        m = array_ops.zeros([batch_size, state_size])
        output, state = rnn_cell.CoupledInputForgetGateLSTMCell(
            num_units=num_units, forget_bias=1.0)(x, m)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state], {
            x.name:
                np.array([[1., 1., 1., 1.],
                          [2., 2., 2., 2.],
                          [3., 3., 3., 3.]]),
            m.name:
                0.1 * np.ones((batch_size, state_size))
        })
        # This is a smoke test: Only making sure expected values didn't change.
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_output)
        self.assertAllClose(res[1], expected_state)
  def testTimeFreqLSTMCell(self):
    """Smoke test: TimeFreqLSTMCell output/state shapes and per-batch variation."""
    with self.test_session() as sess:
      num_units = 8
      state_size = num_units * 2
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      # Number of frequency shifts the cell unrolls over the input.
      num_shifts = (input_size - feature_size) // frequency_skip + 1
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([batch_size, input_size])
        m = array_ops.zeros([batch_size, state_size * num_shifts])
        output, state = rnn_cell.TimeFreqLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            frequency_skip=frequency_skip,
            forget_bias=1.0)(x, m)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state], {
            x.name:
                np.array([[1., 1., 1., 1.],
                          [2., 2., 2., 2.],
                          [3., 3., 3., 3.]]),
            m.name:
                0.1 * np.ones((batch_size, int(state_size * (num_shifts))))
        })
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts))
        self.assertEqual(res[1].shape, (batch_size, state_size * num_shifts))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
          self.assertTrue(
              float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
          self.assertTrue(
              float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
  def testGridLSTMCell(self):
    """Smoke test: GridLSTMCell (tuple state) output/state shapes and variation."""
    with self.test_session() as sess:
      num_units = 8
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.GridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=[num_shifts],
            couple_input_forget_gates=True,
            state_is_tuple=True)
        inputs = constant_op.constant(
            np.array(
                [[1., 1., 1., 1.],
                 [2., 2., 2., 2.],
                 [3., 3., 3., 3.]],
                dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        # Tuple state: one (c, m) pair of tensors per frequency shift.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2))
        for ss in res[1]:
          self.assertEqual(ss.shape, (batch_size, num_units))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
          self.assertTrue(
              float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
          self.assertTrue(
              float(
                  np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
                                  .state_f00_b00_c[i, :]))) > 1e-6)
  def testGridLSTMCellWithFrequencyBlocks(self):
    """Smoke test: GridLSTMCell with explicit start/end frequency blocks."""
    with self.test_session() as sess:
      num_units = 8
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      num_frequency_blocks = [1, 1]
      total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1]
      # Two frequency blocks covering [0, 2) and [2, 4) of the input.
      start_freqindex_list = [0, 2]
      end_freqindex_list = [2, 4]
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.GridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=num_frequency_blocks,
            start_freqindex_list=start_freqindex_list,
            end_freqindex_list=end_freqindex_list,
            couple_input_forget_gates=True,
            state_is_tuple=True)
        inputs = constant_op.constant(
            np.array(
                [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
                dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * total_blocks))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape,
                         (batch_size, num_units * total_blocks * 2))
        for ss in res[1]:
          self.assertEqual(ss.shape, (batch_size, num_units))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
          self.assertTrue(
              float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
          self.assertTrue(
              float(
                  np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
                                  .state_f00_b00_c[i, :]))) > 1e-6)
  def testGridLstmCellWithCoupledInputForgetGates(self):
    """Checks GridLSTMCell with coupled input/forget gates against goldens.

    Runs both the tuple-state and concatenated-state variants and verifies
    they produce the same pre-computed output/state values.
    """
    num_units = 2
    batch_size = 3
    input_size = 4
    feature_size = 2
    frequency_skip = 1
    # Number of frequency shifts the cell slides over the input.
    num_shifts = int((input_size - feature_size) / frequency_skip + 1)
    expected_output = np.array(
        [[0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020,
          0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699],
         [0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342,
          0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171],
         [0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533,
          0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250]],
        dtype=np.float32)
    expected_state = np.array(
        [[0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134,
          0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865],
         [0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432,
          0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245],
         [0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522,
          0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390]],
        dtype=np.float32)
    for state_is_tuple in [False, True]:
      with self.test_session() as sess:
        with variable_scope.variable_scope(
            "state_is_tuple" + str(state_is_tuple),
            initializer=init_ops.constant_initializer(0.5)):
          cell = rnn_cell.GridLSTMCell(
              num_units=num_units,
              feature_size=feature_size,
              frequency_skip=frequency_skip,
              forget_bias=1.0,
              num_frequency_blocks=[num_shifts],
              couple_input_forget_gates=True,
              state_is_tuple=state_is_tuple)
          inputs = constant_op.constant(
              np.array([[1., 1., 1., 1.],
                        [2., 2., 2., 2.],
                        [3., 3., 3., 3.]],
                       dtype=np.float32),
              dtype=dtypes.float32)
          if state_is_tuple:
            state_value = constant_op.constant(
                0.1 * np.ones(
                    (batch_size, num_units), dtype=np.float32),
                dtype=dtypes.float32)
            init_state = cell.state_tuple_type(
                *([state_value, state_value] * num_shifts))
          else:
            # Concatenated representation of the same initial state.
            init_state = constant_op.constant(
                0.1 * np.ones(
                    (batch_size, num_units * num_shifts * 2), dtype=np.float32),
                dtype=dtypes.float32)
          output, state = cell(inputs, init_state)
          sess.run([variables.global_variables_initializer()])
          res = sess.run([output, state])
          # This is a smoke test: Only making sure expected values not change.
          self.assertEqual(len(res), 2)
          self.assertAllClose(res[0], expected_output)
          if not state_is_tuple:
            self.assertAllClose(res[1], expected_state)
          else:
            # There should be num_shifts * 2 states in the tuple.
            self.assertEqual(len(res[1]), num_shifts * 2)
            # Checking the shape of each state to be batch_size * num_units
            for ss in res[1]:
              self.assertEqual(ss.shape[0], batch_size)
              self.assertEqual(ss.shape[1], num_units)
            self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testBidirectionGridLSTMCell(self):
    """Checks BidirectionalGridLSTMCell output/state against golden values.

    Forward and backward directions share time/frequency weights; outputs
    therefore contain num_shifts * 4 blocks (2 directions x 2 halves).
    """
    with self.test_session() as sess:
      num_units = 2
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      expected_output = np.array(
          [[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
            0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
            0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341,
            0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218],
           [0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
            0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
            0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517,
            0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840],
           [0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
            0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
            0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552,
            0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
            0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
            0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836,
            0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773],
           [0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
            0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
            0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288,
            0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984],
           [1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
            0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
            1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101,
            0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035]],
          dtype=np.float32)
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.BidirectionalGridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            share_time_frequency_weights=True,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=[num_shifts])
        inputs = constant_op.constant(
            np.array([[1.0, 1.1, 1.2, 1.3],
                      [2.0, 2.1, 2.2, 2.3],
                      [3.0, 3.1, 3.2, 3.3]],
                     dtype=np.float32),
            dtype=dtypes.float32)
        # Identical 0.1 constant for every state slot of both directions.
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts * 2))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
        self.assertAllClose(res[0], expected_output)
        # There should be num_shifts * 4 states in the tuple.
        self.assertEqual(len(res[1]), num_shifts * 4)
        # Checking the shape of each state to be batch_size * num_units
        for ss in res[1]:
          self.assertEqual(ss.shape[0], batch_size)
          self.assertEqual(ss.shape[1], num_units)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testBidirectionGridLSTMCellWithSliceOffset(self):
    """Like testBidirectionGridLSTMCell, but with backward_slice_offset=1.

    The forward-direction goldens match the unshifted test; the backward
    halves differ because the backward pass slices the input with an offset.
    """
    with self.test_session() as sess:
      num_units = 2
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      expected_output = np.array(
          [[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
            0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
            0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654,
            0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071],
           [0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
            0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
            0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828,
            0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958],
           [0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
            0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
            0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345,
            0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
            0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
            0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628,
            0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446],
           [0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
            0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
            0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488,
            0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429],
           [1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
            0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
            0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978,
            0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597]],
          dtype=np.float32)
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell = rnn_cell.BidirectionalGridLSTMCell(
            num_units=num_units,
            feature_size=feature_size,
            share_time_frequency_weights=True,
            frequency_skip=frequency_skip,
            forget_bias=1.0,
            num_frequency_blocks=[num_shifts],
            backward_slice_offset=1)
        inputs = constant_op.constant(
            np.array([[1.0, 1.1, 1.2, 1.3],
                      [2.0, 2.1, 2.2, 2.3],
                      [3.0, 3.1, 3.2, 3.3]],
                     dtype=np.float32),
            dtype=dtypes.float32)
        state_value = constant_op.constant(
            0.1 * np.ones(
                (batch_size, num_units), dtype=np.float32),
            dtype=dtypes.float32)
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts * 2))
        output, state = cell(inputs, init_state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
        self.assertAllClose(res[0], expected_output)
        # There should be num_shifts * 4 states in the tuple.
        self.assertEqual(len(res[1]), num_shifts * 4)
        # Checking the shape of each state to be batch_size * num_units
        for ss in res[1]:
          self.assertEqual(ss.shape[0], batch_size)
          self.assertEqual(ss.shape[1], num_units)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testAttentionCellWrapperFailures(self):
with self.assertRaisesRegexp(TypeError,
"The parameter cell is not RNNCell."):
rnn_cell.AttentionCellWrapper(None, 0)
num_units = 8
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got 0"):
rnn_cell.AttentionCellWrapper(
lstm_cell, 0, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got -1"):
rnn_cell.AttentionCellWrapper(
lstm_cell, -1, state_is_tuple=state_is_tuple)
with ops.Graph().as_default():
lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=True)
with self.assertRaisesRegexp(
ValueError, "Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: *"):
rnn_cell.AttentionCellWrapper(lstm_cell, 4, state_is_tuple=False)
def testAttentionCellWrapperZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.test_session() as sess:
with variable_scope.variable_scope("state_is_tuple_" + str(
state_is_tuple)):
lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = array_ops.zeros([batch_size, num_units], dtype=np.float32)
attn_state_zeros = array_ops.zeros(
[batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = array_ops.zeros(
[
batch_size,
num_units * 2 + attn_length * num_units + num_units
],
dtype=np.float32)
inputs = array_ops.zeros(
[batch_size, input_size], dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
self.assertEquals(output.get_shape(), [batch_size, num_units])
if state_is_tuple:
self.assertEquals(len(state), 3)
self.assertEquals(len(state[0]), 2)
self.assertEquals(state[0][0].get_shape(),
[batch_size, num_units])
self.assertEquals(state[0][1].get_shape(),
[batch_size, num_units])
self.assertEquals(state[1].get_shape(), [batch_size, num_units])
self.assertEquals(state[2].get_shape(),
[batch_size, attn_length * num_units])
tensors = [output] + list(state)
else:
self.assertEquals(state.get_shape(), [
batch_size,
num_units * 2 + num_units + attn_length * num_units
])
tensors = [output, state]
zero_result = sum(
[math_ops.reduce_sum(math_ops.abs(x)) for x in tensors])
sess.run(variables.global_variables_initializer())
self.assertTrue(sess.run(zero_result) < 1e-6)
  def testAttentionCellWrapperValues(self):
    """Different batch entries must produce different outputs and states."""
    num_units = 8
    attn_length = 16
    batch_size = 3
    for state_is_tuple in [False, True]:
      with ops.Graph().as_default():
        with self.test_session() as sess:
          with variable_scope.variable_scope("state_is_tuple_" + str(
              state_is_tuple)):
            lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
                num_units, state_is_tuple=state_is_tuple)
            cell = rnn_cell.AttentionCellWrapper(
                lstm_cell, attn_length, state_is_tuple=state_is_tuple)
            if state_is_tuple:
              # Constant non-zero initial state, tuple layout.
              zeros = constant_op.constant(
                  0.1 * np.ones(
                      [batch_size, num_units], dtype=np.float32),
                  dtype=dtypes.float32)
              attn_state_zeros = constant_op.constant(
                  0.1 * np.ones(
                      [batch_size, attn_length * num_units], dtype=np.float32),
                  dtype=dtypes.float32)
              zero_state = ((zeros, zeros), zeros, attn_state_zeros)
            else:
              # Same initial state in concatenated layout.
              zero_state = constant_op.constant(
                  0.1 * np.ones(
                      [
                          batch_size,
                          num_units * 2 + num_units + attn_length * num_units
                      ],
                      dtype=np.float32),
                  dtype=dtypes.float32)
            inputs = constant_op.constant(
                np.array(
                    [[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
                    dtype=np.float32),
                dtype=dtypes.float32)
            output, state = cell(inputs, zero_state)
            if state_is_tuple:
              concat_state = array_ops.concat(
                  [state[0][0], state[0][1], state[1], state[2]], 1)
            else:
              concat_state = state
            sess.run(variables.global_variables_initializer())
            output, state = sess.run([output, concat_state])
            # Different inputs so different outputs and states
            for i in range(1, batch_size):
              self.assertTrue(
                  float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
              self.assertTrue(
                  float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
  def testAttentionCellWrapperCorrectResult(self):
    """Checks AttentionCellWrapper output/state against fixed golden values.

    Uses fixed random seeds so the random initial state and inputs are
    reproducible; tuple and concatenated state variants share variables via
    reuse and must agree with the same goldens.
    """
    num_units = 4
    attn_length = 6
    batch_size = 2
    expected_output = np.array(
        [[1.068372, 0.45496, -0.678277, 0.340538],
         [1.018088, 0.378983, -0.572179, 0.268591]],
        dtype=np.float32)
    expected_state = np.array(
        [[0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962,
          0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077,
          0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536,
          0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
          0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
          0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
          0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152,
          0.51843399],
         [0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637,
          0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857,
          0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689,
          0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
          0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
          0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
          0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457,
          0.70582712]],
        dtype=np.float32)
    seed = 12345
    random_seed.set_random_seed(seed)
    for state_is_tuple in [False, True]:
      with session.Session() as sess:
        with variable_scope.variable_scope(
            "state_is_tuple", reuse=state_is_tuple,
            initializer=init_ops.glorot_uniform_initializer()):
          lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
              num_units, state_is_tuple=state_is_tuple)
          cell = rnn_cell.AttentionCellWrapper(
              lstm_cell, attn_length, state_is_tuple=state_is_tuple)
          # Deterministic pseudo-random initial state components.
          zeros1 = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 1)
          zeros2 = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 2)
          zeros3 = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 3)
          attn_state_zeros = random_ops.random_uniform(
              (batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
          zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
          if not state_is_tuple:
            zero_state = array_ops.concat([
                zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
            ], 1)
          inputs = random_ops.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 5)
          output, state = cell(inputs, zero_state)
          if state_is_tuple:
            state = array_ops.concat(
                [state[0][0], state[0][1], state[1], state[2]], 1)
          sess.run(variables.global_variables_initializer())
          self.assertAllClose(sess.run(output), expected_output)
          self.assertAllClose(sess.run(state), expected_state)
class LayerNormBasicLSTMCellTest(test.TestCase):
  """Unit tests for LayerNormBasicLSTMCell."""
  # NOTE: all the values in the current test case have been calculated.
  def testBasicLSTMCell(self):
    """Two stacked cells, then input_size != num_units, vs. golden values."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        c0 = array_ops.zeros([1, 2])
        h0 = array_ops.zeros([1, 2])
        state0 = core_rnn_cell_impl.LSTMStateTuple(c0, h0)
        c1 = array_ops.zeros([1, 2])
        h1 = array_ops.zeros([1, 2])
        state1 = core_rnn_cell_impl.LSTMStateTuple(c1, h1)
        state = (state0, state1)
        single_cell = lambda: rnn_cell.LayerNormBasicLSTMCell(2)
        cell = core_rnn_cell_impl.MultiRNNCell([single_cell() for _ in range(2)])
        g, out_m = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        # Feed concrete values through the placeholders' tensor names.
        res = sess.run([g, out_m], {
            x.name: np.array([[1., 1.]]),
            c0.name: 0.1 * np.asarray([[0, 1]]),
            h0.name: 0.1 * np.asarray([[2, 3]]),
            c1.name: 0.1 * np.asarray([[4, 5]]),
            h1.name: 0.1 * np.asarray([[6, 7]]),
        })
        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_state0_c = np.array([[-1.0, 1.0]])
        expected_state0_h = np.array([[-0.38079708, 0.38079708]])
        expected_state1_c = np.array([[-1.0, 1.0]])
        expected_state1_h = np.array([[-0.38079708, 0.38079708]])
        actual_h = res[0]
        actual_state0_c = res[1][0].c
        actual_state0_h = res[1][0].h
        actual_state1_c = res[1][1].c
        actual_state1_h = res[1][1].h
        self.assertAllClose(actual_h, expected_h, 1e-5)
        self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
        self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
        self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
        self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros(
            [1, 3])  # Test BasicLSTMCell with input_size != num_units.
        c = array_ops.zeros([1, 2])
        h = array_ops.zeros([1, 2])
        state = core_rnn_cell_impl.LSTMStateTuple(c, h)
        cell = rnn_cell.LayerNormBasicLSTMCell(2)
        g, out_m = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, out_m], {
            x.name: np.array([[1., 1., 1.]]),
            c.name: 0.1 * np.asarray([[0, 1]]),
            h.name: 0.1 * np.asarray([[2, 3]]),
        })
        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_c = np.array([[-1.0, 1.0]])
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c, 1e-5)
        self.assertAllClose(res[1].h, expected_h, 1e-5)
  def testBasicLSTMCellWithStateTuple(self):
    """Same stacked-cell computation, checking the per-layer state tuples."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        c0 = array_ops.zeros([1, 2])
        h0 = array_ops.zeros([1, 2])
        state0 = core_rnn_cell_impl.LSTMStateTuple(c0, h0)
        c1 = array_ops.zeros([1, 2])
        h1 = array_ops.zeros([1, 2])
        state1 = core_rnn_cell_impl.LSTMStateTuple(c1, h1)
        def single_cell():
          return rnn_cell.LayerNormBasicLSTMCell(2)
        cell = core_rnn_cell_impl.MultiRNNCell(
            [single_cell() for _ in range(2)])
        h, (s0, s1) = cell(x, (state0, state1))
        sess.run([variables.global_variables_initializer()])
        res = sess.run([h, s0, s1], {
            x.name: np.array([[1., 1.]]),
            c0.name: 0.1 * np.asarray([[0, 1]]),
            h0.name: 0.1 * np.asarray([[2, 3]]),
            c1.name: 0.1 * np.asarray([[4, 5]]),
            h1.name: 0.1 * np.asarray([[6, 7]]),
        })
        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_h0 = np.array([[-0.38079708, 0.38079708]])
        expected_c0 = np.array([[-1.0, 1.0]])
        expected_h1 = np.array([[-0.38079708, 0.38079708]])
        expected_c1 = np.array([[-1.0, 1.0]])
        self.assertEqual(len(res), 3)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c0, 1e-5)
        self.assertAllClose(res[1].h, expected_h0, 1e-5)
        self.assertAllClose(res[2].c, expected_c1, 1e-5)
        self.assertAllClose(res[2].h, expected_h1, 1e-5)
  def testBasicLSTMCellWithDropout(self):
    """Recurrent dropout: each c element must be either kept or dropped."""
    # Approximate scalar comparison helpers local to this test.
    def _is_close(x, y, digits=4):
      delta = x - y
      return delta < 10**(-digits)
    def _is_close_in(x, items, digits=4):
      for i in items:
        if _is_close(x, i, digits):
          return True
      return False
    keep_prob = 0.5
    c_high = 2.9998924946
    c_low = 0.999983298578
    h_low = 0.761552567265
    h_high = 0.995008519604
    num_units = 5
    allowed_low = [2, 3]
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(1)):
        x = array_ops.zeros([1, 5])
        c = array_ops.zeros([1, 5])
        h = array_ops.zeros([1, 5])
        state = core_rnn_cell_impl.LSTMStateTuple(c, h)
        cell = rnn_cell.LayerNormBasicLSTMCell(
            num_units, layer_norm=False, dropout_keep_prob=keep_prob)
        g, s = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, s], {
            x.name: np.ones([1, 5]),
            c.name: np.ones([1, 5]),
            h.name: np.ones([1, 5]),
        })
        # Since the returned tensors are of size [1,n]
        # get the first component right now.
        actual_h = res[0][0]
        actual_state_c = res[1].c[0]
        actual_state_h = res[1].h[0]
        # For each item in `c` (the cell inner state) check that
        # it is equal to one of the allowed values `c_high` (not
        # dropped out) or `c_low` (dropped out) and verify that the
        # corresponding item in `h` (the cell activation) is coherent.
        # Count the dropped activations and check that their number is
        # coherent with the dropout probability.
        dropped_count = 0
        self.assertTrue((actual_h == actual_state_h).all())
        for citem, hitem in zip(actual_state_c, actual_state_h):
          self.assertTrue(_is_close_in(citem, [c_low, c_high]))
          if _is_close(citem, c_low):
            self.assertTrue(_is_close(hitem, h_low))
            dropped_count += 1
          elif _is_close(citem, c_high):
            self.assertTrue(_is_close(hitem, h_high))
        self.assertIn(dropped_count, allowed_low)
def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth,
                                num_layers, max_time, compiled):
  """Builds a multi-layer LSTM dynamic_rnn graph plus gradient ops.

  Args:
    batch_size: Batch dimension of the random input.
    num_units: Units per LSTM layer.
    input_depth: Feature dimension of the input.
    num_layers: Number of stacked LSTM layers.
    max_time: Number of time steps (inputs are time-major).
    compiled: If True, each layer is wrapped in rnn_cell.CompiledWrapper
      so it runs through XLA.

  Returns:
    Dict with "outputs", "final_state" (flattened), "outputs_grad" and
    "final_state_grad" tensors, suitable for comparing compiled vs.
    non-compiled execution.
  """
  with variable_scope.variable_scope(
      "root",
      initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
    # Fixed seeds keep the graph reproducible across calls.
    inputs = variable_scope.get_variable(
        "inputs", initializer=random_ops.random_uniform(
            (max_time, batch_size, input_depth), seed=1))
    maybe_xla = lambda c: rnn_cell.CompiledWrapper(c) if compiled else c
    cell = core_rnn_cell_impl.MultiRNNCell(
        [maybe_xla(core_rnn_cell_impl.LSTMCell(num_units))
         for _ in range(num_layers)])
    initial_state = cell.zero_state(
        batch_size=batch_size, dtype=dtypes.float32)
    outputs, final_state = rnn.dynamic_rnn(
        cell=cell, inputs=inputs, initial_state=initial_state,
        time_major=True)
    flat_final_state = nest.flatten(final_state)
    trainable_variables = variables.trainable_variables()
    # Gradients w.r.t. weights, inputs, and the initial state.
    outputs_grad = gradients_impl.gradients(
        [outputs],
        trainable_variables + [inputs] + nest.flatten(initial_state))
    final_state_grad = gradients_impl.gradients(
        flat_final_state,
        trainable_variables + [inputs] + nest.flatten(initial_state))
    return {"outputs": outputs,
            "final_state": flat_final_state,
            "outputs_grad": outputs_grad,
            "final_state_grad": final_state_grad}
class CompiledWrapperTest(test.TestCase):
  """Tests for rnn_cell.CompiledWrapper and MultiRNNCell state handling."""
  def testMultiRNNCellWithLSTMCellAndXLA(self):
    """XLA-compiled and uncompiled LSTM stacks must produce the same values."""
    # TODO(b/34735319): Don't run this test if XLA is not available.
    batch_size = 16
    num_units = 32
    input_depth = 12
    num_layers = 2
    max_time = 20
    atol = 1e-6
    # Same seed before each graph so both runs see identical randomness.
    random_seed.set_random_seed(1234)
    with self.test_session(graph=ops.Graph()) as sess:
      xla_ops = _create_multi_lstm_cell_ops(
          batch_size=batch_size, num_units=num_units,
          input_depth=input_depth, num_layers=num_layers,
          max_time=max_time,
          compiled=True)
      sess.run([variables.global_variables_initializer()])
      xla_results = sess.run(xla_ops)
    random_seed.set_random_seed(1234)
    with self.test_session(graph=ops.Graph()) as sess:
      non_xla_ops = _create_multi_lstm_cell_ops(
          batch_size=batch_size, num_units=num_units,
          input_depth=input_depth, num_layers=num_layers,
          max_time=max_time,
          compiled=False)
      sess.run([variables.global_variables_initializer()])
      non_xla_results = sess.run(non_xla_ops)
    # Outputs, final states, and all gradients must agree within atol.
    self.assertAllClose(
        non_xla_results["outputs"], xla_results["outputs"], atol=atol)
    for xla_value, non_xla_value in zip(
        xla_results["final_state"], non_xla_results["final_state"]):
      self.assertAllClose(xla_value, non_xla_value, atol=atol)
    for xla_g, non_xla_g in zip(
        xla_results["outputs_grad"], non_xla_results["outputs_grad"]):
      self.assertAllClose(xla_g, non_xla_g, atol=atol)
    for xla_g, non_xla_g in zip(
        xla_results["final_state_grad"], non_xla_results["final_state_grad"]):
      self.assertAllClose(xla_g, non_xla_g, atol=atol)
  def testMultiRNNCellWithStateTuple(self):
    """MultiRNNCell rejects non-tuple state and accepts a proper tuple."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        m_bad = array_ops.zeros([1, 4])
        m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
        # Test incorrectness of state
        with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
          core_rnn_cell_impl.MultiRNNCell(
              [core_rnn_cell_impl.GRUCell(2) for _ in range(2)],
              state_is_tuple=True)(x, m_bad)
        _, ml = core_rnn_cell_impl.MultiRNNCell(
            [core_rnn_cell_impl.GRUCell(2) for _ in range(2)],
            state_is_tuple=True)(x, m_good)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(ml, {
            x.name: np.array([[1., 1.]]),
            m_good[0].name: np.array([[0.1, 0.1]]),
            m_good[1].name: np.array([[0.1, 0.1]])
        })
        # The numbers in results were not calculated, this is just a
        # smoke test. However, these numbers should match those of
        # the test testMultiRNNCell.
        self.assertAllClose(res[0], [[0.175991, 0.175991]])
        self.assertAllClose(res[1], [[0.13248, 0.13248]])
class BenchmarkLSTMCellXLA(test.Benchmark):
  """Benchmarks dynamic_rnn with stacked LSTM cells, with and without XLA."""
  def benchmarkDynamicRNNWithMultiLSTMCell(self):
    """Sweeps thread configs, devices, sizes, and XLA on/off; prints a table."""
    num_layers = 3
    max_time = 50
    print("benchmarkDynamicRNNWithMultiLSTMCell")
    print("\t" +
          "\t".join(["inter_th", "intra_th",
                     "batch_size", "num_units", "input_depth", "device",
                     "compiled", "wall_time"]))
    warmup_run = True
    # Cartesian product over all benchmark dimensions.
    for (threads,
         device,
         num_units,
         batch_size,
         input_depth,
         compiled) in itertools.product(
             [{"inter": 0, "intra": 0}, {"inter": 1, "intra": 4}],
             ["cpu", "gpu"],
             [32, 512],
             [1, 32, 256],
             [32, 512],
             [False, True]):
      if threads["inter"] != 0:
        # We only care about testing inter/intra op limitations on
        # CPU with small batch size, to mimic embedded devices.
        if device != "cpu" or batch_size != 1:
          continue
      # Skip large CPU batches to keep the benchmark runtime bounded.
      if device == "cpu" and batch_size > 32:
        continue
      random_seed.set_random_seed(1234)
      config = config_pb2.ConfigProto(
          inter_op_parallelism_threads=threads["inter"],
          intra_op_parallelism_threads=threads["intra"],
          allow_soft_placement=False)
      with session.Session(config=config, graph=ops.Graph()) as sess:
        with ops.device("/%s:0" % device):
          ops_dict = _create_multi_lstm_cell_ops(
              batch_size=batch_size, num_units=num_units,
              input_depth=input_depth, num_layers=num_layers,
              max_time=max_time,
              compiled=compiled)
        sess.run([variables.global_variables_initializer()])
        all_ops = nest.flatten(ops_dict.values())
        all_ops_group = control_flow_ops.group(*all_ops)
        name_suffix = (
            "inter_th_%d_intra_th_%d_bs_%d_units_%d_inputdepth_%d"
            "_device_%s_xla_%s" % (
                threads["inter"], threads["intra"],
                batch_size, num_units, input_depth, device, compiled))
        # One discarded warmup measurement before the first real run.
        if warmup_run:
          self.run_op_benchmark(
              sess, all_ops_group, min_iters=30, name="ignore_warmup")
          warmup_run = False
        benchmark_results = self.run_op_benchmark(
            sess, all_ops_group, min_iters=50,
            name="benchmarkDynamicRNNWithMultiLSTMCell_%s" % name_suffix)
        print("\t" +
              "\t".join(["%s" % x for x in [
                  threads["inter"], threads["intra"],
                  batch_size, num_units, input_depth, device, compiled,
                  benchmark_results["wall_time"]]]))
if __name__ == "__main__":
  # Standard TensorFlow test entry point: runs all tests in this file.
  test.main()
| |
## @file
# This file is used to define common string related functions used in parsing process
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import re
from . import DataType
import Common.LongFilePathOs as os
import string
from . import EdkLogger as EdkLogger
from . import GlobalData
from .BuildToolError import *
from CommonDataClass.Exceptions import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
# Matches a hexadecimal version value with exactly 8 hex digits after the
# "0x" prefix (e.g. 0x00010019); case-insensitive, anchored at the end.
gHexVerPatt = re.compile('0x[a-f0-9]{4}[a-f0-9]{4}$', re.IGNORECASE)
# Matches a human readable <major>.<minor> version such as "1.02"; the major
# part has no leading zeros and the minor part is one or two digits.
gHumanReadableVerPatt = re.compile(r'([1-9][0-9]*|0)\.[0-9]{1,2}$')
## GetSplitValueList
#
# Get a value list from a string with multiple values split with SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
# Splitters inside single/double quoted strings or parentheses are ignored.
#
# @param String:    The input string to be split
# @param SplitTag:  The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit:  The max number of split values, default is -1 (no limit)
#
# @retval list() A list for the split string
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
    ValueList = []
    Last = 0
    Escaped = False
    InSingleQuoteString = False
    InDoubleQuoteString = False
    InParenthesis = 0
    for Index in range(0, len(String)):
        Char = String[Index]
        if not Escaped:
            # Found a splitter not in a string, split it
            # BUGFIX: the original condition used 'or' between the two negated
            # quote flags; since the flags are mutually exclusive that test was
            # always True, so splitters inside quoted strings were split too.
            if (not InSingleQuoteString and not InDoubleQuoteString) and InParenthesis == 0 and Char == SplitTag:
                ValueList.append(String[Last:Index].strip())
                Last = Index + 1
                if MaxSplit > 0 and len(ValueList) >= MaxSplit:
                    break
            if Char == '\\' and (InSingleQuoteString or InDoubleQuoteString):
                # Next character is escaped and must not toggle quote state.
                Escaped = True
            elif Char == '"' and not InSingleQuoteString:
                InDoubleQuoteString = not InDoubleQuoteString
            elif Char == "'" and not InDoubleQuoteString:
                InSingleQuoteString = not InSingleQuoteString
            elif Char == '(':
                InParenthesis = InParenthesis + 1
            elif Char == ')':
                InParenthesis = InParenthesis - 1
        else:
            Escaped = False
    # Append the remainder (or an empty field if the string ends on a splitter).
    if Last < len(String):
        ValueList.append(String[Last:].strip())
    elif Last == len(String):
        ValueList.append('')
    return ValueList
## GetSplitList
#
# Get a value list from a string with multiple values split with SplitStr
# The default SplitStr is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String:    The input string to be split
# @param SplitStr:  The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit:  The max number of split values, default is -1 (no limit)
#
# @retval list() A list for the split string
#
def GetSplitList(String, SplitStr=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
    # Use a list comprehension instead of map(): under Python 3 map() returns
    # a one-shot iterator, while the documented contract (and callers that
    # index or iterate the result more than once) expect a real list.
    return [Item.strip() for Item in String.split(SplitStr, MaxSplit)]
## MergeArches
#
# Merge an arch into the list kept for Key in Dict.
# If Key is not present yet, start its list from Arch.
#
# @param Dict:  Dictionary mapping keys to arch lists
# @param Key:   The key whose arch list is updated
# @param Arch:  The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
    if Key not in Dict:
        # First arch seen for this key: start a fresh list.
        Dict[Key] = Arch.split()
    else:
        Dict[Key].append(Arch)
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[(VarName, Arch)] = PATH
#
# @param String:   String with DEFINE statement
# @param Arch:     Supported Arch
# @param Defines:  Dictionary the parsed DEFINE is stored into
#
# NOTE(review): the return codes below document what the code actually does;
# the original header had the 1 / -1 meanings swapped.
# @retval 0   DEFINE statement found, and valid
# @retval -1  DEFINE statement found, but not of the form "name = value"
# @retval 1   DEFINE statement not found
#
def GenDefines(String, Arch, Defines):
    if String.find(DataType.TAB_DEFINE + ' ') > -1:
        # Strip the DEFINE keyword, then split on '=' into name and value.
        List = String.replace(DataType.TAB_DEFINE + ' ', '').split(DataType.TAB_EQUAL_SPLIT)
        if len(List) == 2:
            # Keyed by (name, arch) so the same name may differ per arch.
            Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
            return 0
        else:
            return -1
    return 1
## GenInclude
#
# Parse a string with format "!include <Filename>"
# On success record the include file path in IncludeFiles, merged per Arch.
#
# @param String:        String with INCLUDE statement
# @param IncludeFiles:  Dictionary the parsed include paths are merged into
# @param Arch:          Supported Arch
#
# @retval True   An !include statement was found and recorded
# @retval False  No !include statement in String
#
def GenInclude(String, IncludeFiles, Arch):
    # Case-insensitive search for the "!include " keyword; compute the
    # position once instead of twice.
    Keyword = DataType.TAB_INCLUDE.upper() + ' '
    Position = String.upper().find(Keyword)
    if Position > -1:
        IncludeFile = CleanString(String[Position + len(Keyword):])
        MergeArches(IncludeFiles, IncludeFile, Arch)
        return True
    return False
## GetLibraryClassesWithModuleType
#
# Get Library Class definitions for a section whose header carries a
# module type list, e.g. [LibraryClasses.common.DXE_DRIVER].
#
# @param Lines:             The section content to be parsed
# @param Key:               The section header, parsed for the module types
# @param KeyValues:         Output list of [value, module type list] pairs
# @param CommentCharacter:  Comment char, used to ignore comment content
#
# @retval True Always succeeds
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
    # Index 1 of SplitModuleType() holds the parsed module type list.
    ParsedKey = SplitModuleType(Key)
    Body = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for RawLine in Body.splitlines():
        Cleaned = CleanString(RawLine, CommentCharacter)
        # Skip blank lines and pure comment lines.
        if Cleaned == '' or Cleaned[0] == CommentCharacter:
            continue
        KeyValues.append([CleanString(Cleaned, CommentCharacter), ParsedKey[1]])
    return True
## GetDynamics
#
# Get Dynamic Pcds from a section body; each entry is paired with the
# SkuId name list split out of the section key.
#
# @param Lines: The section content to be parsed (starts at the section header)
# @param Key: Section key containing the SkuId names
# @param KeyValues: Output list; [cleaned line, skuid-name list] per entry (mutated in place)
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get Dynamic Pcds successfully
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
    #
    # Get SkuId Name List out of the section key
    #
    SkuIdNameList = SplitModuleType(Key)
    # Drop everything up to and including the section header terminator
    Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for Line in Lines.splitlines():
        Line = CleanString(Line, CommentCharacter)
        if Line != '' and Line[0] != CommentCharacter:
            # Line is already cleaned above; the original re-cleaned the same
            # string on append, which is redundant
            KeyValues.append([Line, SkuIdNameList[1]])
    return True
## SplitModuleType
#
# Split ModuleType out of a section key.
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] ->
# [ 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
# @retval ReturnValue [key-without-moduletype, list of module types]
#
def SplitModuleType(Key):
    Parts = Key.split(DataType.TAB_SPLIT)
    # Pad so Parts[1] (arch) and Parts[2] (module types) always exist,
    # even when the key has fewer dot-separated components
    Parts += ['', '']
    KeyValue = Parts[0]
    if Parts[1] != '':
        KeyValue += DataType.TAB_SPLIT + Parts[1]
    return [KeyValue, GetSplitValueList(Parts[2])]
## Replace macro in strings list
#
# This method replaces macros used in a given string list. The macros are
# given in a dictionary. Non-string items are passed through unchanged.
#
# @param StringList StringList to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement To decide whether replace un-defined macro to ''
#
# @retval NewList A new string list whose macros are replaced
#
def ReplaceMacros(StringList, MacroDefinitions=None, SelfReplacement=False):
    # Default of None avoids the shared-mutable-default pitfall; passing no
    # argument behaves exactly as the old "={}" default (empty definitions).
    if MacroDefinitions is None:
        MacroDefinitions = {}
    NewList = []
    for Item in StringList:
        # isinstance(..., str) is equivalent to the original type('') check
        if isinstance(Item, str):
            NewList.append(ReplaceMacro(Item, MacroDefinitions, SelfReplacement))
        else:
            NewList.append(Item)
    return NewList
## Replace macro in string
#
# This method replaces $(MACRO) references in the given string using the
# supplied definitions, repeating until the string stops changing (so a
# macro value may itself contain further macro references).
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
#        (mutable default is read-only here, but still an anti-pattern)
# @param SelfReplacement To decide whether to replace an un-defined macro with ''
# @param RaiseError Raise SymbolNotFound instead of tolerating undefined macros
#
# @retval string The string whose macros are replaced
#
def ReplaceMacro(String, MacroDefinitions={}, SelfReplacement=False, RaiseError=False):
    LastString = String
    # Fix-point loop: keep expanding until nothing changes or no macros remain
    while String and MacroDefinitions:
        MacroUsed = GlobalData.gMacroRefPattern.findall(String)
        # no macro found in String, stop replacing
        if len(MacroUsed) == 0:
            break
        for Macro in MacroUsed:
            if Macro not in MacroDefinitions:
                if RaiseError:
                    raise SymbolNotFound("%s not defined" % Macro)
                if SelfReplacement:
                    # Undefined macro is erased rather than left in place
                    String = String.replace("$(%s)" % Macro, '')
                continue
            # Guard: skip expansion when the value refers to the macro itself,
            # otherwise the fix-point loop would never terminate
            if "$(%s)" % Macro not in MacroDefinitions[Macro]:
                String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
        # in case there's a macro that could not be expanded this pass
        if String == LastString:
            break
        LastString = String
    return String
## NormPath
#
# Create a normalized local path, expanding DEFINE macros first.
#
# @param Path: The input value for Path to be converted
# @param Defines: A dict of DEFINE name -> value used for macro expansion
#                 (mutable default is read-only here, but still an anti-pattern)
#
# @retval Path Formatted path
#
def NormPath(Path, Defines={}):
    IsRelativePath = False
    if Path:
        if Path[0] == '.':
            IsRelativePath = True
        #
        # Replace with Define
        #
        if Defines:
            Path = ReplaceMacro(Path, Defines)
        #
        # To local path format
        #
        Path = os.path.normpath(Path)
        # Re-root a path that claims to be under the workspace but does not
        # actually exist (and is not in the build dir) via the multi-workspace
        # helper, so it can resolve in a packages-path workspace instead.
        if Path.startswith(GlobalData.gWorkspace) and not Path.startswith(GlobalData.gBuildDirectory) and not os.path.exists(Path):
            Path = Path[len (GlobalData.gWorkspace):]
            # NOTE(review): if Path equals gWorkspace exactly, the slice above
            # leaves '' and Path[0] raises IndexError — presumably never
            # happens in practice; confirm before relying on it.
            if Path[0] == os.path.sep:
                Path = Path[1:]
            Path = mws.join(GlobalData.gWorkspace, Path)
    # Preserve the explicit "./" prefix the caller supplied
    if IsRelativePath and Path[0] != '.':
        Path = os.path.join('.', Path)
    return Path
## CleanString
#
# Strip whitespace and remove trailing comments from a line, honoring
# comment characters that appear inside quoted strings.
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
# @param AllowCppStyleComment: Also treat the EDK "//" marker as a comment
# @param BuildOption: Special handling for build-option lines whose values
#                     legitimately contain the comment character in quotes
#
# @retval Line The cleaned line
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False, BuildOption=False):
    #
    # remove whitespace
    #
    Line = Line.strip();
    #
    # Replace Edk's comment character
    #
    if AllowCppStyleComment:
        Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
    #
    # remove comments, but we should escape comment character in string
    #
    # Single left-to-right scan tracking quote state; a comment char inside
    # quotes is remembered (CommentInString) but does not truncate the line.
    InDoubleQuoteString = False
    InSingleQuoteString = False
    CommentInString = False
    for Index in range(0, len(Line)):
        if Line[Index] == '"' and not InSingleQuoteString:
            InDoubleQuoteString = not InDoubleQuoteString
        elif Line[Index] == "'" and not InDoubleQuoteString:
            InSingleQuoteString = not InSingleQuoteString
        elif Line[Index] == CommentCharacter and (InSingleQuoteString or InDoubleQuoteString):
            CommentInString = True
        elif Line[Index] == CommentCharacter and not (InSingleQuoteString or InDoubleQuoteString):
            # First unquoted comment char: truncate and stop scanning
            Line = Line[0: Index]
            break
    if CommentInString and BuildOption:
        # Build-option value contained a quoted comment char: drop the quotes
        # and escape every '#' for the shell ('^' on Windows, '\' elsewhere),
        # skipping ones that are already escaped.
        Line = Line.replace('"', '')
        ChIndex = Line.find('#')
        while ChIndex >= 0:
            if GlobalData.gIsWindows:
                if ChIndex == 0 or Line[ChIndex - 1] != '^':
                    Line = Line[0:ChIndex] + '^' + Line[ChIndex:]
                    # Skip past the inserted escape and the '#' itself
                    ChIndex = Line.find('#', ChIndex + 2)
                else:
                    ChIndex = Line.find('#', ChIndex + 1)
            else:
                if ChIndex == 0 or Line[ChIndex - 1] != '\\':
                    Line = Line[0:ChIndex] + '\\' + Line[ChIndex:]
                    ChIndex = Line.find('#', ChIndex + 2)
                else:
                    ChIndex = Line.find('#', ChIndex + 1)
    #
    # remove whitespace again
    #
    Line = Line.strip();
    return Line
## CleanString2
#
# Split a line into its statement and trailing comment, honoring comment
# characters that appear inside quoted strings. Both halves are stripped.
#
# @param Line: The string to be split
# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
# @param AllowCppStyleComment: Also treat the EDK "//" marker as a comment
#
# @retval (Line, Comment) The statement part and the comment part (comment
#         keeps its leading comment character; '' when there is none)
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
    #
    # remove whitespace
    #
    Line = Line.strip();
    #
    # Replace Edk's comment character
    #
    if AllowCppStyleComment:
        Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
    #
    # separate comments and statements, but we should escape comment character in string
    #
    # Same quote-tracking scan as CleanString, but the comment is preserved
    # and returned instead of discarded.
    InDoubleQuoteString = False
    InSingleQuoteString = False
    CommentInString = False  # set but unused beyond the scan; kept for parity with CleanString
    Comment = ''
    for Index in range(0, len(Line)):
        if Line[Index] == '"' and not InSingleQuoteString:
            InDoubleQuoteString = not InDoubleQuoteString
        elif Line[Index] == "'" and not InDoubleQuoteString:
            InSingleQuoteString = not InSingleQuoteString
        elif Line[Index] == CommentCharacter and (InDoubleQuoteString or InSingleQuoteString):
            CommentInString = True
        elif Line[Index] == CommentCharacter and not (InDoubleQuoteString or InSingleQuoteString):
            # First unquoted comment char: everything from here is the comment
            Comment = Line[Index:].strip()
            Line = Line[0:Index].strip()
            break
    return Line, Comment
## GetMultipleValuesOfKeyFromLines
#
# Parse a section body into cleaned, non-comment lines.
# The result is appended to KeyValues.
#
# @param Lines: The content to be parsed (starts at the section header)
# @param Key: Reserved
# @param KeyValues: Output list of cleaned lines (mutated in place)
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Successfully executed
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
    # Drop everything up to and including the section header terminator
    Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    # splitlines() handles '\r\n' as well, and matches the sibling section
    # parsers (GetDynamics, GetLibraryClassesWithModuleType) which already
    # use it; the stray '\r' left by split('\n') was only removed later by
    # CleanString's strip.
    for Line in Lines.splitlines():
        Line = CleanString(Line, CommentCharacter)
        if Line != '' and Line[0] != CommentCharacter:
            KeyValues.append(Line)
    return True
## GetDefineValue
#
# Parse a DEFINE statement to get the defined value:
#   DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of the DEFINE statement
# @param CommentCharacter: Comment char
#        (NOTE(review): not forwarded to CleanString, which therefore uses
#        its default comment character — confirm whether that is intended)
#
# @retval string The defined value
#
def GetDefineValue(String, Key, CommentCharacter):
    Cleaned = CleanString(String)
    Marker = Key + ' '
    return Cleaned[Cleaned.find(Marker) + len(Marker):]
## GetHexVerValue
#
# Get a Hex Version Value
#
# @param VerString: The version string to be parsed
#
# @retval: If VerString is incorrectly formatted, return "None" which will break the build.
#          If VerString is correctly formatted, return a Hex value of the Version Number (0xmmmmnnnn)
#          where mmmm is the major number and nnnn is the adjusted minor number.
#
def GetHexVerValue(VerString):
    VerString = CleanString(VerString)
    if gHumanReadableVerPatt.match(VerString):
        Major, Minor = VerString.split('.')[:2]
        # A one-digit minor is scaled by ten: "1.2" -> minor 20 -> 0x00010020
        if len(Minor) == 1:
            Minor += '0'
        return "0x%08x" % ((int(Major) << 16) + int(Minor))
    if gHexVerPatt.match(VerString):
        # Already in 0x... form; pass through unchanged
        return VerString
    return None
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get the value of each definition line
#   Key1 = Value1
#   Key2 = Value2
# The result is saved into Dictionary.
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing (mutated in place); must
#        already contain DataType.TAB_INF_DEFINES_MACRO as a dict key, since
#        unknown keys are stored there
# @param CommentCharacter: Comment char, used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, used to decide if the value has multiple parts
# @param ValueSplitCharacter: Value split char, used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
#
# @retval True Successfully executed
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
    Lines = Lines.split('\n')
    Keys = []
    Value = ''
    DefineValues = ['']
    SpecValues = ['']
    for Line in Lines:
        #
        # Handle DEFINE and SPEC lines: collected separately, not keyed
        #
        if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
            if '' in DefineValues:
                DefineValues.remove('')
            DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
            continue
        if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
            if '' in SpecValues:
                SpecValues.remove('')
            SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
            continue
        #
        # Handle Others
        #
        LineList = Line.split(KeySplitCharacter, 1)
        if len(LineList) >= 2:
            Key = LineList[0].split()
            # Accept only single-word, non-comment keys
            if len(Key) == 1 and Key[0][0] != CommentCharacter:
                #
                # Remove comments and white spaces
                #
                LineList[1] = CleanString(LineList[1], CommentCharacter)
                if ValueSplitFlag:
                    # NOTE(review): string.strip as a function only exists in
                    # Python 2's string module; under Python 3 this raises.
                    # On Python 2 map() returns a list here.
                    Value = map(string.strip, LineList[1].split(ValueSplitCharacter))
                else:
                    # splitlines() of a single cleaned line yields a 1-element list
                    Value = CleanString(LineList[1], CommentCharacter).splitlines()
                if Key[0] in Dictionary:
                    if Key[0] not in Keys:
                        # First assignment within this parse wins over the preset
                        Dictionary[Key[0]] = Value
                        Keys.append(Key[0])
                    else:
                        # Repeated key within this parse extends the value list
                        Dictionary[Key[0]].extend(Value)
                else:
                    # Unknown keys are recorded as macros (first value only)
                    Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
    if DefineValues == []:
        DefineValues = ['']
    if SpecValues == []:
        SpecValues = ['']
    Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
    Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
    return True
## PreCheck
#
# Do a pre-check for a file before it is parsed:
#   - each '$' must be part of a complete '$(...)' reference
#   - '[' and ']' must appear in pairs on a line
# Lines are cleaned and re-joined with '\r\n' into the returned content.
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report (currently unused in the body)
#
# @retval NewFileContent The cleaned content, one '\r\n'-terminated line each
#
def PreCheck(FileName, FileContent, SupSectionTag):
    LineNo = 0
    IsFailed = False
    NewFileContent = ''
    for Line in FileContent.splitlines():
        LineNo = LineNo + 1
        #
        # Clean current line
        #
        Line = CleanString(Line)
        #
        # Remove commented line
        #
        # NOTE(review): this tests for a leading TAB_COMMA_SPLIT (','), while
        # the comment says "commented line" — TAB_COMMENT_SPLIT ('#') looks
        # intended. Since CleanString already removed '#' comments above, the
        # check is effectively dead either way; confirm before changing.
        if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
            Line = ''
        #
        # Check $()
        #
        if Line.find('$') > -1:
            if Line.find('$(') < 0 or Line.find(')') < 0:
                EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
        #
        # Check []
        #
        if Line.find('[') > -1 or Line.find(']') > -1:
            #
            # Only got one '[' or one ']'
            #
            if not (Line.find('[') > -1 and Line.find(']') > -1):
                EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
        #
        # Regenerate FileContent
        #
        NewFileContent = NewFileContent + Line + '\r\n'
    # NOTE(review): IsFailed is never set to True anywhere above, so this
    # branch is dead code; kept as-is pending confirmation.
    if IsFailed:
        EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
    return NewFileContent
## CheckFileType
#
# Check that CheckFilename has the extension ExtName.
# Reports a parser error (which may raise) when the extension differs.
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Expected extension of the file to be checked
# @param ContainerFilename: The container file which describes the file to be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in the container file which defines the file to be checked
# @param LineNo: Known line number, or -1 to look it up in the container file
#
# @retval True The file type is correct
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo= -1):
    if CheckFilename != '' and CheckFilename is not None:
        Ext = os.path.splitext(CheckFilename)[1]
        if Ext.upper() != ExtName.upper():
            # with-block closes the container file promptly instead of
            # leaking the handle as the previous open(...).read() did
            with open(ContainerFilename, 'r') as ContainerFd:
                ContainerFile = ContainerFd.read()
            if LineNo == -1:
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = "Invalid %s. '%s' is found, but '%s' file is needed" % (SectionName, CheckFilename, ExtName)
            EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo,
                            File=ContainerFilename, RaiseError=EdkLogger.IsRaiseError)
    return True
## CheckFileExist
#
# Check that a workspace-relative file exists.
# Reports a parser error (which may raise) when it does not.
#
# @param WorkspaceDir: Current workspace dir
# @param CheckFilename: Name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in the container file which defines the file to be checked
# @param LineNo: Known line number, or -1 to look it up in the container file
#
# @retval CheckFile The file's full path ('' when CheckFilename was empty)
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo= -1):
    CheckFile = ''
    if CheckFilename != '' and CheckFilename is not None:
        CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
        if not os.path.isfile(CheckFile):
            # with-block closes the container file promptly instead of
            # leaking the handle as the previous open(...).read() did
            with open(ContainerFilename, 'r') as ContainerFd:
                ContainerFile = ContainerFd.read()
            if LineNo == -1:
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = "Can't find file '%s' defined in section '%s'" % (CheckFile, SectionName)
            EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg,
                            File=ContainerFilename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
    return CheckFile
## GetLineNo
#
# Find the 1-based index of the first line containing Line in FileContent.
#
# @param FileContent: Search scope
# @param Line: Search key (matched as a substring)
# @param IsIgnoreComment: Skip lines whose first non-blank char is the comment char
#
# @retval int Index of the line (1-based)
# @retval -1 The line is not found
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
    for Index, Text in enumerate(FileContent.splitlines()):
        if Text.find(Line) > -1:
            #
            # Ignore statement in comment
            #
            # startswith also handles an all-whitespace line safely; the
            # previous strip()[0] raised IndexError when the stripped line
            # was empty (e.g. searching for an empty Line)
            if IsIgnoreComment and Text.strip().startswith(DataType.TAB_COMMENT_SPLIT):
                continue
            return Index + 1
    return -1
## RaiseParserError
#
# Report a parser error (which may raise) for an invalid statement.
#
# @param Line: String which has the error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format, appended to the message when non-empty
# @param LineNo: Known line number, or -1 to look it up in File
#
def RaiseParserError(Line, Section, File, Format='', LineNo= -1):
    if LineNo == -1:
        # with-block closes the file promptly instead of leaking the handle
        # as the previous open(...).read() did
        with open(os.path.normpath(File), 'r') as Fd:
            LineNo = GetLineNo(Fd.read(), Line)
    ErrorMsg = "Invalid statement '%s' is found in section '%s'" % (Line, Section)
    if Format != '':
        Format = "Correct format is " + Format
    EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, ExtraData=Format, RaiseError=EdkLogger.IsRaiseError)
## WorkspaceFile
#
# Return a full path rooted at the (multi-)workspace dir.
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
# @retval string A full path
#
def WorkspaceFile(WorkspaceDir, Filename):
    NormalizedDir = NormPath(WorkspaceDir)
    NormalizedName = NormPath(Filename)
    return mws.join(NormalizedDir, NormalizedName)
## Split string
#
# Remove one leading and one trailing '"' from the string, when present.
#
# @param String: The string to be trimmed
#
# @retval String: The string with surrounding '"' removed
#
def SplitString(String):
    # Slicing with [:1] / [-1:] is safe on the empty string
    if String[:1] == '"':
        String = String[1:]
    if String[-1:] == '"':
        String = String[:-1]
    return String
## Convert To Sql String
#
# Escape each item of StringList for SQL by replacing "'" with "''".
#
# @param StringList: A list of strings to be converted
#
# @retval list A new list of escaped strings
#
def ConvertToSqlString(StringList):
    # List comprehension instead of map(): identical on Python 2 (map returned
    # a list) and eagerly returns a real list on Python 3 as well.
    return [S.replace("'", "''") for S in StringList]
## Convert To Sql String
#
# Escape a single string for SQL by replacing "'" with "''".
#
# @param String: A string to be converted
#
# @retval string The escaped string
#
def ConvertToSqlString2(String):
    # Split-and-join is equivalent to str.replace for this substitution
    return "''".join(String.split("'"))
#
# Remove EDK block comments (/* ... */ style markers) from a list of lines.
# Lines that fall inside a block comment are replaced with '' so that line
# numbering is preserved for callers.
#
def RemoveBlockComment(Lines):
    IsFindBlockComment = False
    IsFindBlockCode = False  # set but never used; kept for compatibility
    ReservedLine = ''
    NewLines = []
    for Line in Lines:
        Line = Line.strip()
        #
        # Remove comment block
        #
        # NOTE(review): GetSplitList is not defined in this file — presumably
        # imported from a sibling module; confirm it exists, otherwise this
        # path raises NameError at runtime.
        if Line.find(DataType.TAB_COMMENT_EDK_START) > -1:
            # Keep the code preceding the block-comment start
            ReservedLine = GetSplitList(Line, DataType.TAB_COMMENT_EDK_START, 1)[0]
            IsFindBlockComment = True
        if Line.find(DataType.TAB_COMMENT_EDK_END) > -1:
            # Stitch the kept prefix onto the code after the block-comment end
            Line = ReservedLine + GetSplitList(Line, DataType.TAB_COMMENT_EDK_END, 1)[1]
            ReservedLine = ''
            IsFindBlockComment = False
        if IsFindBlockComment:
            # Placeholder keeps the output line count equal to the input's
            NewLines.append('')
            continue
        NewLines.append(Line)
    return NewLines
#
# Join the items of a list into one string separated by Split.
# Non-list inputs are returned unchanged. As in the original implementation,
# a non-whitespace separator is preserved at the end of the result (only
# surrounding whitespace is stripped).
#
def GetStringOfList(List, Split=' '):
    if not isinstance(List, list):
        return List
    if not List:
        return ''
    # join + one trailing separator reproduces the old "Item + Split per
    # element" output exactly, without the quadratic string concatenation
    return (Split.join(List) + Split).strip()
#
# Flatten a list of HelpText objects into a list of text lines.
# A single trailing newline is trimmed off each entry's String first
# (mutating the entry in place, as before).
#
def GetHelpTextList(HelpTextClassList):
    TextLines = []
    if HelpTextClassList:
        for Entry in HelpTextClassList:
            if Entry.String.endswith('\n'):
                Entry.String = Entry.String[:-1]
            TextLines.extend(Entry.String.split('\n'))
    return TextLines
## StringToArray
#
# Convert a string (unicode, L"..." wide literal, "..." ASCII literal,
# {...} byte list, or whitespace-separated values) into a C byte-array
# initializer string.
#
# @param String: The value to convert
#
# @retval string A "{0x..,..}" C array initializer
#
def StringToArray(String):
    if isinstance(String, unicode):
        # BUGFIX: was "len(unicode)" — len() of the type object itself, which
        # always raised TypeError; the length of the value was intended.
        if len(String) == 0:
            return "{0x00,0x00}"
        # UCS-2 style: low byte then 0x00 per char, plus a wide NUL terminator
        return "{%s,0x00,0x00}" % ",".join("0x%02x,0x00" % ord(C) for C in String)
    elif String.startswith('L"'):
        if String == "L\"\"":
            return "{0x00,0x00}"
        else:
            return "{%s,0x00,0x00}" % ",".join("0x%02x,0x00" % ord(C) for C in String[2:-1])
    elif String.startswith('"'):
        if String == "\"\"":
            return "{0x00,0x00}"
        else:
            StringLen = len(String[1:-1])
            # Pad with one or two NULs so the array length ends up even
            if StringLen % 2:
                return "{%s,0x00}" % ",".join("0x%02x" % ord(C) for C in String[1:-1])
            else:
                return "{%s,0x00,0x00}" % ",".join("0x%02x" % ord(C) for C in String[1:-1])
    elif String.startswith('{'):
        # Already a byte list: normalize spacing only
        return "{%s}" % ",".join(C.strip() for C in String[1:-1].split(','))
    else:
        # Whitespace-separated values; pad to an even element count
        if len(String.split()) % 2:
            return '{%s,0}' % ','.join(String.split())
        else:
            return '{%s,0,0}' % ','.join(String.split())
## StringArrayLength
#
# Return the element count of the C array StringToArray would produce
# for the given value.
#
# @param String: The value whose array length is wanted
#
# @retval int The array length
#
def StringArrayLength(String):
    # Wide (unicode) string: two bytes per char plus terminator
    # NOTE(review): the extra "+ 1" makes this one longer than the
    # corresponding StringToArray output — kept as-is; confirm intent.
    if isinstance(String, unicode):
        return (len(String) + 1) * 2 + 1
    # L"..." literal: strip L and both quotes (3 chars), add NUL, two bytes each
    if String.startswith('L"'):
        return (len(String) - 3 + 1) * 2
    # "..." literal: strip both quotes (2 chars), add NUL
    if String.startswith('"'):
        return len(String) - 2 + 1
    # Whitespace-separated values plus one terminator
    return len(String.split()) + 1
## RemoveDupOption
#
# Blank out repeated occurrences of options with a given prefix (e.g. "/I")
# in a command-line string. Values already present in Against are treated
# as seen and removed on first occurrence.
#
# @param OptionString: The full option string to filter
# @param Which: Option prefix to deduplicate
# @param Against: Optional pre-seeded list of values to suppress
#
# @retval string The option string with duplicates blanked out
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
    Tokens = OptionString.split()
    Seen = list(Against) if Against else []
    for Pos, Token in enumerate(Tokens):
        if not Token.startswith(Which):
            continue
        Value = Token[len(Which):]
        if Value in Seen:
            # Blank (not delete) so join keeps the original token spacing shape
            Tokens[Pos] = ""
        else:
            Seen.append(Value)
    return " ".join(Tokens)
##
#
# This acts like the main() function for the script, unless it is 'import'ed
# into another script. This module is a library of helpers, so there is
# deliberately nothing to run standalone.
#
if __name__ == '__main__':
    pass
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._app_service_certificate_orders_operations import build_create_or_update_certificate_request_initial, build_create_or_update_request_initial, build_delete_certificate_request, build_delete_request, build_get_certificate_request, build_get_request, build_list_by_resource_group_request, build_list_certificates_request, build_list_request, build_reissue_request, build_renew_request, build_resend_email_request, build_resend_request_emails_request, build_retrieve_certificate_actions_request, build_retrieve_certificate_email_history_request, build_retrieve_site_seal_request, build_update_certificate_request, build_update_request, build_validate_purchase_information_request, build_verify_domain_ownership_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AppServiceCertificateOrdersOperations:
"""AppServiceCertificateOrdersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2021_01_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Wire up the pipeline client and its configuration, plus the
    # (de)serializers used by every operation in this group.
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
@distributed_trace
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.AppServiceCertificateOrderCollection"]:
    """List all certificate orders in a subscription.

    Description for List all certificate orders in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AppServiceCertificateOrderCollection or the result
     of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrderCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateOrderCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # Build the request for either the first page (operation URL) or a
    # continuation page (next_link URL supplied by the previous response).
    def prepare_request(next_link=None):
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Continuation requests are always plain GETs
            request.method = "GET"
        return request
    # Deserialize one page into (next_link, async list of items).
    async def extract_data(pipeline_response):
        deserialized = self._deserialize("AppServiceCertificateOrderCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    # Fetch one page; kwargs (including the caller's pipeline options) are
    # re-applied to every page request.
    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders'}  # type: ignore
@distributed_trace_async
async def validate_purchase_information(
    self,
    app_service_certificate_order: "_models.AppServiceCertificateOrder",
    **kwargs: Any
) -> None:
    """Validate information for a certificate order.

    Description for Validate information for a certificate order.

    :param app_service_certificate_order: Information for a certificate order.
    :type app_service_certificate_order:
     ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrder
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Serialize the order payload and build the POST request.
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    body_json = self._serialize.body(app_service_certificate_order, 'AppServiceCertificateOrder')
    request = _convert_request(build_validate_purchase_information_request(
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=body_json,
        template_url=self.validate_purchase_information.metadata['url'],
    ))
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Success is 204 No Content; anything else maps to an ARM error.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

validate_purchase_information.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/validateCertificateRegistrationInformation'}  # type: ignore
@distributed_trace
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.AppServiceCertificateOrderCollection"]:
    """Get certificate orders in a resource group.

    Description for Get certificate orders in a resource group.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AppServiceCertificateOrderCollection or the result
     of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrderCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateOrderCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # Build the request for either the first page (operation URL) or a
    # continuation page (next_link URL supplied by the previous response).
    def prepare_request(next_link=None):
        if not next_link:
            request = build_list_by_resource_group_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                template_url=self.list_by_resource_group.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_by_resource_group_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Continuation requests are always plain GETs
            request.method = "GET"
        return request
    # Deserialize one page into (next_link, async list of items).
    async def extract_data(pipeline_response):
        deserialized = self._deserialize("AppServiceCertificateOrderCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    # Fetch one page; kwargs (including the caller's pipeline options) are
    # re-applied to every page request.
    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders'}  # type: ignore
@distributed_trace_async
async def get(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    **kwargs: Any
) -> "_models.AppServiceCertificateOrder":
    """Get a certificate order.

    Description for Get a certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AppServiceCertificateOrder, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrder
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateOrder"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Build and normalize the GET request for this order.
    request = _convert_request(build_get_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        template_url=self.get.metadata['url'],
    ))
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    certificate_distinguished_name: "_models.AppServiceCertificateOrder",
    **kwargs: Any
) -> "_models.AppServiceCertificateOrder":
    # Initial PUT for the create-or-update long-running operation; the public
    # begin_create_or_update wraps this with a poller.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateOrder"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Serialize the order payload and build the request.
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    body_json = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrder')
    request = _convert_request(build_create_or_update_request_initial(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=body_json,
        template_url=self._create_or_update_initial.metadata['url'],
    ))
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 and 201 deserialize to the same model, so one call covers both.
    deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}  # type: ignore
@distributed_trace_async
async def begin_create_or_update(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    certificate_distinguished_name: "_models.AppServiceCertificateOrder",
    **kwargs: Any
) -> AsyncLROPoller["_models.AppServiceCertificateOrder"]:
    """Create or update a certificate purchase order.

    Description for Create or update a certificate purchase order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param certificate_distinguished_name: Distinguished name to use for the certificate order.
    :type certificate_distinguished_name:
     ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrder
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either AppServiceCertificateOrder or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrder]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateOrder"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved poller state.
    # cls=lambda keeps the raw pipeline response for the polling method.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            certificate_order_name=certificate_order_name,
            certificate_distinguished_name=certificate_distinguished_name,
            content_type=content_type,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the resource model.
        response = pipeline_response.http_response
        deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}  # type: ignore
@distributed_trace_async
async def delete(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    **kwargs: Any
) -> None:
    """Delete an existing certificate order.

    Description for Delete an existing certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    http_request = _convert_request(build_delete_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        template_url=self.delete.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    # 204 covers the case where the order is already gone.
    if http_response.status_code not in (200, 204):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}  # type: ignore
@distributed_trace_async
async def update(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    certificate_distinguished_name: "_models.AppServiceCertificateOrderPatchResource",
    **kwargs: Any
) -> "_models.AppServiceCertificateOrder":
    """Create or update a certificate purchase order.

    Description for Create or update a certificate purchase order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param certificate_distinguished_name: Distinguished name to use for the certificate order.
    :type certificate_distinguished_name:
     ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrderPatchResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AppServiceCertificateOrder, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateOrder
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateOrder"]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrderPatchResource')

    http_request = _convert_request(build_update_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.update.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code not in (200, 201):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    # 200 and 201 carry the same payload shape, so a single deserialize covers both.
    deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'}  # type: ignore
@distributed_trace
def list_certificates(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.AppServiceCertificateCollection"]:
    """List all certificates associated with a certificate order.

    Description for List all certificates associated with a certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AppServiceCertificateCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateCollection"]
    # Default ARM error mapping, extendable by the caller via 'error_map';
    # captured by get_next below and applied on every page request.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Build the first-page request from the operation template, or a
        # follow-up GET against the service-provided next_link URL.
        if not next_link:
            request = build_list_certificates_request(
                resource_group_name=resource_group_name,
                certificate_order_name=certificate_order_name,
                subscription_id=self._config.subscription_id,
                template_url=self.list_certificates.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_certificates_request(
                resource_group_name=resource_group_name,
                certificate_order_name=certificate_order_name,
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # next_link is always followed with a GET regardless of the template verb.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Split a page response into (continuation token, items).
        deserialized = self._deserialize("AppServiceCertificateCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page and raise a mapped error on any non-200 response.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )

list_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates'}  # type: ignore
@distributed_trace_async
async def get_certificate(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    name: str,
    **kwargs: Any
) -> "_models.AppServiceCertificateResource":
    """Get the certificate associated with a certificate order.

    Description for Get the certificate associated with a certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AppServiceCertificateResource, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateResource"]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    http_request = _convert_request(build_get_certificate_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        name=name,
        subscription_id=self._config.subscription_id,
        template_url=self.get_certificate.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code not in (200,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}  # type: ignore
async def _create_or_update_certificate_initial(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    name: str,
    key_vault_certificate: "_models.AppServiceCertificateResource",
    **kwargs: Any
) -> "_models.AppServiceCertificateResource":
    # Initial request of the certificate create-or-update LRO; returns the
    # resource from either a 200 or a 201 response.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateResource"]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(key_vault_certificate, 'AppServiceCertificateResource')

    http_request = _convert_request(build_create_or_update_certificate_request_initial(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        name=name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self._create_or_update_certificate_initial.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code not in (200, 201):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    # 200 and 201 carry the same payload shape, so a single deserialize covers both.
    deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

_create_or_update_certificate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}  # type: ignore
@distributed_trace_async
async def begin_create_or_update_certificate(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    name: str,
    key_vault_certificate: "_models.AppServiceCertificateResource",
    **kwargs: Any
) -> AsyncLROPoller["_models.AppServiceCertificateResource"]:
    """Creates or updates a certificate and associates with key vault secret.

    Description for Creates or updates a certificate and associates with key vault secret.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :param key_vault_certificate: Key vault certificate resource Id.
    :type key_vault_certificate: ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either AppServiceCertificateResource or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateResource]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateResource"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved poller state.
    # cls=lambda keeps the raw pipeline response for the polling method.
    if cont_token is None:
        raw_result = await self._create_or_update_certificate_initial(
            resource_group_name=resource_group_name,
            certificate_order_name=certificate_order_name,
            name=name,
            key_vault_certificate=key_vault_certificate,
            content_type=content_type,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into the resource model.
        response = pipeline_response.http_response
        deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}  # type: ignore
@distributed_trace_async
async def delete_certificate(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    name: str,
    **kwargs: Any
) -> None:
    """Delete the certificate associated with a certificate order.

    Description for Delete the certificate associated with a certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    http_request = _convert_request(build_delete_certificate_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        name=name,
        subscription_id=self._config.subscription_id,
        template_url=self.delete_certificate.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    # 204 covers the case where the certificate is already gone.
    if http_response.status_code not in (200, 204):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}  # type: ignore
@distributed_trace_async
async def update_certificate(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    name: str,
    key_vault_certificate: "_models.AppServiceCertificatePatchResource",
    **kwargs: Any
) -> "_models.AppServiceCertificateResource":
    """Creates or updates a certificate and associates with key vault secret.

    Description for Creates or updates a certificate and associates with key vault secret.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :param key_vault_certificate: Key vault certificate resource Id.
    :type key_vault_certificate:
     ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificatePatchResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AppServiceCertificateResource, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_15.models.AppServiceCertificateResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AppServiceCertificateResource"]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(key_vault_certificate, 'AppServiceCertificatePatchResource')

    http_request = _convert_request(build_update_certificate_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        name=name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.update_certificate.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code not in (200, 201):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    # 200 and 201 carry the same payload shape, so a single deserialize covers both.
    deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'}  # type: ignore
@distributed_trace_async
async def reissue(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    reissue_certificate_order_request: "_models.ReissueCertificateOrderRequest",
    **kwargs: Any
) -> None:
    """Reissue an existing certificate order.

    Description for Reissue an existing certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param reissue_certificate_order_request: Parameters for the reissue.
    :type reissue_certificate_order_request:
     ~azure.mgmt.web.v2021_01_15.models.ReissueCertificateOrderRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(reissue_certificate_order_request, 'ReissueCertificateOrderRequest')

    http_request = _convert_request(build_reissue_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.reissue.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    # The service returns 204 No Content on success.
    if http_response.status_code not in (204,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

reissue.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/reissue'}  # type: ignore
@distributed_trace_async
async def renew(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    renew_certificate_order_request: "_models.RenewCertificateOrderRequest",
    **kwargs: Any
) -> None:
    """Renew an existing certificate order.

    Description for Renew an existing certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param renew_certificate_order_request: Renew parameters.
    :type renew_certificate_order_request:
     ~azure.mgmt.web.v2021_01_15.models.RenewCertificateOrderRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(renew_certificate_order_request, 'RenewCertificateOrderRequest')

    http_request = _convert_request(build_renew_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.renew.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    # The service returns 204 No Content on success.
    if http_response.status_code not in (204,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/renew'}  # type: ignore
@distributed_trace_async
async def resend_email(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    **kwargs: Any
) -> None:
    """Resend certificate email.

    Description for Resend certificate email.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    http_request = _convert_request(build_resend_email_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        template_url=self.resend_email.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    # The service returns 204 No Content on success.
    if http_response.status_code not in (204,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

resend_email.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendEmail'}  # type: ignore
@distributed_trace_async
async def resend_request_emails(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    name_identifier: "_models.NameIdentifier",
    **kwargs: Any
) -> None:
    """Verify domain ownership for this certificate order.

    Description for Verify domain ownership for this certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name_identifier: Email address.
    :type name_identifier: ~azure.mgmt.web.v2021_01_15.models.NameIdentifier
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping, extendable by the caller via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(name_identifier, 'NameIdentifier')

    http_request = _convert_request(build_resend_request_emails_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.resend_request_emails.metadata['url'],
    ))
    http_request.url = self._client.format_url(http_request.url)

    pipeline_response = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    # The service returns 204 No Content on success.
    if http_response.status_code not in (204,):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

resend_request_emails.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendRequestEmails'}  # type: ignore
@distributed_trace_async
async def retrieve_site_seal(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    site_seal_request: "_models.SiteSealRequest",
    **kwargs: Any
) -> "_models.SiteSeal":
    """Verify domain ownership for this certificate order.

    Description for Verify domain ownership for this certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param site_seal_request: Site seal request.
    :type site_seal_request: ~azure.mgmt.web.v2021_01_15.models.SiteSealRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SiteSeal, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_15.models.SiteSeal
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE(review): the summary text looks copy-pasted from
    # verify_domain_ownership; it is emitted from the service specification,
    # so confirm against the swagger before changing it.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SiteSeal"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    _json = self._serialize.body(site_seal_request, 'SiteSealRequest')

    request = build_retrieve_site_seal_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.retrieve_site_seal.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 200 OK carries a SiteSeal payload.
    deserialized = self._deserialize('SiteSeal', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

retrieve_site_seal.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/retrieveSiteSeal'}  # type: ignore
@distributed_trace_async
async def verify_domain_ownership(
    self,
    resource_group_name: str,
    certificate_order_name: str,
    **kwargs: Any
) -> None:
    """Verify domain ownership for this certificate order.

    Description for Verify domain ownership for this certificate order.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # No request body for this operation; only path parameters are needed.
    request = build_verify_domain_ownership_request(
        resource_group_name=resource_group_name,
        certificate_order_name=certificate_order_name,
        subscription_id=self._config.subscription_id,
        template_url=self.verify_domain_ownership.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Service replies 204 No Content on success.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

verify_domain_ownership.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/verifyDomainOwnership'}  # type: ignore
@distributed_trace_async
async def retrieve_certificate_actions(
    self,
    resource_group_name: str,
    name: str,
    **kwargs: Any
) -> List["_models.CertificateOrderAction"]:
    """Retrieve the list of certificate actions.

    Description for Retrieve the list of certificate actions.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param name: Name of the certificate order.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: list of CertificateOrderAction, or the result of cls(response)
    :rtype: list[~azure.mgmt.web.v2021_01_15.models.CertificateOrderAction]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.CertificateOrderAction"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_retrieve_certificate_actions_request(
        resource_group_name=resource_group_name,
        name=name,
        subscription_id=self._config.subscription_id,
        template_url=self.retrieve_certificate_actions.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # '[CertificateOrderAction]' tells the deserializer to expect a JSON array.
    deserialized = self._deserialize('[CertificateOrderAction]', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

retrieve_certificate_actions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions'}  # type: ignore
@distributed_trace_async
async def retrieve_certificate_email_history(
    self,
    resource_group_name: str,
    name: str,
    **kwargs: Any
) -> List["_models.CertificateEmail"]:
    """Retrieve email history.

    Description for Retrieve email history.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param name: Name of the certificate order.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: list of CertificateEmail, or the result of cls(response)
    :rtype: list[~azure.mgmt.web.v2021_01_15.models.CertificateEmail]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.CertificateEmail"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_retrieve_certificate_email_history_request(
        resource_group_name=resource_group_name,
        name=name,
        subscription_id=self._config.subscription_id,
        template_url=self.retrieve_certificate_email_history.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # '[CertificateEmail]' tells the deserializer to expect a JSON array.
    deserialized = self._deserialize('[CertificateEmail]', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

retrieve_certificate_email_history.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory'}  # type: ignore
| |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import datetime
import iso8601
import mock
from oslo.utils import timeutils
import webob.exc
from nova.api.openstack.compute.plugins.v3 import services
from nova import availability_zones
from nova.compute import cells_api
from nova import context
from nova import db
from nova import exception
from nova.servicegroup.drivers import db as db_driver
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_service
# Canned DB service rows used by the stubbed service_get_* helpers below.
# Each entry extends the shared fake_service template from test_service;
# updated_at relative to fake_utcnow() determines the reported up/down state.
fake_services_list = [
    dict(test_service.fake_service,
         binary='nova-scheduler',
         host='host1',
         id=1,
         disabled=True,
         topic='scheduler',
         updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
         disabled_reason='test1'),
    dict(test_service.fake_service,
         binary='nova-compute',
         host='host1',
         id=2,
         disabled=True,
         topic='compute',
         updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
         disabled_reason='test2'),
    dict(test_service.fake_service,
         binary='nova-scheduler',
         host='host2',
         id=3,
         disabled=False,
         topic='scheduler',
         updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
         disabled_reason=None),
    dict(test_service.fake_service,
         binary='nova-compute',
         host='host2',
         id=4,
         disabled=True,
         topic='compute',
         updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
         disabled_reason='test4'),
]
class FakeRequest(object):
    # Minimal request stand-in: admin context, no query-string filters.
    environ = {"nova.context": context.get_admin_context()}
    GET = {}
class FakeRequestWithService(object):
    # Request stand-in filtering on the service binary only.
    environ = {"nova.context": context.get_admin_context()}
    GET = {"binary": "nova-compute"}
class FakeRequestWithHost(object):
    # Request stand-in filtering on the host only.
    environ = {"nova.context": context.get_admin_context()}
    GET = {"host": "host1"}
class FakeRequestWithHostService(object):
    # Request stand-in filtering on both host and service binary.
    environ = {"nova.context": context.get_admin_context()}
    GET = {"host": "host1", "binary": "nova-compute"}
def fake_service_get_all(services):
    """Return a stub for HostAPI.service_get_all bound to *services*.

    The stub mirrors the real signature (context, filters, set_zones) and
    returns the canned list, optionally decorated with availability zones.

    :param services: list of service dicts the stub should return
    :returns: a ``service_get_all(context, filters=None, set_zones=False)``
        callable suitable for ``stubs.Set``
    """
    def service_get_all(context, filters=None, set_zones=False):
        # Bug fix: the original did `'availability_zone' in filters` with the
        # default filters=None, raising TypeError when called without filters.
        filters = filters or {}
        if set_zones or 'availability_zone' in filters:
            return availability_zones.set_availability_zones(context,
                                                             services)
        return services
    return service_get_all
def fake_db_api_service_get_all(context, disabled=None):
    """Stub for db.service_get_all returning the canned rows.

    The ``disabled`` filter is deliberately ignored by this fake.
    """
    return fake_services_list
def fake_db_service_get_by_host_binary(services):
    """Return a stub for db.service_get_by_args bound to *services*.

    The stub yields the first service matching host and binary, or raises
    HostBinaryNotFound when no row matches.
    """
    def service_get_by_host_binary(context, host, binary):
        match = next((svc for svc in services
                      if svc['host'] == host and svc['binary'] == binary),
                     None)
        if match is None:
            raise exception.HostBinaryNotFound(host=host, binary=binary)
        return match
    return service_get_by_host_binary
def fake_service_get_by_host_binary(context, host, binary):
    """Module-level stub delegating to the factory bound to the canned rows."""
    fake = fake_db_service_get_by_host_binary(fake_services_list)
    return fake(context, host, binary)
def _service_get_by_id(services, value):
for service in services:
if service['id'] == value:
return service
return None
def fake_db_service_update(services):
    """Return a stub for db.service_update bound to *services*.

    The stub looks the service up by id and raises ServiceNotFound for
    unknown ids; the passed ``values`` are not applied to the row.
    """
    def service_update(context, service_id, values):
        service = _service_get_by_id(services, service_id)
        if service is None:
            raise exception.ServiceNotFound(service_id=service_id)
        return service
    return service_update
def fake_service_update(context, service_id, values):
    """Module-level stub delegating to the update factory bound to the canned rows."""
    fake = fake_db_service_update(fake_services_list)
    return fake(context, service_id, values)
def fake_utcnow():
    """Fixed "current" UTC time making service up/down state deterministic."""
    return datetime.datetime(2012, 10, 29, 13, 42, 11)


# NOTE(review): mirrors the ``override_time`` attribute of the real
# timeutils.utcnow -- presumably read by callers after stubbing; confirm.
fake_utcnow.override_time = None
def fake_utcnow_ts():
    """Epoch timestamp (seconds, UTC) corresponding to fake_utcnow()."""
    d = fake_utcnow()
    return calendar.timegm(d.utctimetuple())
class ServicesTest(test.TestCase):
    """Tests for the v3 os-services API with DB and time layers stubbed out."""

    def setUp(self):
        super(ServicesTest, self).setUp()
        self.controller = services.ServiceController()
        # Freeze "now" so the up/down state derived from updated_at is stable.
        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
        self.stubs.Set(self.controller.host_api, "service_get_all",
                       fake_service_get_all(fake_services_list))
        self.stubs.Set(db, "service_get_by_args",
                       fake_db_service_get_by_host_binary(fake_services_list))
        self.stubs.Set(db, "service_update",
                       fake_db_service_update(fake_services_list))

    def test_services_list(self):
        """Unfiltered index returns all four canned services."""
        req = FakeRequest()
        res_dict = self.controller.index(req)

        response = {'services': [
            {'binary': 'nova-scheduler',
             'id': 1,
             'host': 'host1',
             'zone': 'internal',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
             'disabled_reason': 'test1'},
            {'binary': 'nova-compute',
             'host': 'host1',
             'id': 2,
             'zone': 'nova',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
             'disabled_reason': 'test2'},
            {'binary': 'nova-scheduler',
             'host': 'host2',
             'id': 3,
             'zone': 'internal',
             'status': 'enabled',
             'state': 'down',
             'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
             'disabled_reason': None},
            {'binary': 'nova-compute',
             'host': 'host2',
             'id': 4,
             'zone': 'nova',
             'status': 'disabled',
             'state': 'down',
             'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
             'disabled_reason': 'test4'}]}
        self.assertEqual(res_dict, response)

    def test_service_list_with_host(self):
        """?host= filter restricts the listing to that host's services."""
        req = FakeRequestWithHost()
        res_dict = self.controller.index(req)

        response = {'services': [
            {'binary': 'nova-scheduler',
             'host': 'host1',
             'id': 1,
             'zone': 'internal',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
             'disabled_reason': 'test1'},
            {'binary': 'nova-compute',
             'host': 'host1',
             'id': 2,
             'zone': 'nova',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
             'disabled_reason': 'test2'}]}
        self.assertEqual(res_dict, response)

    def test_service_list_with_service(self):
        """?binary= filter restricts the listing to that service binary."""
        req = FakeRequestWithService()
        res_dict = self.controller.index(req)

        response = {'services': [
            {'binary': 'nova-compute',
             'host': 'host1',
             'id': 2,
             'zone': 'nova',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
             'disabled_reason': 'test2'},
            {'binary': 'nova-compute',
             'host': 'host2',
             'id': 4,
             'zone': 'nova',
             'status': 'disabled',
             'state': 'down',
             'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
             'disabled_reason': 'test4'}]}
        self.assertEqual(res_dict, response)

    def test_service_list_with_host_service(self):
        """Combined host+binary filter yields the single matching service."""
        req = FakeRequestWithHostService()
        res_dict = self.controller.index(req)

        response = {'services': [
            {'binary': 'nova-compute',
             'host': 'host1',
             'id': 2,
             'zone': 'nova',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
             'disabled_reason': 'test2'}]}
        self.assertEqual(res_dict, response)

    def test_services_enable(self):
        def _service_update(context, service_id, values):
            # Enabling must clear any previously logged disable reason.
            self.assertIsNone(values['disabled_reason'])
            return dict(test_service.fake_service, id=service_id)

        self.stubs.Set(db, "service_update", _service_update)

        body = {'service': {'host': 'host1',
                            'binary': 'nova-compute'}}
        req = fakes.HTTPRequestV3.blank('/os-services/enable')
        res_dict = self.controller.update(req, "enable", body)
        self.assertEqual(res_dict['service']['status'], 'enabled')
        self.assertNotIn('disabled_reason', res_dict['service'])

    def test_services_enable_with_invalid_host(self):
        body = {'service': {'host': 'invalid',
                            'binary': 'nova-compute'}}
        req = fakes.HTTPRequestV3.blank('/os-services/enable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "enable",
                          body)

    def test_services_enable_with_invalid_binary(self):
        body = {'service': {'host': 'host1',
                            'binary': 'invalid'}}
        req = fakes.HTTPRequestV3.blank('/os-services/enable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "enable",
                          body)

    # This test is just to verify that the servicegroup API gets used when
    # calling this API.
    def test_services_with_exception(self):
        def dummy_is_up(self, dummy):
            raise KeyError()

        self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
        req = FakeRequestWithHostService()
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.index, req)

    def test_services_disable(self):
        req = fakes.HTTPRequestV3.blank('/os-services/disable')
        body = {'service': {'host': 'host1',
                            'binary': 'nova-compute'}}
        res_dict = self.controller.update(req, "disable", body)

        self.assertEqual(res_dict['service']['status'], 'disabled')
        self.assertNotIn('disabled_reason', res_dict['service'])

    def test_services_disable_with_invalid_host(self):
        body = {'service': {'host': 'invalid',
                            'binary': 'nova-compute'}}
        req = fakes.HTTPRequestV3.blank('/os-services/disable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "disable",
                          body)

    def test_services_disable_with_invalid_binary(self):
        body = {'service': {'host': 'host1',
                            'binary': 'invalid'}}
        req = fakes.HTTPRequestV3.blank('/os-services/disable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "disable",
                          body)

    def test_services_disable_log_reason(self):
        req = \
            fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
        body = {'service': {'host': 'host1',
                            'binary': 'nova-compute',
                            'disabled_reason': 'test-reason'}}
        res_dict = self.controller.update(req, "disable-log-reason", body)

        self.assertEqual(res_dict['service']['status'], 'disabled')
        self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')

    def test_mandatory_reason_field(self):
        # disable-log-reason without a reason must be rejected.
        req = \
            fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
        body = {'service': {'host': 'host1',
                            'binary': 'nova-compute'}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, "disable-log-reason", body)

    def test_invalid_reason_field(self):
        # Whitespace-only and >255-char reasons are invalid; normal text is ok.
        reason = ' '
        self.assertFalse(self.controller._is_valid_as_reason(reason))
        reason = 'a' * 256
        self.assertFalse(self.controller._is_valid_as_reason(reason))
        reason = 'it\'s a valid reason.'
        self.assertTrue(self.controller._is_valid_as_reason(reason))

    def test_services_delete(self):
        request = fakes.HTTPRequestV3.blank('/v3/os-services/1',
                                            use_admin_context=True)
        request.method = 'DELETE'

        with mock.patch.object(self.controller.host_api,
                               'service_delete') as service_delete:
            self.controller.delete(request, '1')
            service_delete.assert_called_once_with(
                request.environ['nova.context'], '1')
            self.assertEqual(self.controller.delete.wsgi_code, 204)

    def test_services_delete_not_found(self):
        request = fakes.HTTPRequestV3.blank('/v3/os-services/abc',
                                            use_admin_context=True)
        request.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, request, 'abc')
class ServicesCellsTest(test.TestCase):
    """Tests for the os-services API when the host API is backed by cells."""

    def setUp(self):
        super(ServicesCellsTest, self).setUp()

        host_api = cells_api.HostAPI()

        self.controller = services.ServiceController()
        self.controller.host_api = host_api

        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)

        services_list = []
        for service in fake_services_list:
            service = service.copy()
            # Cells-style ids ("cell1@<id>") mimic what the cells API returns.
            service['id'] = 'cell1@%d' % service['id']
            services_list.append(service)

        self.stubs.Set(host_api.cells_rpcapi, "service_get_all",
                       fake_service_get_all(services_list))

    def test_services_detail(self):
        req = FakeRequest()
        res_dict = self.controller.index(req)
        utc = iso8601.iso8601.Utc()
        response = {'services': [
            {'id': 'cell1@1',
             'binary': 'nova-scheduler',
             'host': 'host1',
             'zone': 'internal',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
                                             tzinfo=utc),
             'disabled_reason': 'test1'},
            {'id': 'cell1@2',
             'binary': 'nova-compute',
             'host': 'host1',
             'zone': 'nova',
             'status': 'disabled',
             'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
                                             tzinfo=utc),
             'disabled_reason': 'test2'},
            {'id': 'cell1@3',
             'binary': 'nova-scheduler',
             'host': 'host2',
             'zone': 'internal',
             'status': 'enabled',
             'state': 'down',
             'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
                                             tzinfo=utc),
             'disabled_reason': None},
            {'id': 'cell1@4',
             'binary': 'nova-compute',
             'host': 'host2',
             'zone': 'nova',
             'status': 'disabled',
             'state': 'down',
             'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
                                             tzinfo=utc),
             'disabled_reason': 'test4'}]}
        self.assertEqual(res_dict, response)
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import timedelta
from sqlalchemy import DDL
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.base import NEVER_SET, NO_VALUE
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime
from indico.core.db.sqlalchemy.util.models import populate_one_to_one_backrefs
from indico.util.date_time import overlaps
from indico.util.i18n import _
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
from indico.util.struct.enum import RichIntEnum
class TimetableEntryType(RichIntEnum):
    """Discriminator for the kind of object a timetable entry wraps."""
    __titles__ = [None, _("Session Block"), _("Contribution"), _("Break")]
    # entries are uppercase since `break` is a keyword...
    SESSION_BLOCK = 1
    CONTRIBUTION = 2
    BREAK = 3
def _make_check(type_, *cols):
    """Build a CHECK constraint for one entry type.

    For rows of *type_*, exactly the FK columns named in *cols* must be
    non-NULL and every other type-specific FK column must be NULL.
    """
    fk_columns = {'session_block_id', 'contribution_id', 'break_id'}
    required = fk_columns & set(cols)
    clauses = []
    for column in sorted(fk_columns - required):
        clauses.append('{} IS NULL'.format(column))
    for column in sorted(required):
        clauses.append('{} IS NOT NULL'.format(column))
    condition = 'type != {} OR ({})'.format(type_, ' AND '.join(clauses))
    return db.CheckConstraint(condition, 'valid_{}'.format(type_.name.lower()))
class TimetableEntry(db.Model):
    """A scheduled item (session block, contribution or break) in an event timetable."""

    __tablename__ = 'timetable_entries'

    @declared_attr
    def __table_args__(cls):
        # One CHECK constraint per entry type guarantees that exactly the FK
        # column matching ``type`` is set; session blocks must be top-level.
        return (db.Index('ix_timetable_entries_start_dt_desc', cls.start_dt.desc()),
                _make_check(TimetableEntryType.SESSION_BLOCK, 'session_block_id'),
                _make_check(TimetableEntryType.CONTRIBUTION, 'contribution_id'),
                _make_check(TimetableEntryType.BREAK, 'break_id'),
                db.CheckConstraint("type != {} OR parent_id IS NULL".format(TimetableEntryType.SESSION_BLOCK),
                                   'valid_parent'),
                {'schema': 'events'})

    id = db.Column(
        db.Integer,
        primary_key=True
    )
    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        index=True,
        nullable=False
    )
    # Self-referential FK: set for entries nested inside a session block.
    parent_id = db.Column(
        db.Integer,
        db.ForeignKey('events.timetable_entries.id'),
        index=True,
        nullable=True,
    )
    session_block_id = db.Column(
        db.Integer,
        db.ForeignKey('events.session_blocks.id'),
        index=True,
        unique=True,
        nullable=True
    )
    contribution_id = db.Column(
        db.Integer,
        db.ForeignKey('events.contributions.id'),
        index=True,
        unique=True,
        nullable=True
    )
    break_id = db.Column(
        db.Integer,
        db.ForeignKey('events.breaks.id'),
        index=True,
        unique=True,
        nullable=True
    )
    type = db.Column(
        PyIntEnum(TimetableEntryType),
        nullable=False
    )
    start_dt = db.Column(
        UTCDateTime,
        nullable=False
    )
    event = db.relationship(
        'Event',
        lazy=True,
        backref=db.backref(
            'timetable_entries',
            order_by=lambda: TimetableEntry.start_dt,
            cascade='all, delete-orphan',
            lazy='dynamic'
        )
    )
    session_block = db.relationship(
        'SessionBlock',
        lazy=False,
        backref=db.backref(
            'timetable_entry',
            cascade='all, delete-orphan',
            uselist=False,
            lazy=True
        )
    )
    contribution = db.relationship(
        'Contribution',
        lazy=False,
        backref=db.backref(
            'timetable_entry',
            cascade='all, delete-orphan',
            uselist=False,
            lazy=True
        )
    )
    break_ = db.relationship(
        'Break',
        cascade='all, delete-orphan',
        single_parent=True,
        lazy=False,
        backref=db.backref(
            'timetable_entry',
            cascade='all, delete-orphan',
            uselist=False,
            lazy=True
        )
    )
    children = db.relationship(
        'TimetableEntry',
        order_by='TimetableEntry.start_dt',
        lazy=True,
        backref=db.backref(
            'parent',
            remote_side=[id],
            lazy=True
        )
    )

    # relationship backrefs:
    # - parent (TimetableEntry.children)

    @property
    def object(self):
        """The wrapped session block, contribution or break (by ``type``)."""
        if self.type == TimetableEntryType.SESSION_BLOCK:
            return self.session_block
        elif self.type == TimetableEntryType.CONTRIBUTION:
            return self.contribution
        elif self.type == TimetableEntryType.BREAK:
            return self.break_

    @object.setter
    def object(self, value):
        from indico.modules.events.contributions import Contribution
        from indico.modules.events.sessions.models.blocks import SessionBlock
        from indico.modules.events.timetable.models.breaks import Break
        # Clear all three FKs first so only the matching one ends up set.
        self.session_block = self.contribution = self.break_ = None
        if isinstance(value, SessionBlock):
            self.session_block = value
        elif isinstance(value, Contribution):
            self.contribution = value
        elif isinstance(value, Break):
            self.break_ = value
        elif value is not None:
            raise TypeError('Unexpected object: {}'.format(value))

    @hybrid_property
    def duration(self):
        # The duration lives on the wrapped object, not on the entry itself.
        return self.object.duration if self.object is not None else None

    @duration.setter
    def duration(self, value):
        self.object.duration = value

    @duration.expression
    def duration(cls):
        from indico.modules.events.contributions import Contribution
        from indico.modules.events.sessions.models.blocks import SessionBlock
        from indico.modules.events.timetable.models.breaks import Break
        # SQL equivalent: pick the duration from the table matching ``type``
        # via correlated scalar subqueries.
        return db.case({
            TimetableEntryType.SESSION_BLOCK.value:
                db.select([SessionBlock.duration])
                .where(SessionBlock.id == cls.session_block_id)
                .correlate_except(SessionBlock)
                .as_scalar(),
            TimetableEntryType.CONTRIBUTION.value:
                db.select([Contribution.duration])
                .where(Contribution.id == cls.contribution_id)
                .correlate_except(Contribution)
                .as_scalar(),
            TimetableEntryType.BREAK.value:
                db.select([Break.duration])
                .where(Break.id == cls.break_id)
                .correlate_except(Break)
                .as_scalar(),
        }, value=cls.type)

    @hybrid_property
    def end_dt(self):
        if self.start_dt is None or self.duration is None:
            return None
        return self.start_dt + self.duration

    @end_dt.expression
    def end_dt(cls):
        return cls.start_dt + cls.duration

    @property
    def session_siblings(self):
        """Same-day siblings belonging to the same session (for session blocks)."""
        if self.type == TimetableEntryType.SESSION_BLOCK:
            return [x for x in self.siblings
                    if x.session_block and x.session_block.session == self.session_block.session]
        elif self.parent:
            return self.siblings
        else:
            return []

    @property
    def siblings(self):
        """Other entries on the same day sharing this entry's parent (or top level)."""
        from indico.modules.events.timetable.util import get_top_level_entries, get_nested_entries
        tzinfo = self.event.tzinfo
        day = self.start_dt.astimezone(tzinfo).date()
        siblings = (get_nested_entries(self.event)[self.parent_id]
                    if self.parent_id else
                    get_top_level_entries(self.event))
        return [x for x in siblings if x.start_dt.astimezone(tzinfo).date() == day and x.id != self.id]

    @property
    def siblings_query(self):
        """Query-based variant of :attr:`siblings` (same-day, same-parent entries)."""
        tzinfo = self.event.tzinfo
        day = self.start_dt.astimezone(tzinfo).date()
        criteria = (TimetableEntry.id != self.id,
                    TimetableEntry.parent == self.parent,
                    db.cast(TimetableEntry.start_dt.astimezone(tzinfo), db.Date) == day)
        return TimetableEntry.query.with_parent(self.event).filter(*criteria)

    @locator_property
    def locator(self):
        return dict(self.event.locator, entry_id=self.id)

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', 'type', 'start_dt', 'end_dt', _repr=self.object)

    def can_view(self, user):
        """Checks whether the user will see this entry in the timetable."""
        if self.type in (TimetableEntryType.CONTRIBUTION, TimetableEntryType.BREAK):
            return self.object.can_access(user)
        elif self.type == TimetableEntryType.SESSION_BLOCK:
            # A session block is visible if the block itself or any of its
            # contributions is accessible.
            if self.object.can_access(user):
                return True
            return any(x.can_access(user) for x in self.object.contributions)

    def extend_start_dt(self, start_dt):
        """Move the start earlier while keeping the end time fixed."""
        assert start_dt < self.start_dt
        extension = self.start_dt - start_dt
        self.start_dt = start_dt
        self.duration = self.duration + extension

    def extend_end_dt(self, end_dt):
        """Extend the duration so the entry ends at *end_dt* (never shrinks)."""
        diff = end_dt - self.end_dt
        if diff < timedelta(0):
            raise ValueError("New end_dt is before current end_dt.")
        self.duration += diff

    def extend_parent(self, by_start=True, by_end=True):
        """Extend start/end of parent objects if needed.

        No extension is performed for entries crossing a day boundary in the
        event timezone.

        :param by_start: Extend parent by start datetime.
        :param by_end: Extend parent by end datetime.
        """
        tzinfo = self.event.tzinfo
        if self.start_dt.astimezone(tzinfo).date() != self.end_dt.astimezone(tzinfo).date():
            return
        if self.parent is None:
            # Top-level entry: grow the event itself.
            if by_start and self.start_dt < self.event.start_dt:
                self.event.start_dt = self.start_dt
            if by_end and self.end_dt > self.event.end_dt:
                self.event.end_dt = self.end_dt
        else:
            # Nested entry: grow the parent, then recurse upwards if anything
            # actually changed.
            extended = False
            if by_start and self.start_dt < self.parent.start_dt:
                self.parent.extend_start_dt(self.start_dt)
                extended = True
            if by_end and self.end_dt > self.parent.end_dt:
                self.parent.extend_end_dt(self.end_dt)
                extended = True
            if extended:
                self.parent.extend_parent(by_start=by_start, by_end=by_end)

    def is_parallel(self, in_session=False):
        """Return whether this entry overlaps any (session) sibling in time."""
        siblings = self.siblings if not in_session else self.session_siblings
        for sibling in siblings:
            if overlaps((self.start_dt, self.end_dt), (sibling.start_dt, sibling.end_dt)):
                return True
        return False

    def move(self, start_dt):
        """Move the entry to start at a different time.

        This method automatically moves children of the entry to
        preserve their start time relative to the parent's start time.
        """
        if self.type == TimetableEntryType.SESSION_BLOCK:
            diff = start_dt - self.start_dt
            for child in self.children:
                child.start_dt += diff
        self.start_dt = start_dt

    def move_next_to(self, sibling, position='before'):
        """Place this entry immediately before or after a sibling entry."""
        if sibling not in self.siblings:
            raise ValueError("Not a sibling")
        if position not in ('before', 'after'):
            raise ValueError("Invalid position")
        if position == 'before':
            start_dt = sibling.start_dt - self.duration
        else:
            start_dt = sibling.end_dt
        self.move(start_dt)
@listens_for(TimetableEntry.__table__, 'after_create')
def _add_timetable_consistency_trigger(target, conn, **kw):
    # Install a deferred constraint trigger so the DB re-validates timetable
    # consistency at commit time for every insert/update on this table.
    sql = """
        CREATE CONSTRAINT TRIGGER consistent_timetable
        AFTER INSERT OR UPDATE
        ON {}
        DEFERRABLE INITIALLY DEFERRED
        FOR EACH ROW
        EXECUTE PROCEDURE events.check_timetable_consistency('timetable_entry');
    """.format(target.fullname)
    DDL(sql).execute(conn)
@listens_for(TimetableEntry.session_block, 'set')
def _set_session_block(target, value, *unused):
    # Keep ``type`` in sync when a session block is assigned to the entry.
    target.type = TimetableEntryType.SESSION_BLOCK
@listens_for(TimetableEntry.contribution, 'set')
def _set_contribution(target, value, *unused):
    # Keep ``type`` in sync when a contribution is assigned to the entry.
    target.type = TimetableEntryType.CONTRIBUTION
@listens_for(TimetableEntry.break_, 'set')
def _set_break(target, value, *unused):
    # Keep ``type`` in sync when a break is assigned to the entry.
    target.type = TimetableEntryType.BREAK
@listens_for(TimetableEntry.start_dt, 'set')
def _set_start_dt(target, value, oldvalue, *unused):
    # Register genuine start-time changes with the events module; skip the
    # very first assignment (attribute previously unset/unloaded).
    from indico.modules.events.util import register_time_change
    if oldvalue in (NEVER_SET, NO_VALUE):
        return
    if value != oldvalue and target.object is not None:
        register_time_change(target)


populate_one_to_one_backrefs(TimetableEntry, 'session_block', 'contribution', 'break_')
| |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Handlers used for custom attribute columns."""
from dateutil.parser import parse
from sqlalchemy import and_
from ggrc import models
from ggrc.converters import errors
from ggrc.converters.handlers import handlers
_types = models.CustomAttributeDefinition.ValidTypes
class CustomAttributeColumHandler(handlers.TextColumnHandler):
    """Custom attribute column handler.

    This is a handler for all types of custom attribute columns. It works
    with any custom attribute definition and with the mandatory flag on or
    off.
    """

    # Maps each CA type to the bound parser invoked by parse_item().
    _type_handlers = {
        _types.TEXT: lambda self: self.get_text_value(),
        _types.DATE: lambda self: self.get_date_value(),
        _types.DROPDOWN: lambda self: self.get_dropdown_value(),
        _types.CHECKBOX: lambda self: self.get_checkbox_value(),
        _types.RICH_TEXT: lambda self: self.get_rich_text_value(),
        _types.MAP: lambda self: self.get_person_value(),
    }

    def set_obj_attr(self):
        """Set object attribute method should do nothing for custom attributes.

        CA values set in insert_object() method.
        """
        if self.value is None:
            return
        cav = self._get_or_create_ca()
        cav.attribute_value = self.value
        # Mapped model instances (e.g. Person) are stored as a
        # (class name, object id) pair rather than the instance itself.
        if isinstance(cav.attribute_value, models.mixins.Identifiable):
            obj = cav.attribute_value
            cav.attribute_value = obj.__class__.__name__
            cav.attribute_object_id = obj.id

    def parse_item(self):
        """Parse raw value from csv file.

        Returns:
            CustomAttributeValue with the correct definition type and value,
            or None if the attribute definition could not be found.
        """
        self.definition = self.get_ca_definition()
        if self.definition is None:
            self.add_warning(errors.INVALID_ATTRIBUTE_WARNING,
                             column_name=self.display_name)
            return None
        # Attribute types may carry a suffix after ":" (e.g. "Map:Person");
        # only the prefix selects the value parser.
        type_ = self.definition.attribute_type.split(":")[0]
        value_handler = self._type_handlers[type_]
        return value_handler(self)

    def get_value(self):
        """Return the value of the custom attribute field.

        Returns:
            Text representation of the custom attribute value if it exists,
            "" if the definition is missing, otherwise None.
        """
        definition = self.get_ca_definition()
        if not definition:
            return ""
        for value in self.row_converter.obj.custom_attribute_values:
            if value.custom_attribute_id == definition.id:
                if value.custom_attribute.attribute_type.startswith("Map:"):
                    # Mapped objects are exported by email, falling back to slug.
                    if value.attribute_object_id:
                        obj = value.attribute_object
                        return getattr(obj, "email", getattr(obj, "slug", None))
                elif value.custom_attribute.attribute_type == _types.CHECKBOX:
                    # Checkbox values are stored as "0"/"1"; export TRUE/FALSE.
                    attr_val = value.attribute_value if value.attribute_value else u"0"
                    attr_val = int(attr_val)
                    return str(bool(attr_val)).upper()
                else:
                    return value.attribute_value
        return None

    def _get_or_create_ca(self):
        """Get a CA value object for the current definition.

        This function returns a custom attribute value object that already
        existed or creates a new one.

        Returns:
            custom attribute value object, or None when either the target
            object or the definition is missing.
        """
        ca_definition = self.get_ca_definition()
        if not self.row_converter.obj or not ca_definition:
            return None
        for ca_value in self.row_converter.obj.custom_attribute_values:
            if ca_value.custom_attribute_id == ca_definition.id:
                return ca_value
        ca_value = models.CustomAttributeValue(
            custom_attribute=ca_definition,
            attributable=self.row_converter.obj,
        )
        return ca_value

    def insert_object(self):
        """Add custom attribute objects to db session."""

    def get_date_value(self):
        """Get date value from input string date."""
        if not self.mandatory and self.raw_value == "":
            return None  # ignore empty fields
        value = None
        try:
            value = parse(self.raw_value).strftime(
                models.CustomAttributeValue.DATE_FORMAT_ISO,
            )
        except (TypeError, ValueError):
            self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        if self.mandatory and value is None:
            self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
        return value

    def get_checkbox_value(self):
        """Get boolean value for checkbox fields."""
        if not self.mandatory and self.raw_value == "":
            return None  # ignore empty fields
        value = self.raw_value.lower() in ("yes", "true")
        # Anything outside yes/true/no/false is an invalid checkbox value.
        if self.raw_value.lower() not in ("yes", "true", "no", "false"):
            self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
            value = None
        if self.mandatory and value is None:
            self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
        return value

    def get_dropdown_value(self):
        """Get valid value of the dropdown field."""
        choices_list = self.definition.multi_choice_options.split(",")
        valid_choices = [val.strip() for val in choices_list]
        # Matching is case-insensitive but the canonical casing is returned.
        choice_map = {choice.lower(): choice for choice in valid_choices}
        value = choice_map.get(self.raw_value.lower())
        if value is None and self.raw_value != "":
            self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        if self.mandatory and value is None:
            self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
        return value

    def get_text_value(self):
        """Get text value with normalized whitespace."""
        if not self.mandatory and self.raw_value == "":
            return None  # ignore empty fields
        value = self.clean_whitespaces(self.raw_value)
        if self.mandatory and not value:
            self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
        return value

    def get_rich_text_value(self):
        """Get rich text value; the raw value is passed through unchanged."""
        if not self.mandatory and self.raw_value == "":
            return None  # ignore empty fields
        if self.mandatory and not self.raw_value:
            self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
        return self.raw_value

    def get_person_value(self):
        """Fetch a person based on the email text in column.

        Returns:
            Person model instance, or None if not found / value missing.
        """
        if not self.mandatory and self.raw_value == "":
            return None  # ignore empty fields
        if self.mandatory and not self.raw_value:
            self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
            return
        value = models.Person.query.filter_by(email=self.raw_value).first()
        if self.mandatory and not value:
            self.add_error(errors.WRONG_VALUE, column_name=self.display_name)
        return value

    def get_ca_definition(self):
        """Get custom attribute definition."""
        # Key (None, title) selects definitions that are not bound to a
        # specific object id (global-scope definitions).
        cache = self.row_converter.block_converter.get_ca_definitions_cache()
        return cache.get((None, self.display_name))
class ObjectCaColumnHandler(CustomAttributeColumHandler):
    """Handler for object level custom attributes."""

    def set_value(self):
        """No-op: the value is set lazily inside set_obj_attr."""
        pass

    def set_obj_attr(self):
        """Parse the item and set the current value.

        This is a hack to get set_value on this handler called after all
        other values have already been set.
        """
        if self.dry_run:
            return
        self.value = self.parse_item()
        super(ObjectCaColumnHandler, self).set_obj_attr()

    def get_ca_definition(self):
        """Get custom attribute definition for a specific object."""
        obj_id = self.row_converter.obj.id
        if obj_id is None:
            return None
        # Prefer the block-level cache; fall back to a direct DB query.
        definitions = self.row_converter.block_converter.get_ca_definitions_cache()
        cached = definitions.get((obj_id, self.display_name))
        if cached:
            return cached
        cad = models.CustomAttributeDefinition
        return cad.query.filter(and_(
            cad.definition_id == obj_id,
            cad.title == self.display_name
        )).first()
| |
import pytest
import io
import os
import json
import base64
import hashlib
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.core.provider import build_url
from waterbutler.providers.github import GitHubProvider
from waterbutler.providers.github import settings as github_settings
from waterbutler.providers.github.metadata import GitHubRevision
from waterbutler.providers.github.metadata import GitHubFileTreeMetadata
from waterbutler.providers.github.metadata import GitHubFolderTreeMetadata
from waterbutler.providers.github.metadata import GitHubFileContentMetadata
from waterbutler.providers.github.metadata import GitHubFolderContentMetadata
@pytest.fixture
def auth():
    """Auth payload identifying the user making requests."""
    return {
        'name': 'cat',
        'email': 'cat@cat.com',
    }


@pytest.fixture
def credentials():
    """Provider credentials holding the GitHub access token."""
    return {'token': 'naps'}


@pytest.fixture
def settings():
    """Provider settings selecting the repository to operate on."""
    return {
        'owner': 'cat',
        'repo': 'food',
    }


@pytest.fixture
def file_content():
    """Raw bytes used as the uploaded file body."""
    return b'hungry'


@pytest.fixture
def file_like(file_content):
    """In-memory file-like object wrapping file_content."""
    return io.BytesIO(file_content)


@pytest.fixture
def file_stream(file_like):
    """WaterButler stream reader wrapping the file-like object."""
    return streams.FileStreamReader(file_like)
@pytest.fixture
def upload_response():
    """Canned GitHub contents-API response for a successful file PUT."""
    return {
        "content": {
            "name": "hello.txt",
            "path": "notes/hello.txt",
            "sha": "95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "size": 9,
            "url": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
            "html_url": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt",
            "git_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "type": "file",
            "_links": {
                "self": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
                "git": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
                "html": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt"
            }
        },
        "commit": {
            "sha": "7638417db6d59f3c431d3e1f261cc637155684cd",
            "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/7638417db6d59f3c431d3e1f261cc637155684cd",
            "html_url": "https://github.com/octocat/Hello-World/git/commit/7638417db6d59f3c431d3e1f261cc637155684cd",
            "author": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "committer": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "message": "my commit message",
            "tree": {
                "url": "https://api.github.com/repos/octocat/Hello-World/git/trees/691272480426f78a0138979dd3ce63b77f706feb",
                "sha": "691272480426f78a0138979dd3ce63b77f706feb"
            },
            "parents": [
                {
                    "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "html_url": "https://github.com/octocat/Hello-World/git/commit/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "sha": "1acc419d4d6a9ce985db7be48c6349a0475975b5"
                }
            ]
        }
    }
@pytest.fixture
def create_folder_response():
    """Canned GitHub response for the .gitkeep PUT that creates a folder."""
    return {
        "content": {
            "name": ".gitkeep",
            "path": "i/like/trains/.gitkeep",
            "sha": "95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "size": 9,
            "url": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
            "html_url": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt",
            "git_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "type": "file",
            "_links": {
                "self": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
                "git": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
                "html": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt"
            }
        },
        "commit": {
            "sha": "7638417db6d59f3c431d3e1f261cc637155684cd",
            "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/7638417db6d59f3c431d3e1f261cc637155684cd",
            "html_url": "https://github.com/octocat/Hello-World/git/commit/7638417db6d59f3c431d3e1f261cc637155684cd",
            "author": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "committer": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "message": "my commit message",
            "tree": {
                "url": "https://api.github.com/repos/octocat/Hello-World/git/trees/691272480426f78a0138979dd3ce63b77f706feb",
                "sha": "691272480426f78a0138979dd3ce63b77f706feb"
            },
            "parents": [
                {
                    "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "html_url": "https://github.com/octocat/Hello-World/git/commit/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "sha": "1acc419d4d6a9ce985db7be48c6349a0475975b5"
                }
            ]
        }
    }
@pytest.fixture
def repo_metadata():
    """Canned GitHub 'get repository' response for octocat/Hello-World.

    The provider fixture reads 'default_branch' and 'permissions' from this.
    """
    return {
        'full_name': 'octocat/Hello-World',
        'permissions': {
            'push': False,
            'admin': False,
            'pull': True
        },
        'has_downloads': True,
        'notifications_url': 'https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}',
        'releases_url': 'https://api.github.com/repos/octocat/Hello-World/releases{/id}',
        'downloads_url': 'https://api.github.com/repos/octocat/Hello-World/downloads',
        'merges_url': 'https://api.github.com/repos/octocat/Hello-World/merges',
        'owner': {
            'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
            'organizations_url': 'https://api.github.com/users/octocat/orgs',
            'type': 'User',
            'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
            'url': 'https://api.github.com/users/octocat',
            'html_url': 'https://github.com/octocat',
            'received_events_url': 'https://api.github.com/users/octocat/received_events',
            'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
            'site_admin': False,
            'gravatar_id': '',
            'repos_url': 'https://api.github.com/users/octocat/repos',
            'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
            'id': 583231,
            'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
            'login': 'octocat',
            'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
            'followers_url': 'https://api.github.com/users/octocat/followers'
        },
        'html_url': 'https://github.com/octocat/Hello-World',
        'comments_url': 'https://api.github.com/repos/octocat/Hello-World/comments{/number}',
        'git_url': 'git://github.com/octocat/Hello-World.git',
        'ssh_url': 'git@github.com:octocat/Hello-World.git',
        'language': None,
        'pulls_url': 'https://api.github.com/repos/octocat/Hello-World/pulls{/number}',
        'subscribers_count': 1850,
        'forks_count': 1085,
        'watchers_count': 1407,
        'id': 1296269,
        'keys_url': 'https://api.github.com/repos/octocat/Hello-World/keys{/key_id}',
        'default_branch': 'master',
        'stargazers_count': 1407,
        'tags_url': 'https://api.github.com/repos/octocat/Hello-World/tags',
        'clone_url': 'https://github.com/octocat/Hello-World.git',
        'homepage': '',
        'forks_url': 'https://api.github.com/repos/octocat/Hello-World/forks',
        'branches_url': 'https://api.github.com/repos/octocat/Hello-World/branches{/branch}',
        'url': 'https://api.github.com/repos/octocat/Hello-World',
        'contents_url': 'https://api.github.com/repos/octocat/Hello-World/contents/{+path}',
        'hooks_url': 'https://api.github.com/repos/octocat/Hello-World/hooks',
        'git_tags_url': 'https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}',
        'statuses_url': 'https://api.github.com/repos/octocat/Hello-World/statuses/{sha}',
        'trees_url': 'https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}',
        'contributors_url': 'https://api.github.com/repos/octocat/Hello-World/contributors',
        'open_issues': 126,
        'has_pages': False,
        'pushed_at': '2014-06-11T21:51:23Z',
        'network_count': 1085,
        'commits_url': 'https://api.github.com/repos/octocat/Hello-World/commits{/sha}',
        'git_commits_url': 'https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}',
        'svn_url': 'https://github.com/octocat/Hello-World',
        'forks': 1085,
        'fork': False,
        'subscription_url': 'https://api.github.com/repos/octocat/Hello-World/subscription',
        'archive_url': 'https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}',
        'subscribers_url': 'https://api.github.com/repos/octocat/Hello-World/subscribers',
        'description': 'This your first repo!',
        'blobs_url': 'https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}',
        'teams_url': 'https://api.github.com/repos/octocat/Hello-World/teams',
        'compare_url': 'https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}',
        'issues_url': 'https://api.github.com/repos/octocat/Hello-World/issues{/number}',
        'stargazers_url': 'https://api.github.com/repos/octocat/Hello-World/stargazers',
        'private': False,
        'created_at': '2011-01-26T19:01:12Z',
        'issue_comment_url': 'https://api.github.com/repos/octocat/Hello-World/issues/comments/{number}',
        'has_issues': True,
        'milestones_url': 'https://api.github.com/repos/octocat/Hello-World/milestones{/number}',
        'issue_events_url': 'https://api.github.com/repos/octocat/Hello-World/issues/events{/number}',
        'languages_url': 'https://api.github.com/repos/octocat/Hello-World/languages',
        'name': 'Hello-World',
        'mirror_url': None,
        'has_wiki': True,
        'updated_at': '2014-12-12T16:45:49Z',
        'watchers': 1407,
        'open_issues_count': 126,
        'labels_url': 'https://api.github.com/repos/octocat/Hello-World/labels{/name}',
        'collaborators_url': 'https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}',
        'assignees_url': 'https://api.github.com/repos/octocat/Hello-World/assignees{/user}',
        'size': 558,
        'git_refs_url': 'https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}',
        'events_url': 'https://api.github.com/repos/octocat/Hello-World/events'
    }
@pytest.fixture
def branch_metadata():
    """Canned GitHub 'get branch' response for octocat/Hello-World master."""
    return {
        'commit': {
            'html_url': 'https://github.com/octocat/Hello-World/commit/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
            'url': 'https://api.github.com/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
            'committer': {
                'html_url': 'https://github.com/octocat',
                'login': 'octocat',
                'type': 'User',
                'gravatar_id': '',
                'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
                'received_events_url': 'https://api.github.com/users/octocat/received_events',
                'id': 583231,
                'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
                'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
                'organizations_url': 'https://api.github.com/users/octocat/orgs',
                'url': 'https://api.github.com/users/octocat',
                'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
                'followers_url': 'https://api.github.com/users/octocat/followers',
                'repos_url': 'https://api.github.com/users/octocat/repos',
                'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
                'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
                'site_admin': False
            },
            'parents': [{
                'html_url': 'https://github.com/octocat/Hello-World/commit/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e',
                'url': 'https://api.github.com/repos/octocat/Hello-World/commits/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e',
                'sha': '553c2077f0edc3d5dc5d17262f6aa498e69d6f8e'
            }, {
                'html_url': 'https://github.com/octocat/Hello-World/commit/762941318ee16e59dabbacb1b4049eec22f0d303',
                'url': 'https://api.github.com/repos/octocat/Hello-World/commits/762941318ee16e59dabbacb1b4049eec22f0d303',
                'sha': '762941318ee16e59dabbacb1b4049eec22f0d303'
            }],
            'sha': '7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
            'author': {
                'html_url': 'https://github.com/octocat',
                'login': 'octocat',
                'type': 'User',
                'gravatar_id': '',
                'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
                'received_events_url': 'https://api.github.com/users/octocat/received_events',
                'id': 583231,
                'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
                'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
                'organizations_url': 'https://api.github.com/users/octocat/orgs',
                'url': 'https://api.github.com/users/octocat',
                'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
                'followers_url': 'https://api.github.com/users/octocat/followers',
                'repos_url': 'https://api.github.com/users/octocat/repos',
                'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
                'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
                'site_admin': False
            },
            'comments_url': 'https://api.github.com/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d/comments',
            'commit': {
                'url': 'https://api.github.com/repos/octocat/Hello-World/git/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
                'message': 'Merge pull request #6 from Spaceghost/patch-1\n\nNew line at end of file.',
                'committer': {
                    'email': 'octocat@nowhere.com',
                    'date': '2012-03-06T23:06:50Z',
                    'name': 'The Octocat'
                },
                'tree': {
                    'url': 'https://api.github.com/repos/octocat/Hello-World/git/trees/b4eecafa9be2f2006ce1b709d6857b07069b4608',
                    'sha': 'b4eecafa9be2f2006ce1b709d6857b07069b4608'
                },
                'comment_count': 51,
                'author': {
                    'email': 'octocat@nowhere.com',
                    'date': '2012-03-06T23:06:50Z',
                    'name': 'The Octocat'
                }
            }
        },
        '_links': {
            'html': 'https://github.com/octocat/Hello-World/tree/master',
            'self': 'https://api.github.com/repos/octocat/Hello-World/branches/master'
        },
        'name': 'master'
    }
@pytest.fixture
def content_repo_metadata_root():
    """Canned GitHub contents-API listing for a repository root.

    Contains one empty file, one directory and one non-empty file.
    """
    return [
        {
            'path': 'file.txt',
            'type': 'file',
            'html_url': 'https://github.com/icereval/test/blob/master/file.txt',
            'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
            'url': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
            'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
            '_links': {
                'git': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
                'self': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
                'html': 'https://github.com/icereval/test/blob/master/file.txt'
            },
            'name': 'file.txt',
            'size': 0,
            'download_url': 'https://raw.githubusercontent.com/icereval/test/master/file.txt'
        }, {
            'path': 'level1',
            'type': 'dir',
            'html_url': 'https://github.com/icereval/test/tree/master/level1',
            'git_url': 'https://api.github.com/repos/icereval/test/git/trees/bc1087ebfe8354a684bf9f8b75517784143dde86',
            'url': 'https://api.github.com/repos/icereval/test/contents/level1?ref=master',
            'sha': 'bc1087ebfe8354a684bf9f8b75517784143dde86',
            '_links': {
                'git': 'https://api.github.com/repos/icereval/test/git/trees/bc1087ebfe8354a684bf9f8b75517784143dde86',
                'self': 'https://api.github.com/repos/icereval/test/contents/level1?ref=master',
                'html': 'https://github.com/icereval/test/tree/master/level1'
            },
            'name': 'level1',
            'size': 0,
            'download_url': None
        }, {
            'path': 'test.rst',
            'type': 'file',
            'html_url': 'https://github.com/icereval/test/blob/master/test.rst',
            'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
            'url': 'https://api.github.com/repos/icereval/test/contents/test.rst?ref=master',
            'sha': 'ca39bcbf849231525ce9e775935fcb18ed477b5a',
            '_links': {
                'git': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
                'self': 'https://api.github.com/repos/icereval/test/contents/test.rst?ref=master',
                'html': 'https://github.com/icereval/test/blob/master/test.rst'
            },
            'name': 'test.rst',
            'size': 190,
            'download_url': 'https://raw.githubusercontent.com/icereval/test/master/test.rst'
        }
    ]
@pytest.fixture
def repo_tree_metadata_root():
    """Canned GitHub git/trees response mirroring content_repo_metadata_root."""
    return {
        'tree': [
            {
                'url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
                'size': 0,
                'type': 'blob',
                'path': 'file.txt',
                'mode': '100644',
                'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
            },
            {
                'type': 'tree',
                'url': 'https://api.github.com/repos/icereval/test/git/trees/05353097666f449344b7f69036c70a52dc504088',
                'path': 'level1',
                'mode': '040000',
                'sha': '05353097666f449344b7f69036c70a52dc504088'
            },
            {
                'url': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
                'size': 190,
                'type': 'blob',
                'path': 'test.rst',
                'mode': '100644',
                'sha': 'ca39bcbf849231525ce9e775935fcb18ed477b5a'
            }
        ],
        'url': 'https://api.github.com/repos/icereval/test/git/trees/cd83e4a08261a54f1c4630fbb1de34d1e48f0c8a',
        'truncated': False,
        'sha': 'cd83e4a08261a54f1c4630fbb1de34d1e48f0c8a'
    }
@pytest.fixture
def content_repo_metadata_root_file_txt():
    """Canned GitHub contents-API response for the empty file.txt blob."""
    return {
        '_links': {
            'git': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
            'self': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
            'html': 'https://github.com/icereval/test/blob/master/file.txt'
        },
        'content': '',
        'url': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
        'html_url': 'https://github.com/icereval/test/blob/master/file.txt',
        'download_url': 'https://raw.githubusercontent.com/icereval/test/master/file.txt',
        'name': 'file.txt',
        'type': 'file',
        'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
        'encoding': 'base64',
        'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
        'path': 'file.txt',
        'size': 0
    }
@pytest.fixture
def provider(auth, credentials, settings, repo_metadata):
    """GitHubProvider pre-seeded with canned repo metadata.

    Setting _repo/default_branch directly avoids a network round trip
    that would otherwise fetch the repository metadata.
    """
    provider = GitHubProvider(auth, credentials, settings)
    provider._repo = repo_metadata
    provider.default_branch = repo_metadata['default_branch']
    return provider
class TestHelpers:
    """Tests for small GitHubProvider helper utilities.

    Fix: these tests are declared ``async`` but were missing the
    ``@pytest.mark.asyncio`` marker used by every other async test in
    this module, so pytest never actually executed them.
    """

    @pytest.mark.asyncio
    async def test_build_repo_url(self, provider, settings):
        # build_repo_url must prefix the path with the configured owner/repo.
        expected = provider.build_url('repos', settings['owner'], settings['repo'], 'contents')
        assert provider.build_repo_url('contents') == expected

    @pytest.mark.asyncio
    async def test_committer(self, auth, provider):
        # The committer dict is derived from the auth name/email.
        expected = {
            'name': auth['name'],
            'email': auth['email'],
        }
        assert provider.committer == expected
class TestValidatePath:
    """Tests for GitHubProvider.validate_path identifier handling.

    The path identifier is a (ref, file_sha) tuple; the ref defaults to
    the provider's default branch and can be overridden per request.
    """

    @pytest.mark.asyncio
    async def test_validate_path(self, provider):
        # No kwargs: identifier falls back to (default_branch, None).
        path = await provider.validate_path('/this/is/my/path')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == (provider.default_branch, None)
        assert path.parts[0].identifier == (provider.default_branch, None)

    @pytest.mark.asyncio
    async def test_validate_path_passes_branch(self, provider):
        # An explicit branch kwarg overrides the default branch.
        path = await provider.validate_path('/this/is/my/path', branch='NotMaster')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == ('NotMaster', None)
        assert path.parts[0].identifier == ('NotMaster', None)

    @pytest.mark.asyncio
    async def test_validate_path_passes_ref(self, provider):
        # A ref kwarg behaves the same as branch.
        path = await provider.validate_path('/this/is/my/path', ref='NotMaster')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == ('NotMaster', None)
        assert path.parts[0].identifier == ('NotMaster', None)

    @pytest.mark.asyncio
    async def test_validate_path_passes_file_sha(self, provider):
        # fileSha sets only the leaf's sha; parents keep (branch, None).
        path = await provider.validate_path('/this/is/my/path', fileSha='Thisisasha')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == (provider.default_branch, 'Thisisasha')
        assert path.parts[0].identifier == (provider.default_branch, None)
class TestCRUD:
    """Download tests for GitHubProvider.

    All HTTP traffic is stubbed with aiohttpretty; the blob sha used for
    the download URL is resolved through the mocked commits + trees
    endpoints. The commented-out upload/delete tests below are disabled
    legacy cases kept for reference.
    """

    # @pytest.mark.asyncio
    # @pytest.mark.aiohttpretty
    # async def test_download_by_file_sha(self, provider, content_repo_metadata_root_file_txt):
    #     ref = hashlib.sha1().hexdigest()
    #     url = provider.build_repo_url('git', 'refs', 'heads', 'master')
    #     path = WaterButlerPath('/file.txt', _ids=(None, ('master', ref)))
    #     aiohttpretty.register_uri('GET', url, body=b'delicious')
    #     aiohttpretty.register_json_uri('GET', url, body={'object': {'sha': ref}})
    #     result = await provider.download(path)
    #     content = await result.read()
    #     assert content == b'delicious'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_by_path(self, provider, repo_tree_metadata_root):
        # Default ref: the commit listing resolves the tree sha, the tree
        # listing resolves the blob sha, then the blob body is streamed.
        ref = hashlib.sha1().hexdigest()
        file_sha = repo_tree_metadata_root['tree'][0]['sha']
        path = await provider.validate_path('/file.txt')
        url = provider.build_repo_url('git', 'blobs', file_sha)
        tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
        latest_sha_url = provider.build_repo_url('git', 'refs', 'heads', path.identifier[0])
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'), sha=path.identifier[0])
        aiohttpretty.register_uri('GET', url, body=b'delicious')
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
        aiohttpretty.register_json_uri('GET', commit_url, body=[{'commit': {'tree': {'sha': ref}}}])
        result = await provider.download(path)
        content = await result.read()
        assert content == b'delicious'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_by_path_ref_branch(self, provider, repo_tree_metadata_root):
        # Same flow, but the path was validated against a non-default branch.
        ref = hashlib.sha1().hexdigest()
        file_sha = repo_tree_metadata_root['tree'][0]['sha']
        path = await provider.validate_path('/file.txt', branch='other_branch')
        url = provider.build_repo_url('git', 'blobs', file_sha)
        tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'), sha=path.identifier[0])
        aiohttpretty.register_uri('GET', url, body=b'delicious')
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
        aiohttpretty.register_json_uri('GET', commit_url, body=[{'commit': {'tree': {'sha': ref}}}])
        result = await provider.download(path)
        content = await result.read()
        assert content == b'delicious'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_by_path_revision(self, provider, repo_tree_metadata_root):
        # An explicit revision argument overrides the path's ref for the
        # commit lookup.
        ref = hashlib.sha1().hexdigest()
        file_sha = repo_tree_metadata_root['tree'][0]['sha']
        path = await provider.validate_path('/file.txt', branch='other_branch')
        url = provider.build_repo_url('git', 'blobs', file_sha)
        tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'), sha='Just a test')
        aiohttpretty.register_uri('GET', url, body=b'delicious')
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
        aiohttpretty.register_json_uri('GET', commit_url, body=[{'commit': {'tree': {'sha': ref}}}])
        result = await provider.download(path, revision='Just a test')
        content = await result.read()
        assert content == b'delicious'

    # @pytest.mark.asyncio
    # @pytest.mark.aiohttpretty
    # async def test_download_bad_status(self, provider):
    #     ref = hashlib.sha1().hexdigest()
    #     url = provider.build_repo_url('git', 'blobs', ref)
    #     aiohttpretty.register_uri('GET', url, body=b'delicious', status=418)
    #     with pytest.raises(exceptions.DownloadError):
    #         await provider.download('', fileSha=ref)

    # @pytest.mark.asyncio
    # @pytest.mark.aiohttpretty
    # async def test_upload_create(self, provider, upload_response, file_content, file_stream):
    #     message = 'so hungry'
    #     path = upload_response['content']['path'][::-1]
    #     metadata_url = provider.build_repo_url('contents', os.path.dirname(path))
    #     aiohttpretty.register_json_uri('GET', metadata_url, body=[upload_response['content']], status=200)
    #     upload_url = provider.build_repo_url('contents', path)
    #     aiohttpretty.register_json_uri('PUT', upload_url, body=upload_response, status=201)
    #     await provider.upload(file_stream, path, message)
    #     expected_data = {
    #         'path': path,
    #         'message': message,
    #         'content': base64.b64encode(file_content).decode('utf-8'),
    #         'committer': provider.committer,
    #     }
    #     assert aiohttpretty.has_call(method='GET', uri=metadata_url)
    #     assert aiohttpretty.has_call(method='PUT', uri=upload_url, data=json.dumps(expected_data))
    #
    # @pytest.mark.asyncio
    # @pytest.mark.aiohttpretty
    # async def test_upload_update(self, provider, upload_response, file_content, file_stream):
    #     message = 'so hungry'
    #     sha = upload_response['content']['sha']
    #     path = '/' + upload_response['content']['path']
    #
    #     upload_url = provider.build_repo_url('contents', provider.build_path(path))
    #     metadata_url = provider.build_repo_url('contents', os.path.dirname(path))
    #
    #     aiohttpretty.register_json_uri('PUT', upload_url, body=upload_response)
    #     aiohttpretty.register_json_uri('GET', metadata_url, body=[upload_response['content']])
    #
    #     await provider.upload(file_stream, path, message)
    #
    #     expected_data = {
    #         'path': path,
    #         'message': message,
    #         'content': base64.b64encode(file_content).decode('utf-8'),
    #         'committer': provider.committer,
    #         'sha': sha,
    #     }
    #
    #     assert aiohttpretty.has_call(method='GET', uri=metadata_url)
    #     assert aiohttpretty.has_call(method='PUT', uri=upload_url, data=json.dumps(expected_data))

    # @pytest.mark.asyncio
    # @pytest.mark.aiohttpretty
    # async def test_delete_with_branch(self, provider, repo_contents):
    #     path = os.path.join('/', repo_contents[0]['path'])
    #     sha = repo_contents[0]['sha']
    #     branch = 'master'
    #     message = 'deleted'
    #     url = provider.build_repo_url('contents', path)
    #     aiohttpretty.register_json_uri('DELETE', url)
    #     await provider.delete(path, message, sha, branch=branch)
    #     expected_data = {
    #         'message': message,
    #         'sha': sha,
    #         'committer': provider.committer,
    #         'branch': branch,
    #     }
    #
    #     assert aiohttpretty.has_call(method='DELETE', uri=url, data=json.dumps(expected_data))
    #
    # @pytest.mark.asyncio
    # @pytest.mark.aiohttpretty
    # async def test_delete_without_branch(self, provider, repo_contents):
    #     path = repo_contents[0]['path']
    #     sha = repo_contents[0]['sha']
    #     message = 'deleted'
    #     url = provider.build_repo_url('contents', path)
    #     aiohttpretty.register_json_uri('DELETE', url)
    #     await provider.delete(path, message, sha)
    #     expected_data = {
    #         'message': message,
    #         'sha': sha,
    #         'committer': provider.committer,
    #     }
    #
    #     assert aiohttpretty.has_call(method='DELETE', uri=url, data=json.dumps(expected_data))
class TestMetadata:

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file(self, provider, repo_metadata, repo_tree_metadata_root):
        """File metadata is assembled from the git tree plus the latest commit."""
        tree_sha = hashlib.sha1().hexdigest()
        path = await provider.validate_path('/file.txt')

        tree_url = provider.build_repo_url('git', 'trees', tree_sha, recursive=1)
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'), sha=path.identifier[0])
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
        aiohttpretty.register_json_uri('GET', commit_url, body=[{
            'commit': {
                'tree': {'sha': tree_sha},
                'author': {'date': 'this is totally date'}
            },
        }])

        result = await provider.metadata(path)

        expected_item = repo_tree_metadata_root['tree'][0]
        expected = GitHubFileTreeMetadata(
            expected_item,
            web_view=provider._web_view(path=path),
            commit={'tree': {'sha': tree_sha}, 'author': {'date': 'this is totally date'}},
        )
        assert result == expected

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_doesnt_exist(self, provider, repo_metadata, repo_tree_metadata_root):
        """An empty commit listing for the path raises NotFoundError."""
        tree_sha = hashlib.sha1().hexdigest()
        path = await provider.validate_path('/file.txt')

        tree_url = provider.build_repo_url('git', 'trees', tree_sha, recursive=1)
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'), sha=path.identifier[0])
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
        aiohttpretty.register_json_uri('GET', commit_url, body=[])

        with pytest.raises(exceptions.NotFoundError):
            await provider.metadata(path)

    # TODO: Additional Tests
    # async def test_metadata_root_file_txt_branch(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
    # async def test_metadata_root_file_txt_commit_sha(self, provider, repo_metadata, branch_metadata, repo_metadata_root):

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_root(self, provider, repo_metadata, content_repo_metadata_root):
        """Root listing maps each entry to folder or file metadata by type."""
        path = await provider.validate_path('/')
        url = provider.build_repo_url('contents', path.path, ref=provider.default_branch)
        aiohttpretty.register_json_uri('GET', url, body=content_repo_metadata_root)

        result = await provider.metadata(path)

        expected = [
            GitHubFolderContentMetadata(entry)
            if entry['type'] == 'dir'
            else GitHubFileContentMetadata(entry, web_view=entry['html_url'])
            for entry in content_repo_metadata_root
        ]
        assert result == expected

    # TODO: Additional Tests
    # async def test_metadata_non_root_folder(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
    # async def test_metadata_non_root_folder_branch(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
    # async def test_metadata_non_root_folder_commit_sha(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
class TestCreateFolder:

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_errors_out(self, provider, repo_metadata):
        """An arbitrary 400 from the API surfaces as CreateFolderError."""
        path = await provider.validate_path('/Imarealboy/')
        url = provider.build_repo_url('contents', path.child('.gitkeep').path)
        aiohttpretty.register_uri('PUT', url, status=400)

        with pytest.raises(exceptions.CreateFolderError) as exc_info:
            await provider.create_folder(path)

        assert exc_info.value.code == 400

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_must_be_folder(self, provider, repo_metadata):
        """A path without a trailing slash is rejected before any request."""
        path = await provider.validate_path('/Imarealboy')
        with pytest.raises(exceptions.CreateFolderError):
            await provider.create_folder(path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_already_exists(self, provider, repo_metadata):
        """GitHub's missing-sha 422 becomes a 409 FolderNamingConflict."""
        path = await provider.validate_path('/Imarealboy/')
        url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
        aiohttpretty.register_json_uri('PUT', url, status=422, body={
            'message': 'Invalid request.\n\n"sha" wasn\'t supplied.'
        })

        with pytest.raises(exceptions.FolderNamingConflict) as exc_info:
            await provider.create_folder(path)

        assert exc_info.value.code == 409
        assert exc_info.value.message == 'Cannot create folder "Imarealboy" because a file or folder already exists at path "/Imarealboy/"'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_raises_other_422(self, provider, repo_metadata):
        """Any other 422 passes through as CreateFolderError with its payload."""
        path = await provider.validate_path('/Imarealboy/')
        url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
        aiohttpretty.register_json_uri('PUT', url, status=422, body={
            'message': 'github no likey'
        })

        with pytest.raises(exceptions.CreateFolderError) as exc_info:
            await provider.create_folder(path)

        assert exc_info.value.code == 422
        assert exc_info.value.data == {'message': 'github no likey'}

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_returns_metadata(self, provider, repo_metadata, create_folder_response):
        """A successful 201 yields folder metadata for the new directory."""
        path = await provider.validate_path('/i/like/trains/')
        url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
        aiohttpretty.register_json_uri('PUT', url, status=201, body=create_folder_response)

        metadata = await provider.create_folder(path)

        assert metadata.kind == 'folder'
        assert metadata.name == 'trains'
        assert metadata.path == '/i/like/trains/'
| |
import os
import json
from shutil import copyfile, rmtree
from docker.tls import TLSConfig
from docker.errors import ContextException
from docker.context.config import get_meta_dir
from docker.context.config import get_meta_file
from docker.context.config import get_tls_dir
from docker.context.config import get_context_host
class Context:
    """A named Docker context: endpoints plus optional TLS material.

    A context groups the daemon endpoint (and, optionally, an orchestrator
    endpoint) under a single name and knows how to persist itself to, and
    load itself from, the local context metadata and TLS directories.
    """

    def __init__(self, name, orchestrator=None, host=None, endpoints=None,
                 tls=False):
        """Build an in-memory context.

        Args:
            name: context name; required.
            orchestrator: optional orchestrator name (e.g. "swarm").
            host: daemon host, used when no explicit endpoints are given.
            endpoints: optional mapping of endpoint name -> endpoint dict.
            tls: whether the default docker endpoint should verify TLS.

        Raises:
            ContextException: if no name is given or an endpoint value is
                not a dict.
        """
        if not name:
            # Raise the package's own exception type so callers can handle
            # all context errors uniformly (was a bare ``Exception``).
            raise ContextException("Name not provided")
        self.name = name
        self.context_type = None
        self.orchestrator = orchestrator
        self.endpoints = {}
        self.tls_cfg = {}
        # Paths stay symbolic until save() writes the context to disk.
        self.meta_path = "IN MEMORY"
        self.tls_path = "IN MEMORY"
        if not endpoints:
            # set default docker endpoint if no endpoint is set
            default_endpoint = "docker" if (
                not orchestrator or orchestrator == "swarm"
            ) else orchestrator

            self.endpoints = {
                default_endpoint: {
                    "Host": get_context_host(host, tls),
                    "SkipTLSVerify": not tls
                }
            }
            return

        # check docker endpoints
        for k, v in endpoints.items():
            if not isinstance(v, dict):
                # unknown format
                raise ContextException("""Unknown endpoint format for
                    context {}: {}""".format(name, v))

            self.endpoints[k] = v
            if k != "docker":
                continue

            # Only the docker endpoint gets Host/SkipTLSVerify defaults.
            self.endpoints[k]["Host"] = v.get("Host", get_context_host(
                host, tls))
            self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
                "SkipTLSVerify", not tls))

    def set_endpoint(
            self, name="docker", host=None, tls_cfg=None,
            skip_tls_verify=False, def_namespace=None):
        """Add or replace the endpoint ``name`` on this context."""
        self.endpoints[name] = {
            "Host": get_context_host(host, not skip_tls_verify),
            "SkipTLSVerify": skip_tls_verify
        }
        if def_namespace:
            self.endpoints[name]["DefaultNamespace"] = def_namespace

        if tls_cfg:
            self.tls_cfg[name] = tls_cfg

    def inspect(self):
        """Return the full serializable view of this context."""
        return self.__call__()

    @classmethod
    def load_context(cls, name):
        """Load a persisted context by name, or return None if absent."""
        meta = Context._load_meta(name)
        if meta:
            instance = cls(
                meta["Name"],
                orchestrator=meta["Metadata"].get("StackOrchestrator", None),
                endpoints=meta.get("Endpoints", None))
            instance.context_type = meta["Metadata"].get("Type", None)
            instance._load_certs()
            instance.meta_path = get_meta_dir(name)
            return instance
        return None

    @classmethod
    def _load_meta(cls, name):
        """Read and normalize the on-disk metadata for context ``name``.

        Returns None when no meta file exists.

        Raises:
            ContextException: if the meta file cannot be parsed.
        """
        meta_file = get_meta_file(name)
        if not os.path.isfile(meta_file):
            return None

        metadata = {}
        try:
            with open(meta_file) as f:
                metadata = json.load(f)
        except (OSError, KeyError, ValueError) as e:
            # unknown format; use the package exception type (was bare
            # ``Exception``) so callers can catch context errors uniformly.
            raise ContextException("""Detected corrupted meta file for
                context {} : {}""".format(name, e))

        # for docker endpoints, set defaults for
        # Host and SkipTLSVerify fields.  Use .get so a meta file without
        # an "Endpoints" section does not raise KeyError.
        for k, v in metadata.get("Endpoints", {}).items():
            if k != "docker":
                continue
            metadata["Endpoints"][k]["Host"] = v.get(
                "Host", get_context_host(None, False))
            metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
                v.get("SkipTLSVerify", True))

        return metadata

    def _load_certs(self):
        """Populate tls_cfg from cert files found under the TLS directory."""
        certs = {}
        tls_dir = get_tls_dir(self.name)
        for endpoint in self.endpoints.keys():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                continue
            ca_cert = None
            cert = None
            key = None
            # File-name prefixes identify the role of each cert file.
            for filename in os.listdir(os.path.join(tls_dir, endpoint)):
                if filename.startswith("ca"):
                    ca_cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("cert"):
                    cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("key"):
                    key = os.path.join(tls_dir, endpoint, filename)
            if all([ca_cert, cert, key]):
                verify = None
                if endpoint == "docker" and not self.endpoints["docker"].get(
                        "SkipTLSVerify", False):
                    verify = True
                certs[endpoint] = TLSConfig(
                    client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
        self.tls_cfg = certs
        self.tls_path = tls_dir

    def save(self):
        """Persist metadata and TLS material to the context directories."""
        meta_dir = get_meta_dir(self.name)
        if not os.path.isdir(meta_dir):
            os.makedirs(meta_dir)
        with open(get_meta_file(self.name), "w") as f:
            f.write(json.dumps(self.Metadata))

        tls_dir = get_tls_dir(self.name)
        for endpoint, tls in self.tls_cfg.items():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                os.makedirs(os.path.join(tls_dir, endpoint))

            ca_file = tls.ca_cert
            if ca_file:
                copyfile(ca_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(ca_file)))

            if tls.cert:
                cert_file, key_file = tls.cert
                copyfile(cert_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(cert_file)))
                copyfile(key_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(key_file)))

        self.meta_path = get_meta_dir(self.name)
        self.tls_path = get_tls_dir(self.name)

    def remove(self):
        """Delete this context's metadata and TLS directories, if present."""
        if os.path.isdir(self.meta_path):
            rmtree(self.meta_path)
        if os.path.isdir(self.tls_path):
            rmtree(self.tls_path)

    def __repr__(self):
        return f"<{self.__class__.__name__}: '{self.name}'>"

    def __str__(self):
        return json.dumps(self.__call__(), indent=2)

    def __call__(self):
        # Merge metadata, TLS file names and storage paths into one dict.
        result = self.Metadata
        result.update(self.TLSMaterial)
        result.update(self.Storage)
        return result

    def is_docker_host(self):
        """True when this context has no special type (plain docker host)."""
        return self.context_type is None

    @property
    def Name(self):
        return self.name

    @property
    def Host(self):
        """Host of the effective endpoint (docker's, or the orchestrator's)."""
        if not self.orchestrator or self.orchestrator == "swarm":
            endpoint = self.endpoints.get("docker", None)
            if endpoint:
                return endpoint.get("Host", None)
            return None

        return self.endpoints[self.orchestrator].get("Host", None)

    @property
    def Orchestrator(self):
        return self.orchestrator

    @property
    def Metadata(self):
        meta = {}
        if self.orchestrator:
            meta = {"StackOrchestrator": self.orchestrator}
        return {
            "Name": self.name,
            "Metadata": meta,
            "Endpoints": self.endpoints
        }

    @property
    def TLSConfig(self):
        """TLSConfig of the effective endpoint, or None when unset."""
        key = self.orchestrator
        if not key or key == "swarm":
            key = "docker"
        if key in self.tls_cfg.keys():
            return self.tls_cfg[key]
        return None

    @property
    def TLSMaterial(self):
        certs = {}
        for endpoint, tls in self.tls_cfg.items():
            # A TLSConfig without a client cert pair cannot be unpacked
            # below; skip it instead of raising TypeError.
            if not tls.cert:
                continue
            cert, key = tls.cert
            certs[endpoint] = list(
                map(os.path.basename, [tls.ca_cert, cert, key]))
        return {
            "TLSMaterial": certs
        }

    @property
    def Storage(self):
        return {
            "Storage": {
                "MetadataPath": self.meta_path,
                "TLSPath": self.tls_path
            }}
| |
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
import gc
import os
import lz4
import plyvel
import logging
import StringIO
import marshal as pickle
import mokujin.triples as mtr
from mokujin import numencode
from mokujin.logicalform import POS
from mokujin.triples import ACTUAL_RELS
# Bidirectional mapping between relation names and dense integer ids.  The
# id doubles as the first element of every encoded tuple "stamp".
REL_ID_MAP = dict()
ID_REL_MAP = dict()
for rel in ACTUAL_RELS:
    REL_ID_MAP[rel.rel_name] = len(REL_ID_MAP)
    ID_REL_MAP[REL_ID_MAP[rel.rel_name]] = rel.rel_name

# Expected part-of-speech signature for each relation's argument slots.
# Fix: the original literal listed DepNoun_NounEqualNoun twice; dict
# literals silently keep only the last occurrence, so the duplicate entry
# was dead code and has been removed.
REL_POS_MAP = {
    REL_ID_MAP[mtr.DepVerb_SubjVerbDirobj.rel_name]: (POS.NN, POS.VB, POS.NN, ),
    REL_ID_MAP[mtr.DepVerb_SubjVerbIndirobj.rel_name]: (POS.NN, POS.VB, POS.NN, ),
    REL_ID_MAP[mtr.DepVerb_SubjVerbInstr.rel_name]: (POS.NN, POS.VB, POS.NN, ),
    REL_ID_MAP[mtr.DepVerb_SubjVerb.rel_name]: (POS.NN, POS.VB, ),
    REL_ID_MAP[mtr.DepVerb_PrepCompl.rel_name]: (POS.NN, POS.VB, POS.PREP, POS.NN, ),
    REL_ID_MAP[mtr.DepVerb_SubjVerbVerbPrepNoun.rel_name]: (POS.NN, POS.VB, POS.VB, POS.PREP, POS.NN, ),
    REL_ID_MAP[mtr.DepVerb_SubjVerbVerb.rel_name]: (POS.NN, POS.VB, POS.VB, ),
    REL_ID_MAP[mtr.DepAdj_NounAdj.rel_name]: (POS.NN, POS.ADJ, ),
    REL_ID_MAP[mtr.DepAdv_VerbNounAdv.rel_name]: (POS.NN, POS.VB, POS.RB, ),
    REL_ID_MAP[mtr.DepNoun_NounEqualPrepNoun.rel_name]: (POS.NN, POS.NN, POS.PREP, POS.NN, ),
    REL_ID_MAP[mtr.DepNoun_NounNoun.rel_name]: (POS.NN, POS.NN, ),
    REL_ID_MAP[mtr.DepNoun_NounNounNoun.rel_name]: (POS.NN, POS.NN, POS.NN, ),
    REL_ID_MAP[mtr.DepNoun_NounEqualNoun.rel_name]: (POS.NN, POS.NN, ),
    REL_ID_MAP[mtr.DepNoun_NounPrepNoun.rel_name]: (POS.NN, POS.PREP, POS.NN, ),
    REL_ID_MAP[mtr.DepAny_Compl.rel_name]: (POS.ANY, POS.ANY, ),
}

# Sanity check: every relation id should have a POS signature.
if len(REL_POS_MAP) != len(REL_ID_MAP):
    logging.error("NOT ALL RELATIONS HAS POS MAP")
class ArgType(object):
    """Sentinel ids and string markers used when parsing triple arguments."""
    # Sentinel id for an absent argument (or one with a missing POS tag).
    NONE = -1
    # Sentinel id for an explicitly empty argument slot.
    EMPTY = -2
    # Markers as they appear literally in the input rows.
    STR_NONE = "<NONE>"
    STR_EMPTY = "<->"
    # Lemma and POS are joined with "-"; "POS>" marks a missing POS tag.
    POS_DELIMITER = "-"
    POS_NONE = "POS>"
class TripleReader(object):
    """Parses rows of a triple file into (relation, arguments, frequency)."""

    def parse_triple_row(self, ts_row):
        """Convert one raw row into (relation, argument list, frequency).

        Each middle column is either a sentinel marker or a "lemma-POS"
        string; the POS suffix is stripped from real lemmas.
        """
        parsed_args = []
        for raw_arg in ts_row[1:-1]:
            if raw_arg == ArgType.STR_NONE:
                parsed_args.append(ArgType.NONE)
            elif raw_arg == ArgType.STR_EMPTY:
                parsed_args.append(ArgType.EMPTY)
            else:
                pieces = raw_arg.split(ArgType.POS_DELIMITER)
                if pieces[-1] == ArgType.POS_NONE:
                    parsed_args.append(ArgType.NONE)
                else:
                    # Re-join everything but the trailing POS tag, since the
                    # lemma itself may contain the delimiter.
                    parsed_args.append("-".join(pieces[:-1]))
        return ts_row[0], parsed_args, int(ts_row[-1])

    def iter_triples(self, i_file):
        """Yield one parsed triple per line of an open triple file."""
        for raw_line in i_file:
            yield self.parse_triple_row(raw_line.split(", "))
class DepTupleIndex(object):
    """
    Dependency relation tuple indexer.

    This class stores dependency tuples ("stamps") and per-term posting
    lists in three LevelDB databases under ``index_root`` (term.ldb,
    tuple.ldb, plist.ldb) and mirrors the term and tuple dictionaries in
    memory for fast lookup.
    """
    # NOTE(review): these two block-size constants are not referenced in the
    # visible code -- the plyvel calls below hard-code their own block_size.
    TUPLE_INDEX_DB_BLOCK_SIZE = 64
    TERM_INDEX_DB_BLOCK_SIZE = 256
    # Flush posting lists to disk once this many entries are cached.
    PLIST_CACHE_SIZE = 256000
    # Separator byte used when serializing string arrays.
    STRING_ARRAY_SEP = chr(244)

    def __init__(self, index_root):
        # Open existing databases (create=False) and load the term and
        # tuple dictionaries fully into memory.
        self.index_root = index_root
        self.term_ldb = DepTupleIndex.get_term_ldb(index_root, create=False)
        self.plist_ldb = DepTupleIndex.get_plist_ldb(index_root, create=False)
        self.tuple_ldb = DepTupleIndex.get_tuple_ldb(index_root, create=False)
        self.term2id = {}
        self.id2term = {}
        self.id2tuple = {}
        self.reltype2id = REL_ID_MAP
        self.id2reltype = ID_REL_MAP
        DepTupleIndex.load_terms(self.term_ldb, self.id2term, self.term2id)
        DepTupleIndex.load_tuples(self.tuple_ldb, self.id2tuple)

    @staticmethod
    def tuple2stamp(d_tuple, term2id):
        """Encode (rel_name, args, freq) into a compact integer "stamp".

        EMPTY arguments are dropped; NONE is kept as its sentinel value.
        """
        args = d_tuple[1]
        stamp = [REL_ID_MAP[d_tuple[0]]]
        for arg in args:
            if arg == ArgType.NONE:
                stamp.append(arg)
            elif arg != ArgType.EMPTY:
                stamp.append(term2id[arg])
        stamp.append(d_tuple[-1])
        return tuple(stamp)

    @staticmethod
    def stamp2tuple(stamp, id2term, map_none=False):
        """Decode a stamp back into [rel_name, args..., freq].

        When ``map_none`` is true, negative sentinels render as "<NONE>".
        """
        d_tuple = [ID_REL_MAP[stamp[0]]]
        for i in range(1, len(stamp) - 1):
            if stamp[i] >= 0:
                d_tuple.append(id2term[stamp[i]])
            else:
                if map_none:
                    d_tuple.append("<NONE>")
                else:
                    d_tuple.append(stamp[i])
        d_tuple.append(stamp[-1])
        return d_tuple

    @staticmethod
    def stamp_arg(stamp):
        """Return only the argument ids of a stamp (drop rel id and freq)."""
        return stamp[1: len(stamp) - 1]

    @staticmethod
    def get_tuple_ldb(index_root, create=False):
        """Open (or create) the tuple-id -> stamp database."""
        db_path = os.path.join(index_root, "tuple.ldb")
        return plyvel.DB(db_path,
                         compression="snappy",
                         write_buffer_size=1024 * (1024 ** 2),  # 1 GB
                         block_size=512 * (1024 ** 2),  # 512 MB
                         bloom_filter_bits=8,
                         create_if_missing=create,
                         error_if_exists=create)

    @staticmethod
    def get_term_ldb(index_root, create=False):
        """Open (or create) the term-id -> term database."""
        db_path = os.path.join(index_root, "term.ldb")
        return plyvel.DB(db_path,
                         compression="snappy",
                         write_buffer_size=1024 * (1024 ** 2),  # 1 GB
                         block_size=512 * (1024 ** 2),  # 512 MB
                         bloom_filter_bits=8,
                         create_if_missing=create,
                         error_if_exists=create)

    @staticmethod
    def get_plist_ldb(index_root, create=False):
        """Open (or create) the term-id -> posting-list database."""
        db_path = os.path.join(index_root, "plist.ldb")
        return plyvel.DB(db_path,
                         compression="snappy",
                         write_buffer_size=1024 * (1024 ** 2),  # 1 GB
                         block_size=512 * (1024 ** 2),  # 512 MB
                         bloom_filter_bits=8,
                         create_if_missing=create,
                         error_if_exists=create)

    @staticmethod
    def write_tuples(id2tuple, tuple_ldb):
        """Persist every in-memory tuple stamp to the tuple database."""
        with tuple_ldb.write_batch() as wb:
            for tuple_id, stamp in id2tuple.iteritems():
                wb.put(str(tuple_id), pickle.dumps(stamp))
        logging.info("Wrote %d tuples on disk." % len(id2tuple))

    @staticmethod
    def load_tuples(tuple_ldb, id2tuple):
        """Load all tuple stamps from disk into ``id2tuple``."""
        for tuple_id_str, stamp_blob in tuple_ldb:
            tuple_id = int(tuple_id_str)
            id2tuple[tuple_id] = pickle.loads(stamp_blob)
        logging.info("Loaded %d tuples into the memory." % len(id2tuple))

    @staticmethod
    def write_terms(term2id, term_ldb):
        """Persist the term dictionary (keyed by term id) to disk."""
        with term_ldb.write_batch() as wb:
            for term, term_id in term2id.iteritems():
                wb.put(str(term_id), term)
        logging.info("Wrote %d terms on disk." % len(term2id))

    @staticmethod
    def load_terms(term_ldb, id2term, term2id):
        """Load the term dictionary from disk, filling both directions."""
        for term_id_str, term in term_ldb:
            term_id = int(term_id_str)
            id2term[term_id] = term
            term2id[term] = term_id
        logging.info("Loaded %d terms into the memory." % len(id2term))

    @staticmethod
    def decode_posting_list(plist_blob):
        """Decompress and decode a stored posting-list blob."""
        plist = numencode.decode_plist(lz4.decompress(plist_blob))
        return plist

    @staticmethod
    def encode_posting_list(plist):
        """Encode and high-compress a posting list for storage."""
        return lz4.compressHC(numencode.encode_plist(plist))

    @staticmethod
    def update_posting_list(old_plist_blob, new_plist):
        """Merge new entries into an existing stored posting list."""
        plist_blob = lz4.decompress(old_plist_blob)
        updated_plist = numencode.update_plist(plist_blob, new_plist)
        return lz4.compressHC(updated_plist)

    @staticmethod
    def write_plists(plist_dict, plist_ldb, final_iteration=True):
        """Flush cached posting lists to disk, merging with stored ones.

        On non-final iterations, medium-sized lists (50..100000 entries)
        are kept in memory and returned so they keep growing in the cache.
        """
        plist_dict_dict = {}
        with plist_ldb.write_batch() as wb:
            for term_id, plist in plist_dict.iteritems():
                if 50 <= len(plist) <= 100000 and not final_iteration:
                    plist_dict_dict[term_id] = plist
                    continue
                term_key = numencode.encode_uint(term_id)
                try:
                    old_plist_blob = plist_ldb.get(term_key)
                except KeyError:
                    old_plist_blob = None
                if old_plist_blob is None:
                    plist_blob = DepTupleIndex.encode_posting_list(plist)
                else:
                    plist_blob = DepTupleIndex.update_posting_list(old_plist_blob, plist)
                wb.put(term_key, plist_blob)
        logging.info("Wrote %d posting lists on disk." % len(plist_dict))
        return plist_dict_dict

    @staticmethod
    def create(index_root, tuples, freq_threshold=5):
        """Build a new index at ``index_root`` from an iterable of tuples.

        All argument terms enter the dictionary, but only tuples whose
        frequency exceeds ``freq_threshold`` are stored and indexed.
        """
        id2term = {}
        term2id = {}
        id2tuple = {}
        plist_dict = {}
        term_ldb = DepTupleIndex.get_term_ldb(index_root, create=True)
        plist_ldb = DepTupleIndex.get_plist_ldb(index_root, create=True)
        tuple_ldb = DepTupleIndex.get_tuple_ldb(index_root, create=True)
        cached = 0
        logging.info("Beginning creating index.")
        for line_no, d_tuple in enumerate(tuples):
            dep_arguments = d_tuple[1]
            dep_frequency = d_tuple[-1]
            if line_no % 25000 == 0:
                logging.info("Indexing tuple #%d. Freq=%d." % (line_no, dep_frequency))
            for term in dep_arguments:
                # Skip special terms.
                if term == -1 or term == -2:
                    continue
                # Add term to dictionary.
                term_id = term2id.get(term, -1)
                if term_id == -1:
                    term_id = len(term2id)
                    term2id[term] = term_id
                    id2term[term_id] = term
            # Get compact representation of dependency tuple.
            stamp = DepTupleIndex.tuple2stamp(d_tuple, term2id)
            if dep_frequency > freq_threshold:
                # Generate ID for new tuple.
                tuple_id = len(id2tuple)
                id2tuple[tuple_id] = stamp
                for arg_idx, arg in enumerate(stamp[1:-1]):
                    if arg >= 0:
                        arg_plist = plist_dict.get(arg)
                        if arg_plist is None:
                            plist_dict[arg] = [(tuple_id, arg_idx)]
                        else:
                            plist_dict[arg].append((tuple_id, arg_idx))
                        cached += 1
            # Periodically spill the posting-list cache to disk.
            if cached == DepTupleIndex.PLIST_CACHE_SIZE:
                logging.info("Writing %d posting lists to disc." % len(plist_dict))
                plist_dict = DepTupleIndex.write_plists(plist_dict, plist_ldb, final_iteration=False)
                cached = 0
                gc.collect()
        DepTupleIndex.write_terms(term2id, term_ldb)
        DepTupleIndex.write_tuples(id2tuple, tuple_ldb)
        DepTupleIndex.write_plists(plist_dict, plist_ldb, final_iteration=True)
class TripleSearchEngine(object):
    """Query interface over a DepTupleIndex: intersects per-term posting lists."""

    def __init__(self, triple_index):
        self.index = triple_index
        self.id_term_map = triple_index.id2term
        self.term_id_map = triple_index.term2id
        self.id_triple_map = triple_index.id2tuple
        self.arg_index = triple_index.plist_ldb

    def search(self, rel_type=None, arg_query=()):
        """Return triples containing all queried terms.

        Each element of ``arg_query`` may be a (term, position) pair, a
        bare term string, or a raw term id; ``rel_type`` optionally
        restricts the relation of the returned triples.
        """
        # Normalize the heterogeneous query into (term_id, position) pairs;
        # position -1 means "any argument slot".
        norm_query = []
        for arg in arg_query:
            if isinstance(arg, list) or isinstance(arg, tuple):
                term, pos = arg
                if isinstance(term, basestring):
                    if isinstance(term, unicode):
                        term = term.encode("utf-8")
                    term_id = self.term_id_map.get(term)
                else:
                    term_id = term
            elif isinstance(arg, basestring):
                term, pos = arg, -1
                if isinstance(term, unicode):
                    term = term.encode("utf-8")
                term_id = self.term_id_map.get(term)
            elif isinstance(arg, int):
                term_id, pos = arg, -1
            else:
                term_id, pos = None, -1
            if term_id is not None and term_id in self.id_term_map:
                norm_query.append((term_id, pos))
        # Intersect the posting lists of all query terms.
        results = None
        for term_id, pos in norm_query:
            try:
                plist_blob = self.arg_index.get(numencode.encode_uint(term_id))
                plist = self.index.decode_posting_list(plist_blob)
            except KeyError:
                plist = []
            if pos != -1:
                # Keep only occurrences at the requested argument slot.
                plist = filter(lambda plist_el: plist_el[1] == pos, plist)
            plist = [plist_el[0] for plist_el in plist]
            plist = set(plist)
            if results is None:
                results = plist
            else:
                results &= plist
        if results is None:
            return ()
        results = [self.id_triple_map[triple_id] for triple_id in results]
        if rel_type is not None:
            results = filter(lambda triple: triple[0] == rel_type, results)
        return results

    def print_result(self, search_result, max_results=10):
        """Pretty-print up to ``max_results`` triples of a search result."""
        for triple in search_result[:max_results]:
            # NOTE(review): DepTupleIndex defines ``id2reltype`` but no
            # ``id_rel_map`` attribute -- this lookup looks broken; verify.
            triple_str = "<Triple(%s, " % self.index.id_rel_map[triple[0]]
            for i in range(1, len(triple) - 1):
                if triple[i] >= 0:
                    triple_str += "%s, " % self.id_term_map[triple[i]]
                else:
                    triple_str += "NONE, "
            triple_str += " %d>" % triple[-1]
            print triple_str

    def pprint(self, triple):
        """Return a compact one-line string form of an encoded triple."""
        pstr = StringIO.StringIO()
        pstr.write("{")
        pstr.write(ID_REL_MAP[triple[0]])
        pstr.write(";")
        terms = ";".join([self.id_term_map[term_id] if term_id >= 0 else "NONE" for term_id in triple[1:-1]])
        pstr.write(terms)
        pstr.write("}")
        return pstr.getvalue()
class SimpleObjectIndex(object):
    """Generic inverted index over arbitrary objects stored in LevelDB.

    Callers supply three functions: ``obj_to_terms`` (object -> iterable of
    index terms), ``obj_to_str`` (serialize) and ``str_to_obj``
    (deserialize).

    NOTE(review): the methods below use a module named ``leveldb``, but the
    visible imports only bring in ``plyvel`` -- confirm a file-level
    ``import leveldb`` exists elsewhere.
    """

    def __init__(self, data_dir, obj_to_terms, obj_to_str, str_to_obj):
        self.data_dir = data_dir
        self.obj_to_terms = obj_to_terms
        self.obj_to_str = obj_to_str
        self.str_to_obj = str_to_obj
        self.id_term_map = None
        self.term_id_map = None
        self.objnum = 0
        # Prefer lz4 for speed; fall back to zlib with low/high levels
        # standing in for lz4's fast/HC modes.
        try:
            import lz4 as compressor
            self.compress = compressor.compress
            self.compressHC = compressor.compressHC
            self.decompress = compressor.decompress
        except ImportError:
            import zlib as compressor
            self.compress = lambda data: compressor.compress(data, 3)
            self.compressHC = lambda data: compressor.compress(data, 9)
            self.decompress = lambda data: compressor.decompress(data)

    def load_all(self):
        """Load the term dictionary and object counter into memory."""
        id_term_map = self.load_terms()
        self.id_term_map = [None] * len(id_term_map)
        self.term_id_map = dict()
        for term_id, term in id_term_map.iteritems():
            self.id_term_map[term_id] = term
            self.term_id_map[term] = term_id
        self.objnum = self.load_objnum()

    def load_objnum(self):
        """Read the persisted object counter; 0 when the file is absent."""
        objnum_fl_path = "%s/OBJNUM" % self.data_dir
        try:
            with open(objnum_fl_path, "r") as objnum_fl:
                objnum = int(objnum_fl.read())
        except IOError:
            objnum = 0
        logging.info("LOADED DOCNUM %d" % objnum)
        return objnum

    def update_objnum(self, new_objnum):
        """Persist a new object counter; return the delta from the old one."""
        objnum_fl_path = "%s/OBJNUM" % self.data_dir
        prev_objnum = self.load_objnum()
        with open(objnum_fl_path, "w") as objnum_fl:
            objnum_fl.write(str(new_objnum))
        logging.info("OBJNUM updated %d => %d [+%d]" % (prev_objnum, new_objnum, new_objnum - prev_objnum))
        return new_objnum - prev_objnum

    def decode_posting_list(self, plist_blob):
        """Decompress and decode a stored 1-D posting-list blob."""
        plist = numencode.decode_1d_plist(self.decompress(plist_blob))
        return plist

    def encode_posting_list(self, plist):
        """Encode and high-compress a 1-D posting list for storage."""
        return self.compressHC(numencode.encode_1d_plist(plist))

    def update_posting_list(self, old_plist_blob, new_plist):
        """Merge new entries into an existing stored posting list."""
        plist_blob = self.decompress(old_plist_blob)
        updated_plist = numencode.update_1d_plist(plist_blob, new_plist)
        return self.compressHC(updated_plist)

    def update_posting_lists(self, post_lists):
        """Write a batch of posting lists, merging with any stored versions."""
        plist_store = leveldb.LevelDB("%s/plist.index" % self.data_dir)
        w_batch = leveldb.WriteBatch()
        upd_num = 0
        new_num = 0
        for term_id, plist in post_lists.iteritems():
            term_key = numencode.encode_uint(term_id)
            try:
                old_plist_blob = plist_store.Get(term_key)
                upd_num += 1
            except KeyError:
                new_num += 1
                old_plist_blob = None
            if old_plist_blob is None:
                plist_blob = self.encode_posting_list(plist)
            else:
                plist_blob = self.update_posting_list(old_plist_blob, plist)
            w_batch.Put(term_key, plist_blob)
        plist_store.Write(w_batch, sync=True)
        logging.info("updated %d plists, %d new" % (upd_num, new_num))

    def load_posting_list(self, term_id, plist_store):
        """Fetch and decode the posting list of a single term."""
        term_key = numencode.encode_uint(term_id)
        plist_blob = plist_store.Get(term_key)
        plist = self.decode_posting_list(plist_blob)
        return plist

    def write_objects(self, id_object_map):
        """Serialize, compress and store (object id, object) pairs."""
        object_store = leveldb.LevelDB("%s/object.db" % self.data_dir)
        w_batch = leveldb.WriteBatch()
        for obj_id, obj in id_object_map:
            obj_str = self.obj_to_str(obj)
            obj_blob = self.compressHC(obj_str)
            obj_key = numencode.encode_uint(obj_id)
            w_batch.Put(obj_key, obj_blob)
        object_store.Write(w_batch, sync=True)
        logging.info("wrote %d objects" % len(id_object_map))
        self.update_objnum(self.objnum)

    def load_object(self, obj_id, obj_store):
        """Fetch, decompress and deserialize one object by id."""
        obj_key = numencode.encode_uint(obj_id)
        obj_blob = obj_store.Get(obj_key)
        obj_str = self.decompress(obj_blob)
        obj = self.str_to_obj(obj_str)
        return obj

    def write_terms(self, id_term_map, batch_size=64):
        """Store the term list in compressed batches of ``batch_size``."""
        term_store = leveldb.LevelDB("%s/term.db" % self.data_dir)
        batch = []
        term_id = 0
        batch_key = 0
        while term_id < len(id_term_map):
            batch.append(id_term_map[term_id])
            if term_id % batch_size == batch_size - 1:
                batch_data = self.compressHC(pickle.dumps(batch))
                term_store.Put(numencode.encode_uint(batch_key), batch_data)
                batch = []
                batch_key += 1
            term_id += 1
        # Flush the final, possibly partial batch.
        if len(batch) > 0:
            batch_data = self.compressHC(pickle.dumps(batch))
            term_store.Put(numencode.encode_uint(batch_key), batch_data)
        logging.info("wrote %d terms" % len(id_term_map))

    def load_terms(self, batch_size=64):
        """Load the term dictionary; ids are reconstructed from batch keys."""
        id_term_map = dict()
        term_store = leveldb.LevelDB("%s/term.db" % self.data_dir)
        for batch_key, batch_data in term_store.RangeIter():
            batch = pickle.loads(self.decompress(batch_data))
            batch_key = numencode.decode_uint(batch_key)
            for i in xrange(len(batch)):
                term_id = batch_key * batch_size + i
                id_term_map[term_id] = batch[i]
        logging.info("INDEX: LOADED %d TERMS" % len(id_term_map))
        return id_term_map

    def index_term(self, term, object_id, post_lists):
        """Register one (term, object) occurrence in the cached posting lists."""
        term_id = self.term_id_map.get(term, -1)
        if term_id == -1:
            term_id = len(self.term_id_map)
            self.term_id_map[term] = term_id
            self.id_term_map.append(term)
        plist = post_lists.get(term_id, -1)
        if plist == -1:
            post_lists[term_id] = [object_id]
        else:
            plist.append(object_id)

    def update_index(self, objects, cache_size=(200000, 80000000)):
        """Index an iterable of objects, spilling caches to disk as they fill.

        ``cache_size`` is (max cached objects, max cached postings).
        """
        post_lists = dict()
        id_obj_map = []
        cached = 0
        logging.info("starting creating index")
        for obj in objects:
            terms = self.obj_to_terms(obj)
            for term in terms:
                self.index_term(term, self.objnum, post_lists)
                cached += 1
                if cached > cache_size[1]:
                    self.update_posting_lists(post_lists)
                    post_lists = dict()
                    cached = 0
            id_obj_map.append((self.objnum, obj))
            if len(id_obj_map) > cache_size[0]:
                self.write_objects(id_obj_map)
                id_obj_map = []
            self.objnum += 1
        # Flush whatever remains in the caches.
        self.write_objects(id_obj_map)
        self.update_posting_lists(post_lists)
        self.write_terms(self.id_term_map)
        logging.info("index done")

    def find(self, query_terms_cnf=None):
        """Yield objects matching any term of each clause (union per clause)."""
        for query_terms in query_terms_cnf:
            plist_store = leveldb.LevelDB("%s/plist.index" % self.data_dir)
            object_store = leveldb.LevelDB("%s/object.db" % self.data_dir)
            if query_terms is None:
                continue
            result_ids = set()
            for query_term in query_terms:
                term_id = self.term_id_map.get(query_term, -1)
                logging.info("TERM ID: %d" % term_id)
                if term_id == -1:
                    logging.info("TERM NOT FOUND IN DICTIONARY")
                    continue
                plist = self.load_posting_list(term_id, plist_store)
                result_ids.update(plist)
            logging.info("RETRIEVING %d OBJECTS FROM DISK" % len(result_ids))
            for obj_id in result_ids:
                obj = self.load_object(obj_id, object_store)
                yield obj
| |
from __future__ import with_statement
#!/usr/bin/env python
#
# Copyright 2012 cloudysunny14
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Created on 2012/06/27
@author: cloudysunny14@gmail.com
'''
import logging
from mapreduce.lib import files
from mapreduce import input_readers
from mapreduce import errors
# Short module-local aliases for the mapreduce error types.
Error = errors.Error
BadReaderParamsError = errors.BadReaderParamsError
class GoogleStorageLineInputReader(input_readers.InputReader):
"""Input reader for files from a stored in the GoogleCloudStorage.
You requires activate the google cloud storage and create bucket.
The class shouldn't be instantiated directly. Use the split_input
class method instead.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of file path
_MAX_FILE_PATHS_COUNT = 256
# Mapreduce parameters.
FILE_PATHS_PARAM = "file_paths"
# Serialyzation parameters.
INITIAL_POSITION_PARAM = "initial_position"
START_POSITION_PARAM = "start_position"
END_POSITION_PARAM = "end_position"
FILE_PATH_PARAM = "file_path"
def __init__(self, file_path, start_position, end_position):
"""Initializes this instance with the given file path and character range.
This GoogleStorageLineInputReader will read from the first record starting
after strictly after start_position until the first record ending at or
after end_position (exclusive). As an exception, if start_position is 0,
then this InputReader starts reading at the first record.
Args:
file_path: the file_path that this input reader is processing.
start_position: the position to start reading at.
end_position: a position in the last record to read.
"""
self._file_path = file_path
self._start_position = start_position
self._end_position = end_position
self._has_iterated = False
self._filestream = None
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.FILE_PATHS_PARAM not in params:
raise BadReaderParamsError("Must specify 'file_path' for mapper input")
file_paths = params[cls.FILE_PATHS_PARAM]
if isinstance(file_paths, basestring):
# This is a mechanism to allow multiple file paths (which do not contain
# commas) in a single string. It may go away.
file_paths = file_paths.split(",")
if len(file_paths) > cls._MAX_FILE_PATHS_COUNT:
raise BadReaderParamsError("Too many 'file_paths' for mapper input")
if not file_paths:
raise BadReaderParamsError("No 'file_paths' specified for mapper input")
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'file_paths' parameter with one or more file paths.
Returns:
A list of GoogleStorageLineInputReader corresponding to the
specified shards.
"""
params = _get_params(mapper_spec)
file_paths = params[cls.FILE_PATHS_PARAM]
if isinstance(file_paths, basestring):
# This is a mechanism to allow multiple file paths (which do not contain
# commas) in a single string. It may go away.
file_paths = file_paths.split(",")
file_sizes = {}
for file_path in file_paths:
fp = files.BufferedFile(file_path)
fp.seek(0, 2)
file_sizes[file_path] = fp.tell()
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
shards_per_file = shard_count // len(file_paths)
if shards_per_file == 0:
shards_per_file = 1
chunks = []
for file_path, file_size in file_sizes.items():
file_chunk_size = file_size // shards_per_file
for i in xrange(shards_per_file - 1):
chunks.append(GoogleStorageLineInputReader.from_json(
{cls.FILE_PATH_PARAM: file_path,
cls.INITIAL_POSITION_PARAM: file_chunk_size * i,
cls.END_POSITION_PARAM: file_chunk_size * (i + 1)}))
chunks.append(GoogleStorageLineInputReader.from_json(
{cls.FILE_PATH_PARAM: file_path,
cls.INITIAL_POSITION_PARAM: file_chunk_size * (shards_per_file - 1),
cls.END_POSITION_PARAM: file_size}))
return chunks
def next(self):
    """Return the next record as an ``(offset, line)`` tuple.

    Raises:
        StopIteration: when this shard's byte range is exhausted.
    """
    self._has_iterated = True

    # Open the stream lazily on first call.  A shard that begins mid-file
    # discards the (likely partial) line its start offset lands inside;
    # that line belongs to the preceding shard.
    if not self._filestream:
        self._filestream = files.BufferedFile(self._file_path)
        if self._start_position:
            self._filestream.seek(self._start_position)
            self._filestream.readline()

    offset = self._filestream.tell()
    if offset > self._end_position:
        raise StopIteration()

    record = self._filestream.readline()
    if not record:
        # End of file reached before the shard boundary.
        raise StopIteration()

    return offset, record.rstrip("\n")
def _next_offset(self):
    """Return the byte offset the next read would start from.

    Before the stream is opened, the shard's configured start position is
    the answer.  Once open, the current position is backed up by one byte
    (when non-zero) so a reader resumed from this offset re-reads the
    boundary byte — presumably to realign on a line break; confirm against
    ``next``'s skip-first-line behavior.
    """
    if not self._filestream:
        return self._start_position
    position = self._filestream.tell()
    return position - 1 if position else position
def to_json(self):
    """Serialize the remaining input range as a JSON-compatible dict."""
    state = {self.FILE_PATH_PARAM: self._file_path}
    state[self.INITIAL_POSITION_PARAM] = self._next_offset()
    state[self.END_POSITION_PARAM] = self._end_position
    return state
def __str__(self):
    """Return a debug string: file path plus the remaining byte range."""
    span = (self._file_path, self._next_offset(), self._end_position)
    return "FilePath(%r):[%d, %d]" % span
@classmethod
def from_json(cls, json):
    """Rebuild a reader from a shard spec produced by ``to_json``."""
    path = json[cls.FILE_PATH_PARAM]
    start = json[cls.INITIAL_POSITION_PARAM]
    end = json[cls.END_POSITION_PARAM]
    return cls(path, start, end)
def _get_params(mapper_spec, allowed_keys=None):
    """Fetch input-reader parameters from a mapper specification.

    Utility for input reader implementations.  Parameters are expected to
    live in an "input_reader" sub-dictionary of the mapper parameters;
    reading them from the top level is tolerated (with a warning) unless
    ``allowed_keys`` is given, in which case it is an error.

    Args:
        mapper_spec: the MapperSpec for the job.
        allowed_keys: optional set of permitted parameter names (strings).

    Returns:
        the reader parameters as a dict with ``str`` keys.

    Raises:
        BadReaderParamsError: if parameters are invalid/missing or not
            allowed.
    """
    if "input_reader" in mapper_spec.params:
        raw_params = mapper_spec.params.get("input_reader")
        if not isinstance(raw_params, dict):
            raise BadReaderParamsError(
                "Input reader parameters should be a dictionary")
        params = dict((str(n), v) for n, v in raw_params.iteritems())
    else:
        message = ("Input reader's parameters should be specified in "
                   "input_reader subdictionary.")
        if allowed_keys:
            raise errors.BadReaderParamsError(message)
        # Legacy layout: parameters at the top level.  Accept, but warn.
        logging.warning(message)
        params = dict(
            (str(n), v) for n, v in mapper_spec.params.iteritems())

    if allowed_keys:
        unknown = set(params.keys()) - allowed_keys
        if unknown:
            raise errors.BadReaderParamsError(
                "Invalid input_reader parameters: %s" % ",".join(unknown))
    return params
| |
"""Detaches a disk volume from a virtual machine."""
from baseCmd import *
from baseResponse import *
class detachVolumeCmd (baseCmd):
    """Detaches a disk volume from a virtual machine (async API command)."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "true"
        # Give each instance its own typeInfo mapping.  Previously the
        # assignments below mutated the shared class-level ``typeInfo``
        # dict, so every instance wrote into (and aliased) the same object.
        self.typeInfo = {}
        # the device ID on the virtual machine where volume is detached from
        self.deviceid = None
        self.typeInfo['deviceid'] = 'long'
        # the ID of the disk volume
        self.id = None
        self.typeInfo['id'] = 'uuid'
        # the ID of the virtual machine where the volume is detached from
        self.virtualmachineid = None
        self.typeInfo['virtualmachineid'] = 'uuid'
        self.required = []
class detachVolumeResponse (baseResponse):
    """Response body for the detachVolume API call."""
    typeInfo = {}

    # (attribute name, API type) pairs, in the order the API returns them.
    _FIELD_TYPES = (
        ('id', 'string'),                          # ID of the disk volume
        ('account', 'string'),                     # owning account
        ('attached', 'date'),                      # date attached to a VM
        ('chaininfo', 'string'),                   # chain info of the volume
        ('created', 'date'),                       # creation date
        ('destroyed', 'boolean'),                  # whether destroyed
        ('deviceid', 'long'),                      # device id on the user VM
        ('diskBytesReadRate', 'long'),
        ('diskBytesWriteRate', 'long'),
        ('diskIopsReadRate', 'long'),
        ('diskIopsWriteRate', 'long'),
        ('diskofferingdisplaytext', 'string'),
        ('diskofferingid', 'string'),
        ('diskofferingname', 'string'),
        ('displayvolume', 'boolean'),              # shown to end user or not
        ('domain', 'string'),
        ('domainid', 'string'),
        ('hypervisor', 'string'),
        ('isextractable', 'boolean'),
        ('isodisplaytext', 'string'),
        ('isoid', 'string'),
        ('isoname', 'string'),
        ('maxiops', 'long'),
        ('miniops', 'long'),
        ('name', 'string'),
        ('path', 'string'),
        ('project', 'string'),
        ('projectid', 'string'),
        ('provisioningtype', 'string'),
        ('quiescevm', 'boolean'),                  # quiesce VM for snapshot
        ('serviceofferingdisplaytext', 'string'),
        ('serviceofferingid', 'string'),
        ('serviceofferingname', 'string'),
        ('size', 'long'),                          # size of the disk volume
        ('snapshotid', 'string'),
        ('state', 'string'),
        ('status', 'string'),
        ('storage', 'string'),                     # primary storage name
        ('storageid', 'string'),                   # admin-only
        ('storagetype', 'string'),                 # shared or local
        ('templatedisplaytext', 'string'),
        ('templateid', 'string'),                  # -1 when created from ISO
        ('templatename', 'string'),
        ('type', 'string'),                        # ROOT or DATADISK
        ('virtualmachineid', 'string'),
        ('vmdisplayname', 'string'),
        ('vmname', 'string'),
        ('vmstate', 'string'),
        ('zoneid', 'string'),
        ('zonename', 'string'),
    )

    def __init__(self):
        """Initialize every response attribute to None and record its type."""
        for attr, api_type in self._FIELD_TYPES:
            setattr(self, attr, None)
            self.typeInfo[attr] = api_type
        # the list of resource tags associated with the volume
        self.tags = []
        # async job bookkeeping for this object
        self.jobid = None
        self.typeInfo['jobid'] = ''
        self.jobstatus = None
        self.typeInfo['jobstatus'] = ''

    class tags:
        """A single resource tag attached to the volume."""
        def __init__(self):
            self.account = None       # account associated with the tag
            self.customer = None      # customer associated with the tag
            self.domain = None        # domain associated with the tag
            self.domainid = None      # ID of that domain
            self.key = None           # tag key name
            self.project = None       # project name the tag belongs to
            self.projectid = None     # project id the tag belongs to
            self.resourceid = None    # id of the tagged resource
            self.resourcetype = None  # type of the tagged resource
            self.value = None         # tag value
| |
import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy.stats import (binned_statistic, binned_statistic_2d,
binned_statistic_dd)
from scipy._lib._util import check_random_state
from .common_tests import check_named_results
class TestBinnedStatistic(object):
    """Tests for binned_statistic, binned_statistic_2d and binned_statistic_dd,
    mostly by cross-checking against numpy histograms or callable statistics.
    """

    @classmethod
    def setup_class(cls):
        # Fixed seed: the hard-coded bin-number regression arrays below
        # depend on exactly this random data.
        rng = check_random_state(9865)
        cls.x = rng.uniform(size=100)
        cls.y = rng.uniform(size=100)
        cls.v = rng.uniform(size=100)
        cls.X = rng.uniform(size=(100, 3))
        cls.w = rng.uniform(size=100)
        # Large-magnitude values to stress numerical stability of 'std'.
        cls.u = rng.uniform(size=100) + 1e6

    def test_1d_count(self):
        """'count' statistic must agree with np.histogram."""
        x = self.x
        v = self.v
        count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
        count2, edges2 = np.histogram(x, bins=10)
        assert_allclose(count1, count2)
        assert_allclose(edges1, edges2)

    def test_gh5927(self):
        # smoke test for gh5927 - binned_statistic was using `is` for string
        # comparison
        x = self.x
        v = self.v
        statistics = [u'mean', u'median', u'count', u'sum']
        for statistic in statistics:
            binned_statistic(x, v, statistic, bins=10)

    def test_big_number_std(self):
        # tests for numerical stability of std calculation
        # see issue gh-10126 for more
        x = self.x
        u = self.u
        stat1, edges1, bc = binned_statistic(x, u, 'std', bins=10)
        stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10)
        assert_allclose(stat1, stat2)

    def test_non_finite_inputs_and_int_bins(self):
        # if either `values` or `sample` contain np.inf or np.nan throw
        # see issue gh-9010 for more
        x = self.x
        u = self.u
        orig = u[0]
        u[0] = np.inf
        assert_raises(ValueError, binned_statistic, u, x, 'std', bins=10)
        # need to test for non-python specific ints, e.g. np.int8, np.int64
        assert_raises(ValueError, binned_statistic, u, x, 'std',
                      bins=np.int64(10))
        u[0] = np.nan
        assert_raises(ValueError, binned_statistic, u, x, 'count', bins=10)
        # restore the original value: u is shared class-level fixture state
        u[0] = orig

    def test_1d_result_attributes(self):
        """Result must be a namedtuple with the documented field names."""
        x = self.x
        v = self.v
        res = binned_statistic(x, v, 'count', bins=10)
        attributes = ('statistic', 'bin_edges', 'binnumber')
        check_named_results(res, attributes)

    def test_1d_sum(self):
        """'sum' statistic must agree with a weighted np.histogram."""
        x = self.x
        v = self.v
        sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
        sum2, edges2 = np.histogram(x, bins=10, weights=v)
        assert_allclose(sum1, sum2)
        assert_allclose(edges1, edges2)

    def test_1d_mean(self):
        """String statistic 'mean' must match the np.mean callable."""
        x = self.x
        v = self.v
        stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
        stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_1d_std(self):
        """String statistic 'std' must match the np.std callable."""
        x = self.x
        v = self.v
        stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
        stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_1d_min(self):
        """String statistic 'min' must match the np.min callable."""
        x = self.x
        v = self.v
        stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)
        stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_1d_max(self):
        """String statistic 'max' must match the np.max callable."""
        x = self.x
        v = self.v
        stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)
        stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_1d_median(self):
        """String statistic 'median' must match the np.median callable."""
        x = self.x
        v = self.v
        stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
        stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_1d_bincode(self):
        """Bin numbers must match a precomputed regression array."""
        x = self.x[:20]
        v = self.v[:20]
        count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
        # Expected bin number per sample for the seeded data above.
        bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
                        1, 2, 1])
        bcount = [(bc == i).sum() for i in np.unique(bc)]
        assert_allclose(bc, bc2)
        assert_allclose(bcount, count1)

    def test_1d_range_keyword(self):
        # Regression test for gh-3063, range can be (min, max) or [(min, max)]
        np.random.seed(9865)
        x = np.arange(30)
        data = np.random.random(30)
        mean, bins, _ = binned_statistic(x[:15], data[:15])
        mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
        mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
        assert_allclose(mean, mean_range)
        assert_allclose(bins, bins_range)
        assert_allclose(mean, mean_range2)
        assert_allclose(bins, bins_range2)

    def test_1d_multi_values(self):
        """Passing [v, w] must equal two independent single-value calls."""
        x = self.x
        v = self.v
        w = self.w
        stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)
        stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)
        stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)
        assert_allclose(stat2[0], stat1v)
        assert_allclose(stat2[1], stat1w)
        assert_allclose(edges1v, edges2)
        assert_allclose(bc1v, bc2)

    def test_2d_count(self):
        """2-D 'count' must agree with np.histogram2d."""
        x = self.x
        y = self.y
        v = self.v
        count1, binx1, biny1, bc = binned_statistic_2d(
            x, y, v, 'count', bins=5)
        count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
        assert_allclose(count1, count2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_result_attributes(self):
        """2-D result must expose the documented namedtuple fields."""
        x = self.x
        y = self.y
        v = self.v
        res = binned_statistic_2d(x, y, v, 'count', bins=5)
        attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
        check_named_results(res, attributes)

    def test_2d_sum(self):
        """2-D 'sum' must agree with a weighted np.histogram2d."""
        x = self.x
        y = self.y
        v = self.v
        sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
        sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
        assert_allclose(sum1, sum2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_mean(self):
        """2-D string statistic 'mean' must match the np.mean callable."""
        x = self.x
        y = self.y
        v = self.v
        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
        assert_allclose(stat1, stat2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_mean_unicode(self):
        """Same as test_2d_mean but exercising a unicode statistic name."""
        x = self.x
        y = self.y
        v = self.v
        stat1, binx1, biny1, bc = binned_statistic_2d(
            x, y, v, 'mean', bins=5)
        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
        assert_allclose(stat1, stat2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_std(self):
        """2-D string statistic 'std' must match the np.std callable."""
        x = self.x
        y = self.y
        v = self.v
        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
        assert_allclose(stat1, stat2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_min(self):
        """2-D string statistic 'min' must match the np.min callable."""
        x = self.x
        y = self.y
        v = self.v
        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)
        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)
        assert_allclose(stat1, stat2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_max(self):
        """2-D string statistic 'max' must match the np.max callable."""
        x = self.x
        y = self.y
        v = self.v
        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)
        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)
        assert_allclose(stat1, stat2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_median(self):
        """2-D string statistic 'median' must match the np.median callable."""
        x = self.x
        y = self.y
        v = self.v
        stat1, binx1, biny1, bc = binned_statistic_2d(
            x, y, v, 'median', bins=5)
        stat2, binx2, biny2, bc = binned_statistic_2d(
            x, y, v, np.median, bins=5)
        assert_allclose(stat1, stat2)
        assert_allclose(binx1, binx2)
        assert_allclose(biny1, biny2)

    def test_2d_bincode(self):
        """2-D linearized bin numbers must match the regression array."""
        x = self.x[:20]
        y = self.y[:20]
        v = self.v[:20]
        count1, binx1, biny1, bc = binned_statistic_2d(
            x, y, v, 'count', bins=3)
        # Expected raveled bin number per sample for the seeded data above.
        bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
                        6, 11, 16, 6, 6, 11, 8])
        bcount = [(bc == i).sum() for i in np.unique(bc)]
        assert_allclose(bc, bc2)
        # Only compare against non-empty bins of the count grid.
        count1adj = count1[count1.nonzero()]
        assert_allclose(bcount, count1adj)

    def test_2d_multi_values(self):
        """2-D: passing [v, w] must equal two single-value calls."""
        x = self.x
        y = self.y
        v = self.v
        w = self.w
        stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(
            x, y, v, 'mean', bins=8)
        stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(
            x, y, w, 'mean', bins=8)
        stat2, binx2, biny2, bc2 = binned_statistic_2d(
            x, y, [v, w], 'mean', bins=8)
        assert_allclose(stat2[0], stat1v)
        assert_allclose(stat2[1], stat1w)
        assert_allclose(binx1v, binx2)
        assert_allclose(biny1w, biny2)
        assert_allclose(bc1v, bc2)

    def test_2d_binnumbers_unraveled(self):
        """expand_binnumbers=True must yield per-dimension bin indices."""
        x = self.x
        y = self.y
        v = self.v
        stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)
        stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)
        stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(
            x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)
        bcx3 = np.searchsorted(edgesx, x, side='right')
        bcy3 = np.searchsorted(edgesy, y, side='right')
        # `numpy.searchsorted` is non-inclusive on right-edge, compensate
        bcx3[x == x.max()] -= 1
        bcy3[y == y.max()] -= 1
        assert_allclose(bcx, bc2[0])
        assert_allclose(bcy, bc2[1])
        assert_allclose(bcx3, bc2[0])
        assert_allclose(bcy3, bc2[1])

    def test_dd_count(self):
        """N-D 'count' must agree with np.histogramdd."""
        X = self.X
        v = self.v
        count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
        count2, edges2 = np.histogramdd(X, bins=3)
        assert_allclose(count1, count2)
        assert_allclose(edges1, edges2)

    def test_dd_result_attributes(self):
        """N-D result must expose the documented namedtuple fields."""
        X = self.X
        v = self.v
        res = binned_statistic_dd(X, v, 'count', bins=3)
        attributes = ('statistic', 'bin_edges', 'binnumber')
        check_named_results(res, attributes)

    def test_dd_sum(self):
        """N-D 'sum' must agree with a weighted np.histogramdd."""
        X = self.X
        v = self.v
        sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
        sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
        assert_allclose(sum1, sum2)
        assert_allclose(edges1, edges2)

    def test_dd_mean(self):
        """N-D string statistic 'mean' must match the np.mean callable."""
        X = self.X
        v = self.v
        stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
        stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_dd_std(self):
        """N-D string statistic 'std' must match the np.std callable."""
        X = self.X
        v = self.v
        stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
        stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_dd_min(self):
        """N-D string statistic 'min' must match the np.min callable."""
        X = self.X
        v = self.v
        stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)
        stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_dd_max(self):
        """N-D string statistic 'max' must match the np.max callable."""
        X = self.X
        v = self.v
        stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)
        stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_dd_median(self):
        """N-D string statistic 'median' must match the np.median callable."""
        X = self.X
        v = self.v
        stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
        stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
        assert_allclose(stat1, stat2)
        assert_allclose(edges1, edges2)

    def test_dd_bincode(self):
        """N-D linearized bin numbers must match the regression array."""
        X = self.X[:20]
        v = self.v[:20]
        count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
        # Expected raveled bin number per sample for the seeded data above.
        bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
                        32, 36, 91, 43, 87, 81, 81])
        bcount = [(bc == i).sum() for i in np.unique(bc)]
        assert_allclose(bc, bc2)
        # Only compare against non-empty bins of the count grid.
        count1adj = count1[count1.nonzero()]
        assert_allclose(bcount, count1adj)

    def test_dd_multi_values(self):
        """N-D: passing [v, w] must equal two single-value calls."""
        X = self.X
        v = self.v
        w = self.w
        for stat in ["count", "sum", "mean", "std", "min", "max", "median",
                     np.std]:
            stat1v, edges1v, bc1v = binned_statistic_dd(X, v, stat, bins=8)
            stat1w, edges1w, bc1w = binned_statistic_dd(X, w, stat, bins=8)
            stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], stat, bins=8)
            assert_allclose(stat2[0], stat1v)
            assert_allclose(stat2[1], stat1w)
            assert_allclose(edges1v, edges2)
            assert_allclose(edges1w, edges2)
            assert_allclose(bc1v, bc2)

    def test_dd_binnumbers_unraveled(self):
        """expand_binnumbers=True must match per-dimension 1-D results."""
        X = self.X
        v = self.v
        stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)
        stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)
        stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)
        stat2, edges2, bc2 = binned_statistic_dd(
            X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)
        assert_allclose(bcx, bc2[0])
        assert_allclose(bcy, bc2[1])
        assert_allclose(bcz, bc2[2])

    def test_dd_binned_statistic_result(self):
        # NOTE: tests the reuse of bin_edges from previous call
        x = np.random.random((10000, 3))
        v = np.random.random((10000))
        bins = np.linspace(0, 1, 10)
        bins = (bins, bins, bins)
        result = binned_statistic_dd(x, v, 'mean', bins=bins)
        stat = result.statistic
        result = binned_statistic_dd(x, v, 'mean',
                                     binned_statistic_result=result)
        stat2 = result.statistic
        assert_allclose(stat, stat2)

    def test_dd_zero_dedges(self):
        """Duplicated bin edges (zero-width bin) must raise ValueError."""
        x = np.random.random((10000, 3))
        v = np.random.random((10000))
        bins = np.linspace(0, 1, 10)
        bins = np.append(bins, 1)
        bins = (bins, bins, bins)
        with assert_raises(ValueError, match='difference is numerically 0'):
            binned_statistic_dd(x, v, 'mean', bins=bins)

    def test_dd_range_errors(self):
        # Test that descriptive exceptions are raised as appropriate for bad
        # values of the `range` argument. (See gh-12996)
        with assert_raises(ValueError,
                           match='In range, start must be <= stop'):
            binned_statistic_dd([self.y], self.v,
                                range=[[1, 0]])
        with assert_raises(
                ValueError,
                match='In dimension 1 of range, start must be <= stop'):
            binned_statistic_dd([self.x, self.y], self.v,
                                range=[[1, 0], [0, 1]])
        with assert_raises(
                ValueError,
                match='In dimension 2 of range, start must be <= stop'):
            binned_statistic_dd([self.x, self.y], self.v,
                                range=[[0, 1], [1, 0]])
        with assert_raises(
                ValueError,
                match='range given for 1 dimensions; 2 required'):
            binned_statistic_dd([self.x, self.y], self.v,
                                range=[[0, 1]])
| |
"""Test applying the Identity Law to tree nodes."""
from ._helpers import ExpressionTreeAndNodeTestCase
class TestNodeApplyIdentityLaw(ExpressionTreeAndNodeTestCase):
    """Tests for Node.apply_identity_law.

    Every test also asserts that the transformation returns a NEW tree
    (``transformed is not root``) rather than mutating in place.  Expected
    trees are compared via their multi-line string drawings.
    """

    def test_single_operand(self):
        """Test expressions of single operands."""
        for symbol in ('A', 'operand', '0', '1'):
            root = self.get_tree_root_from_expr_str(symbol)
            transformed = root.apply_identity_law()
            self.assertTrue(transformed is not root)
            self.assertEqual(
                str(transformed),
                symbol)

    def test_simple_cases_equivalent_to_0(self):
        """Test simple binary expressions that transform to 0."""
        # Annihilator cases: anything ANDed with 0 collapses to 0.
        exprs_equivalent_to_0 = [
            '1 and 0',
            '0 and 1',
            '0 and 0',
            '1 and 1 and 1 and 0',
            '1 and 1 and 0 and 1 and 1',
            'A and 0',
            '0 and B',
            '~A and 0',
            '0 and (A xor B)',
            '(A -> B) and 0']
        for expr in exprs_equivalent_to_0:
            root = self.get_tree_root_from_expr_str(expr)
            transformed = root.apply_identity_law()
            self.assertTrue(root is not transformed)
            self.assertEqual(
                str(transformed),
                '0')

    def test_simple_cases_equivalent_to_1(self):
        """Test simple binary expressions that transform to 1."""
        # Annihilator cases: anything ORed with 1 collapses to 1.
        exprs_equivalent_to_1 = [
            '0 or 1',
            '1 or 0',
            '1 or 1',
            '0 or 0 or 0 or 1',
            '0 or 1 or 0 or 0',
            'B or 1',
            '1 or B',
            '~~~~~~B or 1',
            '1 or (A and B and C)',
            '(A xor B) or 1']
        for expr in exprs_equivalent_to_1:
            root = self.get_tree_root_from_expr_str(expr)
            transformed = root.apply_identity_law()
            self.assertTrue(root is not transformed)
            self.assertEqual(
                str(transformed),
                '1')

    def test_simple_cases_and_identity(self):
        """Test expressions ANDed with 1, pruning the 1 sub-expression."""
        root = self.get_tree_root_from_expr_str('(A -> B) and 1')
        transformed = root.apply_identity_law()
        self.assertTrue(transformed is not root)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                '->',
                '`----A',
                '`----B')))
        root = self.get_tree_root_from_expr_str('1 and (A nand B)')
        transformed = root.apply_identity_law()
        self.assertTrue(transformed is not root)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'nand',
                '`----A',
                '`----B')))

    def test_simple_cases_or_identity(self):
        """Test expressions ORed with 0, pruning the 0 sub-expression."""
        root = self.get_tree_root_from_expr_str('operand or 0')
        transformed = root.apply_identity_law()
        self.assertTrue(transformed is not root)
        self.assertEqual(
            str(transformed),
            'operand')
        root = self.get_tree_root_from_expr_str('0 or (A and B)')
        transformed = root.apply_identity_law()
        self.assertTrue(transformed is not root)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'and',
                '`----A',
                '`----B')))

    def test_chained_operators_equivalent_to_1(self):
        """Test chained AND and OR expressions yielding 1."""
        exprs_equivalent_to_1 = [
            '(1 or A or B or C)',
            'A or B or C or D or E or 1',
            'A or 1 or B or 1 or C or 1 or D',
            '(A or 1 or B or C) and (A or B or C or D or 1)']
        for expr in exprs_equivalent_to_1:
            root = self.get_tree_root_from_expr_str(expr)
            transformed = root.apply_identity_law()
            self.assertTrue(root is not transformed)
            self.assertEqual(str(transformed), '1')

    def test_chained_operators_equivalent_to_0(self):
        """Test chained AND and OR expressions yielding 0."""
        exprs_equivalent_to_0 = [
            '0 and A and B and C',
            'A and B and 0',
            'A and B and 0 and C and D and E',
            '(A and B and 0 and C) or (A and 0 and B and C and D and E)']
        for expr in exprs_equivalent_to_0:
            root = self.get_tree_root_from_expr_str(expr)
            transformed = root.apply_identity_law()
            self.assertTrue(root is not transformed)
            self.assertEqual(str(transformed), '0')

    def test_chained_operators_pruning_1(self):
        """Test chained AND operators, ensuring 1 literals are pruned."""
        root = self.get_tree_root_from_expr_str('1 and A and B and C')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'and',
                '`----A',
                '`----and',
                '     `----B',
                '     `----C')))
        root = self.get_tree_root_from_expr_str('A and B and C and 1')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'and',
                '`----A',
                '`----and',
                '     `----B',
                '     `----C')))
        root = self.get_tree_root_from_expr_str('(A and 1 and B) or (1 and C)')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'or',
                '`----and',
                '|    `----A',
                '|    `----B',
                '`----C')))

    def test_chained_operators_pruning_0(self):
        """Test chained OR operators, ensuring 0 literals are pruned."""
        root = self.get_tree_root_from_expr_str('0 or A or B')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'or',
                '`----A',
                '`----B')))
        root = self.get_tree_root_from_expr_str('A or B or 0')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'or',
                '`----A',
                '`----B')))
        root = self.get_tree_root_from_expr_str('(A or 0 or B) and (C or 0)')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'and',
                '`----or',
                '|    `----A',
                '|    `----B',
                '`----C')))

    def test_bubbling_up(self):
        """Test constants that bubble up the tree."""
        # Identity simplification only applies to and/or; the resulting
        # constants stop bubbling at non-and/or operators such as xor / ->.
        root = self.get_tree_root_from_expr_str('(1 and (B or 1)) xor A')
        transformed = root.apply_identity_law()
        self.assertTrue(transformed is not root)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'xor',
                '`----1',
                '`----A')))
        root = self.get_tree_root_from_expr_str(
            '(B -> (C and 0)) and (1 or (A -> B -> C))')
        transformed = root.apply_identity_law()
        self.assertTrue(transformed is not root)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                '->',
                '`----B',
                '`----0')))

    def test_unaffected_expression(self):
        """Test expressions that should not be affected."""
        root = self.get_tree_root_from_expr_str('A and B and C')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'and',
                '`----A',
                '`----and',
                '     `----B',
                '     `----C')))
        root = self.get_tree_root_from_expr_str('0 xor 1')
        transformed = root.apply_identity_law()
        self.assertTrue(root is not transformed)
        self.assertEqual(
            str(transformed),
            '\n'.join((
                'xor',
                '`----0',
                '`----1')))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira Networks, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
import hashlib
import inspect
import json
from oslo.config import cfg
#FIXME(danwent): I'd like this file to get to the point where it has
# no neutron-specific logic in it
from neutron.common import constants
from neutron.common import exceptions as exception
from neutron.openstack.common import log
from neutron.plugins.nicira.common import (
exceptions as nvp_exc)
from neutron.plugins.nicira import NvpApiClient
LOG = log.getLogger(__name__)
# HTTP method constants used when issuing NVP API requests.
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
# Prefix to be used for all NVP API calls
URI_PREFIX = "/ws.v1"
# Resources exposed by NVP API
LSWITCH_RESOURCE = "lswitch"
LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE
LROUTER_RESOURCE = "lrouter"
LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE
LROUTERNAT_RESOURCE = "nat/lrouter"
LQUEUE_RESOURCE = "lqueue"
GWSERVICE_RESOURCE = "gateway-service"
# Current neutron version
NEUTRON_VERSION = "2013.1"
# Other constants for NVP resource
MAX_DISPLAY_NAME_LEN = 40
# Constants for NAT rules
MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
              "destination_port_min", "source_ip_addresses",
              "source_port_max", "source_port_min", "protocol"]
SNAT_KEYS = ["to_src_port_min", "to_src_port_max", "to_src_ip_min",
             "to_src_ip_max"]
DNAT_KEYS = ["to_dst_port", "to_dst_ip_min", "to_dst_ip_max"]
# TODO(bgh): it would be more efficient to use a bitmap
taken_context_ids = []
# XXX Only cache default for now
_lqueue_cache = {}
def version_dependent(func):
    """Decorator dispatching a call to an NVP-version-specific implementation.

    The decorated name is only a key: the actual callable is looked up in
    the module-level ``NVPLIB_FUNC_DICT`` by function name and the major
    NVP version reported by the cluster's API client.

    Args:
        func: the placeholder function whose __name__ selects the entry in
            NVPLIB_FUNC_DICT.

    Returns:
        a dispatcher with signature ``(cluster, *args, **kwargs)``.
    """
    func_name = func.__name__

    def dispatch_version_dependent_function(cluster, *args, **kwargs):
        nvp_ver = cluster.api_client.get_nvp_version()
        # NOTE(review): when the NVP version cannot be determined the call
        # is silently skipped and None is returned - confirm this is the
        # intended behavior rather than an error condition.
        if nvp_ver:
            ver_major = int(nvp_ver.split('.')[0])
            real_func = NVPLIB_FUNC_DICT[func_name][ver_major]
            func_kwargs = kwargs
            arg_spec = inspect.getargspec(real_func)
            if not arg_spec.keywords and not arg_spec.varargs:
                # drop args unknown to function from func_args
                arg_set = set(func_kwargs.keys())
                for arg in arg_set - set(arg_spec.args):
                    del func_kwargs[arg]
            # NOTE(salvatore-orlando): shall we fail here if a required
            # argument is not passed, or let the called function raise?
            # Bug fix: propagate the dispatched function's result.  It was
            # previously computed and discarded, so every decorated call
            # returned None regardless of the real implementation.
            return real_func(cluster, *args, **func_kwargs)
    return dispatch_version_dependent_function
def _build_uri_path(resource,
                    resource_id=None,
                    parent_resource_id=None,
                    fields=None,
                    relations=None,
                    filters=None,
                    types=None,
                    is_attachment=False):
    """Build a NVP API uri path, with an optional query string.

    :param resource: resource name, possibly "child/parent" shaped
    :param resource_id: identifier of the target resource
    :param parent_resource_id: identifier of the parent resource for
        resources addressed relative to a parent (e.g. lports)
    :param fields: value for the 'fields' query parameter
    :param relations: value for the 'relations' query parameter
    :param filters: dict of additional query parameters
    :param types: value for the 'types' query parameter
    :param is_attachment: if True address the attachment sub-resource
    :returns: the full uri path, including URI_PREFIX
    """
    resources = resource.split('/')
    res_path = resources[0] + (resource_id and "/%s" % resource_id or '')
    if len(resources) > 1:
        # There is also a parent resource to account for in the uri
        res_path = "%s/%s/%s" % (resources[1],
                                 parent_resource_id,
                                 res_path)
    if is_attachment:
        res_path = "%s/attachment" % res_path
    params = []
    params.append(fields and "fields=%s" % fields)
    params.append(relations and "relations=%s" % relations)
    params.append(types and "types=%s" % types)
    if filters:
        # items() rather than iteritems() keeps this Python 3 compatible;
        # behavior on Python 2 is unchanged.
        params.extend(['%s=%s' % (k, v) for (k, v) in filters.items()])
    uri_path = "%s/%s" % (URI_PREFIX, res_path)
    # Drop unset parameters; also discard empty strings, which would
    # otherwise leave a stray '&' in the query string.
    non_empty_params = [x for x in params if x]
    if non_empty_params:
        query_string = '&'.join(non_empty_params)
        if query_string:
            uri_path += "?%s" % query_string
    return uri_path
def _check_and_truncate_name(display_name):
if display_name and len(display_name) > MAX_DISPLAY_NAME_LEN:
LOG.debug(_("Specified name:'%s' exceeds maximum length. "
"It will be truncated on NVP"), display_name)
return display_name[:MAX_DISPLAY_NAME_LEN]
return display_name
def get_cluster_version(cluster):
    """Return major/minor version #."""
    # Fetch a single control-cluster node; no nodes means no version.
    res = do_request(HTTP_GET,
                     "/ws.v1/control-cluster/node?_page_length=1&fields=uuid",
                     cluster=cluster)
    if res["result_count"] == 0:
        return None
    node_uuid = res["results"][0]["uuid"]
    # Get control-cluster node status. It's unsupported to have controllers
    # running different version so we just need the first node version.
    res = do_request(HTTP_GET,
                     "/ws.v1/control-cluster/node/%s/status" % node_uuid,
                     cluster=cluster)
    version = "%s.%s" % tuple(res["version"].split(".")[:2])
    LOG.info(_("NVP controller cluster version: %s"), version)
    return version
def get_all_query_pages(path, c):
    """Follow NVP pagination cursors and return every result for path."""
    result_list = []
    page_cursor = None
    # Append the cursor with '&' when the path already has a query string.
    query_marker = "&" if (path.find("?") != -1) else "?"
    while True:
        cursor_param = (
            "_page_cursor=%s" % page_cursor if page_cursor else "")
        body = do_request(HTTP_GET,
                          "%s%s%s" % (path, query_marker, cursor_param),
                          cluster=c)
        page_cursor = body.get('page_cursor')
        result_list.extend(body['results'])
        if not page_cursor:
            return result_list
# -------------------------------------------------------------------
# Network functions
# -------------------------------------------------------------------
def get_lswitches(cluster, neutron_net_id):
    """Return the logical switch(es) backing a neutron network.

    A network flagged with a 'multi_lswitch' tag is backed by extra
    lswitches tagged with the neutron network id; those are fetched too.
    :raises: exception.NetworkNotFound when the main lswitch is missing
    """
    main_uri = _build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
                               relations="LogicalSwitchStatus")
    try:
        main_switch = do_request(HTTP_GET, main_uri, cluster=cluster)
        results = [main_switch]
        for tag in main_switch['tags']:
            if (tag['scope'] == "multi_lswitch" and
                tag['tag'] == "True"):
                # Fetch extra logical switches
                extra_uri = _build_uri_path(
                    LSWITCH_RESOURCE,
                    fields="uuid,display_name,tags,lport_count",
                    relations="LogicalSwitchStatus",
                    filters={'tag': neutron_net_id,
                             'tag_scope': 'quantum_net_id'})
                results.extend(get_all_query_pages(extra_uri, cluster))
        return results
    except exception.NotFound:
        raise exception.NetworkNotFound(net_id=neutron_net_id)
def create_lswitch(cluster, tenant_id, display_name,
                   transport_type=None,
                   transport_zone_uuid=None,
                   vlan_id=None,
                   neutron_net_id=None,
                   shared=None,
                   **kwargs):
    """Create a logical switch on the NVP cluster and return it."""
    # 'flat' and 'vlan' networks both map to a NVP 'bridge' binding.
    nvp_binding_type = transport_type
    if transport_type in ('flat', 'vlan'):
        nvp_binding_type = 'bridge'
    transport_zone_config = {
        "zone_uuid": (transport_zone_uuid or
                      cluster.default_tz_uuid),
        "transport_type": (nvp_binding_type or
                           cfg.CONF.NVP.default_transport_type)}
    tags = [{"tag": tenant_id, "scope": "os_tid"},
            {"tag": NEUTRON_VERSION, "scope": "quantum"}]
    lswitch_obj = {"display_name": _check_and_truncate_name(display_name),
                   "transport_zones": [transport_zone_config],
                   "tags": tags}
    if nvp_binding_type == 'bridge' and vlan_id:
        transport_zone_config["binding_config"] = {
            "vlan_translation": [{"transport": vlan_id}]}
    if neutron_net_id:
        tags.append({"tag": neutron_net_id, "scope": "quantum_net_id"})
    if shared:
        tags.append({"tag": "true", "scope": "shared"})
    if "tags" in kwargs:
        tags.extend(kwargs["tags"])
    lswitch = do_request(HTTP_POST, _build_uri_path(LSWITCH_RESOURCE),
                         json.dumps(lswitch_obj), cluster=cluster)
    LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
    return lswitch
def update_lswitch(cluster, lswitch_id, display_name,
                   tenant_id=None, **kwargs):
    """Update display name and tags of an existing logical switch.

    :raises: exception.NetworkNotFound if the lswitch does not exist
    """
    tags = [{"tag": tenant_id, "scope": "os_tid"},
            {"tag": NEUTRON_VERSION, "scope": "quantum"}]
    if "tags" in kwargs:
        tags.extend(kwargs["tags"])
    lswitch_obj = {"display_name": _check_and_truncate_name(display_name),
                   "tags": tags}
    uri = _build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
    try:
        return do_request(HTTP_PUT, uri, json.dumps(lswitch_obj),
                          cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_("Network not found, Error: %s"), str(e))
        raise exception.NetworkNotFound(net_id=lswitch_id)
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
    """Create a NVP Layer-2 Network Gateway Service.

    :param cluster: The target NVP cluster
    :param tenant_id: Identifier of the Openstack tenant for which
        the gateway service is being created
    :param display_name: Descriptive name of this gateway service
    :param devices: List of transport node uuids (and network
        interfaces on them) to use for the network gateway service
    :raise NvpApiException: if there is a problem while communicating
        with the NVP controller
    """
    tags = [{"tag": tenant_id, "scope": "os_tid"},
            {"tag": NEUTRON_VERSION, "scope": "quantum"}]
    # NOTE(salvatore-orlando): This is a little confusing, but device_id in
    # NVP is actually the identifier a physical interface on the gateway
    # device, which in the Neutron API is referred as interface_name
    gateways = [{"transport_node_uuid": device['id'],
                 "device_id": device['interface_name'],
                 "type": "L2Gateway"} for device in devices]
    gwservice_obj = {
        "display_name": _check_and_truncate_name(display_name),
        "tags": tags,
        "gateways": gateways,
        "type": "L2GatewayServiceConfig"
    }
    # Use the HTTP_POST constant for consistency with the rest of the
    # module (the original used the bare "POST" literal).
    return do_request(
        HTTP_POST, _build_uri_path(GWSERVICE_RESOURCE),
        json.dumps(gwservice_obj), cluster=cluster)
def create_lrouter(cluster, tenant_id, display_name, nexthop):
    """Create a NVP logical router on the specified cluster.

    :param cluster: The target NVP cluster
    :param tenant_id: Identifier of the Openstack tenant for which
    the logical router is being created
    :param display_name: Descriptive name of this logical router
    :param nexthop: External gateway IP address for the logical router
    :raise NvpApiException: if there is a problem while communicating
    with the NVP controller
    """
    lrouter_obj = {
        "display_name": _check_and_truncate_name(display_name),
        "tags": [{"tag": tenant_id, "scope": "os_tid"},
                 {"tag": NEUTRON_VERSION, "scope": "quantum"}],
        "routing_config": {
            "default_route_next_hop": {
                "gateway_ip_address": nexthop,
                "type": "RouterNextHop"
            },
            "type": "SingleDefaultRouteImplicitRoutingConfig"
        },
        "type": "LogicalRouterConfig"
    }
    return do_request(HTTP_POST, _build_uri_path(LROUTER_RESOURCE),
                      json.dumps(lrouter_obj), cluster=cluster)
def delete_lrouter(cluster, lrouter_id):
    """Delete the logical router identified by lrouter_id."""
    uri = _build_uri_path(LROUTER_RESOURCE, resource_id=lrouter_id)
    do_request(HTTP_DELETE, uri, cluster=cluster)
def delete_l2_gw_service(cluster, gateway_id):
    """Delete the L2 gateway service identified by gateway_id."""
    # Use the HTTP_DELETE constant for consistency with the rest of the
    # module (the original used the bare "DELETE" literal).
    do_request(HTTP_DELETE, _build_uri_path(GWSERVICE_RESOURCE,
                                            resource_id=gateway_id),
               cluster=cluster)
def get_lrouter(cluster, lrouter_id):
    """Return the logical router, including its status relation."""
    uri = _build_uri_path(LROUTER_RESOURCE,
                          resource_id=lrouter_id,
                          relations='LogicalRouterStatus')
    return do_request(HTTP_GET, uri, cluster=cluster)
def get_l2_gw_service(cluster, gateway_id):
    """Return the L2 gateway service identified by gateway_id."""
    # Use the HTTP_GET constant for consistency with the rest of the
    # module (the original used the bare "GET" literal).
    return do_request(
        HTTP_GET, _build_uri_path(GWSERVICE_RESOURCE,
                                  resource_id=gateway_id),
        cluster=cluster)
def get_lrouters(cluster, tenant_id, fields=None, filters=None):
    """Return all logical routers, optionally restricted to a tenant.

    :param cluster: the target NVP cluster
    :param tenant_id: if set, only routers tagged with this os_tid
    :param fields: accepted for API symmetry but currently IGNORED --
        a fixed field list is always requested.  NOTE(review): confirm
        whether any caller expects this parameter to be honoured.
    :param filters: extra NVP query filters to merge into the request
    """
    actual_filters = {}
    if filters:
        actual_filters.update(filters)
    if tenant_id:
        actual_filters['tag'] = tenant_id
        actual_filters['tag_scope'] = 'os_tid'
    # Fixed field list; the 'fields' argument is not applied here.
    lrouter_fields = "uuid,display_name,fabric_status,tags"
    return get_all_query_pages(
        _build_uri_path(LROUTER_RESOURCE,
                        fields=lrouter_fields,
                        relations='LogicalRouterStatus',
                        filters=actual_filters),
        cluster)
def get_l2_gw_services(cluster, tenant_id=None,
                       fields=None, filters=None):
    """Return all L2 gateway services, optionally restricted to a tenant.

    :param fields: accepted for API symmetry but currently unused --
        NOTE(review): the request never restricts fields; confirm intent.
    :param filters: extra NVP query filters to merge into the request
    """
    actual_filters = dict(filters or {})
    if tenant_id:
        actual_filters['tag'] = tenant_id
        actual_filters['tag_scope'] = 'os_tid'
    return get_all_query_pages(
        _build_uri_path(GWSERVICE_RESOURCE,
                        filters=actual_filters),
        cluster)
def update_l2_gw_service(cluster, gateway_id, display_name):
    """Update the display name of a L2 gateway service.

    Returns the (possibly unmodified) gateway service body.
    """
    # TODO(salvatore-orlando): Allow updates for gateways too
    gwservice_obj = get_l2_gw_service(cluster, gateway_id)
    if not display_name:
        # Nothing to update
        return gwservice_obj
    gwservice_obj["display_name"] = _check_and_truncate_name(display_name)
    # Use the HTTP_PUT constant for consistency with the rest of the
    # module (the original used the bare "PUT" literal).
    return do_request(HTTP_PUT, _build_uri_path(GWSERVICE_RESOURCE,
                                                resource_id=gateway_id),
                      json.dumps(gwservice_obj), cluster=cluster)
def update_lrouter(cluster, lrouter_id, display_name, nexthop):
    """Update a logical router's display name and/or default next hop."""
    lrouter_obj = get_lrouter(cluster, lrouter_id)
    if not (display_name or nexthop):
        # Nothing to update
        return lrouter_obj
    new_name = _check_and_truncate_name(display_name)
    if new_name:
        lrouter_obj["display_name"] = new_name
    if nexthop:
        nh_element = lrouter_obj["routing_config"].get(
            "default_route_next_hop")
        if nh_element:
            nh_element["gateway_ip_address"] = nexthop
    uri = _build_uri_path(LROUTER_RESOURCE, resource_id=lrouter_id)
    return do_request(HTTP_PUT, uri, json.dumps(lrouter_obj),
                      cluster=cluster)
def delete_network(cluster, net_id, lswitch_id):
    """Delete the single logical switch backing a neutron network."""
    delete_networks(cluster, net_id, [lswitch_id])
#TODO(salvatore-orlando): Simplify and harmonize
def delete_networks(cluster, net_id, lswitch_ids):
    """Delete every logical switch in lswitch_ids.

    :raises: exception.NetworkNotFound on the first missing lswitch
    """
    for ls_id in lswitch_ids:
        try:
            do_request(HTTP_DELETE, "/ws.v1/lswitch/%s" % ls_id,
                       cluster=cluster)
        except exception.NotFound as e:
            LOG.error(_("Network not found, Error: %s"), str(e))
            raise exception.NetworkNotFound(net_id=ls_id)
def query_lswitch_lports(cluster, ls_uuid, fields="*",
                         filters=None, relations=None):
    """Return the logical ports of a logical switch.

    The neutron-level 'attachment' filter is translated into the NVP
    'attachment_vif_uuid' filter.  The caller's filters dict is copied
    before translation; the original implementation mutated it in place.
    """
    if filters and "attachment" in filters:
        # Fix filter for attachments
        filters = dict(filters)
        filters['attachment_vif_uuid'] = filters.pop("attachment")
    uri = _build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid,
                          fields=fields, filters=filters, relations=relations)
    return do_request(HTTP_GET, uri, cluster=cluster)['results']
def query_lrouter_lports(cluster, lr_uuid, fields="*",
                         filters=None, relations=None):
    """Return the logical ports of the given logical router."""
    uri = _build_uri_path(LROUTERPORT_RESOURCE,
                          parent_resource_id=lr_uuid,
                          fields=fields,
                          filters=filters,
                          relations=relations)
    response = do_request(HTTP_GET, uri, cluster=cluster)
    return response['results']
def delete_port(cluster, switch, port):
    """Delete a logical switch port.

    :raises: exception.PortNotFound when NVP reports the port or its
        switch missing; exception.NeutronException on other failures.
    """
    uri = "/ws.v1/lswitch/%s/lport/%s" % (switch, port)
    try:
        do_request(HTTP_DELETE, uri, cluster=cluster)
    except exception.NotFound:
        LOG.exception(_("Port or Network not found"))
        raise exception.PortNotFound(net_id=switch,
                                     port_id=port)
    except NvpApiClient.NvpApiException:
        raise exception.NeutronException()
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
    """Get port by neutron tag.

    Returns the NVP UUID of the logical port with tag q_port_id equal to
    neutron_port_id or None if the port is not Found.
    """
    uri = _build_uri_path(LSWITCHPORT_RESOURCE,
                          parent_resource_id=lswitch_uuid,
                          fields='uuid',
                          filters={'tag': neutron_port_id,
                                   'tag_scope': 'q_port_id'})
    LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' "
                "on: '%(lswitch_uuid)s'") %
              {'neutron_port_id': neutron_port_id,
               'lswitch_uuid': lswitch_uuid})
    matches = do_request(HTTP_GET, uri, cluster=cluster)["results"]
    if not matches:
        # Port not found: fall through to an explicit None.
        return None
    if len(matches) > 1:
        LOG.warn(_("Found '%(num_ports)d' ports with "
                   "q_port_id tag: '%(neutron_port_id)s'. "
                   "Only 1 was expected.") %
                 {'num_ports': len(matches),
                  'neutron_port_id': neutron_port_id})
    return matches[0]
def get_port(cluster, network, port, relations=None):
    """Fetch a logical switch port, optionally with a relation expanded.

    :raises: exception.PortNotFound when the port or switch is missing
    """
    LOG.info(_("get_port() %(network)s %(port)s"),
             {'network': network, 'port': port})
    uri = "/ws.v1/lswitch/%s/lport/%s?" % (network, port)
    if relations:
        uri += "relations=%s" % relations
    try:
        return do_request(HTTP_GET, uri, cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_("Port or Network not found, Error: %s"), str(e))
        raise exception.PortNotFound(port_id=port, net_id=network)
def _configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled):
lport_obj['allowed_address_pairs'] = []
if port_security_enabled:
for fixed_ip in fixed_ips:
ip_address = fixed_ip.get('ip_address')
if ip_address:
lport_obj['allowed_address_pairs'].append(
{'mac_address': mac_address, 'ip_address': ip_address})
# add address pair allowing src_ip 0.0.0.0 to leave
# this is required for outgoing dhcp request
lport_obj["allowed_address_pairs"].append(
{"mac_address": mac_address,
"ip_address": "0.0.0.0"})
lport_obj['security_profiles'] = list(security_profiles or [])
lport_obj['queue_uuid'] = queue_id
if mac_learning_enabled is not None:
lport_obj["mac_learning"] = mac_learning_enabled
lport_obj["type"] = "LogicalSwitchPortConfig"
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
                display_name, device_id, admin_status_enabled,
                mac_address=None, fixed_ips=None, port_security_enabled=None,
                security_profiles=None, queue_id=None,
                mac_learning_enabled=None):
    """Update a logical switch port and return the NVP response.

    :raises: exception.PortNotFound when the port or switch is missing
    """
    # device_id can be longer than 40 so we rehash it
    hashed_device_id = hashlib.sha1(device_id).hexdigest()
    tags = [dict(scope='os_tid', tag=tenant_id),
            dict(scope='q_port_id', tag=neutron_port_id),
            dict(scope='vm_id', tag=hashed_device_id),
            dict(scope='quantum', tag=NEUTRON_VERSION)]
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=_check_and_truncate_name(display_name),
        tags=tags)
    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled)
    path = "/ws.v1/lswitch/%s/lport/%s" % (lswitch_uuid, lport_uuid)
    try:
        result = do_request(HTTP_PUT, path, json.dumps(lport_obj),
                            cluster=cluster)
        LOG.debug(_("Updated logical port %(result)s "
                    "on logical switch %(uuid)s"),
                  {'result': result['uuid'], 'uuid': lswitch_uuid})
        return result
    except exception.NotFound as e:
        LOG.error(_("Port or Network not found, Error: %s"), str(e))
        raise exception.PortNotFound(port_id=lport_uuid, net_id=lswitch_uuid)
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
                 display_name, device_id, admin_status_enabled,
                 mac_address=None, fixed_ips=None, port_security_enabled=None,
                 security_profiles=None, queue_id=None,
                 mac_learning_enabled=None):
    """Creates a logical port on the assigned logical switch."""
    # device_id can be longer than 40 so we rehash it
    hashed_device_id = hashlib.sha1(device_id).hexdigest()
    display_name = _check_and_truncate_name(display_name)
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=[dict(scope='os_tid', tag=tenant_id),
              dict(scope='q_port_id', tag=neutron_port_id),
              dict(scope='vm_id', tag=hashed_device_id),
              dict(scope='quantum', tag=NEUTRON_VERSION)],
    )
    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled)
    path = _build_uri_path(LSWITCHPORT_RESOURCE,
                           parent_resource_id=lswitch_uuid)
    result = do_request(HTTP_POST, path, json.dumps(lport_obj),
                        cluster=cluster)
    # Typo fix: the original message read "logical swtich".
    LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"),
              {'result': result['uuid'], 'uuid': lswitch_uuid})
    return result
def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id,
                        display_name, admin_status_enabled, ip_addresses):
    """Creates a logical port on the assigned logical router."""
    lport_obj = {
        "admin_status_enabled": admin_status_enabled,
        "display_name": display_name,
        "tags": [dict(scope='os_tid', tag=tenant_id),
                 dict(scope='q_port_id', tag=neutron_port_id),
                 dict(scope='quantum', tag=NEUTRON_VERSION)],
        "ip_addresses": ip_addresses,
        "type": "LogicalRouterPortConfig",
    }
    path = _build_uri_path(LROUTERPORT_RESOURCE,
                           parent_resource_id=lrouter_uuid)
    result = do_request(HTTP_POST, path, json.dumps(lport_obj),
                        cluster=cluster)
    LOG.debug(_("Created logical port %(lport_uuid)s on "
                "logical router %(lrouter_uuid)s"),
              {'lport_uuid': result['uuid'],
               'lrouter_uuid': lrouter_uuid})
    return result
def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
                        tenant_id, neutron_port_id, display_name,
                        admin_status_enabled, ip_addresses):
    """Updates a logical port on the assigned logical router.

    Attributes whose value is None are stripped from the request body
    so they are not sent to NVP.
    """
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=[dict(scope='os_tid', tag=tenant_id),
              dict(scope='q_port_id', tag=neutron_port_id),
              dict(scope='quantum', tag=NEUTRON_VERSION)],
        ip_addresses=ip_addresses,
        type="LogicalRouterPortConfig"
    )
    # Do not pass null items to NVP.  Iterate over a snapshot of the
    # keys: deleting entries while iterating the live key view raises
    # RuntimeError on Python 3 (it worked on Python 2 only because
    # keys() returned a list).
    for key in list(lport_obj.keys()):
        if lport_obj[key] is None:
            del lport_obj[key]
    path = _build_uri_path(LROUTERPORT_RESOURCE,
                           lrouter_port_uuid,
                           parent_resource_id=lrouter_uuid)
    result = do_request(HTTP_PUT, path,
                        json.dumps(lport_obj),
                        cluster=cluster)
    LOG.debug(_("Updated logical port %(lport_uuid)s on "
                "logical router %(lrouter_uuid)s"),
              {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
    return result
def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
    """Deletes a logical port on the assigned logical router."""
    # (The original docstring incorrectly said "Creates".)
    path = _build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, lrouter_uuid)
    do_request(HTTP_DELETE, path, cluster=cluster)
    LOG.debug(_("Delete logical router port %(lport_uuid)s on "
                "logical router %(lrouter_uuid)s"),
              {'lport_uuid': lport_uuid,
               'lrouter_uuid': lrouter_uuid})
def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid):
    """Delete the router port patched to a given logical switch port.

    Reads the switch port's LogicalPortAttachment relation to find the
    peer router port uuid; does nothing when no peer is attached.
    """
    nvp_port = get_port(cluster, ls_uuid, lp_uuid,
                        relations="LogicalPortAttachment")
    relations = nvp_port.get('_relations')
    att_data = relations.get('LogicalPortAttachment') if relations else None
    lrp_uuid = att_data.get('peer_port_uuid') if att_data else None
    if lrp_uuid:
        delete_router_lport(cluster, lr_uuid, lrp_uuid)
def find_router_gw_port(context, cluster, router_id):
    """Retrieves the external gateway port for a NVP logical router."""
    # Find the uuid of nvp ext gw logical router port
    # TODO(salvatore-orlando): Consider storing it in Neutron DB
    lports = query_lrouter_lports(
        cluster, router_id,
        relations="LogicalPortAttachment")
    for lport in lports:
        if '_relations' not in lport:
            continue
        attachment = lport['_relations'].get('LogicalPortAttachment')
        if attachment and attachment.get('type') == 'L3GatewayAttachment':
            return lport
def plug_router_port_attachment(cluster, router_id, port_id,
                                attachment_uuid, nvp_attachment_type,
                                attachment_vlan=None):
    """Attach a router port to the given attachment.

    Current attachment types:
       - PatchAttachment [-> logical switch port uuid]
       - L3GatewayAttachment [-> L3GatewayService uuid]
    For the latter attachment type a VLAN ID can be specified as well.
    :raises: Exception for any other attachment type
    """
    uri = _build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id,
                          is_attachment=True)
    attach_obj = {}
    attach_obj["type"] = nvp_attachment_type
    if nvp_attachment_type == "PatchAttachment":
        attach_obj["peer_port_uuid"] = attachment_uuid
    elif nvp_attachment_type == "L3GatewayAttachment":
        attach_obj["l3_gateway_service_uuid"] = attachment_uuid
        if attachment_vlan:
            attach_obj['vlan_id'] = attachment_vlan
    else:
        # TODO(salv-orlando): avoid raising generic exception
        # Interpolate the type into the message: the original passed it
        # as a second Exception argument, so '%s' was never substituted.
        raise Exception(_("Invalid NVP attachment type '%s'")
                        % nvp_attachment_type)
    return do_request(HTTP_PUT, uri, json.dumps(attach_obj), cluster=cluster)
def get_port_status(cluster, lswitch_id, port_id):
    """Retrieve the operational status of the port."""
    try:
        status_body = do_request(HTTP_GET,
                                 "/ws.v1/lswitch/%s/lport/%s/status" %
                                 (lswitch_id, port_id), cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_("Port not found, Error: %s"), str(e))
        raise exception.PortNotFound(port_id=port_id, net_id=lswitch_id)
    return (constants.PORT_STATUS_ACTIVE
            if status_body['link_status_up'] is True
            else constants.PORT_STATUS_DOWN)
def _plug_interface(cluster, lswitch_id, lport_id, att_obj):
    """PUT an attachment object onto a logical switch port."""
    return do_request(HTTP_PUT,
                      _build_uri_path(LSWITCHPORT_RESOURCE, lport_id,
                                      lswitch_id, is_attachment=True),
                      json.dumps(att_obj), cluster=cluster)
def plug_l2_gw_service(cluster, lswitch_id, lport_id,
                       gateway_id, vlan_id=None):
    """Plug a Layer-2 Gateway Attachment object in a logical port."""
    attachment = {'type': 'L2GatewayAttachment',
                  'l2_gateway_service_uuid': gateway_id}
    if vlan_id:
        attachment['vlan_id'] = vlan_id
    return _plug_interface(cluster, lswitch_id, lport_id, attachment)
def plug_interface(cluster, lswitch_id, port, type, attachment=None):
    """Plug a VIF Attachment object in a logical port.

    NOTE: 'type' shadows the builtin, but the name is kept for
    compatibility with callers passing it as a keyword.
    """
    attachment_body = {"type": type}
    if attachment:
        attachment_body["vif_uuid"] = attachment
    return _plug_interface(cluster, lswitch_id, port, attachment_body)
#------------------------------------------------------------------------------
# Security Profile convenience functions.
#------------------------------------------------------------------------------
# Tag scope under which nova stores the external security profile id.
EXT_SECURITY_PROFILE_ID_SCOPE = 'nova_spid'
# Tag scope under which the owning OpenStack tenant id is stored.
TENANT_ID_SCOPE = 'os_tid'
def format_exception(etype, e, exception_locals):
    """Consistent formatting for exceptions.

    :param etype: a string describing the exception type.
    :param e: the exception.
    :param exception_locals: calling context local variable dict.
    :returns: a formatted string.
    """
    msg = ["Error. %s exception: %s." % (etype, e)]
    # items() rather than iteritems() keeps this Python 3 compatible;
    # the (potentially huge) request object is excluded from the dump.
    local_vars = dict((k, v) for k, v in exception_locals.items()
                      if k != 'request')
    msg.append("locals=[%s]" % str(local_vars))
    return ' '.join(msg)
def do_request(*args, **kwargs):
    """Issue a request to the cluster specified in kwargs.

    :param args: a list of positional arguments.
    :param kwargs: a list of keyword arguments.
    :returns: the result of the operation loaded into a python
        object or None.
    :raises: exception.NotFound when NVP reports a missing resource
    """
    cluster = kwargs["cluster"]
    try:
        res = cluster.api_client.request(*args)
    except NvpApiClient.ResourceNotFound:
        raise exception.NotFound()
    # An empty response body yields an implicit None.
    if res:
        return json.loads(res)
def mk_body(**kwargs):
    """Convenience function creates and dumps dictionary to string.

    :param kwargs: the key/value pairs to be dumped into a json string.
    :returns: a json string.
    """
    # ensure_ascii=False keeps non-ASCII characters verbatim in the body.
    return json.dumps(kwargs, ensure_ascii=False)
# -----------------------------------------------------------------------------
# Security Group API Calls
# -----------------------------------------------------------------------------
def create_security_profile(cluster, tenant_id, security_profile):
    """Create a NVP security profile and return the NVP response.

    Every profile is created allowing DHCP responses out and all
    ingress traffic; the 'default' profile is then updated to allow
    traffic between its own members.
    """
    path = "/ws.v1/security-profile"
    # Allow all dhcp responses and all ingress traffic
    hidden_rules = {
        'logical_port_egress_rules': [
            {'ethertype': 'IPv4',
             'protocol': constants.UDP_PROTOCOL,
             'port_range_min': constants.DHCP_RESPONSE_PORT,
             'port_range_max': constants.DHCP_RESPONSE_PORT,
             'ip_prefix': '0.0.0.0/0'}],
        'logical_port_ingress_rules': [
            {'ethertype': 'IPv4'},
            {'ethertype': 'IPv6'}]}
    body = mk_body(
        tags=[dict(scope='os_tid', tag=tenant_id),
              dict(scope='quantum', tag=NEUTRON_VERSION)],
        display_name=_check_and_truncate_name(security_profile.get('name')),
        logical_port_ingress_rules=(
            hidden_rules['logical_port_ingress_rules']),
        logical_port_egress_rules=hidden_rules['logical_port_egress_rules']
    )
    rsp = do_request(HTTP_POST, path, body, cluster=cluster)
    if security_profile.get('name') == 'default':
        # If security group is default allow ip traffic between
        # members of the same security profile is allowed and ingress traffic
        # from the switch
        rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4',
                                                'profile_uuid': rsp['uuid']},
                                               {'ethertype': 'IPv6',
                                                'profile_uuid': rsp['uuid']}],
                 'logical_port_ingress_rules': [{'ethertype': 'IPv4'},
                                                {'ethertype': 'IPv6'}]}
        update_security_group_rules(cluster, rsp['uuid'], rules)
    LOG.debug(_("Created Security Profile: %s"), rsp)
    return rsp
def update_security_group_rules(cluster, spid, rules):
    """Replace the rule set of security profile spid.

    NOTE: the caller's rules dict is extended in place with the implicit
    DHCP-response egress rule (and, when no ingress rules are supplied,
    a placeholder rule that drops all ingress traffic).
    """
    path = "/ws.v1/security-profile/%s" % spid
    # Allow all dhcp responses in
    rules['logical_port_egress_rules'].append(
        {'ethertype': 'IPv4', 'protocol': constants.UDP_PROTOCOL,
         'port_range_min': constants.DHCP_RESPONSE_PORT,
         'port_range_max': constants.DHCP_RESPONSE_PORT,
         'ip_prefix': '0.0.0.0/0'})
    # If there are no ingress rules add bunk rule to drop all ingress traffic
    if not rules['logical_port_ingress_rules']:
        rules['logical_port_ingress_rules'].append(
            {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'})
    try:
        body = mk_body(
            logical_port_ingress_rules=rules['logical_port_ingress_rules'],
            logical_port_egress_rules=rules['logical_port_egress_rules'])
        rsp = do_request(HTTP_PUT, path, body, cluster=cluster)
    except exception.NotFound as e:
        LOG.error(format_exception("Unknown", e, locals()))
        #FIXME(salvatore-orlando): This should not raise NeutronException
        raise exception.NeutronException()
    LOG.debug(_("Updated Security Profile: %s"), rsp)
    return rsp
def delete_security_profile(cluster, spid):
    """Delete the security profile identified by spid.

    :raises: exception.NeutronException when the profile is missing
    """
    path = "/ws.v1/security-profile/%s" % spid
    try:
        do_request(HTTP_DELETE, path, cluster=cluster)
    except exception.NotFound as e:
        # FIXME(salv-orlando): should not raise NeutronException
        LOG.error(format_exception("Unknown", e, locals()))
        raise exception.NeutronException()
def _create_nat_match_obj(**kwargs):
    """Build a NAT rule match object from keyword match criteria.

    :raises: Exception when any keyword is not one of MATCH_KEYS
    """
    nat_match_obj = {'ethertype': 'IPv4'}
    delta = set(kwargs.keys()) - set(MATCH_KEYS)
    if delta:
        # Interpolate the offending keys into the message: the original
        # passed them as a second Exception argument, so '%s' was never
        # substituted.
        raise Exception(_("Invalid keys for NAT match: %s") % delta)
    nat_match_obj.update(kwargs)
    return nat_match_obj
def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
    """POST a NAT rule body onto the given logical router."""
    LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj)
    return do_request(HTTP_POST,
                      _build_uri_path(LROUTERNAT_RESOURCE,
                                      parent_resource_id=router_id),
                      json.dumps(nat_rule_obj),
                      cluster=cluster)
def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):
return {"to_source_ip_address_min": min_src_ip,
"to_source_ip_address_max": max_src_ip,
"type": "SourceNatRule",
"match": nat_match_obj}
def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
    """NoSNAT rules are unsupported on NVP v2: log and do nothing."""
    LOG.info(_("No SNAT rules cannot be applied as they are not available in "
               "this version of the NVP platform"))
def create_lrouter_snat_rule_v2(cluster, router_id,
                                min_src_ip, max_src_ip, match_criteria=None):
    """Create a SourceNatRule (NVP v2 schema) on a logical router."""
    match = _create_nat_match_obj(**match_criteria)
    rule = _build_snat_rule_obj(min_src_ip, max_src_ip, match)
    return _create_lrouter_nat_rule(cluster, router_id, rule)
def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip,
                                to_dst_port=None, match_criteria=None):
    """Create a DestinationNatRule (NVP v2 schema) on a logical router."""
    rule = {
        "to_destination_ip_address_min": dst_ip,
        "to_destination_ip_address_max": dst_ip,
        "type": "DestinationNatRule",
        "match": _create_nat_match_obj(**match_criteria)
    }
    if to_dst_port:
        rule['to_destination_port'] = to_dst_port
    return _create_lrouter_nat_rule(cluster, router_id, rule)
def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None,
                                  match_criteria=None):
    """Create a NoSourceNatRule (NVP v3 schema) on a logical router."""
    rule = {
        "type": "NoSourceNatRule",
        "match": _create_nat_match_obj(**match_criteria)
    }
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip,
                                order=None, match_criteria=None):
    """Create a SourceNatRule (NVP v3 schema) on a logical router."""
    rule = _build_snat_rule_obj(min_src_ip, max_src_ip,
                                _create_nat_match_obj(**match_criteria))
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None,
                                order=None, match_criteria=None):
    """Create a DestinationNatRule (NVP v3 schema) on a logical router."""
    rule = {
        "to_destination_ip_address": dst_ip,
        "type": "DestinationNatRule",
        "match": _create_nat_match_obj(**match_criteria)
    }
    if to_dst_port:
        rule['to_destination_port'] = to_dst_port
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
@version_dependent
def create_lrouter_dnat_rule(cluster, *args, **kwargs):
    # Stub: dispatched by @version_dependent to the per-major-version
    # implementation registered in NVPLIB_FUNC_DICT.
    pass
@version_dependent
def create_lrouter_snat_rule(cluster, *args, **kwargs):
    # Stub: dispatched by @version_dependent to the per-major-version
    # implementation registered in NVPLIB_FUNC_DICT.
    pass
@version_dependent
def create_lrouter_nosnat_rule(cluster, *args, **kwargs):
    # Stub: dispatched by @version_dependent to the per-major-version
    # implementation registered in NVPLIB_FUNC_DICT.
    pass
def delete_nat_rules_by_match(cluster, router_id, rule_type,
                              max_num_expected,
                              min_num_expected=0,
                              **kwargs):
    """Delete NAT rules of rule_type whose match contains all of kwargs.

    :raises: nvp_exc.NvpNatRuleMismatch when the number of matching
        rules falls outside [min_num_expected, max_num_expected]
    """
    # remove nat rules
    nat_rules = query_nat_rules(cluster, router_id)
    to_delete_ids = []
    for r in nat_rules:
        if (r['type'] != rule_type):
            continue
        # items() rather than iteritems() keeps this Python 3 compatible.
        for key, value in kwargs.items():
            if not (key in r['match'] and r['match'][key] == value):
                break
        else:
            to_delete_ids.append(r['uuid'])
    if not (len(to_delete_ids) in
            range(min_num_expected, max_num_expected + 1)):
        raise nvp_exc.NvpNatRuleMismatch(actual_rules=len(to_delete_ids),
                                         min_rules=min_num_expected,
                                         max_rules=max_num_expected)
    for rule_id in to_delete_ids:
        delete_router_nat_rule(cluster, router_id, rule_id)
def delete_router_nat_rule(cluster, router_id, rule_id):
    """Delete a single NAT rule from a logical router."""
    do_request(HTTP_DELETE,
               _build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id),
               cluster=cluster)
def query_nat_rules(cluster, router_id, fields="*", filters=None):
    """Return all NAT rules configured on a logical router."""
    return get_all_query_pages(
        _build_uri_path(LROUTERNAT_RESOURCE, parent_resource_id=router_id,
                        fields=fields, filters=filters),
        cluster)
# NOTE(salvatore-orlando): The following FIXME applies in general to
# each operation on list attributes.
# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
                            ips_to_add, ips_to_remove):
    """Add and remove IP addresses on a logical router port."""
    uri = _build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
    try:
        port = do_request(HTTP_GET, uri, cluster=cluster)
        # TODO(salvatore-orlando): Enforce ips_to_add intersection with
        # ips_to_remove is empty
        ip_addresses = set(port['ip_addresses'])
        ip_addresses -= set(ips_to_remove)
        ip_addresses |= set(ips_to_add)
        # Set is not JSON serializable - convert to list
        port['ip_addresses'] = list(ip_addresses)
        do_request(HTTP_PUT, uri, json.dumps(port), cluster=cluster)
    except exception.NotFound:
        # FIXME(salv-orlando):avoid raising different exception
        msg = (_("Router Port %(lport_id)s not found on router "
                 "%(lrouter_id)s") % {'lport_id': lport_id,
                                      'lrouter_id': lrouter_id})
        LOG.exception(msg)
        raise nvp_exc.NvpPluginException(err_msg=msg)
    except NvpApiClient.NvpApiException as e:
        msg = _("An exception occurred while updating IP addresses on a "
                "router logical port:%s") % str(e)
        LOG.exception(msg)
        raise nvp_exc.NvpPluginException(err_msg=msg)
# TODO(salvatore-orlando): Also handle changes in minor versions
# Maps each version-dependent function name to its per-major-version
# implementation; consumed by the version_dependent decorator.
NVPLIB_FUNC_DICT = {
    'create_lrouter_dnat_rule': {2: create_lrouter_dnat_rule_v2,
                                 3: create_lrouter_dnat_rule_v3},
    'create_lrouter_snat_rule': {2: create_lrouter_snat_rule_v2,
                                 3: create_lrouter_snat_rule_v3},
    'create_lrouter_nosnat_rule': {2: create_lrouter_nosnat_rule_v2,
                                   3: create_lrouter_nosnat_rule_v3}
}
# -----------------------------------------------------------------------------
# QOS API Calls
# -----------------------------------------------------------------------------
def create_lqueue(cluster, lqueue):
    """Create a logical queue and return its uuid.

    NOTE: the caller's lqueue dict has its 'tags' entry overwritten.
    :raises: exception.NeutronException on NVP API failure
    """
    uri = _build_uri_path(LQUEUE_RESOURCE)
    lqueue['tags'] = [{'tag': NEUTRON_VERSION, 'scope': 'quantum'}]
    try:
        response = do_request(HTTP_POST, uri, json.dumps(lqueue),
                              cluster=cluster)
        return response['uuid']
    except NvpApiClient.NvpApiException:
        # FIXME(salv-orlando): This should not raise NeutronException
        LOG.exception(_("Failed to create logical queue"))
        raise exception.NeutronException()
def delete_lqueue(cluster, id):
    """Delete the logical queue identified by id.

    NOTE: 'id' shadows the builtin, but the name is kept for
    compatibility with callers passing it as a keyword.
    :raises: exception.NeutronException on any failure
    """
    try:
        do_request(HTTP_DELETE,
                   _build_uri_path(LQUEUE_RESOURCE, resource_id=id),
                   cluster=cluster)
    except Exception:
        # FIXME(salv-orlando): This should not raise NeutronException
        LOG.exception(_("Failed to delete logical queue"))
        raise exception.NeutronException()
# -----------------------------------------------------------------------------
# NVP API Calls for check_nvp_config utility
# -----------------------------------------------------------------------------
def config_helper(http_method, http_uri, cluster):
    """Issue a request against *cluster*, re-raising failures with context."""
    try:
        return do_request(http_method, http_uri, cluster=cluster)
    except Exception as exc:
        controllers = ', '.join(cluster.nvp_controllers)
        raise Exception("Error '%s' when connecting to controller(s): %s."
                        % (str(exc), controllers))
def check_cluster_connectivity(cluster):
    """Make sure that we can issue a request to each of the cluster nodes."""
    return config_helper(HTTP_GET, "/ws.v1/control-cluster", cluster)
def get_gateway_services(cluster):
    """Fetch the uuids of the gateway services configured on the cluster."""
    return config_helper(HTTP_GET, "/ws.v1/gateway-service?fields=uuid",
                         cluster)
def get_transport_zones(cluster):
    """Fetch the uuids of the transport zones configured on the cluster."""
    return config_helper(HTTP_GET, "/ws.v1/transport-zone?fields=uuid",
                         cluster)
| |
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from swaggyjenkins.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from swaggyjenkins.exceptions import ApiAttributeError
def lazy_import():
    # Deferred import of the Link model, published into this module's
    # namespace so openapi_types()/additional_properties_type() can refer
    # to it by name.  Presumably deferred to avoid import cycles between
    # generated models (standard openapi-generator pattern).
    from swaggyjenkins.model.link import Link
    globals()['Link'] = Link
class GithubOrganizationlinks(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes are defined for this model.
    allowed_values = {
    }

    # No length/range/regex validations are defined for this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # Instances of this model are never serialized as JSON null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'repositories': (Link,),  # noqa: E501
            '_self': (Link,),  # noqa: E501
            '_class': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator: this schema takes no part in polymorphism.
        return None

    # Python attribute name -> JSON key in the OpenAPI document.  Note the
    # JSON key 'self' is exposed as '_self' to avoid clashing with Python.
    attribute_map = {
        'repositories': 'repositories',  # noqa: E501
        '_self': 'self',  # noqa: E501
        '_class': '_class',  # noqa: E501
    }

    # No properties are marked readOnly in the spec.
    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """GithubOrganizationlinks - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is traveled through is
                                added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            repositories (Link): [optional]  # noqa: E501
            _self (Link): [optional]  # noqa: E501
            _class (str): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Allocate the instance directly, skipping OpenApiModel.__new__ and
        # __init__ (unlike __init__, this path does not reject read-only
        # attributes).
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Unknown keys are dropped only when the configuration asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes assigned in __init__ and
    # _from_openapi_data on every instance.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """GithubOrganizationlinks - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is traveled through is
                                added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            repositories (Link): [optional]  # noqa: E501
            _self (Link): [optional]  # noqa: E501
            _class (str): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Unknown keys are dropped only when the configuration asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
InterlisDialog
A QGIS plugin
Interlis Import/Export
-------------------
begin : 2014-01-18
copyright : (C) 2014 by Pirmin Kalberer / Sourcepole
email : pka@sourcepole.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import pyqtSlot, Qt, QSettings, QEventLoop, QTimer, qDebug
from PyQt4.QtGui import QFileDialog, QMessageBox, QDialog, QDockWidget
from PyQt4.QtNetwork import QNetworkRequest, QNetworkReply
from qgis.core import QGis, QgsMessageLog, QgsVectorLayer, QgsDataSourceURI, \
QgsNetworkAccessManager
from qgis.gui import QgsMessageBar
from ui_interlis import Ui_Interlis
from sublayersdialog import SublayersDialog
import os.path
import tempfile
import codecs
from xml.etree import ElementTree
from pyqtconfig import QSettingsManager
from ogrtools.ogrtransform.ogrconfig import OgrConfig
from ogrtools.interlis.model_loader import ModelLoader
from ogrtools.interlis.ilismeta import ImdParser
from ogrtools.pyogr.singlegeomvrt import has_multi_geom_tables, ogr2vrt
try:
from osgeo import gdal
except ImportError:
import gdal
class InterlisDialog(QtGui.QDialog):
    """Main dialog of the Interlis import/export QGIS plugin.

    Wires the Designer-generated UI (Ui_Interlis) to OGR-based import of
    Interlis transfer files, either directly into QGIS layers or into a
    PostGIS database.  Slot methods follow the Qt auto-connection naming
    scheme (on_<widget>_<signal>) used by setupUi.
    """

    def __init__(self, plugin):
        QtGui.QDialog.__init__(self)
        self._plugin = plugin
        # Set up the user interface from Designer.
        self.ui = Ui_Interlis()
        self.ui.setupUi(self)
        # Initialize DB connection drop-down
        self.ui.cbDbConnections.clear()
        self.ui.cbDbConnections.addItems(self.dbConnectionList())
        self._add_settings_handlers()
        self.ui.cbResetData.setEnabled(False)  # Not implemented yet

    def setup(self):
        # Clear the data-file field each time the dialog is (re)opened.
        self.ui.mDataLineEdit.setText("")

    def _add_settings_handlers(self):
        # Persist widget state across sessions via pyqtconfig.
        self._settings = QSettingsManager()
        # self._settings.add_handler(
        #     'interlis/modelFile', self.ui.mModelLineEdit)
        self._settings.add_handler(
            'interlis/dbConnection', self.ui.cbDbConnections)
        self._settings.add_handler(
            'interlis/ilisMetaUrl', self.ui.mIlisMetaUrlLineEdit)
        self._settings.add_handler(
            'interlis/skipFailures', self.ui.cbSkipFailures)
        self._settings.add_handler(
            'interlis/strokeCurve', self.ui.cbStrokeCurve)

    def iliDs(self):
        """OGR connection string for selected Interlis transfer file + model"""
        return self.iliFileDs(self.ui.mDataLineEdit.text())

    def iliFileDs(self, fn):
        """OGR connection string for Interlis transfer file + model"""
        if not fn:
            return ""
        if self.ui.mModelLineEdit.text():
            # OGR Interlis datasource syntax: "<transfer file>,<model file>"
            return fn + "," + self.ui.mModelLineEdit.text()
        else:
            return fn

    def _empty_transfer_ds(self):
        # Build an OGR datasource from a generated *empty* transfer file,
        # for schema-only operations (enum import / schema creation).
        imd = ImdParser(self.ui.mModelLineEdit.text())
        transferfn = imd.gen_empty_transfer_file()
        ds = transferfn + "," + self.ui.mModelLineEdit.text()
        return ds

    def pgDs(self):
        """OGR connection string for selected PostGIS DB"""
        key = u"/PostgreSQL/connections/" + \
            self.ui.cbDbConnections.currentText()
        settings = QSettings()
        settings.beginGroup(key)
        params = {
            'host': settings.value("host", type=str),
            'port': settings.value("port", type=str),
            'dbname': settings.value("database", type=str),
            'user': settings.value("username", type=str),
            'password': settings.value("password", type=str)
        }
        ds = 'PG:'
        # Only include connection parameters that are actually set.
        for k, v in params.items():
            if v:
                ds = ds + k + "='" + v + "' "
        return ds

    def pgUri(self):
        """QgsDataSourceURI for selected PostGIS DB"""
        key = u"/PostgreSQL/connections/" + \
            self.ui.cbDbConnections.currentText()
        settings = QSettings()
        settings.beginGroup(key)
        uri = QgsDataSourceURI()
        uri.setConnection(
            settings.value("host", type=str),
            settings.value("port", type=str),
            settings.value("database", type=str),
            settings.value("username", type=str),
            settings.value("password", type=str),
            QgsDataSourceURI.SSLmode(settings.value("sslmode", type=int))
        )
        uri.setUseEstimatedMetadata(
            settings.value("estimatedMetadata", type=bool))
        return uri

    def dbConnectionList(self):
        """Names of the PostGIS connections configured in QGIS settings."""
        connection_names = []
        settings = QSettings()
        settings.beginGroup(u"/PostgreSQL/connections")
        for name in settings.childGroups():
            connection_names.append(name)
        settings.endGroup()
        return connection_names

    def _create_wps_request(self, ili):
        """WPS Execute XML for the 'ilismetalookup' process, with *ili* as
        the literal 'ilimodel' input."""
        return """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<wps:Execute service="WPS" version="1.0.0" xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd">
<ows:Identifier>ilismetalookup</ows:Identifier>
<wps:DataInputs>
<wps:Input>
<ows:Identifier>ilimodel</ows:Identifier>
<ows:Title>ilimodel</ows:Title>
<wps:Data>
<wps:LiteralData>%s</wps:LiteralData>
</wps:Data>
</wps:Input>
</wps:DataInputs>
</wps:Execute>
""" % ili

    def _parse_wps_response(self, xml):
        """Extract the IlisMeta model text from a WPS response, or None."""
        tree = ElementTree.ElementTree(ElementTree.fromstring(xml))
        ns = {"wps": "http://www.opengis.net/wps/1.0.0"}
        imd = tree.find(
            "wps:ProcessOutputs/wps:Output/wps:Data/wps:LiteralData", ns)
        if imd is None:
            return None
        else:
            return imd.text

    @pyqtSlot()
    def on_mDataFileButton_clicked(self):
        # Browse for the Interlis transfer (data) file.
        dataFilePath = QFileDialog.getOpenFileName(
            None, "Open Interlis data file", self.ui.mDataLineEdit.text(),
            "Interlis transfer file (*.itf *.ITF *.xtf *.XTF *.xml);;All files (*.*)")
        if not dataFilePath:
            return  # dialog canceled
        self.ui.mDataLineEdit.setText(dataFilePath)

    @pyqtSlot(str)
    def on_mDataLineEdit_textChanged(self, s):
        self._update_data_import_button()

    @pyqtSlot(bool)
    def on_mModelAutoLoadCheckBox_toggled(self, checked):
        self._update_data_import_button()

    def _update_data_import_button(self):
        # Import needs a data file plus either auto-load or an explicit model.
        self.ui.mImportButton.setEnabled(
            self.ui.mDataLineEdit.text() != "" and
            (self.ui.mModelAutoLoadCheckBox.isChecked() or
             self.ui.mModelLineEdit.text() != ""))

    def loadModel(self):
        """Look up and download the IlisMeta model for the current data file.

        On success the path of a temporary .imd file is put into
        mModelLineEdit; on failure the field is cleared and a warning is
        logged.
        """
        imd = None
        try:
            loader = ModelLoader(self.ui.mDataLineEdit.text())
            models = loader.detect_models()
            model_names = map(lambda m: m.name, models)
            self._log_output("Looking up models: " + ', '.join(model_names))
            ili = loader.gen_lookup_ili()
            qDebug(ili)
            wpsreq = self._create_wps_request(ili)
            url = self.ui.mIlisMetaUrlLineEdit.text()
            req = QNetworkRequest(QtCore.QUrl(url))
            req.setHeader(QNetworkRequest.ContentTypeHeader, 'application/xml')
            reply = QgsNetworkAccessManager.instance().post(req, wpsreq)
            # Wait for reply or timeout (15 s), pumping a local event loop.
            loop = QEventLoop()
            reply.finished.connect(loop.quit)
            QTimer.singleShot(15000, reply.abort)
            loop.exec_()
            if reply.isFinished() and reply.error() == QNetworkReply.NoError:
                result = reply.readAll()
                imd = self._parse_wps_response(result)
        # NOTE(review): bare 'except:' also swallows KeyboardInterrupt and
        # SystemExit; 'except Exception:' would be safer.
        except:
            qDebug("Exception during IlisModel download")
        if imd is None:
            self._show_log_window()
            QgsMessageLog.logMessage(
                "Couldn't download Ilismeta model", "Interlis",
                QgsMessageLog.WARNING)
            self.ui.mModelLineEdit.setText("")
        else:
            fh, imdfn = tempfile.mkstemp(suffix='.imd')
            os.close(fh)
            with codecs.open(imdfn, "w", encoding='utf-8') as file:
                file.write(imd)
            self.ui.mModelLineEdit.setText(imdfn)

    @pyqtSlot()
    def on_mModelFileButton_clicked(self):
        # Browse for the IlisMeta model file.
        modelFilePath = QFileDialog.getOpenFileName(
            None, "Open Interlis model file", self.ui.mModelLineEdit.text(),
            "IlisMeta model (*.imd *.IMD);;All files (*.*)")
        if not modelFilePath:
            return  # dialog canceled
        self.ui.mModelLineEdit.setText(modelFilePath)

    @pyqtSlot(str)
    def on_mModelLineEdit_textChanged(self, s):
        self._update_model_import_buttons()
        self._update_data_import_button()
        # Fall back to auto-loading when the explicit model is removed.
        self.ui.mModelAutoLoadCheckBox.setChecked(s == "")

    @pyqtSlot(bool)
    def on_mQgisLayer_toggled(self, checked):
        # DB connection selection only matters when not importing as layers.
        self.ui.cbDbConnections.setEnabled(not checked)
        self._update_model_import_buttons()

    @pyqtSlot(int)
    def on_cbDbConnections_currentIndexChanged(self, v):
        self._update_model_import_buttons()

    def _dbconn_selected(self):
        # True when DB import mode is active and a real connection is chosen
        # (index 0 is presumably a placeholder entry — TODO confirm).
        return (not self.ui.mQgisLayer.isChecked() and
                self.ui.cbDbConnections.currentIndex() != 0)

    def _update_model_import_buttons(self):
        # Enum import / schema creation need a model and a DB connection.
        self.ui.mImportEnumsButton.setEnabled(
            self.ui.mModelLineEdit.text() != "" and self._dbconn_selected())
        self.ui.mCreateSchemaButton.setEnabled(
            self.ui.mModelLineEdit.text() != "" and self._dbconn_selected())

    @pyqtSlot()
    def on_mImportButton_clicked(self):
        self.setCursor(Qt.WaitCursor)
        self._set_stroke_curve_option()
        try:
            if self.ui.mModelAutoLoadCheckBox.isChecked():
                self.loadModel()
            if self.ui.mQgisLayer.isChecked():
                self.importtoqgis()
            else:
                self.importtodb()
        finally:
            self.unsetCursor()

    @pyqtSlot()
    def on_mImportEnumsButton_clicked(self):
        self.setCursor(Qt.WaitCursor)
        try:
            self.importenums()
        finally:
            self.unsetCursor()

    @pyqtSlot()
    def on_mCreateSchemaButton_clicked(self):
        self.setCursor(Qt.WaitCursor)
        try:
            self.createschema()
        finally:
            self.unsetCursor()

    def _ogr_config(self, ds):
        """OgrConfig for *ds*, from a config file if set, else the model."""
        ogrconfig = None  # self.ui.mConfigLineEdit.text()
        # self._log_output("_ogr_config ds: %s cfg: %s" % (ds, ogrconfig))
        if ogrconfig:
            cfg = OgrConfig(ds=ds, config=ogrconfig)
        else:
            cfg = OgrConfig(ds=ds, model=self.ui.mModelLineEdit.text())
        return cfg

    def _ogr_config_tmp(self, ds):
        # Like _ogr_config, but generates a temporary config file when none
        # is loaded; its path is kept in self._ogrconfig_tmp for cleanup by
        # _remove_ogrconfig_tmp().
        self._ogrconfig_tmp = None
        cfg = self._ogr_config(ds)
        if not cfg.is_loaded():
            __, self._ogrconfig_tmp = tempfile.mkstemp('.cfg', 'ogr_')
            self._gen_ogr_config(cfg, self._ogrconfig_tmp)
        return cfg

    def _gen_ogr_config(self, cfg, fn):
        # Write a PostgreSQL-targeted OGR config to *fn*.
        # NOTE(review): srs EPSG:21781 (CH1903/LV03) is hard-coded.
        format = 'PostgreSQL'
        cfgjson = cfg.generate_config(
            format, outfile=fn, layer_list=[], srs="EPSG:21781")
        qDebug(cfgjson)

    def _set_stroke_curve_option(self):
        # Propagate the "stroke curve" checkbox to the OGR Interlis driver.
        strokeCurve = self.ui.cbStrokeCurve.isChecked()
        gdal.SetConfigOption('OGR_STROKE_CURVE', str(strokeCurve))

    def _remove_ogrconfig_tmp(self):
        # Delete the temporary OGR config created by _ogr_config_tmp().
        if self._ogrconfig_tmp is not None:
            try:
                os.remove(self._ogrconfig_tmp)
            # NOTE(review): WindowsError only exists on Windows; on other
            # platforms an OSError here would escape (and this clause itself
            # would raise NameError).  'except OSError:' would be portable.
            except WindowsError:
                qDebug("WindowsError: Couldn't delete %s" %
                       self._ogrconfig_tmp)
            self._ogrconfig_tmp = None

    def importtoqgis(self):
        """Import the transfer file into QGIS as OGR layers (via a VRT)."""
        # QGIS OGR provider only supports one geometry column per table
        # We create a VRT with separate layers for each geometry
        # This is also a workaround for a random sublayer order
        # in QGIS 2.18 using "file.xtf,model.imd" as dataSourceUri.
        __, vrt_tmp = tempfile.mkstemp('.vrt', 'tmp_')
        ogr2vrt(self.iliDs(), vrt_tmp)
        dataSourceUri = vrt_tmp
        # QGIS 1.8:
        # subLayerVectorLayer = QgsVectorLayer(
        #     dataSourceUri, "interlis_sublayers", "ogr")
        # subLayerProvider = subLayerVectorLayer.dataProvider()
        # if not subLayerProvider:
        #     QMessageBox.critical(None, "Error accessing interlis sublayers",
        #                          "A problem occurred during access of the sublayers")
        #     return
        # subLayerList = subLayerProvider.subLayers()
        # subLayerDialog = SublayersDialog()
        # subLayerDialog.setupSublayerList(subLayerList)
        # if subLayerDialog.exec_() == QDialog.Accepted:
        #     for layername in subLayerDialog.subLayerNames():
        #         # add a new ogr layer for each selected sublayer
        #         self._plugin.iface.addVectorLayer(dataSourceUri + "|layername=" + layername, layername, "ogr")
        # QGIS 2: Sublayer dialog opens automatically
        self._plugin.iface.addVectorLayer(
            dataSourceUri, "Interlis layer", "ogr")
        self.accept()
        self._plugin.iface.messageBar().pushMessage("Interlis", "Import finished",
                                                    level=QgsMessageBar.INFO, duration=2)

    def importenums(self):
        """Write the model's enum tables into the selected PostGIS DB."""
        cfg = self._ogr_config_tmp(self._empty_transfer_ds())
        self._log_output("Import Enums from %s" % self.ui.mModelLineEdit.text())
        cfg.write_enum_tables(
            dest=self.pgDs(), skipfailures=self.ui.cbSkipFailures.isChecked(), debug=True)
        self._remove_ogrconfig_tmp()
        self._log_output("Import finished")

    def createschema(self):
        """Create the DB schema from the model (empty transfer transform)."""
        cfg = self._ogr_config_tmp(self._empty_transfer_ds())
        self._log_output("Create schema from %s" % self.ui.mModelLineEdit.text())
        ogroutput = cfg.transform(
            dest=self.pgDs(), skipfailures=self.ui.cbSkipFailures.isChecked(), debug=True)
        self._remove_ogrconfig_tmp()
        self._log_output("Import finished")

    def importtodb(self):
        """Import the transfer data into PostGIS, then offer the imported
        layers for loading into the project."""
        self._log_output("Import data from %s" % self.iliDs())
        cfg = self._ogr_config_tmp(self.iliDs())
        ogroutput = cfg.transform(
            dest=self.pgDs(), skipfailures=self.ui.cbSkipFailures.isChecked(), debug=True)
        self._remove_ogrconfig_tmp()
        self._plugin.messageLogWidget().show()
        self._log_output(ogroutput)
        self._log_output("Import finished")
        uri = self.pgUri()
        layer_infos = cfg.layer_infos()
        layer_names = cfg.layer_names()
        # if self.ui.cbImportEnums.isChecked():
        #     layer_infos += cfg.enum_infos()
        #     layer_names += cfg.enum_names()
        subLayerDialog = SublayersDialog()
        subLayerDialog.setupLayerList(layer_names)
        if subLayerDialog.exec_() == QDialog.Accepted:
            for layer_id in subLayerDialog.layerNames():
                # add a new layer for each selected row
                for layer in layer_infos:
                    if layer['name'] == layer_id:
                        geom_column = layer['geom_field'] if (
                            'geom_field' in layer) else None
                        uri.setDataSource("", layer['name'], geom_column)
                        self._plugin.iface.addVectorLayer(
                            uri.uri(), layer['name'], 'postgres')
        self.accept()
        self._plugin.iface.messageBar().pushMessage("Interlis", "Import finished",
                                                    level=QgsMessageBar.INFO, duration=2)

    # def exporttoxtf(self):
    #     cfg = self._ogr_config(self.pgDs())
    #     fn = QFileDialog.getSaveFileName(
    #         None, "Save File", "",
    #         "Interlis 2 transfer (*.xtf *.XTF *.xml)")
    #     if not fn:
    #         return
    #     ds = self.iliFileDs(fn)
    #     ogroutput = cfg.transform_reverse(
    #         dest=ds, skipfailures=self.ui.cbSkipFailures.isChecked(),
    #         debug=True)
    #     self._log_output(ogroutput)
    #     QgsMessageLog.logMessage(
    #         "Export to '%s' finished" % self.ui.mDataLineEdit.text(),
    #         "Interlis", QgsMessageLog.INFO)

    def _show_log_window(self):
        logDock = self._plugin.iface.mainWindow().findChild(
            QDockWidget, 'MessageLog')
        logDock.show()

    def _log_output(self, output, lines_per_msg=None):
        """Log *output* to the Interlis message log, optionally split into
        chunks of *lines_per_msg* lines per message."""
        if lines_per_msg is None:
            QgsMessageLog.logMessage(output, "Interlis", QgsMessageLog.INFO)
        else:
            lines = output.splitlines()
            for i in range(0, len(lines), lines_per_msg):
                msg = "\n".join(lines[i:i + lines_per_msg])
                QgsMessageLog.logMessage(msg, "Interlis", QgsMessageLog.INFO)
| |
import collections
from django.conf import settings
from django.db.transaction import non_atomic_requests
from django.http import (Http404, HttpResponsePermanentRedirect,
HttpResponseRedirect)
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import cache_page
from product_details import product_details
from mobility.decorators import mobile_template
from tower import ugettext_lazy as _lazy
from olympia import amo
from olympia.amo.models import manual_order
from olympia.amo.urlresolvers import reverse
from olympia.addons.models import Addon, AddonCategory, Category, FrozenAddon
from olympia.addons.utils import get_featured_ids, get_creatured_ids
from olympia.addons.views import BaseFilter, ESBaseFilter
from olympia.translations.query import order_by_translation
# Lower-cased locale code -> language info dict from product-details.
# Each value carries 'English' and 'native' display names (see
# locale_display_name below).
languages = dict((lang.lower(), val)
                 for lang, val in product_details.languages.items())

# Presumably the page size for persona listings — defined here but used
# outside this chunk.
PAGINATE_PERSONAS_BY = 30
# Minimum add-on count before a category warrants its own landing page.
MIN_COUNT_FOR_LANDING = 4
def locale_display_name(locale):
    """Return (english name, native name) for the locale.

    Falls back to the base language when a regional variant (e.g. 'pt-BR')
    is unknown.  Raises KeyError if the locale can't be found at all.
    """
    if not locale:
        raise KeyError
    key = locale.lower()
    if key in languages:
        info = languages[key]
        return info['English'], info['native']
    # Unknown variant: strip the last regional suffix and retry.
    base, sep, _region = locale.rpartition('-')
    if not sep:
        raise KeyError
    return locale_display_name(base)
# Lightweight record built by _get_locales() for the language-tools template.
Locale = collections.namedtuple('Locale', 'locale display native dicts packs')
class AddonFilter(BaseFilter):
    """Sort options for add-on listing pages (see BaseFilter for how
    opts/extras are applied)."""
    opts = (('featured', _lazy(u'Featured')),
            ('users', _lazy(u'Most Users')),
            ('rating', _lazy(u'Top Rated')),
            ('created', _lazy(u'Newest')))
    # Additional sort options beyond the primary set.
    extras = (('name', _lazy(u'Name')),
              ('popular', _lazy(u'Weekly Downloads')),
              ('updated', _lazy(u'Recently Updated')),
              ('hotness', _lazy(u'Up & Coming')))
class ThemeFilter(AddonFilter):
    """Sort options for theme listings: same choices as AddonFilter,
    reordered with 'users' first and 'featured' last."""
    opts = (('users', _lazy(u'Most Users')),
            ('rating', _lazy(u'Top Rated')),
            ('created', _lazy(u'Newest')),
            ('featured', _lazy(u'Featured')))
    # NOTE(review): identical to AddonFilter.extras — could be inherited.
    extras = (('name', _lazy(u'Name')),
              ('popular', _lazy(u'Weekly Downloads')),
              ('updated', _lazy(u'Recently Updated')),
              ('hotness', _lazy(u'Up & Coming')))
class ESAddonFilter(ESBaseFilter):
    """Elasticsearch-backed filter with the same sort options as AddonFilter."""
    opts = AddonFilter.opts
    extras = AddonFilter.extras
def addon_listing(request, addon_types, filter_=AddonFilter, default=None):
    """Set up the (queryset, filter) pair for themes & extension listings."""
    if default is None:
        default = 'rating' if request.MOBILE else 'featured'
    # Personas come from the public queryset; everything else from the
    # listed, reviewed queryset restricted to the requested types.
    if amo.ADDON_PERSONA in addon_types:
        base_qs = Addon.objects.public().filter(type=amo.ADDON_PERSONA)
    else:
        base_qs = (Addon.objects.listed(request.APP, *amo.REVIEWED_STATUSES)
                   .filter(type__in=addon_types))
    listing_filter = filter_(request, base_qs, 'sort', default)
    return listing_filter.qs, listing_filter
def _get_locales(addons):
    """Does the heavy lifting for language_tools."""
    # Generator so templates can {% cache addons %} without running this.
    for addon in addons:
        code = addon.target_locale.lower()
        try:
            english, native = locale_display_name(code)
            # Add the locale as a differentiator if we had to strip the
            # regional portion.
            if code not in languages:
                native = '%s (%s)' % (native, code)
        except KeyError:
            english, native = u'%s (%s)' % (addon.name, code), ''
        addon.locale_display, addon.locale_native = english, native

    # We don't need the whole add-on so only store the parts in use.
    def slim(addon):
        return {'slug': addon.slug,
                'file_size': addon.current_version.all_files[0].size,
                'locale_disambiguation': addon.locale_disambiguation}

    locales = {}
    for code, group in amo.utils.sorted_groupby(addons, 'target_locale'):
        group = list(group)
        dicts = [slim(a) for a in group if a.type == amo.ADDON_DICT]
        packs = [slim(a) for a in group if a.type == amo.ADDON_LPAPP]
        first = group[0]
        locales[code] = Locale(first.target_locale, first.locale_display,
                               first.locale_native, dicts, packs)

    for entry in sorted(locales.items(), key=lambda item: item[1].display):
        yield entry
# We never use the category, but this makes it
# uniform with the other type listings.
@non_atomic_requests
def language_tools(request, category=None):
    """List dictionaries and language packs grouped by locale."""
    lang_types = (amo.ADDON_DICT, amo.ADDON_LPAPP)
    addons = (Addon.objects.public()
              .filter(appsupport__app=request.APP.id, type__in=lang_types,
                      target_locale__isnull=False)
              .exclude(target_locale=''))
    all_locales = list(_get_locales(addons))
    lang_locales = list(
        _get_locales(addons.filter(target_locale=request.LANG)))
    addon_ids = addons.values_list('pk', flat=True)
    return render(request, 'browse/language_tools.html',
                  {'locales': all_locales, 'lang_addons': lang_locales,
                   # Pass keys separately so only IDs get cached.
                   'addons': addon_ids,
                   'search_cat': '%s,0' % amo.ADDON_DICT})
@non_atomic_requests
def themes(request, category=None):
    """Render the themes listing, optionally scoped to a category."""
    TYPE = amo.ADDON_THEME
    if category is not None:
        cats = Category.objects.filter(application=request.APP.id, type=TYPE)
        category = get_object_or_404(cats, slug=category)

    addons, theme_filter = addon_listing(request, [TYPE], default='users',
                                         filter_=ThemeFilter)
    if category is not None:
        addons = addons.filter(categories__id=category.id)

    sorting = theme_filter.field
    addons = amo.utils.paginate(request, addons, 16, count=addons.count())
    return render(request, 'browse/themes.html',
                  {'section': 'themes', 'addon_type': TYPE, 'addons': addons,
                   'category': category, 'filter': theme_filter,
                   'sorting': sorting, 'search_cat': '%s,0' % TYPE,
                   'src': 'cb-btn-%s' % sorting,
                   'dl_src': 'cb-dl-%s' % sorting})
@mobile_template('browse/{mobile/}extensions.html')
@non_atomic_requests
def extensions(request, category=None, template=None):
    """Render the extension listing, optionally scoped to a category.

    Categories with more than MIN_COUNT_FOR_LANDING add-ons get the
    dedicated landing page instead, unless an explicit sort was requested
    or the client is mobile.
    """
    TYPE = amo.ADDON_EXTENSION
    if category is not None:
        q = Category.objects.filter(application=request.APP.id, type=TYPE)
        category = get_object_or_404(q, slug=category)

    sort = request.GET.get('sort')
    # Use the module constant rather than a magic 4 so the landing-page
    # threshold stays in sync with es_extensions().
    if (not sort and not request.MOBILE and category and
            category.count > MIN_COUNT_FOR_LANDING):
        return category_landing(request, category)

    addons, filter = addon_listing(request, [TYPE])
    sorting = filter.field
    src = 'cb-btn-%s' % sorting
    dl_src = 'cb-dl-%s' % sorting

    if category:
        addons = addons.filter(categories__id=category.id)

    addons = amo.utils.paginate(request, addons, count=addons.count())
    return render(request, template,
                  {'section': 'extensions', 'addon_type': TYPE,
                   'category': category, 'addons': addons,
                   'filter': filter, 'sorting': sorting,
                   'sort_opts': filter.opts, 'src': src,
                   'dl_src': dl_src, 'search_cat': '%s,0' % TYPE})
@mobile_template('browse/{mobile/}extensions.html')
@non_atomic_requests
def es_extensions(request, category=None, template=None):
    """Elasticsearch-backed variant of extensions()."""
    TYPE = amo.ADDON_EXTENSION
    if category is not None:
        q = Category.objects.filter(application=request.APP.id, type=TYPE)
        category = get_object_or_404(q, slug=category)

    # Use the module constant rather than a magic 4 so the landing-page
    # threshold stays in sync with extensions().
    if ('sort' not in request.GET and not request.MOBILE
            and category and category.count > MIN_COUNT_FOR_LANDING):
        return category_landing(request, category)

    qs = (Addon.search().filter(type=TYPE, app=request.APP.id,
                                is_disabled=False,
                                status__in=amo.REVIEWED_STATUSES))
    filter = ESAddonFilter(request, qs, key='sort', default='popular')
    qs, sorting = filter.qs, filter.field
    src = 'cb-btn-%s' % sorting
    dl_src = 'cb-dl-%s' % sorting

    if category:
        qs = qs.filter(category=category.id)
    addons = amo.utils.paginate(request, qs)

    return render(request, template,
                  {'section': 'extensions', 'addon_type': TYPE,
                   'category': category, 'addons': addons,
                   'filter': filter, 'sorting': sorting,
                   'sort_opts': filter.opts, 'src': src,
                   'dl_src': dl_src, 'search_cat': '%s,0' % TYPE})
class CategoryLandingFilter(BaseFilter):
    """Sort filter for category landing pages.

    The 'featured' option is backed by the ids returned from
    AddonCategory.creatured_random() for the request language.
    """
    opts = (('featured', _lazy(u'Featured')),
            ('users', _lazy(u'Most Popular')),
            ('rating', _lazy(u'Top Rated')),
            ('created', _lazy(u'Recently Added')))

    def __init__(self, request, base, category, key, default):
        self.category = category
        # Pick the creatured ids up-front: filter_featured() relies on
        # self.ids, so they must exist before BaseFilter.__init__ runs.
        self.ids = AddonCategory.creatured_random(category, request.LANG)
        super(CategoryLandingFilter, self).__init__(request, base, key,
                                                    default)

    def filter_featured(self):
        # Preserve the ordering of the creatured ids chosen in __init__.
        qs = self.base_queryset.all()
        return manual_order(qs, self.ids, pk_name='addons.id')
@non_atomic_requests
def category_landing(request, category, addon_type=amo.ADDON_EXTENSION,
                     Filter=CategoryLandingFilter):
    """Render the landing page for a (non-persona) category."""
    base = (Addon.objects.listed(request.APP)
            .exclude(type=amo.ADDON_PERSONA)
            .filter(categories__id=category.id))
    landing_filter = Filter(request, base, category, key='browse',
                            default='featured')
    context = {'section': amo.ADDON_SLUGS[addon_type],
               'addon_type': addon_type, 'category': category,
               'filter': landing_filter, 'sorting': landing_filter.field,
               'search_cat': '%s,0' % category.type}
    return render(request, 'browse/impala/category_landing.html', context)
@non_atomic_requests
def creatured(request, category):
    """List the creatured (featured-in-category) extensions for a category."""
    TYPE = amo.ADDON_EXTENSION
    cats = Category.objects.filter(application=request.APP.id, type=TYPE)
    category = get_object_or_404(cats, slug=category)
    featured_ids = AddonCategory.creatured_random(category, request.LANG)
    addons = manual_order(Addon.objects.public(), featured_ids,
                          pk_name='addons.id')
    context = {'addons': addons, 'category': category, 'sorting': 'featured'}
    return render(request, 'browse/creatured.html', context)
class PersonasFilter(BaseFilter):
    """Sort filter for Theme (persona) listings."""

    opts = (
        ('up-and-coming', _lazy(u'Up & Coming')),
        ('created', _lazy(u'Recently Added')),
        ('popular', _lazy(u'Most Popular')),
        ('rating', _lazy(u'Top Rated')),
    )

    def filter(self, field):
        if field != 'up-and-coming':
            return super(PersonasFilter, self).filter(field)
        # Special case: the key contains dashes, so handle it inline here.
        # See bug 944096 related to the popularity cut-off.
        return (self.base_queryset.filter(persona__popularity__gte=100)
                .order_by('-persona__movers'))

    def filter_created(self):
        return self.base_queryset.order_by('-created')

    def filter_popular(self):
        return self.base_queryset.order_by('-persona__popularity')

    def filter_rating(self):
        return self.base_queryset.order_by('-bayesian_rating')
def personas_listing(request, category_slug=None):
    """Build the shared pieces for persona (theme) browse/search views.

    Returns a 4-tuple ``(categories, filter_, base, cat)`` where ``cat`` is
    None when no category slug was given.  NOTE: when ``category_slug``
    actually names a Complete Theme category, a redirect response is
    returned instead of the tuple, so callers must type-check the result
    before unpacking.
    """
    # Common pieces used by browse and search.
    TYPE = amo.ADDON_PERSONA
    q = Category.objects.filter(type=TYPE)
    categories = order_by_translation(q, 'name')
    # Frozen add-ons are explicitly excluded from public listings.
    frozen = list(FrozenAddon.objects.values_list('addon', flat=True))
    base = Addon.objects.public().filter(type=TYPE).exclude(id__in=frozen)
    cat = None
    if category_slug is not None:
        try:
            cat = Category.objects.filter(slug=category_slug, type=TYPE)[0]
        except IndexError:
            # Maybe it's a Complete Theme?
            try:
                cat = Category.objects.filter(slug=category_slug,
                                              type=amo.ADDON_THEME)[0]
            except IndexError:
                # Neither a Persona nor a Complete Theme slug.
                raise Http404
            else:
                # Hey, it was a Complete Theme: bounce to the themes page,
                # preserving any requested sort order.
                url = reverse('browse.themes', args=[cat.slug])
                if 'sort' in request.GET:
                    url = amo.utils.urlparams(url, sort=request.GET['sort'])
                # Permanent only outside DEBUG, so devs don't get stuck
                # with cached 301s.
                return redirect(url, permanent=not settings.DEBUG)
        base = base.filter(categories__id=cat.id)
    filter_ = PersonasFilter(request, base, key='sort',
                             default='up-and-coming')
    return categories, filter_, base, cat
@mobile_template('browse/personas/{mobile/}')
@non_atomic_requests
def personas(request, category=None, template=None):
    """Browse Themes (personas), optionally restricted to one category."""
    listing = personas_listing(request, category)
    # personas_listing() hands back a redirect when the slug turned out to
    # belong to a Complete Theme category.
    if isinstance(listing,
                  (HttpResponsePermanentRedirect, HttpResponseRedirect)):
        return listing
    categories, filter_, base, cat = listing

    if filter_.field == 'up-and-coming':
        # Almost hardcoding the number of elements because performing
        # `filter_.qs.count()` is a performance killer. We're still
        # verifying the `base.count()` for the template switch below.
        base_count = base.count()
        if base_count < MIN_COUNT_FOR_LANDING:
            count = base_count
        else:
            count = PAGINATE_PERSONAS_BY * settings.PERSONA_DEFAULT_PAGES
    else:
        # Pass the count from base instead of letting it come from
        # filter_.qs.count() since that would join against personas.
        count = cat.count if cat else base.count()

    addons = amo.utils.paginate(request, filter_.qs, PAGINATE_PERSONAS_BY,
                                count=count)

    # Landing page for the default sort on mobile category-less views, or
    # on desktop when there are enough themes; plain grid otherwise.
    unsorted = 'sort' not in request.GET
    landing = unsorted and (
        (request.MOBILE and not cat) or
        (not request.MOBILE and count > MIN_COUNT_FOR_LANDING))
    template += 'category_landing.html' if landing else 'grid.html'

    # Featured ids come from the category when we have one, otherwise from
    # the app-wide featured pool; both use the same manual ordering.
    if cat:
        ids = AddonCategory.creatured_random(cat, request.LANG)
    else:
        ids = Addon.featured_random(request.APP, request.LANG)
    featured = manual_order(base, ids, pk_name="addons.id")

    return render(request, template, {
        'categories': categories, 'category': cat, 'addons': addons,
        'filter': filter_, 'sorting': filter_.field,
        'sort_opts': filter_.opts,
        'featured': featured, 'search_cat': 'themes',
        'is_homepage': cat is None and 'sort' not in request.GET})
@non_atomic_requests
def legacy_theme_redirects(request, category=None, category_name=None):
    """Redirect old /themes URLs to their current Personas equivalents."""
    url = None
    if category_name is not None:
        # This format is for the Complete Themes RSS feed.
        url = reverse('browse.themes.rss', args=[category_name])
    elif not category or category == 'all':
        url = reverse('browse.personas')
    else:
        try:
            # Theme?
            cat = Category.objects.filter(slug=category,
                                          type=amo.ADDON_PERSONA)[0]
        except IndexError:
            # Unknown slug; leave url as None so we 404 below.
            pass
        else:
            # Hey, it was a Theme.
            url = reverse('browse.personas', args=[cat.slug])
    if not url:
        raise Http404
    # Carry over any requested sort.
    if 'sort' in request.GET:
        url = amo.utils.urlparams(url, sort=request.GET['sort'])
    return redirect(url, permanent=not settings.DEBUG)
@non_atomic_requests
def legacy_fulltheme_redirects(request, category=None):
    """Full Themes have already been renamed to Complete Themes!"""
    old_path = request.get_full_path()
    new_path = old_path.replace('/full-themes', '/complete-themes')
    return redirect(new_path, permanent=not settings.DEBUG)
@cache_page(60 * 60 * 24 * 365)
@non_atomic_requests
def legacy_creatured_redirect(request, category):
    """Redirect the old creatured URL to the extensions 'featured' sort."""
    cat = get_object_or_404(Category.objects, slug=category,
                            application=request.APP.id)
    # Delegate URL construction to the generic legacy redirect handler.
    return legacy_redirects(request, amo.ADDON_EXTENSION, cat, 'featured')
@cache_page(60 * 60 * 24 * 365)
@non_atomic_requests
def legacy_redirects(request, type_, category=None, sort=None, format=None):
    """Permanently redirect legacy browse URLs to their modern equivalents."""
    type_slug = amo.ADDON_SLUGS.get(int(type_), 'extensions')
    if not category or category == 'all':
        url = reverse('browse.%s' % type_slug)
    else:
        if not isinstance(category, Category):
            category = get_object_or_404(Category.objects, id=category)
        if format == 'rss':
            # These types have no RSS feeds.
            if type_slug in ('language-tools', 'personas'):
                raise Http404
            url = reverse('browse.%s.rss' % type_slug, args=[category.slug])
        else:
            url = reverse('browse.%s' % type_slug, args=[category.slug])
    # Map legacy sort keys onto their current names; query string wins over
    # the path-provided sort.
    mapping = {'updated': 'updated', 'newest': 'created', 'name': 'name',
               'weeklydownloads': 'popular', 'averagerating': 'rating',
               'featured': 'featured'}
    requested = request.GET.get('sort')
    if requested in mapping:
        url += '?sort=%s' % mapping[requested]
    elif sort in mapping:
        url += '?sort=%s' % mapping[sort]
    return HttpResponsePermanentRedirect(url)
class SearchToolsFilter(AddonFilter):
    """Sort filter for the search tools listing."""

    opts = (
        ('name', _lazy(u'Name')),
        ('updated', _lazy(u'Updated')),
        ('created', _lazy(u'Created')),
        ('popular', _lazy(u'Downloads')),
        ('rating', _lazy(u'Rating')),
    )

    def filter_featured(self):
        # Featured search add-ons in all locales:
        app, lang = self.request.APP, self.request.LANG
        ids = get_featured_ids(app, lang, amo.ADDON_SEARCH)
        # Also include add-ons creatured in the search-tools category,
        # skipping any already present.
        try:
            search_cat = Category.objects.get(slug='search-tools',
                                              application=app.id)
        except Category.DoesNotExist:
            pass
        else:
            for other_id in get_creatured_ids(search_cat, lang):
                if other_id not in ids:
                    ids.append(other_id)
        return manual_order(Addon.objects.valid(), ids, 'addons.id')
class SearchExtensionsFilter(AddonFilter):
    """Sort filter for the extensions sidebar on the search tools page."""
    opts = (('popular', _lazy(u'Most Popular')),
            ('created', _lazy(u'Recently Added')),)
@non_atomic_requests
def search_tools(request, category=None):
    """View the search tools page."""
    app, addon_type = request.APP, amo.ADDON_SEARCH
    cat_qs = Category.objects.filter(application=app.id, type=addon_type)
    categories = order_by_translation(cat_qs, 'name')
    addons, filter_ = addon_listing(request, [addon_type], SearchToolsFilter,
                                    'popular')
    if category:
        category = get_object_or_404(cat_qs, slug=category)
        addons = addons.filter(categories__id=category.id)
    addons = amo.utils.paginate(request, addons)
    # Sidebar widget: a small filtered listing of public extensions.
    base = (Addon.objects.listed(request.APP, amo.STATUS_PUBLIC)
            .filter(type=amo.ADDON_EXTENSION))
    sidebar_ext = SearchExtensionsFilter(request, base, 'sort', 'popular')
    return render(request, 'browse/search_tools.html',
                  {'categories': categories, 'category': category,
                   'addons': addons, 'filter': filter_,
                   'search_extensions_filter': sidebar_ext})
@non_atomic_requests
def moreinfo_redirect(request):
    """Redirect legacy ?id=N moreinfo URLs to the discovery detail page."""
    raw_id = request.GET.get('id', '')
    try:
        addon_id = int(raw_id)
    except ValueError:
        # Missing or non-numeric id.
        raise Http404
    return redirect('discovery.addons.detail', addon_id, permanent=True)
| |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import abc
import collections
import errno
import heapq
import logging
import select
import time
import threading
import pika.compat
from pika.adapters.utils import nbio_interface
from pika.adapters.base_connection import BaseConnection
from pika.adapters.utils.selector_ioloop_adapter import (
SelectorIOServicesAdapter, AbstractSelectorIOLoop)
LOGGER = logging.getLogger(__name__)
# One of select, epoll, kqueue or poll
SELECT_TYPE = None

# Reason for this unconventional dict initialization is the fact that on some
# platforms select.error is an alias for OSError. We don't want the lambda
# for select.error to win over the one for OSError, so insertion order is
# significant: later assignments overwrite earlier ones for aliased classes.
_SELECT_ERROR_CHECKERS = {}
if pika.compat.PY3:
    # InterruptedError is undefined in PY2
    # pylint: disable=E0602
    _SELECT_ERROR_CHECKERS[InterruptedError] = lambda e: True
_SELECT_ERROR_CHECKERS[select.error] = lambda e: e.args[0] == errno.EINTR
_SELECT_ERROR_CHECKERS[IOError] = lambda e: e.errno == errno.EINTR
_SELECT_ERROR_CHECKERS[OSError] = lambda e: e.errno == errno.EINTR

# We can reduce the number of elements in the list by looking at super-sub
# class relationship because only the most generic ones needs to be caught.
# For now the optimization is left out.
# Following is better but still incomplete.
# _SELECT_ERRORS = tuple(filter(lambda e: not isinstance(e, OSError),
#                               _SELECT_ERROR_CHECKERS.keys())
#                        + [OSError])
_SELECT_ERRORS = tuple(_SELECT_ERROR_CHECKERS.keys())
def _is_resumable(exc):
    """Check whether the caught exception represents an EINTR error.

    :param exc: exception; must be one of classes in _SELECT_ERRORS
    """
    # Look up the checker registered for the exact exception class; an
    # unregistered class is never resumable.
    checker = _SELECT_ERROR_CHECKERS.get(exc.__class__)
    return checker(exc) if checker is not None else False
class SelectConnection(BaseConnection):
    """Asynchronous connection adapter that attempts to use the fastest
    event-loop mechanism available on the current platform.
    """

    def __init__(
            self,  # pylint: disable=R0913
            parameters=None,
            on_open_callback=None,
            on_open_error_callback=None,
            on_close_callback=None,
            custom_ioloop=None,
            internal_connection_workflow=True):
        """Create a new instance of the Connection object.

        :param pika.connection.Parameters parameters: Connection parameters
        :param callable on_open_callback: Method to call on connection open
        :param None | method on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`:
            on_open_error_callback(Connection, exception).
        :param None | method on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the
            cause of connection failure.
        :param None | IOLoop | nbio_interface.AbstractIOServices custom_ioloop:
            Provide a custom I/O Loop object.
        :param bool internal_connection_workflow: True for autonomous
            connection establishment which is default; False for
            externally-managed connection workflow via the
            `create_connection()` factory.
        :raises: RuntimeError
        """
        # A ready-made AbstractIOServices is used as-is; anything else (a
        # selector-style IOLoop or None) is wrapped in the adapter.
        if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
            nbio = custom_ioloop
        else:
            nbio = SelectorIOServicesAdapter(custom_ioloop or IOLoop())
        super(SelectConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
        """
        nbio = SelectorIOServicesAdapter(custom_ioloop or IOLoop())

        def connection_factory(params):
            """Create a SelectConnection bound to the shared I/O services."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            return cls(
                parameters=params,
                custom_ioloop=nbio,
                internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)

    def _get_write_buffer_size(self):
        """
        :returns: Current size of output data buffered by the transport
        :rtype: int
        """
        return self._transport.get_write_buffer_size()
class _Timeout(object):
"""Represents a timeout"""
__slots__ = (
'deadline',
'callback',
)
def __init__(self, deadline, callback):
"""
:param float deadline: timer expiration as non-negative epoch number
:param callable callback: callback to call when timeout expires
:raises ValueError, TypeError:
"""
if deadline < 0:
raise ValueError(
'deadline must be non-negative epoch number, but got %r' %
(deadline,))
if not callable(callback):
raise TypeError(
'callback must be a callable, but got %r' % (callback,))
self.deadline = deadline
self.callback = callback
def __eq__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline == other.deadline
return NotImplemented
def __ne__(self, other):
"""NOTE: not supporting sort stability"""
result = self.__eq__(other)
if result is not NotImplemented:
return not result
return NotImplemented
def __lt__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline < other.deadline
return NotImplemented
def __gt__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline > other.deadline
return NotImplemented
def __le__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline <= other.deadline
return NotImplemented
def __ge__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline >= other.deadline
return NotImplemented
class _Timer(object):
    """Manage timeouts for use in ioloop.

    Timeouts live in a min-heap ordered by deadline.  Cancellation is lazy:
    a cancelled timeout's callback is set to None and the entry is skipped
    (and counted) when encountered; the heap is compacted once enough
    cancellations accumulate.
    """

    # Cancellation count threshold for triggering garbage collection of
    # cancelled timers
    _GC_CANCELLATION_THRESHOLD = 1024

    def __init__(self):
        # Min-heap of _Timeout instances, ordered by deadline.
        self._timeout_heap = []
        # Number of canceled timeouts on heap; for scheduling garbage
        # collection of canceled timeouts
        self._num_cancellations = 0

    def close(self):
        """Release resources. Don't use the `_Timer` instance after closing
        it
        """
        # Eliminate potential reference cycles to aid garbage-collection
        if self._timeout_heap is not None:
            for timeout in self._timeout_heap:
                timeout.callback = None
            self._timeout_heap = None

    def call_later(self, delay, callback):
        """Schedule a one-shot timeout given delay seconds.

        NOTE: you may cancel the timer before dispatch of the callback. Timer
        Manager cancels the timer upon dispatch of the callback.

        :param float delay: Non-negative number of seconds from now until
            expiration
        :param callable callback: The callback method, having the signature
            `callback()`
        :rtype: _Timeout
        :raises ValueError, TypeError
        """
        # Guard against use after close() released the heap.
        if self._timeout_heap is None:
            raise ValueError("Timeout closed before call")
        if delay < 0:
            raise ValueError(
                'call_later: delay must be non-negative, but got %r' % (delay,))
        now = pika.compat.time_now()
        timeout = _Timeout(now + delay, callback)
        heapq.heappush(self._timeout_heap, timeout)
        LOGGER.debug(
            'call_later: added timeout %r with deadline=%r and '
            'callback=%r; now=%s; delay=%s', timeout, timeout.deadline,
            timeout.callback, now, delay)
        return timeout

    def remove_timeout(self, timeout):
        """Cancel the timeout

        :param _Timeout timeout: The timer to cancel
        """
        # NOTE removing from the heap is difficult, so we just deactivate the
        # timeout and garbage-collect it at a later time; see discussion
        # in http://docs.python.org/library/heapq.html
        if timeout.callback is None:
            LOGGER.debug(
                'remove_timeout: timeout was already removed or called %r',
                timeout)
        else:
            LOGGER.debug(
                'remove_timeout: removing timeout %r with deadline=%r '
                'and callback=%r', timeout, timeout.deadline, timeout.callback)
            timeout.callback = None
            self._num_cancellations += 1

    def get_remaining_interval(self):
        """Get the interval to the next timeout expiration

        :returns: non-negative number of seconds until next timer expiration;
            None if there are no timers
        :rtype: float
        """
        if self._timeout_heap:
            now = pika.compat.time_now()
            # Clamp at 0 for deadlines already in the past.
            interval = max(0, self._timeout_heap[0].deadline - now)
        else:
            interval = None
        return interval

    def process_timeouts(self):
        """Process pending timeouts, invoking callbacks for those whose time has
        come
        """
        if self._timeout_heap:
            now = pika.compat.time_now()
            # Remove ready timeouts from the heap now to prevent IO starvation
            # from timeouts added during callback processing
            ready_timeouts = []
            while self._timeout_heap and self._timeout_heap[0].deadline <= now:
                timeout = heapq.heappop(self._timeout_heap)
                if timeout.callback is not None:
                    ready_timeouts.append(timeout)
                else:
                    # Popped a cancelled entry; it's gone from the heap now.
                    self._num_cancellations -= 1
            # Invoke ready timeout callbacks
            for timeout in ready_timeouts:
                if timeout.callback is None:
                    # Must have been canceled from a prior callback
                    self._num_cancellations -= 1
                    continue
                timeout.callback()
                # Mark as dispatched so a later remove_timeout() is a no-op.
                timeout.callback = None
            # Garbage-collect canceled timeouts if they exceed threshold
            # AND make up more than half the heap (amortizes the rebuild).
            if (self._num_cancellations >= self._GC_CANCELLATION_THRESHOLD and
                    self._num_cancellations > (len(self._timeout_heap) >> 1)):
                self._num_cancellations = 0
                self._timeout_heap = [
                    t for t in self._timeout_heap if t.callback is not None
                ]
                heapq.heapify(self._timeout_heap)
class PollEvents(object):
    """Event flags for I/O"""
    # Use epoll's constants to keep life easy; fall back to fixed values on
    # platforms where the select module lacks the POLL* constants.
    READ = getattr(select, 'POLLIN', 0x01)  # available for read
    WRITE = getattr(select, 'POLLOUT', 0x04)  # available for write
    ERROR = getattr(select, 'POLLERR', 0x08)  # error on associated fd
class IOLoop(AbstractSelectorIOLoop):
    """I/O loop implementation that picks a suitable poller (`select`,
    `poll`, `epoll`, `kqueue`) to use based on platform.

    Implements the
    `pika.adapters.utils.selector_ioloop_adapter.AbstractSelectorIOLoop`
    interface.
    """
    # READ/WRITE/ERROR per `AbstractSelectorIOLoop` requirements
    READ = PollEvents.READ
    WRITE = PollEvents.WRITE
    ERROR = PollEvents.ERROR

    def __init__(self):
        self._timer = _Timer()
        # Callbacks queued via `add_callback_threadsafe()`; drained by
        # `process_timeouts()`.
        self._callbacks = collections.deque()
        self._poller = self._get_poller(self._get_remaining_interval,
                                        self.process_timeouts)

    def close(self):
        """Release IOLoop's resources.

        `IOLoop.close` is intended to be called by the application or test
        code only after `IOLoop.start()` returns. After calling `close()`, no
        other interaction with the closed instance of `IOLoop` should be
        performed.
        """
        if self._callbacks is None:
            return
        self._poller.close()
        self._timer.close()
        # An empty list, rather than None, so that a race with another thread
        # calling add_callback_threadsafe() results in a benign append
        # instead of AttributeError.
        self._callbacks = []

    @staticmethod
    def _get_poller(get_wait_seconds, process_timeouts):
        """Determine the best poller to use for this environment and
        instantiate it.

        :param get_wait_seconds: Function for getting the maximum number of
            seconds to wait for IO for use by the poller
        :param process_timeouts: Function for processing timeouts for use by
            the poller
        :returns: The instantiated poller instance supporting `_PollerBase`
            API
        :rtype: object
        """
        kwargs = dict(
            get_wait_seconds=get_wait_seconds,
            process_timeouts=process_timeouts)

        def wanted(name):
            # Eligible when SELECT_TYPE is unset or explicitly names it.
            return not SELECT_TYPE or SELECT_TYPE == name

        # Preference order: epoll, kqueue, poll, then select as the fallback.
        if hasattr(select, 'epoll') and wanted('epoll'):
            LOGGER.debug('Using EPollPoller')
            return EPollPoller(**kwargs)
        if hasattr(select, 'kqueue') and wanted('kqueue'):
            LOGGER.debug('Using KQueuePoller')
            return KQueuePoller(**kwargs)
        if (hasattr(select, 'poll') and
                hasattr(select.poll(), 'modify') and  # pylint: disable=E1101
                wanted('poll')):
            LOGGER.debug('Using PollPoller')
            return PollPoller(**kwargs)
        LOGGER.debug('Using SelectPoller')
        return SelectPoller(**kwargs)

    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay
        seconds from the time of call on best-effort basis. Returns a handle
        to the timeout.

        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: handle to the created timeout that may be passed to
            `remove_timeout()`
        :rtype: object
        """
        return self._timer.call_later(delay, callback)

    def remove_timeout(self, timeout_handle):
        """Remove a timeout

        :param timeout_handle: Handle of timeout to remove
        """
        self._timer.remove_timeout(timeout_handle)

    def add_callback_threadsafe(self, callback):
        """Requests a call to the given function as soon as possible in the
        context of this IOLoop's thread.

        NOTE: This is the only thread-safe method in IOLoop. All other
        manipulations of IOLoop must be performed from the IOLoop's thread.

        For example, a thread may request a call to the `stop` method of an
        ioloop that is running in a different thread via
        `ioloop.add_callback_threadsafe(ioloop.stop)`

        :param callable callback: The callback method
        """
        if not callable(callback):
            raise TypeError(
                'callback must be a callable, but got %r' % (callback,))
        # NOTE: `deque.append` is atomic
        self._callbacks.append(callback)
        # Wake up the IOLoop which may be running in another thread
        self._poller.wake_threadsafe()
        LOGGER.debug('add_callback_threadsafe: added callback=%r', callback)

    # To satisfy `AbstractSelectorIOLoop` requirement
    add_callback = add_callback_threadsafe

    def process_timeouts(self):
        """[Extension] Process pending callbacks and timeouts, invoking those
        whose time has come. Internal use only.
        """
        # Drain only the callbacks already queued at entry; anything posted
        # while dispatching waits for the next iteration, avoiding I/O
        # starvation.
        remaining = len(self._callbacks)
        while remaining > 0:
            remaining -= 1
            callback = self._callbacks.popleft()
            LOGGER.debug('process_timeouts: invoking callback=%r', callback)
            callback()
        self._timer.process_timeouts()

    def _get_remaining_interval(self):
        """Get the remaining interval to the next callback or timeout
        expiration.

        :returns: non-negative number of seconds until next callback or timer
            expiration; None if there are no callbacks and timers
        :rtype: float
        """
        # Pending callbacks must run as soon as possible: don't wait at all.
        return 0 if self._callbacks else self._timer.get_remaining_interval()

    def add_handler(self, fd, handler, events):
        """Start watching the given file descriptor for events

        :param int fd: The file descriptor
        :param callable handler: When requested event(s) occur,
            `handler(fd, events)` will be called.
        :param int events: The event mask using READ, WRITE, ERROR.
        """
        self._poller.add_handler(fd, handler, events)

    def update_handler(self, fd, events):
        """Changes the events we watch for

        :param int fd: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        self._poller.update_handler(fd, events)

    def remove_handler(self, fd):
        """Stop watching the given file descriptor for events

        :param int fd: The file descriptor
        """
        self._poller.remove_handler(fd)

    def start(self):
        """[API] Start the main poller loop. It will loop until requested to
        exit. See `IOLoop.stop`.
        """
        self._poller.start()

    def stop(self):
        """[API] Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.

        To invoke `stop()` safely from a thread other than this IOLoop's
        thread, call it via `add_callback_threadsafe`; e.g.,
        `ioloop.add_callback_threadsafe(ioloop.stop)`
        """
        self._poller.stop()

    def activate_poller(self):
        """[Extension] Activate the poller
        """
        self._poller.activate_poller()

    def deactivate_poller(self):
        """[Extension] Deactivate the poller
        """
        self._poller.deactivate_poller()

    def poll(self):
        """[Extension] Wait for events of interest on registered file
        descriptors until an event of interest occurs or next timer deadline
        or `_PollerBase._MAX_POLL_TIMEOUT`, whichever is sooner, and dispatch
        the corresponding event handlers.
        """
        self._poller.poll()
class _PollerBase(pika.compat.AbstractBase): # pylint: disable=R0902
"""Base class for select-based IOLoop implementations"""
# Drop out of the poll loop every _MAX_POLL_TIMEOUT secs as a worst case;
# this is only a backstop value; we will run timeouts when they are
# scheduled.
_MAX_POLL_TIMEOUT = 5
# if the poller uses MS override with 1000
POLL_TIMEOUT_MULT = 1
def __init__(self, get_wait_seconds, process_timeouts):
"""
:param get_wait_seconds: Function for getting the maximum number of
seconds to wait for IO for use by the poller
:param process_timeouts: Function for processing timeouts for use by the
poller
"""
self._get_wait_seconds = get_wait_seconds
self._process_timeouts = process_timeouts
# We guard access to the waking file descriptors to avoid races from
# closing them while another thread is calling our `wake()` method.
self._waking_mutex = threading.Lock()
# fd-to-handler function mappings
self._fd_handlers = dict()
# event-to-fdset mappings
self._fd_events = {
PollEvents.READ: set(),
PollEvents.WRITE: set(),
PollEvents.ERROR: set()
}
self._processing_fd_event_map = {}
# Reentrancy tracker of the `start` method
self._running = False
self._stopping = False
# Create ioloop-interrupt socket pair and register read handler.
self._r_interrupt, self._w_interrupt = self._get_interrupt_pair()
self.add_handler(self._r_interrupt.fileno(), self._read_interrupt,
PollEvents.READ)
def close(self):
"""Release poller's resources.
`close()` is intended to be called after the poller's `start()` method
returns. After calling `close()`, no other interaction with the closed
poller instance should be performed.
"""
# Unregister and close ioloop-interrupt socket pair; mutual exclusion is
# necessary to avoid race condition with `wake_threadsafe` executing in
# another thread's context
assert not self._running, 'Cannot call close() before start() unwinds.'
with self._waking_mutex:
if self._w_interrupt is not None:
self.remove_handler(self._r_interrupt.fileno()) # pylint: disable=E1101
self._r_interrupt.close()
self._r_interrupt = None
self._w_interrupt.close()
self._w_interrupt = None
self.deactivate_poller()
self._fd_handlers = None
self._fd_events = None
self._processing_fd_event_map = None
def wake_threadsafe(self):
"""Wake up the poller as soon as possible. As the name indicates, this
method is thread-safe.
"""
with self._waking_mutex:
if self._w_interrupt is None:
return
try:
# Send byte to interrupt the poll loop, use send() instead of
# os.write for Windows compatibility
self._w_interrupt.send(b'X')
except pika.compat.SOCKET_ERROR as err:
if err.errno != errno.EWOULDBLOCK:
raise
except Exception as err:
# There's nothing sensible to do here, we'll exit the interrupt
# loop after POLL_TIMEOUT secs in worst case anyway.
LOGGER.warning("Failed to send interrupt to poller: %s", err)
raise
def _get_max_wait(self):
"""Get the interval to the next timeout event, or a default interval
:returns: maximum number of self.POLL_TIMEOUT_MULT-scaled time units
to wait for IO events
:rtype: int
"""
delay = self._get_wait_seconds()
if delay is None:
delay = self._MAX_POLL_TIMEOUT
else:
delay = min(delay, self._MAX_POLL_TIMEOUT)
return delay * self.POLL_TIMEOUT_MULT
def add_handler(self, fileno, handler, events):
"""Add a new fileno to the set to be monitored
:param int fileno: The file descriptor
:param callable handler: What is called when an event happens
:param int events: The event mask using READ, WRITE, ERROR
"""
self._fd_handlers[fileno] = handler
self._set_handler_events(fileno, events)
# Inform the derived class
self._register_fd(fileno, events)
def update_handler(self, fileno, events):
"""Set the events to the current events
:param int fileno: The file descriptor
:param int events: The event mask using READ, WRITE, ERROR
"""
# Record the change
events_cleared, events_set = self._set_handler_events(fileno, events)
# Inform the derived class
self._modify_fd_events(
fileno,
events=events,
events_to_clear=events_cleared,
events_to_set=events_set)
def remove_handler(self, fileno):
"""Remove a file descriptor from the set
:param int fileno: The file descriptor
"""
try:
del self._processing_fd_event_map[fileno]
except KeyError:
pass
events_cleared, _ = self._set_handler_events(fileno, 0)
del self._fd_handlers[fileno]
# Inform the derived class
self._unregister_fd(fileno, events_to_clear=events_cleared)
def _set_handler_events(self, fileno, events):
"""Set the handler's events to the given events; internal to
`_PollerBase`.
:param int fileno: The file descriptor
:param int events: The event mask (READ, WRITE, ERROR)
:returns: a 2-tuple (events_cleared, events_set)
:rtype: tuple
"""
events_cleared = 0
events_set = 0
for evt in (PollEvents.READ, PollEvents.WRITE, PollEvents.ERROR):
if events & evt:
if fileno not in self._fd_events[evt]:
self._fd_events[evt].add(fileno)
events_set |= evt
else:
if fileno in self._fd_events[evt]:
self._fd_events[evt].discard(fileno)
events_cleared |= evt
return events_cleared, events_set
def activate_poller(self):
"""Activate the poller
"""
# Activate the underlying poller and register current events
self._init_poller()
fd_to_events = collections.defaultdict(int)
for event, file_descriptors in self._fd_events.items():
for fileno in file_descriptors:
fd_to_events[fileno] |= event
for fileno, events in fd_to_events.items():
self._register_fd(fileno, events)
def deactivate_poller(self):
"""Deactivate the poller
"""
self._uninit_poller()
def start(self):
"""Start the main poller loop. It will loop until requested to exit.
This method is not reentrant and will raise an error if called
recursively (pika/pika#1095)
:raises: RuntimeError
"""
if self._running:
raise RuntimeError('IOLoop is not reentrant and is already running')
LOGGER.debug('Entering IOLoop')
self._running = True
self.activate_poller()
try:
# Run event loop
while not self._stopping:
self.poll()
self._process_timeouts()
finally:
try:
LOGGER.debug('Deactivating poller')
self.deactivate_poller()
finally:
self._stopping = False
self._running = False
def stop(self):
"""Request exit from the ioloop. The loop is NOT guaranteed to stop
before this method returns.
"""
LOGGER.debug('Stopping IOLoop')
self._stopping = True
self.wake_threadsafe()
@abc.abstractmethod
def poll(self):
"""Wait for events on interested filedescriptors.
"""
raise NotImplementedError
@abc.abstractmethod
def _init_poller(self):
"""Notify the implementation to allocate the poller resource"""
raise NotImplementedError
@abc.abstractmethod
def _uninit_poller(self):
"""Notify the implementation to release the poller resource"""
raise NotImplementedError
@abc.abstractmethod
def _register_fd(self, fileno, events):
"""The base class invokes this method to notify the implementation to
register the file descriptor with the polling object. The request must
be ignored if the poller is not activated.
:param int fileno: The file descriptor
:param int events: The event mask (READ, WRITE, ERROR)
"""
raise NotImplementedError
@abc.abstractmethod
def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
"""The base class invoikes this method to notify the implementation to
modify an already registered file descriptor. The request must be
ignored if the poller is not activated.
:param int fileno: The file descriptor
:param int events: absolute events (READ, WRITE, ERROR)
:param int events_to_clear: The events to clear (READ, WRITE, ERROR)
:param int events_to_set: The events to set (READ, WRITE, ERROR)
"""
raise NotImplementedError
@abc.abstractmethod
def _unregister_fd(self, fileno, events_to_clear):
"""The base class invokes this method to notify the implementation to
unregister the file descriptor being tracked by the polling object. The
request must be ignored if the poller is not activated.
:param int fileno: The file descriptor
:param int events_to_clear: The events to clear (READ, WRITE, ERROR)
"""
raise NotImplementedError
def _dispatch_fd_events(self, fd_event_map):
""" Helper to dispatch callbacks for file descriptors that received
events.
Before doing so we re-calculate the event mask based on what is
currently set in case it has been changed under our feet by a
previous callback. We also take a store a refernce to the
fd_event_map so that we can detect removal of an
fileno during processing of another callback and not generate
spurious callbacks on it.
:param dict fd_event_map: Map of fds to events received on them.
"""
# Reset the prior map; if the call is nested, this will suppress the
# remaining dispatch in the earlier call.
self._processing_fd_event_map.clear()
self._processing_fd_event_map = fd_event_map
for fileno in pika.compat.dictkeys(fd_event_map):
if fileno not in fd_event_map:
# the fileno has been removed from the map under our feet.
continue
events = fd_event_map[fileno]
for evt in [PollEvents.READ, PollEvents.WRITE, PollEvents.ERROR]:
if fileno not in self._fd_events[evt]:
events &= ~evt
if events:
handler = self._fd_handlers[fileno]
handler(fileno, events)
    @staticmethod
    def _get_interrupt_pair():
        """Use a socketpair to be able to interrupt the ioloop if called
        from another thread. Socketpair() is not supported on some OS (Win)
        so use a pair of simple TCP sockets instead. The sockets will be
        closed and garbage collected by python when the ioloop itself is.

        :returns: socket pair (read side, write side)
        """
        return pika.compat._nonblocking_socketpair()  # pylint: disable=W0212
    def _read_interrupt(self, _interrupt_fd, _events):
        """Read the interrupt byte(s). We ignore the event mask as we can only
        get here if there's data to be read on our fd.

        :param int _interrupt_fd: (unused) The file descriptor to read from
        :param int _events: (unused) The events generated for this fd
        """
        try:
            # NOTE Use recv instead of os.read for windows compatibility
            # Drain up to 512 bytes; multiple wakeups may have accumulated.
            self._r_interrupt.recv(512)  # pylint: disable=E1101
        except pika.compat.SOCKET_ERROR as err:
            # EAGAIN just means another reader already drained the socket.
            if err.errno != errno.EAGAIN:
                raise
class SelectPoller(_PollerBase):
    """Default behavior is to use Select since it's the widest supported and has
    all of the methods we need for child classes as well. One should only need
    to override the update_handler and start methods for additional types.
    """

    # if the poller uses MS specify 1000
    POLL_TIMEOUT_MULT = 1

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.
        """
        while True:
            try:
                if (self._fd_events[PollEvents.READ] or
                        self._fd_events[PollEvents.WRITE] or
                        self._fd_events[PollEvents.ERROR]):
                    read, write, error = select.select(
                        self._fd_events[PollEvents.READ],
                        self._fd_events[PollEvents.WRITE],
                        self._fd_events[PollEvents.ERROR], self._get_max_wait())
                else:
                    # NOTE When called without any FDs, select fails on
                    # Windows with error 10022, 'An invalid argument was
                    # supplied'.
                    time.sleep(self._get_max_wait())
                    read, write, error = [], [], []
                break
            # NOTE: `error` here shadows the error-fd list bound above; it is
            # only consulted within this handler before retry/re-raise.
            except _SELECT_ERRORS as error:
                if _is_resumable(error):
                    continue
                else:
                    raise

        # Build an event bit mask for each fileno we've received an event for
        fd_event_map = collections.defaultdict(int)
        for fd_set, evt in zip(
                (read, write, error),
                (PollEvents.READ, PollEvents.WRITE, PollEvents.ERROR)):
            for fileno in fd_set:
                fd_event_map[fileno] |= evt

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        # It's a no op in SelectPoller

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        # It's a no op in SelectPoller

    def _register_fd(self, fileno, events):
        """The base class invokes this method to notify the implementation to
        register the file descriptor with the polling object. The request must
        be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        # It's a no op in SelectPoller

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """The base class invokes this method to notify the implementation to
        modify an already registered file descriptor. The request must be
        ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        # It's a no op in SelectPoller

    def _unregister_fd(self, fileno, events_to_clear):
        """The base class invokes this method to notify the implementation to
        unregister the file descriptor being tracked by the polling object. The
        request must be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        # It's a no op in SelectPoller
class KQueuePoller(_PollerBase):
    # pylint: disable=E1101
    """KQueuePoller works on BSD based systems and is faster than select"""

    def __init__(self, get_wait_seconds, process_timeouts):
        """Create an instance of the KQueuePoller
        """
        # Lazily allocated in _init_poller; None means "not activated".
        self._kqueue = None
        super(KQueuePoller, self).__init__(get_wait_seconds, process_timeouts)

    @staticmethod
    def _map_event(kevent):
        """return the event type associated with a kevent object

        :param kevent kevent: a kevent object as returned by kqueue.control()
        :returns: event bit mask built from PollEvents values
        """
        mask = 0
        if kevent.filter == select.KQ_FILTER_READ:
            mask = PollEvents.READ
        elif kevent.filter == select.KQ_FILTER_WRITE:
            mask = PollEvents.WRITE
            if kevent.flags & select.KQ_EV_EOF:
                # May be set when the peer reader disconnects. We don't check
                # KQ_EV_EOF for KQ_FILTER_READ because in that case it may be
                # set before the remaining data is consumed from sockbuf.
                mask |= PollEvents.ERROR
        elif kevent.flags & select.KQ_EV_ERROR:
            mask = PollEvents.ERROR
        else:
            LOGGER.critical('Unexpected kevent: %s', kevent)
        return mask

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.
        """
        while True:
            try:
                # Up to 1000 kevents per wakeup; retry on resumable errors.
                kevents = self._kqueue.control(None, 1000,
                                               self._get_max_wait())
                break
            except _SELECT_ERRORS as error:
                if _is_resumable(error):
                    continue
                else:
                    raise

        # Fold per-kevent notifications into one bit mask per fileno.
        fd_event_map = collections.defaultdict(int)
        for event in kevents:
            fd_event_map[event.ident] |= self._map_event(event)

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        assert self._kqueue is None
        self._kqueue = select.kqueue()

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        if self._kqueue is not None:
            self._kqueue.close()
            self._kqueue = None

    def _register_fd(self, fileno, events):
        """The base class invokes this method to notify the implementation to
        register the file descriptor with the polling object. The request must
        be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        # Registration is just a modify from "nothing" to `events`.
        self._modify_fd_events(
            fileno, events=events, events_to_clear=0, events_to_set=events)

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """The base class invokes this method to notify the implementation to
        modify an already registered file descriptor. The request must be
        ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        if self._kqueue is None:
            # Poller not activated; ignore per contract.
            return

        # Translate READ/WRITE deltas into kevent add/delete operations.
        # ERROR has no kqueue filter of its own; it is derived in _map_event.
        kevents = list()
        if events_to_clear & PollEvents.READ:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_READ,
                    flags=select.KQ_EV_DELETE))
        if events_to_set & PollEvents.READ:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_READ,
                    flags=select.KQ_EV_ADD))
        if events_to_clear & PollEvents.WRITE:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_WRITE,
                    flags=select.KQ_EV_DELETE))
        if events_to_set & PollEvents.WRITE:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_WRITE,
                    flags=select.KQ_EV_ADD))

        self._kqueue.control(kevents, 0)

    def _unregister_fd(self, fileno, events_to_clear):
        """The base class invokes this method to notify the implementation to
        unregister the file descriptor being tracked by the polling object. The
        request must be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        self._modify_fd_events(
            fileno, events=0, events_to_clear=events_to_clear, events_to_set=0)
class PollPoller(_PollerBase):
    """Poll works on Linux and can have better performance than EPoll in
    certain scenarios. Both are faster than select.
    """
    # poll() takes its timeout in milliseconds, not seconds.
    POLL_TIMEOUT_MULT = 1000

    def __init__(self, get_wait_seconds, process_timeouts):
        """Create an instance of the PollPoller
        """
        # Lazily allocated in _init_poller; None means "not activated".
        self._poll = None
        super(PollPoller, self).__init__(get_wait_seconds, process_timeouts)

    @staticmethod
    def _create_poller():
        """
        :rtype: `select.poll`
        """
        return select.poll()  # pylint: disable=E1101

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.
        """
        while True:
            try:
                events = self._poll.poll(self._get_max_wait())
                break
            except _SELECT_ERRORS as error:
                if _is_resumable(error):
                    continue
                else:
                    raise

        fd_event_map = collections.defaultdict(int)
        for fileno, event in events:
            # NOTE: On OS X, when poll() sets POLLHUP, it's mutually-exclusive with
            # POLLOUT and it doesn't seem to set POLLERR along with POLLHUP when
            # socket connection fails, for example. So, we need to at least add
            # POLLERR when we see POLLHUP
            if (event & select.POLLHUP) and pika.compat.ON_OSX:
                event |= select.POLLERR

            fd_event_map[fileno] |= event

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        assert self._poll is None
        self._poll = self._create_poller()

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        if self._poll is not None:
            # select.poll has no close(); select.epoll (used by the
            # EPollPoller subclass) does, hence the hasattr guard.
            if hasattr(self._poll, "close"):
                self._poll.close()
            self._poll = None

    def _register_fd(self, fileno, events):
        """The base class invokes this method to notify the implementation to
        register the file descriptor with the polling object. The request must
        be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        if self._poll is not None:
            self._poll.register(fileno, events)

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """The base class invokes this method to notify the implementation to
        modify an already registered file descriptor. The request must be
        ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        # poll.modify takes the absolute mask, so the deltas are unused here.
        if self._poll is not None:
            self._poll.modify(fileno, events)

    def _unregister_fd(self, fileno, events_to_clear):
        """The base class invokes this method to notify the implementation to
        unregister the file descriptor being tracked by the polling object. The
        request must be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        if self._poll is not None:
            self._poll.unregister(fileno)
class EPollPoller(PollPoller):
    """EPoll works on Linux and can have better performance than Poll in
    certain scenarios. Both are faster than select.
    """
    # epoll() takes its timeout in seconds, unlike poll().
    POLL_TIMEOUT_MULT = 1

    @staticmethod
    def _create_poller():
        """
        :rtype: `select.epoll`
        """
        return select.epoll()  # pylint: disable=E1101
| |
import json
import logging
import requests
import os
import pexpect
import yaml
import shutil
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from teuthology.config import config as teuth_config
from teuthology.exceptions import CommandFailedError, AnsibleFailedError
from teuthology.repo_utils import fetch_repo
from . import Task
log = logging.getLogger(__name__)
class LoggerFile(object):
    """Adapt a ``logging.Logger`` to a minimal file-like interface.

    Used by Ansible.execute_playbook() when it calls pexpect.run(), which
    expects an object providing ``write`` and ``flush``.
    """

    def __init__(self, logger, level):
        self.logger = logger
        self.level = level

    def write(self, string):
        # Decode defensively; undecodable bytes are dropped rather than
        # crashing the log pipeline.
        decoded = string.decode('utf-8', 'ignore')
        self.logger.log(self.level, decoded)

    def flush(self):
        # Nothing is buffered locally; the logging framework flushes itself.
        pass
class Ansible(Task):
    """
    A task to run ansible playbooks

    Required configuration parameters:
        playbook:   Required; can either be a list of plays, or a path/URL to a
                    playbook. In the case of a path, it may be relative to the
                    repo's on-disk location (if a repo is provided), or
                    teuthology's working directory.

    Optional configuration parameters:
        repo:       A path or URL to a repo (defaults to '.'). Given a repo
                    value of 'foo', ANSIBLE_ROLES_PATH is set to 'foo/roles'
        branch:     If pointing to a remote git repo, use this branch. Defaults
                    to 'master'.
        hosts:      A list of teuthology roles or partial hostnames (or a
                    combination of the two). ansible-playbook will only be run
                    against hosts that match.
        inventory:  A path to be passed to ansible-playbook with the
                    --inventory-file flag; useful for playbooks that also have
                    vars they need access to. If this is not set, we check for
                    /etc/ansible/hosts and use that if it exists. If it does
                    not, we generate a temporary file to use.
        tags:       A string including any (comma-separated) tags to be passed
                    directly to ansible-playbook.
        vars:       A dict of vars to be passed to ansible-playbook via the
                    --extra-vars flag
        cleanup:    If present, the given or generated playbook will be run
                    again during teardown with a 'cleanup' var set to True.
                    This will allow the playbook to clean up after itself,
                    if the playbook supports this feature.
        reconnect:  If set to True (the default), then reconnect to hosts after
                    ansible-playbook completes. This is in case the playbook
                    makes changes to the SSH configuration, or user accounts -
                    we would want to reflect those changes immediately.

    Examples:

    tasks:
    - ansible:
        repo: https://github.com/ceph/ceph-cm-ansible.git
        playbook:
          - roles:
            - some_role
            - another_role
            hosts:
              - client.0
              - host1

    tasks:
    - ansible:
        repo: /path/to/repo
        inventory: /path/to/inventory
        playbook: /path/to/playbook.yml
        tags: my_tags
        vars:
            var1: string_value
            var2:
                - list_item
            var3:
                key: value
    """
    # set this in subclasses to provide a group to
    # assign hosts to for dynamic inventory creation
    inventory_group = None

    def __init__(self, ctx, config):
        super(Ansible, self).__init__(ctx, config)
        self.log = log
        # Track what we created ourselves so teardown() only removes those.
        self.generated_inventory = False
        self.generated_playbook = False

    def setup(self):
        """Resolve the repo, playbook and inventory before execution."""
        super(Ansible, self).setup()
        self.find_repo()
        self.get_playbook()
        self.get_inventory() or self.generate_hosts_file()
        if not hasattr(self, 'playbook_file'):
            self.generate_playbook()

    @property
    def failure_log(self):
        """Lazily-created temp file where the ansible callback plugin
        records failures (path exported via ANSIBLE_FAILURE_LOG)."""
        if not hasattr(self, '_failure_log'):
            self._failure_log = NamedTemporaryFile(
                prefix="teuth_ansible_failures_",
                delete=False,
            )
        return self._failure_log

    def find_repo(self):
        """
        Locate the repo we're using; cloning it from a remote repo if necessary
        """
        repo = self.config.get('repo', '.')
        if repo.startswith(('http://', 'https://', 'git@', 'git://')):
            repo_path = fetch_repo(
                repo,
                self.config.get('branch', 'master'),
            )
        else:
            repo_path = os.path.abspath(os.path.expanduser(repo))
        self.repo_path = repo_path

    def get_playbook(self):
        """
        If necessary, fetch and read the playbook file
        """
        playbook = self.config['playbook']
        if isinstance(playbook, list):
            # Multiple plays in a list
            self.playbook = playbook
        elif isinstance(playbook, str) and playbook.startswith(('http://',
                                                                'https://')):
            response = requests.get(playbook)
            response.raise_for_status()
            self.playbook = yaml.safe_load(response.text)
        elif isinstance(playbook, str):
            try:
                playbook_path = os.path.expanduser(playbook)
                if not playbook_path.startswith('/'):
                    # If the path is not absolute at this point, look for the
                    # playbook in the repo dir. If it's not there, we assume
                    # the path is relative to the working directory
                    pb_in_repo = os.path.join(self.repo_path, playbook_path)
                    if os.path.exists(pb_in_repo):
                        playbook_path = pb_in_repo
                # Use open() instead of the Python-2-only file() builtin
                self.playbook_file = open(playbook_path)
                playbook_yaml = yaml.safe_load(self.playbook_file)
                self.playbook = playbook_yaml
            except Exception:
                log.error("Unable to read playbook file %s", playbook)
                raise
        else:
            raise TypeError(
                "playbook value must either be a list, URL or a filename")
        log.info("Playbook: %s", self.playbook)

    def get_inventory(self):
        """
        Determine whether or not we're using an existing inventory file
        """
        self.inventory = self.config.get('inventory')
        etc_ansible_hosts = '/etc/ansible/hosts'
        if self.inventory:
            self.inventory = os.path.expanduser(self.inventory)
        elif os.path.exists(etc_ansible_hosts):
            self.inventory = etc_ansible_hosts
        return self.inventory

    def generate_hosts_file(self):
        """
        Generate a hosts (inventory) file to use. This should not be called if
        we're using an existing file.
        """
        hosts = self.cluster.remotes.keys()
        hostnames = [remote.hostname for remote in hosts]
        hostnames.sort()
        inventory = []
        if self.inventory_group:
            inventory.append('[{0}]'.format(self.inventory_group))
        inventory.extend(hostnames + [''])
        hosts_str = '\n'.join(inventory)
        hosts_file = NamedTemporaryFile(prefix="teuth_ansible_hosts_",
                                        delete=False)
        hosts_file.write(hosts_str)
        hosts_file.flush()
        self.generated_inventory = True
        self.inventory = hosts_file.name

    def generate_playbook(self):
        """
        Generate a playbook file to use. This should not be called if we're
        using an existing file.
        """
        for play in self.playbook:
            # Ensure each play is applied to all hosts mentioned in the --limit
            # flag we specify later
            play['hosts'] = 'all'
        pb_buffer = StringIO()
        pb_buffer.write('---\n')
        yaml.safe_dump(self.playbook, pb_buffer)
        pb_buffer.seek(0)
        playbook_file = NamedTemporaryFile(prefix="teuth_ansible_playbook_",
                                           delete=False)
        playbook_file.write(pb_buffer.read())
        playbook_file.flush()
        self.playbook_file = playbook_file
        self.generated_playbook = True

    def begin(self):
        """Run the playbook as the task's main action."""
        super(Ansible, self).begin()
        self.execute_playbook()

    def execute_playbook(self, _logfile=None):
        """
        Execute ansible-playbook

        :param _logfile: Use this file-like object instead of a LoggerFile for
                         testing
        """
        environ = os.environ
        environ['ANSIBLE_SSH_PIPELINING'] = '1'
        environ['ANSIBLE_FAILURE_LOG'] = self.failure_log.name
        environ['ANSIBLE_ROLES_PATH'] = "%s/roles" % self.repo_path
        args = self._build_args()
        command = ' '.join(args)
        log.debug("Running %s", command)

        out_log = self.log.getChild('out')
        out, status = pexpect.run(
            command,
            logfile=_logfile or LoggerFile(out_log, logging.INFO),
            withexitstatus=True,
            timeout=None,
        )
        if status != 0:
            self._handle_failure(command, status)

        if self.config.get('reconnect', True) is True:
            remotes = self.cluster.remotes.keys()
            log.debug("Reconnecting to %s", remotes)
            for remote in remotes:
                remote.reconnect()

    def _handle_failure(self, command, status):
        """Raise the most informative error available for a failed run."""
        failures = None
        with open(self.failure_log.name, 'r') as fail_log:
            try:
                failures = yaml.safe_load(fail_log)
            except yaml.parser.ParserError:
                log.error(
                    "Failed to parse ansible failure log: {0}".format(
                        self.failure_log.name,
                    )
                )
                failures = fail_log

        if failures:
            self._archive_failures()
            raise AnsibleFailedError(failures)
        raise CommandFailedError(command, status)

    def _archive_failures(self):
        """Move the failure log into the run's archive dir, if any."""
        if self.ctx.archive:
            archive_path = "{0}/ansible_failures.yaml".format(self.ctx.archive)
            log.info("Archiving ansible failure log at: {0}".format(
                archive_path,
            ))
            shutil.move(
                self.failure_log.name,
                archive_path
            )
            # 0o664 is the portable octal spelling (0664 is Python-2-only)
            os.chmod(archive_path, 0o664)

    def _build_args(self):
        """
        Assemble the list of args to be executed
        """
        fqdns = [r.hostname for r in self.cluster.remotes.keys()]
        # Assume all remotes use the same username
        user = self.cluster.remotes.keys()[0].user
        extra_vars = dict(ansible_ssh_user=user)
        extra_vars.update(self.config.get('vars', dict()))
        args = [
            'ansible-playbook', '-v',
            "--extra-vars", "'%s'" % json.dumps(extra_vars),
            '-i', self.inventory,
            '--limit', ','.join(fqdns),
            self.playbook_file.name,
        ]
        tags = self.config.get('tags')
        if tags:
            args.extend(['--tags', tags])
        return args

    def teardown(self):
        """Run optional cleanup, then remove any generated temp files."""
        self._cleanup()
        if self.generated_inventory:
            os.remove(self.inventory)
        if self.generated_playbook:
            os.remove(self.playbook_file.name)
        super(Ansible, self).teardown()

    def _cleanup(self):
        """
        If the ``cleanup`` key exists in config the same playbook will be
        run again during the teardown step with the var ``cleanup`` given with
        a value of ``True``. If supported, this will allow the playbook to
        cleanup after itself during teardown.
        """
        if self.config.get("cleanup"):
            log.info("Running ansible cleanup...")
            extra = dict(cleanup=True)
            if self.config.get('vars'):
                self.config.get('vars').update(extra)
            else:
                self.config['vars'] = extra
            self.execute_playbook()
        else:
            log.info("Skipping ansible cleanup...")
class CephLab(Ansible):
    __doc__ = """
    A very simple subclass of Ansible that defaults to:

    - ansible:
        repo: {git_base}ceph-cm-ansible.git
        playbook: cephlab.yml

    If a dynamic inventory is used, all hosts will be assigned to the
    group 'testnodes'.
    """.format(git_base=teuth_config.ceph_git_base_url)

    # Set the name so that Task knows to look up overrides for
    # 'ansible.cephlab' instead of just 'cephlab'
    name = 'ansible.cephlab'

    inventory_group = 'testnodes'

    def __init__(self, ctx, config):
        # Fill in the documented defaults without clobbering caller values.
        config = config or dict()
        config.setdefault('playbook', 'cephlab.yml')
        config.setdefault(
            'repo',
            os.path.join(teuth_config.ceph_git_base_url,
                         'ceph-cm-ansible.git'))
        super(CephLab, self).__init__(ctx, config)
# Aliases that teuthology's task loader resolves by name.
task = Ansible
cephlab = CephLab
| |
"""*Returns weather forecast for a location.*
This module can search weather reports using Wunderground API.
Weather reports consist of a current weather in observation location,
a 3 day weather forecast and distance from observation location to
the requested location. Module also features an interactive mode where user
can search new locations relative to the original location.
Usage:
```
/weather
/weather Palo Alto, CA
```
Interactive mode:
```
[distance] [cardinal direction]
100 NW - weather in 100km to northwest from original location
```
"""
import json
from math import asin, atan2, cos, degrees, pi, radians, sin, sqrt
#from telegram import KeyboardButton, ParseMode, ReplyKeyboardMarkup
import telegram
from geopy.geocoders import Nominatim
try:
# For Python 3.0 and later
from urllib.request import urlopen
from configparser import ConfigParser
except ImportError:
# Fall back to Python 2's urllib
from urllib import urlopen
from ConfigParser import ConfigParser
# Module-level setup: read the Wunderground API key from telepybot.conf.
# NOTE: ConfigParser.read silently ignores a missing file; config.get would
# then raise at import time.
config = ConfigParser()
config.read('telepybot.conf')
api_key = config.get('weather', 'wundergroundApiKey')
def handle_update(bot, update, update_queue, logger):
    """Get weather forecast for location from update.

    This is the main function that modulehandler calls.

    Args:
        bot (telegram.Bot): Telegram bot itself
        update (telegram.Update): Update that will be processed
        update_queue (Queue): Queue containing all incoming and unhandled updates
        logger (Logger): Logger that writes to bots own log file.
    """
    chat_id = update.message.chat_id
    bot.sendChatAction(chat_id, action=telegram.ChatAction.TYPING)
    # A location may follow the command, e.g. "/weather Palo Alto, CA".
    try:
        command = update.message.text.split(' ', 1)[1]
    except IndexError:
        command = ''
    finally:
        message = update.message

    # Keep prompting until a location is resolved, the user cancels, or the
    # user switches to another module.
    location = None
    while not location:
        #text = ("Please send a location or type a city.\nYou may also "
        #        "cancel by typing \"cancel\"")
        text = "Please send a location or type a city."
        if message.location:
            reply_markup = telegram.ReplyKeyboardHide()
            bot.sendMessage(
                chat_id=chat_id,
                text="Searching forecast.",
                reply_markup=reply_markup)
            bot.sendChatAction(chat_id, action=telegram.ChatAction.TYPING)
            location = parse_location(message.location)
        elif command != '':
            if command.lower() == 'cancel':
                reply_markup = telegram.ReplyKeyboardHide()
                bot.sendMessage(
                    chat_id=chat_id,
                    text="Cancelled.",
                    reply_markup=reply_markup)
                return
            try:
                # Geocode the free-form city name into coordinates.
                geolocator = Nominatim()
                geo_code = geolocator.geocode(command)
                if not geo_code:
                    raise ValueError("geolocator.geocode() returned None")
                reply_markup = telegram.ReplyKeyboardHide()
                bot.sendMessage(
                    chat_id=chat_id,
                    text="Searching forecast.",
                    reply_markup=reply_markup)
                location = parse_location(geo_code)
            except ValueError as e:
                logger.info("location %s caused error %s" % (command, e))
                # Fixed typo in the user-facing message ("anothet").
                text = "Couldn't find that location. Try another location"
                bot.sendMessage(chat_id=chat_id, text=text)
                message = update_queue.get().message
                if message.text.startswith('/'):
                    # User accesses another module
                    update_queue.put(update)
                    return
                command = message.text
                bot.sendChatAction(chat_id, action=telegram.ChatAction.TYPING)
                # TODO: fix this horrible structure
        else:
            # Nothing usable yet: show a keyboard with a location-request
            # button and wait for the next message.
            location_keyboard = telegram.KeyboardButton(
                text='Send location', request_location=True)
            reply_markup = telegram.ReplyKeyboardMarkup(
                [[location_keyboard], ['Cancel']])
            bot.sendMessage(
                chat_id=chat_id, text=text, reply_markup=reply_markup)
            message = update_queue.get().message
            if message.text.startswith('/'):
                # User accesses another module
                update_queue.put(update)
                return
            command = message.text
            bot.sendChatAction(chat_id, action=telegram.ChatAction.TYPING)

    report = construct_report(location)
    bot.sendMessage(
        chat_id=chat_id, text=report, parse_mode=telegram.ParseMode.MARKDOWN)

    # Interactive mode, where user can change location e.g. "100 N"
    text = """To search weather for relative position,
type [distance in km] [direction], e.g. "100 N"."""
    bot.sendMessage(chat_id=chat_id, text=text)
    while True:
        update = update_queue.get()
        bot.sendChatAction(chat_id, action=telegram.ChatAction.TYPING)
        try:
            distance, direction = update.message.text.split()
            distance = int(distance)
            new_location = calculate_new_query(location, distance, direction)
            bot.sendMessage(
                chat_id=chat_id,
                text=construct_report(new_location),
                parse_mode=telegram.ParseMode.MARKDOWN)
        except ValueError:
            # Anything that isn't "<int> <direction>" ends interactive mode.
            if update.message.text.startswith('/'):
                # User accesses another module
                update_queue.put(update)
            else:
                text = "Invalid command. Interaction stopped"
                bot.sendMessage(chat_id=chat_id, text=text)
            break
def construct_report(query):
    """Construct the weather report that will be sent to user.

    :param query: "lat,lon" coordinate string (as built by parse_location)
    :returns: Markdown-formatted report string, or an apology message when
        the Wunderground API cannot resolve the location
    """
    response = urlopen('http://api.wunderground.com/api/' + api_key +
                       '/conditions/forecast/alert/q/' + query + '.json')
    # Python 3 compatibility
    response_str = response.read().decode('utf-8')
    text = json.loads(response_str)
    # Wunderground reports errors inside the JSON body, not via HTTP status.
    try:
        error = text['response']['error']['type']
        if error == 'querynotfound':
            return "Sorry, couldn't fetch weather report from that location"
    except KeyError:
        pass
    curr = text['current_observation']
    # "Accuracy": distance and bearing from the requested point to the
    # observation station. query is "lat,lon", so index 1 is longitude.
    distance = calculate_distance(
        float(query.split(',')[1]), float(query.split(',')[0]),
        float(curr['observation_location']['longitude']),
        float(curr['observation_location']['latitude']))
    bearing = calculate_direction(
        float(query.split(',')[0]), float(query.split(',')[1]),
        float(curr['observation_location']['latitude']),
        float(curr['observation_location']['longitude']))
    # Build report which contains location, current observation etc.
    report = ('*{}*\nAccuracy: {}km {}\n{}\n{}, {}C, {}km/h, '
              '{}mm past hour\n\n').format(
        curr['observation_location']['full'], distance, bearing,
        curr['observation_time'], curr['weather'], curr['temp_c'],
        curr['wind_kph'], curr['precip_1hr_metric'])
    forecast = text['forecast']['txt_forecast']['forecastday']
    # Forecast for several time periods
    # NOTE(review): range starts at 1, so forecastday[0] (the first period)
    # is skipped -- confirm this is intentional.
    for i in range(1, 8):
        report += '*{}:* {} Probability for precipitation: {}%\n\n'.format(
            forecast[i]['title'], forecast[i]['fcttext_metric'],
            forecast[i]['pop'])
    return report
def calculate_distance(lon1, lat1, lon2, lat2):
    """Return the great-circle distance in km between two coordinate
    points, formatted as a string with two decimals (haversine formula).
    """
    lon1_r, lat1_r, lon2_r, lat2_r = map(radians, (lon1, lat1, lon2, lat2))
    # haversine formula
    half_dlon = (lon2_r - lon1_r) / 2
    half_dlat = (lat2_r - lat1_r) / 2
    hav = sin(half_dlat)**2 + cos(lat1_r) * cos(lat2_r) * sin(half_dlon)**2
    central_angle = 2 * asin(sqrt(hav))
    earth_radius_km = 6371
    # Leave two decimals and convert to string
    return "{0:.2f}".format(earth_radius_km * central_angle)
def calculate_direction(lat1, lon1, lat2, lon2):
    """Return the cardinal direction (N, NE, ..., NW) of the compass
    bearing from the start point to the end point.
    """
    phi1 = radians(lat1)
    phi2 = radians(lat2)
    delta_lon = radians(lon2 - lon1)
    east = sin(delta_lon) * cos(phi2)
    north = cos(phi1) * sin(phi2) - sin(phi1) * cos(phi2) * cos(delta_lon)
    # Normalize the initial bearing to [0, 360) degrees.
    compass_bearing = degrees(atan2(east, north)) % 360
    # 45-degree sectors; the trailing "N" catches bearings near 360.
    cardinal_points = ("N", "NE", "E", "SE", "S", "SW", "W", "NW", "N")
    return cardinal_points[int(round(compass_bearing / 45))]
def calculate_new_query(old_query, distance, direction):
    """Return a "lat,lon" query string *distance* km away from *old_query*
    in the given cardinal *direction* (N, NE, ..., NW, case-insensitive).
    """
    bearings = {
        'N': 0,
        'NE': pi / 4,
        'E': pi / 2,
        'SE': 3 * pi / 4,
        'S': pi,
        'SW': 5 * pi / 4,
        'W': 3 * pi / 2,
        'NW': 7 * pi / 4,
    }
    earth_radius_km = 6371
    angular_dist = float(distance) / earth_radius_km  # angular distance
    bearing = bearings[direction.upper()]
    lat_str, lon_str = old_query.split(',')
    lat0 = radians(float(lat_str))
    lon0 = radians(float(lon_str))
    # Destination point along a great circle, given start, bearing, distance.
    lat1 = asin(sin(lat0) * cos(angular_dist) +
                cos(lat0) * sin(angular_dist) * cos(bearing))
    lon1 = lon0 + atan2(sin(bearing) * sin(angular_dist) * cos(lat0),
                        cos(angular_dist) - sin(lat0) * sin(lat1))
    return '{},{}'.format(degrees(lat1), degrees(lon1))
def parse_location(location):
    """Format an object with ``latitude``/``longitude`` attributes as a
    "lat,lon" string, e.g. "60.161928,24.951688".
    """
    return '{},{}'.format(location.latitude, location.longitude)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2016, 2017 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import asynctest
from livebridge.base import BaseTarget, InvalidTargetResource
from livebridge.bridge import LiveBridge
from livebridge.components import get_hash
class LiveBridgeTest(asynctest.TestCase):
    def setUp(self):
        # Fixture: minimal "liveblog" bridge config; the API client is
        # replaced with a mock so no network access happens, and a bare
        # BaseTarget is used as the target under test.
        self.user = "foo"
        self.password = "bla"
        self.source_id = 12345
        self.endpoint = "https://example.com/api"
        self.label = "Testlabel"
        self.bridge_config = {"auth": {"user": self.user, "password": self.password}, "type": "liveblog",
                              "source_id": self.source_id, "endpoint": self.endpoint,
                              "label": self.label}
        self.bridge = LiveBridge(self.bridge_config)
        self.bridge.api_client = asynctest.MagicMock()
        self.bridge.api_client.last_updated = None
        self.sc = BaseTarget()
    async def test_init(self):
        # Constructor must copy config values onto the bridge and set the
        # documented retry defaults.
        assert self.bridge.source_id == self.source_id
        assert self.bridge.endpoint == self.endpoint
        assert self.bridge.label == self.label
        assert self.bridge.hash == get_hash(self.bridge_config)
        assert self.bridge.retry_multiplier == 5
        assert self.bridge.max_retries == 10
    async def test_client_init(self):
        # repr() should expose label, endpoint, source id and config hash.
        assert repr(self.bridge).startswith("<LiveBridge [Testlabel] https://example.com/api 12345 MD5:") == True
    async def test_add_target(self):
        # add_target must append exactly one target to an empty bridge.
        assert len(self.bridge.targets) == 0
        self.bridge.add_target(self.sc)
        assert len(self.bridge.targets) == 1
    async def test_put_to_queue(self):
        # The first put must spawn the queue-consumer task; subsequent puts
        # must reuse it (consumer created once, queue.put called per item).
        with asynctest.patch("asyncio.ensure_future") as mocked_ensure:
            mocked_ensure.return_value = "test"
            self.bridge._queue_consumer = asynctest.CoroutineMock(return_value="test")
            self.bridge.queue = asynctest.MagicMock()
            self.bridge.queue.put = asynctest.CoroutineMock(return_value=None)
            await self.bridge._put_to_queue({"foo": "baz"})
            assert self.bridge._queue_consumer.call_count == 1
            assert self.bridge.queue.put.call_count == 1
            assert self.bridge.queue_task == "test"
            # Second put: no new consumer, but one more queued item.
            await self.bridge._put_to_queue({"foo": "baz"})
            assert self.bridge._queue_consumer.call_count == 1
            assert self.bridge.queue.put.call_count == 2
            assert self.bridge.queue_task == "test"
    async def test_stop(self):
        # stop() must cancel the consumer task and report success; a pending
        # sleep task is registered to verify it gets handled as well.
        self.bridge.queue_task = asynctest.MagicMock()
        self.bridge.queue_task.cancel = asynctest.CoroutineMock(return_value=None)
        sleep_task = asynctest.MagicMock()
        self.bridge.sleep_tasks.append(sleep_task)
        assert self.bridge.stop() is True
        assert self.bridge.queue_task.cancel.call_count == 1
    async def test_sleep_cancel(self):
        # A 10s _sleep must return early when stop() is called after 3s;
        # the await completing at all is the assertion here.
        self.loop.call_later(3, self.bridge.stop)
        await self.bridge._sleep(10)
    async def test_action_done_exception(self):
        # Future finished without an exception: the queue item is marked
        # done and the future's exception() is consulted exactly once.
        item_in = {
            "target": asynctest.MagicMock(),
            "post": asynctest.MagicMock(),
            "count": 0
        }
        self.bridge.queue.task_done = asynctest.CoroutineMock(return_value=True)
        future = asynctest.MagicMock(asyncio.Future)
        future.exception = asynctest.Mock(return_value=None)
        await self.bridge._action_done(future, item_in)
        assert self.bridge.queue.task_done.call_count == 1
        assert future.exception.call_count == 1
    async def test_action_done_exception_invalid_target(self):
        """An InvalidTargetResource failure is acknowledged via task_done."""
        item_in = {
            "target": asynctest.MagicMock(),
            "post": asynctest.MagicMock(),
            "count": 0
        }
        self.bridge.queue.task_done = asynctest.CoroutineMock(return_value=True)
        future = asynctest.MagicMock(asyncio.Future)
        # The future carries an InvalidTargetResource exception.
        future.exception = asynctest.Mock(return_value=InvalidTargetResource("Test"))
        await self.bridge._action_done(future, item_in)
        assert self.bridge.queue.task_done.call_count == 1
        assert future.exception.call_count == 1
    async def test_action_done(self):
        """A failed action is re-queued with an incremented retry count."""
        item_in = {
            "target": asynctest.MagicMock(),
            "post": asynctest.MagicMock(),
            "count": 0
        }
        future = asyncio.Future()
        self.bridge.queue.task_done = asynctest.CoroutineMock(return_value=True)
        self.bridge._put_to_queue = asynctest.CoroutineMock(return_value=True)
        # Simulate a generic failure of the processed action.
        future.exception = asynctest.CoroutineMock(return_value=Exception("TestException"))
        await self.bridge._action_done(future, item_in)
        # Retry counter bumped and the item was put back on the queue.
        assert item_in["count"] == 1
        assert self.bridge._put_to_queue.call_count == 1
        assert self.bridge._put_to_queue.call_args == asynctest.call(item_in)
        assert self.bridge.queue.task_done.call_count == 1
    async def test_action_done_max_retries(self):
        """Once count reaches max_retries (10), a failed item is dropped."""
        item_in = {
            "target": asynctest.MagicMock(),
            "post": asynctest.MagicMock(),
            "count": 10
        }
        future = asyncio.Future()
        self.bridge.queue.task_done = asynctest.CoroutineMock(return_value=True)
        self.bridge._sleep = asynctest.CoroutineMock(return_value=True)
        self.bridge._put_to_queue = asynctest.CoroutineMock(return_value=True)
        future.exception = asynctest.CoroutineMock(return_value=Exception("TestException"))
        await self.bridge._action_done(future, item_in)
        # No re-queue, no backoff sleep — the item is only acknowledged.
        assert item_in["count"] == 10
        assert self.bridge._put_to_queue.call_count == 0
        assert self.bridge._sleep.call_count == 0
        assert self.bridge.queue.task_done.call_count == 1
    async def test_process_action(self):
        """_process_action forwards the queued post to its target handler."""
        task = {
            "target": asynctest.MagicMock(),
            "post": asynctest.MagicMock(),
            "count": 0
        }
        task["target"].handle_post = asynctest.CoroutineMock(return_value=True)
        self.bridge._action_done = asynctest.CoroutineMock(return_value=True)
        await self.bridge._process_action(task)
        assert task["target"].handle_post.call_count == 1
        assert task["target"].handle_post.call_args == asynctest.call(task["post"])
async def test_queue_consumer(self):
task = {
"target": asynctest.MagicMock(),
"post": asynctest.CoroutineMock(return_value=True),
"count": 0
}
self.bridge.queue.get = asynctest.CoroutineMock(side_effect=[task, Exception("Test")])
self.bridge.queue.task_done = asynctest.CoroutineMock(return_value=True)
self.bridge._process_action = asynctest.CoroutineMock(return_value=None)
await self.bridge._queue_consumer()
self.bridge.queue.get.call_count == 2
self.bridge._process_action.call_count == 1
self.bridge.queue.task_done.call_count == 1
async def test_queue_consumer_aborting(self):
item = {
"target": asynctest.MagicMock(),
"post": asynctest.MagicMock(),
"count": 11
}
self.bridge.queue.get = asynctest.CoroutineMock(side_effect=[item, asyncio.CancelledError()])
self.bridge._process_action = asynctest.CoroutineMock(return_value=None)
self.bridge.queue.task_done = asynctest.CoroutineMock()
await self.bridge._queue_consumer()
self.bridge._process_action.call_count == 1
self.bridge.queue.get.call_count == 2
self.bridge.queue.task_done.call_count == 1
    async def test_check_posts(self):
        """check_posts polls the API client and hands the posts to new_posts."""
        self.bridge.new_posts = asynctest.CoroutineMock(return_value=None)
        self.bridge.api_client.poll = asynctest.CoroutineMock(return_value=["one", "two"])
        self.bridge._put_to_queue = asynctest.CoroutineMock(return_value=True)
        res = await self.bridge.check_posts()
        assert res is True
        assert self.bridge.api_client.poll.call_count == 1
        self.bridge.new_posts.assert_called_once_with(["one", "two"])
    async def test_check_posts_empty(self):
        """check_posts still returns True when the source has no new posts."""
        # get() raising is tolerated; poll() simply yields nothing.
        self.bridge.source.get = asynctest.CoroutineMock(side_effect=Exception)
        self.bridge.source.poll = asynctest.CoroutineMock(return_value=False)
        assert self.bridge.source.last_updated is None
        res = await self.bridge.check_posts()
        assert res is True
    async def test_check_posts_failing(self):
        """check_posts swallows a failing poll and still returns True."""
        self.bridge.source.poll = asynctest.CoroutineMock(side_effect=Exception)
        assert self.bridge.source.last_updated is None
        res = await self.bridge.check_posts()
        assert res is True
    async def test_new_posts(self):
        """new_posts queues one action per (post, target) combination."""
        # return of target
        handle_res = asynctest.MagicMock()
        handle_res.id = "654321"
        handle_res.target_doc = {"foo": "baz"}
        # target
        target = asynctest.MagicMock()
        target.handle_post = asynctest.CoroutineMock(return_value=handle_res)
        target.type = "scribble"
        target.target_id = "foo"
        # post
        post = asynctest.MagicMock()
        post.id = "12345"
        self.bridge.targets.append(target)
        # mock method calls
        api_res = asynctest.MagicMock()
        api_res.updated = "2016-10-31T10:10:10+0:00"
        # test one
        self.bridge._put_to_queue = asynctest.CoroutineMock(return_value=True)
        res = await self.bridge.new_posts([api_res])
        assert res is None
        # one incoming post x one registered target -> one queued action
        assert self.bridge._put_to_queue.call_count == 1
    async def test_new_posts_failing(self):
        """new_posts tolerates a non-iterable argument and returns None."""
        res = await self.bridge.new_posts(lambda: Exception())
        assert res is None
async def test_listen_ws(self):
self.bridge.source.listen = asynctest.CoroutineMock(return_value=None)
res = await self.bridge.listen_ws()
assert type(res) == asyncio.Task
self.bridge.source.listen.assert_called_once_with(self.bridge.new_posts)
    @asynctest.fail_on(unused_loop=False)
    def test_get_hash(self):
        """get_hash() returns stable 32-hex-char digests (presumably MD5,
        matching the 'MD5:' marker in the bridge repr) for dicts and lists."""
        assert get_hash({"foo": "bar"}) == "dd63dafcbd4d5b28badfcaf86fb6fcdb"
        assert get_hash([1, 2, 3, 4, 5, 6]) == "199ff5b613f5dc25dff99df513516bf9"
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import unittest
import warnings
from scrapy.utils.deprecate import create_deprecated_class, update_classpath
from tests import mock
class MyWarning(UserWarning):
    """Custom warning category used to isolate warnings raised by these tests."""
    pass
class SomeBaseClass(object):
    """Plain base class used as an ancestor in deprecation tests."""
    pass
class NewName(SomeBaseClass):
    """The 'new' class that deprecated aliases should point to."""
    pass
class WarnWhenSubclassedTest(unittest.TestCase):
    """Behaviour of classes produced by create_deprecated_class()."""

    def _mywarnings(self, w, category=MyWarning):
        """Filter recorded warnings down to the given category.

        Bug fix: the previous implementation filtered on the hard-coded
        ``MyWarning`` and silently ignored the *category* parameter.
        """
        return [x for x in w if x.category is category]

    def test_no_warning_on_definition(self):
        """Merely creating a deprecated class must not warn."""
        with warnings.catch_warnings(record=True) as w:
            Deprecated = create_deprecated_class('Deprecated', NewName)

        w = self._mywarnings(w)
        self.assertEqual(w, [])

    def test_subclassing_warning_message(self):
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_category=MyWarning)

        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass

        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertEqual(
            str(w[0].message),
            "tests.test_utils_deprecate.UserClass inherits from "
            "deprecated class tests.test_utils_deprecate.Deprecated, "
            "please inherit from tests.test_utils_deprecate.NewName."
            " (warning only on first subclass, there may be others)"
        )
        self.assertEqual(w[0].lineno, inspect.getsourcelines(UserClass)[1])

    def test_custom_class_paths(self):
        """Explicit old/new class paths appear in both warnings."""
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             new_class_path='foo.NewClass',
                                             old_class_path='bar.OldClass',
                                             warn_category=MyWarning)

        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass

            _ = Deprecated()

        w = self._mywarnings(w)
        self.assertEqual(len(w), 2)
        self.assertIn('foo.NewClass', str(w[0].message))
        self.assertIn('bar.OldClass', str(w[0].message))
        self.assertIn('foo.NewClass', str(w[1].message))
        self.assertIn('bar.OldClass', str(w[1].message))

    def test_subclassing_warns_only_on_direct_childs(self):
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_once=False,
                                             warn_category=MyWarning)

        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass

            class NoWarnOnMe(UserClass):
                pass

        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertIn('UserClass', str(w[0].message))

    def test_subclassing_warns_once_by_default(self):
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_category=MyWarning)

        with warnings.catch_warnings(record=True) as w:
            class UserClass(Deprecated):
                pass

            class FooClass(Deprecated):
                pass

            class BarClass(Deprecated):
                pass

        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertIn('UserClass', str(w[0].message))

    def test_warning_on_instance(self):
        Deprecated = create_deprecated_class('Deprecated', NewName,
                                             warn_category=MyWarning)

        # ignore subclassing warnings
        with warnings.catch_warnings(record=True):
            class UserClass(Deprecated):
                pass

        with warnings.catch_warnings(record=True) as w:
            _, lineno = Deprecated(), inspect.getlineno(inspect.currentframe())
            _ = UserClass()  # subclass instances don't warn

        w = self._mywarnings(w)
        self.assertEqual(len(w), 1)
        self.assertEqual(
            str(w[0].message),
            "tests.test_utils_deprecate.Deprecated is deprecated, "
            "instantiate tests.test_utils_deprecate.NewName instead."
        )
        self.assertEqual(w[0].lineno, lineno)

    def test_warning_auto_message(self):
        """Without an explicit message, old and new paths are auto-derived."""
        with warnings.catch_warnings(record=True) as w:
            Deprecated = create_deprecated_class('Deprecated', NewName)

            class UserClass2(Deprecated):
                pass

        msg = str(w[0].message)
        self.assertIn("tests.test_utils_deprecate.NewName", msg)
        self.assertIn("tests.test_utils_deprecate.Deprecated", msg)

    def test_issubclass(self):
        with warnings.catch_warnings(record=True):
            DeprecatedName = create_deprecated_class('DeprecatedName', NewName)

            class UpdatedUserClass1(NewName):
                pass

            class UpdatedUserClass1a(NewName):
                pass

            class OutdatedUserClass1(DeprecatedName):
                pass

            class OutdatedUserClass1a(DeprecatedName):
                pass

            class UnrelatedClass(object):
                pass

            class OldStyleClass:
                pass

        assert issubclass(UpdatedUserClass1, NewName)
        assert issubclass(UpdatedUserClass1a, NewName)
        assert issubclass(UpdatedUserClass1, DeprecatedName)
        assert issubclass(UpdatedUserClass1a, DeprecatedName)
        assert issubclass(OutdatedUserClass1, DeprecatedName)
        assert not issubclass(UnrelatedClass, DeprecatedName)
        # (a duplicated copy of the next assertion was removed)
        assert not issubclass(OldStyleClass, DeprecatedName)
        assert not issubclass(OutdatedUserClass1, OutdatedUserClass1a)
        assert not issubclass(OutdatedUserClass1a, OutdatedUserClass1)

        self.assertRaises(TypeError, issubclass, object(), DeprecatedName)

    def test_isinstance(self):
        with warnings.catch_warnings(record=True):
            DeprecatedName = create_deprecated_class('DeprecatedName', NewName)

            class UpdatedUserClass2(NewName):
                pass

            class UpdatedUserClass2a(NewName):
                pass

            class OutdatedUserClass2(DeprecatedName):
                pass

            class OutdatedUserClass2a(DeprecatedName):
                pass

            class UnrelatedClass(object):
                pass

            class OldStyleClass:
                pass

        assert isinstance(UpdatedUserClass2(), NewName)
        assert isinstance(UpdatedUserClass2a(), NewName)
        assert isinstance(UpdatedUserClass2(), DeprecatedName)
        assert isinstance(UpdatedUserClass2a(), DeprecatedName)
        assert isinstance(OutdatedUserClass2(), DeprecatedName)
        assert isinstance(OutdatedUserClass2a(), DeprecatedName)
        assert not isinstance(OutdatedUserClass2a(), OutdatedUserClass2)
        assert not isinstance(OutdatedUserClass2(), OutdatedUserClass2a)
        assert not isinstance(UnrelatedClass(), DeprecatedName)
        assert not isinstance(OldStyleClass(), DeprecatedName)

    def test_clsdict(self):
        """Extra class attributes are carried over to the deprecated class."""
        with warnings.catch_warnings(record=True):
            Deprecated = create_deprecated_class('Deprecated', NewName, {'foo': 'bar'})

        self.assertEqual(Deprecated.foo, 'bar')

    def test_deprecate_a_class_with_custom_metaclass(self):
        Meta1 = type('Meta1', (type,), {})
        New = Meta1('New', (), {})
        Deprecated = create_deprecated_class('Deprecated', New)

    def test_deprecate_subclass_of_deprecated_class(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            Deprecated = create_deprecated_class('Deprecated', NewName,
                                                 warn_category=MyWarning)
            AlsoDeprecated = create_deprecated_class('AlsoDeprecated', Deprecated,
                                                     new_class_path='foo.Bar',
                                                     warn_category=MyWarning)

        w = self._mywarnings(w)
        self.assertEqual(len(w), 0, str(map(str, w)))

        with warnings.catch_warnings(record=True) as w:
            AlsoDeprecated()

            class UserClass(AlsoDeprecated):
                pass

        w = self._mywarnings(w)
        self.assertEqual(len(w), 2)
        self.assertIn('AlsoDeprecated', str(w[0].message))
        self.assertIn('foo.Bar', str(w[0].message))
        self.assertIn('AlsoDeprecated', str(w[1].message))
        self.assertIn('foo.Bar', str(w[1].message))

    def test_inspect_stack(self):
        """If inspect.stack() fails, a best-effort warning is still emitted."""
        with mock.patch('inspect.stack', side_effect=IndexError):
            with warnings.catch_warnings(record=True) as w:
                DeprecatedName = create_deprecated_class('DeprecatedName', NewName)

                class SubClass(DeprecatedName):
                    pass

        self.assertIn("Error detecting parent module", str(w[0].message))
@mock.patch('scrapy.utils.deprecate.DEPRECATION_RULES',
            [('scrapy.contrib.pipeline.', 'scrapy.pipelines.'),
             ('scrapy.contrib.', 'scrapy.extensions.')])
class UpdateClassPathTest(unittest.TestCase):
    """update_classpath() behaviour against a patched set of rename rules."""

    def test_old_path_gets_fixed(self):
        """A deprecated dotted path is rewritten and one warning is emitted."""
        with warnings.catch_warnings(record=True) as caught:
            result = update_classpath('scrapy.contrib.debug.Debug')
            self.assertEqual(result, 'scrapy.extensions.debug.Debug')
            self.assertEqual(len(caught), 1)
            message = str(caught[0].message)
            self.assertIn("scrapy.contrib.debug.Debug", message)
            self.assertIn("scrapy.extensions.debug.Debug", message)

    def test_sorted_replacement(self):
        """The more specific rule wins over the generic 'scrapy.contrib.' one."""
        with warnings.catch_warnings(record=True):
            result = update_classpath('scrapy.contrib.pipeline.Pipeline')
            self.assertEqual(result, 'scrapy.pipelines.Pipeline')

    def test_unmatched_path_stays_the_same(self):
        """Paths with no matching rule pass through untouched and silently."""
        with warnings.catch_warnings(record=True) as caught:
            result = update_classpath('scrapy.unmatched.Path')
            self.assertEqual(result, 'scrapy.unmatched.Path')
            self.assertEqual(len(caught), 0)
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import hashlib
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import textwrap
from programs import file_locks
from programs.tracing import Tracing
try:
USER_NAME = os.getlogin()
except (AttributeError, OSError):
import getpass
USER_NAME = getpass.getuser()
def get_file_contents_if_exists(path, default=None):
    """Return the stripped contents of *path*.

    Falls back to *default* when the file is missing or contains only
    whitespace.
    """
    with Tracing("BuckProject.get_file_contents_if_it_exists", args={"path": path}):
        if not os.path.exists(path):
            return default
        with open(path) as handle:
            stripped = handle.read().strip()
        return stripped if stripped else default
def write_contents_to_file(path, contents):
    """Overwrite *path* with the string form of *contents*."""
    with Tracing("BuckProject.write_contents_to_file", args={"path": path}):
        with open(path, "w") as sink:
            sink.write(str(contents))
def makedirs(path):
    """Create *path* (including parents), tolerating concurrent creation.

    Several processes may race to create the same directory (e.g. a series
    of linters running buck query without buckd), so EEXIST for an already
    existing directory is swallowed.  Mostly equivalent to Python 3's
    os.makedirs(path, exist_ok=True); any other failure propagates.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        # Re-raise unless this is exactly the benign "already exists" case.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def _is_eden(path):
return os.path.isdir(os.path.join(path, ".eden", "root"))
def _find_eden_root(project_root):
return os.readlink(os.path.join(project_root, ".eden", "root"))
def _add_eden_bindmount(eden_root, path):
    """Register *path* as an Eden bind redirect, relative to *eden_root*.

    Shells out to `eden redirect add <relpath> bind`; re-raises
    CalledProcessError after logging when the command fails.
    """
    relative_path = os.path.relpath(path, eden_root)
    logging.debug(
        "Adding eden mount at {}, path relative to eden {}".format(path, relative_path)
    )
    try:
        subprocess.check_output(["eden", "redirect", "add", relative_path, "bind"])
    except subprocess.CalledProcessError:
        logging.warning("Could not add eden redirect for " + path)
        raise
def add_eden_bindmounts(repo_root, buck_out):
    """Ensure buck's output directories are Eden bind redirects.

    No-op when *repo_root* is not an Eden checkout, or when the
    NO_BUCK_ADD_EDEN_BINDMOUNTS environment variable is set to anything
    other than "0".  Besides *buck_out*, extra mount points can be listed
    (one per line, '#' comments allowed) in a .buck-eden-bindmounts file
    at the repo root.
    """
    if not _is_eden(repo_root):
        return
    # Opt-out escape hatch: any value other than "0" disables the setup.
    add_bindmounts = os.environ.get("NO_BUCK_ADD_EDEN_BINDMOUNTS", "0").strip() == "0"
    if not add_bindmounts:
        logging.warning(
            "Skipping adding eden bindmounts because "
            + "NO_BUCK_ADD_EDEN_BINDMOUNTS was set"
        )
        return
    eden_root = _find_eden_root(repo_root)
    eden_bindmounts = {buck_out}
    eden_bindmounts_file = os.path.join(repo_root, ".buck-eden-bindmounts")
    if os.path.exists(eden_bindmounts_file):
        logging.debug("Reading eden bindmounts from " + eden_bindmounts_file)
        with open(eden_bindmounts_file, "r") as fin:
            for bindmount in fin:
                bindmount = bindmount.strip()
                if bindmount and not bindmount.startswith("#"):
                    eden_bindmounts.add(os.path.join(repo_root, bindmount))
    for mount in eden_bindmounts:
        if os.path.exists(mount):
            # An existing path that is itself inside eden means user data
            # could be destroyed by redirecting it, so only warn.
            if _is_eden(mount):
                msg = (
                    "Eden bindmount at {path} was requested, but it is already a "
                    "directory within an eden filesystem.\n"
                    "In order to prevent destructive actions on user data, you "
                    "must remove this directory yourself.\n"
                    "Please stop buck with `buck killall`, remove {path}, and run buck "
                    "again."
                )
                logging.warning(msg.format(path=mount))
            else:
                logging.debug(
                    "Eden bindmount at {} already exists, skipping".format(mount)
                )
        else:
            _add_eden_bindmount(eden_root, mount)
class BuckProject:
    """Filesystem layout and daemon bookkeeping for one buck invocation.

    Owns the (optionally isolation-prefixed) buck-out directory tree,
    per-run temp directories, and the .buckd daemon directory with its
    version/pid/stdout/stderr/jvm-args files.
    """

    def __init__(self, root):
        self.root = root
        # An "--isolation_prefix <name>" argument lets several buck
        # instances coexist in the same repo by prefixing buck-out/.buckd.
        try:
            isolated_pos = sys.argv.index("--isolation_prefix")
            # Bug fix: the old guard (isolated_pos < len(sys.argv)) was
            # always true after a successful index(), so passing
            # "--isolation_prefix" as the *last* argument raised
            # IndexError instead of falling back to no prefix.
            if isolated_pos + 1 < len(sys.argv):
                self.prefix = sys.argv[isolated_pos + 1]
            else:
                self.prefix = ""
        except ValueError:
            self.prefix = ""
        self._buck_out_dirname = "buck-out"
        if len(self.prefix) > 0:
            self._buck_out_dirname = self.prefix + "-" + self._buck_out_dirname
        self._buck_out = os.path.join(self.root, self._buck_out_dirname)
        add_eden_bindmounts(self.root, self._buck_out)
        self._buck_out_tmp = os.path.join(self._buck_out, "tmp")
        makedirs(self._buck_out_tmp)
        self._buck_out_log = os.path.join(self._buck_out, "log")
        makedirs(self._buck_out_log)
        self.tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=self._buck_out_tmp)
        # Only created if buckd is used.
        self.buckd_tmp_dir = None
        self.buckd_dir = os.path.join(root, self.prefix + ".buckd")
        self.buckd_version_file = os.path.join(self.buckd_dir, "buckd.version")
        self.buckd_pid_file = os.path.join(self.buckd_dir, "pid")
        self.buckd_stdout = os.path.join(self.buckd_dir, "stdout")
        self.buckd_stderr = os.path.join(self.buckd_dir, "stderr")
        self.buckd_jvm_args_file = os.path.join(self.buckd_dir, "buckjavaargs.running")
        buck_javaargs_path = os.path.join(self.root, ".buckjavaargs")
        self.buck_javaargs = get_file_contents_if_exists(buck_javaargs_path)
        buck_javaargs_path_local = os.path.join(self.root, ".buckjavaargs.local")
        self.buck_javaargs_local = get_file_contents_if_exists(buck_javaargs_path_local)

    # A hash that uniquely identifies this instance of buck.
    # Historically, this has meant 'one buck per repo' or 'one buck per root',
    # but isolation mode means we can have multiple bucks coexisting.
    # Useful for disambiguating identifiers in a global namespace.
    def get_instance_hash(self):
        """Return sha256(root + prefix) as a hex digest."""
        return hashlib.sha256(
            "{}{}".format(self.root, self.prefix).encode("utf-8")
        ).hexdigest()

    # keep in sync with get_buckd_transport_address
    def get_buckd_transport_file_path(self):
        """Path of the buckd IPC endpoint (named pipe on NT, unix socket else)."""
        if os.name == "nt":
            return u"\\\\.\\pipe\\buckd_{0}".format(self.get_instance_hash())
        else:
            return os.path.join(self.buckd_dir, "sock")

    def get_buckd_transport_address(self):
        """Nailgun-style transport address for the buckd endpoint."""
        if os.name == "nt":
            # Nailgun prepends named pipe prefix by itself
            return "local:buckd_{0}".format(self.get_instance_hash())
        else:
            # Nailgun assumes path is relative to self.root
            return "local:{0}.buckd/sock".format(self.prefix)

    def get_running_buckd_version(self):
        """Version string of the running buckd, or None when not recorded."""
        return get_file_contents_if_exists(self.buckd_version_file)

    def get_running_buckd_pid(self):
        """Pid of the running buckd, or None if the file is missing/corrupt."""
        try:
            return int(get_file_contents_if_exists(self.buckd_pid_file))
        except (ValueError, TypeError):
            # ValueError: non-numeric contents; TypeError: int(None) when
            # the pid file does not exist.
            return None

    def get_running_buckd_jvm_args(self):
        """JVM args the running buckd was started with (empty when unknown)."""
        args_string = get_file_contents_if_exists(self.buckd_jvm_args_file)
        return args_string.split("\n") if args_string is not None else []

    def get_buckd_stdout(self):
        return self.buckd_stdout

    def get_buckd_stderr(self):
        return self.buckd_stderr

    def get_buck_out_log_dir(self):
        return self._buck_out_log

    def get_buck_out_relative_dir(self):
        return self._buck_out_dirname

    def get_section_lock_path(self, section):
        """Per-user, per-prefix lock file path in the system temp dir."""
        prefix_user_hash = hashlib.sha256(
            (self.prefix + "\n" + USER_NAME).encode("utf8")
        ).hexdigest()
        return os.path.join(
            tempfile.gettempdir(), ".buck_lock_%s_%s" % (section, prefix_user_hash)
        )

    def clean_up_buckd(self):
        """Remove the .buckd directory if it exists and can be locked."""
        with Tracing("BuckProject.clean_up_buckd"):
            if os.path.exists(self.buckd_dir):
                file_locks.rmtree_if_can_lock(self.buckd_dir)

    def create_buckd_dir(self):
        makedirs(self.buckd_dir)

    def create_buckd_tmp_dir(self):
        """Lazily create (and cache) the buckd scratch directory."""
        if self.buckd_tmp_dir is not None:
            return self.buckd_tmp_dir
        self.buckd_tmp_dir = tempfile.mkdtemp(
            prefix="buckd_tmp.", dir=self._buck_out_tmp
        )
        return self.buckd_tmp_dir

    def save_buckd_version(self, version):
        write_contents_to_file(self.buckd_version_file, version)

    def save_buckd_pid(self, pid):
        write_contents_to_file(self.buckd_pid_file, str(pid))

    def save_buckd_jvm_args(self, args):
        write_contents_to_file(self.buckd_jvm_args_file, "\n".join(args))

    @staticmethod
    def from_current_dir():
        """Walk up from cwd to the nearest directory containing .buckconfig.

        Raises NoBuckConfigFoundException when the filesystem root is
        reached without finding one.  --version/-V short-circuits to the
        current directory so version queries work anywhere.
        """
        with Tracing("BuckProject.from_current_dir"):
            current_dir = os.getcwd()
            if "--version" in sys.argv or "-V" in sys.argv:
                return BuckProject(current_dir)
            at_root_dir = False
            while not at_root_dir:
                if os.path.exists(os.path.join(current_dir, ".buckconfig")):
                    return BuckProject(current_dir)
                parent_dir = os.path.dirname(current_dir)
                at_root_dir = current_dir == parent_dir
                current_dir = parent_dir
            raise NoBuckConfigFoundException()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Best-effort removal of this run's temp dir on exit."""
        with Tracing("BuckProject.__exit__"):
            if os.path.exists(self.tmp_dir):
                try:
                    shutil.rmtree(self.tmp_dir)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
class NoBuckConfigFoundException(Exception):
    """Raised when no .buckconfig exists at or above the working directory."""

    def __init__(self):
        # A repo may override the default hint via this file in the cwd.
        hint_path = ".no_buckconfig_message"
        fallback = textwrap.dedent(
            """\
            This does not appear to be the root of a Buck project. Please 'cd'
            to the root of your project before running buck. If this really is
            the root of your project, run
            'touch .buckconfig'
            and then re-run your buck command."""
        )
        message = get_file_contents_if_exists(hint_path, fallback)
        Exception.__init__(self, message)
| |
"""Module for unittest parent class."""
import os
import unittest
import bot.lib.lib as lib
def additional_tests():
    """Discover the test suite one directory up (setuptools test hook)."""
    loader = unittest.defaultTestLoader
    return loader.discover("..")
class TestBot(unittest.TestCase):
    """Test class that all bot unittests should subclass."""

    def setUp(self):
        """Get config, set simulation pins to known state, set test flag."""
        # Load config and logger
        self.config = lib.get_config("bot/config.yaml")
        self.logger = lib.get_logger()
        # Set testing flag in config
        self.orig_test_state = self.config["testing"]
        lib.set_testing(True)

    def _require_strings(self, params):
        """Log and raise unless every (name, value) pair holds a string.

        :param params: Iterable of (param_name, param_value) pairs.
        :raises ValueError: If any value is not a string.
        """
        for name, value in params:
            if type(value) is not str:
                # Note: this also fixes the old "Param 'rum'" typo for run.
                msg = "Param '{}' must be a string".format(name)
                self.logger.error(msg)
                raise ValueError(msg)

    def _require_newline_terminated(self, params):
        """Log and raise unless every value ends with a newline.

        :param params: Iterable of (param_name, param_value) pairs.
        :raises ValueError: If any value lacks a trailing newline.
        """
        for name, value in params:
            if value[-1:] != "\n":
                msg = "Param '{}' must be newline-terminated".format(name)
                self.logger.error(msg)
                raise ValueError(msg)

    def setup_pwm(self, pwm_num, run, duty_ns, period_ns, polarity):
        """Set files that simulate BBB PWMs to known state.

        Note that pin properties (all params other than pwm_num) should
        be newline terminated because this is how the BeagleBone Black
        stores values in the file-like objects it uses to control hardware.

        :param pwm_num: Pin number of PWM pin to set to given state.
        :type pwm_num: int
        :param run: Run state to set PWM pin to (1/0, newline terminated).
        :type run: string
        :param duty_ns: Duty cycle to set PWM pin to (newline terminated).
        :type duty_ns: string
        :param period_ns: Period to set PWM pin to (newline terminated).
        :type period_ns: string
        :param polarity: Polarity to set PWM pin to (1/0, newline terminated).
        :type polarity: string
        """
        params = [("run", run), ("duty_ns", duty_ns),
                  ("period_ns", period_ns), ("polarity", polarity)]
        # All type checks first, then all newline checks, preserving the
        # original validation order.
        self._require_strings(params)
        self._require_newline_terminated(params)
        test_dir = self.config["test_pwm_base_dir"] + str(pwm_num)
        # Build test directories if they don't exist
        if not os.path.exists(test_dir):
            os.makedirs(test_dir)
        # Set known values in PWM simulated hardware files
        with open(test_dir + "/run", "w") as f:
            f.write(run)
        with open(test_dir + "/duty_ns", "w") as f:
            f.write(duty_ns)
        with open(test_dir + "/period_ns", "w") as f:
            f.write(period_ns)
        with open(test_dir + "/polarity", "w") as f:
            f.write(polarity)

    def setup_gpio(self, gpio_num, value="0\n", direction="out\n"):
        """Set files that simulate BBB GPIOs to known state.

        Note that pin properties (all params other than gpio_num) should
        be newline terminated because this is how the BeagleBone Black
        stores values in the file-like objects it uses to control hardware.

        :param gpio_num: Pin number of GPIO to set to value/direction params.
        :type gpio_num: int
        :param value: Value to set GPIO to. Default is recommended.
        :type value: string
        :param direction: Direction to set GPIO to. Default is recommended.
        :type direction: string
        """
        params = [("value", value), ("direction", direction)]
        self._require_strings(params)
        self._require_newline_terminated(params)
        test_dir = self.config["test_gpio_base_dir"] + str(gpio_num)
        # Build test directories if they don't exist
        if not os.path.exists(test_dir):
            os.makedirs(test_dir)
        # Set known values in GPIO simulated hardware files
        with open(test_dir + "/value", "w") as f:
            f.write(value)
        with open(test_dir + "/direction", "w") as f:
            f.write(direction)

    def setup_adc(self, adc_num, value="0\n"):
        """Set files that simulate BBB ADCs to known state.

        Note that pin properties (all params other than adc_num) should
        be newline terminated because this is how the BeagleBone Black
        stores values in the file-like objects it uses to control hardware.

        :param adc_num: Pin number of ADC to set to value param.
        :type adc_num: int
        :param value: Value to set ADC pin to. Default is recommended.
        :type value: string
        """
        params = [("value", value)]
        self._require_strings(params)
        self._require_newline_terminated(params)
        test_dir = self.config["test_adc_base_dir"]
        # Create ADC test directory if it doesn't exist
        if not os.path.exists(test_dir):
            os.makedirs(test_dir)
        # Set known values in ADC simulated hardware file
        with open(test_dir + "/AIN" + str(adc_num), "w") as f:
            f.write(value)

    def get_pwm(self, pwm_num):
        """Get current values in simulated PWM file.

        Note that each value in the returned dict will be a newline
        terminated string, as this is how the BeagleBone black stores
        values in the file-like objects it uses to control hardware.

        :param pwm_num: Pin number of PWM pin to read.
        :type pwm_num: int
        :returns: Dict with run, duty_ns, period_ns and polarity PWM info.
        """
        test_dir = self.config["test_pwm_base_dir"] + str(pwm_num)
        # Get values in PWM simulated hardware files
        results = {}
        with open(test_dir + "/run", "r") as f:
            results["run"] = f.read()
        with open(test_dir + "/duty_ns", "r") as f:
            results["duty_ns"] = f.read()
        with open(test_dir + "/period_ns", "r") as f:
            results["period_ns"] = f.read()
        with open(test_dir + "/polarity", "r") as f:
            results["polarity"] = f.read()
        return results

    def get_gpio(self, gpio_num):
        """Get current values in simulated GPIO file.

        Note that each value in the returned dict will be a newline
        terminated string, as this is how the BeagleBone black stores
        values in the file-like objects it uses to control hardware.

        :param gpio_num: Pin number of GPIO to read.
        :type gpio_num: int
        :returns: Dict with value and direction of given simulated GPIO.
        """
        test_dir = self.config["test_gpio_base_dir"] + str(gpio_num)
        # Get values in GPIO simulated hardware files
        results = {}
        with open(test_dir + "/value", "r") as f:
            results["value"] = f.read()
        with open(test_dir + "/direction", "r") as f:
            results["direction"] = f.read()
        return results

    def get_adc(self, adc_num):
        """Get current value in simulated ADC file.

        Note that the returned value will be a newline terminated
        string, as this is how the BeagleBone black stores values
        in the file-like objects it uses to control hardware.

        :param adc_num: Pin number of ADC to read.
        :type adc_num: int
        :returns: Current value of the given simulated ADC.
        """
        test_dir = self.config["test_adc_base_dir"]
        # Get value in ADC simulated hardware file
        with open(test_dir + "/AIN" + str(adc_num), "r") as f:
            return f.read()

    def tearDown(self):
        """Restore testing flag state in config file."""
        lib.set_testing(self.orig_test_state)
| |
from __future__ import unicode_literals
from django.contrib import messages
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView
from django_prices.templatetags.prices_i18n import gross
from prices import Price
from ...order.models import Order, OrderedItem, OrderNote
from ...userprofile.forms import AddressForm
from ..views import (StaffMemberOnlyMixin, FilterByStatusMixin,
staff_member_required)
from .forms import (OrderNoteForm, MoveItemsForm,
ChangeQuantityForm, ShipGroupForm, CapturePaymentForm,
ReleasePaymentForm, RefundPaymentForm)
class OrderListView(StaffMemberOnlyMixin, FilterByStatusMixin, ListView):
    """Staff-only, status-filterable, paginated list of orders."""
    template_name = 'dashboard/order/list.html'
    paginate_by = 20
    model = Order

    def get_queryset(self):
        """Join the user and prefetch groups/payments/items in one pass."""
        queryset = super(OrderListView, self).get_queryset()
        queryset = queryset.select_related('user')
        return queryset.prefetch_related('groups', 'payments', 'groups__items')
@staff_member_required
def order_details(request, order_pk):
    """Render the order detail page with capture/release/refund state."""
    queryset = (Order.objects
                .select_related('user', 'shipping_address', 'billing_address')
                .prefetch_related('notes', 'payments', 'history',
                                  'groups', 'groups__items'))
    order = get_object_or_404(queryset, pk=order_pk)
    payment = order.payments.last()
    # Default to a zero price in the order currency until a payment says more.
    captured = preauthorized = Price(0, currency=order.get_total().currency)
    can_capture = can_release = can_refund = False
    if payment:
        # Capture only makes sense for preauthorized, non-cancelled orders.
        can_capture = (payment.status == 'preauth' and
                       order.status != 'cancelled')
        can_release = payment.status == 'preauth'
        can_refund = payment.status == 'confirmed'
        preauthorized = payment.get_total_price()
        if payment.status == 'confirmed':
            captured = payment.get_captured_price()
    ctx = {'order': order, 'all_payments': order.payments.all(),
           'payment': payment, 'notes': order.notes.all(),
           'groups': list(order), 'captured': captured,
           'preauthorized': preauthorized, 'can_capture': can_capture,
           'can_release': can_release, 'can_refund': can_refund}
    return TemplateResponse(request, 'dashboard/order/detail.html', ctx)
@staff_member_required
def order_add_note(request, order_pk):
    """Attach a staff note to an order via the modal form."""
    order = get_object_or_404(Order, pk=order_pk)
    note = OrderNote(order=order, user=request.user)
    form = OrderNoteForm(request.POST or None, instance=note)
    if form.is_valid():
        form.save()
        msg = _('Added note')
        order.create_history_entry(comment=msg, user=request.user)
        messages.success(request, msg)
    # Unbound (GET) forms have no errors -> 200; invalid bound forms -> 400.
    status = 400 if form.errors else 200
    ctx = {'order': order, 'form': form}
    ctx.update(csrf(request))
    return TemplateResponse(
        request, 'dashboard/order/modal_add_note.html', ctx, status=status)
@staff_member_required
def capture_payment(request, order_pk, payment_pk):
    """Capture a preauthorized payment through the modal form."""
    order = get_object_or_404(Order, pk=order_pk)
    payment = get_object_or_404(order.payments, pk=payment_pk)
    # Default the capture amount to the full (gross) order total.
    initial_amount = order.get_total().quantize('0.01').gross
    form = CapturePaymentForm(request.POST or None, payment=payment,
                              initial={'amount': initial_amount})
    if form.is_valid() and form.capture():
        captured_amount = form.cleaned_data['amount']
        msg = _('Captured %(amount)s') % {'amount': gross(captured_amount)}
        payment.order.create_history_entry(comment=msg, user=request.user)
        messages.success(request, msg)
        return redirect('dashboard:order-details', order_pk=order.pk)
    status = 400 if form.errors else 200
    ctx = {'captured': payment.captured_amount,
           'currency': payment.currency,
           'form': form,
           'order': order,
           'payment': payment}
    return TemplateResponse(
        request, 'dashboard/order/modal_capture.html', ctx, status=status)
@staff_member_required
def refund_payment(request, order_pk, payment_pk):
    """Refund a captured payment through the modal form."""
    order = get_object_or_404(Order, pk=order_pk)
    payment = get_object_or_404(order.payments, pk=payment_pk)
    # Default the refund amount to everything captured so far.
    form = RefundPaymentForm(request.POST or None, payment=payment,
                             initial={'amount': payment.captured_amount})
    if form.is_valid() and form.refund():
        refunded_amount = form.cleaned_data['amount']
        msg = _('Refunded %(amount)s') % {'amount': gross(refunded_amount)}
        payment.order.create_history_entry(comment=msg, user=request.user)
        messages.success(request, msg)
        return redirect('dashboard:order-details', order_pk=order.pk)
    status = 400 if form.errors else 200
    ctx = {'captured': payment.captured_amount,
           'currency': payment.currency,
           'form': form,
           'order': order,
           'payment': payment}
    return TemplateResponse(
        request, 'dashboard/order/modal_refund.html', ctx, status=status)
@staff_member_required
def release_payment(request, order_pk, payment_pk):
    """Release (void) a preauthorized payment through the modal form."""
    order = get_object_or_404(Order, pk=order_pk)
    payment = get_object_or_404(order.payments, pk=payment_pk)
    form = ReleasePaymentForm(request.POST or None, payment=payment)
    if form.is_valid() and form.release():
        msg = _('Released payment')
        payment.order.create_history_entry(comment=msg, user=request.user)
        messages.success(request, msg)
        return redirect('dashboard:order-details', order_pk=order.pk)
    status = 400 if form.errors else 200
    ctx = {'captured': payment.captured_amount,
           'currency': payment.currency,
           'form': form,
           'order': order,
           'payment': payment}
    return TemplateResponse(
        request, 'dashboard/order/modal_release.html', ctx, status=status)
@staff_member_required
def orderline_change_quantity(request, order_pk, line_pk):
    """Edit the quantity of a single order line via the modal form."""
    order = get_object_or_404(Order, pk=order_pk)
    item = get_object_or_404(
        OrderedItem.objects.filter(delivery_group__order=order), pk=line_pk)
    form = ChangeQuantityForm(request.POST or None, instance=item)
    status = 200
    # Remember the quantity before saving so the history entry can show
    # the old -> new transition.
    old_quantity = item.quantity
    if form.is_valid():
        with transaction.atomic():
            form.save()
        msg = _(
            'Changed quantity for product %(product)s from'
            ' %(old_quantity)s to %(new_quantity)s') % {
                'product': item.product, 'old_quantity': old_quantity,
                'new_quantity': item.quantity}
        order.create_history_entry(comment=msg, user=request.user)
        messages.success(request, msg)
    elif form.errors:
        status = 400
    ctx = {'order': order, 'object': item, 'form': form}
    return TemplateResponse(
        request, 'dashboard/order/modal_change_quantity.html', ctx,
        status=status)
@staff_member_required
def orderline_split(request, order_pk, line_pk):
    """Move part of an order line's quantity into another delivery group."""
    order = get_object_or_404(Order, pk=order_pk)
    item = get_object_or_404(OrderedItem.objects.filter(
        delivery_group__order=order), pk=line_pk)
    form = MoveItemsForm(request.POST or None, item=item)
    # Re-derive line_pk from the fetched item for the template context.
    line_pk = None
    if item:
        line_pk = item.pk
    status = 200
    if form.is_valid():
        # Capture the source group before the move: move_items() may delete
        # the group once it becomes empty, which clears its pk.
        old_group = item.delivery_group
        how_many = form.cleaned_data['quantity']
        with transaction.atomic():
            target_group = form.move_items()
        if not old_group.pk:
            # The source group was deleted during the move; use a
            # human-readable placeholder in the history message.
            old_group = _('removed group')
        msg = _(
            'Moved %(how_many)s items %(item)s from %(old_group)s'
            ' to %(new_group)s') % {
                'how_many': how_many, 'item': item, 'old_group': old_group,
                'new_group': target_group}
        order.create_history_entry(comment=msg, user=request.user)
        messages.success(request, msg)
    elif form.errors:
        status = 400
    ctx = {'order': order, 'object': item, 'form': form, 'line_pk': line_pk}
    template = 'dashboard/order/modal_split_order_line.html'
    return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
def ship_delivery_group(request, order_pk, group_pk):
    """Mark a delivery group as shipped via the modal form."""
    order = get_object_or_404(Order, pk=order_pk)
    group = get_object_or_404(order.groups.all(), pk=group_pk)
    form = ShipGroupForm(request.POST or None, instance=group)
    if form.is_valid():
        with transaction.atomic():
            form.save()
        msg = _('Shipped %s') % group
        messages.success(request, msg)
        group.order.create_history_entry(comment=msg, user=request.user)
        return redirect('dashboard:order-details', order_pk=order_pk)
    status = 400 if form.errors else 200
    ctx = {'order': order, 'group': group, 'form': form}
    return TemplateResponse(
        request, 'dashboard/order/modal_ship_delivery_group.html', ctx,
        status=status)
@staff_member_required
def address_view(request, order_pk, address_type):
    """Edit an order's shipping or billing address in a modal form.

    Args:
        request: The current HTTP request.
        order_pk: Primary key of the order being edited.
        address_type: 'shipping' or 'billing'; any value other than
            'shipping' falls back to the billing address.

    Returns:
        TemplateResponse rendering the modal form; status 400 signals
        validation errors back to the AJAX caller.
    """
    # Use get_object_or_404 (like every other view in this module) so an
    # unknown pk yields a 404 instead of an uncaught DoesNotExist (500).
    order = get_object_or_404(Order, pk=order_pk)
    if address_type == 'shipping':
        address = order.shipping_address
        success_msg = _('Updated shipping address')
    else:
        address = order.billing_address
        success_msg = _('Updated billing address')
    form = AddressForm(request.POST or None, instance=address)
    status = 200
    if form.is_valid():
        form.save()
        order.create_history_entry(comment=success_msg, user=request.user)
        messages.success(request, success_msg)
    elif form.errors:
        status = 400
    ctx = {'order': order, 'address_type': address_type, 'form': form}
    return TemplateResponse(request, 'dashboard/order/modal_address_edit.html',
                            ctx, status=status)
| |
from __future__ import print_function
import os
import ast
import json
import glob
import re
import sys
import demisto_client
from threading import Thread, Lock
from demisto_sdk.commands.common.tools import run_threads_list
from google.cloud.storage import Bucket
from packaging.version import Version
from typing import List
from Tests.Marketplace.marketplace_services import init_storage_client, Pack, load_json
from Tests.Marketplace.upload_packs import download_and_extract_index
from Tests.Marketplace.marketplace_constants import GCPConfig, PACKS_FULL_PATH, IGNORED_FILES, PACKS_FOLDER, Metadata
from Tests.scripts.utils.content_packs_util import is_pack_deprecated
from Tests.scripts.utils import logging_wrapper as logging
# File name of the metadata file expected inside every content pack.
PACK_METADATA_FILE = 'pack_metadata.json'
# Matches a production-bucket pack zip path and captures its semantic version,
# e.g. '<base>/MyPack/1.2.3/MyPack.zip' -> '1.2.3'.
PACK_PATH_VERSION_REGEX = re.compile(fr'^{GCPConfig.PRODUCTION_STORAGE_BASE_PATH}/[A-Za-z0-9-_.]+/(\d+\.\d+\.\d+)/[A-Za-z0-9-_.]'
                                     r'+\.zip$')
# Module-wide success flag; worker threads flip it to False on any failure.
SUCCESS_FLAG = True
def get_pack_display_name(pack_id: str) -> str:
    """
    Look up a pack's display name in its pack_metadata.json file.
    :param pack_id: ID of the pack.
    :return: Name found in the pack metadata, otherwise an empty string.
    """
    if not pack_id:
        return ''
    metadata_path = os.path.join(PACKS_FULL_PATH, pack_id, PACK_METADATA_FILE)
    if not os.path.isfile(metadata_path):
        return ''
    with open(metadata_path, 'r') as metadata_file:
        return json.load(metadata_file).get('name')
def is_pack_hidden(pack_id: str) -> bool:
    """
    Check whether the given pack is deprecated.
    :param pack_id: ID of the pack.
    :return: True if the pack is deprecated, i.e. has 'hidden: true' field, False otherwise.
    """
    metadata_path = os.path.join(PACKS_FULL_PATH, pack_id, PACK_METADATA_FILE)
    if not (pack_id and os.path.isfile(metadata_path)):
        # Missing or unreadable metadata is treated as "not hidden".
        logging.warning(f'Could not open metadata file of pack {pack_id}')
        return False
    with open(metadata_path, 'r') as metadata_file:
        return json.load(metadata_file).get('hidden', False)
def create_dependencies_data_structure(response_data: dict, dependants_ids: list, dependencies_data: list,
                                       checked_packs: list):
    """ Recursively creates the packs' dependencies data structure for the installation requests
    (only required and uninstalled).

    Args:
        response_data (dict): The GET /search/dependencies response data.
        dependants_ids (list): A list of the dependant packs IDs.
        dependencies_data (list): The dependencies data structure to be created (mutated in place).
        checked_packs (list): Required dependants that were already found (mutated in place).
    """
    newly_found_ids = []
    for dependency in response_data:
        dep_id = dependency.get('id')
        dependants = dependency.get('dependants', {})
        # Is this dependency required by any of the packs we are resolving?
        required_by_target = any(
            dependant in dependants_ids and info.get('level', '') == 'required'
            for dependant, info in dependants.items()
        )
        if required_by_target and dep_id not in checked_packs:
            dependencies_data.append({
                'id': dep_id,
                'version': dependency.get('extras', {}).get('pack', {}).get('currentVersion')
            })
            newly_found_ids.append(dep_id)
            checked_packs.append(dep_id)
    # Recurse to pick up transitive dependencies of the packs found above.
    if newly_found_ids:
        create_dependencies_data_structure(response_data, newly_found_ids, dependencies_data, checked_packs)
def get_pack_dependencies(client: demisto_client, pack_data: dict, lock: Lock):
    """ Get the pack's required dependencies.

    Args:
        client (demisto_client): The configured client to use.
        pack_data (dict): Contains the pack ID and version.
        lock (Lock): A lock object guarding the global SUCCESS_FLAG.

    Returns:
        (list) The pack's dependencies, [] when the server reports 400,
        or None when the request raised (SUCCESS_FLAG is set to False).
    """
    pack_id = pack_data['id']
    logging.debug(f'Getting dependencies for pack {pack_id}')
    try:
        response_data, status_code, _ = demisto_client.generic_request_func(
            client,
            path='/contentpacks/marketplace/search/dependencies',
            method='POST',
            body=[pack_data],
            accept='application/json',
            _request_timeout=None
        )
        if 200 <= status_code < 300:
            dependencies_data: list = []
            dependants_ids = [pack_id]
            # The API returns the body as a Python-literal string; parse it.
            # (Renamed from the misspelled 'reseponse_data'.)
            response_dependencies = ast.literal_eval(response_data).get('dependencies', [])
            create_dependencies_data_structure(response_dependencies, dependants_ids,
                                               dependencies_data, dependants_ids)
            dependencies_str = ', '.join([dep['id'] for dep in dependencies_data])
            if dependencies_data:
                logging.debug(f'Found the following dependencies for pack {pack_id}: {dependencies_str}')
            return dependencies_data
        if status_code == 400:
            logging.error(f'Unable to find dependencies for {pack_id}.')
            return []
        else:
            result_object = ast.literal_eval(response_data)
            msg = result_object.get('message', '')
            raise Exception(f'Failed to get pack {pack_id} dependencies - with status code {status_code}\n{msg}\n')
    except Exception:
        logging.exception(f'The request to get pack {pack_id} dependencies has failed.')
        global SUCCESS_FLAG
        # 'with' guarantees the lock is released even if the body raises.
        with lock:
            SUCCESS_FLAG = False
def search_pack(client: demisto_client,
                pack_display_name: str,
                pack_id: str,
                lock: Lock) -> dict:
    """ Make a pack search request.

    Args:
        client (demisto_client): The configured client to use.
        pack_display_name (string): The pack display name.
        pack_id (string): The pack ID.
        lock (Lock): A lock object guarding the global SUCCESS_FLAG.

    Returns:
        (dict): Returns the pack data ({'id', 'version'}) if found,
        or empty dict otherwise (SUCCESS_FLAG is set to False on failure).
    """
    try:
        # make the search request
        response_data, status_code, _ = demisto_client.generic_request_func(client,
                                                                            path=f'/contentpacks/marketplace/{pack_id}',
                                                                            method='GET',
                                                                            accept='application/json',
                                                                            _request_timeout=None)
        if 200 <= status_code < 300:
            # The API returns the body as a Python-literal string; parse it.
            result_object = ast.literal_eval(response_data)
            if result_object and result_object.get('currentVersion'):
                logging.debug(f'Found pack "{pack_display_name}" by its ID "{pack_id}" in bucket!')
                pack_data = {
                    'id': result_object.get('id'),
                    'version': result_object.get('currentVersion')
                }
                return pack_data
            else:
                # A 2xx body without a currentVersion means the pack is not
                # actually available in the bucket.
                raise Exception(f'Did not find pack "{pack_display_name}" by its ID "{pack_id}" in bucket.')
        else:
            result_object = ast.literal_eval(response_data)
            msg = result_object.get('message', '')
            err_msg = f'Search request for pack "{pack_display_name}" with ID "{pack_id}", failed with status code ' \
                      f'{status_code}\n{msg}'
            raise Exception(err_msg)
    except Exception:
        # Any failure marks the whole run as failed, but an empty dict is
        # still returned so the calling thread can continue.
        logging.exception(f'Search request for pack "{pack_display_name}" with ID "{pack_id}", failed.')
        lock.acquire()
        global SUCCESS_FLAG
        SUCCESS_FLAG = False
        lock.release()
        return {}
def find_malformed_pack_id(error_message: str) -> List:
    """
    Extract the offending pack ID(s) from an installation error message.

    Args:
        error_message (str): The error message of the failed installation pack.

    Returns: A list of malformed pack IDs, empty when none match.
    """
    malformed_pack_pattern = re.compile(r'invalid version [0-9.]+ for pack with ID ([\w_-]+)')
    # findall already yields [] when there is no match.
    return malformed_pack_pattern.findall(str(error_message))
def install_nightly_packs(client: demisto_client,
                          host: str,
                          packs_to_install: List,
                          request_timeout: int = 999999):
    """
    Install content packs on nightly build.
    We will catch the exception if pack fails to install and send the request to install packs again without the
    corrupted pack.

    Args:
        client(demisto_client): The configured client to use.
        host (str): The server URL.
        packs_to_install (list): A list of the packs to install.
        request_timeout (int): Timeout settings for the installation request.

    Returns:
        None: No data returned.
    """
    logging.info(f'Installing packs on server {host}')
    # make the pack installation request
    all_packs_install_successfully = False
    request_data = {
        'packs': packs_to_install,
        'ignoreWarnings': True
    }
    # Retry loop: when the server rejects a "malformed" pack, drop that one
    # pack from the request body and try again until the install succeeds.
    while not all_packs_install_successfully:
        try:
            packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
            logging.debug(f'Installing the following packs in server {host}:\n{packs_to_install_str}')
            response_data, status_code, _ = demisto_client.generic_request_func(client,
                                                                                path='/contentpacks/marketplace/install',
                                                                                method='POST',
                                                                                body=request_data,
                                                                                accept='application/json',
                                                                                _request_timeout=request_timeout)
            if 200 <= status_code < 300:
                packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for pack in
                              ast.literal_eval(response_data)]
                logging.success(f'Packs were successfully installed on server {host}')
                logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
            else:
                result_object = ast.literal_eval(response_data)
                message = result_object.get('message', '')
                raise Exception(f'Failed to install packs on server {host}- with status code {status_code}\n{message}\n')
            break
        except Exception as e:
            all_packs_install_successfully = False
            malformed_pack_id = find_malformed_pack_id(str(e))
            if not malformed_pack_id:
                # Not a malformed-pack failure: there is nothing to retry
                # without, so give up.
                logging.exception('The request to install packs has failed')
                raise
            pack_ids_to_install = {pack['id'] for pack in packs_to_install}
            malformed_pack_id = malformed_pack_id[0]
            if malformed_pack_id not in pack_ids_to_install:
                # The broken pack was never requested (likely pulled in by
                # the server); retrying without it cannot help.
                logging.exception(
                    f'The pack {malformed_pack_id} has failed to install even though it was not in the installation list')
                raise
            logging.warning(f'The request to install packs on server {host} has failed, retrying without {malformed_pack_id}')
            # Remove the malformed pack from the pack to install list.
            packs_to_install = [pack for pack in packs_to_install if pack['id'] not in malformed_pack_id]
            request_data = {
                'packs': packs_to_install,
                'ignoreWarnings': True
            }
def install_packs_from_artifacts(client: demisto_client, host: str, test_pack_path: str, pack_ids_to_install: List):
    """
    Installs every zipped pack from the build-artifacts folder whose file name
    matches one of the requested pack IDs. Please note:
    The server always returns a 200 status even if the pack was not installed.

    :param client: Demisto-py client to connect to the server.
    :param host: FQDN of the server.
    :param test_pack_path: Path to the test pack directory.
    :param pack_ids_to_install: List of pack IDs to install.
    :return: None. Call to server waits until a successful response.
    """
    logging.info(f"Test pack path is: {test_pack_path}")
    logging.info(f"Pack IDs to install are: {pack_ids_to_install}")
    for zip_path in glob.glob(f"{test_pack_path}/*.zip"):
        # Substring match: the zip file name embeds the pack ID.
        if any(pack_id in zip_path for pack_id in pack_ids_to_install):
            logging.info(f'Installing the following pack: {zip_path}')
            upload_zipped_packs(client=client, host=host, pack_path=zip_path)
def install_packs_private(client: demisto_client,
                          host: str,
                          pack_ids_to_install: List,
                          test_pack_path: str):
    """ Make a packs installation request (private build flow).

    Args:
        client (demisto_client): The configured client to use.
        host (str): The server URL.
        pack_ids_to_install (list): List of Pack IDs to install.
        test_pack_path (str): Path where test packs are located.
    """
    # Private packs are installed straight from the build artifacts.
    install_packs_from_artifacts(
        client, host,
        test_pack_path=test_pack_path,
        pack_ids_to_install=pack_ids_to_install)
def install_packs(client: demisto_client,
                  host: str,
                  packs_to_install: list,
                  request_timeout: int = 999999,
                  is_nightly: bool = False):
    """ Make a packs installation request.

    Args:
        client (demisto_client): The configured client to use.
        host (str): The server URL.
        packs_to_install (list): A list of the packs to install.
        request_timeout (int): Timeout settings for the installation request.
        is_nightly (bool): Is the build nightly or not.

    Returns:
        The global SUCCESS_FLAG (False when this request failed), or None
        on the nightly path, which delegates to install_nightly_packs.
    """
    if is_nightly:
        install_nightly_packs(client, host, packs_to_install)
        return
    request_data = {
        'packs': packs_to_install,
        'ignoreWarnings': True
    }
    logging.info(f'Installing packs on server {host}')
    packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
    logging.debug(f'Installing the following packs on server {host}:\n{packs_to_install_str}')

    # make the pack installation request
    try:
        response_data, status_code, _ = demisto_client.generic_request_func(client,
                                                                            path='/contentpacks/marketplace/install',
                                                                            method='POST',
                                                                            body=request_data,
                                                                            accept='application/json',
                                                                            _request_timeout=request_timeout)
        if 200 <= status_code < 300:
            packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for
                          pack in
                          ast.literal_eval(response_data)]
            logging.success(f'Packs were successfully installed on server {host}')
            logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
        else:
            result_object = ast.literal_eval(response_data)
            message = result_object.get('message', '')
            raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
    except Exception as e:
        logging.exception(f'The request to install packs has failed. Additional info: {str(e)}')
        global SUCCESS_FLAG
        SUCCESS_FLAG = False
    # Was previously `finally: return SUCCESS_FLAG`, which would also swallow
    # any exception escaping the handler (flake8-bugbear B012); a plain
    # return after the try/except preserves the normal-path behavior.
    return SUCCESS_FLAG
def search_pack_and_its_dependencies(client: demisto_client,
                                     pack_id: str,
                                     packs_to_install: list,
                                     installation_request_body: list,
                                     lock: Lock):
    """ Searches for the pack of the specified file path, as well as its dependencies,
    and updates the list of packs to be installed accordingly.

    Runs as a worker thread; `packs_to_install` and
    `installation_request_body` are shared lists mutated under `lock`.

    Args:
        client (demisto_client): The configured client to use.
        pack_id (str): The id of the pack to be installed.
        packs_to_install (list) A list of the packs to be installed in this iteration.
        installation_request_body (list): A list of packs to be installed, in the request format.
        lock (Lock): A lock object.
    """
    pack_data = {}
    if pack_id not in packs_to_install:
        pack_display_name = get_pack_display_name(pack_id)
        if pack_display_name:
            pack_data = search_pack(client, pack_display_name, pack_id, lock)
        # NOTE(review): search_pack returns {} (not None) on failure, so this
        # fallback looks defensive/dead — confirm before relying on it.
        if pack_data is None:
            pack_data = {
                'id': pack_id,
                'version': '1.0.0'
            }
    if pack_data:
        dependencies = get_pack_dependencies(client, pack_data, lock)
        current_packs_to_install = [pack_data]
        if dependencies:
            # Check that the dependencies don't include a deprecated pack:
            for dependency in dependencies:
                pack_path = os.path.join(PACKS_FOLDER, dependency.get('id'))
                if is_pack_deprecated(pack_path):
                    logging.critical(f'Pack {pack_id} depends on pack {dependency.get("id")} which is a deprecated '
                                     f'pack.')
                    global SUCCESS_FLAG
                    SUCCESS_FLAG = False
                else:
                    # NOTE(review): extend() runs once per non-deprecated
                    # dependency, so the list can hold duplicates; the
                    # 'not in packs_to_install' check below dedupes them.
                    current_packs_to_install.extend(dependencies)
        # Append to the shared lists under the lock to avoid racing with
        # the other worker threads.
        lock.acquire()
        for pack in current_packs_to_install:
            if pack['id'] not in packs_to_install:
                packs_to_install.append(pack['id'])
                installation_request_body.append(pack)
        lock.release()
def get_latest_version_from_bucket(pack_id: str, production_bucket: Bucket) -> str:
    """ Retrieve the latest version of a pack stored in the production bucket.

    Args:
        pack_id (str): The pack id to retrieve the latest version
        production_bucket (Bucket): The GCS production bucket

    Returns: The latest version of the pack as it is in the production bucket
    """
    pack_bucket_path = os.path.join(GCPConfig.PRODUCTION_STORAGE_BASE_PATH, pack_id)
    logging.debug(f'Trying to get latest version for pack {pack_id} from bucket path {pack_bucket_path}')
    # The trailing '/' pins the prefix to this exact pack id (so 'Pack'
    # does not also match 'Pack2').
    zip_names = [blob.name
                 for blob in production_bucket.list_blobs(prefix=f'{pack_bucket_path}/')
                 if blob.name.endswith('.zip')]
    pack_versions = []
    for zip_name in zip_names:
        matched = PACK_PATH_VERSION_REGEX.findall(zip_name)
        if matched:
            pack_versions.append(Version(matched[0]))
    logging.debug(f'Found the following zips for {pack_id} pack: {pack_versions}')
    if not pack_versions:
        logging.error(f'Could not find any versions for pack {pack_id} in bucket path {pack_bucket_path}')
        return ''
    return str(max(pack_versions))
def get_pack_installation_request_data(pack_id: str, pack_version: str):
    """
    Build the installation request data of a given pack and its version.
    The request must have the ID and Version.

    :param pack_id: Id of the pack to add.
    :param pack_version: Version of the pack to add.
    :return: The request data part of the pack
    """
    return {'id': pack_id, 'version': pack_version}
def install_all_content_packs_for_nightly(client: demisto_client, host: str, service_account: str):
    """ Iterates over the packs currently located in the Packs directory. Wrapper for install_packs.
    Retrieving the latest version of each pack from the production bucket.

    :param client: Demisto-py client to connect to the server.
    :param host: FQDN of the server.
    :param service_account: The full path to the service account json.
    :return: None. Prints the response from the server in the build.
    """
    all_packs = []
    # Initiate the GCS client and get the production bucket
    storage_client = init_storage_client(service_account)
    production_bucket = storage_client.bucket(GCPConfig.PRODUCTION_BUCKET)
    logging.debug(f"Installing all content packs for nightly flow in server {host}")

    # Add deprecated packs to IGNORED_FILES list:
    # NOTE(review): this mutates the imported module-level IGNORED_FILES list
    # in place; any other user of that constant will see the additions.
    for pack_id in os.listdir(PACKS_FULL_PATH):
        if is_pack_hidden(pack_id):
            logging.debug(f'Skipping installation of hidden pack "{pack_id}"')
            IGNORED_FILES.append(pack_id)

    for pack_id in os.listdir(PACKS_FULL_PATH):
        if pack_id not in IGNORED_FILES:
            # Only request packs that actually have a version in the bucket.
            pack_version = get_latest_version_from_bucket(pack_id, production_bucket)
            if pack_version:
                all_packs.append(get_pack_installation_request_data(pack_id, pack_version))
    install_packs(client, host, all_packs, is_nightly=True)
def install_all_content_packs_from_build_bucket(client: demisto_client, host: str, server_version: str,
                                                bucket_packs_root_path: str, service_account: str,
                                                extract_destination_path: str):
    """ Iterates over the packs currently located in the Build bucket. Wrapper for install_packs.
    Retrieving the metadata of the latest version of each pack from the index.zip of the build bucket.

    :param client: Demisto-py client to connect to the server.
    :param host: FQDN of the server.
    :param server_version: The version of the server the packs are installed on.
    :param bucket_packs_root_path: The prefix to the root of packs in the bucket
    :param service_account: Google Service Account
    :param extract_destination_path: the full path of extract folder for the index.
    :return: None. Prints the response from the server in the build.
    """
    all_packs = []
    logging.debug(f"Installing all content packs in server {host} from packs path {bucket_packs_root_path}")

    storage_client = init_storage_client(service_account)
    build_bucket = storage_client.bucket(GCPConfig.CI_BUILD_BUCKET)
    # The index holds one folder per pack with that pack's latest metadata.
    index_folder_path, _, _ = download_and_extract_index(build_bucket, extract_destination_path, bucket_packs_root_path)

    for pack_id in os.listdir(index_folder_path):
        if os.path.isdir(os.path.join(index_folder_path, pack_id)):
            metadata_path = os.path.join(index_folder_path, pack_id, Pack.METADATA)
            pack_metadata = load_json(metadata_path)
            if 'partnerId' in pack_metadata:  # not installing private packs
                continue
            pack_version = pack_metadata.get(Metadata.CURRENT_VERSION, Metadata.SERVER_DEFAULT_MIN_VERSION)
            server_min_version = pack_metadata.get(Metadata.SERVER_MIN_VERSION, Metadata.SERVER_DEFAULT_MIN_VERSION)
            hidden = pack_metadata.get(Metadata.HIDDEN, False)
            # Check if the server version is greater than the minimum server version required for this pack or if the
            # pack is hidden (deprecated). 'Master' builds skip the version
            # gate entirely:
            if ('Master' in server_version or Version(server_version) >= Version(server_min_version)) and \
                    not hidden:
                logging.debug(f"Appending pack id {pack_id}")
                all_packs.append(get_pack_installation_request_data(pack_id, pack_version))
            else:
                reason = 'Is hidden' if hidden else f'min server version is {server_min_version}'
                logging.debug(f'Pack: {pack_id} with version: {pack_version} will not be installed on {host}. '
                              f'Pack {reason}.')
    return install_packs(client, host, all_packs)
def upload_zipped_packs(client: demisto_client,
                        host: str,
                        pack_path: str):
    """ Install packs from zip file.

    Args:
        client (demisto_client): The configured client to use.
        host (str): The server URL.
        pack_path (str): path to pack zip.

    Exits the whole process with code 1 when the upload fails.
    """
    header_params = {
        'Content-Type': 'multipart/form-data'
    }
    file_path = os.path.abspath(pack_path)
    files = {'file': file_path}
    logging.info(f'Making "POST" request to server {host} - to install all packs from file {pack_path}')

    # make the pack installation request
    try:
        response_data, status_code, _ = client.api_client.call_api(resource_path='/contentpacks/installed/upload',
                                                                   method='POST',
                                                                   header_params=header_params, files=files)
        if 200 <= status_code < 300:
            logging.info(f'All packs from file {pack_path} were successfully installed on server {host}')
        else:
            result_object = ast.literal_eval(response_data)
            message = result_object.get('message', '')
            raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
    except Exception:
        # Unlike the other installers, a zip-upload failure aborts the run
        # immediately instead of just flipping SUCCESS_FLAG.
        logging.exception('The request to install packs has failed.')
        sys.exit(1)
def search_and_install_packs_and_their_dependencies_private(test_pack_path: str,
                                                            pack_ids: list,
                                                            client: demisto_client):
    """ Install the given packs from the build artifacts (private flow).

    Args:
        test_pack_path (str): Path of where the test packs are located.
        pack_ids (list): A list of the pack ids to search and install.
        client (demisto_client): The client to connect to.

    Returns:
        bool: A flag that indicates if the operation succeeded or not.
    """
    server_host = client.api_client.configuration.host
    logging.info(f'Starting to search and install packs in server: {server_host}')
    install_packs_private(client, server_host, pack_ids, test_pack_path)
    return SUCCESS_FLAG
def search_and_install_packs_and_their_dependencies(pack_ids: list,
                                                    client: demisto_client):
    """ Searches for the packs from the specified list, searches their dependencies, and then
    installs them.

    Args:
        pack_ids (list): A list of the pack ids to search and install.
        client (demisto_client): The client to connect to.

    Returns (list, bool):
        A list of the installed packs' ids, or an empty list if is_nightly == True.
        A flag that indicates if the operation succeeded or not.
    """
    host = client.api_client.configuration.host
    logging.info(f'Starting to search and install packs in server: {host}')
    packs_to_install: list = []  # we save all the packs we want to install, to avoid duplications
    installation_request_body: list = []  # the packs to install, in the request format
    threads_list = []
    # One shared lock guards both lists above across the worker threads.
    lock = Lock()
    # One worker thread per requested pack resolves it and its dependencies.
    for pack_id in pack_ids:
        thread = Thread(target=search_pack_and_its_dependencies,
                        kwargs={'client': client,
                                'pack_id': pack_id,
                                'packs_to_install': packs_to_install,
                                'installation_request_body': installation_request_body,
                                'lock': lock})
        threads_list.append(thread)
    # presumably starts and joins all threads before returning — confirm in
    # demisto_sdk's run_threads_list.
    run_threads_list(threads_list)
    install_packs(client, host, installation_request_body)
    return packs_to_install, SUCCESS_FLAG
| |
from __future__ import unicode_literals
from django.db import models
from django.db.models import Q
from django import forms
from django.apps import apps
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.core.exceptions import ObjectDoesNotExist, ValidationError, NON_FIELD_ERRORS
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from .conf import (CUSTOM_TYPE_TEXT, CUSTOM_TYPE_INTEGER, CUSTOM_TYPE_FLOAT,
CUSTOM_TYPE_TIME, CUSTOM_TYPE_DATE, CUSTOM_TYPE_DATETIME, CUSTOM_TYPE_BOOLEAN,
settings)
from .utils import import_class
#==============================================================================
class CustomFieldsBuilder(object):
"""
The builder class is the core of django-custard.
From here it is possible to setup custom fields support for your models.
"""
#--------------------------------------------------------------------------
def __init__(self, fields_model, values_model,
custom_content_types=settings.CUSTOM_CONTENT_TYPES):
"""
Custom fields builder class. This helps defining classes to enable
custom fields in application.
:param fields_model: the app.Model name of fields model
:param values_model: the app.Model name of the values model
:param custom_content_types: which content types are allowed to have custom fields
:return:
"""
self.fields_model = fields_model.split(".")
self.values_model = values_model.split(".")
self.custom_content_types = custom_content_types
if self.custom_content_types and len(self.custom_content_types):
self.content_types_query = None
for c in self.custom_content_types:
model_tuple = c.split(".")
model_query = Q(app_label=model_tuple[0], model=model_tuple[1])
if self.content_types_query:
self.content_types_query |= model_query
else:
self.content_types_query = model_query
else:
self.content_types_query = Q()
#--------------------------------------------------------------------------
@property
def fields_model_class(self):
return apps.get_model(self.fields_model[0], self.fields_model[1])
@property
def values_model_class(self):
return apps.get_model(self.values_model[0], self.values_model[1])
#--------------------------------------------------------------------------
    def create_fields(self, base_model=models.Model, base_manager=models.Manager):
        """
        This method will create a model which will hold field types defined
        at runtime for each ContentType.

        :param base_model: base model class to inherit from
        :param base_manager: base manager class for the generated manager
        :return: the abstract CustomContentTypeField model class
        """
        # Bind the builder's content-type restriction to a local so the inner
        # class body can reference it in limit_choices_to.
        CONTENT_TYPES = self.content_types_query

        class CustomContentTypeFieldManager(base_manager):
            pass

        @python_2_unicode_compatible
        class CustomContentTypeField(base_model):
            # Data types a runtime-defined custom field may take.
            DATATYPE_CHOICES = (
                (CUSTOM_TYPE_TEXT, _('text')),
                (CUSTOM_TYPE_INTEGER, _('integer')),
                (CUSTOM_TYPE_FLOAT, _('float')),
                (CUSTOM_TYPE_TIME, _('time')),
                (CUSTOM_TYPE_DATE, _('date')),
                (CUSTOM_TYPE_DATETIME, _('datetime')),
                (CUSTOM_TYPE_BOOLEAN, _('boolean')),
            )

            # Which model this custom field attaches to; restricted to the
            # content types configured on the builder.
            content_type = models.ForeignKey(ContentType,
                                             verbose_name=_('content type'),
                                             related_name='+',
                                             limit_choices_to=CONTENT_TYPES)
            name = models.CharField(_('name'), max_length=100, db_index=True)
            label = models.CharField(_('label'), max_length=100)
            data_type = models.CharField(_('data type'), max_length=8, choices=DATATYPE_CHOICES, db_index=True)
            help_text = models.CharField(_('help text'), max_length=200, blank=True, null=True)
            required = models.BooleanField(_('required'), default=False)
            searchable = models.BooleanField(_('searchable'), default=True)
            # Optional constraints applied when rendering/cleaning values.
            initial = models.CharField(_('initial'), max_length=200, blank=True, null=True)
            min_length = models.PositiveIntegerField(_('min length'), blank=True, null=True)
            max_length = models.PositiveIntegerField(_('max length'), blank=True, null=True)
            min_value = models.FloatField(_('min value'), blank=True, null=True)
            max_value = models.FloatField(_('max value'), blank=True, null=True)

            objects = CustomContentTypeFieldManager()

            class Meta:
                verbose_name = _('custom field')
                verbose_name_plural = _('custom fields')
                # Abstract: concrete apps subclass this to get their own table.
                abstract = True

            def save(self, *args, **kwargs):
                super(CustomContentTypeField, self).save(*args, **kwargs)

            def clean(self):
                # if field is required must issue a initial value
                if self.required:
                    # TODO - must create values for all instances that have not
                    #print model.objects.values_list('pk', flat=True)
                    #print self.field.filter(content_type=self.content_type)
                    #objs = self.field.filter(content_type=self.content_type) \
                    #    .exclude(object_id__in=model.objects.values_list('pk', flat=True))
                    #for obj in objs:
                    #    print obj
                    pass

            def _check_validate_already_defined_in_model(self):
                # Reject a custom field whose name collides with a concrete
                # model field on the target content type.
                model = self.content_type.model_class()
                if self.name in [f.name for f in model._meta.fields]:
                    raise ValidationError({ 'name': (_('Custom field already defined as model field for content type %(model_name)s') % {'model_name': model.__name__},) })

            def _check_validate_already_defined_in_custom_fields(self):
                # Enforce per-content-type uniqueness of the field name,
                # excluding self when updating an existing row.
                model = self.content_type.model_class()
                qs = self.__class__._default_manager.filter(
                    content_type=self.content_type,
                    name=self.name,
                )
                if not self._state.adding and self.pk is not None:
                    qs = qs.exclude(pk=self.pk)
                if qs.exists():
                    raise ValidationError({ 'name': (_('Custom field already defined for content type %(model_name)s') % {'model_name': model.__name__},) })

            def __str__(self):
                return "%s" % self.name

        return CustomContentTypeField
#--------------------------------------------------------------------------
def create_values(self, base_model=models.Model, base_manager=models.Manager):
    """
    This method will create a model which will hold field values for
    field types of custom_field_model.
    :param base_model: base model class to inherit from
    :param base_manager: base manager class for the generated model's manager
    :return: the (abstract) ``CustomContentTypeFieldValue`` model class
    """
    _builder = self

    class CustomContentTypeFieldValueManager(base_manager):
        def create(self, **kwargs):
            """
            Subclass create in order to be able to use "value" in kwargs
            instead of using "value_%s" passing also type directly
            """
            if 'value' in kwargs:
                value = kwargs.pop('value')
                created_object = super(CustomContentTypeFieldValueManager, self).create(**kwargs)
                # The ``value`` property dispatches to the right value_* column
                # based on the custom field's data_type.
                # NOTE(review): the assigned value is set on the in-memory
                # instance only; no save() happens afterwards here — confirm
                # callers are expected to save the returned object.
                created_object.value = value
                return created_object
            else:
                return super(CustomContentTypeFieldValueManager, self).create(**kwargs)

    @python_2_unicode_compatible
    class CustomContentTypeFieldValue(base_model):
        custom_field = models.ForeignKey('.'.join(_builder.fields_model),
                                         verbose_name=_('custom field'),
                                         related_name='+')
        content_type = models.ForeignKey(ContentType, editable=False,
                                         verbose_name=_('content type'),
                                         limit_choices_to=_builder.content_types_query)
        object_id = models.PositiveIntegerField(_('object id'), db_index=True)
        content_object = GenericForeignKey('content_type', 'object_id')
        # One column per supported data type; only the column matching
        # custom_field.data_type is meaningful for a given row.
        value_text = models.TextField(blank=True, null=True)
        value_integer = models.IntegerField(blank=True, null=True)
        value_float = models.FloatField(blank=True, null=True)
        value_time = models.TimeField(blank=True, null=True)
        value_date = models.DateField(blank=True, null=True)
        value_datetime = models.DateTimeField(blank=True, null=True)
        value_boolean = models.NullBooleanField(blank=True)
        objects = CustomContentTypeFieldValueManager()

        def _get_value(self):
            # Read from the value_* column matching the field's data type.
            return getattr(self, 'value_%s' % self.custom_field.data_type)

        def _set_value(self, new_value):
            # Write to the value_* column matching the field's data type.
            setattr(self, 'value_%s' % self.custom_field.data_type, new_value)
        value = property(_get_value, _set_value)

        class Meta:
            unique_together = ('custom_field', 'content_type', 'object_id')
            verbose_name = _('custom field value')
            verbose_name_plural = _('custom field values')
            abstract = True

        def save(self, *args, **kwargs):
            # save content type as user shouldn't be able to change it
            self.content_type = self.custom_field.content_type
            super(CustomContentTypeFieldValue, self).save(*args, **kwargs)

        def validate_unique(self, exclude=None):
            # Enforce one value per (custom_field, content_type, object_id).
            qs = self.__class__._default_manager.filter(
                custom_field=self.custom_field,
                content_type=self.custom_field.content_type,
                object_id=self.object_id,
            )
            # When updating an existing row, exclude it from the check.
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                raise ValidationError({ NON_FIELD_ERRORS: (_('A value for this custom field already exists'),) })

        def __str__(self):
            return "%s: %s" % (self.custom_field.name, self.value)
    return CustomContentTypeFieldValue
#--------------------------------------------------------------------------
def create_manager(self, base_manager=models.Manager):
    """
    This will create the custom Manager that will use the fields_model and
    values_model respectively.
    :param base_manager: the base manager class to inherit from
    :return: the ``CustomManager`` class (not instantiated)
    """
    _builder = self

    class CustomManager(base_manager):
        def search(self, search_data, custom_args=None):
            """
            Search inside the custom fields for this model for any match
            of search_data and returns existing model instances
            :param search_data: the text to look for
            :param custom_args: extra filters applied to the custom fields queryset
            :return: queryset of matching model instances
            """
            # Fix: the default used to be a shared mutable ``{}``.
            if custom_args is None:
                custom_args = {}
            query = None
            # Only text columns support a case-insensitive contains lookup.
            lookups = (
                'value_text__icontains',
            )
            content_type = ContentType.objects.get_for_model(self.model)
            custom_args = dict({ 'content_type': content_type, 'searchable': True }, **custom_args)
            custom_fields = {f.name: f for f in _builder.fields_model_class.objects.filter(**custom_args)}
            # str() keeps the **kwargs key a native string under Python 2.
            pk_in = str('%s__in' % self.model._meta.pk.name)
            for value_lookup in lookups:
                for f in custom_fields.values():
                    found = _builder.values_model_class.objects.filter(**{ 'custom_field': f,
                                                                           'content_type': content_type,
                                                                           value_lookup: search_data })
                    # Evaluate once instead of count() followed by a second query.
                    object_ids = [obj.object_id for obj in found]
                    if object_ids:
                        if query is None:
                            query = Q()
                        query = query & Q(**{ pk_in: object_ids })
            if query is None:
                return self.get_queryset().none()
            return self.get_queryset().filter(query)
    return CustomManager
#--------------------------------------------------------------------------
def create_mixin(self):
    """
    This will create the custom Model Mixin to attach to your custom field
    enabled model.
    :return: the ``CustomModelMixin`` class
    """
    _builder = self

    class CustomModelMixin(object):
        @cached_property
        def _content_type(self):
            # Cached once per instance.
            return ContentType.objects.get_for_model(self)

        @classmethod
        def get_model_custom_fields(cls):
            """ Return a list of custom fields for this model, callable at model level """
            return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls))

        def get_custom_fields(self):
            """ Return a list of custom fields for this model """
            return _builder.fields_model_class.objects.filter(content_type=self._content_type)

        def get_custom_value(self, field):
            """ Get a value for a specified custom field """
            return _builder.values_model_class.objects.get(custom_field=field,
                                                           content_type=self._content_type,
                                                           object_id=self.pk)

        def set_custom_value(self, field, value):
            """ Set a value for a specified custom field """
            custom_value, created = \
                _builder.values_model_class.objects.get_or_create(custom_field=field,
                                                                  content_type=self._content_type,
                                                                  object_id=self.pk)
            custom_value.value = value
            # full_clean() runs field validation plus validate_unique().
            custom_value.full_clean()
            custom_value.save()
            return custom_value

        #def __getattr__(self, name):
        #    """ Get a value for a specified custom field """
        #    try:
        #        obj = _builder.values_model_class.objects.get(custom_field__name=name,
        #                                                      content_type=self._content_type,
        #                                                      object_id=self.pk)
        #        return obj.value
        #    except ObjectDoesNotExist:
        #        pass
        #    return super(CustomModelMixin, self).__getattr__(name)
    return CustomModelMixin
#--------------------------------------------------------------------------
def create_modelform(self, base_form=forms.ModelForm,
                     field_types=settings.CUSTOM_FIELD_TYPES,
                     widget_types=settings.CUSTOM_WIDGET_TYPES):
    """
    This creates the class that implements a ModelForm that knows about
    the custom fields
    :param base_form: the base form class to inherit from
    :param field_types: mapping of data_type -> dotted path of the formfield class
    :param widget_types: mapping of data_type -> dotted path of the widget class
    :return: the ``CustomFieldModelBaseForm`` class
    """
    _builder = self

    class CustomFieldModelBaseForm(base_form):
        def __init__(self, *args, **kwargs):
            """
            Constructor
            """
            # additional form variables
            self.custom_classes = None
            self.is_custom_form = True
            self.instance = None
            # construct the form
            super(CustomFieldModelBaseForm, self).__init__(*args, **kwargs)
            # init custom fields from model in the form
            self.init_custom_fields()

        def clean(self):
            """
            Clean the form
            """
            cleaned_data = super(CustomFieldModelBaseForm, self).clean()
            return cleaned_data

        def save(self, commit=True):
            """
            Save the form, then persist the custom field values when committing
            """
            self.instance = super(CustomFieldModelBaseForm, self).save(commit=commit)
            if self.instance and commit:
                self.instance.save()
                self.save_custom_fields()
            return self.instance

        def init_custom_fields(self):
            """
            Populate the ``form.fields[]`` with the additional fields coming from
            the custom fields models.
            """
            content_type = self.get_content_type()
            fields = self.get_fields_for_content_type(content_type)
            for f in fields:
                name = str(f.name)
                initial = f.initial
                self.fields[name] = self.get_formfield_for_field(f)
                # Mark so templates/admin can tell custom fields apart.
                self.fields[name].is_custom = True
                self.fields[name].label = f.label
                self.fields[name].required = f.required
                self.fields[name].widget = self.get_widget_for_field(f)
                # For an existing instance, prefill with its stored value.
                if self.instance and self.instance.pk:
                    value = self.search_value_for_field(f,
                                                        content_type,
                                                        self.instance.pk)
                    if len(value) > 0:
                        initial = value[0].value
                self.fields[name].initial = self.initial[name] = initial

        def save_custom_fields(self):
            """ Perform save and validation over the custom fields """
            if not self.instance.pk:
                raise Exception("The model instance has not been saved. Have you called instance.save() ?")
            content_type = self.get_content_type()
            fields = self.get_fields_for_content_type(content_type)
            for f in fields:
                name = str(f.name)
                fv = self.search_value_for_field(f,
                                                 content_type,
                                                 self.instance.pk)
                if len(fv) > 0:
                    # Update the existing value record.
                    value = fv[0]
                    value.value = self.cleaned_data[name]
                else:
                    # First value for this field/instance pair.
                    value = self.create_value_for_field(f,
                                                        self.instance.pk,
                                                        self.cleaned_data[name])
                value.save()

        def get_model(self):
            """
            Returns the actual model this ``ModelForm`` is referring to
            """
            return self._meta.model

        def get_content_type(self):
            """
            Returns the content type instance of the model this ``ModelForm`` is
            referring to
            """
            return ContentType.objects.get_for_model(self.get_model())

        def get_formfield_for_field(self, field):
            """
            Returns the defined formfield instance built from the type of the field
            :param field: custom field instance
            :return: the formfield instance
            """
            field_attrs = {
                'label': field.label,
                'help_text': field.help_text,
                'required': field.required,
            }
            # Per-type constraints are forwarded as formfield kwargs.
            if field.data_type == CUSTOM_TYPE_TEXT:
                #widget_attrs = {}
                if field.min_length:
                    field_attrs['min_length'] = field.min_length
                if field.max_length:
                    field_attrs['max_length'] = field.max_length
                #    widget_attrs['maxlength'] = field.max_length
                #field_attrs['widget'] = widgets.AdminTextInputWidget(attrs=widget_attrs)
            elif field.data_type == CUSTOM_TYPE_INTEGER:
                if field.min_value: field_attrs['min_value'] = int(float(field.min_value))
                if field.max_value: field_attrs['max_value'] = int(float(field.max_value))
                #field_attrs['widget'] = spinner.IntegerSpinnerWidget(attrs=field_attrs)
            elif field.data_type == CUSTOM_TYPE_FLOAT:
                if field.min_value: field_attrs['min_value'] = float(field.min_value)
                if field.max_value: field_attrs['max_value'] = float(field.max_value)
                #field_attrs['widget'] = spinner.SpinnerWidget(attrs=field_attrs)
            elif field.data_type == CUSTOM_TYPE_TIME:
                #field_attrs['widget'] = date.TimePickerWidget()
                pass
            elif field.data_type == CUSTOM_TYPE_DATE:
                #field_attrs['widget'] = date.DatePickerWidget()
                pass
            elif field.data_type == CUSTOM_TYPE_DATETIME:
                #field_attrs['widget'] = date.DateTimePickerWidget()
                pass
            elif field.data_type == CUSTOM_TYPE_BOOLEAN:
                pass
            field_type = import_class(field_types[field.data_type])
            return field_type(**field_attrs)

        def get_widget_for_field(self, field, attrs={}):
            """
            Returns the defined widget type instance built from the type of the field
            :param field: custom field instance
            :param attrs: attributes of widgets
            :return: the widget instance
            """
            # NOTE(review): mutable default ``attrs={}`` — harmless here since it
            # is only unpacked, never mutated, but ``None`` would be safer.
            return import_class(widget_types[field.data_type])(**attrs)

        def get_fields_for_content_type(self, content_type):
            """
            Returns all fields for a given content type
            Example implementation:
                return MyCustomField.objects.filter(content_type=content_type)
            :param content_type: content type to search
            :return: the custom field instances
            """
            return _builder.fields_model_class.objects.filter(content_type=content_type)

        def search_value_for_field(self, field, content_type, object_id):
            """
            This function will return the CustomFieldValue instance for a given
            field of an object that has the given content_type
            Example implementation:
                return MyCustomFieldValue.objects.filter(custom_field=field,
                                                         content_type=content_type,
                                                         object_id=object_id)
            :param field: the custom field instance
            :param content_type: the content type instance
            :param object_id: the object id this value is referring to
            :return: CustomFieldValue queryset
            """
            return _builder.values_model_class.objects.filter(custom_field=field,
                                                              content_type=content_type,
                                                              object_id=object_id)

        def create_value_for_field(self, field, object_id, value):
            """
            Create a value for a given field of an object
            Example implementation:
                return MyCustomFieldValue(custom_field=field,
                                          object_id=object_id,
                                          value=value)
            :param field: the custom field instance
            :param object_id: the object id this value is referring to
            :param value: the value to set
            :return: the value instance (not saved!)
            """
            return _builder.values_model_class(custom_field=field,
                                               object_id=object_id,
                                               value=value)
    return CustomFieldModelBaseForm
def create_modeladmin(self, base_admin=admin.ModelAdmin):
    """
    This creates the class that implements a ModelAdmin that knows about
    the custom fields (the original docstring wrongly said "ModelForm").
    :param base_admin: the base admin class to inherit from
    :return: the ``CustomFieldModelBaseAdmin`` class
    """
    # Removed: unused ``_builder = self`` local and a no-op __init__ that
    # only delegated to super().
    class CustomFieldModelBaseAdmin(base_admin):
        def save_model(self, request, obj, form, change):
            # Persist the instance first so custom values can reference its pk.
            obj.save()
            # Only forms built by create_modelform() expose save_custom_fields().
            if hasattr(form, 'save_custom_fields'):
                form.save_custom_fields()
    return CustomFieldModelBaseAdmin
#===============================================================================
# This class is an empty class to avoid migrations errors
class CustomModelMixin(object):
    """Empty placeholder class kept only to avoid migration errors."""
    pass
| |
# Copyright (c) 2015 Tencent Inc.
# All rights reserved.
#
# Author: Li Wenting <wentingli@tencent.com>
# Date: August 28, 2015
"""
This is the maven module which manages jar files downloaded
from maven repository
"""
import os
import shutil
import subprocess
import time
import configparse
import console
def is_valid_id(id):
    """Return True if *id* looks like ``group:artifact:version`` with no empty part."""
    parts = id.split(':')
    return len(parts) == 3 and all(parts)
class MavenArtifact(object):
    """
    MavenArtifact represents a single jar in the maven cache together with
    its transitive dependency classpath.

    ``path`` is the local filesystem path of the jar; ``deps`` holds the
    colon-separated dependency classpath, or None until it is resolved.
    """

    def __init__(self, path):
        # Dependencies are resolved lazily (see MavenCache.get_jar_deps_path).
        self.deps = None
        self.path = path
class MavenCache(object):
    """MavenCache. Manages maven jar files. """

    # Process-wide singleton instance.
    __instance = None

    @staticmethod
    def instance(log_dir):
        """Return the shared MavenCache, creating it on first use.

        Note: the ``log_dir`` of the first call wins; later values are ignored.
        """
        if not MavenCache.__instance:
            MavenCache.__instance = MavenCache(log_dir)
        return MavenCache.__instance

    def __init__(self, log_dir):
        """Init method. """
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        self.__log_dir = log_dir
        # key: (id, classifier)
        # id: jar id in the format group:artifact:version
        # value: an instance of MavenArtifact
        self.__jar_database = {}
        java_config = configparse.blade_config.get_config('java_config')
        self.__maven = java_config.get('maven')
        self.__central_repository = java_config.get('maven_central')
        # Local repository is set to the maven default directory
        # and could not be configured currently
        local_repository = '~/.m2/repository'
        self.__local_repository = os.path.expanduser(local_repository)
        self.__need_check_config = True
        # Download the snapshot artifact daily
        self.__build_time = time.time()
        self.__one_day_interval = 86400

    def _generate_jar_path(self, id):
        """Generate jar path within local repository. """
        group, artifact, version = id.split(':')
        return os.path.join(self.__local_repository,
                            group.replace('.', '/'), artifact, version)

    def _check_config(self):
        """Check whether maven is configured correctly. """
        # Only checked once per cache instance.
        if not self.__need_check_config:
            return
        if not self.__maven:
            console.error_exit('MavenCache was not configured')
        self.__need_check_config = False

    def _check_id(self, id):
        """Check if id is valid. """
        if not is_valid_id(id):
            console.error_exit('Invalid id %s: Id should be group:artifact:version, '
                               'such as jaxen:jaxen:1.1.6' % id)

    def _is_log_expired(self, log):
        """Check if the modification time of log file is expired relative to build time. """
        return self.__build_time - os.path.getmtime(log) > self.__one_day_interval

    def _download_jar(self, id, classifier):
        """Download the jar and pom of ``id``; return True on success.

        Immutable release versions are never re-fetched; SNAPSHOT versions
        are re-fetched once the copied download log is older than a day.
        """
        group, artifact, version = id.split(':')
        pom = artifact + '-' + version + '.pom'
        jar = artifact + '-' + version + '.jar'
        log = artifact + '__download.log'
        if classifier:
            jar = artifact + '-' + version + '-' + classifier + '.jar'
            log = artifact + '-' + classifier + '__download.log'
        log_path = os.path.join(self.__log_dir, log)
        target_path = self._generate_jar_path(id)
        target_log = os.path.join(target_path, log)
        if (os.path.isfile(os.path.join(target_path, jar)) and
            os.path.isfile(os.path.join(target_path, pom))):
            if not version.endswith('-SNAPSHOT'):
                return True
            if os.path.isfile(target_log) and not self._is_log_expired(target_log):
                return True
        # From here on ``id`` is only used for log messages, so the classifier
        # can safely be appended to it.
        if classifier:
            id = '%s:%s' % (id, classifier)
        console.info('Downloading %s from central repository...' % id)
        cmd = ' '.join([self.__maven,
                        'dependency:get',
                        '-DgroupId=%s' % group,
                        '-DartifactId=%s' % artifact,
                        '-Dversion=%s' % version])
        if classifier:
            cmd += ' -Dclassifier=%s' % classifier
        if subprocess.call('%s > %s' % (cmd, log_path), shell=True):
            console.warning('Error occurred when downloading %s from central '
                            'repository. Check %s for more details.' % (
                                id, log_path))
            # Retry the fetch without transitive dependencies before giving up.
            cmd += ' -Dtransitive=false'
            if subprocess.call('%s > %s' % (cmd, log_path + '.transitive'),
                               shell=True):
                return False
            console.warning('Download standalone artifact %s successfully, but '
                            'its transitive dependencies are unavailable.' % id)
        # Keep a copy of the log next to the jar; its mtime drives the daily
        # SNAPSHOT refresh policy above.
        shutil.copy(log_path, target_log)
        return True

    def _download_dependency(self, id, classifier):
        """Resolve the transitive classpath of ``id``; return True on success."""
        group, artifact, version = id.split(':')
        target_path = self._generate_jar_path(id)
        log, classpath = artifact + '__classpath.log', 'classpath.txt'
        if classifier:
            log = artifact + '-' + classifier + '__classpath.log'
            classpath = 'classpath-%s.txt' % classifier
        log = os.path.join(target_path, log)
        if os.path.isfile(os.path.join(target_path, classpath)):
            if not version.endswith('-SNAPSHOT'):
                return True
            if os.path.isfile(log) and not self._is_log_expired(log):
                return True
        if classifier:
            id = '%s:%s' % (id, classifier)
            # Currently analyzing dependencies of classifier jar
            # usually fails. Here when there is no classpath.txt
            # file but classpath.log exists, that means the failure
            # of analyzing dependencies last time
            if (not os.path.exists(os.path.join(target_path, classpath))
                and os.path.exists(log)):
                return False
        console.info('Downloading %s dependencies...' % id)
        pom = os.path.join(target_path, artifact + '-' + version + '.pom')
        cmd = ' '.join([self.__maven,
                        'dependency:build-classpath',
                        '-DincludeScope=runtime',
                        '-Dmdep.outputFile=%s' % classpath])
        if classifier:
            cmd += ' -Dclassifier=%s' % classifier
        cmd += ' -f %s > %s' % (pom, log)
        if subprocess.call(cmd, shell=True):
            console.warning('Error occurred when resolving %s dependencies. '
                            'Check %s for more details.' % (id, log))
            return False
        return True

    def _download_artifact(self, id, classifier):
        """Download the specified jar and its transitive dependencies. """
        if not self._download_jar(id, classifier):
            return False
        group, artifact, version = id.split(':')
        path = self._generate_jar_path(id)
        jar = artifact + '-' + version + '.jar'
        if classifier:
            jar = artifact + '-' + version + '-' + classifier + '.jar'
        self.__jar_database[(id, classifier)] = MavenArtifact(os.path.join(path, jar))
        return True

    def _get_artifact_from_database(self, id, classifier):
        """get_artifact_from_database. """
        self._check_config()
        self._check_id(id)
        if (id, classifier) not in self.__jar_database:
            if not self._download_artifact(id, classifier):
                console.error_exit('Download %s failed' % id)
        return self.__jar_database[(id, classifier)]

    def get_jar_path(self, id, classifier):
        """get_jar_path

        Return local jar path corresponding to the id specified in the
        format group:artifact:version.
        Download jar files and its transitive dependencies if needed.
        """
        artifact = self._get_artifact_from_database(id, classifier)
        return artifact.path

    def get_jar_deps_path(self, id, classifier):
        """get_jar_deps_path

        Return a string of the dependencies path separated by colon.
        This string can be used in java -cp later.
        """
        artifact = self._get_artifact_from_database(id, classifier)
        if artifact.deps is None:
            if not self._download_dependency(id, classifier):
                # Ignore dependency download error
                artifact.deps = ''
            else:
                path = self._generate_jar_path(id)
                classpath = os.path.join(path, 'classpath.txt')
                if classifier:
                    classpath = os.path.join(path, 'classpath-%s.txt' % classifier)
                with open(classpath) as f:
                    # Read the first line
                    artifact.deps = f.readline()
        return artifact.deps
| |
"""Script to check the configuration file."""
from __future__ import annotations
import argparse
import asyncio
from collections import OrderedDict
from collections.abc import Callable, Mapping, Sequence
from glob import glob
import logging
import os
from typing import Any
from unittest.mock import patch
from homeassistant import core
from homeassistant.config import get_default_config_dir
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import area_registry, device_registry, entity_registry
from homeassistant.helpers.check_config import async_check_ha_config_file
from homeassistant.util.yaml import Secrets
import homeassistant.util.yaml.loader as yaml_loader
# mypy: allow-untyped-calls, allow-untyped-defs
REQUIREMENTS = ("colorlog==6.6.0",)

_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
# Maps a mock key to (dotted path to patch, original callable to delegate to).
# A trailing "*" in the key lets several patch targets share one side_effect
# function in check() (the "*" is stripped when looking up mock_<key>).
MOCKS: dict[str, tuple[str, Callable]] = {
    "load": ("homeassistant.util.yaml.loader.load_yaml", yaml_loader.load_yaml),
    "load*": ("homeassistant.config.load_yaml", yaml_loader.load_yaml),
    "secrets": ("homeassistant.util.yaml.loader.secret_yaml", yaml_loader.secret_yaml),
}
# unittest.mock patchers created, started and stopped by check().
PATCHES: dict[str, Any] = {}

C_HEAD = "bold"  # color used for section headings in the report
ERROR_STR = "General Errors"
def color(the_color, *args, reset=None):
    """Color helper.

    With no extra *args*, return just the escape sequence for *the_color*;
    otherwise return the space-joined *args* wrapped in the color and a
    trailing reset code.
    """
    # pylint: disable=import-outside-toplevel
    from colorlog.escape_codes import escape_codes, parse_colors

    try:
        if not args:
            assert reset is None, "You cannot reset if nothing being printed"
            return parse_colors(the_color)
        prefix = parse_colors(the_color)
        body = " ".join(args)
        return prefix + body + escape_codes[reset or "reset"]
    except KeyError as k:
        raise ValueError(f"Invalid color {k!s} in {the_color}") from k
def run(script_args: list) -> int:
    """Handle check config commandline script.

    Returns the number of failing domains (0 means the config is valid).
    """
    parser = argparse.ArgumentParser(description="Check Home Assistant configuration.")
    parser.add_argument("--script", choices=["check_config"])
    parser.add_argument(
        "-c",
        "--config",
        default=get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration",
    )
    parser.add_argument(
        "-i",
        "--info",
        nargs="?",
        default=None,
        const="all",
        help="Show a portion of the config",
    )
    parser.add_argument(
        "-f", "--files", action="store_true", help="Show used configuration files"
    )
    parser.add_argument(
        "-s", "--secrets", action="store_true", help="Show secret information"
    )
    args, unknown = parser.parse_known_args()
    if unknown:
        print(color("red", "Unknown arguments:", ", ".join(unknown)))
    config_dir = os.path.join(os.getcwd(), args.config)
    print(color("bold", "Testing configuration at", config_dir))

    # Run the actual validation; everything below is reporting.
    res = check(config_dir, args.secrets)

    domain_info: list[str] = []
    if args.info:
        domain_info = args.info.split(",")

    if args.files:
        # List every yaml file under the config dir, red when unused.
        print(color(C_HEAD, "yaml files"), "(used /", color("red", "not used") + ")")
        deps = os.path.join(config_dir, "deps")
        yaml_files = [
            f
            for f in glob(os.path.join(config_dir, "**/*.yaml"), recursive=True)
            if not f.startswith(deps)
        ]
        for yfn in sorted(yaml_files):
            the_color = "" if yfn in res["yaml_files"] else "red"
            print(color(the_color, "-", yfn))

    if res["except"]:
        print(color("bold_white", "Failed config"))
        for domain, config in res["except"].items():
            domain_info.append(domain)
            print(" ", color("bold_red", domain + ":"), color("red", "", reset="red"))
            dump_dict(config, reset="red")
            print(color("reset"))

    if domain_info:
        if "all" in domain_info:
            print(color("bold_white", "Successful config (all)"))
            for domain, config in res["components"].items():
                print(" ", color(C_HEAD, domain + ":"))
                dump_dict(config)
        else:
            print(color("bold_white", "Successful config (partial)"))
            for domain in domain_info:
                if domain == ERROR_STR:
                    continue
                print(" ", color(C_HEAD, domain + ":"))
                dump_dict(res["components"].get(domain))

    if args.secrets:
        # Report which secrets each file defines (green when actually used).
        flatsecret: dict[str, str] = {}
        for sfn, sdict in res["secret_cache"].items():
            sss = []
            for skey in sdict:
                if skey in flatsecret:
                    _LOGGER.error(
                        "Duplicated secrets in files %s and %s", flatsecret[skey], sfn
                    )
                flatsecret[skey] = sfn
                sss.append(color("green", skey) if skey in res["secrets"] else skey)
            print(color(C_HEAD, "Secrets from", sfn + ":"), ", ".join(sss))
        print(color(C_HEAD, "Used Secrets:"))
        for skey, sval in res["secrets"].items():
            if sval is None:
                print(" -", skey + ":", color("red", "not found"))
                continue
            print(" -", skey + ":", sval)
    return len(res["except"])
def check(config_dir, secrets=False):
    """Perform a check by mocking hass load functions.

    Patches the yaml load/secret functions so every loaded file and secret
    is recorded, runs the async config check, and returns a result dict with
    keys: yaml_files, secrets, except, secret_cache, components.
    """
    logging.getLogger("homeassistant.loader").setLevel(logging.CRITICAL)
    res: dict[str, Any] = {
        "yaml_files": OrderedDict(),  # yaml_files loaded
        "secrets": OrderedDict(),  # secret cache and secrets loaded
        "except": OrderedDict(),  # exceptions raised (with config)
        #'components' is a HomeAssistantConfig  # noqa: E265
        "secret_cache": {},
    }

    # NOTE: mock_load / mock_secrets are looked up BY NAME through locals()
    # below — do not rename them.
    # pylint: disable=possibly-unused-variable
    def mock_load(filename, secrets=None):
        """Mock hass.util.load_yaml to save config file names."""
        res["yaml_files"][filename] = True
        return MOCKS["load"][1](filename, secrets)

    # pylint: disable=possibly-unused-variable
    def mock_secrets(ldr, node):
        """Mock _get_secrets."""
        try:
            val = MOCKS["secrets"][1](ldr, node)
        except HomeAssistantError:
            # Record the lookup even when the secret cannot be resolved.
            val = None
        res["secrets"][node.value] = val
        return val

    # Patches with local mock functions
    for key, val in MOCKS.items():
        if not secrets and key == "secrets":
            continue
        # The * in the key is removed to find the mock_function (side_effect)
        # This allows us to use one side_effect to patch multiple locations
        mock_function = locals()[f"mock_{key.replace('*', '')}"]
        PATCHES[key] = patch(val[0], side_effect=mock_function)

    # Start all patches
    for pat in PATCHES.values():
        pat.start()

    if secrets:
        # Ensure !secrets point to the patched function
        yaml_loader.SafeLineLoader.add_constructor("!secret", yaml_loader.secret_yaml)

    def secrets_proxy(*args):
        # Wrap Secrets construction so its internal cache can be inspected.
        secrets = Secrets(*args)
        res["secret_cache"] = secrets._cache
        return secrets

    try:
        with patch.object(yaml_loader, "Secrets", secrets_proxy):
            res["components"] = asyncio.run(async_check_config(config_dir))
        res["secret_cache"] = {
            str(key): val for key, val in res["secret_cache"].items()
        }
        # Group validation errors (and the offending config) by domain.
        for err in res["components"].errors:
            domain = err.domain or ERROR_STR
            res["except"].setdefault(domain, []).append(err.message)
            if err.config:
                res["except"].setdefault(domain, []).append(err.config)
    except Exception as err:  # pylint: disable=broad-except
        print(color("red", "Fatal error while loading config:"), str(err))
        res["except"].setdefault(ERROR_STR, []).append(str(err))
    finally:
        # Stop all patches
        for pat in PATCHES.values():
            pat.stop()
        if secrets:
            # Ensure !secrets point to the original function
            yaml_loader.SafeLineLoader.add_constructor(
                "!secret", yaml_loader.secret_yaml
            )
    return res
async def async_check_config(config_dir):
    """Check the HA config.

    Spins up a throwaway HomeAssistant instance pointed at *config_dir*,
    runs the config-file check, and returns its result.
    """
    hass = core.HomeAssistant()
    hass.config.config_dir = config_dir
    # The registries must be loaded before config validation can run.
    await area_registry.async_load(hass)
    await device_registry.async_load(hass)
    await entity_registry.async_load(hass)
    components = await async_check_ha_config_file(hass)
    # Tear the instance down; force=True skips the graceful shutdown path.
    await hass.async_stop(force=True)
    return components
def line_info(obj, **kwargs):
    """Display line config source."""
    if not hasattr(obj, "__config_file__"):
        return "?"
    location = f"[source {obj.__config_file__}:{obj.__line__ or '?'}]"
    return color("cyan", location, **kwargs)
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
    """Display a dict.

    A friendly version of print yaml_loader.yaml.dump(config).
    """

    def sort_dict_key(val):
        """Return the dict key for sorting."""
        key = str(val[0]).lower()
        # "platform" always sorts first within a mapping.
        return "0" if key == "platform" else key

    indent_str = indent_count * " "
    if listi or isinstance(layer, list):
        # List items get a "-" marker in place of the last indent space.
        indent_str = indent_str[:-1] + "-"
    if isinstance(layer, Mapping):
        for key, value in sorted(layer.items(), key=sort_dict_key):
            if isinstance(value, (dict, list)):
                print(indent_str, str(key) + ":", line_info(value, **kwargs))
                # NOTE(review): kwargs (e.g. reset color) are not forwarded to
                # this recursive call — confirm whether that is intentional.
                dump_dict(value, indent_count + 2)
            else:
                print(indent_str, str(key) + ":", value)
            indent_str = indent_count * " "
    if isinstance(layer, Sequence):
        for i in layer:
            if isinstance(i, dict):
                dump_dict(i, indent_count + 2, True)
            else:
                print(" ", indent_str, i)
| |
"""
* English / US language file
*
*
* $Id$
*
"""
trans = {
# Login form
'Login' : 'Log in',
'Log out' : 'Log out',
'Username' : 'Username',
'Password' : 'Password',
'Login failed' : 'Failed to log in to VirtualBox Web Console. Please check your username and password.',
# General actions
'File' : 'File',
'Edit' : 'Edit',
'Save' : 'Save',
'OK' : 'OK',
'Cancel' : 'Cancel',
'Create' : 'Create',
'Select' : 'Select',
'Up' : 'Up',
'Down' : 'Down',
'Yes' : 'Yes',
'No' : 'No',
'Close' : 'Close',
'Any' : 'Any',
'New' : 'New',
'Add' : 'Add',
'Delete' : 'Delete',
'Keep' : 'Keep',
'Settings' : 'Settings',
'Preferences' : 'Preferences',
'Refresh' : 'Refresh',
'Start' : 'Start',
'Power Off' : 'Power Off',
'Details' : 'Details',
'Console' : 'Console',
'Description' : 'Description',
'Configuration' : 'Configuration',
'Operating System' : 'Operating System',
'Machine' : 'Machine',
'Enabled' : 'Enabled',
'Disabled' : 'Disabled',
'Hosting' : 'Hosting',
'Basic' : 'Basic',
'Advanced' : 'Advanced',
'None' : 'None',
'Help' : 'Help',
'About' : 'About',
'Version' : 'Version',
'VirtualBox User Manual' : 'VirtualBox User Manual',
'Operation Canceled' : 'Operation Canceled',
'Next' : 'Next',
'Back' : 'Back',
'Finish' : 'Finish',
'Select File' : 'Select File',
'Select Folder' : 'Select Folder',
'Server List' : 'Server List',
# Power button
'Stop' : 'Stop',
'Pause' : 'Pause',
'Reset' : 'Reset',
'Save State' : 'Save State',
'ACPI Power Button' : 'ACPI Power Button',
'ACPI Sleep Button' : 'ACPI Sleep Button',
'ACPI event not handled' : 'The ACPI event was not handled by the virtual machine.',
'Approx X remaining' : 'Approx %s remaining', # %s will be replaced with a time. E.g. Approx 2 minutes, 4 seconds remaining #
'X ago' : '%s ago', # %s will be replaced with a time. E.g. 20 hours ago #
'minutes' : 'minutes',
'seconds' : 'seconds',
'hours' : 'hours',
'days' : 'days',
# Preview box #
'Preview' : 'Preview',
'Update Disabled' : 'Update Disabled',
'Every X seconds' : 'Every %s seconds', # %s will be replaced with numeric values #
'Open in new window' : 'Open in new window', # View saved VM screenshot #
# Snapshots #
'Snapshots' : 'Snapshots',
'Snapshot Folder' : 'Snapshot Folder',
'Current State' : 'Current State',
'Restore' : 'Restore',
'Restore Snapshot' : 'Restore Snapshot',
'Take Snapshot' : 'Take Snapshot',
'Delete Snapshot' : 'Delete Snapshot',
'Snapshot Details' : 'Snapshot Details',
'Snapshot Name' : 'Snapshot Name',
'Snapshot Description' : 'Snapshot Description',
'Restore Snapshot Message' : 'Are you sure you want to restore snapshot %s? This will cause you to lose your current machine state, which cannot be recovered.',
'Delete Snapshot Message1' : 'Deleting the snapshot will cause the state information saved in it to be lost, and disk data spread over several image files that VirtualBox has created together with the snapshot will be merged into one file. This can be a lengthy process, and the information in the snapshot cannot be recovered.',
'Delete Snapshot Message2' : 'Are you sure you want to delete the selected snapshot %s?',
'Taken' : 'Taken',
'changed' : 'changed',
# Discard State #
'Discard' : 'Discard',
'Discard Message1' : 'Are you sure you want to discard the saved state of the virtual machine %s?', # %s willl be replaced with VM name
'Discard Message2' : 'This operation is equivalent to resetting or powering off the machine without doing a proper shutdown of the guest OS.',
# Delete or Unregister Inaccessible Machine #
'VM Inaccessible' : 'The selected VM is inaccessible. Please respect the error message shown below and press the Refresh button if you wish to repeat the accessibility check.',
'Delete VM Message1' : 'Are you sure you want to permanently delete the virtual machine %s?',
'Delete VM Message2' : 'This operation cannot be undone.',
'Delete VM Message3' : 'If you select <b>Delete All</b> everything gets removed. This includes the machine itself, but also the virtual disks attached to it. If you want preserve the virtual disks for later use, select <b>Keep Harddisks</b>.',
'Delete All' : 'Delete All',
'Keep Harddisks' : 'Keep Harddisks',
'Unregister VM Message1' : 'Are you sure you want to unregister the inaccessible virtual machine %s?',
'Unregister VM Message2' : 'You will not be able to register it again from this GUI',
'Unregister' : 'Unregister',
# Error fetching machines #
'Error vmlist 1' : 'There was an error obtaining the list of registered virtual machines from VirtualBox. Make sure vboxwebsrv is running and that the settings in vboxweb\'s configuration file are correct.',
'Error vmlist 2' : 'The list of virtual machines will not begin auto-refreshing again until this page is reloaded.',
# Properties #
'host' : 'VirtualBox Host',
'Port' : 'Port',
'General' : 'General',
'Name' : 'Name',
'OS Type' : 'OS Type',
# Options in Preferences / Global Settings #
'Default Hard Disk Folder' : 'Default Hard Disk Folder',
'Default Machine Folder' : 'Default Machine Folder',
'VRDP Authentication Library' : 'VRDP Authentication Library',
'Add host-only network' : 'Add host-only network',
'Remove host-only network' : 'Remove host-only network',
'Edit host-only network' : 'Edit host-only network',
'Host-only Network Details' : 'Host-only Network Details',
'Host-only Networks' : 'Host-only Networks',
'IPv4Mask' : 'Network Mask',
'IPv6Mask' : 'IPv6 Network Mask Length',
'Server Address' : 'Server Address',
'Server Mask' : 'Server Mask',
'Lower Address Bound' : 'Lower Address Bound',
'Upper Address Bound' : 'Upper Address Bound',
'DHCP Server' : 'DHCP Server',
'DHCP enabled' : 'DHCP enabled',
'Manually configured' : 'Manually configured',
'Delete Interface Message1' : 'Deleting this host-only network will remove the host-only interface this network is based on. Do you want to remove the (host-only network) interface %s?',
'Delete Interface Message2' : 'Note: this interface may be in use by one or more virtual network adapters belonging to one of your VMs. After it is removed, these adapters will no longer be usable until you correct their settings by either choosing a different interface name or a different adapter attachment type.',
'System' : 'System',
'Base Memory' : 'Base Memory',
'Memory' : 'Memory',
'free' : 'free', # as in free/available memory
'Enable IO APIC' : 'Enable IO APIC',
'Enable EFI' : 'Enable EFI (special OSes only)',
'Hardware clock in UTC time' : 'Hardware clock in UTC time',
'Processors' : 'Processor(s)',
'Boot Order' : 'Boot Order',
'Removable Media' : 'Removable Media',
'Remember Runtime Changes' : 'Remember Runtime Changes',
'Motherboard' : 'Motherboard',
'Acceleration' : 'Acceleration',
'Extended Features' : 'Extended Features',
'CPUs' : 'CPUs',
'VCPU' : 'VT-x/AMD-V',
'Nested Paging' : 'Nested Paging',
'Hardware Virtualization' : 'Hardware Virtualization',
'Enable VCPU' : 'Enable VT-x/AMD-V',
'Enable Nested Paging' : 'Enable Nested Paging',
'Enable PAE/NX' : 'Enable PAE/NX',
'Display' : 'Display',
'Video' : 'Video',
'Video 2d' : '2D Acceleration',
'Video 3d' : '3D Acceleration',
'Video Memory' : 'Video Memory',
'Remote Display' : 'Remote Display',
'Remote Console' : 'Remote Console (RDP)',
'Ports' : 'Ports',
'Net Address' : 'Net Address',
'Enable Server' : 'Enable Server',
'Server Port' : 'Server Port',
'Authentication Timeout' : 'Authentication Timeout',
'Authentication Method' : 'Authentication Method',
'External' : 'External',
'Guest' : 'Guest',
'Allow Multiple Connections' : 'Allow Multiple Connections',
'Storage' : 'Storage',
'Storage Tree' : 'Storage Tree',
'Attributes' : 'Attributes',
'Type' : 'Type',
'Slot' : 'Slot',
'Size' : 'Size',
'Virtual Size' : 'Virtual Size',
'Actual Size' : 'Actual Size',
'Location' : 'Location',
'Information' : 'Information',
'Use host I/O cache' : 'Use host I/O cache',
'IDE Controller' : 'IDE Controller',
'Primary Master' : 'Primary Master',
'Primary Slave' : 'Primary Slave',
'Secondary Master' : 'Secondary Master',
'Secondary Slave' : 'Secondary Slave',
'Floppy Controller' : 'Floppy Controller',
'Floppy Device' : 'Floppy Device',
'SCSI Controller' : 'SCSI Controller',
'SCSI Port' : 'SCSI Port',
'SATA Controller' : 'SATA Controller',
'SATA Port' : 'SATA Port',
'SAS Controller' : 'SAS Controller',
'SAS Port' : 'SAS Port',
'HardDisk' : 'Hard Disk',
'Floppy' : 'Floppy',
'DVD' : 'CD/DVD',
'Type (Format)' : 'Type (Format)',
'Add Attachment' : 'Add Attachment',
'Remove Attachment' : 'Remove Attachment',
'Add Controller' : 'Add Controller',
'Remove Controller' : 'Remove Controller',
'Add CD/DVD Device' : 'Add CD/DVD Device',
'Add Hard Disk' : 'Add Hard Disk',
'Add Floppy Device' : 'Add Floppy Device',
'DVD Device' : 'CD/DVD Device',
'Empty' : 'Empty',
'Passthrough' : 'Passthrough',
'Unknown Device' : 'Unknown Device',
'Host Drive' : 'Host Drive',
'Add IDE Controller' : 'Add IDE Controller',
'Add Floppy Controller' : 'Add Floppy Controller',
'Add SCSI Controller' : 'Add SCSI Controller',
'Add SATA Controller' : 'Add SATA Controller',
'Add SAS Controller' : 'Add SAS Controller',
'LsiLogic' : 'LsiLogic',
'BusLogic' : 'BusLogic',
'IntelAhci' : 'AHCI',
'PIIX3' : 'PIIX3',
'PIIX4' : 'PIIX4',
'ICH6' : 'ICH6',
'I82078' : 'I82078',
'LsiLogicSas' : 'LsiLogic SAS',
'Differencing Disks' : 'Differencing Disks',
'No unused media message 1' : 'There is no unused media available for the newly created attachment.',
'No unused media message 2' : 'Press the Create button to start the New Virtual Disk wizard and create a new medium, or press Select if you wish to open the Virtual Media Manager.',
'storage attached indirectly' : 'Attaching this disk will be performed indirectly using a newly created differencing hard disk.',
'base disk indirectly attached' : 'This base hard disk is indirectly attached using the following differencing hard disk:',
'Attached to' : 'Attached to',
'Not Attached' : 'Not Attached',
'USB' : 'USB',
'USB Controller' : 'USB Controller',
'Enable USB Controller' : 'Enable USB Controller',
'Enable USB 2.0 Controller' : 'Enable USB 2.0 Controller',
'USB Device Filters' : 'USB Device Filters',
'Add Empty Filter' : 'Add Empty Filter',
'Add Filter From Device' : 'Add Filter From Device',
'Edit Filter' : 'Edit Filter',
'Remove Filter' : 'Remove Filter',
'Move Filter Up' : 'Move Filter Up',
'Move Filter Down' : 'Move Filter Down',
'Device Filters' : 'Device Filters',
'active' : 'active',
'USB Filter' : 'USB Filter',
'New Filter' : 'New Filter',
'Vendor ID' : 'Vendor ID',
'Product ID' : 'Product ID',
'Revision' : 'Revision',
'Manufacturer' : 'Manufacturer',
'Serial No' : 'Serial No.',
'Remote' : 'Remote',
'Shared Folders' : 'Shared Folders',
'Shared Folder' : 'Shared Folder',
'Folders List' : 'Folders List',
'Path' : 'Path',
'Access' : 'Access',
# read only & read/write
'ro' : 'Read-Only',
'rw' : 'Writable',
'Auto-Mount' : 'Auto-Mount', # 3.2.8
'Full Access' : 'Full Access',
'Add Shared Folder' : 'Add Shared Folder',
'Edit Shared Folder' : 'Edit Shared Folder',
'Remove Shared Folder' : 'Remove Shared Folder',
'Audio' : 'Audio',
'Enable Audio' : 'Enable Audio',
'Host Audio Driver' : 'Host Audio Driver',
'Audio Controller' : 'Audio Controller',
'WinMM' : 'Windows multimedia',
'Null Audio Driver' : 'Null Audio Driver',
'OSS' : 'Open Sound System',
'ALSA' : 'Advanced Linux Sound Architecture',
'DirectSound' : 'Microsoft DirectSound',
'CoreAudio' : 'Core Audio',
'MMPM' : 'Reserved for historical reasons.', # In API. May never see it in the real world #
'Pulse' : 'Pulse Audio',
'SolAudio' : 'Solaris Audio',
'HDA' : 'Intel HD Audio', # 3.2.8
'AC97' : 'ICH AC97',
'SB16' : 'SoundBlaster 16',
'Network' : 'Network',
'Adapter' : 'Adapter',
'Network Adapter' : 'Network Adapter',
'Enable Network Adapter' : 'Enable Network Adapter',
'Adapter Type' : 'Adapter Type',
'adapter' : 'adapter',
'Bridged' : 'Bridged',
'Bridged Adapter' : 'Bridged Adapter',
'HostOnly' : 'Host Only',
'Internal' : 'Internal',
'Internal Network' : 'Internal Network',
'Host-only Adapter' : 'Host-only Adapter',
'NAT' : 'NAT',
'network' : 'network',
'Ethernet' : 'Ethernet',
'PPP' : 'PPP',
'SLIP' : 'SLIP',
'IPv4Addr' : 'IP Address',
'IPv6Addr' : 'IP(v6) Address',
'Mac Address' : 'MAC Address',
'Cable connected' : 'Cable connected',
'netMediumType' : 'Type',
'Guest Network Adapters' : 'Guest Network Adapters',
# New #
'Port Forwarding' : 'Port Forwarding',
'Port Forwarding Rules' : 'Port Forwarding Rules',
'Protocol' : 'Protocol',
'Host IP' : 'Host IP',
'Host Port' : 'Host Port',
'Guest IP' : 'Guest IP',
'Guest Port' : 'Guest Port',
'TCP' : 'TCP',
'UDP' : 'UDP',
'Rule' : 'Rule',
'Insert new rule' : 'Insert new rule',
'Delete selected rule' : 'Delete selected rule',
'Invalid IP Address' : 'Invalid IP Address',
'The current port forwarding rules are not valid' : 'The current port forwarding rules are not valid. None of the host or guest port values may be set to zero.',
'Am79C970A' : 'AMD PCNet-PCI II network card',
'Am79C973' : 'AMD PCNet-FAST III network card',
'I82540EM' : 'Intel PRO/1000 MT Desktop network card',
'I82543GC' : 'Intel PRO/1000 T Server network card',
'I82545EM' : 'Intel PRO/1000 MT Server network card',
'Virtio' : 'Virtio network device ',
# Machine states
'PoweredOff' : 'Powered Off',
'Saved' : 'Saved',
'Teleported' : 'Teleported',
'Aborted' : 'Aborted',
'Running' : 'Running',
'Paused' : 'Paused',
'Stuck' : 'Stuck',
'Teleporting' : 'Teleporting',
'LiveSnapshotting' : 'Live Snapshotting',
'Starting' : 'Starting',
'Stopping' : 'Stopping',
'Saving' : 'Saving',
'Restoring' : 'Restoring',
'TeleportingPausedVM' : 'Teleporting Paused VM',
'TeleportingIn' : 'Teleporting In',
'RestoringSnapshot' : 'Restoring Snapshot',
'DeletingSnapshot' : 'Deleting Snapshot',
'SettingUp' : 'Setting Up',
'FirstOnline' : 'First Online',
'LastOnline' : 'Last Online',
'FirstTransient' : 'First Transient',
'LastTransient' : 'Last Transient',
# Mount dialog
'Mount' : 'Mount',
# list separator
'LIST_SEP' : ', ',
# Sizes
'B' : 'B',
'KB' : 'KB',
'MB' : 'MB',
'GB' : 'GB',
'TB' : 'TB',
# Virtual Media Manager
'Open Virtual Media Manager' : 'Open Virtual Media Manager',
'Virtual Media Manager' : 'Virtual Media Manager',
'Are you sure remove medium' : 'Are you sure remove the medium %s from the list of known media?',
'Medium remove note' : 'Note that the storage unit of this medium will not be deleted and that it will be possible to add it to the list again later.',
'Are you sure release medium' : 'Are you sure you want to release the medium %s?',
'This will detach from' : 'This will detach it from the following virtual machine(s): %s.',
'Please select a medium.' : 'Please select a medium.',
'VMM Remove Media Message1' : 'Do you want to delete the storage unit of the hard disk %s?',
'VMM Remove Media Message2' : 'If you select Delete then the specified storage unit will be permanently deleted. <b>This operation cannot be undone.</b>',
'VMM Remove Media Message3' : 'If you select Keep then the hard disk will be only removed from the list of known hard disks, but the storage unit will be left untouched which makes it possible to add this hard disk to the list later again.',
'Normal' : 'Normal',
'Writethrough' : 'Writethrough',
'Immutable' : 'Immutable',
'Actions' : 'Actions',
'Clone' : 'Clone',
'Remove' : 'Remove',
'Release' : 'Release',
'Hard Disks' : 'Hard Disks',
'CD/DVD Images' : 'CD/DVD Images',
'Floppy Images' : 'Floppy Images',
# New hard disk wizard #
'Create New Virtual Disk' : 'Create New Virtual Disk',
'newDisk Welcome' : 'Welcome to the Create New Virtual Disk Wizard!',
'newDisk Step1 Message1' : 'This wizard will help you to create a new virtual hard disk for your virtual Machine.',
'newDisk Step1 Message2' : 'Use the Next button to go to the next page of the wizard and the Back button to return to the previous page.',
'Hard Disk Storage Type' : 'Hard Disk Storage Type',
'newDisk Step2 Message1' : 'Select the type of virtual hard disk you want to create.',
'newDisk Step2 dynamic' : 'A <b>dynamically expanding storage</b> initially occupies a very small amount of space on your physical hard disk. It will grow dynamically (up to the size specified) as the Guest OS claims disk space.',
'newDisk Step2 fixed' : 'A <b>fixed-size storage</b> does not grow. It is stored in a file approximately the same size as the size of the virtual hard disk. The creation of a fixed-size storage may take a long time depending on the storage size and the write performance of your hard disk.',
'Storage Type' : 'Storage Type',
'Dynamically expanding storage' : 'Dynamically expanding storage',
'Fixed-size storage' : 'Fixed size storage',
'Virtual Disk Location and Size' : 'Virtual Disk Location and Size',
'newDisk Step3 Message1' : 'Select the location of a file to store the hard disk data or type a file name in the entry field.',
'newDisk Step3 Message2' : 'Select the size of the virtual hard disk in megabytes. The size will be reported to the Guest OS as the maximum size of this hard disk.',
'Summary' : 'Summary',
'newDisk Step4 Message1' : 'You are going to create a new hard disk with the following parameters:',
'newDisk Step4 Message2' : 'If the above settings are correct, press the Finish button. Once you press it, a new hard disk will be created.',
# New virtual machine wizard #
'Create New Virtual Machine' : 'Create New Virtual Machine',
'New Virtual Machine Wizard' : 'New Virtual Machine Wizard',
'newVM Welcome' : 'Welcome to the New Virtual Machine Wizard!',
'newVM Step1 Message1' : 'This wizard will guide you through the steps necessary to create a new virtual machine for VirtualBox.',
'newVM Step1 Message2' : 'Use the Next button to go to the next page of the wizard and the Back button to return to the previous page.',
'VM Name and OS Type' : 'VM Name and OS Type',
'newVM Step2 Message1' : 'Enter a name for the new virtual machine and select the type of guest operating system you plan to install onto the virtual machine.',
'newVM Step2 Message2' : 'The name of the virtual machine usually indicates its software and hardware configuration. It will be used by all VirtualBox components to identify your virtual machine.',
'newVM Step3 Message1' : 'Select the amount of base memory (RAM) in megabytes to be allocated to the virtual machine.',
'newVM Step3 Message2' : 'The recommended base memory size is %s MB.', # %s will be replaced with the recommended memory size at run time #
'Virtual Hard Disk' : 'Virtual Hard Disk',
'Boot Hard Disk' : 'Boot Hard Disk',
'Create new hard disk' : 'Create new hard disk',
'Use existing hard disk' : 'Use existing hard disk',
'newVM Step4 Message1' : 'Select the hard disk image to be used as the boot disk of the virtual machine. You can either create a new hard disk using the New button or select an existing hard disk image from the drop-down list or by pressing the Existing button (to invoke the Virtual Media Manager dialog).',
'newVM Step4 Message2' : 'If you need a more complicated hard disk setup, you can skip this step and attach hard disks later using the VM Settings dialog.',
'newVM Step4 Message3' : 'The recommended size of the boot hard disk is %s MB.', # %s will be replaced with the recommended memory size at run time #
'newVM Step5 Message1' : 'You are going to create a new virtual machine with the following parameters:',
'newVM Step5 Message2' : 'If the above is correct press the Finish button. Once you press it, a new virtual machine will be created.',
'newVM Step5 Message3' : 'Note that you can alter these and all other setting of the created virtual machine at any time using the Settings dialog accessible through the menu of the main window.',
# VM Log files #
'Show Log' : 'Show Log',
'Logs' : 'Logs',
'No logs found.' : 'No logs found for the selected virtual machine.',
# Import / Export Appliances #
'Export Appliance' : 'Export Appliance',
'Appliance Export Wizard' : 'Appliance Export Wizard',
'Appliance Export Wizard Welcome' : 'Welcome to the Appliance Export Wizard!',
'appExport Step1 Message1' : 'This wizard will guide you through the process of exporting an appliance.',
'appExport Step1 Message2' : 'Use the Next button to go to the next page of the wizard and the Back button to return to the previous page. You can also press Cancel if you want to cancel the execution of this wizard.',
'appExport Step1 Message3' : 'Please select the virtual machines that should be added to the appliance. You can select more than one. Please note that these machines have to be turned off before they can be exported.',
'Appliance Export Settings' : 'Appliance Export Settings',
'appExport Step2 Message1' : 'Here you can change additional configuration values of the selected virtual machines. You can modify most of the properties shown by double-clicking on the items.',
'appExport Step3 Message1' : 'Please choose a filename to export the OVF to.',
'Import Appliance' : 'Import Appliance',
'Appliance Import Wizard' : 'Appliance Import Wizard',
'Appliance Import Wizard Welcome' : 'Welcome to the Appliance Import Wizard!',
'appImport Step1 Message1' : 'This wizard will guide you through the process of importing an appliance.',
'appImport Step1 Message2' : 'Use the Next button to go to the next page of the wizard and the Back button to return to the previous page. You can also press Cancel if you want to cancel the execution of this wizard.',
'appImport Step1 Message3' : 'VirtualBox currently supports importing appliances saved in the Open Virtualization Format (OVF). To continue, select the file to import below:',
'Appliance Import Settings' : 'Appliance Import Settings',
'appImport Step2 Message1' : 'These are the virtual machines contained in the appliance and the suggested settings of the imported VirtualBox machines. You can change many of the properties shown by double-clicking on the items and disable others using the check boxes below.',
'appImport Step3 Message1' : 'Please choose a filename to import the OVF to.',
'Write legacy OVF' : 'Write legacy OVF 0.9',
'Virtual System X' : 'Virtual System %s', # %s will be replaced with the virtual system number
'Product' : 'Product',
'Product-URL' : 'Product-URL',
'Vendor' : 'Vendor',
'Vendor-URL' : 'Vendor-URL',
'License' : 'License',
'Hard Disk Controller' : 'Hard Disk Controller',
'Virtual Disk Image' : 'Virtual Disk Image',
'Warnings' : 'Warnings',
# Operation in progress onUnLoad warning message #
'Operation in progress' : 'Warning: A VirtualBox internal operation is in progress. Closing this window or navigating away from this web page may cause unexpected and undesirable results. Please wait for the operation to complete.',
'Loading ...' : 'Loading ...', # "loading ..." screen
# Versions #
'Unsupported version' : 'You are using an untested version of VirtualBox (%s) with VirtualBox Web Console. This may cause unknown and undesireable results.',
'Do not show message again' : 'Do not show this message again.',
# Fatal connection error #
'Fatal error' : 'An error occurred communicating with your vboxwebsrv. No more requests will be sent by VirtualBox Web Console until the error is corrected and this page is refreshed. The details of this connection error should be displayed in a subsequent dialog box.',
# Guest properties error #
'Unable to retrieve guest properties' : 'Unable to retrieve guest properties. Make sure the virtual machine is running and has the VirtualBox Guest Additions installed.',
#RDP #
'User name' : 'User name',
'Password' : 'Password',
'Connecting to' : 'Connecting to',
'Connected to' : 'Connected to',
'Requested desktop size' : 'Requested desktop size',
'Connect' : 'Connect',
'Detach' : 'Detach',
'Disconnect' : 'Disconnect',
"Ctrl-Alt-Del" : "Send Ctrl-Alt-Del",
'Disconnect reason' : 'Disconnect reason',
"Redirection by" : "Redirection by",
'Virtual machine is not running or RDP configured.' : 'Virtual machine is not running or is not configured to accept RDP connections.',
# Operating Systems #
'Other' : 'Other/Unknown',
'Windows31' : 'Windows 3.1',
'Windows95' : 'Windows 95',
'Windows98' : 'Windows 98',
'WindowsMe' : 'Windows Me',
'WindowsNT4' : 'Windows NT 4',
'Windows2000' : 'Windows 2000',
'WindowsXP' : 'Windows XP',
'WindowsXP_64' : 'Windows XP (64 bit)',
'Windows2003' : 'Windows 2003',
'Windows2003_64' : 'Windows 2003 (64 bit)',
'WindowsVista' : 'Windows Vista',
'WindowsVista_64' : 'Windows Vista (64 bit)',
'Windows2008' : 'Windows 2008',
'Windows2008_64' : 'Windows 2008 (64 bit)',
'Windows7' : 'Windows 7',
'Windows7_64' : 'Windows 7 (64 bit)',
'WindowsNT' : 'Other Windows',
'Linux22' : 'Linux 2.2',
'Linux24' : 'Linux 2.4',
'Linux24_64' : 'Linux 2.4 (64 bit)',
'Linux26' : 'Linux 2.6',
'Linux26_64' : 'Linux 2.6 (64 bit)',
'ArchLinux' : 'Arch Linux',
'ArchLinux_64' : 'Arch Linux (64 bit)',
'Debian' : 'Debian',
'Debian_64' : 'Debian (64 bit)',
'OpenSUSE' : 'openSUSE',
'OpenSUSE_64' : 'openSUSE (64 bit)',
'Fedora' : 'Fedora',
'Fedora_64' : 'Fedora (64 bit)',
'Gentoo' : 'Gentoo',
'Gentoo_64' : 'Gentoo (64 bit)',
'Mandriva' : 'Mandriva',
'Mandriva_64' : 'Mandriva (64 bit)',
'RedHat' : 'Red Hat',
'RedHat_64' : 'Red Hat (64 bit)',
'Turbolinux' : 'Turbolinux',
'Ubuntu' : 'Ubuntu',
'Ubuntu_64' : 'Ubuntu (64 bit)',
'Xandros' : 'Xandros',
'Xandros_64' : 'Xandros (64 bit)',
'Linux' : 'Other Linux',
'Solaris' : 'Solaris',
'Solaris_64' : 'Solaris (64 bit)',
'OpenSolaris' : 'OpenSolaris',
'OpenSolaris_64' : 'OpenSolaris (64 bit)',
'FreeBSD' : 'FreeBSD',
'FreeBSD_64' : 'FreeBSD (64 bit)',
'OpenBSD' : 'OpenBSD',
'OpenBSD_64' : 'OpenBSD (64 bit)',
'NetBSD' : 'NetBSD',
'NetBSD_64' : 'NetBSD (64 bit)',
'OS2Warp3' : 'OS/2 Warp 3',
'OS2Warp4' : 'OS/2 Warp 4',
'OS2Warp45' : 'OS/2 Warp 4.5',
'OS2eCS' : 'eComStation',
'OS2' : 'Other OS/2',
'DOS' : 'DOS',
'Netware' : 'Netware',
'MacOS' : 'Mac OS X Server',
'MacOS_64' : 'Mac OS X Server (64 bit)',
}
| |
""" Utilities for dealing with NTP time stamps """
__author__ = 'Luke Campbell <LCampbell@ASAScience.com>, Michael Meisinger'
import time
import datetime
import struct
import numbers
try:
import numpy as np
except ImportError:
np = None
class NTP4Time(object):
"""
Utility wrapper for handling time in ntpv4
Everything is in UTC
"""
FRAC = np.float32(4294967296.) if np else None
JAN_1970 = np.uint32(2208988800) if np else None
JAN_1970_INT = 2208988800
EPOCH = datetime.datetime(1900, 1, 1)
ntpv4_timestamp = '! 2I'
ntpv4_date = '! 2I Q'
def __init__(self, date=None):
""" Can be initialized with a standard unix time stamp """
# Is it correct to represent NTP4 internally as datetime?
if date is None:
date = time.time()
if isinstance(date, numbers.Number):
self._dt = datetime.datetime.utcfromtimestamp(date)
elif isinstance(date, datetime.datetime):
self._dt = date
elif isinstance(date, datetime.date):
self._dt = datetime.datetime.combine(date, datetime.time())
@classmethod
def utcnow(cls):
return NTP4Time()
@property
def year(self):
return self._dt.year
@property
def month(self):
return self._dt.month
@property
def day(self):
return self._dt.day
@property
def hour(self):
return self._dt.hour
@property
def minute(self):
return self._dt.minute
@property
def second(self):
return self._dt.second
@property
def date(self):
from ion.util.time_utils import IonDate
return IonDate(self.year, self.month, self.day)
@property
def era(self):
delta = (self._dt - self.EPOCH).total_seconds()
return np.uint32( int(delta) / 2**32)
@property
def seconds(self):
delta = self._dt - self.EPOCH
return np.uint32(np.trunc(delta.total_seconds()))
@seconds.setter
def seconds(self,value):
delta = datetime.timedelta(seconds=value)
self._dt = self.EPOCH + delta
@property
def useconds(self):
delta = self._dt - self.EPOCH
return np.uint32(np.modf(delta.total_seconds())[0] * 1e6)
@property
def microseconds(self):
return self.useconds
def __repr__(self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, str(self), id(self))
def __str__(self):
return self._dt.isoformat()
def to_ntp64(self):
"""
Returns the NTPv4 64bit timestamp as binary (str)
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Seconds |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Fraction |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
delta = (self._dt - self.EPOCH).total_seconds()
seconds = np.uint32(np.trunc(delta))
fraction = np.uint32((delta - int(delta)) * 2**32)
timestamp = struct.pack(self.ntpv4_timestamp, seconds, fraction)
return timestamp
@classmethod
def from_ntp64(cls, val):
"""
Converts a RFC 5905 (NTPv4) compliant 64bit time stamp into an NTP4Time object
"""
seconds, fraction = struct.unpack(cls.ntpv4_timestamp, val)
it = cls()
it.seconds = seconds + (fraction *1e0 / 2**32)
return it
def to_ntp_date(self):
"""
Returns the NTPv4 128bit date timestamp
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Era Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Era Offset |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Fraction |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
delta = (self._dt - self.EPOCH).total_seconds()
era = int(delta) / (2**32)
offset = np.uint32(np.trunc(delta)) # Overflow does all the work for us
fraction = np.uint64((delta - int(delta)) * 2**64)
ntp_date = struct.pack(self.ntpv4_date, era, offset, fraction)
return ntp_date
@classmethod
def from_ntp_date(cls, value):
"""
Returns an NTP4Time object based on the 128bit RFC 5905 (NTPv4) Date Format
"""
era, seconds, fraction = struct.unpack(cls.ntpv4_date, value)
it = cls()
it.seconds = (era * 2**32) + seconds + (fraction * 1e0 / 2**64)
return it
def to_string(self):
"""
Creates a hexidecimal string of the NTP time stamp (serialization)
"""
val = self.to_ntp64()
assert len(val) == 8
arr = [0] * 8
for i in xrange(8):
arr[i] = '%02x' % ord(val[i])
retval = ''.join(arr)
return retval
def to_extended_string(self):
"""
Creates a hexidecimal string of the NTP date format (serialization)
"""
val = self.to_ntp_date()
assert len(val) == 16
arr = [0] * 16
for i in xrange(16):
arr[i] = '%02x' % ord(val[i])
retval = ''.join(arr)
return retval
def to_np_value(self, dtype="i8"):
"""
Returns 64bit NTPv4 representation as i8 value.
"""
val = self.to_ntp64()
return np.fromstring(val, dtype=dtype)
@classmethod
def np_from_string(cls, s, dtype="i8"):
return np.fromstring(s, dtype=dtype)
@classmethod
def from_string(cls, s):
"""
Creates an NTP4Time object from the serialized time stamp
"""
assert len(s) == 16
arr = [0] * 8
for i in xrange(8):
arr[i] = chr(int(s[2*i:2*i+2],16))
retval = ''.join(arr)
it = cls.from_ntp64(retval)
return it
@classmethod
def from_extended_string(cls, s):
"""
Creates an NTP4Time object from the serialized extended time stamp
"""
assert len(s) == 32
arr = [0] * 16
for i in xrange(16):
arr[i] = chr(int(s[2*i:2*i+2],16))
retval = ''.join(arr)
it = cls.from_ntp_date(retval)
return it
def to_unix(self):
"""
Returns the unix timestamp for this NTP4Time
"""
delta = self._dt - self.EPOCH
return delta.total_seconds() - self.JAN_1970_INT
@staticmethod
def htonstr(val):
import sys
if sys.byteorder == 'little':
l = len(val)
nval = [0] * l
for i in xrange(l/2):
nval[i*2] = val[l - i*2 - 2]
nval[i*2+1] = val[l - i*2 - 1]
return ''.join(nval)
return val
@staticmethod
def htonl(val):
import sys
val = np.uint32(val)
if sys.byteorder == 'little':
return val.byteswap()
return val
@staticmethod
def htonll(val):
    """
    Convert a 64-bit value from host to network (big-endian) byte order.
    """
    import sys
    as_u64 = np.uint64(val)
    return as_u64.byteswap() if sys.byteorder == 'little' else as_u64
def __eq__(self, other):
    """Equal iff `other` is an NTP4Time with the same underlying datetime."""
    if not isinstance(other, NTP4Time):
        return False
    return self._dt == other._dt
def __ne__(self, other):
    """Inverse of __eq__; non-NTP4Time operands always compare unequal."""
    if not isinstance(other, NTP4Time):
        return True
    return self._dt != other._dt
def __gt__(self, other):
    """Strictly later than `other`; False for non-NTP4Time operands."""
    if not isinstance(other, NTP4Time):
        return False
    return self._dt > other._dt
def __ge__(self, other):
    """Later than or equal to `other`; False for non-NTP4Time operands."""
    if not isinstance(other, NTP4Time):
        return False
    return self._dt >= other._dt
def __lt__(self, other):
    """Strictly earlier than `other`; False for non-NTP4Time operands."""
    if not isinstance(other, NTP4Time):
        return False
    return self._dt < other._dt
def __le__(self, other):
    """Earlier than or equal to `other`; False for non-NTP4Time operands."""
    if not isinstance(other, NTP4Time):
        return False
    return self._dt <= other._dt
def to_sortable(self):
    """Return a sort-order-preserving integer: (seconds << 32) | fraction."""
    elapsed = (self._dt - self.EPOCH).total_seconds()
    whole = int(np.uint32(np.trunc(elapsed)))
    frac = int(np.uint32((elapsed - int(elapsed)) * 2**32))
    return (whole * 2**32) + frac
| |
"""
Settings and configuration for wallabag-cli.
"""
import base64
import json
import time
from collections import OrderedDict
from Crypto.Cipher import AES
from Crypto.Hash import MD5
import getpass
import math
import os
from pathlib import Path
import socket
from sys import exit
CONFIG_DIRECTORY = os.path.expanduser("~")
CONFIG_FILENAME = ".wallabag-cli"
__global_custom_path = None
class Configs():
    """
    Static struct for storing the global configs.

    All attributes are class-level: the class acts as a namespace and is
    never instantiated (see set_config/get_config below).
    """
    # wallabag server
    serverurl = ""
    username = ""
    password = ""
    # wallabag api oauth2
    client = ""
    secret = ""
    # oauth2 token
    access_token = ""
    expires = 0  # absolute expiry, seconds since the epoch (compared to time.time())
def is_token_expired():
    """
    Return True if the last created oauth2 token is expired.
    """
    return time.time() > Configs.expires
def set_config(name, value):
    """
    Set a config value without checking validity; unknown names are
    silently ignored.
    """
    if not hasattr(Configs, name):
        return
    setattr(Configs, name, value)
def get_config(name):
    """
    Get a config value, or None for unknown names.  Use "api.get_token()"
    instead if you wish to get a valid oauth2 token.
    """
    try:
        return getattr(Configs, name)
    except AttributeError:
        return None
def __cryptkey():
    """
    Derive a machine/user-specific AES key: MD5 hexdigest of
    username + hostname (32 hex chars, used directly as the key).
    """
    user_and_host = getpass.getuser() + socket.gethostname()
    return MD5.new(user_and_host.encode("utf-8")).hexdigest()
def __encrypt(value):
    """
    Encrypt a string with the machine-local key and return it base64
    encoded, or None on any failure.

    SECURITY NOTE: AES.new() without an explicit mode/IV uses ECB, which
    leaks plaintext patterns.  Left unchanged because switching modes
    would invalidate every existing config file -- flagged for migration.
    The value is space-padded to the 16-byte block size, so trailing
    whitespace in the plaintext is lost on the decrypt round-trip.
    """
    try:
        blocks = math.ceil(len(value) / 16)
        padded = value.ljust(blocks * 16, ' ')
        ciphertext = AES.new(__cryptkey()).encrypt(padded)
        return base64.b64encode(ciphertext).decode("utf-8")
    except Exception:  # was a bare except: don't swallow SystemExit et al.
        return None
def __decrypt(value):
    """
    Decrypt a base64 ciphertext produced by __encrypt and return the
    rstrip()-ed plaintext, or None on any failure (wrong key, corrupt
    data, bad base64, ...).
    """
    try:
        raw = base64.b64decode(value)
        plaintext = AES.new(__cryptkey()).decrypt(raw).decode("utf-8")
        # rstrip() removes the space padding added by __encrypt (and any
        # genuine trailing whitespace -- a known round-trip limitation).
        return plaintext.rstrip()
    except Exception:  # was a bare except: failures still map to None only
        return None
def __configs2dictionary():
    """
    Convert the configuration values to a json serializable dictionary.

    Returns
    -------
    dictionary
        Dictionary with the configurations (password/secret encrypted).
    """
    token = OrderedDict([
        ("access_token", Configs.access_token),
        ("expires", Configs.expires),
    ])
    oauth2 = OrderedDict([
        ("client", Configs.client),
        ("secret", __encrypt(Configs.secret)),
        ("token", token),
    ])
    wallabag_api = OrderedDict([
        ("serverurl", Configs.serverurl),
        ("username", Configs.username),
        ("password", __encrypt(Configs.password)),
        ("oauth2", oauth2),
    ])
    return {"WARNING": "Do not edit this file manually. Use 'wallabag config' instead!", "wallabag_api": wallabag_api}
def __dicionary2config(configdict):
    """
    Load a (possibly nested) config dictionary into Configs.

    Encrypted entries ("password", "secret") are decrypted in place.
    Returns False as soon as any value fails to decrypt, True otherwise.
    """
    for item in configdict:
        if isinstance(configdict[item], (str, int, float)):
            if item in ["password", "secret"]:
                configdict[item] = __decrypt(configdict[item])
                if configdict[item] is None:  # was `== None`
                    return False
            set_config(item, configdict[item])
        elif isinstance(configdict[item], dict):
            # Bug fix: the recursive result used to be ignored, so a failed
            # decrypt inside a nested section still reported success.
            if not __dicionary2config(configdict[item]):
                return False
    return True
def set_path(path):
    """
    Set a global override for the config file path (consumed by get_path).
    """
    global __global_custom_path
    __global_custom_path = path
def get_path(local_custom_path=None):
    """
    Resolve the config file path: an explicit argument wins, then the
    global override set via set_path(), then the default location
    (~/.wallabag-cli).
    """
    for candidate in (local_custom_path, __global_custom_path):
        if candidate is not None:
            return candidate
    return os.path.join(CONFIG_DIRECTORY, CONFIG_FILENAME)
def is_valid(custom_path=None):
    """
    Return True if a config file exists, loads, and fills in every
    required value (server url, credentials, oauth2 client and secret).
    """
    path = get_path(custom_path)
    if not exist(path):
        return False
    load(path)
    required = (Configs.serverurl, Configs.username, Configs.password,
                Configs.client, Configs.secret)
    return "" not in required
def exist(custom_path=None):
    """
    Return True if a config file exists at the resolved path.
    """
    return Path(get_path(custom_path)).is_file()
def save(custom_path=None):
    """
    Save the config into a file.

    Parameters
    ----------
    custom_path : string
        Optional non default config filename.

    Returns
    -------
    bool
        True if successful
    """
    path = get_path(custom_path)
    try:
        with open(path, mode='w') as file:
            # The context manager closes the file; the old explicit
            # file.close() inside the with-block was redundant.
            file.write(json.dumps(__configs2dictionary(), indent=4))
        return True
    except (OSError, TypeError):
        # was a bare except; TypeError covers unserializable config values
        return False
def load(custom_path=None):
    """
    Load the config file into Configs.

    Parameters
    ----------
    custom_path : string
        Optional non default config filename.

    Returns
    -------
    bool
        True if successful.  Otherwise Configs keeps its default values.
    """
    path = get_path(custom_path)
    if not exist(path):
        return False
    try:
        with open(path, mode='r') as file:
            # Context manager closes the file; explicit close() was redundant.
            filecontent = file.read()
        dic = json.loads(filecontent)
        return __dicionary2config(dic['wallabag_api'])
    except (json.decoder.JSONDecodeError, PermissionError, KeyError):
        # KeyError added: a config file without a "wallabag_api" section
        # used to crash instead of reporting failure.
        return False
def load_or_create(custom_path=None):
    """
    Load a config file or create a blank one if none exists yet.

    Exits the process with status -1 if neither loading nor creating
    succeeds.
    """
    path = get_path(custom_path)
    success = save(path) if not exist(path) else load(path)
    if not success:
        print("Error: Could not load or create the config file.")
        print()
        exit(-1)
| |
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
assert_raises_regex
)
# Keyword-argument forms of the `unicode` flag, kept as dicts so that an
# automated 2to3 run does not rewrite the call sites.
kw_unicode_true = {'unicode': True}  # make 2to3 work properly
kw_unicode_false = {'unicode': False}
class TestBasic:
    """Construction of chararrays from object, bytes and unicode arrays."""

    def test_from_object_array(self):
        # itemsize comes from the longest element; trailing whitespace is
        # stripped by chararray construction ('long ' -> b'long').
        A = np.array([['abc', 2],
                      ['long ', '0123456789']], dtype='O')
        B = np.char.array(A)
        assert_equal(B.dtype.itemsize, 10)
        assert_array_equal(B, [[b'abc', b'2'],
                               [b'long', b'0123456789']])

    def test_from_object_array_unicode(self):
        # An object array holding unicode must be converted explicitly
        # with unicode=True; the default (bytes) conversion raises.
        A = np.array([['abc', u'Sigma \u03a3'],
                      ['long ', '0123456789']], dtype='O')
        assert_raises(ValueError, np.char.array, (A,))
        B = np.char.array(A, **kw_unicode_true)
        assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
        assert_array_equal(B, [['abc', u'Sigma \u03a3'],
                               ['long', '0123456789']])

    def test_from_string_array(self):
        A = np.array([[b'abc', b'foo'],
                      [b'long ', b'0123456789']])
        assert_equal(A.dtype.type, np.string_)
        B = np.char.array(A)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)
        # np.char.array copies...
        B[0, 0] = 'changed'
        assert_(B[0, 0] != A[0, 0])
        # ...while np.char.asarray aliases the input buffer.
        C = np.char.asarray(A)
        assert_array_equal(C, A)
        assert_equal(C.dtype, A.dtype)
        C[0, 0] = 'changed again'
        assert_(C[0, 0] != B[0, 0])
        assert_(C[0, 0] == A[0, 0])

    def test_from_unicode_array(self):
        A = np.array([['abc', u'Sigma \u03a3'],
                      ['long ', '0123456789']])
        assert_equal(A.dtype.type, np.unicode_)
        B = np.char.array(A)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)
        B = np.char.array(A, **kw_unicode_true)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)

        def fail():
            # Forcing bytes on non-latin1 content must fail.
            np.char.array(A, **kw_unicode_false)

        assert_raises(UnicodeEncodeError, fail)

    def test_unicode_upconvert(self):
        # bytes + unicode promotes the result to a unicode chararray.
        A = np.char.array(['abc'])
        B = np.char.array([u'\u03a3'])
        assert_(issubclass((A + B).dtype.type, np.unicode_))

    def test_from_string(self):
        A = np.char.array(b'abc')
        assert_equal(len(A), 1)
        assert_equal(len(A[0]), 3)
        assert_(issubclass(A.dtype.type, np.string_))

    def test_from_unicode(self):
        A = np.char.array(u'\u03a3')
        assert_equal(len(A), 1)
        assert_equal(len(A[0]), 1)
        assert_equal(A.itemsize, 4)
        assert_(issubclass(A.dtype.type, np.unicode_))
class TestVecString:
    """Argument validation of the private _vec_string dispatcher."""

    def test_non_existent_method(self):
        with assert_raises(AttributeError):
            _vec_string('a', np.string_, 'bogus')

    def test_non_string_array(self):
        with assert_raises(TypeError):
            _vec_string(1, np.string_, 'strip')

    def test_invalid_args_tuple(self):
        with assert_raises(TypeError):
            _vec_string(['a'], np.string_, 'strip', 1)

    def test_invalid_type_descr(self):
        with assert_raises(TypeError):
            _vec_string(['a'], 'BOGUS', 'strip')

    def test_invalid_function_args(self):
        with assert_raises(TypeError):
            _vec_string(['a'], np.string_, 'strip', (1,))

    def test_invalid_result_type(self):
        with assert_raises(TypeError):
            _vec_string(['a'], np.int_, 'strip')

    def test_broadcast_error(self):
        with assert_raises(ValueError):
            _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
class TestWhitespace:
    """Trailing whitespace is ignored by chararray comparisons."""

    def setup(self):
        self.A = np.array([['abc ', '123 '],
                           ['789 ', 'xyz ']]).view(np.chararray)
        self.B = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)

    def test1(self):
        a, b = self.A, self.B
        assert_(np.all(a == b))
        assert_(np.all(a >= b))
        assert_(np.all(a <= b))
        for unexpected in (a > b, a < b, a != b):
            assert_(not np.any(unexpected))
class TestChar:
    """Behaviour of single-character ('c' dtype) chararrays."""

    def setup(self):
        self.A = np.array('abc1', dtype='c').view(np.chararray)

    def test_it(self):
        arr = self.A
        assert_equal(arr.shape, (4,))
        assert_equal(arr.upper()[:2].tobytes(), b'AB')
class TestComparisons:
    """Element-wise rich comparisons between two chararrays."""

    def setup(self):
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)
        self.B = np.array([['efg', '123 '],
                           ['051', 'tuv']]).view(np.chararray)

    def test_not_equal(self):
        expected = [[True, False], [True, True]]
        assert_array_equal(self.A != self.B, expected)

    def test_equal(self):
        expected = [[False, True], [False, False]]
        assert_array_equal(self.A == self.B, expected)

    def test_greater_equal(self):
        expected = [[False, True], [True, True]]
        assert_array_equal(self.A >= self.B, expected)

    def test_less_equal(self):
        expected = [[True, True], [False, False]]
        assert_array_equal(self.A <= self.B, expected)

    def test_greater(self):
        expected = [[False, False], [True, True]]
        assert_array_equal(self.A > self.B, expected)

    def test_less(self):
        expected = [[True, False], [False, False]]
        assert_array_equal(self.A < self.B, expected)

    def test_type(self):
        # Comparison ufuncs return plain ndarrays, for arrays and scalars alike.
        for out in (np.char.equal(self.A, self.B), np.char.equal('a', 'a')):
            assert_(isinstance(out, np.ndarray))
class TestComparisonsMixed1(TestComparisons):
    """Ticket #1276: bytes/unicode operand mix (unicode right-hand side)."""

    def setup(self):
        TestComparisons.setup(self)
        rows = [['efg', '123 '], ['051', 'tuv']]
        self.B = np.array(rows, np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
    """Ticket #1276: bytes/unicode operand mix (unicode left-hand side)."""

    def setup(self):
        TestComparisons.setup(self)
        rows = [['abc', '123'], ['789', 'xyz']]
        self.A = np.array(rows, np.unicode_).view(np.chararray)
class TestInformation:
    """Query methods (len/count/find/is*/startswith/...) on chararrays.

    self.A is a bytes chararray and self.B the unicode equivalent (with a
    non-latin1 character), so every method is exercised for both dtypes.
    """

    def setup(self):
        self.A = np.array([[' abc ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
        self.B = np.array([[u' \u03a3 ', u''],
                           [u'12345', u'MixedCase'],
                           [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)

    def test_len(self):
        assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
        assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
        assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])

    def test_count(self):
        assert_(issubclass(self.A.count('').dtype.type, np.integer))
        assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
        assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
        # Python doesn't seem to like counting NULL characters
        # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
        assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
        assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
        # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])

    def test_endswith(self):
        assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
        assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
        assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            # start/end offsets must be integers.
            self.A.endswith('3', 'fdjk')

        assert_raises(TypeError, fail)

    def test_find(self):
        assert_(issubclass(self.A.find('a').dtype.type, np.integer))
        assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
        assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])

    def test_index(self):

        def fail():
            # Unlike find(), index() raises when the substring is missing.
            self.A.index('a')

        assert_raises(ValueError, fail)
        assert_(np.char.index('abcba', 'b') == 1)
        assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))

    def test_isalnum(self):
        assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
        assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])

    def test_isalpha(self):
        assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
        assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])

    def test_isdigit(self):
        assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
        assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])

    def test_islower(self):
        assert_(issubclass(self.A.islower().dtype.type, np.bool_))
        assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])

    def test_isspace(self):
        assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
        assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])

    def test_istitle(self):
        assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
        assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])

    def test_isupper(self):
        assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
        assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])

    def test_rfind(self):
        assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
        assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
        assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])

    def test_rindex(self):

        def fail():
            self.A.rindex('a')

        assert_raises(ValueError, fail)
        assert_(np.char.rindex('abcba', 'b') == 3)
        assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))

    def test_startswith(self):
        assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
        assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            self.A.startswith('3', 'fdjk')

        assert_raises(TypeError, fail)
class TestMethods:
    """Transforming methods (case, strip, split, just, replace, ...).

    self.A is a bytes ('S') chararray, self.B the unicode counterpart; the
    expected results therefore come in b'...' and u'...' pairs.
    """

    def setup(self):
        self.A = np.array([[' abc ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']],
                          dtype='S').view(np.chararray)
        self.B = np.array([[u' \u03a3 ', u''],
                           [u'12345', u'MixedCase'],
                           [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)

    def test_capitalize(self):
        tgt = [[b' abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
        assert_array_equal(self.A.capitalize(), tgt)
        tgt = [[u' \u03c3 ', ''],
               ['12345', 'Mixedcase'],
               ['123 \t 345 \0 ', 'Upper']]
        assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
        assert_array_equal(self.B.capitalize(), tgt)

    def test_center(self):
        assert_(issubclass(self.A.center(10).dtype.type, np.string_))
        # Widths broadcast element-wise; too-short widths leave items as-is.
        C = self.A.center([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
        C = self.A.center(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        assert_(np.all(C.endswith(b'#')))
        C = np.char.center(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b' FOO ', b' FOO '],
               [b' FOO ', b' FOO ']]
        assert_(issubclass(C.dtype.type, np.string_))
        assert_array_equal(C, tgt)

    def test_decode(self):
        A = np.char.array([b'\\u03a3'])
        assert_(A.decode('unicode-escape')[0] == '\u03a3')

    def test_encode(self):
        B = self.B.encode('unicode_escape')
        assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))

    def test_expandtabs(self):
        T = self.A.expandtabs()
        assert_(T[2, 0] == b'123 345 \0')

    def test_join(self):
        # NOTE: list(b'123') == [49, 50, 51]
        # so that b','.join(b'123') results to an error on Py3
        A0 = self.A.decode('ascii')
        A = np.char.join([',', '#'], A0)
        assert_(issubclass(A.dtype.type, np.unicode_))
        tgt = np.array([[' ,a,b,c, ', ''],
                        ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
                        ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
        assert_array_equal(np.char.join([',', '#'], A0), tgt)

    def test_ljust(self):
        assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
        C = self.A.ljust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
        C = self.A.ljust(20, b'#')
        assert_array_equal(C.startswith(b'#'), [
                [False, True], [False, False], [False, False]])
        assert_(np.all(C.endswith(b'#')))
        C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b'FOO ', b'FOO '],
               [b'FOO ', b'FOO ']]
        assert_(issubclass(C.dtype.type, np.string_))
        assert_array_equal(C, tgt)

    def test_lower(self):
        tgt = [[b' abc ', b''],
               [b'12345', b'mixedcase'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(self.A.lower().dtype.type, np.string_))
        assert_array_equal(self.A.lower(), tgt)
        tgt = [[u' \u03c3 ', u''],
               [u'12345', u'mixedcase'],
               [u'123 \t 345 \0 ', u'upper']]
        assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
        assert_array_equal(self.B.lower(), tgt)

    def test_lstrip(self):
        tgt = [[b'abc ', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
        assert_array_equal(self.A.lstrip(), tgt)
        tgt = [[b' abc', b''],
               [b'2345', b'ixedCase'],
               [b'23 \t 345 \x00', b'UPPER']]
        assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
        tgt = [[u'\u03a3 ', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345 \0 ', 'UPPER']]
        assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
        assert_array_equal(self.B.lstrip(), tgt)

    def test_partition(self):
        P = self.A.partition([b'3', b'M'])
        tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
        assert_(issubclass(P.dtype.type, np.string_))
        assert_array_equal(P, tgt)

    def test_replace(self):
        R = self.A.replace([b'3', b'a'],
                           [b'##########', b'@'])
        tgt = [[b' abc ', b''],
               [b'12##########45', b'MixedC@se'],
               [b'12########## \t ##########45 \x00', b'UPPER']]
        assert_(issubclass(R.dtype.type, np.string_))
        assert_array_equal(R, tgt)

    def test_rjust(self):
        assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
        C = self.A.rjust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
        C = self.A.rjust(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        assert_array_equal(C.endswith(b'#'),
                           [[False, True], [False, False], [False, False]])
        C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b' FOO', b' FOO'],
               [b' FOO', b' FOO']]
        assert_(issubclass(C.dtype.type, np.string_))
        assert_array_equal(C, tgt)

    def test_rpartition(self):
        P = self.A.rpartition([b'3', b'M'])
        tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
        assert_(issubclass(P.dtype.type, np.string_))
        assert_array_equal(P, tgt)

    def test_rsplit(self):
        A = self.A.rsplit(b'3')
        tgt = [[[b' abc '], [b'']],
               [[b'12', b'45'], [b'MixedCase']],
               [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        # split/rsplit return object arrays of per-element lists.
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_rstrip(self):
        assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
        tgt = [[b' abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_array_equal(self.A.rstrip(), tgt)
        tgt = [[b' abc ', b''],
               [b'1234', b'MixedCase'],
               [b'123 \t 345 \x00', b'UPP']
               ]
        assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
        tgt = [[u' \u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
        assert_array_equal(self.B.rstrip(), tgt)

    def test_strip(self):
        tgt = [[b'abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_(issubclass(self.A.strip().dtype.type, np.string_))
        assert_array_equal(self.A.strip(), tgt)
        tgt = [[b' abc ', b''],
               [b'234', b'ixedCas'],
               [b'23 \t 345 \x00', b'UPP']]
        assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
        tgt = [[u'\u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
        assert_array_equal(self.B.strip(), tgt)

    def test_split(self):
        A = self.A.split(b'3')
        tgt = [
            [[b' abc '], [b'']],
            [[b'12', b'45'], [b'MixedCase']],
            [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_splitlines(self):
        A = np.char.array(['abc\nfds\nwer']).splitlines()
        assert_(issubclass(A.dtype.type, np.object_))
        assert_(A.shape == (1,))
        assert_(len(A[0]) == 3)

    def test_swapcase(self):
        tgt = [[b' ABC ', b''],
               [b'12345', b'mIXEDcASE'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
        assert_array_equal(self.A.swapcase(), tgt)
        tgt = [[u' \u03c3 ', u''],
               [u'12345', u'mIXEDcASE'],
               [u'123 \t 345 \0 ', u'upper']]
        assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
        assert_array_equal(self.B.swapcase(), tgt)

    def test_title(self):
        tgt = [[b' Abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(self.A.title().dtype.type, np.string_))
        assert_array_equal(self.A.title(), tgt)
        tgt = [[u' \u03a3 ', u''],
               [u'12345', u'Mixedcase'],
               [u'123 \t 345 \0 ', u'Upper']]
        assert_(issubclass(self.B.title().dtype.type, np.unicode_))
        assert_array_equal(self.B.title(), tgt)

    def test_upper(self):
        tgt = [[b' ABC ', b''],
               [b'12345', b'MIXEDCASE'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(self.A.upper().dtype.type, np.string_))
        assert_array_equal(self.A.upper(), tgt)
        tgt = [[u' \u03a3 ', u''],
               [u'12345', u'MIXEDCASE'],
               [u'123 \t 345 \0 ', u'UPPER']]
        assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
        assert_array_equal(self.B.upper(), tgt)

    def test_isnumeric(self):
        # isnumeric/isdecimal are unicode-only; bytes arrays must raise.
        def fail():
            self.A.isnumeric()

        assert_raises(TypeError, fail)
        assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
        assert_array_equal(self.B.isnumeric(), [
                [False, False], [True, False], [False, False]])

    def test_isdecimal(self):

        def fail():
            self.A.isdecimal()

        assert_raises(TypeError, fail)
        assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
        assert_array_equal(self.B.isdecimal(), [
                [False, False], [True, False], [False, False]])
class TestOperations:
    """Arithmetic-style operators (+, *, %) and slicing on chararrays."""

    def setup(self):
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)
        self.B = np.array([['efg', '456'],
                           ['051', 'tuv']]).view(np.chararray)

    def test_add(self):
        AB = np.array([['abcefg', '123456'],
                       ['789051', 'xyztuv']]).view(np.chararray)
        assert_array_equal(AB, (self.A + self.B))
        assert_(len((self.A + self.B)[0][0]) == 6)

    def test_radd(self):
        QA = np.array([['qabc', 'q123'],
                       ['q789', 'qxyz']]).view(np.chararray)
        assert_array_equal(QA, ('q' + self.A))

    def test_mul(self):
        A = self.A
        for r in (2, 3, 5, 7, 197):
            Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
                           [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
            assert_array_equal(Ar, (self.A * r))
        # Repetition is only defined for integer multipliers.
        for ob in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                A*ob

    def test_rmul(self):
        A = self.A
        for r in (2, 3, 5, 7, 197):
            Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
                           [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
            assert_array_equal(Ar, (r * self.A))
        for ob in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                ob * A

    def test_mod(self):
        """Ticket #856"""
        F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
        C = np.array([[3, 7], [19, 1]])
        FC = np.array([['3', '7.000000'],
                       ['19', '1']]).view(np.chararray)
        assert_array_equal(FC, F % C)
        A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
        A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
        assert_array_equal(A1, (A % 1))
        A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
        assert_array_equal(A2, (A % [[1, 2], [3, 4]]))

    def test_rmod(self):
        assert_(("%s" % self.A) == str(self.A))
        assert_(("%r" % self.A) == repr(self.A))
        # A chararray as the right operand of % is unsupported.
        for ob in [42, object()]:
            with assert_raises_regex(
                    TypeError, "unsupported operand type.* and 'chararray'"):
                ob % self.A

    def test_slice(self):
        """Regression test for https://github.com/numpy/numpy/issues/5982"""
        arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
                       dtype='S4').view(np.chararray)
        sl1 = arr[:]
        assert_array_equal(sl1, arr)
        assert_(sl1.base is arr)
        assert_(sl1.base.base is arr.base)
        sl2 = arr[:, :]
        assert_array_equal(sl2, arr)
        assert_(sl2.base is arr)
        assert_(sl2.base.base is arr.base)
        assert_(arr[0, 0] == b'abc')
def test_empty_indexing():
    """Regression test for ticket 1948.

    Indexing a chararray with an empty list/array must return an empty
    chararray, not a chararray holding a single empty string.
    """
    s = np.chararray((4,))
    empty_selection = s[[]]
    assert_(empty_selection.size == 0)
| |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestQuery(unittest.TestCase):
_PROJECT = 'PROJECT'
@staticmethod
def _get_target_class():
    """Return the class under test, imported lazily."""
    from google.cloud.datastore.query import Query
    return Query
def _make_one(self, *args, **kw):
    """Construct a Query instance with the given arguments."""
    return self._get_target_class()(*args, **kw)
def _make_client(self):
    """Return a stub client bound to the test project."""
    return _Client(self._PROJECT)
def test_ctor_defaults(self):
    """Defaults: project/namespace come from the client, the rest is empty."""
    client = self._make_client()
    query = self._make_one(client)
    self.assertIs(query._client, client)
    self.assertEqual(query.project, client.project)
    self.assertIsNone(query.kind)
    self.assertEqual(query.namespace, client.namespace)
    self.assertIsNone(query.ancestor)
    for attr in ('filters', 'projection', 'order', 'distinct_on'):
        self.assertEqual(getattr(query, attr), [])
def test_ctor_explicit(self):
    """Every constructor keyword overrides the client-derived default."""
    from google.cloud.datastore.key import Key
    project = 'OTHER_PROJECT'
    kind = 'KIND'
    namespace = 'OTHER_NAMESPACE'
    client = self._make_client()
    ancestor = Key('ANCESTOR', 123, project=project)
    filters = [('foo', '=', 'Qux'), ('bar', '<', 17)]
    projection = ['foo', 'bar', 'baz']
    order = ['foo', 'bar']
    distinct_on = ['foo']
    query = self._make_one(
        client,
        kind=kind,
        project=project,
        namespace=namespace,
        ancestor=ancestor,
        filters=filters,
        projection=projection,
        order=order,
        distinct_on=distinct_on,
    )
    self.assertIs(query._client, client)
    self.assertEqual(query.project, project)
    self.assertEqual(query.kind, kind)
    self.assertEqual(query.namespace, namespace)
    self.assertEqual(query.ancestor.path, ancestor.path)
    self.assertEqual(query.filters, filters)
    self.assertEqual(query.projection, projection)
    self.assertEqual(query.order, order)
    self.assertEqual(query.distinct_on, distinct_on)
def test_ctor_bad_projection(self):
    """A non-sequence projection is rejected with TypeError."""
    with self.assertRaises(TypeError):
        self._make_one(self._make_client(), projection=object())
def test_ctor_bad_order(self):
    """A non-sequence order is rejected with TypeError."""
    with self.assertRaises(TypeError):
        self._make_one(self._make_client(), order=object())
def test_ctor_bad_distinct_on(self):
    """A non-sequence distinct_on is rejected with TypeError."""
    with self.assertRaises(TypeError):
        self._make_one(self._make_client(), distinct_on=object())
def test_ctor_bad_filters(self):
    """Filter entries that don't unpack into three parts raise ValueError."""
    with self.assertRaises(ValueError):
        self._make_one(self._make_client(), filters=[('one', 'two')])
def test_namespace_setter_w_non_string(self):
    """Assigning a non-string namespace raises ValueError."""
    query = self._make_one(self._make_client())
    with self.assertRaises(ValueError):
        query.namespace = object()
def test_namespace_setter(self):
    """A string namespace is stored verbatim."""
    query = self._make_one(self._make_client())
    query.namespace = 'OTHER_NAMESPACE'
    self.assertEqual(query.namespace, 'OTHER_NAMESPACE')
def test_kind_setter_w_non_string(self):
    """Assigning a non-string kind raises TypeError."""
    query = self._make_one(self._make_client())
    with self.assertRaises(TypeError):
        query.kind = object()
def test_kind_setter_wo_existing(self):
    """Setting kind on a query that had none stores it."""
    query = self._make_one(self._make_client())
    query.kind = 'KIND'
    self.assertEqual(query.kind, 'KIND')
def test_kind_setter_w_existing(self):
    """Re-assigning kind replaces the old value and leaves project alone."""
    query = self._make_one(self._make_client(), kind='KIND_BEFORE')
    self.assertEqual(query.kind, 'KIND_BEFORE')
    query.kind = 'KIND_AFTER'
    self.assertEqual(query.project, self._PROJECT)
    self.assertEqual(query.kind, 'KIND_AFTER')
def test_ancestor_setter_w_non_key(self):
    """Only Key instances are accepted as ancestor."""
    query = self._make_one(self._make_client())
    for bad_value in (object(), ['KIND', 'NAME']):
        with self.assertRaises(TypeError):
            query.ancestor = bad_value
def test_ancestor_setter_w_key(self):
    """Assigning a Key ancestor works and keeps existing filters intact."""
    from google.cloud.datastore.key import Key
    key = Key('KIND', 123, project=self._PROJECT)
    query = self._make_one(self._make_client())
    query.add_filter('name', '=', u'NAME')
    query.ancestor = key
    self.assertEqual(query.ancestor.path, key.path)
def test_ancestor_deleter_w_key(self):
    """`del query.ancestor` clears a previously-set ancestor."""
    from google.cloud.datastore.key import Key
    ancestor = Key('KIND', 123, project=self._PROJECT)
    query = self._make_one(client=self._make_client(), ancestor=ancestor)
    del query.ancestor
    self.assertIsNone(query.ancestor)
def test_add_filter_setter_w_unknown_operator(self):
    """An unrecognized comparison operator is rejected with ValueError."""
    query = self._make_one(self._make_client())
    with self.assertRaises(ValueError):
        query.add_filter('firstname', '~~', 'John')
def test_add_filter_w_known_operator(self):
    """A valid filter triple is appended to query.filters."""
    query = self._make_one(self._make_client())
    triple = ('firstname', '=', u'John')
    query.add_filter(*triple)
    self.assertEqual(query.filters, [triple])
def test_add_filter_w_all_operators(self):
    """All five supported operators are accepted and stored in order."""
    query = self._make_one(self._make_client())
    expected = [
        ('leq_prop', '<=', u'val1'),
        ('geq_prop', '>=', u'val2'),
        ('lt_prop', '<', u'val3'),
        ('gt_prop', '>', u'val4'),
        ('eq_prop', '=', u'val5'),
    ]
    for prop, operator, value in expected:
        query.add_filter(prop, operator, value)
    self.assertEqual(len(query.filters), 5)
    self.assertEqual(query.filters, expected)
def test_add_filter_w_known_operator_and_entity(self):
    """Entity values are legal filter operands."""
    from google.cloud.datastore.entity import Entity
    other = Entity()
    other.update({'firstname': u'John', 'lastname': u'Smith'})
    query = self._make_one(self._make_client())
    query.add_filter('other', '=', other)
    self.assertEqual(query.filters, [('other', '=', other)])
def test_add_filter_w_whitespace_property_name(self):
    """Property names are stored verbatim, whitespace included."""
    query = self._make_one(self._make_client())
    prop_name = ' property with lots of space '
    query.add_filter(prop_name, '=', u'John')
    self.assertEqual(query.filters, [(prop_name, '=', u'John')])
def test_add_filter___key__valid_key(self):
    """Filtering on __key__ with '=' and a Key value is allowed."""
    from google.cloud.datastore.key import Key
    key = Key('Foo', project=self._PROJECT)
    query = self._make_one(self._make_client())
    query.add_filter('__key__', '=', key)
    self.assertEqual(query.filters, [('__key__', '=', key)])
def test_filter___key__not_equal_operator(self):
    """Non-equality operators are also accepted for __key__ filters."""
    from google.cloud.datastore.key import Key
    key = Key('Foo', project=self._PROJECT)
    query = self._make_one(self._make_client())
    query.add_filter('__key__', '<', key)
    self.assertEqual(query.filters, [('__key__', '<', key)])
def test_filter___key__invalid_value(self):
    """__key__ filters require a Key value; None raises ValueError."""
    query = self._make_one(self._make_client())
    with self.assertRaises(ValueError):
        query.add_filter('__key__', '=', None)
def test_projection_setter_empty(self):
    """Assigning an empty list clears the projection."""
    instance = self._make_one(self._make_client())
    instance.projection = []
    self.assertEqual(instance.projection, [])
def test_projection_setter_string(self):
    """A bare string is normalized to a one-element list."""
    instance = self._make_one(self._make_client())
    instance.projection = 'field1'
    self.assertEqual(instance.projection, ['field1'])
def test_projection_setter_non_empty(self):
    """A list of names is stored as given."""
    instance = self._make_one(self._make_client())
    instance.projection = ['field1', 'field2']
    self.assertEqual(instance.projection, ['field1', 'field2'])
def test_projection_setter_multiple_calls(self):
    """Each assignment replaces, rather than extends, the projection."""
    instance = self._make_one(self._make_client())
    for chosen in (['field1', 'field2'], ['field3']):
        instance.projection = chosen
        self.assertEqual(instance.projection, chosen)
def test_keys_only(self):
    """keys_only() projects solely the entity key."""
    instance = self._make_one(self._make_client())
    instance.keys_only()
    self.assertEqual(instance.projection, ['__key__'])
def test_key_filter_defaults(self):
    """key_filter() defaults to the '=' operator."""
    from google.cloud.datastore.key import Key
    instance = self._make_one(self._make_client())
    self.assertEqual(instance.filters, [])
    target = Key('Kind', 1234, project='project')
    instance.key_filter(target)
    self.assertEqual(instance.filters, [('__key__', '=', target)])
def test_key_filter_explicit(self):
    """key_filter() honors an explicit operator argument."""
    from google.cloud.datastore.key import Key
    instance = self._make_one(self._make_client())
    self.assertEqual(instance.filters, [])
    target = Key('Kind', 1234, project='project')
    instance.key_filter(target, operator='>')
    self.assertEqual(instance.filters, [('__key__', '>', target)])
def test_order_setter_empty(self):
    """Assigning [] clears ordering given at construction time."""
    instance = self._make_one(self._make_client(), order=['foo', '-bar'])
    instance.order = []
    self.assertEqual(instance.order, [])
def test_order_setter_string(self):
    """A bare string is normalized to a one-element list."""
    instance = self._make_one(self._make_client())
    instance.order = 'field'
    self.assertEqual(instance.order, ['field'])
def test_order_setter_single_item_list_desc(self):
    """A descending ('-' prefixed) name is stored unchanged."""
    instance = self._make_one(self._make_client())
    instance.order = ['-field']
    self.assertEqual(instance.order, ['-field'])
def test_order_setter_multiple(self):
    """Mixed ascending/descending names are stored as given."""
    instance = self._make_one(self._make_client())
    instance.order = ['foo', '-bar']
    self.assertEqual(instance.order, ['foo', '-bar'])
def test_distinct_on_setter_empty(self):
    """Assigning [] clears distinct_on given at construction time."""
    instance = self._make_one(self._make_client(), distinct_on=['foo', 'bar'])
    instance.distinct_on = []
    self.assertEqual(instance.distinct_on, [])
def test_distinct_on_setter_string(self):
    """A bare string is normalized to a one-element list."""
    instance = self._make_one(self._make_client())
    instance.distinct_on = 'field1'
    self.assertEqual(instance.distinct_on, ['field1'])
def test_distinct_on_setter_non_empty(self):
    """A list of names is stored as given."""
    instance = self._make_one(self._make_client())
    instance.distinct_on = ['field1', 'field2']
    self.assertEqual(instance.distinct_on, ['field1', 'field2'])
def test_distinct_on_multiple_calls(self):
    """Each assignment replaces, rather than extends, distinct_on."""
    instance = self._make_one(self._make_client())
    for chosen in (['field1', 'field2'], ['field3']):
        instance.distinct_on = chosen
        self.assertEqual(instance.distinct_on, chosen)
def test_fetch_defaults_w_client_attr(self):
    """fetch() with no arguments uses the query's own client."""
    from google.cloud.datastore.query import Iterator
    owner = self._make_client()
    instance = self._make_one(owner)
    result = instance.fetch()
    self.assertIsInstance(result, Iterator)
    self.assertIs(result._query, instance)
    self.assertIs(result.client, owner)
    self.assertIsNone(result.max_results)
    self.assertEqual(result._offset, 0)
def test_fetch_w_explicit_client(self):
    """fetch() honors explicit limit, offset and client arguments."""
    from google.cloud.datastore.query import Iterator
    owner = self._make_client()
    substitute = self._make_client()
    instance = self._make_one(owner)
    result = instance.fetch(limit=7, offset=8, client=substitute)
    self.assertIsInstance(result, Iterator)
    self.assertIs(result._query, instance)
    self.assertIs(result.client, substitute)
    self.assertEqual(result.max_results, 7)
    self.assertEqual(result._offset, 8)
class TestIterator(unittest.TestCase):
    """Unit tests for ``google.cloud.datastore.query.Iterator``."""

    @staticmethod
    def _get_target_class():
        # Imported lazily so a broken import surfaces per-test, not at load.
        from google.cloud.datastore.query import Iterator
        return Iterator

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def test_constructor_defaults(self):
        query = object()
        client = object()
        iterator = self._make_one(query, client)
        self.assertFalse(iterator._started)
        self.assertIs(iterator.client, client)
        self.assertIsNotNone(iterator._item_to_value)
        self.assertIsNone(iterator.max_results)
        self.assertEqual(iterator.page_number, 0)
        self.assertIsNone(iterator.next_page_token,)
        self.assertEqual(iterator.num_results, 0)
        self.assertIs(iterator._query, query)
        self.assertIsNone(iterator._offset)
        self.assertIsNone(iterator._end_cursor)
        self.assertTrue(iterator._more_results)

    def test_constructor_explicit(self):
        query = object()
        client = object()
        limit = 43
        offset = 9
        start_cursor = b'8290\xff'
        end_cursor = b'so20rc\ta'
        iterator = self._make_one(
            query, client, limit=limit, offset=offset,
            start_cursor=start_cursor, end_cursor=end_cursor)
        self.assertFalse(iterator._started)
        self.assertIs(iterator.client, client)
        self.assertIsNotNone(iterator._item_to_value)
        self.assertEqual(iterator.max_results, limit)
        self.assertEqual(iterator.page_number, 0)
        # The start cursor seeds the page token.
        self.assertEqual(iterator.next_page_token, start_cursor)
        self.assertEqual(iterator.num_results, 0)
        self.assertIs(iterator._query, query)
        self.assertEqual(iterator._offset, offset)
        self.assertEqual(iterator._end_cursor, end_cursor)
        self.assertTrue(iterator._more_results)

    def test__build_protobuf_empty(self):
        from google.cloud.proto.datastore.v1 import query_pb2
        from google.cloud.datastore.query import Query
        client = _Client(None)
        query = Query(client)
        iterator = self._make_one(query, client)
        pb = iterator._build_protobuf()
        # With no cursors/limits, the protobuf is the default-empty Query.
        expected_pb = query_pb2.Query()
        self.assertEqual(pb, expected_pb)

    def test__build_protobuf_all_values(self):
        from google.cloud.proto.datastore.v1 import query_pb2
        from google.cloud.datastore.query import Query
        client = _Client(None)
        query = Query(client)
        limit = 15
        offset = 9
        # b'i\xb7\x1d' and b'\xc3\x1c\xb3' are the base64 decodings of the
        # 'abcd' / 'wxyz' cursor strings passed in below.
        start_bytes = b'i\xb7\x1d'
        start_cursor = 'abcd'
        end_bytes = b'\xc3\x1c\xb3'
        end_cursor = 'wxyz'
        iterator = self._make_one(
            query, client, limit=limit, offset=offset,
            start_cursor=start_cursor, end_cursor=end_cursor)
        self.assertEqual(iterator.max_results, limit)
        iterator.num_results = 4
        iterator._skipped_results = 1
        pb = iterator._build_protobuf()
        expected_pb = query_pb2.Query(
            start_cursor=start_bytes,
            end_cursor=end_bytes,
            offset=offset - iterator._skipped_results,
        )
        # The remaining limit shrinks by the results already received.
        expected_pb.limit.value = limit - iterator.num_results
        self.assertEqual(pb, expected_pb)

    def test__process_query_results(self):
        from google.cloud.proto.datastore.v1 import query_pb2
        iterator = self._make_one(None, None,
                                  end_cursor='abcd')
        self.assertIsNotNone(iterator._end_cursor)
        entity_pbs = [
            _make_entity('Hello', 9998, 'PRAHJEKT'),
        ]
        # b'\x9ai\xe7' base64-encodes to b'mmnn'.
        cursor_as_bytes = b'\x9ai\xe7'
        cursor = b'mmnn'
        skipped_results = 4
        more_results_enum = query_pb2.QueryResultBatch.NOT_FINISHED
        response_pb = _make_query_response(
            entity_pbs, cursor_as_bytes, more_results_enum, skipped_results)
        result = iterator._process_query_results(response_pb)
        self.assertEqual(result, entity_pbs)
        self.assertEqual(iterator._skipped_results, skipped_results)
        self.assertEqual(iterator.next_page_token, cursor)
        self.assertTrue(iterator._more_results)

    def test__process_query_results_done(self):
        from google.cloud.proto.datastore.v1 import query_pb2
        iterator = self._make_one(None, None,
                                  end_cursor='abcd')
        self.assertIsNotNone(iterator._end_cursor)
        entity_pbs = [
            _make_entity('World', 1234, 'PROJECT'),
        ]
        cursor_as_bytes = b''
        skipped_results = 44
        more_results_enum = query_pb2.QueryResultBatch.NO_MORE_RESULTS
        response_pb = _make_query_response(
            entity_pbs, cursor_as_bytes, more_results_enum, skipped_results)
        result = iterator._process_query_results(response_pb)
        self.assertEqual(result, entity_pbs)
        self.assertEqual(iterator._skipped_results, skipped_results)
        # A finished query clears the page token and stops iteration.
        self.assertIsNone(iterator.next_page_token)
        self.assertFalse(iterator._more_results)

    def test__process_query_results_bad_enum(self):
        iterator = self._make_one(None, None)
        # 999 is not a valid QueryResultBatch.MoreResultsType value.
        more_results_enum = 999
        response_pb = _make_query_response(
            [], b'', more_results_enum, 0)
        with self.assertRaises(ValueError):
            iterator._process_query_results(response_pb)

    def _next_page_helper(self, txn_id=None):
        # Shared body for the transactional / non-transactional cases below.
        from google.api.core import page_iterator
        from google.cloud.proto.datastore.v1 import datastore_pb2
        from google.cloud.proto.datastore.v1 import entity_pb2
        from google.cloud.proto.datastore.v1 import query_pb2
        from google.cloud.datastore.query import Query
        more_enum = query_pb2.QueryResultBatch.NOT_FINISHED
        result = _make_query_response([], b'', more_enum, 0)
        project = 'prujekt'
        ds_api = _make_datastore_api(result)
        if txn_id is None:
            client = _Client(project, datastore_api=ds_api)
        else:
            transaction = mock.Mock(id=txn_id, spec=['id'])
            client = _Client(
                project, datastore_api=ds_api, transaction=transaction)
        query = Query(client)
        iterator = self._make_one(query, client)
        page = iterator._next_page()
        self.assertIsInstance(page, page_iterator.Page)
        self.assertIs(page._parent, iterator)
        partition_id = entity_pb2.PartitionId(project_id=project)
        # An active transaction must be forwarded in the read options.
        if txn_id is None:
            read_options = datastore_pb2.ReadOptions()
        else:
            read_options = datastore_pb2.ReadOptions(transaction=txn_id)
        empty_query = query_pb2.Query()
        ds_api.run_query.assert_called_once_with(
            project, partition_id, read_options, query=empty_query)

    def test__next_page(self):
        self._next_page_helper()

    def test__next_page_in_transaction(self):
        txn_id = b'1xo1md\xe2\x98\x83'
        self._next_page_helper(txn_id)

    def test__next_page_no_more(self):
        from google.cloud.datastore.query import Query
        ds_api = _make_datastore_api()
        client = _Client(None, datastore_api=ds_api)
        query = Query(client)
        iterator = self._make_one(query, client)
        iterator._more_results = False
        page = iterator._next_page()
        # Once exhausted, no page is returned and no RPC is issued.
        self.assertIsNone(page)
        ds_api.run_query.assert_not_called()
class Test__item_to_entity(unittest.TestCase):
    """Tests for the ``_item_to_entity`` iterator helper."""

    def _call_fut(self, iterator, entity_pb):
        from google.cloud.datastore.query import _item_to_entity
        return _item_to_entity(iterator, entity_pb)

    def test_it(self):
        pb = mock.sentinel.entity_pb
        with mock.patch(
                'google.cloud.datastore.helpers.entity_from_protobuf') as converter:
            converted = self._call_fut(None, pb)
        # The helper must simply delegate to entity_from_protobuf.
        self.assertIs(converted, converter.return_value)
        converter.assert_called_once_with(pb)
class Test__pb_from_query(unittest.TestCase):
    """Tests for the ``_pb_from_query`` helper (Query -> protobuf)."""

    def _call_fut(self, query):
        from google.cloud.datastore.query import _pb_from_query
        return _pb_from_query(query)

    def test_empty(self):
        from google.cloud.proto.datastore.v1 import query_pb2
        pb = self._call_fut(_Query())
        self.assertEqual(list(pb.projection), [])
        self.assertEqual(list(pb.kind), [])
        self.assertEqual(list(pb.order), [])
        self.assertEqual(list(pb.distinct_on), [])
        self.assertEqual(pb.filter.property_filter.property.name, '')
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.op,
                         query_pb2.CompositeFilter.OPERATOR_UNSPECIFIED)
        self.assertEqual(list(cfilter.filters), [])
        self.assertEqual(pb.start_cursor, b'')
        self.assertEqual(pb.end_cursor, b'')
        self.assertEqual(pb.limit.value, 0)
        self.assertEqual(pb.offset, 0)

    def test_projection(self):
        pb = self._call_fut(_Query(projection=['a', 'b', 'c']))
        self.assertEqual([item.property.name for item in pb.projection],
                         ['a', 'b', 'c'])

    def test_kind(self):
        pb = self._call_fut(_Query(kind='KIND'))
        self.assertEqual([item.name for item in pb.kind], ['KIND'])

    def test_ancestor(self):
        from google.cloud.datastore.key import Key
        from google.cloud.proto.datastore.v1 import query_pb2
        ancestor = Key('Ancestor', 123, project='PROJECT')
        pb = self._call_fut(_Query(ancestor=ancestor))
        # An ancestor becomes a '__key__' property filter inside an AND
        # composite filter.
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
        self.assertEqual(len(cfilter.filters), 1)
        pfilter = cfilter.filters[0].property_filter
        self.assertEqual(pfilter.property.name, '__key__')
        ancestor_pb = ancestor.to_protobuf()
        self.assertEqual(pfilter.value.key_value, ancestor_pb)

    def test_filter(self):
        from google.cloud.proto.datastore.v1 import query_pb2
        query = _Query(filters=[('name', '=', u'John')])
        # The _Query stub carries no operator map of its own, so supply one.
        query.OPERATORS = {
            '=': query_pb2.PropertyFilter.EQUAL,
        }
        pb = self._call_fut(query)
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
        self.assertEqual(len(cfilter.filters), 1)
        pfilter = cfilter.filters[0].property_filter
        self.assertEqual(pfilter.property.name, 'name')
        self.assertEqual(pfilter.value.string_value, u'John')

    def test_filter_key(self):
        from google.cloud.datastore.key import Key
        from google.cloud.proto.datastore.v1 import query_pb2
        key = Key('Kind', 123, project='PROJECT')
        query = _Query(filters=[('__key__', '=', key)])
        query.OPERATORS = {
            '=': query_pb2.PropertyFilter.EQUAL,
        }
        pb = self._call_fut(query)
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND)
        self.assertEqual(len(cfilter.filters), 1)
        pfilter = cfilter.filters[0].property_filter
        self.assertEqual(pfilter.property.name, '__key__')
        key_pb = key.to_protobuf()
        # Key values are serialized as key_value, not string_value.
        self.assertEqual(pfilter.value.key_value, key_pb)

    def test_order(self):
        from google.cloud.proto.datastore.v1 import query_pb2
        pb = self._call_fut(_Query(order=['a', '-b', 'c']))
        # A '-' prefix selects DESCENDING; the stored name drops the prefix.
        self.assertEqual([item.property.name for item in pb.order],
                         ['a', 'b', 'c'])
        self.assertEqual([item.direction for item in pb.order],
                         [query_pb2.PropertyOrder.ASCENDING,
                          query_pb2.PropertyOrder.DESCENDING,
                          query_pb2.PropertyOrder.ASCENDING])

    def test_distinct_on(self):
        pb = self._call_fut(_Query(distinct_on=['a', 'b', 'c']))
        self.assertEqual([item.name for item in pb.distinct_on],
                         ['a', 'b', 'c'])
class _Query(object):
    """Minimal stand-in for ``datastore.query.Query`` exposing bare attributes."""

    def __init__(self,
                 client=object(),
                 kind=None,
                 project=None,
                 namespace=None,
                 ancestor=None,
                 filters=(),
                 projection=(),
                 order=(),
                 distinct_on=()):
        self._client = client
        # Store every public attribute exactly as passed in.
        for attr_name, attr_value in (
                ('kind', kind),
                ('project', project),
                ('namespace', namespace),
                ('ancestor', ancestor),
                ('filters', filters),
                ('projection', projection),
                ('order', order),
                ('distinct_on', distinct_on)):
            setattr(self, attr_name, attr_value)
class _Client(object):
    """Minimal stand-in for ``datastore.Client`` used by the tests above."""

    def __init__(self, project, datastore_api=None, namespace=None,
                 transaction=None):
        self.project = project
        self._datastore_api = datastore_api
        self.namespace = namespace
        self._transaction = transaction

    @property
    def current_transaction(self):
        """Return the transaction supplied at construction (or None)."""
        return self._transaction
def _make_entity(kind, id_, project):
    """Build an ``entity_pb2.Entity`` whose key has a single path element."""
    from google.cloud.proto.datastore.v1 import entity_pb2
    key = entity_pb2.Key()
    key.partition_id.project_id = project
    path_element = key.path.add()
    path_element.kind = kind
    path_element.id = id_
    return entity_pb2.Entity(key=key)
def _make_query_response(
        entity_pbs, cursor_as_bytes, more_results_enum, skipped_results):
    """Wrap entity protobufs in a ``RunQueryResponse`` as the API would."""
    from google.cloud.proto.datastore.v1 import datastore_pb2
    from google.cloud.proto.datastore.v1 import query_pb2
    wrapped = [
        query_pb2.EntityResult(entity=entity_pb) for entity_pb in entity_pbs
    ]
    batch = query_pb2.QueryResultBatch(
        skipped_results=skipped_results,
        end_cursor=cursor_as_bytes,
        more_results=more_results_enum,
        entity_results=wrapped,
    )
    return datastore_pb2.RunQueryResponse(batch=batch)
def _make_datastore_api(result=None):
    """Return a mock datastore API whose ``run_query`` yields *result*."""
    run_query_mock = mock.Mock(return_value=result, spec=[])
    return mock.Mock(run_query=run_query_mock, spec=['run_query'])
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/mnt/hgfs/tmpcode/pyqt-http/untitled.ui'
#
# Created: Fri Jun 5 10:59:33 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import socket
import signal
import errno
import sys
import os
import platform
import time
#from sendfile import sendfile
class Worker(QtCore.QThread):
    """Background thread that serves a single file over a raw HTTP socket."""

    # Progress signal: (bytes_sent_delta, total_length, "ip:port").
    trigger = QtCore.pyqtSignal(int, int, str)

    def __init__(self,parent=None):
        super(Worker,self).__init__(parent)

    def __del__(self):
        # Block until run() finishes so the thread object is not destroyed
        # while still executing.
        self.wait()

    def set(self, strHost, port, httpheader, fullFileName, totalLen):
        """Stash host/port/header/file parameters before start()."""
        self.ip = strHost
        self.p = port
        self.hdr = httpheader
        self.fn = fullFileName
        self.fileLen = totalLen

    def run(self):
        # The listening socket is module-global so it can (in principle)
        # be closed from outside the thread.
        global lisfd
        lisfd = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        lisfd.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        lisfd.bind((self.ip, self.p))
        lisfd.listen(10)
        self.runflag = True
        # Non-blocking accept so the loop can notice runflag being cleared.
        lisfd.setblocking(0)
        while self.runflag:
            if self.runflag == False:
                break
            try:
                confd,addr = lisfd.accept()
            except socket.error, msg:
                # No pending connection (or interrupted): sleep and retry.
                if msg.errno == errno.EINTR or msg.errno == errno.EAGAIN or msg.errno == errno.EWOULDBLOCK:
                    print msg
                else:
                    raise
                time.sleep(1)
                continue
            print "connect by ",addr
            ip = addr[0]
            port = addr[1]
            addrStr = "%s:%d"%(ip, port)
            confd.settimeout(10)
            try:
                # Read (and discard) the client's HTTP request.
                data = confd.recv(1024)
            except socket.error, msg:
                confd.close()
                continue
            if not data:
                break
            print(data)
            confd.send(self.hdr)
            print addrStr
            # Delta of 0 tells the UI a new transfer has started.
            self.trigger.emit(0, self.fileLen, addrStr)
            file = open(self.fn, "rb")
            while True:
                if self.runflag == False:
                    return
                chunk = file.read(65536)
                if not chunk:
                    break # EOF
                try:
                    confd.sendall(chunk)
                except socket.error, msg:
                    print msg
                    lisfd.close()
                    return
                # NOTE(review): reports a fixed 65536 even for the short
                # final chunk; the UI clamps the ratio so the display
                # still ends at 100%.
                self.trigger.emit(65536, self.fileLen, addrStr)
            confd.close()
            self.trigger.emit(self.fileLen, self.fileLen, addrStr)
            print "send fin"
        else:
            # while/else: runs only when runflag went False (normal stop),
            # not when the loop exits via break.
            lisfd.close()
            print "stop"
def GetFileSize(filename):
    """Return the size of *filename* in bytes.

    Thin wrapper around :func:`os.path.getsize`.  The previous version
    bound the result to a local named ``len``, shadowing the builtin.
    """
    return os.path.getsize(filename)
def HttpResponse(header,filename):
    """Build a naive HTTP response string: ``<header> <size>\\n\\n<body>\\n\\n``.

    :param header: status line prefix, e.g. ``"HTTP/1.1 200 OK"``.
    :param filename: path of the file whose contents form the body.

    Fixes a resource leak (the file object was opened and never closed)
    and reads the file in one call instead of joining ``readlines()`` --
    the produced string is identical.
    """
    size = os.path.getsize(filename)
    with open(filename, "rb") as f:
        context = f.read()
    return "%s %d\n\n%s\n\n" % (header, size, context)
def TestPlatform():
    """Print operating-system and Python-interpreter details."""
    print("----------Operation System--------------------------")
    # architecture(): e.g. (32bit, WindowsPE) / (32bit, ELF)
    # platform():     e.g. Windows-XP-5.1.2600-SP3 / Linux-2.6.18-...
    # system():       'Windows' or 'Linux'
    for detail in (platform.architecture(),
                   platform.platform(),
                   platform.system()):
        print(detail)
    print("--------------Python Version-------------------------")
    print(platform.python_version())
def WhichPlatform():
    """Map ``platform.system()`` onto 'windows' / 'linux' / 'others'."""
    system_name = platform.system()
    known = {
        "Windows": ("Call Windows tasks", "windows"),
        "Linux": ("Call Linux tasks", "linux"),
    }
    message, tag = known.get(system_name, ("Other System tasks", "others"))
    print(message)
    return tag
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt API v2 / Python 3 builds have no QString; pass strings through.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt drops the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
from PyQt4 import QtGui, QtCore
from PIL import ImageQt
import qrcode
class Image(qrcode.image.base.BaseImage):
    """qrcode image factory that renders QR modules into a QImage."""

    def __init__(self, border, width, box_size):
        self.border = border
        self.width = width
        self.box_size = box_size
        # Total pixel size: modules plus the quiet zone on both sides.
        size = (width + border * 2) * box_size
        self._image = QtGui.QImage(
            size, size, QtGui.QImage.Format_RGB16)
        self._image.fill(QtCore.Qt.white)

    def pixmap(self):
        """Return the rendered code as a QPixmap (for QLabel.setPixmap)."""
        return QtGui.QPixmap.fromImage(self._image)

    def drawrect(self, row, col):
        # Paint one dark module, offset by the quiet-zone border.
        painter = QtGui.QPainter(self._image)
        painter.fillRect(
            (col + self.border) * self.box_size,
            (row + self.border) * self.box_size,
            self.box_size, self.box_size,
            QtCore.Qt.black)

    def save(self, stream, kind=None):
        # Required by the BaseImage interface; nothing is persisted here.
        pass
class Ui_MainWindow(object):
    """PyQt4-generated UI layout for the file-sharing window."""

    def setupUi(self, MainWindow):
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(800, 600)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        # "run"/"stop" toggle button.
        self.pushButton = QtGui.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(650, 220, 100, 50))
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        # Label showing the download URL.
        self.linkLabel = QtGui.QLabel(self.centralwidget)
        self.linkLabel.setGeometry(QtCore.QRect(450, 0, 300, 160))
        self.linkLabel.setObjectName(_fromUtf8("label"))
        # Label showing the chosen file path.
        self.label = QtGui.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(455, 160, 200, 60))
        self.label.setObjectName(_fromUtf8("label"))
        self.label.setWordWrap(True)
        # Peer address of the current download.
        self.addr = QtGui.QLabel(self.centralwidget)
        self.addr.setGeometry(QtCore.QRect(450, 105, 150, 30))
        self.addr.setObjectName(_fromUtf8("addr"))
        self.addr.setWordWrap(True)
        self.addr.setText("remoteaddr");
        # sent/total byte counter.
        self.ratio = QtGui.QLabel(self.centralwidget)
        self.ratio.setGeometry(QtCore.QRect(610, 105, 250, 30))
        self.ratio.setObjectName(_fromUtf8("ratio"))
        self.ratio.setWordWrap(True)
        self.ratio.setText("ratio");
        # Port-number entry box.
        self.textEdit = QtGui.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(680, 180, 50, 30))
        self.textEdit.setObjectName(_fromUtf8("textEdit"))
        self.pushButton_2 = QtGui.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(450, 220, 100, 50))
        self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
        # Transfer progress bar (0-100).
        self.pb = QtGui.QProgressBar(self.centralwidget)
        self.pb.setGeometry(QtCore.QRect(450, 130, 300, 20))
        self.pb.setObjectName(_fromUtf8("pb"))
        self.pb.setRange(0, 0)
        self.pb.setRange(0, 100)
        self.pb.setValue(0)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        # QR-code preview of the download URL.
        self.qrLabel = QtGui.QLabel(self.centralwidget)
        self.qrLabel.setGeometry(QtCore.QRect(20, 2, 300, 300))
        self.qrLabel.setObjectName(_fromUtf8("label"))
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def refreshQRCode(self, port):
        """Render the share URL for *port* as link text and as a QR code."""
        global localIP
        text = unicode("http://%s:%d"%(localIP, port))
        self.linkLabel.setText("Please visit: %s"%text);
        print text
        self.qrLabel.setPixmap(
            qrcode.make(text, image_factory=Image).pixmap())

    def retranslateUi(self, MainWindow):
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.pushButton.setText(_translate("MainWindow", "run", None))
        self.label.setText(_translate("MainWindow", "choose a file", None))
        self.textEdit.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
            "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
            "p, li { white-space: pre-wrap; }\n"
            "</style></head><body style=\" font-family:\'SimSun\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">1234</p></body></html>", None))
        self.pushButton_2.setText(_translate("MainWindow", "choose file", None))
class Window( QtGui.QMainWindow ):
    """Main window: wires the generated UI to the HTTP-serving Worker."""

    def __init__( self ):
        super( Window, self ).__init__()
        self.setWindowTitle( "hello" )
        self.resize( 200, 300 )
        self.uiWin = Ui_MainWindow()
        self.uiWin.setupUi(self)
        self.fullFileName = ""
        self.fileName = ""
        self.thread=Worker()
        # Old-style PyQt4 signal/slot connections.
        self.connect(self.uiWin.pushButton,
                     QtCore.SIGNAL('clicked()'),
                     self.runHttpSvr)
        self.connect(self.uiWin.pushButton_2,
                     QtCore.SIGNAL('clicked()'),
                     self.chooseFile)
        self.running = False
        self.thread.trigger.connect(self.updatePb)

    def updatePb(self, sent2, total2, addr):
        """Progress slot: accumulate sent bytes and refresh bar/labels."""
        if sent2 == 0:
            # A zero delta marks the start of a new transfer.
            self.sentLen = 0
        self.sentLen += sent2
        total = self.fileLen
        val = self.sentLen/float(total)*100
        if val <= 100:
            self.uiWin.pb.setValue(val)
            self.uiWin.addr.setText(addr)
            self.uiWin.ratio.setText("%d/%d"%(self.sentLen, total))
        else:
            # Worker over-reports the last chunk as a full 65536 bytes;
            # clamp the display at 100%.
            self.uiWin.pb.setValue(100)
            self.uiWin.addr.setText(addr)
            self.uiWin.ratio.setText("%d/%d"%(total, total))

    def runHttpSvr(self):
        """Toggle the serving thread; validate the file and port first."""
        if self.running :
            # Already serving: ask the worker loop to stop.
            self.thread.runflag = False
            self.running = False
            self.uiWin.pushButton.setText("run")
            return
        if len(self.fullFileName) == 0 or len(self.fileName) == 0:
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
                "error", "not choose file",
                QtGui.QMessageBox.NoButton, self)
            msgBox.show()
            return
        if self.fileName and os.path.exists(self.fullFileName):
            print 'OK, the "%s" file exists.'%self.fullFileName
        else:
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
                "error", "Sorry, I cannot find the '%s' file."%self.fullFileName,
                QtGui.QMessageBox.NoButton, self)
            msgBox.show()
            return
        port = int(self.uiWin.textEdit.toPlainText())
        if port < 1 or port > 65535:
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
                "error", "port[%s] error"%self.uiWin.textEdit.toPlainText(),
                QtGui.QMessageBox.NoButton, self)
            msgBox.show()
            return
        strHost = "0.0.0.0"
        self.fileLen = GetFileSize(self.fullFileName)
        # NOTE(review): 'Context-Type'/'Context-Length' look like typos for
        # 'Content-...'; left untouched here since they are runtime strings
        # clients may already tolerate -- confirm before changing.
        httpheader = '''\
HTTP/1.1 200 OK
Context-Type: bin;charset=UTF-8
Server: Python-slp version 1.0
'''
        httpheader += "Content-Disposition: attachment;filename=%s\n" % self.fileName
        httpheader += 'Context-Length: %d\n\n'% self.fileLen
        print httpheader
        self.sentLen = 0
        self.thread.set(strHost, port, httpheader, self.fullFileName, self.fileLen)
        self.thread.start()
        self.running = True
        self.uiWin.refreshQRCode(port)
        self.uiWin.pushButton.setText("stop")

    def chooseFile(self):
        """Let the user pick a file; remember full path and basename."""
        name = QtGui.QFileDialog.getOpenFileName(self)
        if name:
            self.fullFileName = unicode(name , "utf8")
            saperator = '/'
            self.fileName = self.fullFileName.split(saperator)[-1]
            self.uiWin.label.setText(self.fullFileName)
import socket

if WhichPlatform() == "linux":
    # fcntl/struct are POSIX-only; the helper is defined on Linux alone.
    import fcntl
    import struct

    def get_ip_address(ifname):
        """Return the IPv4 address bound to interface *ifname* (Linux only)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )[20:24])
if __name__ == '__main__':
    # 'global' at module scope is a no-op; kept as in the original.
    global localIP
    #localIP = socket.gethostbyname(socket.gethostname())
    # NOTE(review): on Windows neither branch assigns localIP, so the print
    # below would raise NameError; get_ip_address also only exists when
    # WhichPlatform() reported "linux" at import time -- confirm intended.
    if WhichPlatform() != "windows":
        localIP = get_ip_address("eth0")
    print "local ip:%s "%localIP
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
| |
"""
@package mi.dataset.parser.test
@file marine-integrations/mi/dataset/parser/test/test_spkir_abj_cspp.py
@author Jeff Roy
@brief Test code for a spkir_abj_cspp data parser
spkir_abj_cspp is based on cspp_base.py
test_dosta_abcdjm_cspp.py fully tests all of the capabilities of the
base parser. That level of testing is omitted from this test suite
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.idk.config import Config
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.parser.cspp_base import \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.spkir_abj_cspp import \
SpkirAbjCsppParser, \
SpkirAbjCsppInstrumentTelemeteredDataParticle, \
SpkirAbjCsppMetadataTelemeteredDataParticle, \
SpkirAbjCsppInstrumentRecoveredDataParticle, \
SpkirAbjCsppMetadataRecoveredDataParticle
log = get_logger()
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi', 'dataset', 'driver', 'spkir_abj', 'cspp', 'resource')
@attr('UNIT', group='mi')
class SpkirAbjCsppParserUnitTestCase(ParserUnitTestCase):
"""
spkir_abj_cspp Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._telem_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.spkir_abj_cspp',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: SpkirAbjCsppMetadataTelemeteredDataParticle,
DATA_PARTICLE_CLASS_KEY: SpkirAbjCsppInstrumentTelemeteredDataParticle,
}
}
self._recov_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.spkir_abj_cspp',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: SpkirAbjCsppMetadataRecoveredDataParticle,
DATA_PARTICLE_CLASS_KEY: SpkirAbjCsppInstrumentRecoveredDataParticle,
}
}
def test_simple(self):
"""
Read test data and pull out data particles
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, '11079364_PPD_OCR.txt'), 'rU') as file_handle:
# Note: since the recovered and teelemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = SpkirAbjCsppParser(self._recov_config,
file_handle,
self.exception_callback)
particles = parser.get_records(20)
log.debug("*** test_simple Num particles %s", len(particles))
self.assert_particles(particles, '11079364_PPD_OCR_recov.yml', RESOURCE_PATH)
with open(os.path.join(RESOURCE_PATH, '11079364_PPD_OCR.txt'), 'rU') as file_handle:
# Note: since the recovered and teelemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = SpkirAbjCsppParser(self._telem_config,
file_handle,
self.exception_callback)
particles = parser.get_records(20)
log.debug("*** test_simple Num particles %s", len(particles))
self.assert_particles(particles, '11079364_PPD_OCR_telem.yml', RESOURCE_PATH)
def test_get_many(self):
"""
Read test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, '11079419_PPB_OCR.txt'), 'rU') as file_handle:
# Note: since the recovered and teelemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = SpkirAbjCsppParser(self._recov_config,
file_handle,
self.exception_callback)
# try to get 2000 particles, there are only 1623 data records
# so should get 1624 including the meta data
particles = parser.get_records(2000)
log.debug("*** test_get_many Num particles %s", len(particles))
self.assert_particles(particles, '11079419_PPB_OCR_recov.yml', RESOURCE_PATH)
with open(os.path.join(RESOURCE_PATH, '11079419_PPB_OCR.txt'), 'rU') as file_handle:
# Note: since the recovered and teelemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = SpkirAbjCsppParser(self._telem_config,
file_handle,
self.exception_callback)
# try to get 2000 particles, there are only 1623 data records
# so should get 1624 including the meta data
particles = parser.get_records(2000)
log.debug("*** test_get_many Num particles %s", len(particles))
self.assert_particles(particles, '11079419_PPB_OCR_telem.yml', RESOURCE_PATH)
def test_bad_data(self):
"""
Ensure that bad data is skipped when it exists.
"""
# the first data record in this file is corrupted and will be ignored
# we expect the first 2 particles to be the metadata particle and the
# intrument particle from the data record after the corrupted one
with open(os.path.join(RESOURCE_PATH, '11079419_BAD_PPB_OCR.txt'), 'rU') as file_handle:
log.debug(self.exception_callback_value)
parser = SpkirAbjCsppParser(self._recov_config,
file_handle,
self.exception_callback)
particles = parser.get_records(2)
self.assert_particles(particles, 'bad_data_record_recov.yml', RESOURCE_PATH)
with open(os.path.join(RESOURCE_PATH, '11079419_BAD_PPB_OCR.txt'), 'rU') as file_handle:
log.debug(self.exception_callback_value)
parser = SpkirAbjCsppParser(self._telem_config,
file_handle,
self.exception_callback)
particles = parser.get_records(2)
self.assert_particles(particles, 'bad_data_record_telem.yml', RESOURCE_PATH)
def test_extra_data(self):
    """
    Ensure that records containing extra data values are skipped.
    """
    # The first 2 data records in this file are corrupted by additional
    # tab-separated data values and will be ignored; the first 2 particles
    # are expected to be the metadata particle and the instrument particle
    # from the data record after the corrupted ones.
    #
    # The recovered and telemetered parsers share their implementation, so
    # the same input file is run through both configurations (previously
    # duplicated copy-paste blocks).
    for config, yml_file in ((self._recov_config, 'extra_data_values_recov.yml'),
                             (self._telem_config, 'extra_data_values_telem.yml')):
        with open(os.path.join(RESOURCE_PATH, '11079364_EXTRA_DATA_PPD_OCR.txt'), 'rU') as file_handle:
            log.debug(self.exception_callback_value)

            parser = SpkirAbjCsppParser(config,
                                        file_handle,
                                        self.exception_callback)

            particles = parser.get_records(2)

            # each corrupted record should have produced a recoverable
            # sample exception
            self.assertEquals(len(self.exception_callback_value), 2)
            for exception in self.exception_callback_value:
                self.assert_(isinstance(exception, RecoverableSampleException))

            # expect to see a recoverable sample exception in the log
            log.debug('TEST EXTRA DATA exception call back is %s', self.exception_callback_value)

            self.assert_particles(particles, yml_file, RESOURCE_PATH)

        # reset collected exceptions so the next configuration starts clean
        self.exception_callback_value = []
| |
import xml.etree.ElementTree as etree
import pandas as pd
import numpy as np
# TODO: add to_pi_json() method. (Both PiTimeSeries and PiTimeSeriesCollection should be able to call this method)
# TODO: adapt to_pi_xml() and to_pi_json() from PiTimeSeries by Mattijn. Probably more robust write methods.
class PiBase:
    """
    Mix-in class for functionality that applies to both FewsTimeSeries and
    FewsTimeSeriesCollection (shared PI-XML/PI-JSON writers).
    """

    def to_pi_json(self, fnam):
        # TODO: write to_pi_json function.
        raise NotImplementedError()

    def to_pi_xml(self, fnam):
        """
        Write PiTimeSeries object to PI-XML file.

        Parameters
        ----------
        fnam: path
            path to XML file to be written (must end in '.xml')

        TODO: allow events (timeseries lines) to accept other fields besides
        'date', 'time', 'value', 'flag'
        """
        assert fnam.endswith(".xml"), "Output file should have '.xml' extension!"
        # first line of XML file
        line0 = '<?xml version="1.0" encoding="UTF-8"?>\n'

        # namespace/schema definitions for the timeseries XML file
        NS = r"http://www.wldelft.nl/fews/PI"
        FS = r"http://www.wldelft.nl/fews/fs"
        XSI = r"http://www.w3.org/2001/XMLSchema-instance"
        schemaLocation = (
            r"http://fews.wldelft.nl/schemas/version1.0/Pi-schemas/pi_timeseries.xsd"
        )
        timeseriesline = '<TimeSeries xmlns="{NS}" xmlns:xsi="{XSI}" xsi:schemaLocation="{NS} {schema}" version="{version}" xmlns:fs="{FS}">\n'

        # line templates
        paramline = "<{tag}>{param}</{tag}>\n"

        # write file
        with open(fnam, "w") as f:
            f.write(line0)
            f.write(
                timeseriesline.format(
                    NS=NS, FS=FS, XSI=XSI, schema=schemaLocation, version=self.version
                )
            )
            tzline = "\t" + paramline.format(tag="timeZone", param=self.timezone)
            f.write(tzline)

            # single series when called on a FewsTimeSeries, otherwise one
            # series per row of the collection's DataFrame
            N = 1 if isinstance(self, FewsTimeSeries) else self.timeseries.shape[0]
            for i in range(N):
                if isinstance(self, FewsTimeSeries):
                    ts = self
                elif isinstance(self, FewsTimeSeriesCollection):
                    ts = self.timeseries["events"].iloc[i]

                # start series
                f.write("\t" + "<series>\n")

                # write header
                hlines = []
                hlines.append(2 * "\t" + "<header>\n")
                for htag, hval in ts.header.items():
                    if htag.endswith("Date"):
                        try:
                            hdate = hval.strftime("%Y-%m-%d")
                            htime = hval.strftime("%H:%M:%S")
                        except AttributeError:
                            # header date stored as a string, not a datetime:
                            # refresh the header from the timeseries index
                            ts._update_header_dates()
                            hdate, htime = ts.header[htag].split(" ")
                        hline = '<{tag} date="{date}" time="{time}"/>\n'.format(
                            tag=htag, date=hdate, time=htime
                        )
                    elif htag.endswith("timeStep"):
                        hline = '<{tag} unit="{unit}"/>\n'.format(tag=htag, unit=hval)
                    else:
                        hline = paramline.format(tag=htag, param=hval)
                    hlines.append(3 * "\t" + hline)
                hlines.append(2 * "\t" + "</header>\n")
                f.writelines(hlines)

                # write timeseries events.
                # BUG FIX: pd.datetime (a deprecated alias of
                # datetime.datetime) was removed in pandas 2.0; format the
                # index Timestamps directly instead -- identical output.
                dates = ts.timeseries.reset_index()["index"].apply(
                    lambda s: s.strftime("%Y-%m-%d")
                )
                times = ts.timeseries.reset_index()["index"].apply(
                    lambda s: s.strftime("%H:%M:%S")
                )
                values = ts.timeseries["value"].astype(str)
                flags = ts.timeseries["flag"].astype(str)
                events = (
                    2 * "\t"
                    + '<event date="'
                    + dates.values
                    + '" time="'
                    + times.values
                    + '" value="'
                    + values.values
                    + '" flag="'
                    + flags.values
                    + '"/>\n'
                )
                f.writelines(events)

                # end series
                f.write("\t" + "</series>\n")

            # end Timeseries
            f.write("</TimeSeries>\n")
class FewsTimeSeriesCollection(PiBase):
    """
    Object for XML data. Read from file, add/remove series manually and write
    to new XML file.
    """

    def __init__(self, timeseries=None, timezone=None, version=1.19):
        """
        Initializes FewsTimeSeriesCollection object.

        Parameters
        ----------
        timeseries: pd.DataFrame, default None
            DataFrame containing specific columns (as created by the
            from_pi_xml() method). If None, an empty DataFrame is created
            with the default column names.
        timezone: float, default None
            float indicating timezone offset in hours (defaults to 1.0, the
            Netherlands); converted to an 'Etc/GMT*' string on this instance
        version: float, default 1.19
            version of the XML file
        """
        if timeseries is None:
            # default header columns, matching what from_pi_xml() produces
            columns = [
                "endDate",
                "events",
                "lat",
                "locationId",
                "lon",
                "missVal",
                "moduleInstanceId",
                "parameterId",
                "startDate",
                "stationName",
                "timeStep",
                "type",
                "units",
                "x",
                "y",
            ]
            self.timeseries = pd.DataFrame(columns=columns)
        else:
            self.timeseries = timeseries
        self.timezone = 1.0 if timezone is None else timezone
        # Etc/GMT* follows POSIX standard, including counter-intuitive sign
        # change: see https://stackoverflow.com/q/51542167/2459096
        if self.timezone >= 0:
            self.timezone = "Etc/GMT-" + str(self.timezone)
        else:
            self.timezone = "Etc/GMT+" + str(self.timezone)
        self.version = version

    def add_series(self, dfseries, metadata):
        """
        Add series to the collection.

        Parameters
        ----------
        dfseries: pd.DataFrame
            Timeseries to add, must have DateTimeIndex and have columns with
            name "value" and "flag"
        metadata: dict
            dictionary containing header. Common entries include 'x', 'y',
            'lat', 'lon', 'missVal', 'stationName', 'type', 'units',
            'moduleInstanceId', 'qualifierId', 'parameterId', 'locationId'

        Notes
        -----
        It is unclear whether the entries in header are required or optional.
        Some possible values for header entries are shown below
        in case they need to be supplied:
        - 'missVal': np.nan
        - 'stationName': np.nan
        - 'units': 'm'
        - 'type': 'instantaneous'
        """
        # TODO: additional checks needed for input to ensure write works
        # succesfully? Check keys of header if correct fields are present?
        assert isinstance(
            dfseries.index, pd.core.indexes.datetimes.DatetimeIndex
        ), "DataFrame needs to have DateTimeIndex!"
        assert {"value", "flag"} <= set(
            dfseries.columns
        ), "DataFrame requires columns named 'value' and 'flag'!"
        # Append to existing DF or add new row to empty DF
        try:
            new_index = self.timeseries.index[-1] + 1
        except IndexError:
            new_index = 0
        # add metadata info to header
        for k, v in metadata.items():
            self.timeseries.loc[new_index, k] = v
        # add timeseries as FewsTimeSeries object
        # NOTE(review): timezone is hard-coded to 1.0 here rather than taken
        # from self.timezone -- confirm whether that is intended.
        pi_timeseries = FewsTimeSeries(
            timeseries=dfseries, header=metadata, timezone=1.0
        )
        self.timeseries.loc[new_index, "events"] = pi_timeseries

    @classmethod
    def from_pi_xml(cls, fname):
        """
        Create a FewsTimeSeriesCollection object from an existing XML file.

        Parameters
        ----------
        fname: path
            path to XML file

        Returns
        -------
        cls: FewsTimeSeriesCollection object
            one row per <series> element found in the file
        """
        tree = etree.parse(fname)
        root = tree.getroot()
        data = []
        # default timezone, overwritten if found in file
        tz = 1.0
        for i in range(len(root)):
            if root[i].tag.endswith("timeZone"):
                # BUG FIX: np.float (an alias of the builtin float) was
                # removed in NumPy 1.24; use float, which behaves identically.
                tz = float(
                    root[i].text.replace(",", ".")
                )  # you never know with those Dutchies if they put decimal commas...
            if root[i].tag.endswith("series"):
                series = {}
                header = {}
                date = []
                flag = []
                time = []
                value = []
                for j in range(len(root[i])):
                    if root[i][j].tag.endswith("header"):
                        for k in range(len(root[i][j])):
                            # check if start and end date are read correctly!
                            prop = root[i][j][k].tag.split("}")[-1]
                            val = root[i][j][k].text
                            header[prop] = val
                    elif root[i][j].tag.endswith("event"):
                        date.append(root[i][j].attrib["date"])
                        flag.append(root[i][j].attrib["flag"])
                        time.append(root[i][j].attrib["time"])
                        value.append(root[i][j].attrib["value"])
                # combine events in a dataframe
                index = pd.to_datetime([d + " " + t for d, t in zip(date, time)])
                timeseries = pd.DataFrame(
                    {"flag": flag, "value": value}, index=index, dtype=float
                )
                pi_timeseries = FewsTimeSeries(
                    timeseries=timeseries, header=header, timezone=tz
                )
                series.update(header)
                series["events"] = pi_timeseries
                data.append(series)
        return cls(timeseries=pd.DataFrame(data), timezone=tz)
class FewsTimeSeries(PiBase):
    """A single PI-XML timeseries: a header dict plus a DataFrame of events."""

    def __init__(self, timeseries=None, header=None, timezone=None, version=1.19):
        """
        Parameters
        ----------
        timeseries: pd.DataFrame, default None
            events with DateTimeIndex and 'value' and 'flag' columns
        header: dict, default None
            PI-XML header entries for this series
        timezone: float, default None
            timezone offset in hours (defaults to 1.0, the Netherlands);
            converted to an 'Etc/GMT*' string on this instance
        version: float, default 1.19
            version of the XML file
        """
        self.header = header
        self.timeseries = timeseries
        self.timezone = 1.0 if timezone is None else timezone
        # Etc/GMT* follows POSIX standard, including counter-intuitive sign
        # change: see https://stackoverflow.com/q/51542167/2459096
        if self.timezone >= 0:
            self.timezone = "Etc/GMT-" + str(self.timezone)
        else:
            self.timezone = "Etc/GMT+" + str(self.timezone)
        self.version = version

    def __eq__(self, other):
        """Override the default Equals behavior"""
        if isinstance(other, self.__class__):
            return (
                np.all(self.timeseries == other.timeseries)
                and (self.timezone == other.timezone)
                and (self.version == other.version)
                and (self.header == other.header)
            )
        return False

    @classmethod
    def from_pi_xml(cls, fname):
        """
        Initialize FewsTimeSeries object from XML file. Assumes file contains
        only one timeseries. If not, stops after reading the first timeseries.
        Use FewsTimeSeriesCollection for reading in multiple timeseries.

        Parameters
        ----------
        fname: str
            path of PI XML file to read.

        Returns
        -------
        FewsTimeSeries: FewsTimeSeries object
            instance of FewsTimeSeries object
        """
        tree = etree.parse(fname)
        root = tree.getroot()
        # default timeZone, overwritten if found in file
        tz = 1.0
        scount = 0  # count series to quit parsing after first series
        for i in range(len(root)):
            if root[i].tag.endswith("timeZone"):
                # BUG FIX: np.float (an alias of the builtin float) was
                # removed in NumPy 1.24; use float, which behaves identically.
                tz = float(
                    root[i].text.replace(",", ".")
                )  # ensure decimal point is used (not comma)
            if root[i].tag.endswith("series"):
                if (
                    scount >= 1
                ):  # ugly but effective method to only parse first series
                    break
                header = {}
                date = []
                flag = []
                time = []
                value = []
                for j in range(len(root[i])):
                    if root[i][j].tag.endswith("header"):
                        for k in range(len(root[i][j])):
                            prop = root[i][j][k].tag.split("}")[-1]
                            val = root[i][j][k].text
                            header[prop] = val
                    elif root[i][j].tag.endswith("event"):
                        date.append(root[i][j].attrib["date"])
                        flag.append(root[i][j].attrib["flag"])
                        time.append(root[i][j].attrib["time"])
                        value.append(root[i][j].attrib["value"])
                # combine events in a dataframe
                index = pd.to_datetime([d + " " + t for d, t in zip(date, time)])
                timeseries = pd.DataFrame(
                    {"flag": flag, "value": value}, index=index, dtype=float
                )
                scount += 1
        return cls(timeseries=timeseries, header=header, timezone=tz)

    def _update_header_dates(self):
        """
        If read fails to fill in startDate and endDate retrieve these from the
        timeseries index and update the header.
        """
        hupdate = {}
        for hcol in ["startDate", "endDate"]:
            ind = 0 if hcol.startswith("start") else -1  # start or end date
            hdate = self.timeseries.index[ind].strftime("%Y-%m-%d")
            htime = self.timeseries.index[ind].strftime("%H:%M:%S")
            hupdate[hcol] = hdate + " " + htime
        self.header.update(hupdate)

    def plot(self, **kwargs):
        """
        Pass plot command to DataFrame.
        """
        self.timeseries.plot(**kwargs)
if __name__ == "__main__":
    # Smoke test for the classes above; relies on a local notebooks checkout
    # with test_2series.xml present and writes temp files to the cwd.

    # load 1 series from an XML and write to file
    pi_ts1 = FewsTimeSeries.from_pi_xml(fname=r"../notebooks/test_2series.xml")
    pi_ts1.to_pi_xml("temp_1series.xml")

    # load all series from an XML and write to file
    ts_all = FewsTimeSeriesCollection.from_pi_xml(
        fname=r"../notebooks/test_2series.xml"
    )

    # add series to collection loaded above
    ts_all.add_series(pi_ts1.timeseries, pi_ts1.header)

    # write to file
    ts_all.to_pi_xml("temp_3series.xml")

    # type of timeseries events in FewsTimeSeriesCollection should be FewsTimeSeries:
    print(type(ts_all.timeseries.events[0]))

    # test equality of FewsTimeSeries and the same series in FewsTimeSeriesCollection
    print(ts_all.timeseries.events[0] == pi_ts1)

    # plot timeseries (matplotlib imported lazily: only needed for plotting)
    import matplotlib.pyplot as plt

    pi_ts1.plot(y="value")
    plt.show()
| |
#!/usr/bin/env python
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
#
##########################################################################
##########################################################################
#
# Module: run-throughput-tests.py
#
# Notes: runs throughput testing for coreclr and uploads the timing results
# to benchview
#
#
##########################################################################
##########################################################################
import argparse
import distutils.dir_util
import os
import re
import shutil
import subprocess
import sys
import time
import timeit
import stat
import csv
##########################################################################
# Globals
##########################################################################

# List of dlls we want to exclude from crossgen throughput measurement,
# keyed by OS group ('All' applies everywhere).
dll_exclude_list = {
    'All': [
        # Require Newtonsoft.Json
        "Microsoft.DotNet.ProjectModel.dll",
        "Microsoft.Extensions.DependencyModel.dll",
        # Require System.Security.Principal.Windows
        "System.Net.Requests.dll",
        "System.Net.Security.dll",
        "System.Net.Sockets.dll",
        # Moving target. Not a reliable measurement
        "System.Private.CoreLib.dll",
        # Reference and forwarding assemblies
        "System.AppContext.dll",
        "System.Diagnostics.Contracts.dll",
        "System.Dynamic.Runtime.dll",
        "System.Globalization.dll",
        "System.Globalization.Calendars.dll",
        "System.IO.dll",
        "System.IO.FileSystem.Primitives.dll",
        "System.Reflection.dll",
        "System.Reflection.Emit.dll",
        "System.Reflection.Emit.ILGeneration.dll",
        "System.Reflection.Emit.Lightweight.dll",
        "System.Reflection.Extensions.dll",
        "System.Reflection.Primitives.dll",
        "System.Resources.ResourceManager.dll",
        "System.Runtime.Handles.dll",
        "System.Runtime.Loader.dll",
        "System.Runtime.Serialization.Json.dll",
        "System.Runtime.Serialization.Xml.dll",
        "System.Security.Principal.dll",
        "System.Text.Encoding.dll",
        "System.Text.Encoding.Extensions.dll",
        "System.Threading.ThreadPool.dll",
        "System.Threading.Timer.dll",
        "System.Xml.ReaderWriter.dll",
        "System.Xml.XDocument.dll",
        "System.Xml.XmlDocument.dll",
        "System.Xml.XmlSerializer.dll",
        "System.Xml.XPath.dll"
    ],
    'Windows_NT': [
    ],
    'Linux' : [
    ]
}

# JIT library filename per OS group and architecture; 'x86lb' selects the
# legacy-backend JIT on Windows x86.
jit_list = {
    'Windows_NT': {
        'x64': 'clrjit.dll',
        'x86': 'clrjit.dll',
        'x86lb': 'legacyjit.dll'
    },
    'Linux': {
        'x64': 'libclrjit.so'
    }
}

# Maps the -os argument value to the OS group used for lookups above.
os_group_list = {
    'Windows_NT': 'Windows_NT',
    'Ubuntu14.04': 'Linux'
}

# Python launcher used to invoke the benchview helper scripts per OS group.
python_exe_list = {
    'Windows_NT': 'py',
    'Linux': 'python3.5'
}

##########################################################################
# Argument Parser
##########################################################################

description = 'Tool to collect throughtput performance data'

parser = argparse.ArgumentParser(description=description)

# All arguments are validated and normalized later by validate_args().
parser.add_argument('-arch', dest='arch', default='x64')
parser.add_argument('-configuration', dest='build_type', default='Release')
parser.add_argument('-run_type', dest='run_type', default='rolling')
parser.add_argument('-os', dest='operating_system', default='Windows_NT')
parser.add_argument('-clr_root', dest='clr_root', default=None)
parser.add_argument('-assembly_root', dest='assembly_root', default=None)
parser.add_argument('-benchview_path', dest='benchview_path', default=None)
parser.add_argument('-iterations', dest='iterations', default=5, type=int)
parser.add_argument('-opt_level', dest='opt_level', default='full_opt')
parser.add_argument('-jit_name', dest='jit_name', default='ryujit')
##########################################################################
# Helper Functions
##########################################################################
def validate_args(args):
    """ Validate all of the arguments parsed.

    Args:
        args (argparse.Namespace): Args parsed by the argument parser.

    Returns:
        (arch, operating_system, os_group, build_type, run_type, clr_root,
         assembly_root, benchview_path, iterations, opt_level, jit_name)

    Notes:
        If the arguments are valid then return them all in a tuple. If not,
        raise an exception stating which argument is incorrect.
    """
    arch = args.arch
    build_type = args.build_type
    run_type = args.run_type
    operating_system = args.operating_system
    clr_root = args.clr_root
    assembly_root = args.assembly_root
    benchview_path = args.benchview_path
    iterations = args.iterations
    opt_level = args.opt_level.lower()
    jit_name = args.jit_name.lower()

    def validate_arg(arg, check):
        """ Validate an individual arg

        Args:
            arg (str|bool): argument to be validated
            check (lambda: x-> bool): test that returns either True or False
                                    : based on whether the check passes.

        Raises:
            Exception: if arg is None or check(arg) is falsy.
        """
        helper = lambda item: item is not None and check(item)

        if not helper(arg):
            raise Exception('Argument: %s is not valid.' % (arg))

    valid_archs = {'Windows_NT': ['x86', 'x64'], 'Linux': ['x64']}
    valid_build_types = ['Release']
    valid_run_types = ['rolling', 'private']
    valid_os = ['Windows_NT', 'Ubuntu14.04']
    valid_opt_levels = ['full_opt', 'min_opt']
    valid_jit_names = {'x64': ['ryujit'], 'x86': ['ryujit', 'legacy_backend']}

    # normalize user-supplied casing to the canonical spelling before validating
    arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
    build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)

    validate_arg(operating_system, lambda item: item in valid_os)

    # os_group_list is a module-level table mapping the -os value to its group
    os_group = os_group_list[operating_system]

    validate_arg(arch, lambda item: item in valid_archs[os_group])
    validate_arg(build_type, lambda item: item in valid_build_types)
    validate_arg(run_type, lambda item: item in valid_run_types)
    validate_arg(iterations, lambda item: item > 0)
    validate_arg(opt_level, lambda item: item in valid_opt_levels)
    validate_arg(jit_name, lambda item: item in valid_jit_names[arch])

    if clr_root is None:
        raise Exception('--clr_root must be set')
    else:
        clr_root = os.path.normpath(clr_root)
        validate_arg(clr_root, lambda item: os.path.isdir(clr_root))

    if assembly_root is None:
        raise Exception('--assembly_root must be set')
    else:
        assembly_root = os.path.normpath(assembly_root)
        validate_arg(assembly_root, lambda item: os.path.isdir(assembly_root))

    # benchview_path is optional: when omitted, results are only logged locally
    if not benchview_path is None:
        benchview_path = os.path.normpath(benchview_path)
        validate_arg(benchview_path, lambda item: os.path.isdir(benchview_path))

    args = (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path, iterations, opt_level, jit_name)

    # Log configuration
    log('Configuration:')
    log(' arch: %s' % arch)
    log(' os: %s' % operating_system)
    log(' os_group: %s' % os_group)
    log(' build_type: %s' % build_type)
    log(' opt_level: %s' % opt_level)
    log(' jit_name: %s' % jit_name)
    log(' run_type: %s' % run_type)
    log(' iterations: %d' % iterations)
    log(' clr_root: %s' % clr_root)
    log(' assembly_root: %s' % assembly_root)
    if not benchview_path is None:
        log('benchview_path : %s' % benchview_path)

    return args
def nth_dirname(path, n):
    """ Find the Nth parent directory of the given path

    Args:
        path (str): path name containing at least N components
        n (int): num of basenames to remove

    Returns:
        outpath (str): path with the last n components removed

    Notes:
        If n is 0, path is returned unmodified
    """
    assert n >= 0

    remaining = n
    while remaining > 0:
        path = os.path.dirname(path)
        remaining -= 1

    return path
def del_rw(action, name, exc):
    """ shutil.rmtree onerror handler: clear the read-only attribute on the
    offending path and retry the delete.

    Args:
        action: the function that raised (passed by shutil.rmtree, unused)
        name (str): path that could not be removed
        exc: exception info tuple (passed by shutil.rmtree, unused)
    """
    # NOTE(review): assumes the failure was a permission error on a file,
    # not a directory -- os.remove would fail on a directory.
    os.chmod(name, stat.S_IWRITE)
    os.remove(name)
def log(message):
    """ Print logging information, tagged with the script name.

    Args:
        message (str): message to be printed
    """
    print('[{0}]: {1}'.format(sys.argv[0], message))
def generateCSV(dll_name, dll_runtimes):
    """ Write throughput performance data to a csv file to be consumed by
    measurement.py

    Args:
        dll_name (str): the name of the dll
        dll_runtimes (float[]): A list of runtimes for each iteration of the
            performance test

    Returns:
        csv_file_name (str): name of the csv file written in the cwd
    """
    csv_file_name = "throughput-%s.csv" % (dll_name)
    csv_file_path = os.path.join(os.getcwd(), csv_file_name)

    with open(csv_file_path, 'w') as handle:
        writer = csv.writer(handle, delimiter=',', lineterminator='\n')
        writer.writerows(
            ["default", "coreclr-crossgen-tp", dll_name, runtime]
            for runtime in dll_runtimes
        )

    return csv_file_name
def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assemblies_path, opt_level, jit_name):
    """ Run throughput testing for a given dll

    Args:
        dll_name: the name of the dll
        dll_path: the path to the dll
        iterations: the number of measured runs (one extra warm-up run is
            added; the first value is dropped downstream via
            --drop-first-value)
        crossgen_path: the path to crossgen
        jit_path: the path to the jit
        assemblies_path: the path to the assemblies that may be needed for
            the crossgen run
        opt_level: 'full_opt' or 'min_opt'
        jit_name: 'ryujit' or 'legacy_backend'

    Returns:
        dll_elapsed_times: a list of the elapsed times (ms) for the dll;
            failed runs are logged and skipped
    """
    dll_elapsed_times = []

    # Set up arguments for running crossgen
    run_args = [crossgen_path,
            '/JITPath',
            jit_path,
            '/Platform_Assemblies_Paths',
            assemblies_path,
            dll_path
            ]

    # BUG FIX: previously `my_env = os.environ` aliased (did not copy) the
    # process environment, so the COMPlus_* settings leaked into the parent
    # process and persisted across calls with different opt_level/jit_name.
    my_env = os.environ.copy()

    if opt_level == 'min_opt':
        my_env['COMPlus_JITMinOpts'] = '1'

    if jit_name == 'legacy_backend':
        my_env['COMPlus_AltJit'] = '*'
        my_env['COMPlus_AltJitNgen'] = '*'

    log(" ".join(run_args))

    # timeit.default_timer() returns seconds; multiply to report milliseconds
    multiplier = 1000

    for iteration in range(0, iterations + 1):
        proc = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env)

        start_time = timeit.default_timer()
        (out, err) = proc.communicate()
        end_time = timeit.default_timer()

        if proc.returncode == 0:
            # Calculate the runtime in milliseconds
            elapsed_time = (end_time - start_time) * multiplier
            dll_elapsed_times.append(elapsed_time)
        else:
            log("Error in %s" % (dll_name))
            log(err.decode("utf-8"))

    return dll_elapsed_times
##########################################################################
# Main
##########################################################################

def main(args):
    """ Validate args, crossgen each framework dll under assembly_root while
    timing it, and optionally publish the results through the benchview
    helper scripts.

    Args:
        args (argparse.Namespace): parsed command-line arguments

    Returns:
        int: 0 on completion
    """
    global dll_exclude_list
    global jit_list
    global os_group_list
    global python_exe_list

    architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path, iterations, opt_level, jit_name = validate_args(args)
    arch = architecture

    # the legacy backend is selected via a pseudo-architecture key in jit_list
    if jit_name == 'legacy_backend':
        architecture = 'x86lb'

    current_dir = os.getcwd()
    jit = jit_list[os_group][architecture]
    crossgen = 'crossgen'

    if os_group == 'Windows_NT':
        crossgen += '.exe'

    # Make sandbox: a scratch working directory under clr_root
    sandbox_path = os.path.join(clr_root, "sandbox")
    if os.path.isdir(sandbox_path):
        # del_rw clears read-only attributes so rmtree can delete everything
        shutil.rmtree(sandbox_path, onerror=del_rw)

    os.makedirs(sandbox_path)
    os.chdir(sandbox_path)

    # Set up paths
    bin_path = os.path.join(clr_root, 'bin', 'Product', os_group + '.' + arch + '.' + build_type)

    crossgen_path = os.path.join(bin_path,crossgen)
    jit_path = os.path.join(bin_path, jit)

    # Replace assembly_root's System.Private.CoreLib with built System.Private.CoreLib.
    shutil.copyfile(os.path.join(bin_path, 'System.Private.CoreLib.dll'), os.path.join(assembly_root, 'System.Private.CoreLib.dll'))

    python_exe = python_exe_list[os_group]

    # Run throughput testing
    for dll_file_name in os.listdir(assembly_root):
        # Find all framework dlls in the assembly_root dir, which we will crossgen
        # (skip native images and anything in the exclude lists)
        if (dll_file_name.endswith(".dll") and
                (not ".ni." in dll_file_name) and
                ("Microsoft" in dll_file_name or "System" in dll_file_name) and
                (not dll_file_name in dll_exclude_list[os_group]) and
                (not dll_file_name in dll_exclude_list["All"])):
            dll_name = dll_file_name.replace(".dll", "")
            dll_path = os.path.join(assembly_root, dll_file_name)
            dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root, opt_level, jit_name)

            if len(dll_elapsed_times) != 0:
                if not benchview_path is None:
                    # Generate the csv file
                    csv_file_name = generateCSV(dll_name, dll_elapsed_times)
                    shutil.copy(csv_file_name, clr_root)

                    # For each benchmark, call measurement.py to fold the csv
                    # into measurement.json
                    measurement_args = [python_exe,
                            os.path.join(benchview_path, "measurement.py"),
                            "csv",
                            os.path.join(os.getcwd(), csv_file_name),
                            "--metric",
                            "execution_time",
                            "--unit",
                            "milliseconds",
                            "--better",
                            "desc",
                            "--drop-first-value",
                            "--append"]
                    log(" ".join(measurement_args))
                    proc = subprocess.Popen(measurement_args)
                    proc.communicate()
                else:
                    # Write output to console if we are not publishing
                    # (first value is the warm-up run, hence the [1:] slice)
                    log("%s" % (dll_name))
                    log("Duration: [%s]" % (", ".join(str(x) for x in dll_elapsed_times[1:])))

    # Upload the data
    if not benchview_path is None:
        # Call submission.py to wrap measurement.json with build/machine metadata
        submission_args = [python_exe,
                os.path.join(benchview_path, "submission.py"),
                "measurement.json",
                "--build",
                os.path.join(clr_root, "build.json"),
                "--machine-data",
                os.path.join(clr_root, "machinedata.json"),
                "--metadata",
                os.path.join(clr_root, "submission-metadata.json"),
                "--group",
                "CoreCLR-throughput",
                "--type",
                run_type,
                "--config-name",
                build_type,
                "--config",
                "Configuration",
                build_type,
                "--config",
                "OS",
                operating_system,
                "--config",
                "OptLevel",
                opt_level,
                "--config",
                "JitName",
                jit_name,
                "--arch",
                architecture,
                "--machinepool",
                "PerfSnake"
                ]
        log(" ".join(submission_args))
        proc = subprocess.Popen(submission_args)
        proc.communicate()

        # Call upload.py to push submission.json to the storage container
        upload_args = [python_exe,
                os.path.join(benchview_path, "upload.py"),
                "submission.json",
                "--container",
                "coreclr"
                ]
        log(" ".join(upload_args))
        proc = subprocess.Popen(upload_args)
        proc.communicate()

    os.chdir(current_dir)

    return 0
if __name__ == "__main__":
    # Parse CLI arguments and run the harness. NOTE: main()'s return value
    # is not propagated as the process exit code.
    Args = parser.parse_args(sys.argv[1:])
    main(Args)
| |
try:
from urllib import quote
except ImportError as e:
from urllib.parse import quote
from woeid import WoeidError
from six import string_types
__author__ = 'Renchen'
class Filters:
    """A class that encapsulates all filters

    Args:
        q(``str`` or ``tuple``, optional):
            Specify a place name to search for or a tuple that has a place name and a focus. This filter is mutually exclusive with the `woeid` filter. The specified place can be any unicode characters. Focus can be either an ISO-3166-1 country code or a WOEID. For a "startswith" filter, specify the place as a string followed by an asterisk (*).
        woeid(``list``(``str``) or ``list``(``int``), optional):
            Specify a `Where On Earth Identifier` (`woeid`). Up to ten WOEIDs may be specified. This filter is mutually exclusive with the `q` filter. Example: woeid=(1,2,3)
        typ(``list``(``str``) or ``list``(``int``) or ``int``, optional):
            Specify one or more place type codes (https://developer.yahoo.com/geo/geoplanet/guide/concepts.html#placetypes). Up to ten place type codes or names may be provided.
        degree(``int`` or ``str``, optional):
            `.degree` specifier which represents the degree to which two places are neighborhoods. Only consider valid if either `neighbors` or `children` filters are set.
        aand(``boolean``, optional):
            Specify a join operation on two filters. Example:

            >>> import woeid
            >>> api = woeid.Api(client_id='YOUR_CLIENT_ID')
            >>> ret = api.GetPlaces(q='StringField', typ=22, aand=True)
    """
    def __init__(self,
                 q=None,
                 woeid=None,
                 typ=None,
                 degree=None,
                 aand=None):
        filters = {}
        # q and woeid are mutually exclusive; q wins when both are given
        if isinstance(q, string_types) or isinstance(q, tuple):
            filters['q'] = q
        elif woeid and isinstance(woeid, list):
            # Make sure the values are str
            filters['woeid'] = [str(val) for val in woeid if isinstance(val, int) or isinstance(val, string_types)]
        # NOTE: relies on operator precedence, i.e.
        # (typ and isinstance(typ, list)) or isinstance(typ, int)
        if typ and isinstance(typ, list) or isinstance(typ, int):
            filters['type'] = typ
        if degree and isinstance(degree, int):
            filters['degree'] = degree
        if aand and isinstance(aand, bool):
            filters['and'] = aand
        self._filters = filters

    def HasQ(self):
        """Return if the filter object has `.q` filter.
        """
        return 'q' in self._filters

    def HasWoeid(self):
        """Return if the filter object has `.woeid` filter.
        """
        return 'woeid' in self._filters

    def HasType(self):
        """Return if the filter object has `.type` filter
        """
        return 'type' in self._filters

    def HasDegree(self):
        """Return if the filter object has `.degree` filter
        """
        return 'degree' in self._filters

    def HasAnd(self):
        """Return if the filter object has `$and` filter
        """
        return 'and' in self._filters

    def IsValid(self):
        """Return whether the internal filter store is a dict."""
        return isinstance(self._filters, dict)

    def __str__(self):
        """Render the filters as the URL fragment expected by the API."""
        qstr = ''
        woeidstr = ''
        typestr = ''
        degreestr = ''
        andstr = ''
        # work on .q filter
        if self.HasQ():
            if isinstance(self._filters['q'], string_types):
                qstr = quote(self._filters['q'].encode('utf-8'))
            elif isinstance(self._filters['q'], tuple):
                stra = self._filters['q'][0].encode('utf-8')
                strb = self._filters['q'][1].encode('utf-8')
                # Second item will be a focus value
                # Focus can be either an ISO-3166-1 country code or a WOEID.
                # BUG FIX: joining the two encoded parts with a text ','
                # raised TypeError on Python 3 (bytes + str); join with a
                # bytes separator, which works on both Python 2 and 3.
                qstr += quote(stra + b',' + strb)
            else:
                raise WoeidError("Unexpected usage of function! query filter is %s" % self._filters['q'])
            qstr = '.q(%s)'%qstr
        # work on .woeid filter
        if self.HasWoeid():
            if isinstance(self._filters['woeid'], list) and len(self._filters[
                'woeid']) > 1:
                # values were normalized to str in __init__
                for item in self._filters['woeid']:
                    if (isinstance(item, string_types) and item.isdigit()) or isinstance(item, int):
                        woeidstr += quote(item) + ','
                # tick out the last comma
                woeidstr = woeidstr[:-1]
            elif isinstance(self._filters['woeid'], list) and len(
                    self._filters['woeid']) == 1:
                woeidstr = '/' + quote(self._filters['woeid'][0])
            else:
                raise WoeidError("Unexpected usage of function! query filter is %s"%self._filters['woeid'])
            #.woeid can be omitted if there is only one item
            if ',' in woeidstr:
                woeidstr = '.woeid(%s)'%woeidstr
        # work on .type filter
        if 'type' in self._filters:
            tpitem = self._filters['type']
            if isinstance(tpitem, list):
                for item in tpitem:
                    if (isinstance(item, string_types) and item.isdigit()) or isinstance(item, int):
                        typestr += quote(str(item)) + ','
                typestr = typestr[:-1]
                typestr = '.type(%s)'%typestr
            elif (type(tpitem) is str and tpitem.isdigit()) or isinstance(tpitem, int):
                typestr = '.type(%s)'%quote(str(tpitem))
        # work on .degree filter
        if 'degree' in self._filters:
            degree = str(self._filters['degree'])
            degreestr = '.degree(%s)'%degree
        # work on .and filter: join one of (q|woeid) with one of (type|degree)
        if 'and' in self._filters:
            conda = ''
            condb = ''
            if self.HasQ() and qstr:
                conda = qstr
            if self.HasWoeid() and woeidstr:
                conda = woeidstr
            if typestr:
                condb = typestr
            if degreestr:
                condb = degreestr
            if conda and condb:
                andstr = '$and(%s,%s)'%(conda,condb)
        if andstr:
            return andstr
        query_or_woeid_str = qstr if qstr else woeidstr
        return query_or_woeid_str + typestr + degreestr
class Relationships:
""""A class that encapsulates all relationships
Args:
parent(``boolean``, optional):
A relationship specifier used to return a parent place of a given woeid.
ancestors(``boolean``, optional):
A relationship specifier used to return one or more ancestors of a place of a given woeid.
belongtos(``boolean``, optional):
A relationship specifier used to return a collection of places that have a place as a child or descendant (child of a child).
neighbors(``boolean``, optional):
A relationship specifier used to return a collection of places that neighbor of a place.
children(``boolean``, optional):
A relationship specifier used to return a collection of places that are children of a place.
siblings(``boolean``, optional):
A relationship specifier used to return a collection of places that are siblings of a place.
descendants(``boolean``, optional):
A relationship specifier used to return a collection of places that are in the child hierarchy (the child, the child of child, etc).
common(``boolean``, optional):
A relationship specifier used to return the common ancestor of both places.
"""
def __init__(self,
             parent=False,
             ancestors=False,
             belongstos=False,
             neighbors=False,
             siblings=False,
             children=False,
             descendants=False,
             common=False):
    # Store each relationship flag; __str__ converts the first active flag
    # (in declaration order) into its URL path fragment.
    # NOTE(review): the parameter is spelled `belongstos` but is stored as
    # `_belongtos` -- the public spelling is kept for backward compatibility.
    self._parent=parent
    self._ancestors=ancestors
    self._belongtos=belongstos
    self._neighbors=neighbors
    self._siblings=siblings
    self._children=children
    self._descendants=descendants
    self._common=common
def __str__(self):
if self._parent:
return '/parent'
if self._ancestors:
return '/ancestors'
if self._belongtos:
return '/belongtos'
if self._neighbors:
return '/neighbors'
if self._siblings:
return '/siblings'
if self._children:
return '/children'
if self._descendants:
return '/descendants'
if self._common:
return '/common/'
return ''
def __Validate(self, filters):
if isinstance(filters, Filters):
raise WoeidError("Unexpected modules usage: %s"%"Validate takes a Filters object as its argument")
if not filters.IsValid():
raise WoeidError("Unexpected API usage: %s"%"filters should be a dictionary")
''' /parent, /ancestors, /siblings, /common/ don't support any filters'''
if self._parent and filters.keys():
raise WoeidError("Unexpected API usage: %s"%"woeid/parent doesn't support filters")
if self._ancestors and filters.keys():
raise WoeidError("Unexpected API usage: %s"%"woeid/ancestors doesn't support filters")
if self._siblings and filters.keys():
raise WoeidError("Unexpected API usage: %s"%"woeid/siblings doesn't support filters")
if self._common and filters.keys():
raise WoeidError("Unexpected API usage: %s"%"woeid1/common/woeid2 doesn't support filters")
'''/belongtos and /descendants and /children support .type filter'''
if self._belongtos and (filters.HasAnd() or filters.HasDegree() or filters.HasQ() or filters.HasWoeid()):
raise WoeidError("Unexpected API usage: %s"%"woeid/belongtos supports .type filter only")
if self._descendants and (filters.HasAnd() or filters.HasDegree() or filters.HasQ() or filters.HasWoeid()):
raise WoeidError("Unexpected API usage: %s"%"woeid/descendants supports .type filter only")
if self._children and (filters.HasWoeid() or filters.HasQ() or filters.HasAnd()):
raise WoeidError("Unexpected API usage: %s"%"woeid/children supports .degree or .type filters only")
'''/neighbors support .degree filter'''
if self._neighbors and (filters.HasWoeid() or filters.HasType() or filters.HasQ() or filters.HasAnd()):
raise WoeidError("Unexpected API usage: %s"%"woeid/neighbors supports .degree filter only")
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
  """Convert the input from NHWC format to NCHW.

  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.

  Returns:
    the converted tensor or a shape array
  """
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor, [0, 3, 1, 2])
  # Plain sequence (e.g. a shape or ksize list): permute the elements.
  return [input_tensor[i] for i in (0, 3, 1, 2)]
def NCHWToNHWC(input_tensor):
  """Convert the input from NCHW format to NHWC.

  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.

  Returns:
    the converted tensor or a shape array
  """
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor, [0, 2, 3, 1])
  # Plain sequence (e.g. a shape or ksize list): permute the elements.
  return [input_tensor[i] for i in (0, 2, 3, 1)]
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs
  """
  # Pooling is exercised in both supported data layouts.
  return ["NHWC", "NCHW"]
class PoolingTest(XLATestCase):
  """Tests forward max/avg pooling on the XLA device against known outputs."""

  def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
                     data_format, expected):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, currently only co.MaxPool.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      expected: An array containing the expected operation outputs.
    """
    total_size = np.prod(input_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
    x = x.reshape(input_sizes)
    with self.test_session() as sess:
      with self.test_scope():
        inputs = array_ops.placeholder(dtypes.float32)
        t = inputs
        # Inputs/expectations are specified in NHWC; convert the operands
        # when the op under test should run in NCHW.
        if data_format == "NCHW":
          t = NHWCToNCHW(t)
          ksize = NHWCToNCHW(ksize)
          strides = NHWCToNCHW(strides)
        t = pool_func(t,
                      ksize=ksize,
                      strides=strides,
                      padding=padding,
                      data_format=data_format)
        # Convert back so the comparison below is always done in NHWC.
        if data_format == "NCHW":
          t = NCHWToNHWC(t)
      actual = sess.run(t, {inputs: x})
      self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)

  def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
                    expected):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool,
        or the Lua version.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    # Run the same expectation in every supported data format.
    for data_format in GetTestConfigs():
      self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
                          data_format, expected)

  def testMaxPoolValidPadding(self):
    expected_output = [13.0, 14.0, 15.0]
    self._VerifyValues(nn_ops.max_pool,
                       input_sizes=[1, 3, 3, 3],
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding="VALID",
                       expected=expected_output)

  def testMaxPoolSamePadding(self):
    expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
    self._VerifyValues(nn_ops.max_pool,
                       input_sizes=[1, 2, 3, 3],
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output)

  def testMaxPoolSamePaddingNonSquareWindow(self):
    # input is:
    # [1.0, 2.0
    #  3.0  4.0]
    #
    # Window of [x, x] should do:
    #
    # [max(1.0, 2.0), max(2.0, padded0),
    #  max(3.0, 4.0), max(4.0, padded0)]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[2.0, 2.0, 4.0, 4.0])

  def testMaxPoolValidPaddingUnevenStride(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID",
        expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID",
        expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])

  def testMaxPoolSamePaddingFilter4(self):
    expected_output = [
        21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
        61.0, 62.0, 63.0, 64.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)

  def testMaxPoolSamePaddingFilter8(self):
    expected_output = [
        145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
        163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
        181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
        191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
        289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
        307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
        317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
        407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
        433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
        443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
        469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
        487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
        505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)

  # Tests for DepthwiseMaxPooling on CPU only.
  def testDepthwiseMaxPool1x1DepthWindow1(self):
    # input is:
    # [1.0, ..., 10.0] along depth,
    #
    # We maxpool by depth in patches of 2.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 1, 1, 10],
        ksize=[1, 1, 1, 2],
        strides=[1, 1, 1, 2],
        padding="SAME",
        expected=[2.0, 4.0, 6.0, 8.0, 10.0])

  def testDepthwiseMaxPool2x2DepthWindow3(self):
    # input is:
    #
    # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
    # output.  Each node has contiguous values, so the depthwise max
    # should be multiples of 3.0.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 6],
        ksize=[1, 1, 1, 3],
        strides=[1, 1, 1, 3],
        padding="SAME",
        expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])

  def testKernelSmallerThanStrideValid(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 7, 7, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 3, 3, 1],
        padding="VALID",
        expected=[9, 12, 30, 33])

  def testKernelSmallerThanStrideSame(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 3, 3, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[1, 3, 7, 9])
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[1, 3, 9, 11])

  # Average pooling
  def testAvgPoolValidPadding(self):
    expected_output = [7, 8, 9]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID",
        expected=expected_output)

  def testAvgPoolSamePadding(self):
    expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 2, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)
class PoolGradTest(XLATestCase):
  """Compares XLA pooling gradients (and second-order gradients) against the
  TensorFlow CPU reference kernels."""

  # The CPU kernels serve as the reference implementation.
  CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"

  def _VerifyOneTest(self,
                     pool_func,
                     pool_grad_func,
                     input_sizes,
                     ksize,
                     strides,
                     padding,
                     data_format,
                     pool_grad_grad_func=None):
    """Verifies the output values of the pooling gradient function.

    Args:
      pool_func: Forward pooling function
      pool_grad_func: Pooling gradient function for pool_grad_func
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      pool_grad_grad_func: Second-order gradient function, if available.
    """
    total_size = np.prod(input_sizes)
    # TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
    # maximal at 16 bits. Switch to np.random.randn when resolved.
    x = np.arange(1, total_size + 1, dtype=np.float32)
    x *= (np.random.randint(2, size=total_size) * 2 - 1)  # Flip signs randomly
    # Verify some specifically interesting values...
    x[np.random.choice(total_size)] = np.inf
    x[np.random.choice(total_size)] = -np.inf
    # TODO(b/74222344): Fix nan handling for max pool grad.
    # x[np.random.choice(total_size)] = np.nan
    x = x.reshape(input_sizes)
    with self.test_session() as sess:
      # Use the forward pool function to compute some corresponding outputs
      # (needed for the CPU device, and we need the shape in both cases).
      with ops.device(self.CPU_DEVICE):
        inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
        outputs = pool_func(
            inputs,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")
      output_vals = np.array(sess.run(outputs, {inputs: x}))
      # Synthetic incoming gradients: 1..N reshaped to the output/input shape.
      output_gradient_vals = np.arange(
          1, output_vals.size + 1, dtype=np.float32)
      output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
      output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
      output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
      # Use the Tensorflow CPU pooling gradient to compute the expected input
      # gradients.
      with ops.device(self.CPU_DEVICE):
        output_gradients = array_ops.placeholder(
            dtypes.float32, shape=output_vals.shape)
        expected_input_gradients = pool_grad_func(
            inputs,
            outputs,
            output_gradients,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")
        expected_input_gradient_vals = sess.run(
            expected_input_gradients,
            {inputs: x,
             output_gradients: output_gradient_vals})
        output_grad_gradients = array_ops.placeholder(
            dtypes.float32, shape=expected_input_gradient_vals.shape)
        if pool_grad_grad_func is not None:
          expected_grad_gradients = pool_grad_grad_func(
              inputs,
              outputs,
              output_grad_gradients,
              ksize=ksize,
              strides=strides,
              padding=padding,
              data_format="NHWC")
          expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
              inputs: x,
              output_grad_gradients: output_grad_grad_vals
          })
      # Run the gradient op on the XLA device
      with self.test_scope():
        outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
        # The CPU reference above always ran in NHWC; convert every operand
        # when the XLA op under test should run in NCHW.
        xla_inputs = inputs
        xla_outputs = outputs
        xla_output_gradients = output_gradients
        xla_output_grad_gradients = output_grad_gradients
        xla_ksize = ksize
        xla_strides = strides
        if data_format == "NCHW":
          xla_inputs = NHWCToNCHW(inputs)
          xla_outputs = NHWCToNCHW(outputs)
          xla_output_gradients = NHWCToNCHW(output_gradients)
          xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
          xla_ksize = NHWCToNCHW(ksize)
          xla_strides = NHWCToNCHW(strides)
        actual_input_gradients = pool_grad_func(
            xla_inputs,
            xla_outputs,
            xla_output_gradients,
            ksize=xla_ksize,
            strides=xla_strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          actual_input_gradients = NCHWToNHWC(actual_input_gradients)
        if pool_grad_grad_func is not None:
          actual_grad_gradients = pool_grad_grad_func(
              xla_inputs,
              xla_outputs,
              xla_output_grad_gradients,
              ksize=xla_ksize,
              strides=xla_strides,
              padding=padding,
              data_format=data_format)
          if data_format == "NCHW":
            actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
      actual_input_gradients_vals = sess.run(actual_input_gradients, {
          inputs: x,
          outputs: output_vals,
          output_gradients: output_gradient_vals
      })
      # Compare the Tensorflow and XLA results.
      self.assertAllClose(
          expected_input_gradient_vals,
          actual_input_gradients_vals,
          rtol=1e-4,
          atol=1e-6)
      self.assertShapeEqual(actual_input_gradients_vals, inputs)
      if pool_grad_grad_func is not None:
        actual_grad_gradients_vals = sess.run(
            actual_grad_gradients, {
                inputs: x,
                outputs: output_vals,
                output_grad_gradients: output_grad_grad_vals
            })
        # Compare the Tensorflow and XLA results.
        self.assertAllClose(
            expected_grad_gradients_vals,
            actual_grad_gradients_vals,
            rtol=1e-4,
            atol=1e-6)
        self.assertShapeEqual(actual_grad_gradients_vals, outputs)

  def _VerifyValues(self,
                    pool_func,
                    pool_grad_func,
                    input_sizes,
                    ksize,
                    strides,
                    padding,
                    pool_grad_grad_func=None):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Pooling function to be called, e.g., tf.nn.max_pool
      pool_grad_func: Corresponding pooling gradient function.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      pool_grad_grad_func: Second-order gradient function, if available.
    """
    # Run the same check in every supported data format.
    for data_format in GetTestConfigs():
      self._VerifyOneTest(
          pool_func,
          pool_grad_func,
          input_sizes,
          ksize,
          strides,
          padding,
          data_format,
          pool_grad_grad_func=pool_grad_grad_func)

  def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):
    """Runs a standard battery of shape/stride/padding combinations."""
    # VALID padding
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID",
        pool_grad_grad_func=pool_grad_grad_func)

    # SAME padding
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 2, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)

    # SAME padding, non square window
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)

    # VALID padding, uneven stride
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID",
        pool_grad_grad_func=pool_grad_grad_func)
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID",
        pool_grad_grad_func=pool_grad_grad_func)

    # SAME padding, size 4 input
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)

    # SAME padding, size 8 input
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        pool_grad_grad_func=pool_grad_grad_func)

  def testMaxPool(self):
    self._TestPooling(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)

  def testAvgPool(self):
    # Wrapper around AvgPoolGrad that ignores extra arguments needed by
    # MaxPoolGrad.
    def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
                    data_format):
      del outputs  # Unused by average-pooling gradients.
      return gen_nn_ops.avg_pool_grad(
          inputs.get_shape().as_list(),
          output_gradients,
          ksize=ksize,
          strides=strides,
          padding=padding,
          data_format=data_format)

    self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)

  # The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
  # the stride size, so we only run the following tests on MaxPoolGrad.
  def testMaxPoolKernelSmallerThanStrideValid(self):
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        input_sizes=[1, 7, 7, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 3, 3, 1],
        padding="VALID")

  def testMaxPoolKernelSmallerThanStrideSame(self):
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        input_sizes=[1, 3, 3, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops.max_pool_grad,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
# Allow running this test file directly (outside the bazel test runner).
if __name__ == "__main__":
  googletest.main()
| |
from __future__ import unicode_literals
import pytest
from textx import metamodel_from_str, TextXSyntaxError
def test_modifier_separator_zeroormore():
    """A separator modifier on `*` allows comma-separated matches."""
    grammar = """
Rule:
("a"|"b")*[','];
"""
    mm = metamodel_from_str(grammar)
    assert mm.model_from_str("a,b, a, b")
def test_modifier_separator_oneormore():
    """A separator modifier on `+` requires comma-separated matches."""
    grammar = """
Rule:
("a"|"b")+[','];
"""
    mm = metamodel_from_str(grammar)
    assert mm.model_from_str("a,b, a, b")
    # Matches must be separated with a comma.
    with pytest.raises(TextXSyntaxError):
        mm.model_from_str("a b")
    # `+` demands at least one match.
    with pytest.raises(TextXSyntaxError):
        mm.model_from_str("")
def test_modifier_separator_optional():
    """Repetition modifiers are rejected on the `?` operator."""
    grammar = """
Rule:
("a"|"b")?[','];
"""
    with pytest.raises(TextXSyntaxError):
        metamodel_from_str(grammar)
def test_modifier_separator_unordered_group():
    """A separator modifier applies to the `#` unordered group."""
    grammar = """
Rule:
("a" "b" "c")#[','];
"""
    mm = metamodel_from_str(grammar)
    # Any ordering of the three members is accepted.
    assert mm.model_from_str("a, b, c")
    assert mm.model_from_str("c, a, b")
    # Missing, repeated, leading or trailing separators are all rejected.
    for bad in ("a, c b", "a, c, a, b", ",a, c, b", "a, c, b, ", "a, c, ,b "):
        with pytest.raises(TextXSyntaxError):
            mm.model_from_str(bad)
def test_modifier_separator_unordered_group_with_optionals():
    """Separator modifier on a `#` group that contains an optional member."""
    grammar = """
Rule:
("a" "b"? "c")#[','];
"""
    mm = metamodel_from_str(grammar)
    # The optional "b" may be present or absent.
    assert mm.model_from_str("a, b, c")
    assert mm.model_from_str("c, a")
    # Malformed separator usage is still rejected.
    for bad in ("a, c b", "a, c, ", "a, c, a, b",
                ",a, c, b", "a, c, b, ", "a, c, ,b "):
        with pytest.raises(TextXSyntaxError):
            mm.model_from_str(bad)
def test_assignment_modifier_separator_zeroormore():
    """Separator modifier on a `*=` assignment collects all matches."""
    grammar = """
Rule:
a*=AorB[','];
AorB:
"a"|"b";
"""
    mm = metamodel_from_str(grammar)
    parsed = mm.model_from_str("a,b, a")
    # All three comma-separated AorB matches must be collected.
    assert len(parsed.a) == 3
    assert parsed.a[1] == 'b'
def test_assignment_modifier_separator_oneormore():
    """Separator modifier on a `+=` assignment collects all matches."""
    grammar = """
Rule:
a+=AorB[','];
AorB:
"a"|"b";
"""
    mm = metamodel_from_str(grammar)
    parsed = mm.model_from_str("a,b, a")
    # All three comma-separated AorB matches must be collected.
    assert len(parsed.a) == 3
    assert parsed.a[1] == 'b'
def test_assignment_modifier_separator_optional():
    """
    Modifiers are not allowed for ?= assignment.
    """
    grammar = """
Rule:
a?=AorB[','];
AorB:
"a"|"b";
"""
    with pytest.raises(TextXSyntaxError):
        metamodel_from_str(grammar)
def test_assignment_modifier_separator_plain():
    """
    Modifiers are not allowed for plain assignment.
    """
    grammar = """
Rule:
a=AorB[','];
AorB:
"a"|"b";
"""
    with pytest.raises(TextXSyntaxError):
        metamodel_from_str(grammar)
def test_modifier_eolterm_zeroormore():
    """`eolterm` terminates a `*` repetition at the end of the line."""
    grammar = """
Rule:
'first'
INT*[eolterm] '123456';
"""
    mm = metamodel_from_str(grammar)
    # The integers must follow 'first' on the same line; a newline right
    # after 'first' ends the repetition too early, so '123456' mismatches.
    with pytest.raises(TextXSyntaxError):
        mm.model_from_str("""
first
34 56 88 65
123456
""")
    # Once a newline is reached integer matching stops and then the
    # literal '123456' is matched.
    assert mm.model_from_str("""
first 34 56 88 65
123456
""")
def test_modifier_eolterm_oneormore():
    """`eolterm` terminates a `+` repetition at the end of the line."""
    grammar = """
Rule:
'first'
INT+[eolterm] '123456';
"""
    mm = metamodel_from_str(grammar)
    # The integers must follow 'first' on the same line; a newline right
    # after 'first' ends the repetition too early, so '123456' mismatches.
    with pytest.raises(TextXSyntaxError):
        mm.model_from_str("""
first
34 56 88 65
123456
""")
    # Once a newline is reached integer matching stops and then the
    # literal '123456' is matched.
    assert mm.model_from_str("""
first 34 56 88 65
123456
""")
def test_multiple_modifiers():
    """
    Multiple modifier may be specified separated with space.
    """
    grammar = """
Rule:
'first'
INT+[eolterm ','] '123456';
"""
    mm = metamodel_from_str(grammar)
    # The comma-separated integers must stay on the same line as 'first'.
    with pytest.raises(TextXSyntaxError):
        mm.model_from_str("""
first
34 56 88 65
123456
""")
    # A newline ends the repetition and then '123456' is matched.
    assert mm.model_from_str("""
first 34, 56, 88, 65
123456
""")
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestDataset(unittest2.TestCase):
    """Unit tests for gcloud.datastore.dataset.Dataset.

    Collaborators (the module-level get/put/delete functions and the Key,
    Batch, Transaction and Query classes) are monkey-patched with recorders,
    so each test checks only the arguments Dataset forwards.
    """

    DATASET_ID = 'DATASET'

    def _getTargetClass(self):
        # Imported lazily so import errors surface per-test.
        from gcloud.datastore.dataset import Dataset
        return Dataset

    def _makeOne(self, dataset_id=DATASET_ID, connection=None):
        # Helper: build a Dataset instance with test defaults.
        return self._getTargetClass()(dataset_id, connection)

    def test_ctor_w_dataset_id_None(self):
        self.assertRaises(ValueError, self._makeOne, None)

    def test_ctor_w_dataset_id_no_connection(self):
        dataset = self._makeOne()
        self.assertEqual(dataset.dataset_id, self.DATASET_ID)

    def test_ctor_w_dataset_id_w_connection(self):
        conn = object()
        dataset = self._makeOne(connection=conn)
        self.assertEqual(dataset.dataset_id, self.DATASET_ID)
        self.assertTrue(dataset.connection is conn)

    def test_get_defaults(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        _called_with = []

        def _get(*args, **kw):
            # Record the forwarded call for inspection below.
            _called_with.append((args, kw))

        dataset = self._makeOne()
        key = object()
        with _Monkey(MUT, get=_get):
            dataset.get([key])
        self.assertEqual(_called_with[0][0], ([key],))
        self.assertTrue(_called_with[0][1]['missing'] is None)
        self.assertTrue(_called_with[0][1]['deferred'] is None)
        self.assertTrue(_called_with[0][1]['connection'] is None)
        self.assertEqual(_called_with[0][1]['dataset_id'], self.DATASET_ID)

    def test_get_explicit(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        _called_with = []

        def _get(*args, **kw):
            _called_with.append((args, kw))

        conn = object()
        dataset = self._makeOne(connection=conn)
        key, missing, deferred = object(), [], []
        with _Monkey(MUT, get=_get):
            dataset.get([key], missing, deferred)
        self.assertEqual(_called_with[0][0], ([key],))
        self.assertTrue(_called_with[0][1]['missing'] is missing)
        self.assertTrue(_called_with[0][1]['deferred'] is deferred)
        self.assertTrue(_called_with[0][1]['connection'] is conn)
        self.assertEqual(_called_with[0][1]['dataset_id'], self.DATASET_ID)

    def test_put_wo_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        _called_with = []

        def _put(*args, **kw):
            _called_with.append((args, kw))

        dataset = self._makeOne()
        entity = object()
        with _Monkey(MUT, put=_put):
            dataset.put([entity])
        self.assertEqual(_called_with[0][0], ([entity],))
        self.assertTrue(_called_with[0][1]['connection'] is None)
        self.assertEqual(_called_with[0][1]['dataset_id'], self.DATASET_ID)

    def test_put_w_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        _called_with = []

        def _put(*args, **kw):
            _called_with.append((args, kw))

        entity, conn = object(), object()
        dataset = self._makeOne(connection=conn)
        with _Monkey(MUT, put=_put):
            dataset.put([entity])
        self.assertEqual(_called_with[0][0], ([entity],))
        self.assertTrue(_called_with[0][1]['connection'] is conn)
        self.assertEqual(_called_with[0][1]['dataset_id'], self.DATASET_ID)

    def test_delete_wo_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        _called_with = []

        def _delete(*args, **kw):
            _called_with.append((args, kw))

        dataset = self._makeOne()
        key = object()
        with _Monkey(MUT, delete=_delete):
            dataset.delete([key])
        self.assertEqual(_called_with[0][0], ([key],))
        self.assertTrue(_called_with[0][1]['connection'] is None)
        self.assertEqual(_called_with[0][1]['dataset_id'], self.DATASET_ID)

    def test_delete_w_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        _called_with = []

        def _delete(*args, **kw):
            _called_with.append((args, kw))

        key, conn = object(), object()
        dataset = self._makeOne(connection=conn)
        with _Monkey(MUT, delete=_delete):
            dataset.delete([key])
        self.assertEqual(_called_with[0][0], ([key],))
        self.assertTrue(_called_with[0][1]['connection'] is conn)
        self.assertEqual(_called_with[0][1]['dataset_id'], self.DATASET_ID)

    def test_key_w_dataset_id(self):
        # The dataset supplies dataset_id itself; passing it again is an error.
        KIND = 'KIND'
        ID = 1234
        dataset = self._makeOne()
        self.assertRaises(TypeError,
                          dataset.key, KIND, ID, dataset_id=self.DATASET_ID)

    def test_key_wo_dataset_id(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        KIND = 'KIND'
        ID = 1234
        dataset = self._makeOne()
        with _Monkey(MUT, Key=_Dummy):
            key = dataset.key(KIND, ID)
        self.assertTrue(isinstance(key, _Dummy))
        self.assertEqual(key.args, (KIND, ID))
        self.assertEqual(key.kwargs, {'dataset_id': self.DATASET_ID})

    def test_batch_wo_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        dataset = self._makeOne()
        with _Monkey(MUT, Batch=_Dummy):
            batch = dataset.batch()
        self.assertTrue(isinstance(batch, _Dummy))
        self.assertEqual(batch.args, ())
        self.assertEqual(batch.kwargs,
                         {'dataset_id': self.DATASET_ID, 'connection': None})

    def test_batch_w_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        conn = object()
        dataset = self._makeOne(connection=conn)
        with _Monkey(MUT, Batch=_Dummy):
            batch = dataset.batch()
        self.assertTrue(isinstance(batch, _Dummy))
        self.assertEqual(batch.args, ())
        self.assertEqual(batch.kwargs,
                         {'dataset_id': self.DATASET_ID, 'connection': conn})

    def test_transaction_wo_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        dataset = self._makeOne()
        with _Monkey(MUT, Transaction=_Dummy):
            xact = dataset.transaction()
        self.assertTrue(isinstance(xact, _Dummy))
        self.assertEqual(xact.args, ())
        self.assertEqual(xact.kwargs,
                         {'dataset_id': self.DATASET_ID, 'connection': None})

    def test_transaction_w_connection(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        conn = object()
        dataset = self._makeOne(connection=conn)
        with _Monkey(MUT, Transaction=_Dummy):
            xact = dataset.transaction()
        self.assertTrue(isinstance(xact, _Dummy))
        self.assertEqual(xact.args, ())
        self.assertEqual(xact.kwargs,
                         {'dataset_id': self.DATASET_ID, 'connection': conn})

    def test_query_w_dataset_id(self):
        # The dataset supplies dataset_id itself; passing it again is an error.
        KIND = 'KIND'
        dataset = self._makeOne()
        self.assertRaises(TypeError,
                          dataset.query, kind=KIND, dataset_id=self.DATASET_ID)

    def test_query_w_defaults(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        dataset = self._makeOne()
        with _Monkey(MUT, Query=_Dummy):
            query = dataset.query()
        self.assertTrue(isinstance(query, _Dummy))
        self.assertEqual(query.args, ())
        self.assertEqual(query.kwargs, {'dataset_id': self.DATASET_ID})

    def test_query_explicit(self):
        from gcloud.datastore import dataset as MUT
        from gcloud._testing import _Monkey
        KIND = 'KIND'
        NAMESPACE = 'NAMESPACE'
        ANCESTOR = object()
        FILTERS = [('PROPERTY', '==', 'VALUE')]
        PROJECTION = ['__key__']
        ORDER = ['PROPERTY']
        GROUP_BY = ['GROUPBY']
        dataset = self._makeOne()
        with _Monkey(MUT, Query=_Dummy):
            query = dataset.query(
                kind=KIND,
                namespace=NAMESPACE,
                ancestor=ANCESTOR,
                filters=FILTERS,
                projection=PROJECTION,
                order=ORDER,
                group_by=GROUP_BY,
                )
        self.assertTrue(isinstance(query, _Dummy))
        kwargs = {
            'dataset_id': self.DATASET_ID,
            'kind': KIND,
            'namespace': NAMESPACE,
            'ancestor': ANCESTOR,
            'filters': FILTERS,
            'projection': PROJECTION,
            'order': ORDER,
            'group_by': GROUP_BY,
        }
        self.assertEqual(query.args, ())
        self.assertEqual(query.kwargs, kwargs)
class _Dummy(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
| |
import os
import sys
import time
import subprocess
import logging
from StringIO import StringIO
from Pegasus.netlogger.analysis.schema import stampede_dashboard_schema as dash
from pegasus.service import app, db, em, tests, catalogs, ensembles
from pegasus.service.tests import *
class TestWorkflowProcessor:
    """Stand-in for the real workflow processor used by the manager tests.

    Every operation is an instant no-op: planning and running never report
    as in-progress and always report success, so the ensemble manager can
    drive workflows through their whole lifecycle without external tools.
    """

    def __init__(self, workflow):
        self.workflow = workflow

    # --- planning phase ---------------------------------------------------

    def plan(self):
        pass

    def planning(self):
        return False

    def planning_successful(self):
        return True

    def get_wf_uuid(self):
        return "d8f8e15c-a55f-4ca0-8474-62bdb3310083"

    def get_submitdir(self):
        return "submitdir"

    # --- running phase ----------------------------------------------------

    def run(self):
        pass

    def pending(self):
        return False

    def running(self):
        return False

    def running_successful(self):
        return True
class EnsembleManagerTest(tests.UserTestCase):
    """Exercises the ensemble manager state machine with a stubbed-out
    workflow processor (no real planner or scheduler is invoked)."""

    def setUp(self):
        tests.UserTestCase.setUp(self)
        # Swap in the stub so loop_once() advances workflow states
        # without planning or submitting anything for real.
        em.EnsembleProcessor.Processor = TestWorkflowProcessor

    def tearDown(self):
        # Restore the real processor so other tests are unaffected.
        em.EnsembleProcessor.Processor = em.WorkflowProcessor
        tests.UserTestCase.tearDown(self)

    def test_em(self):
        # Create an ensemble and a workflow
        e = ensembles.Ensemble(self.user_id, "foo")
        # Limit to one workflow planning and one running at a time so the
        # two workflows below advance one loop_once() apart.
        e.set_max_planning(1)
        e.set_max_running(1)
        db.session.add(e)
        db.session.flush()
        w = ensembles.EnsembleWorkflow(e.id, "bar")
        db.session.add(w)
        db.session.flush()
        w2 = ensembles.EnsembleWorkflow(e.id, "baz")
        db.session.add(w2)
        db.session.flush()
        mgr = em.EnsembleManager()
        # A paused ensemble must not be processed at all.
        e.set_state(ensembles.EnsembleStates.PAUSED)
        db.session.flush()
        mgr.loop_once()
        self.assertEquals(w.state, ensembles.EnsembleWorkflowStates.READY, "State should still be READY")
        self.assertEquals(w2.state, ensembles.EnsembleWorkflowStates.READY, "State should still be READY")
        # Activate the ensemble: each loop_once() now advances the
        # workflows one state (READY -> PLANNING -> RUNNING -> SUCCESSFUL).
        e.set_state(ensembles.EnsembleStates.ACTIVE)
        db.session.flush()
        mgr.loop_once()
        self.assertEquals(w.state, ensembles.EnsembleWorkflowStates.PLANNING, "State should be PLANNING")
        self.assertEquals(w2.state, ensembles.EnsembleWorkflowStates.READY, "State should be READY")
        mgr.loop_once()
        self.assertEquals(w.state, ensembles.EnsembleWorkflowStates.RUNNING, "State should be RUNNING")
        self.assertEquals(w.submitdir, "submitdir", "Submitdir should be set")
        self.assertEquals(w.wf_uuid, "d8f8e15c-a55f-4ca0-8474-62bdb3310083", "UUID should be set")
        self.assertEquals(w2.state, ensembles.EnsembleWorkflowStates.PLANNING, "State should be PLANNING")
        mgr.loop_once()
        self.assertEquals(w.state, ensembles.EnsembleWorkflowStates.SUCCESSFUL, "State should be SUCCESSFUL")
        self.assertEquals(w2.state, ensembles.EnsembleWorkflowStates.RUNNING, "State should be RUNNING")
        mgr.loop_once()
        self.assertEquals(w2.state, ensembles.EnsembleWorkflowStates.SUCCESSFUL, "State should be SUCCESSFUL")
def RequiresPegasus(f):
    """Decorator that skips the wrapped test (returns None) when Pegasus
    is not available on this machine.

    Fixes: the original used a bare ``except:`` which also swallowed
    SystemExit/KeyboardInterrupt, and the wrapper lost the wrapped
    function's metadata (name/docstring).
    """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            em.get_pegasus_bin()
        except Exception:
            # Pegasus is missing: note it on stderr and skip the test.
            sys.stderr.write(" test requires Pegasus ")
            return None
        return f(*args, **kwargs)
    return wrapper
def RequiresCondor(f):
    """Decorator that skips the wrapped test (returns None) when Condor
    is not available on this machine.

    Fixes: the original used a bare ``except:`` which also swallowed
    SystemExit/KeyboardInterrupt, and the wrapper lost the wrapped
    function's metadata (name/docstring).
    """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            em.get_condor_bin()
        except Exception:
            # Condor is missing: note it on stderr and skip the test.
            sys.stderr.write(" test requires Condor ")
            return None
        return f(*args, **kwargs)
    return wrapper
class ScriptTest(tests.TestCase):
    """Integration tests for the em helper functions that build tool
    environments and fork/run shell scripts."""

    @IntegrationTest
    @RequiresPegasus
    @RequiresCondor
    def testGetEnv(self):
        """The script environment must expose both tool bin dirs on PATH."""
        PEGASUS_BIN = em.get_pegasus_bin()
        CONDOR_BIN = em.get_condor_bin()
        env = em.get_script_env()
        self.assertTrue(PEGASUS_BIN in env["PATH"])
        self.assertTrue(CONDOR_BIN in env["PATH"])

    def testForkScript(self):
        """forkscript() detaches the command; cwd and pidfile are honored."""
        em.forkscript("true")
        cwdfile = "/tmp/forkscript.cwd"
        if os.path.isfile(cwdfile):
            os.remove(cwdfile)
        em.forkscript("echo $PWD > %s" % cwdfile, cwd="/")
        time.sleep(1) # This just gives the script time to finish
        cwd = open(cwdfile, "r").read().strip()
        self.assertEquals(cwd, "/")
        os.remove(cwdfile)
        # A pidfile must be created and contain a positive pid.
        pidfile = "/tmp/forkscript.pid"
        if os.path.isfile(pidfile):
            os.remove(pidfile)
        em.forkscript("true", cwd="/tmp", pidfile="/tmp/forkscript.pid")
        self.assertTrue(os.path.isfile(pidfile))
        pid = int(open(pidfile,"r").read())
        self.assertTrue(pid > 0)
        os.remove(pidfile)
        # Invalid cwd / pidfile locations must raise, not silently fail.
        self.assertRaises(em.EMException, em.forkscript, "true", cwd="/some/path/not/existing")
        self.assertRaises(em.EMException, em.forkscript, "true", pidfile="/some/path/not/existing.pid")

    def testRunScript(self):
        """runscript() runs the command synchronously and honors cwd."""
        em.runscript("true")
        cwdfile = "/tmp/runscript.cwd"
        if os.path.isfile(cwdfile):
            os.remove(cwdfile)
        em.runscript("echo $PWD > %s" % cwdfile, cwd="/")
        cwd = open(cwdfile, "r").read().strip()
        self.assertEquals(cwd, "/")
        os.remove(cwdfile)
        self.assertRaises(em.EMException, em.runscript, "true", cwd="/some/path/not/existing")
class WorkflowTest(tests.UserTestCase):
    """End-to-end tests that plan and run real workflows through Pegasus
    and Condor (each integration test is skipped unless both tools are
    present)."""

    # Terminal ensemble-workflow states: the polling loops below stop
    # once one of these is reached.
    endstates = set([
        ensembles.EnsembleWorkflowStates.SUCCESSFUL,
        ensembles.EnsembleWorkflowStates.PLAN_FAILED,
        ensembles.EnsembleWorkflowStates.RUN_FAILED,
        ensembles.EnsembleWorkflowStates.FAILED
    ])

    def test_workflow_processor(self):
        "Simple tests to make sure the WorkflowProcessor works"
        wf_uuid = "d8f8e15c-a55f-4ca0-8474-62bdb3310083"
        e = ensembles.Ensemble(self.user_id, "foo")
        db.session.add(e)
        db.session.flush()
        ew = ensembles.EnsembleWorkflow(e.id, "bar")
        ew.wf_uuid = wf_uuid
        db.session.add(ew)
        db.session.flush()
        # Without a (valid) submit dir run() must raise.
        p = em.WorkflowProcessor(ew)
        self.assertRaises(em.EMException, p.run)
        ew.submitdir = "/some/path/not/existing"
        db.session.flush()
        self.assertRaises(em.EMException, p.run)
        # No dashboard record yet: the workflow is pending and its run
        # state cannot be queried.
        p = em.WorkflowProcessor(ew)
        self.assertTrue(p.pending())
        self.assertRaises(em.EMException, p.running)
        self.assertRaises(em.EMException, p.running_successful)
        # Create the dashboard records the processor polls.
        dw = dash.DashboardWorkflow()
        dw.wf_uuid = wf_uuid
        db.session.add(dw)
        db.session.flush()
        ws = dash.DashboardWorkflowstate()
        ws.wf_id = dw.wf_id
        ws.state = 'WORKFLOW_STARTED'
        ws.restart_count = 0
        ws.status = 0
        db.session.add(ws)
        db.session.flush()
        p = em.WorkflowProcessor(ew)
        self.assertTrue(p.running())
        # Success cannot be determined while the workflow is running.
        self.assertRaises(em.EMException, p.running_successful)
        ws.state = 'WORKFLOW_TERMINATED'
        ws.status = 0
        db.session.flush()
        self.assertFalse(p.running())
        self.assertTrue(p.running_successful())
        # A non-zero terminal status means the run failed.
        ws.status = 1
        db.session.flush()
        self.assertFalse(p.running_successful())

    def create_test_workflow(self, daxfile):
        """Create a minimal one-ensemble/one-workflow setup around
        *daxfile* using tiny replica/site/transformation catalogs;
        returns (ensemble, ensemble_workflow)."""
        # The replica catalog can be empty
        rcfile = StringIO("")
        # Just one transformation in the tc
        tcfile = StringIO("""
tr ls {
site local {
pfn "/bin/ls"
arch "x86_64"
os "linux"
type "INSTALLED"
}
}
""")
        # Only the local site in the SC
        scfile = StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<sitecatalog xmlns="http://pegasus.isi.edu/schema/sitecatalog"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/sitecatalog http://pegasus.isi.edu/schema/sc-4.0.xsd"
version="4.0">
<site handle="local" arch="x86_64" os="LINUX">
<directory type="shared-scratch" path="%(tmpdir)s/scratch">
<file-server operation="all" url="file://%(tmpdir)s/scratch"/>
</directory>
<directory type="local-storage" path="%(tmpdir)s/storage">
<file-server operation="all" url="file://%(tmpdir)s/storage"/>
</directory>
</site>
</sitecatalog>
""" % {"tmpdir": self.tmpdir})
        rc = catalogs.save_catalog("replica", self.user_id, "replica", "File", rcfile)
        sc = catalogs.save_catalog("site", self.user_id, "sites", "XML", scfile)
        tc = catalogs.save_catalog("transformation", self.user_id, "transformations", "text", tcfile)
        conf = StringIO("pegasus.register=false")
        e = ensembles.create_ensemble(self.user_id, "process", 1, 1)
        ew = ensembles.create_ensemble_workflow(e.id, "process", 0, rc, tc, sc, daxfile, conf,
            sites=["local"], output_site="local", force=True, cleanup=False)
        return e, ew

    @IntegrationTest
    @RequiresPegasus
    def test_planner_fails(self):
        """Planning must fail for a DAX with an unknown transformation."""
        # This should fail to plan because the dax has an unknown transformation
        dax = StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<adag xmlns="http://pegasus.isi.edu/schema/DAX" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd"
version="3.4" name="process">
<job id="ID0000001" name="FROOB">
<argument>-l /</argument>
<stdout name="listing.txt" link="output"/>
<uses name="listing.txt" link="output" register="false" transfer="true"/>
</job>
</adag>
""")
        e, ew = self.create_test_workflow(dax)
        p = em.WorkflowProcessor(ew)
        p.plan()
        while p.planning():
            time.sleep(1)
        self.assertFalse(p.planning_successful(), "Workflow should fail to plan")

    @IntegrationTest
    @RequiresPegasus
    @RequiresCondor
    def test_failed_workflow(self):
        """Plan succeeds but the run must fail (bad ls argument)."""
        # This workflow should fail because the argument to the ls job is invalid
        dax = StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<adag xmlns="http://pegasus.isi.edu/schema/DAX" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd"
version="3.4" name="process">
<job id="ID0000001" name="ls">
<argument>-l /path/that/does/not/exist</argument>
<stdout name="listing.txt" link="output"/>
<uses name="listing.txt" link="output" register="false" transfer="true"/>
</job>
</adag>
""")
        e, ew = self.create_test_workflow(dax)
        p = em.WorkflowProcessor(ew)
        p.plan()
        while p.planning():
            time.sleep(1)
        self.assertTrue(p.planning_successful(), "Planning should succeed")
        submitdir = p.get_submitdir()
        self.assertTrue(os.path.isdir(submitdir), "Submit dir should exist")
        wf_uuid = p.get_wf_uuid()
        self.assertTrue(wf_uuid is not None, "wf_uuid should exist")
        # The ensemble processor normally does this
        ew.set_submitdir(submitdir)
        ew.set_wf_uuid(wf_uuid)
        db.session.flush()
        db.session.commit()
        p.run()
        while p.pending() or p.running():
            time.sleep(5)
        self.assertFalse(p.running_successful(), "The workflow should fail to run")

    @IntegrationTest
    @RequiresPegasus
    @RequiresCondor
    def test_successful_workflow(self):
        """A well-formed one-job workflow plans and runs to success."""
        # This workflow should succeed
        dax = StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<adag xmlns="http://pegasus.isi.edu/schema/DAX" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd"
version="3.4" name="process">
<job id="ID0000001" name="ls">
<argument>-l /</argument>
<stdout name="listing.txt" link="output"/>
<uses name="listing.txt" link="output" register="false" transfer="true"/>
</job>
</adag>
""")
        e, ew = self.create_test_workflow(dax)
        p = em.WorkflowProcessor(ew)
        p.plan()
        while p.planning():
            time.sleep(1)
        self.assertTrue(p.planning_successful())
        submitdir = p.get_submitdir()
        self.assertTrue(os.path.isdir(submitdir))
        wf_uuid = p.get_wf_uuid()
        self.assertTrue(wf_uuid is not None)
        # The ensemble processor normally records these after planning.
        ew.set_submitdir(submitdir)
        ew.set_wf_uuid(wf_uuid)
        db.session.flush()
        db.session.commit()
        p.run()
        while p.pending() or p.running():
            time.sleep(5)
        self.assertTrue(p.running_successful())

    @IntegrationTest
    @RequiresPegasus
    @RequiresCondor
    def test_ensemble_end_to_end(self):
        """Drive the same workflow through EnsembleManager.loop_once()."""
        # This workflow should succeed
        dax = StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<adag xmlns="http://pegasus.isi.edu/schema/DAX" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd"
version="3.4" name="process">
<job id="ID0000001" name="ls">
<argument>-l /</argument>
<stdout name="listing.txt" link="output"/>
<uses name="listing.txt" link="output" register="false" transfer="true"/>
</job>
</adag>
""")
        e, ew = self.create_test_workflow(dax)
        mgr = em.EnsembleManager()
        # Poll until the workflow reaches a terminal state.
        while ew.state not in self.endstates:
            mgr.loop_once()
            time.sleep(5)
        self.assertEquals(ew.state, ensembles.EnsembleWorkflowStates.SUCCESSFUL)

    @IntegrationTest
    @RequiresPegasus
    @RequiresCondor
    def test_ensemble_failure_end_to_end(self):
        """A failing job must drive the ensemble workflow to FAILED."""
        # This workflow should fail because of the argument to ls
        dax = StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<adag xmlns="http://pegasus.isi.edu/schema/DAX" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd"
version="3.4" name="process">
<job id="ID0000001" name="ls">
<argument>-l /some/non/existent/directory</argument>
<stdout name="listing.txt" link="output"/>
<uses name="listing.txt" link="output" register="false" transfer="true"/>
</job>
</adag>
""")
        e, ew = self.create_test_workflow(dax)
        mgr = em.EnsembleManager()
        while ew.state not in self.endstates:
            mgr.loop_once()
            time.sleep(5)
        self.assertEquals(ew.state, ensembles.EnsembleWorkflowStates.FAILED)

    @IntegrationTest
    @RequiresPegasus
    @RequiresCondor
    def test_hierarchical_workflow(self):
        """A DAX that embeds a sub-DAX must also run to success."""
        subdax = """<?xml version="1.0" encoding="UTF-8"?>
<adag xmlns="http://pegasus.isi.edu/schema/DAX" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd"
version="3.4" name="process">
<job id="ID0000001" name="ls">
<argument>-l /</argument>
<stdout name="listing.txt" link="output"/>
<uses name="listing.txt" link="output" register="false" transfer="true"/>
</job>
</adag>
"""
        # Write the sub-DAX to disk so the outer DAX can reference it.
        subdaxfile = os.path.join(self.tmpdir, "subdax.xml")
        f = open(subdaxfile, "w")
        f.write(subdax)
        f.close()
        dax = StringIO("""<?xml version="1.0" encoding="UTF-8"?>
<adag xmlns="http://pegasus.isi.edu/schema/DAX" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd"
version="3.4" name="process">
<file name="subdax.xml">
<pfn url="file://%s" site="local"/>
</file>
<dax id="ID0000001" file="subdax.xml">
</dax>
</adag>
""" % subdaxfile)
        e, ew = self.create_test_workflow(dax)
        mgr = em.EnsembleManager()
        while ew.state not in self.endstates:
            mgr.loop_once()
            time.sleep(5)
        self.assertEquals(ew.state, ensembles.EnsembleWorkflowStates.SUCCESSFUL)
| |
#!/usr/bin/env python
# Copyright 2015 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#from spy_state import *
from spy_analysis import *
import sys, re
# All of these calls are based on the print statements in legion_logging.h
# Every legion_spy message starts with this prefix; it captures the node
# id and the (hex) thread id that emitted the line.
prefix = "\[(?P<node>[0-9]+) - (?P<thread>[0-9a-f]+)\] \{\w+\}\{legion_spy\}: "
prefix_pat = re.compile(prefix)
# Logger calls for the shape of the machine
utility_pat = re.compile(prefix+"Utility (?P<pid>[0-9a-f]+)")
processor_pat = re.compile(prefix+"Processor (?P<pid>[0-9a-f]+) (?P<kind>[0-9]+)")
memory_pat = re.compile(prefix+"Memory (?P<mid>[0-9a-f]+) (?P<capacity>[0-9]+)")
proc_mem_pat = re.compile(prefix+"Processor Memory (?P<pid>[0-9a-f]+) (?P<mid>[0-9a-f]+) (?P<band>[0-9]+) (?P<lat>[0-9]+)")
mem_mem_pat = re.compile(prefix+"Memory Memory (?P<mone>[0-9a-f]+) (?P<mtwo>[0-9a-f]+) (?P<band>[0-9]+) (?P<lat>[0-9]+)")
# Calls for the shape of region trees
top_index_pat = re.compile(prefix+"Index Space (?P<uid>[0-9a-f]+)")
top_index_name_pat = re.compile(prefix+"Index Space Name (?P<uid>[0-9a-f]+) (?P<name>\w+)")
index_part_pat = re.compile(prefix+"Index Partition (?P<pid>[0-9a-f]+) (?P<uid>[0-9a-f]+) (?P<disjoint>[0-1]) (?P<color>[0-9]+)")
index_part_name_pat = re.compile(prefix+"Index Partition Name (?P<uid>[0-9a-f]+) (?P<name>\w+)")
index_subspace_pat = re.compile(prefix+"Index Subspace (?P<pid>[0-9a-f]+) (?P<uid>[0-9a-f]+) (?P<color>[0-9]+)")
field_space_pat = re.compile(prefix+"Field Space (?P<uid>[0-9]+)")
field_space_name_pat = re.compile(prefix+"Field Space Name (?P<uid>[0-9]+) (?P<name>\w+)")
field_create_pat = re.compile(prefix+"Field Creation (?P<uid>[0-9]+) (?P<fid>[0-9]+)")
field_name_pat = re.compile(prefix+"Field Name (?P<uid>[0-9]+) (?P<fid>[0-9]+) (?P<name>\w+)")
region_pat = re.compile(prefix+"Region (?P<iid>[0-9a-f]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+)")
region_name_pat = re.compile(prefix+"Logical Region Name (?P<iid>[0-9a-f]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+) (?P<name>\w+)")
partition_name_pat = re.compile(prefix+"Logical Partition Name (?P<iid>[0-9a-f]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+) (?P<name>\w+)")
# Logger calls for operations
top_task_pat = re.compile(prefix+"Top Task (?P<tid>[0-9]+) (?P<uid>[0-9]+) (?P<name>\w+)")
single_task_pat = re.compile(prefix+"Individual Task (?P<ctx>[0-9]+) (?P<tid>[0-9]+) (?P<uid>[0-9]+) (?P<name>\w+)")
index_task_pat = re.compile(prefix+"Index Task (?P<ctx>[0-9]+) (?P<tid>[0-9]+) (?P<uid>[0-9]+) (?P<name>\w+)")
mapping_pat = re.compile(prefix+"Mapping Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
close_pat = re.compile(prefix+"Close Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+) (?P<is_inter>[0-1])")
fence_pat = re.compile(prefix+"Fence Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
copy_op_pat = re.compile(prefix+"Copy Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
acquire_op_pat = re.compile(prefix+"Acquire Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
release_op_pat = re.compile(prefix+"Release Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
deletion_pat = re.compile(prefix+"Deletion Operation (?P<ctx>[0-9]+) (?P<uid>[0-9]+)")
index_slice_pat = re.compile(prefix+"Index Slice (?P<index>[0-9]+) (?P<slice>[0-9]+)")
slice_slice_pat = re.compile(prefix+"Slice Slice (?P<slice1>[0-9]+) (?P<slice2>[0-9]+)")
slice_point_pat = re.compile(prefix+"Slice Point (?P<slice>[0-9]+) (?P<point>[0-9]+) (?P<dim>[0-9]+) (?P<val1>[0-9]+) (?P<val2>[0-9]+) (?P<val3>[0-9]+)")
point_point_pat = re.compile(prefix+"Point Point (?P<point1>[0-9]+) (?P<point2>[0-9]+)")
# Logger calls for phase barriers
phase_barrier_pat = re.compile(prefix+"Phase Barrier (?P<uid>[0-9a-f]+)")
# Logger calls for logical mapping dependence analysis
requirement_pat = re.compile(prefix+"Logical Requirement (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<is_reg>[0-1]) (?P<ispace>[0-9a-f]+) (?P<fspace>[0-9]+) (?P<tid>[0-9]+) (?P<priv>[0-9]+) (?P<coher>[0-9]+) (?P<redop>[0-9]+)")
req_field_pat = re.compile(prefix+"Logical Requirement Field (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<fid>[0-9]+)")
mapping_dep_pat = re.compile(prefix+"Mapping Dependence (?P<ctx>[0-9]+) (?P<prev_id>[0-9]+) (?P<pidx>[0-9]+) (?P<next_id>[0-9]+) (?P<nidx>[0-9]+) (?P<dtype>[0-9]+)")
# Logger calls for physical dependence analysis
task_inst_req_pat = re.compile(prefix+"Task Instance Requirement (?P<uid>[0-9]+) (?P<idx>[0-9]+) (?P<index>[0-9]+)")
# Logger calls for events
event_event_pat = re.compile(prefix+"Event Event (?P<idone>[0-9a-f]+) (?P<genone>[0-9]+) (?P<idtwo>[0-9a-f]+) (?P<gentwo>[0-9]+)")
implicit_event_pat = re.compile(prefix+"Implicit Event (?P<idone>[0-9a-f]+) (?P<genone>[0-9]+) (?P<idtwo>[0-9a-f]+) (?P<gentwo>[0-9]+)")
op_event_pat = re.compile(prefix+"Op Events (?P<uid>[0-9]+) (?P<startid>[0-9a-f]+) (?P<startgen>[0-9]+) (?P<termid>[0-9a-f]+) (?P<termgen>[0-9]+)")
copy_event_pat = re.compile(prefix+"Copy Events (?P<srcman>[0-9a-f]+) (?P<dstman>[0-9a-f]+) (?P<index>[0-9a-f]+) (?P<field>[0-9]+) (?P<tree>[0-9]+) (?P<startid>[0-9a-f]+) (?P<startgen>[0-9]+) (?P<termid>[0-9a-f]+) (?P<termgen>[0-9]+) (?P<redop>[0-9]+)")
copy_field_pat = re.compile(prefix+"Copy Field (?P<startid>[0-9a-f]+) (?P<startgen>[0-9]+) (?P<termid>[0-9a-f]+) (?P<termgen>[0-9]+) (?P<fid>[0-9]+)")
# Logger calls for physical instance usage
physical_inst_pat = re.compile(prefix+"Physical Instance (?P<iid>[0-9a-f]+) (?P<mid>[0-9a-f]+) (?P<index>[0-9a-f]+) (?P<field>[0-9]+) (?P<tid>[0-9]+)")
physical_reduc_pat = re.compile(prefix+"Reduction Instance (?P<iid>[0-9a-f]+) (?P<mid>[0-9a-f]+) (?P<index>[0-9a-f]+) (?P<field>[0-9]+) (?P<tid>[0-9]+) (?P<fold>[0-1]) (?P<indirect>[0-9]+)")
op_user_pat = re.compile(prefix+"Op Instance User (?P<uid>[0-9]+) (?P<idx>[0-9]+) (?P<iid>[0-9a-f]+)")
op_proc_user_pat = re.compile(prefix+"Op Processor User (?P<uid>[0-9]+) (?P<pid>[0-9a-f]+)")
def parse_log_line(line, state):
    """Dispatch one legion_spy log line to the matching state.add_*/set_*
    callback.

    Returns True when some pattern matched AND the state accepted the
    record; False otherwise, so the caller can queue the line for replay
    (records can appear out of order in the log).  As in the original
    if-chain, a line whose pattern matches but whose state callback
    returns False still falls through to the remaining patterns.

    Fixes: the original used the Python-2-only ``<>`` operator and
    ``== None`` comparisons, and repeated the match/apply boilerplate for
    every pattern; the table below preserves the original pattern order.
    """
    handlers = getattr(parse_log_line, "_handlers", None)
    if handlers is None:
        # Small extractors so each table entry stays readable.
        def hexg(m, g):
            return int(m.group(g), 16)

        def intg(m, g):
            return int(m.group(g))

        def boolg(m, g):
            # Original encoded booleans as the characters '0'/'1'.
            return int(m.group(g)) == 1

        handlers = [
            # Machine shape
            (utility_pat, lambda m, s: s.add_utility(hexg(m, 'pid'))),
            (processor_pat, lambda m, s: s.add_processor(hexg(m, 'pid'), intg(m, 'kind'))),
            (memory_pat, lambda m, s: s.add_memory(hexg(m, 'mid'), intg(m, 'capacity'))),
            (proc_mem_pat, lambda m, s: s.set_proc_mem(hexg(m, 'pid'), hexg(m, 'mid'), intg(m, 'band'), intg(m, 'lat'))),
            (mem_mem_pat, lambda m, s: s.set_mem_mem(hexg(m, 'mone'), hexg(m, 'mtwo'), intg(m, 'band'), intg(m, 'lat'))),
            # Region tree shape
            (top_index_pat, lambda m, s: s.add_index_space(hexg(m, 'uid'))),
            (top_index_name_pat, lambda m, s: s.add_index_space_name(hexg(m, 'uid'), m.group('name'))),
            (index_part_pat, lambda m, s: s.add_index_partition(hexg(m, 'pid'), hexg(m, 'uid'), boolg(m, 'disjoint'), intg(m, 'color'))),
            (index_part_name_pat, lambda m, s: s.add_index_partition_name(hexg(m, 'uid'), m.group('name'))),
            (index_subspace_pat, lambda m, s: s.add_index_subspace(hexg(m, 'pid'), hexg(m, 'uid'), intg(m, 'color'))),
            (field_space_pat, lambda m, s: s.add_field_space(intg(m, 'uid'))),
            (field_space_name_pat, lambda m, s: s.add_field_space_name(intg(m, 'uid'), m.group('name'))),
            (field_create_pat, lambda m, s: s.add_field(intg(m, 'uid'), intg(m, 'fid'))),
            (field_name_pat, lambda m, s: s.add_field_name(intg(m, 'uid'), intg(m, 'fid'), m.group('name'))),
            (region_pat, lambda m, s: s.add_region(hexg(m, 'iid'), intg(m, 'fid'), intg(m, 'tid'))),
            (region_name_pat, lambda m, s: s.add_region_name(hexg(m, 'iid'), intg(m, 'fid'), intg(m, 'tid'), m.group('name'))),
            (partition_name_pat, lambda m, s: s.add_partition_name(hexg(m, 'iid'), intg(m, 'fid'), intg(m, 'tid'), m.group('name'))),
            # Operations
            (top_task_pat, lambda m, s: s.add_top_task(intg(m, 'tid'), intg(m, 'uid'), m.group('name'))),
            (single_task_pat, lambda m, s: s.add_single_task(intg(m, 'ctx'), intg(m, 'tid'), intg(m, 'uid'), m.group('name'))),
            (index_task_pat, lambda m, s: s.add_index_task(intg(m, 'ctx'), intg(m, 'tid'), intg(m, 'uid'), m.group('name'))),
            (mapping_pat, lambda m, s: s.add_mapping(intg(m, 'ctx'), intg(m, 'uid'))),
            (close_pat, lambda m, s: s.add_close(intg(m, 'ctx'), intg(m, 'uid'), boolg(m, 'is_inter'))),
            (fence_pat, lambda m, s: s.add_fence(intg(m, 'ctx'), intg(m, 'uid'))),
            (copy_op_pat, lambda m, s: s.add_copy_op(intg(m, 'ctx'), intg(m, 'uid'))),
            (acquire_op_pat, lambda m, s: s.add_acquire_op(intg(m, 'ctx'), intg(m, 'uid'))),
            (release_op_pat, lambda m, s: s.add_release_op(intg(m, 'ctx'), intg(m, 'uid'))),
            (deletion_pat, lambda m, s: s.add_deletion(intg(m, 'ctx'), intg(m, 'uid'))),
            (index_slice_pat, lambda m, s: s.add_index_slice(intg(m, 'index'), intg(m, 'slice'))),
            (slice_slice_pat, lambda m, s: s.add_slice_slice(intg(m, 'slice1'), intg(m, 'slice2'))),
            (slice_point_pat, lambda m, s: s.add_slice_point(intg(m, 'slice'), intg(m, 'point'), intg(m, 'dim'), intg(m, 'val1'), intg(m, 'val2'), intg(m, 'val3'))),
            (point_point_pat, lambda m, s: s.add_point_point(intg(m, 'point1'), intg(m, 'point2'))),
            # Phase barriers
            (phase_barrier_pat, lambda m, s: s.add_phase_barrier(hexg(m, 'uid'))),
            # Logical mapping dependence analysis
            (requirement_pat, lambda m, s: s.add_requirement(intg(m, 'uid'), intg(m, 'index'), boolg(m, 'is_reg'), hexg(m, 'ispace'), intg(m, 'fspace'), intg(m, 'tid'), intg(m, 'priv'), intg(m, 'coher'), intg(m, 'redop'))),
            (req_field_pat, lambda m, s: s.add_req_field(intg(m, 'uid'), intg(m, 'index'), intg(m, 'fid'))),
            (mapping_dep_pat, lambda m, s: s.add_mapping_dependence(intg(m, 'ctx'), intg(m, 'prev_id'), intg(m, 'pidx'), intg(m, 'next_id'), intg(m, 'nidx'), intg(m, 'dtype'))),
            # Physical dependence analysis
            (task_inst_req_pat, lambda m, s: s.add_instance_requirement(intg(m, 'uid'), intg(m, 'idx'), intg(m, 'index'))),
            # Events
            (event_event_pat, lambda m, s: s.add_event_dependence(hexg(m, 'idone'), intg(m, 'genone'), hexg(m, 'idtwo'), intg(m, 'gentwo'))),
            (implicit_event_pat, lambda m, s: s.add_implicit_dependence(hexg(m, 'idone'), intg(m, 'genone'), hexg(m, 'idtwo'), intg(m, 'gentwo'))),
            (op_event_pat, lambda m, s: s.add_op_events(intg(m, 'uid'), hexg(m, 'startid'), intg(m, 'startgen'), hexg(m, 'termid'), intg(m, 'termgen'))),
            (copy_event_pat, lambda m, s: s.add_copy_events(hexg(m, 'srcman'), hexg(m, 'dstman'), hexg(m, 'index'), intg(m, 'field'), intg(m, 'tree'), hexg(m, 'startid'), intg(m, 'startgen'), hexg(m, 'termid'), intg(m, 'termgen'), intg(m, 'redop'))),
            (copy_field_pat, lambda m, s: s.add_copy_field_to_copy_event(hexg(m, 'startid'), intg(m, 'startgen'), hexg(m, 'termid'), intg(m, 'termgen'), intg(m, 'fid'))),
            # Physical instance usage
            (physical_inst_pat, lambda m, s: s.add_physical_instance(hexg(m, 'iid'), hexg(m, 'mid'), hexg(m, 'index'), intg(m, 'field'), intg(m, 'tid'))),
            (physical_reduc_pat, lambda m, s: s.add_reduction_instance(hexg(m, 'iid'), hexg(m, 'mid'), hexg(m, 'index'), intg(m, 'field'), intg(m, 'tid'), boolg(m, 'fold'), intg(m, 'indirect'))),
            (op_user_pat, lambda m, s: s.add_op_user(intg(m, 'uid'), intg(m, 'idx'), hexg(m, 'iid'))),
            (op_proc_user_pat, lambda m, s: s.add_op_proc_user(intg(m, 'uid'), hexg(m, 'pid'))),
        ]
        # Cache the table on the function object so it is built only once
        # even though this is called for every line of (large) log files.
        parse_log_line._handlers = handlers

    for pat, accept in handlers:
        m = pat.match(line)
        if m is not None and accept(m, state):
            return True
    return False
def parse_log_file(file_name, state):
    """Parse every legion_spy line in *file_name* into *state*.

    Lines can be emitted out of order, so lines that carry the legion_spy
    prefix but cannot be applied yet are replayed until the pending set
    is empty; if a replay round makes no progress the logging assumptions
    are broken and we abort.

    Returns the number of lines applied on the first pass.
    NOTE(review): replayed lines are not counted in *matches*, matching
    the original behavior -- confirm whether that is intended.

    Fixes: file handle now closed via a context manager, ``== None``
    replaced with ``is None``, py3-compatible print calls, and the
    misleading "we failed to match" comment corrected (the prefix DID
    match at that point).
    """
    matches = 0
    # Since some lines might match, but are out of order due to things
    # getting printed to the log file in weird orders, try reparsing lines.
    replay_lines = list()
    with open(file_name, 'r') as log:
        for line in log:
            # Quick prefix check: lines without the legion_spy prefix can
            # be skipped entirely.
            m = prefix_pat.match(line)
            if m is None:
                continue
            # The prefix matched; try to apply the record now, otherwise
            # queue it for replay (it may depend on a later line).
            if parse_log_line(line, state):
                matches += 1
            else:
                replay_lines.append(line)
    # Now see if we have lines that need to be replayed
    while len(replay_lines) > 0:
        to_delete = set()
        for line in replay_lines:
            if parse_log_line(line, state):
                to_delete.add(line)
        # Forward-progress check: if nothing was applied this round, the
        # remaining lines can never be applied.
        if len(to_delete) == 0:
            print("ERROR: NO PROGRESS PARSING! BUG IN LEGION SPY LOGGING ASSUMPTIONS!")
            for line in replay_lines:
                print(line)
            assert False
        # Remove one occurrence per applied line; duplicate lines are
        # replayed again on a later round (original semantics).
        for line in to_delete:
            replay_lines.remove(line)
    return matches
# EOF
| |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory containing this script; the package under test is named after it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options; populated in main().
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Remote content directory for the target user and the per-package
# sub-directory; both are filled in by main().
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its stdout, and return
    (return_code, output_lines).

    NOTE(review): a line is appended to *output* only after the
    empty-line / "daemon started" check, so the line that triggers the
    inner break is echoed but never captured -- confirm this is intended.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    while True:
        # Poll until the child exits; for foreground commands, drain its
        # stdout in the meantime so the pipe cannot fill up and block.
        cmd_return_code = cmd_proc.poll()
        if cmd_return_code != None:
            break
        if not cmd.endswith("&"):
            while True:
                line = cmd_proc.stdout.readline().strip("\r\n")
                print line
                if not line or line.find("daemon started") >= 0:
                    break
                output.append(line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations in a login shell for the target user so
    they run with that user's D-Bus session environment (XW_ENV);
    all other commands are returned unchanged."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Run `id -u <user>` on the target device and return doCMD()'s
    (return_code, output_lines) tuple."""
    if PARAMETERS.mode == "SDB":
        remote_cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        remote_cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user )
    return doCMD(remote_cmd)
def getPKGID(pkg_name=None):
    """Look up the package id for *pkg_name* via `pkgcmd -l` on the
    device; returns None when the listing fails or the name is absent."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))

    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None

    # pkgcmd lists one package per line; the id follows the "pkgid"
    # token and both id and name appear wrapped in square brackets.
    needle = "[" + pkg_name + "]"
    for line in output:
        if needle not in line:
            continue
        fields = line.split()
        return fields[fields.index("pkgid") + 1].strip("[]")
    return None
def doRemoteCMD(cmd=None):
    """Run *cmd* on the target device through sdb shell or ssh and
    return doCMD()'s (return_code, output_lines) tuple."""
    if PARAMETERS.mode == "SDB":
        full_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        full_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(full_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device (sdb push or scp) and
    sync; returns True on success, False on failure.

    Bug fix: the original returned True exactly when the copy command
    exited non-zero, which inverted the success checks in instPKGs()
    and uninstPKGs() (both treat a False return as failure).
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")
    # Zero exit status from the copy command means success.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .xpk package shipped next to this script and
    remove the remote content directory; returns True when every step
    succeeded."""
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for fname in files:
            if not fname.endswith(".xpk"):
                continue
            pkg_id = getPKGID(os.path.basename(os.path.splitext(fname)[0]))
            if not pkg_id:
                # Cannot resolve the package id; remember the failure but
                # keep uninstalling the remaining packages.
                action_status = False
                continue
            (return_code, output) = doRemoteCMD(
                "pkgcmd -u -t xpk -q -n %s" % pkg_id)
            if any("Failure" in line for line in output):
                action_status = False

    # Remove the package's remote content directory as well.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    return action_status
def instPKGs():
    """Install every .xpk package found next to this script and push the
    remaining resources to the remote package directory; returns True
    when every step succeeded."""
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    for root, dirs, files in os.walk(SCRIPT_DIR):
        for fname in files:
            if not fname.endswith(".xpk"):
                continue
            # Push the package, install it, then remove the uploaded file.
            if not doRemoteCopy(os.path.join(root, fname), "%s/%s" % (SRC_DIR, fname)):
                action_status = False
            (return_code, output) = doRemoteCMD(
                "pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, fname))
            doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, fname))
            if any("Failure" in line for line in output):
                action_status = False

    # Push everything that is neither a package nor this installer.
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith(".xpk") or item.endswith("inst.py"):
            continue
        item_name = os.path.basename(item)
        if not doRemoteCopy(item, PKG_SRC_DIR+"/"+item_name):
        #if not doRemoteCopy(item, PKG_SRC_DIR):
            action_status = False

    return action_status
def main():
    """Parse command-line options and install (-i) or uninstall (-u) the
    package on the target device via sdb or ssh."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # Default to the "app" user when none was given.
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    # Derive the remote content/package directories for that user.
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    # Default to SDB (USB) mode; anything else is treated as SSH.
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # Auto-detect the first attached sdb device when none was given.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Resolve the target user's uid so XW_ENV can point at that user's
    # D-Bus session socket for pkgcmd invocations.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    # Default action is install unless -u was requested.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point: run main(); if it returns (rather than exiting
# with an error code along the way), report success.
if __name__ == "__main__":
    main()
    sys.exit(0)
| |
import os
import yaml
import lasagne
import pandas as pd
import numpy as np
from network import Network
import architectures as arches

L = lasagne.layers

# loads data with names according to autoload_data.py
from autoload_data import *

# Load specs for all networks.  yaml.load() without an explicit Loader is
# deprecated (PyYAML >= 5.1) and can construct arbitrary Python objects;
# the spec file only needs plain YAML types, so safe_load is the right call.
with open('arch_specs.yaml') as archfile:
    arch_dict = yaml.safe_load(archfile)

### Compiling results from saved parameters ###
def compute_pretrained_results(net, archname, idx, test_data, fake=False):
    """
    Compute pre-tuning results for a given arch/network on appropriate test data
    """
    Xt, yt = test_data
    # Pick the parameter file and directory for either the fake-data variant
    # or the real aggregate-fit variant of this architecture/split.
    if fake:
        template = '{} {} split fake data.npz'
        fname = template.format('fake_' + archname, idx)
        paramsdir = os.path.join(paramsdir_, 'fake_' + archname)
    else:
        template = '{} {} split agg fit exp 1-4.npz'
        fname = template.format(archname.replace('_', ' '), idx)
        paramsdir = os.path.join(paramsdir_, archname[:-1])
    net.load_params(os.path.join(paramsdir, fname))
    # Per-observation negative log-likelihoods and full output distributions.
    nlls = net.itemized_test_fn(Xt, yt)
    predictions = net.output_fn(Xt)
    results_df = pd.DataFrame(index=np.arange(Xt.shape[0]), columns=[idx])
    results_df[idx] = nlls
    return results_df, predictions
def compute_tuned_results(net, archname, idx, test_idx, test_data, df):
    """
    Compute post-tuning results for a given architecture/network on appropriate
    test data
    """
    Xt, yt = test_data
    # Rows belonging to this tuning run's held-out group.
    group_idx = (test_idx - 1) % 5  # fix eventually to take df/groupidx/selection passed independently?
    selection = df.loc[df['group'] == (group_idx + 1)].index.values
    template = '{} {} agg fit exp 1-4 {} tune fit exp 0.npz'
    fname = template.format(archname.replace('_', ' '), idx, test_idx)
    net.load_params(os.path.join(paramsdir_, archname[:-1], fname))
    # Evaluate only the selected rows.
    X_sel = Xt[selection, :, :, :]
    nlls = net.itemized_test_fn(X_sel, yt[selection])
    predictions = net.output_fn(X_sel)
    results_df = pd.DataFrame(index=np.arange(Xt.shape[0]), columns=[idx])
    results_df.loc[selection, idx] = nlls
    predictions_df = pd.DataFrame(index=selection, columns=np.arange(36))
    predictions_df.loc[selection, :] = predictions
    return results_df, predictions_df
def compute_net_results(net, archname, test_data, df):
    """
    For a given network, test on appropriate test data and return dataframes
    with results and predictions (named obviously)
    """
    # Pre-tuning: one (results, predictions) pair per training split.
    pretrained = [compute_pretrained_results(net, archname, split, test_data)
                  for split in range(5)]
    pretrain_results = pd.concat([r for r, _ in pretrained], axis=1)
    pretrain_predictions = [p for _, p in pretrained]
    # Post-tuning: one pair per (training split, tuning split) combination.
    tuned = [compute_tuned_results(net, archname, split, test_split, test_data, df)
             for split in range(5) for test_split in range(5)]
    tune_predictions = [p for _, p in tuned]
    tune_results = pd.concat([r for r, _ in tuned],
                             axis=1, join='inner').stack().unstack()
    return pretrain_results, pretrain_predictions, tune_results, tune_predictions
def rehydrate(verbose=False):
    """
    Recompile networks, load params, and run on appropriate test data
    outputs dictionaries with keys = names of architectures as in arch_specs.yaml
    outputs:
    PTx is pretrained on bulk but untuned
    Tx is tuned
    xR is results (nlls)
    xP is predictions (distributions)
    param_counts is what it sounds like.
    """
    # df, paramsdir_, resultsdir and loading come from autoload_data's
    # star-import at module scope — TODO confirm against autoload_data.py.
    Xt, yt, _, _, _ = loading.unpack_data(df) # get Xs and ys
    PTR = {} # results and predictions holders
    TR = {}
    PTP = {}
    TP = {}
    param_counts = {} # counter of parameters per net
    for archname in arch_dict.keys():
        # for each network
        arch_dir = archname[:-1] # get directory (archname minus its trailing variant char)
        if arch_dir not in os.listdir(paramsdir_):
            # if it doesn't exist
            print("{} not started".format(archname[:-1])) # alert us
            continue
        files = os.listdir(os.path.join(paramsdir_, arch_dir))
        if not any(archname.replace('_', ' ') in f for f in files):
            # if a network doesn't have a full set of parameter fits
            print("{} not completed".format(archname)) # let us know
            continue
        if verbose:
            print(archname)
        arch = arch_dict[archname]
        af = getattr(arches, arch['type'])
        # The lambda closes over the loop's current `arch`, but it is consumed
        # immediately by Network() below, so late binding is harmless here.
        arch_func = lambda input_var: af(input_var, **arch['kwargs'])
        net = Network(arch_func) # compile network from specs in arch_specs.yaml
        param_counts[archname] = L.count_params(net.net) # count the params
        pretrain_R, pretrain_P, tune_R, tune_P = compute_net_results(net, archname, (Xt, yt), df)
        PTR[archname] = pretrain_R # insert results into respective holders
        TR[archname] = tune_R
        PTP[archname] = pretrain_P
        TP[archname] = tune_P
        pretrain_R.to_csv(os.path.join(resultsdir, 'pretrain {}.csv'.format(archname))) # save results into respective directories
        tune_R.to_csv(os.path.join(resultsdir, 'train {}.csv'.format(archname)))
    return PTR, TR, PTP, TP, param_counts
### Statistics and summaries ###
def entropy_zets(zets):
    """Shannon entropy (in bits) of the empirical distribution of ``zets``.

    ``zets`` are binned into the 36 unit-width bins [0, 36); zero-probability
    bins are dropped before taking logs.
    """
    # `normed` was deprecated and then removed from np.histogram (NumPy 1.24);
    # `density=True` is the replacement.  With unit-width bins the density
    # values equal the empirical probabilities, so the entropy is unchanged.
    z = np.histogram(zets, bins=np.arange(37), density=True)[0]
    z = z[z > 0]
    return -(z * np.log2(z)).sum()
def count_pieces(row):
    """Counts pieces in a given binstring state representation."""
    # 'bp'/'wp' hold the black and white piece bitstrings for the position.
    total = 0
    for board in (row['bp'], row['wp']):
        total = total + np.array(list(board)).astype(int).sum()
    return total
def aggregate_results(PTR, TR, param_counts):
    """
    Args:
        PTR, TR are dictionaries of results dataframes with archnames as keys,
        archnames as in arch_specs.yaml and PTR/TR as produced by rehydrate()
        param_counts also keyed by archnames and as produced by rehydrate;
        contains parameter counts
    outputs:
        F is a dataframe containing individual networks in rows with mean
        performances in columns ('pretrained', 'tuned', their per-subject
        variants, and 'tuning improvement')
    """
    pc_series = pd.Series(param_counts)
    pc_series.to_csv(os.path.join(resultsdir, 'params per net.csv'))
    F = pd.DataFrame(index=np.arange(len(pc_series.index)), columns=['net name'])
    F['net name'] = pc_series.index
    F['num params'] = pc_series.values

    def _add_summary(results, colname):
        """Write grand-mean and per-subject-mean NLL columns into F."""
        for name, v in results.items():
            row_sel = F['net name'] == name
            v['subject'] = Ss  # Ss: per-observation subject ids from autoload_data
            # BUG FIX: the old code did `v['mean'] = v.mean()`, which computes
            # *column* means (a Series indexed by column labels) and silently
            # misaligns when assigned back against the row index, leaving NaNs.
            # The intended quantity is the per-observation mean across the
            # split columns.
            split_cols = [c for c in v.columns if c not in ('subject', 'mean')]
            v['mean'] = v[split_cols].mean(axis=1)
            F.loc[row_sel, colname] = v['mean'].mean()
            pvt = v.pivot_table(index='subject', values='mean', aggfunc=np.mean).mean().values
            F.loc[row_sel, colname + ' subject'] = pvt

    _add_summary(PTR, 'pretrained')
    _add_summary(TR, 'tuned')
    # Positive values mean tuning reduced the mean NLL.
    F['tuning improvement'] = -(F['tuned'] - F['pretrained'])
    return F
def error_per_piece(archname, resultdict, datadf):
    """
    Args:
    archname is name of architecture as in arch_specs.yaml
    resultdict should be a dictionary of predictions where keys = archnames
    datadf is the original data containing positions, subjects, moves, etc
    Outputs:
    preds is a list of dataframes containing predictions for reach observation
    in datadf and training split for archname
    per_pieces is a df with the mean chance-relative nll for each subject and
    number of pieces in position
    """
    # Chance NLL for a position with x pieces: uniform over the 36-x open squares.
    chancenll = lambda x: -np.log(1/(36-x))
    # NLL of the observed move ('zet') under the predicted distribution.
    cross_entropy = lambda row: -np.log(row[int(row['zet'])])
    # NOTE(review): these slices overlap ([0:5], [1:6], ...).  If
    # resultdict[archname] holds the 25 tuned prediction frames produced by
    # compute_net_results (5 tuning runs per training split), the intended
    # grouping is probably [i*5:(i+1)*5] — confirm against the caller.
    preds = [pd.concat(resultdict[archname][i:i+5]).sort_index() for i in range(5)]
    for pred in preds:
        # Annotate each prediction frame with observation metadata.  Note the
        # module-level `df` is used here even though `datadf` was passed in —
        # presumably they are the same frame; verify before changing.
        pred['subject'] = df['subject']
        pred['zet'] = df['zet']
        pred['num pieces'] = df['np']
        pred['rt'] = df['rt']
        pred['chance nll'] = chancenll(df['np'].values)
        pred['error'] = pred.apply(cross_entropy, axis=1)
        pred['relative error'] = pred['chance nll'] - pred['error']
    # One column per split: mean chance-relative NLL by piece count,
    # averaged over subjects.
    per_pieces = pd.concat([
        p.pivot_table(index='subject', values='relative error', columns='num pieces').mean(axis=0)
        for p in preds
    ], axis=1)
    return preds, per_pieces
| |
import datetime
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.handlers.wsgi import WSGIRequest
from django.test import Client
from django.test import TestCase
from registration import forms
from registration import signals
from registration.admin import RegistrationAdmin
from registration.backends import get_backend
from registration.backends.default import DefaultBackend
from registration.models import RegistrationProfile
class _MockRequestClient(Client):
    """
    A ``django.test.Client`` subclass which can return mock
    ``HttpRequest`` objects.
    """
    def request(self, **request):
        """
        Rather than issuing a request and returning the response, this
        simply constructs an ``HttpRequest`` object and returns it.
        """
        # Minimal WSGI environ describing "GET /" on testserver.
        environ = {}
        environ['HTTP_COOKIE'] = self.cookies
        environ['PATH_INFO'] = '/'
        environ['QUERY_STRING'] = ''
        environ['REMOTE_ADDR'] = '127.0.0.1'
        environ['REQUEST_METHOD'] = 'GET'
        environ['SCRIPT_NAME'] = ''
        environ['SERVER_NAME'] = 'testserver'
        environ['SERVER_PORT'] = '80'
        environ['SERVER_PROTOCOL'] = 'HTTP/1.1'
        environ['wsgi.version'] = (1, 0)
        environ['wsgi.url_scheme'] = 'http'
        environ['wsgi.errors'] = self.errors
        environ['wsgi.multiprocess'] = True
        environ['wsgi.multithread'] = False
        environ['wsgi.run_once'] = False
        # Client-level defaults first, then per-call overrides win.
        environ.update(self.defaults)
        environ.update(request)
        return WSGIRequest(environ)
def _mock_request():
    """
    Construct and return a mock ``HttpRequest`` object; this is used
    in testing backend methods which expect an ``HttpRequest`` but
    which are not being called from views.
    """
    client = _MockRequestClient()
    return client.request()
class BackendRetrievalTests(TestCase):
    """
    Test that utilities for retrieving the active backend work
    properly.
    """
    def test_get_backend(self):
        """
        Verify that ``get_backend()`` returns the correct value when
        passed a valid backend.
        """
        # ``failUnless`` has been a deprecated alias since Python 2.7;
        # ``assertIsInstance`` is the modern spelling and produces a
        # clearer failure message than failUnless(isinstance(...)).
        self.assertIsInstance(
            get_backend('registration.backends.default.DefaultBackend'),
            DefaultBackend)

    def test_backend_error_invalid(self):
        """
        Test that a nonexistent/unimportable backend raises the
        correct exception.
        """
        self.assertRaises(ImproperlyConfigured, get_backend,
                          'registration.backends.doesnotexist.NonExistentBackend')

    def test_backend_attribute_error(self):
        """
        Test that a backend module which exists but does not have a
        class of the specified name raises the correct exception.
        """
        self.assertRaises(ImproperlyConfigured, get_backend,
                          'registration.backends.default.NonexistentBackend')
class DefaultRegistrationBackendTests(TestCase):
"""
Test the default registration backend.
Running these tests successfull will require two templates to be
created for the sending of activation emails; details on these
templates and their contexts may be found in the documentation for
the default backend.
"""
def setUp(self):
"""
Create an instance of the default backend for use in testing,
and set ``ACCOUNT_ACTIVATION_DAYS``.
"""
from registration.backends.default import DefaultBackend
self.backend = DefaultBackend()
self.old_activation = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', None)
settings.ACCOUNT_ACTIVATION_DAYS = 7
def tearDown(self):
"""
Restore the original value of ``ACCOUNT_ACTIVATION_DAYS``.
"""
settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation
def test_registration(self):
"""
Test the registration process: registration creates a new
inactive account and a new profile with activation key,
populates the correct account data and sends an activation
email.
"""
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
# Details of the returned user must match what went in.
self.assertEqual(new_user.username, 'bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
# New user must not be active.
self.failIf(new_user.is_active)
# A registration profile was created, and an activation email
# was sent.
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
def test_registration_no_sites(self):
"""
Test that registration still functions properly when
``django.contrib.sites`` is not installed; the fallback will
be a ``RequestSite`` instance.
"""
Site._meta.installed = False
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
self.assertEqual(new_user.username, 'bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
self.failIf(new_user.is_active)
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
Site._meta.installed = True
def test_valid_activation(self):
"""
Test the activation process: activating within the permitted
window sets the account's ``is_active`` field to ``True`` and
resets the activation key.
"""
valid_user = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
valid_profile = RegistrationProfile.objects.get(user=valid_user)
activated = self.backend.activate(_mock_request(),
valid_profile.activation_key)
self.assertEqual(activated.username, valid_user.username)
self.failUnless(activated.is_active)
# Fetch the profile again to verify its activation key has
# been reset.
valid_profile = RegistrationProfile.objects.get(user=valid_user)
self.assertEqual(valid_profile.activation_key,
RegistrationProfile.ACTIVATED)
def test_invalid_activation(self):
"""
Test the activation process: trying to activate outside the
permitted window fails, and leaves the account inactive.
"""
expired_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
expired_user.date_joined = expired_user.date_joined - datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
expired_user.save()
expired_profile = RegistrationProfile.objects.get(user=expired_user)
self.failIf(self.backend.activate(_mock_request(),
expired_profile.activation_key))
self.failUnless(expired_profile.activation_key_expired())
def test_allow(self):
"""
Test that the setting ``REGISTRATION_OPEN`` appropriately
controls whether registration is permitted.
"""
old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
settings.REGISTRATION_OPEN = True
self.failUnless(self.backend.registration_allowed(_mock_request()))
settings.REGISTRATION_OPEN = False
self.failIf(self.backend.registration_allowed(_mock_request()))
settings.REGISTRATION_OPEN = old_allowed
def test_form_class(self):
"""
Test that the default form class returned is
``registration.forms.RegistrationForm``.
"""
self.failUnless(self.backend.get_form_class(_mock_request()) is forms.RegistrationForm)
def test_post_registration_redirect(self):
"""
Test that the default post-registration redirect is the named
pattern ``registration_complete``.
"""
self.assertEqual(self.backend.post_registration_redirect(_mock_request(), User()),
('registration_complete', (), {}))
def test_registration_signal(self):
"""
Test that registering a user sends the ``user_registered``
signal.
"""
def receiver(sender, **kwargs):
self.failUnless('user' in kwargs)
self.assertEqual(kwargs['user'].username, 'bob')
self.failUnless('request' in kwargs)
self.failUnless(isinstance(kwargs['request'], WSGIRequest))
received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_registered.connect(receiver, sender=self.backend.__class__)
self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
self.assertEqual(len(received_signals), 1)
self.assertEqual(received_signals, [signals.user_registered])
def test_activation_signal_success(self):
"""
Test that successfully activating a user sends the
``user_activated`` signal.
"""
def receiver(sender, **kwargs):
self.failUnless('user' in kwargs)
self.assertEqual(kwargs['user'].username, 'bob')
self.failUnless('request' in kwargs)
self.failUnless(isinstance(kwargs['request'], WSGIRequest))
received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_activated.connect(receiver, sender=self.backend.__class__)
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
profile = RegistrationProfile.objects.get(user=new_user)
self.backend.activate(_mock_request(), profile.activation_key)
self.assertEqual(len(received_signals), 1)
self.assertEqual(received_signals, [signals.user_activated])
def test_activation_signal_failure(self):
"""
Test that an unsuccessful activation attempt does not send the
``user_activated`` signal.
"""
receiver = lambda sender, **kwargs: received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_activated.connect(receiver, sender=self.backend.__class__)
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
new_user.save()
profile = RegistrationProfile.objects.get(user=new_user)
self.backend.activate(_mock_request(), profile.activation_key)
self.assertEqual(len(received_signals), 0)
def test_email_send_action(self):
"""
Test re-sending of activation emails via admin action.
"""
admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
alice = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
admin_class.resend_activation_email(_mock_request(),
RegistrationProfile.objects.all())
self.assertEqual(len(mail.outbox), 2) # One on registering, one more on the resend.
RegistrationProfile.objects.filter(user=alice).update(activation_key=RegistrationProfile.ACTIVATED)
admin_class.resend_activation_email(_mock_request(),
RegistrationProfile.objects.all())
self.assertEqual(len(mail.outbox), 2) # No additional email because the account has activated.
def test_activation_action(self):
"""
Test manual activation of users view admin action.
"""
admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
alice = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
admin_class.activate_users(_mock_request(),
RegistrationProfile.objects.all())
self.failUnless(User.objects.get(username='alice').is_active)
| |
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
"""
Class for loading boolean expression trees
and returning sql to get the results
"""
import pymongo
from WebUI import db
from bson import ObjectId
from WebUI.sqlstore import datastore
from sets import Set
# Also create an instance of sqlite3 connection
class QueryTree(object):
def __init__(self, tree, algo="avg", indent="", debug=False, mode="dynatree", skip=0, limit=30):
"""
Incoming tree must have a single entry as key
"""
self.algo = algo
self.tree = tree
self.debug = debug
self.indent = indent
self.mode = mode
self.skip = skip
self.limit=limit
if type(tree) == type(u""):
# leaf
self.children = []
self.title = tree
return
if mode == "parselogic":
self.parselogic()
return
self.title = tree.keys()[0]
self.children = []
self.data = None
self.indent = indent
self.debug=debug
if type(tree[self.title]) == type([]):
for atree in tree[self.title]:
self.children.append(self.__class__(atree, algo=self.algo, indent=self.indent+" ", debug=self.debug))
else:
self.data = tree[self.title]
def parselogic(self):
self.children = []
self.title = "$" + self.tree["logic"]
for atree in self.tree["terms"]:
self.children.append(self.__class__(atree, algo=self.algo, indent=self.indent+" ", debug=self.debug, mode=self.mode))
def sql(self):
"""
Creates and returns the sql statement based on the tree loaded
in the base implementation returns all the columns involved in tree with ordering in the order
in which they appear
"""
print "Expression: ", self.expression()
sql = "SELECT " + ','.join(self.expression()) + " FROM ob"
return sql
def expression(self):
"""
Computes the results based on the tree loaded
else returns empty results
Recursively calls the parent if any child nodes are not leaf
"""
# For now just print the tree
if self.title[0] == "$":
if self.debug:
print self.indent + self.title + ", %d Children"%(len(self.children))
results = []
for child in self.children:
# Determine how to combine results
aresult = child.expression()
results = results + aresult
return results
else:
result = self.title
if self.debug:
print self.indent + "Processing: " + self.title
# Query the database
return [result]
# Must have children
class AvgQueryTree(QueryTree):
def process(self):
"""
Computes the results based on the tree loaded
else returns empty results
Recursively calls the parent self if any child nodes are not leaf
For and the score of the clip is the score of the maximum of the attributes selected
For each clip, find the score of the attributes of all clips
- Aggregate the attribute scores for all clips
"""
# Create a new collection
# prefix = "score"
# col = db["fusion"]
# fieldname = time.t
# Insert the meta object so that this collection can be garbage collected
# keep only first few
# For now just print the tree
if self.title[0] == "$":
if self.debug:
print self.indent + self.title + ", %d Children"%(len(self.children))
results = []
sum = 1
# Evaluate all the non-attribute nodes first
for child in self.children:
# Determine how to combine results
aresult = child.process()
results.append(aresult)
sum = sum + aresult
return sum
else:
# Create a collection of scores the threshold doesn't matter
result = []
colname = "clip_calib_" + self.title
count = db[colname].find({"score" : self.data}).count()
if count > 0:
for ares in db[colname].find({"score" : self.data}):
result.append(ares["clip"])
# Use the information in .data to query the database to query the data
result = 1
if self.debug:
print self.indent + "Avg: " + self.title
# Query the database
return result
# Must have children
class BooleanQueryTree(QueryTree):
def sql(self):
"""
Example query is as follows -
SELECT scary, working FROM clip_calib_scores WHERE (scary > 0.1 AND working > 0.1) ORDER BY scary DESC LIMIT 0,50;
"""
# Recursively invokes expression
items = ", ".join(self.expression())
sql = "SELECT v_id, " + items + " FROM ob WHERE " + self.query()
sql = sql + " ORDER BY " + items + " DESC LIMIT 0,50;"
return sql
def query(self):
"""
Computes the results based on the tree loaded
else returns empty results
Recursively calls itself if any child nodes are not leaf
"""
# For now just print the tree
if self.title[0] == "$":
if self.debug:
print self.indent + self.title + ", %d Children"%(len(self.children))
results = []
for child in self.children:
# Determine how to combine results
aresult = child.query()
# Combine using the title operator
if type(aresult) == type(u"d"):
# Got a string
results = results + [aresult]
else:
results = results + aresult
return " (" + ( " " + self.title[1:].upper() + " ").join(results) + ")"
else:
key = self.data.keys()[0]
expr = self.title
if key == "$gte":
expr = expr + " >= "
expr = expr + str(self.data[key])
return expr
class ScoreFusionQueryTree(QueryTree):
def sql(self):
"""
Example query is as follows -
SELECT scary, working, (scary + working) /2 as result FROM clip_calib_scores ORDER BY result DESC LIMIT 0,50;
"""
terms = self.expression()
# Recursively invokes expression
items = ", ".join(terms)
tables = set()
# for i, item in enumerate(terms):
# if item[:2] not in ["bu", "sc", "ob"]:
# terms[i] = "sc." + item
# print terms
for anitem in terms:
if anitem[:2] == "bu":
tables.add("bu")
if anitem[:2] == "sc":
tables.add("sc")
if anitem[:2] == "ob":
tables.add("ob")
ob = 1
tablestr = ", ".join(tables)
tableslist = list(tables)
if len(tableslist) == 1:
wherejoin = " "
if len(tables) == 2:
wherejoin = "WHERE " + tableslist[0] + ".v_id = " + tableslist[1] + ".v_id"
if len(tables) == 3:
wherejoin = "WHERE ob.v_id = sc.v_id and ob.v_id = bu.v_id "
# if(bu = 1)
sql = "SELECT " + tableslist[0] + ".v_id, " + items + ", " + self.query() + " as result FROM " + tablestr + " " + wherejoin
sql = sql + " ORDER BY result DESC LIMIT " + str(self.skip) +"," + str(self.limit) + ";"
return sql
def query(self):
"""
Computes the results based on the tree loaded
else returns empty results
Recursively calls itself if any child nodes are not leaf
"""
# For now just print the tree
if self.title[0] == "$":
if self.debug:
print self.indent + self.title + ", %d Children"%(len(self.children))
results = []
# raise
for child in self.children:
# Determine how to combine results
aresult = child.query()
# Combine using the title operator
print type(aresult), type(u"d")
print aresult
if type(aresult) == type(u"d"):
# Got a string
results = results + [aresult]
else:
results = results + aresult
if self.title[1:] == "and":
# Use average
return "((" + " + ".join(results) + ") / " + str(len(results)) + ") "
elif self.title[1:] == "or":
# Use max (easy)
return "MAX(" + ",".join(results) + ")"
else:
return self.title
class RankFusionQueryTree(QueryTree):
def sql(self):
"""
Example query is as follows -
SELECT scary, working, (scary + working) /2 as result FROM clip_calib_scores ORDER BY result DESC LIMIT 0,50;
"""
# Recursively invokes expression
items = ", ".join(self.expression())
print items
sql = "SELECT v_id, " + items + ", MAX( ( RANK() OVER (ORDER BY " + self.query() + " ) as result FROM clip_calib_scores "
sql = sql + " ORDER BY result ASC LIMIT " + str(self.skip) +"," + str(self.limit) + ";"
return sql
def query(self):
"""
Computes the results based on the tree loaded
else returns empty results
Recursively calls itself if any child nodes are not leaf
"""
# For now just print the tree
if self.title[0] == "$":
if self.debug:
print self.indent + self.title + ", %d Children"%(len(self.children))
results = []
for child in self.children:
# Determine how to combine results
aresult = child.query()
# Combine using the title operator
print type(aresult), type(u"d")
print aresult
if type(aresult) == type(u"d"):
# Got a string
results = results + [aresult]
else:
results = results + aresult
if self.title[1:] == "and":
# Use average
# return "((" + " + ".join(results) + ") / " + str(len(results)) + ") "
return "), RANK() OVER (ORDER BY ".join(results) + ") )"
elif self.title[1:] == "or":
# Use max (easy)
return "MAX(" + ",".join(results) + ")"
else:
return self.title
| |
from django.conf import settings
from django.contrib.auth.models import User, Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from package.models import Category, Package, PackageExample
from package.tests import initial_data
from profiles.models import Profile
class FunctionalPackageTest(TestCase):
def setUp(self):
initial_data.load()
for user in User.objects.all():
profile = Profile.objects.create(user=user)
profile.save()
settings.RESTRICT_PACKAGE_EDITORS = False
settings.RESTRICT_GRID_EDITORS = True
def test_package_list_view(self):
url = reverse('packages')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'package/package_list.html')
packages = Package.objects.all()
for p in packages:
self.assertContains(response, p.title)
def test_package_detail_view(self):
url = reverse('package', kwargs={'slug': 'testability'})
response = self.client.get(url)
self.assertTemplateUsed(response, 'package/package.html')
p = Package.objects.get(slug='testability')
self.assertContains(response, p.title)
self.assertContains(response, p.repo_description)
for participant in p.participant_list():
self.assertContains(response, participant)
for g in p.grids():
self.assertContains(response, g.title)
for e in p.active_examples:
self.assertContains(response, e.title)
def test_latest_packages_view(self):
url = reverse('latest_packages')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'package/package_archive.html')
packages = Package.objects.all()
for p in packages:
self.assertContains(response, p.title)
self.assertContains(response, p.repo_description)
def test_add_package_view(self):
url = reverse('add_package')
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, url))
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'package/package_form.html')
for c in Category.objects.all():
self.assertContains(response, c.title)
count = Package.objects.count()
response = self.client.post(url, {
'category': Category.objects.all()[0].pk,
'repo_url': 'http://github.com/django/django',
'slug': 'test-slug',
'title': 'TEST TITLE',
})
self.assertEqual(response.status_code, 302)
self.assertEqual(Package.objects.count(), count + 1)
def test_edit_package_view(self):
p = Package.objects.get(slug='testability')
url = reverse('edit_package', kwargs={'slug': 'testability'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, url))
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'package/package_form.html')
self.assertContains(response, p.title)
self.assertContains(response, p.slug)
# Make a test post
response = self.client.post(url, {
'category': Category.objects.all()[0].pk,
'repo_url': 'http://github.com/django/django',
'slug': p.slug,
'title': 'TEST TITLE',
})
self.assertEqual(response.status_code, 302)
# Check that it actually changed the package
p = Package.objects.get(slug='testability')
self.assertEqual(p.title, 'TEST TITLE')
def test_add_example_view(self):
url = reverse('add_example', kwargs={'slug': 'testability'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'package/add_example.html')
count = PackageExample.objects.count()
response = self.client.post(url, {
'title': 'TEST TITLE',
'url': 'http://github.com',
})
self.assertEqual(response.status_code, 302)
self.assertEqual(PackageExample.objects.count(), count + 1)
def test_edit_example_view(self):
e = PackageExample.objects.all()[0]
id = e.pk
url = reverse('edit_example', kwargs={'slug': e.package.slug,
'id': e.pk})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'package/edit_example.html')
response = self.client.post(url, {
'title': 'TEST TITLE',
'url': 'http://github.com',
})
self.assertEqual(response.status_code, 302)
e = PackageExample.objects.get(pk=id)
self.assertEqual(e.title, 'TEST TITLE')
def test_usage_view(self):
url = reverse('usage', kwargs={'slug': 'testability', 'action': 'add'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
user = User.objects.get(username='user')
count = user.package_set.count()
self.assertTrue(self.client.login(username='user', password='user'))
# Now that the user is logged in, make sure that the number of packages
# they use has increased by one.
response = self.client.get(url)
self.assertEqual(count + 1, user.package_set.count())
# Now we remove that same package from the user's list of used packages,
# making sure that the total number has decreased by one.
url = reverse('usage', kwargs={'slug': 'testability', 'action': 'remove'})
response = self.client.get(url)
self.assertEqual(count, user.package_set.count())
class PackagePermissionTest(TestCase):
    """Verify that the add/edit package views honor RESTRICT_PACKAGE_EDITORS.

    When the setting is True, users need the explicit ``add_package`` /
    ``change_package`` model permissions; when it is False, any authenticated
    user may reach those views.
    """
    def setUp(self):
        initial_data.load()
        for user in User.objects.all():
            # Profile.objects.create() already persists the row; the extra
            # save() call the original code made was redundant.
            Profile.objects.create(user=user)
        # Remember the original value so tearDown() can restore it -- mutating
        # the global settings object without cleanup leaks into other tests
        # (test_switch_permissions also flips this setting mid-test).
        self._orig_restrict_editors = getattr(
            settings, 'RESTRICT_PACKAGE_EDITORS', None)
        settings.RESTRICT_PACKAGE_EDITORS = True
        self.test_add_url = reverse('add_package')
        self.test_edit_url = reverse('edit_package',
                                     kwargs={'slug': 'testability'})
        self.login = self.client.login(username='user', password='user')
        self.user = User.objects.get(username='user')
    def tearDown(self):
        # Restore the setting mutated in setUp()/individual tests.
        settings.RESTRICT_PACKAGE_EDITORS = self._orig_restrict_editors
    def test_login(self):
        self.assertTrue(self.login)
    def test_switch_permissions(self):
        # With the restriction off, any logged-in user may reach the add view.
        settings.RESTRICT_PACKAGE_EDITORS = False
        response = self.client.get(self.test_add_url)
        self.assertEqual(response.status_code, 200)
        # With the restriction on, the same user is forbidden.
        settings.RESTRICT_PACKAGE_EDITORS = True
        response = self.client.get(self.test_add_url)
        self.assertEqual(response.status_code, 403)
    def test_add_package_permission_fail(self):
        # Logged in but lacking the add_package permission -> forbidden.
        response = self.client.get(self.test_add_url)
        self.assertEqual(response.status_code, 403)
    def test_add_package_permission_success(self):
        add_package_perm = Permission.objects.get(
            codename="add_package", content_type__app_label='package')
        self.user.user_permissions.add(add_package_perm)
        response = self.client.get(self.test_add_url)
        self.assertEqual(response.status_code, 200)
    def test_edit_package_permission_fail(self):
        # Logged in but lacking the change_package permission -> forbidden.
        response = self.client.get(self.test_edit_url)
        self.assertEqual(response.status_code, 403)
    def test_edit_package_permission_success(self):
        edit_package_perm = Permission.objects.get(
            codename="change_package", content_type__app_label='package')
        self.user.user_permissions.add(edit_package_perm)
        response = self.client.get(self.test_edit_url)
        self.assertEqual(response.status_code, 200)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training
from tensorflow.python.util import compat
# Path (relative to the TensorFlow source tree) of a checked-in SavedModel
# export, used by testMaybeSavedModelDir below.
SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123")
def tearDownModule():
  """Remove everything the tests wrote under the shared temp directory."""
  file_io.delete_recursively(test.get_temp_dir())
class SavedModelTest(test.TestCase):
def _get_export_dir(self, label):
return os.path.join(test.get_temp_dir(), label)
  def _init_and_validate_variable(self, sess, variable_name, variable_value):
    """Create a named variable, run the global initializer, check its value."""
    v = variables.VariableV1(variable_value, name=variable_name)
    sess.run(variables.global_variables_initializer())
    self.assertEqual(variable_value, self.evaluate(v))
  def _build_asset_collection(self, asset_file_name, asset_file_contents,
                              asset_file_tensor_name, asset_subdir=""):
    """Write an asset file and register it in the ASSET_FILEPATHS collection.

    Creates `asset_subdir` under the test temp dir, writes
    `asset_file_contents` into `asset_file_name` there, wraps the path in a
    constant tensor named `asset_file_tensor_name`, and returns the full
    ASSET_FILEPATHS collection (which accumulates across calls in one graph).
    """
    parent_dir = os.path.join(
        compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_subdir))
    file_io.recursive_create_dir(parent_dir)
    asset_filepath = os.path.join(
        compat.as_bytes(parent_dir), compat.as_bytes(asset_file_name))
    file_io.write_string_to_file(asset_filepath, asset_file_contents)
    asset_file_tensor = constant_op.constant(
        asset_filepath, name=asset_file_tensor_name)
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file_tensor)
    asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
    return asset_collection
  def _validate_asset_collection(self, export_dir, graph_collection_def,
                                 expected_asset_file_name,
                                 expected_asset_file_contents,
                                 expected_asset_tensor_name,
                                 asset_id=0):
    """Check one AssetFileDef entry and the asset file it points at.

    Unpacks entry `asset_id` from the ASSETS_KEY collection of a loaded meta
    graph, verifies the recorded filename and tensor name, and confirms the
    file written under the export directory's assets/ subdir has the expected
    contents.
    """
    assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    assets_any[asset_id].Unpack(asset)
    assets_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    actual_asset_contents = file_io.read_file_to_string(assets_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(actual_asset_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)
  def _validate_inputs_tensor_info_fail(self, builder, tensor_info):
    """Assert that `builder` rejects `tensor_info` as a signature input."""
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      foo_signature = signature_def_utils.build_signature_def({
          "foo_inputs": tensor_info
      }, dict(), "foo")
      # Signature validation inside the builder should raise.
      self.assertRaises(
          AssertionError,
          builder.add_meta_graph_and_variables,
          sess, ["foo"],
          signature_def_map={"foo_key": foo_signature})
def _validate_inputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
  def _validate_outputs_tensor_info_fail(self, builder, tensor_info):
    """Assert that `builder` rejects `tensor_info` as a signature output."""
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      foo_signature = signature_def_utils.build_signature_def(
          dict(), {"foo_outputs": tensor_info}, "foo")
      # Signature validation inside the builder should raise.
      self.assertRaises(
          AssertionError,
          builder.add_meta_graph_and_variables,
          sess, ["foo"],
          signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def testMaybeSavedModelDir(self):
base_path = test.test_src_dir_path("/python/saved_model")
self.assertFalse(loader.maybe_saved_model_directory(base_path))
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.assertTrue(loader.maybe_saved_model_directory(base_path))
base_path = "complete_garbage"
self.assertFalse(loader.maybe_saved_model_directory(base_path))
  def testBadSavedModelFileFormat(self):
    """load() raises IOError for missing or unparseable saved_model files."""
    export_dir = self._get_export_dir("test_bad_saved_model_file_format")
    # Attempt to load a SavedModel from an export directory that does not exist.
    with self.session(graph=ops.Graph()) as sess:
      with self.assertRaisesRegexp(IOError,
                                   "SavedModel file does not exist at: %s" %
                                   export_dir):
        loader.load(sess, ["foo"], export_dir)
    os.makedirs(export_dir)
    # Write an invalid binary proto to saved_model.pb.
    path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
    with open(path_to_pb, "w") as f:
      f.write("invalid content")
    with self.session(graph=ops.Graph()) as sess:
      with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                   constants.SAVED_MODEL_FILENAME_PB):
        loader.load(sess, ["foo"], export_dir)
    # Cleanup the directory and start again.
    file_io.delete_recursively(export_dir)
    os.makedirs(export_dir)
    # Write an invalid text proto to saved_model.pbtxt
    path_to_pbtxt = os.path.join(export_dir,
                                 constants.SAVED_MODEL_FILENAME_PBTXT)
    with open(path_to_pbtxt, "w") as f:
      f.write("invalid content")
    with self.session(graph=ops.Graph()) as sess:
      with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                   constants.SAVED_MODEL_FILENAME_PBTXT):
        loader.load(sess, ["foo"], export_dir)
  def testVerifySessionGraphUsage(self):
    """load() restores into a caller-supplied session and its graph."""
    export_dir = self._get_export_dir("test_verify_session_graph_usage")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
    # Save the SavedModel to disk.
    builder.save()
    # Build a session and supply it to the load operation.
    sess = session.Session(graph=ops.Graph())
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    # Check the variable within the scope of the session and its graph.
    with sess:
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  def testSequence(self):
    """Builder enforces add_meta_graph_and_variables() exactly once, first."""
    export_dir = self._get_export_dir("test_sequence")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Expect an assertion error since add_meta_graph_and_variables() should be
    # invoked before any add_meta_graph() calls.
    with self.session(graph=ops.Graph()) as sess:
      self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
    # Expect an assertion error for multiple calls of
    # add_meta_graph_and_variables() since weights should be saved exactly once.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, ["bar"])
      self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
                        sess, ["baz"])
  def testTags(self):
    """Meta graphs are selected by exact ("all") tag-set matching on load.

    Saves one meta graph with weights plus several weightless meta graphs
    under single, multiple, and custom tag sets, then checks each loads (with
    the single saved set of weights) and that unknown or partially matching
    tag sets fail.
    """
    export_dir = self._get_export_dir("test_tags")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    # - a single tag (from predefined constants).
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - a single tag (from predefined constants).
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 43)
      builder.add_meta_graph([tag_constants.SERVING])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - multiple tags (from predefined constants).
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 45)
      builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - multiple tags (from predefined constants for serving on TPU).
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 45)
      builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
    # Graph that updates the single variable. SavedModel is invoked:
    # - to add the model (weights are not updated).
    # - multiple custom tags.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 44)
      builder.add_meta_graph(["foo", "bar"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with a single predefined tag whose variables were saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.TRAINING], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with a single predefined tag whose variables were not
    # saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.SERVING], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple predefined tags whose variables were not
    # saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple predefined tags (for serving on TPU)
    # whose variables were not saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple tags. Provide duplicate tags to test set
    # semantics.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo", "bar", "foo"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Try restoring a graph with a non-existent tag. This should yield a runtime
    # error.
    with self.session(graph=ops.Graph()) as sess:
      self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
                        export_dir)
    # Try restoring a graph where a subset of the tags match. Since tag matching
    # for meta graph defs follows "all" semantics, this should yield a runtime
    # error.
    with self.session(graph=ops.Graph()) as sess:
      self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
                        export_dir)
  def testVariables(self):
    """Only variables present at add_meta_graph_and_variables() are saved.

    Later weightless meta graphs restore the subset of their variables that
    overlaps the saved set; a disjoint variable set fails to restore.
    """
    export_dir = self._get_export_dir("test_variables")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with two variables. SavedModel invoked to:
    # - add with weights.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v1", 1)
      self._init_and_validate_variable(sess, "v2", 2)
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Graph with a single variable (subset of the variables from the previous
    # graph whose weights were saved). SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v2", 3)
      builder.add_meta_graph(["bar"])
    # Graph with a single variable (disjoint set of variables from the previous
    # graph whose weights were saved). SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v3", 4)
      builder.add_meta_graph(["baz"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with tag "foo", whose variables were saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertEqual(len(collection_vars), 2)
      self.assertEqual(1, collection_vars[0].eval())
      self.assertEqual(2, collection_vars[1].eval())
    # Restore the graph with tag "bar", whose variables were not saved. Only the
    # subset of the variables added to the graph will be restored with the
    # checkpointed value.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["bar"], export_dir)
      collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertEqual(len(collection_vars), 1)
      self.assertEqual(2, collection_vars[0].eval())
    # Try restoring the graph with tag "baz", whose variables were not saved.
    # Since this graph has a disjoint set of variables from the set that was
    # saved, this should raise an error.
    with self.session(graph=ops.Graph()) as sess:
      self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
                        export_dir)
def testGraphWithoutVariables(self):
export_dir = self._get_export_dir("test_graph_has_variables")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with no variables.
with self.session(graph=ops.Graph()) as sess:
constant_5_name = constant_op.constant(5.0).name
builder.add_meta_graph_and_variables(sess, ["foo"])
# Second graph with no variables
with self.session(graph=ops.Graph()) as sess:
constant_6_name = constant_op.constant(6.0).name
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
b = constant_op.constant(6.0)
c = a * b
self.assertEqual(30.0, sess.run(c))
# Restore the graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
b = constant_op.constant(5.0)
c = a * b
self.assertEqual(30.0, sess.run(c))
  def testNoOverwrite(self):
    """Creating a second builder over an existing export dir must fail."""
    export_dir = self._get_export_dir("test_no_overwrite")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)
    # Restore the graph with tag "foo", whose variables were saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # An attempt to create another builder with the same export directory should
    # result in an assertion error.
    self.assertRaises(AssertionError, saved_model_builder.SavedModelBuilder,
                      export_dir)
  def testSaveAsText(self):
    """SavedModel written as pbtxt (as_text=True) loads back correctly."""
    export_dir = self._get_export_dir("test_astext")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Graph with the same single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 43)
      builder.add_meta_graph(["bar"])
    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)
    # Restore the graph with tag "foo", whose variables were saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with tag "bar", whose variables were not saved.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["bar"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  def testCollections(self):
    """Each meta graph records its own collection-def; values come from the
    single saved checkpoint."""
    export_dir = self._get_export_dir("test_collections")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable added to a collection. SavedModel invoked to:
    # - add with weights.
    with self.session(graph=ops.Graph()) as sess:
      v = variables.VariableV1(42, name="v")
      ops.add_to_collection("foo_vars", v)
      sess.run(variables.global_variables_initializer())
      self.assertEqual(42, self.evaluate(v))
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Graph with the same single variable added to a different collection.
    # SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.session(graph=ops.Graph()) as sess:
      v = variables.VariableV1(43, name="v")
      ops.add_to_collection("bar_vars", v)
      sess.run(variables.global_variables_initializer())
      self.assertEqual(43, self.evaluate(v))
      builder.add_meta_graph(["bar"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with tag "foo", whose variables were saved. The
    # collection 'foo_vars' should contain a single element. The collection
    # 'bar_vars' should not be found.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      collection_foo_vars = ops.get_collection("foo_vars")
      self.assertEqual(len(collection_foo_vars), 1)
      self.assertEqual(42, collection_foo_vars[0].eval())
      self.assertEqual(len(ops.get_collection("bar_vars")), 0)
    # Restore the graph with tag "bar", whose variables were not saved. The
    # collection-def exported as part of the meta graph def is updated to
    # reflect the new collection. The value of the variable in the
    # collection-def corresponds to the saved value (from the previous graph
    # with tag "foo").
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["bar"], export_dir)
      collection_bar_vars = ops.get_collection("bar_vars")
      self.assertEqual(len(collection_bar_vars), 1)
      self.assertEqual(42, collection_bar_vars[0].eval())
      self.assertEqual(len(ops.get_collection("foo_vars")), 0)
  def testSignatureDefs(self):
    """Each meta graph carries its own signature-def map; the same key may map
    to different signatures in different meta graphs."""
    export_dir = self._get_export_dir("test_signature_defs")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable and a single entry in the signature def map.
    # SavedModel is invoked to add with weights.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Build and populate an empty SignatureDef for testing.
      foo_signature = signature_def_utils.build_signature_def(dict(),
                                                              dict(), "foo")
      builder.add_meta_graph_and_variables(
          sess, ["foo"], signature_def_map={"foo_key": foo_signature})
    # Graph with the same single variable and multiple entries in the signature
    # def map. No weights are saved by SavedModel.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 43)
      # Build and populate a different SignatureDef for testing.
      bar_signature = signature_def_utils.build_signature_def(dict(),
                                                              dict(), "bar")
      # Also, build a different SignatureDef corresponding to "foo_key" defined
      # in the previous graph.
      foo_new_signature = signature_def_utils.build_signature_def(dict(),
                                                                  dict(),
                                                                  "foo_new")
      builder.add_meta_graph(
          ["bar"],
          signature_def_map={
              "bar_key": bar_signature,
              "foo_key": foo_new_signature
          })
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with tag "foo". The single entry in the SignatureDef map
    # corresponding to "foo_key" should exist.
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
      foo_signature = foo_graph.signature_def
      self.assertEqual(len(foo_signature), 1)
      self.assertEqual("foo", foo_signature["foo_key"].method_name)
    # Restore the graph with tag "bar". The SignatureDef map should have two
    # entries. One corresponding to "bar_key" and another corresponding to the
    # new value of "foo_key".
    with self.session(graph=ops.Graph()) as sess:
      bar_graph = loader.load(sess, ["bar"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
      bar_signature = bar_graph.signature_def
      self.assertEqual(len(bar_signature), 2)
      self.assertEqual("bar", bar_signature["bar_key"].method_name)
      self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
  def testSignatureDefValidationFails(self):
    """TensorInfos missing an encoding or a dtype are rejected as signatures."""
    export_dir = self._get_export_dir("test_signature_def_validation_fail")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Has a dtype but no name/encoding.
    tensor_without_encoding = meta_graph_pb2.TensorInfo()
    tensor_without_encoding.dtype = types_pb2.DT_FLOAT
    self._validate_inputs_tensor_info_fail(builder, tensor_without_encoding)
    self._validate_outputs_tensor_info_fail(builder, tensor_without_encoding)
    # Has a name but no dtype.
    tensor_without_dtype = meta_graph_pb2.TensorInfo()
    tensor_without_dtype.name = "x"
    self._validate_inputs_tensor_info_fail(builder, tensor_without_dtype)
    self._validate_outputs_tensor_info_fail(builder, tensor_without_dtype)
    # Has neither.
    tensor_empty = meta_graph_pb2.TensorInfo()
    self._validate_inputs_tensor_info_fail(builder, tensor_empty)
    self._validate_outputs_tensor_info_fail(builder, tensor_empty)
def testSignatureDefValidationSucceedsWithName(self):
tensor_with_name = meta_graph_pb2.TensorInfo()
tensor_with_name.name = "foo"
tensor_with_name.dtype = types_pb2.DT_FLOAT
export_dir = self._get_export_dir("test_signature_def_validation_name_1")
builder = saved_model_builder.SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_name)
export_dir = self._get_export_dir("test_signature_def_validation_name_2")
builder = saved_model_builder.SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_name)
  def testSignatureDefValidationSucceedsWithCoo(self):
    """A TensorInfo using the coo_sparse encoding passes signature validation."""
    tensor_with_coo = meta_graph_pb2.TensorInfo()
    # TODO(soergel) test validation of each of the fields of coo_sparse
    tensor_with_coo.coo_sparse.values_tensor_name = "foo"
    tensor_with_coo.dtype = types_pb2.DT_FLOAT
    # Validate as a signature input.
    export_dir = self._get_export_dir("test_signature_def_validation_coo_1")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    self._validate_inputs_tensor_info_accept(builder, tensor_with_coo)
    # Validate as a signature output.
    export_dir = self._get_export_dir("test_signature_def_validation_coo_2")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    self._validate_outputs_tensor_info_accept(builder, tensor_with_coo)
  def testAssets(self):
    """Assets in the collection are copied into the export; others are not."""
    export_dir = self._get_export_dir("test_assets")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Build an asset collection.
      ignored_filepath = os.path.join(
          compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
      file_io.write_string_to_file(ignored_filepath, "will be ignored")
      asset_collection = self._build_asset_collection("hello42.txt",
                                                      "foo bar baz",
                                                      "asset_file_tensor")
      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor:0")
      # The file that was never added to the asset collection must not have
      # been copied into the export's assets directory.
      ignored_asset_path = os.path.join(
          compat.as_bytes(export_dir),
          compat.as_bytes(constants.ASSETS_DIRECTORY),
          compat.as_bytes("ignored.txt"))
      self.assertFalse(file_io.file_exists(ignored_asset_path))
  def testAssetsNameCollisionDiffFile(self):
    """Same basename, different contents: the second asset is renamed with a
    numeric suffix so both files survive the export."""
    export_dir = self._get_export_dir("test_assets_name_collision_diff_file")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # The collection accumulates: the second call returns both assets.
      asset_collection = self._build_asset_collection(
          "hello42.txt", "foo bar bak", "asset_file_tensor",
          asset_subdir="1")
      asset_collection = self._build_asset_collection(
          "hello42.txt", "foo bar baz", "asset_file_tensor_1",
          asset_subdir="2")
      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar bak",
                                      "asset_file_tensor:0")
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt_1", "foo bar baz",
                                      "asset_file_tensor_1:0",
                                      asset_id=1)
  def testAssetsNameCollisionSameFilepath(self):
    """The identical source file added twice is written to the export once."""
    export_dir = self._get_export_dir("test_assets_name_collision_same_path")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Same path and contents both times; collection accumulates both tensors.
      asset_collection = self._build_asset_collection(
          "hello42.txt", "foo bar baz", "asset_file_tensor")
      asset_collection = self._build_asset_collection(
          "hello42.txt", "foo bar baz", "asset_file_tensor_1")
      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor:0")
      # The second tensor should be recorded, but the same.
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor_1:0",
                                      asset_id=1)
      # No suffixed duplicate file should have been written.
      ignored_asset_path = os.path.join(
          compat.as_bytes(export_dir),
          compat.as_bytes(constants.ASSETS_DIRECTORY),
          compat.as_bytes("hello42.txt_1"))
      self.assertFalse(file_io.file_exists(ignored_asset_path))
  def testAssetsNameCollisionSameFile(self):
    """Same basename and same contents from different dirs: deduplicated to a
    single exported asset file."""
    export_dir = self._get_export_dir("test_assets_name_collision_same_file")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Identical contents under two different subdirs.
      asset_collection = self._build_asset_collection(
          "hello42.txt", "foo bar baz", "asset_file_tensor",
          asset_subdir="1")
      asset_collection = self._build_asset_collection(
          "hello42.txt", "foo bar baz", "asset_file_tensor_1",
          asset_subdir="2")
      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor:0")
      # The second tensor should be recorded, but the same.
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor_1:0",
                                      asset_id=1)
      # No suffixed duplicate file should have been written.
      ignored_asset_path = os.path.join(
          compat.as_bytes(export_dir),
          compat.as_bytes(constants.ASSETS_DIRECTORY),
          compat.as_bytes("hello42.txt_1"))
      self.assertFalse(file_io.file_exists(ignored_asset_path))
  def testAssetsNameCollisionManyFiles(self):
    """Five distinct files sharing one basename get _1.._4 suffixes; the first
    keeps the plain name."""
    export_dir = self._get_export_dir("test_assets_name_collision_many_files")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      for i in range(5):
        idx = str(i)
        asset_collection = self._build_asset_collection(
            "hello42.txt", "foo bar baz " + idx, "asset_file_tensor_" + idx,
            asset_subdir=idx)
      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      for i in range(1, 5):
        idx = str(i)
        self._validate_asset_collection(
            export_dir, foo_graph.collection_def, "hello42.txt_" + idx,
            "foo bar baz " + idx, "asset_file_tensor_{}:0".format(idx),
            asset_id=i)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz 0",
                                      "asset_file_tensor_0:0")
  def testCustomMainOp(self):
    """A custom main_op runs on load and can mutate restored variables."""
    export_dir = self._get_export_dir("test_main_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.VariableV1(2, name="v2")
      ops.add_to_collection("v", v2)
      # Initialize another variable `v3` to 42.
      v3 = variables.VariableV1(42, name="v3")
      ops.add_to_collection("v", v3)
      # Set up an assignment op to be run as part of the main_op.
      with ops.control_dependencies([main_op.main_op()]):
        add_v1_v2 = math_ops.add(v1._ref(), v2._ref())
        custom_main_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2))
      # Running the main_op here also initializes all variables.
      sess.run(custom_main_op)
      builder.add_meta_graph_and_variables(
          sess, ["foo"], main_op=custom_main_op)
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      # Evaluates to the sum of the first two variables and assigned as part of
      # the main_op, following a restore.
      self.assertEqual(3, ops.get_collection("v")[2].eval())
  def testLegacyInitOp(self):
    """legacy_init_op runs on load and initializes derived variables.

    v3 is created uninitialized (collections=[] keeps it out of the normal
    init path); the legacy_init_op assigns v3 = v1 + v2, so after a restore
    the collection holds (1, 2, 3).
    """
    export_dir = self._get_export_dir("test_legacy_init_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.VariableV1(2, name="v2")
      ops.add_to_collection("v", v2)
      # Initialize another variable `v3` to 42.
      v3 = variables.VariableV1(42, name="v3", trainable=False, collections=[])
      ops.add_to_collection("v", v3)
      # Set up an assignment op to be run as part of the legacy_init_op.
      assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
      legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")
      sess.run(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(
          sess, ["foo"], legacy_init_op=legacy_init_op)
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      # Evaluates to the sum of the first two variables and assigned as part of
      # the legacy_init_op, following a restore.
      self.assertEqual(3, ops.get_collection("v")[2].eval())
def testLegacyInitOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir(
"test_legacy_init_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(
export_dir, constants.LEGACY_INIT_OP_KEY)
def testMainOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir(
"test_main_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir, constants.MAIN_OP_KEY)
  def _testInitOpsWithNonEmptyCollection(self, export_dir, key):
    """Helper: adding an init op must fail if `key`'s collection already has one.

    Pre-populates the given graph collection with a no-op, then checks that
    both legacy_init_op and main_op paths raise ValueError instead of
    silently stacking multiple init ops.
    """
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    g = ops.Graph()
    with self.session(graph=g) as sess:
      # Initialize variable `v1` to 1.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)
      # Initialize another variable `v2` to 42.
      v2 = variables.VariableV1(42, name="v2", trainable=False, collections=[])
      ops.add_to_collection("v", v2)
      # Set up an assignment op to be run as part of the init op.
      assign_v2 = state_ops.assign(v2, v1)
      init_op = control_flow_ops.group(assign_v2, name="init_op")
      sess.run(variables.global_variables_initializer())
      ops.add_to_collection(key, control_flow_ops.no_op())
      # ValueError should be raised since the LEGACY_INIT_OP_KEY collection
      # is not empty and we don't support multiple init ops.
      with self.assertRaisesRegexp(ValueError, "Graph already contains"):
        builder.add_meta_graph_and_variables(
            sess, ["foo"], legacy_init_op=init_op)
      # We shouldn't be able to add as MAIN_OP, either.
      with self.assertRaisesRegexp(ValueError, "Graph already contains"):
        builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op)
  def testTrainOp(self):
    """A train op that is a Tensor round-trips via the TRAIN_OP_KEY collection."""
    export_dir = self._get_export_dir("test_train_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.VariableV1(2, name="v2")
      ops.add_to_collection("v", v2)
      sess.run(variables.global_variables_initializer())
      train_op = state_ops.assign_add(v1, v2)
      sess.run(train_op)
      # TODO(karmel): remove explicit call when in the public method.
      builder._add_train_op(train_op)
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      # v1 was incremented by the assign_add before saving, hence 3.
      self.assertEqual(3, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      self.assertIsInstance(
          ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Tensor)
  def testTrainOpGroup(self):
    """A train op that is an Operation (a no-op group) round-trips via TRAIN_OP_KEY."""
    export_dir = self._get_export_dir("test_train_op_group")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.VariableV1(2, name="v2")
      ops.add_to_collection("v", v2)
      sess.run(variables.global_variables_initializer())
      train_op = control_flow_ops.group()
      sess.run(train_op)
      # TODO(karmel): remove explicit call when in the public method.
      builder._add_train_op(train_op)
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      # Unlike the Tensor case, a grouped train op restores as an Operation.
      self.assertIsInstance(
          ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Operation)
  def testTrainOpAfterVariables(self):
    """A train op added after the first meta graph appears only in later graphs.

    "pre_foo" is saved before _add_train_op, so its TRAIN_OP_KEY collection
    must be empty; "foo" is saved afterwards and must contain the train op.
    """
    export_dir = self._get_export_dir("test_train_op_after_variables")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.VariableV1(2, name="v2")
      ops.add_to_collection("v", v2)
      sess.run(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(sess, ["pre_foo"])
      train_op = state_ops.assign_add(v1, v2)
      sess.run(train_op)
      # TODO(karmel): remove explicit call when in the public method.
      builder._add_train_op(train_op)
      builder.add_meta_graph(["foo"])
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertIsInstance(
          ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Tensor)
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["pre_foo"], export_dir)
      self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY))
  def testMultipleAssets(self):
    """Each meta graph keeps its own independent asset collection."""
    export_dir = self._get_export_dir("test_multiple_assets")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Build an asset collection specific to `foo` graph.
      asset_collection = self._build_asset_collection("foo.txt", "content_foo",
                                                      "asset_file_tensor")
      # Add the asset collection as part of the graph with tag "foo".
      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Build an asset collection specific to `bar` graph.
      asset_collection = self._build_asset_collection("bar.txt", "content_bar",
                                                      "asset_file_tensor")
      # Add the asset collection as part of the graph with tag "bar".
      builder.add_meta_graph(["bar"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    # Check assets restored for graph with tag "foo".
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "foo.txt", "content_foo",
                                      "asset_file_tensor:0")
    # Check assets restored for graph with tag "bar".
    with self.session(graph=ops.Graph()) as sess:
      bar_graph = loader.load(sess, ["bar"], export_dir)
      self._validate_asset_collection(export_dir, bar_graph.collection_def,
                                      "bar.txt", "content_bar",
                                      "asset_file_tensor:0")
  def testDuplicateAssets(self):
    """An asset filename shared across meta graphs keeps its first contents.

    Both "foo" and "bar" graphs register an asset named `foo.txt`; the file
    written when "foo" was added wins, so "bar" also sees "content_foo".
    """
    export_dir = self._get_export_dir("test_duplicate_assets")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Build an asset collection with `foo.txt` that has `foo` specific
      # content.
      asset_collection = self._build_asset_collection("foo.txt", "content_foo",
                                                      "asset_file_tensor")
      # Add the asset collection as part of the graph with tag "foo".
      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      # Build an asset collection with `foo.txt` that has `bar` specific
      # content.
      asset_collection = self._build_asset_collection("foo.txt", "content_bar",
                                                      "asset_file_tensor")
      # Add the asset collection as part of the graph with tag "bar".
      builder.add_meta_graph(["bar"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    # Check assets restored for graph with tag "foo".
    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "foo.txt", "content_foo",
                                      "asset_file_tensor:0")
    # Check assets restored for graph with tag "bar".
    with self.session(graph=ops.Graph()) as sess:
      bar_graph = loader.load(sess, ["bar"], export_dir)
      # Validate the assets for `bar` graph. `foo.txt` should contain the
      # original contents corresponding to `foo` graph since an asset with the
      # same name across multiple graphs is only stored the first time.
      self._validate_asset_collection(export_dir, bar_graph.collection_def,
                                      "foo.txt", "content_foo",
                                      "asset_file_tensor:0")
  def testOp(self):
    """A multi-device graph with a collection-stored init op round-trips.

    Variables are placed on two CPU devices; v3 is intentionally unsaved
    (collections=[]) and is recomputed after load by running the "init_op"
    retrieved from its collection.
    """
    export_dir = self._get_export_dir("test_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v1 = variables.VariableV1(1, name="v1")
      with sess.graph.device("/cpu:1"):
        v2 = variables.VariableV1(2, name="v2")
      # v3 is an unsaved variable derived from v1 and v2. It is used to
      # exercise the ability to run an init op when restoring a graph.
      v3 = variables.VariableV1(1, name="v3", trainable=False, collections=[])
      assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
      init_op = control_flow_ops.group(assign_v3, name="init_op")
      ops.add_to_collection("v", v1)
      ops.add_to_collection("v", v2)
      ops.add_to_collection("v", v3)
      ops.add_to_collection("init_op", init_op)
      sess.run(variables.global_variables_initializer())
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Save the SavedModel to disk.
    builder.save()
    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      loader.load(sess, ["foo"], export_dir)
      # Validate variables, run the init op and verify result.
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      ops.get_collection("init_op")[0].run()
      self.assertEqual(3, ops.get_collection("v")[2].eval())
  def testCustomSaveable(self):
    """A custom SaveableObject (checkpointed lookup table) is saved and restored."""
    export_dir = self._get_export_dir("custom_saveable")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      # CheckpointedOp is a key-value table that can be saved across sessions.
      # The table register itself in SAVEABLE_OBJECTS collection.
      v1 = saver_test_utils.CheckpointedOp(name="v1")
      variables.global_variables_initializer().run()
      v1.insert("k1", 3.0).run()
      # Once the table is restored, we can access it through this reference.
      ops.add_to_collection("table_ref", v1.table_ref)
      builder.add_meta_graph_and_variables(sess, ["foo"])
    # Save the SavedModel to disk.
    builder.save()
    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      loader.load(sess, ["foo"], export_dir)
      # Instantiate a wrapper object from the checkpointed reference.
      v1 = saver_test_utils.CheckpointedOp(
          name="v1", table_ref=ops.get_collection("table_ref")[0])
      self.assertEqual(b"k1", v1.keys().eval())
      self.assertEqual(3.0, v1.values().eval())
  def testCustomSaver(self):
    """A user-provided Saver is recorded in the meta graph's saver_def."""
    export_dir = self._get_export_dir("test_custom_saver")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      variables.VariableV1(1, name="v1")
      sess.run(variables.global_variables_initializer())
      custom_saver = training.Saver(name="my_saver")
      builder.add_meta_graph_and_variables(sess, ["tag"], saver=custom_saver)
    # Save the SavedModel to disk.
    builder.save()
    with ops.Graph().as_default() as graph:
      with self.session(graph=graph) as sess:
        saved_graph = loader.load(sess, ["tag"], export_dir)
        graph_ops = [x.name for x in graph.get_operations()]
        # Only the custom saver's ops should exist; no default "save/" scope.
        self.assertTrue("my_saver/restore_all" in graph_ops)
        self.assertFalse("save/restore_all" in graph_ops)
        self.assertEqual(
            saved_graph.saver_def.restore_op_name, "my_saver/restore_all")
  def testNoCustomSaver(self):
    """Without an explicit saver, the builder creates its own default Saver.

    A Saver named "my_saver" exists in the graph but is not passed to the
    builder, so the recorded saver_def points at the default "save/" scope.
    """
    export_dir = self._get_export_dir("test_no_custom_saver")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      variables.VariableV1(1, name="v1")
      sess.run(variables.global_variables_initializer())
      training.Saver(name="my_saver")
      builder.add_meta_graph_and_variables(sess, ["tag"])
    # Save the SavedModel to disk.
    builder.save()
    with ops.Graph().as_default() as graph:
      with self.session(graph=graph) as sess:
        saved_graph = loader.load(sess, ["tag"], export_dir)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue("my_saver/restore_all" in graph_ops)
        self.assertTrue("save/restore_all" in graph_ops)
        self.assertEqual(
            saved_graph.saver_def.restore_op_name, "save/restore_all")
  def testMultipleCustomSavers(self):
    """Each meta graph may carry its own saver; saver_defs stay distinct."""
    export_dir = self._get_export_dir("test_multiple_custom_savers")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.session(graph=ops.Graph()) as sess:
      variables.VariableV1(1, name="v1")
      sess.run(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(sess, ["tag_0"])
      saver_1 = training.Saver()
      builder.add_meta_graph(["tag_1"], saver=saver_1)
      saver_2 = training.Saver()
      builder.add_meta_graph(["tag_2"], saver=saver_2)
    # Save the SavedModel to disk.
    builder.save()

    def _validate_custom_saver(tag_name, saver_name):
      # Loads the graph for `tag_name` and checks its recorded restore op.
      with ops.Graph().as_default() as graph:
        with self.session(graph=graph) as sess:
          saved_graph = loader.load(sess, [tag_name], export_dir)
          self.assertEqual(
              saved_graph.saver_def.restore_op_name,
              saver_name)

    _validate_custom_saver("tag_0", "save/restore_all")
    _validate_custom_saver("tag_1", "save_1/restore_all")
    _validate_custom_saver("tag_2", "save_2/restore_all")
  def testImportScope(self):
    """Loading with import_scope prefixes tensor names but not static data.

    Variable, asset, and constant tensors gain the "scope_name/" prefix after
    an import-scoped load, while their values and the (static) asset entries
    inside collection_def stay unscoped and unchanged.
    """
    export_dir = self._get_export_dir("test_scoped_assets")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Build a SavedModel with a variable, an asset, and a constant tensor.
    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      asset_collection = self._build_asset_collection("foo.txt", "content_foo",
                                                      "asset_file_tensor")
      constant_op.constant("constant value", name="constant_tensor_name")
      builder.add_meta_graph_and_variables(
          sess, ["tag_name"], assets_collection=asset_collection)
      # Save the asset file path for later comparison.
      asset_file_path = asset_collection[0].eval()
    # Save the SavedModel to disk.
    builder.save()
    with self.session(graph=ops.Graph()) as sess:
      # Restore the SavedModel under an import_scope in a new graph/session.
      graph_proto = loader.load(
          sess, ["tag_name"], export_dir, import_scope="scope_name")
      # The loaded variable tensor should be scoped, but its contents should be
      # unchanged.
      self.assertEqual(
          "scope_name/v:0",
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].name)
      self.assertEqual(
          42,
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
      # The loaded asset tensor should be scoped, but the asset file path and
      # contents should be unchanged.
      asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
      self.assertEqual(1, len(asset_collection))
      self.assertEqual(asset_file_path, asset_collection[0].eval())
      self.assertEqual("scope_name/asset_file_tensor:0",
                       asset_collection[0].name)
      # The static asset data inside graph_proto.collection_def should not be
      # scoped.
      self._validate_asset_collection(export_dir, graph_proto.collection_def,
                                      "foo.txt", "content_foo",
                                      "asset_file_tensor:0")
      # The constant tensor should be scoped, but its contents should be
      # unchanged.
      self.assertEqual(
          compat.as_bytes("constant value"),
          ops.get_default_graph().get_tensor_by_name(
              "scope_name/constant_tensor_name:0").eval())
  def testClearDevices(self):
    """clear_devices=True strips device placement so the model loads anywhere."""
    export_dir = self._get_export_dir("test_clear_devices")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Specify a device and save a variable.
    ops.reset_default_graph()
    with session.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(
            sess, [tag_constants.TRAINING], clear_devices=True)
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with a single predefined tag whose variables were saved
    # without any device information.
    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.TRAINING], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testStripDefaultAttrs(self):
export_dir = self._get_export_dir("test_strip_default_attrs")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Add a graph with two float32 variables and a Complex Op composing them
# with strip_default_attrs enabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], strip_default_attrs=True)
# Add a graph with the same float32 variables and a Complex Op composing
# them with strip_default_attrs disabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph(["bar"], strip_default_attrs=False)
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Loading graph "foo" via the loader must restore the defaults for the
# "Complex" node based on the "Complex" OpDef in the Op registry.
sess = session.Session(graph=ops.Graph())
meta_graph_def = loader.load(sess, ["foo"], export_dir)
complex_node = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", complex_node.attr)
self.assertIn("Tout", complex_node.attr)
# Load graph "foo" from disk as-is to verify default attrs are stripped.
# pylint: disable=protected-access
saved_model_pb = loader_impl._parse_saved_model(export_dir)
self.assertIsNotNone(saved_model_pb)
# pylint: enable=protected-access
meta_graph_foo_def = None
meta_graph_bar_def = None
for meta_graph_def in saved_model_pb.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
meta_graph_foo_def = meta_graph_def
elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
meta_graph_bar_def = meta_graph_def
self.assertIsNotNone(meta_graph_foo_def)
self.assertIsNotNone(meta_graph_bar_def)
# "Complex" Op has 2 attributes with defaults:
# o "T" : float32. (input type)
# o "Tout" : complex64. (output type)
# "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
# Graph "foo" was saved with strip_default_attrs set to True.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_foo_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# "Complex" Op in graph "bar" must have attributes "T" and "Tout".
# Graph "bar" was saved with strip_default_attrs set to False.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_bar_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
# Tests the behavior of loading SavedModels that having missing attrs or attrs
# with incorrect types.
def testInconsistentConsumerDefaultAttrs(self):
export_dir = self._get_export_dir(
"test_strip_default_attrs_no_consumer_defaults")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Add a graph with a single variable and a test op with a defaultless
# float32 attr, "test_attr".
with session.Session(graph=ops.Graph()) as sess:
variables.VariableV1(1.0, dtype=dtypes.float64, name="var")
test_ops.test_attr(T=dtypes.float32, name="test_attr")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Rewrite the SavedModel to remove the T attr from "test_attr".
saved_model_file = os.path.join(
export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
with open(saved_model_file) as f:
original_saved_model = f.read()
no_attr_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", "")
with open(saved_model_file, "w") as f:
f.write(no_attr_saved_model)
# Loading the SavedModel via the loader must fail because the SavedModel
# does not have any attr values for the "TestAttr" node, and there is no
# default specified in the TestAttr OpDef.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"):
loader.load(sess, ["foo"], export_dir)
# Rewrite the SavedModel to change the type of the T attr in "test_attr"
bad_type_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", """
attr {
key: "T"
value {
type: DT_DOUBLE
}
}""")
with open(saved_model_file, "w") as f:
f.write(bad_type_saved_model)
# Loading the SavedModel via the loader must fail because there is no
# OpKernel registered to handle T = double.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"No OpKernel was registered to support Op 'TestAttr' used by node "
"test_attr \\(defined at .*\\) with these attrs: \\[.*\\]\n"
"Registered devices:.*\n"
"Registered kernels:.*"
):
loader.load(sess, ["foo"], export_dir)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| |
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.PaginatedList
import github.Gist
import github.Repository
import github.NamedUser
import github.Plan
import github.Organization
import github.Event
class NamedUser(github.GithubObject.CompletableGithubObject):
"""
This class represents NamedUsers as returned for example by http://developer.github.com/v3/todo
"""
    @property
    def avatar_url(self):
        """
        URL of the user's avatar image (fetched lazily on first access).

        :type: string
        """
        self._completeIfNotSet(self._avatar_url)
        return self._avatar_url.value
    @property
    def bio(self):
        """
        Free-form biography text from the user's profile.

        :type: string
        """
        self._completeIfNotSet(self._bio)
        return self._bio.value
    @property
    def blog(self):
        """
        Blog/website URL from the user's profile.

        :type: string
        """
        self._completeIfNotSet(self._blog)
        return self._blog.value
    @property
    def collaborators(self):
        """
        Collaborator count as reported by the API.

        :type: integer
        """
        self._completeIfNotSet(self._collaborators)
        return self._collaborators.value
    @property
    def company(self):
        """
        Company name from the user's profile.

        :type: string
        """
        self._completeIfNotSet(self._company)
        return self._company.value
    @property
    def contributions(self):
        """
        Contribution count as reported by the API (populated e.g. when this
        user comes from a contributors listing — verify against the caller).

        :type: integer
        """
        self._completeIfNotSet(self._contributions)
        return self._contributions.value
    @property
    def created_at(self):
        """
        Timestamp of account creation.

        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value
    @property
    def disk_usage(self):
        """
        Disk usage figure as reported by the API.

        :type: integer
        """
        self._completeIfNotSet(self._disk_usage)
        return self._disk_usage.value
    @property
    def email(self):
        """
        Public e-mail address from the user's profile.

        :type: string
        """
        self._completeIfNotSet(self._email)
        return self._email.value
    @property
    def events_url(self):
        """
        API URL (template) for this user's events.

        :type: string
        """
        self._completeIfNotSet(self._events_url)
        return self._events_url.value
    @property
    def followers(self):
        """
        Number of followers.

        :type: integer
        """
        self._completeIfNotSet(self._followers)
        return self._followers.value
    @property
    def followers_url(self):
        """
        API URL of this user's followers listing.

        :type: string
        """
        self._completeIfNotSet(self._followers_url)
        return self._followers_url.value
    @property
    def following(self):
        """
        Number of users this user follows.

        :type: integer
        """
        self._completeIfNotSet(self._following)
        return self._following.value
    @property
    def following_url(self):
        """
        API URL (template) for the users this user follows.

        :type: string
        """
        self._completeIfNotSet(self._following_url)
        return self._following_url.value
    @property
    def gists_url(self):
        """
        API URL (template) for this user's gists.

        :type: string
        """
        self._completeIfNotSet(self._gists_url)
        return self._gists_url.value
    @property
    def gravatar_id(self):
        """
        Gravatar identifier for the user's avatar.

        :type: string
        """
        self._completeIfNotSet(self._gravatar_id)
        return self._gravatar_id.value
    @property
    def hireable(self):
        """
        Whether the profile is marked as hireable.

        :type: bool
        """
        self._completeIfNotSet(self._hireable)
        return self._hireable.value
    @property
    def html_url(self):
        """
        Web URL of the user's GitHub profile page.

        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value
    @property
    def id(self):
        """
        Numeric account identifier.

        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value
    @property
    def location(self):
        """
        Location string from the user's profile.

        :type: string
        """
        self._completeIfNotSet(self._location)
        return self._location.value
    @property
    def login(self):
        """
        Login (username); also used as this object's URL identity.

        :type: string
        """
        self._completeIfNotSet(self._login)
        return self._login.value
    @property
    def name(self):
        """
        Display name from the user's profile.

        :type: string
        """
        self._completeIfNotSet(self._name)
        return self._name.value
    @property
    def organizations_url(self):
        """
        API URL of this user's organizations listing.

        :type: string
        """
        self._completeIfNotSet(self._organizations_url)
        return self._organizations_url.value
    @property
    def owned_private_repos(self):
        """
        Number of private repositories owned.

        :type: integer
        """
        self._completeIfNotSet(self._owned_private_repos)
        return self._owned_private_repos.value
    @property
    def plan(self):
        """
        Billing plan information.

        :type: :class:`github.Plan.Plan`
        """
        self._completeIfNotSet(self._plan)
        return self._plan.value
    @property
    def private_gists(self):
        """
        Number of private gists.

        :type: integer
        """
        self._completeIfNotSet(self._private_gists)
        return self._private_gists.value
    @property
    def public_gists(self):
        """
        Number of public gists.

        :type: integer
        """
        self._completeIfNotSet(self._public_gists)
        return self._public_gists.value
    @property
    def public_repos(self):
        """
        Number of public repositories.

        :type: integer
        """
        self._completeIfNotSet(self._public_repos)
        return self._public_repos.value
    @property
    def received_events_url(self):
        """
        API URL of events received by this user.

        :type: string
        """
        self._completeIfNotSet(self._received_events_url)
        return self._received_events_url.value
    @property
    def repos_url(self):
        """
        API URL of this user's repositories listing.

        :type: string
        """
        self._completeIfNotSet(self._repos_url)
        return self._repos_url.value
    @property
    def starred_url(self):
        """
        API URL (template) for repositories starred by this user.

        :type: string
        """
        self._completeIfNotSet(self._starred_url)
        return self._starred_url.value
    @property
    def subscriptions_url(self):
        """
        API URL of this user's subscriptions listing.

        :type: string
        """
        self._completeIfNotSet(self._subscriptions_url)
        return self._subscriptions_url.value
    @property
    def total_private_repos(self):
        """
        Total number of private repositories.

        :type: integer
        """
        self._completeIfNotSet(self._total_private_repos)
        return self._total_private_repos.value
    @property
    def type(self):
        """
        Account type string as returned by the API.

        :type: string
        """
        self._completeIfNotSet(self._type)
        return self._type.value
    @property
    def updated_at(self):
        """
        Timestamp of the last account update.

        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value
    @property
    def url(self):
        """
        API URL of this user resource; base for the get_* listing methods.

        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
def get_events(self):
"""
:calls: `GET /users/:user/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events",
None
)
def get_followers(self):
"""
:calls: `GET /users/:user/followers <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
NamedUser,
self._requester,
self.url + "/followers",
None
)
def get_following(self):
"""
:calls: `GET /users/:user/following <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
NamedUser,
self._requester,
self.url + "/following",
None
)
def get_gists(self):
"""
:calls: `GET /users/:user/gists <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
self.url + "/gists",
None
)
def get_keys(self):
"""
:calls: `GET /users/:user/keys <http://developer.github.com/v3/users/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.UserKey.UserKey`
"""
return github.PaginatedList.PaginatedList(
github.UserKey.UserKey,
self._requester,
self.url + "/keys",
None
)
def get_orgs(self):
"""
:calls: `GET /users/:user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
"""
return github.PaginatedList.PaginatedList(
github.Organization.Organization,
self._requester,
self.url + "/orgs",
None
)
def get_public_events(self):
"""
:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events/public",
None
)
def get_public_received_events(self):
"""
:calls: `GET /users/:user/received_events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/received_events/public",
None
)
def get_received_events(self):
"""
:calls: `GET /users/:user/received_events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/received_events",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, type=github.GithubObject.NotSet):
"""
:calls: `GET /users/:user/repos <http://developer.github.com/v3/repos>`_
:param type: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert type is github.GithubObject.NotSet or isinstance(type, str), type
url_parameters = dict()
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/repos",
url_parameters
)
def get_starred(self):
    """
    :calls: `GET /users/:user/starred <http://developer.github.com/v3/activity/starring>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
    """
    # Lazily-paginated list; no extra URL parameters are needed.
    url = self.url + "/starred"
    return github.PaginatedList.PaginatedList(
        github.Repository.Repository, self._requester, url, None
    )
def get_subscriptions(self):
    """
    :calls: `GET /users/:user/subscriptions <http://developer.github.com/v3/activity/watching>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
    """
    # Lazily-paginated list; no extra URL parameters are needed.
    url = self.url + "/subscriptions"
    return github.PaginatedList.PaginatedList(
        github.Repository.Repository, self._requester, url, None
    )
def get_watched(self):
    """
    :calls: `GET /users/:user/watched <http://developer.github.com/v3/activity/starring>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
    """
    # Lazily-paginated list; no extra URL parameters are needed.
    url = self.url + "/watched"
    return github.PaginatedList.PaginatedList(
        github.Repository.Repository, self._requester, url, None
    )
def has_in_following(self, following):
    """
    :calls: `GET /users/:user/following/:target_user <http://developer.github.com/v3/users/followers/#check-if-one-user-follows-another>`_
    :param following: :class:`github.NamedUser.NamedUser`
    :rtype: bool
    """
    assert isinstance(following, github.NamedUser.NamedUser), following
    # GitHub answers 204 when the follow relationship exists, 404 otherwise;
    # requestJson (rather than requestJsonAndCheck) lets us inspect the status.
    status, headers, data = self._requester.requestJson(
        "GET", self.url + "/following/" + following._identity
    )
    return status == 204
@property
def _identity(self):
    # Users are addressed by their login in URL paths.
    return self.login
def _initAttributes(self):
    """Reset every lazily-loaded attribute to the NotSet sentinel."""
    not_set = github.GithubObject.NotSet
    for name in (
        "avatar_url", "bio", "blog", "collaborators", "company",
        "contributions", "created_at", "disk_usage", "email", "events_url",
        "followers", "followers_url", "following", "following_url",
        "gists_url", "gravatar_id", "hireable", "html_url", "id",
        "location", "login", "name", "organizations_url",
        "owned_private_repos", "plan", "private_gists", "public_gists",
        "public_repos", "received_events_url", "repos_url", "starred_url",
        "subscriptions_url", "total_private_repos", "type", "updated_at",
        "url",
    ):
        setattr(self, "_" + name, not_set)
def _useAttributes(self, attributes):
    """Copy the raw API `attributes` dict into typed private fields.

    Only keys present in `attributes` are touched; everything else keeps
    the NotSet sentinel set by _initAttributes.
    """
    # Map each attribute-maker to the JSON keys it converts.
    makers = (
        (self._makeStringAttribute, (
            "avatar_url", "bio", "blog", "company", "email", "events_url",
            "followers_url", "following_url", "gists_url", "gravatar_id",
            "html_url", "location", "login", "name", "organizations_url",
            "received_events_url", "repos_url", "starred_url",
            "subscriptions_url", "type", "url",
        )),
        (self._makeIntAttribute, (
            "collaborators", "contributions", "disk_usage", "followers",
            "following", "id", "owned_private_repos", "private_gists",
            "public_gists", "public_repos", "total_private_repos",
        )),
        (self._makeDatetimeAttribute, ("created_at", "updated_at")),
        (self._makeBoolAttribute, ("hireable",)),
    )
    for make, names in makers:
        for name in names:
            if name in attributes:  # pragma no branch
                setattr(self, "_" + name, make(attributes[name]))
    if "plan" in attributes:  # pragma no branch
        self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test the hunt_view interface."""
from absl import app
from grr_response_server.gui import gui_test_lib
from grr.test_lib import test_lib
class TestHuntControl(gui_test_lib.GRRSeleniumHuntTest):
  """Test the hunt start/stop/delete functionality."""

  def _SelectHunt(self, hunt_id):
    """Opens the hunts list and selects the row for `hunt_id`."""
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsTextPresent, hunt_id)
    # Select a Hunt.
    self.Click("css=td:contains('%s')" % hunt_id)

  def _WaitForHuntDetails(self):
    """Waits until the hunt details panel is rendered."""
    self.WaitUntil(self.IsElementPresent, "css=dl.dl-hunt")
    self.WaitUntil(self.IsTextPresent, "Clients Scheduled")
    self.WaitUntil(self.IsTextPresent, "Hunt ID")

  def _ExpectApprovalRequiredAndCancel(self):
    """Proceeds without approval, expects the approval dialog, cancels it."""
    # Click on "Proceed" and wait for authorization dialog to appear.
    self.Click("css=button[name=Proceed]")
    # This should be rejected now and a form request is made.
    self.WaitUntil(self.IsTextPresent, "Create a new approval")
    self.Click("css=grr-request-approval-dialog button[name=Cancel]")
    # Wait for dialog to disappear.
    self.WaitUntilNot(self.IsVisible, "css=.modal-open")

  def _FillModifyHuntDialog(self):
    """Waits for the modify-hunt dialog and fills in the test values."""
    self.WaitUntil(self.IsTextPresent, "Modify this hunt")
    self.Type(
        "css=grr-modify-hunt-dialog label:contains('Client limit') ~ * input",
        "4483")
    self.Type(
        "css=grr-modify-hunt-dialog label:contains('Client rate') ~ * input",
        "42")
    self.Type("css=grr-modify-hunt-dialog label:contains('Duration') ~ * input",
              "1337s")

  def testToolbarStateForStoppedHunt(self):
    hunt_id = self.CreateSampleHunt(stopped=True)
    self._SelectHunt(hunt_id)
    self._WaitForHuntDetails()
    # A stopped hunt can be run or modified, but not stopped again.
    self.WaitUntil(self.IsElementPresent,
                   "css=button[name=RunHunt]:not([disabled])")
    self.WaitUntil(self.IsElementPresent, "css=button[name=StopHunt][disabled]")
    self.WaitUntil(self.IsElementPresent,
                   "css=button[name=ModifyHunt]:not([disabled])")

  def testToolbarStateForRunningHunt(self):
    hunt_id = self.CreateSampleHunt(stopped=False)
    self._SelectHunt(hunt_id)
    self._WaitForHuntDetails()
    # A running hunt can only be stopped.
    self.WaitUntil(self.IsElementPresent, "css=button[name=RunHunt][disabled]")
    self.WaitUntil(self.IsElementPresent,
                   "css=button[name=StopHunt]:not([disabled])")
    self.WaitUntil(self.IsElementPresent,
                   "css=button[name=ModifyHunt][disabled]")

  def testRunHunt(self):
    hunt_id = self.CreateSampleHunt(stopped=True)
    self._SelectHunt(hunt_id)
    # Click on Run button and check that dialog appears.
    self.Click("css=button[name=RunHunt]")
    self.WaitUntil(self.IsTextPresent,
                   "Are you sure you want to run this hunt?")
    self._ExpectApprovalRequiredAndCancel()
    self.RequestAndGrantHuntApproval(hunt_id)
    # Click on Run and wait for dialog again.
    self.Click("css=button[name=RunHunt]")
    self.WaitUntil(self.IsTextPresent,
                   "Are you sure you want to run this hunt?")
    # Click on "Proceed" and wait for success label to appear.
    # Also check that "Proceed" button gets disabled.
    self.Click("css=button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Hunt started successfully!")
    self.assertFalse(self.IsElementPresent("css=button[name=Proceed]"))
    # Click on "Cancel" and check that dialog disappears.
    self.Click("css=button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=.modal-open")
    # View should be refreshed automatically.
    self.WaitUntil(self.IsTextPresent, "GenericHunt")
    # Check the hunt is in a running state.
    self.CheckState("STARTED")

  def testStopHunt(self):
    hunt_id = self.CreateSampleHunt(stopped=False)
    self._SelectHunt(hunt_id)
    # Click on Stop button and check that dialog appears.
    self.Click("css=button[name=StopHunt]")
    self.WaitUntil(self.IsTextPresent,
                   "Are you sure you want to stop this hunt?")
    self._ExpectApprovalRequiredAndCancel()
    self.RequestAndGrantHuntApproval(hunt_id)
    # Click on Stop and wait for dialog again.
    self.Click("css=button[name=StopHunt]")
    self.WaitUntil(self.IsTextPresent,
                   "Are you sure you want to stop this hunt?")
    # Click on "Proceed" and wait for success label to appear.
    # Also check that "Proceed" button gets disabled.
    self.Click("css=button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Hunt stopped successfully")
    self.assertFalse(self.IsElementPresent("css=button[name=Proceed]"))
    # Click on "Cancel" and check that dialog disappears.
    self.Click("css=button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=.modal-open")
    # View should be refreshed automatically.
    self.WaitUntil(self.IsTextPresent, "GenericHunt")
    # Check the hunt is not in a running state.
    self.CheckState("STOPPED")

  def testModifyHunt(self):
    hunt_id = self.CreateSampleHunt(stopped=True)
    self._SelectHunt(hunt_id)
    # Click on Modify button and check that dialog appears.
    self.Click("css=button[name=ModifyHunt]")
    self._FillModifyHuntDialog()
    self._ExpectApprovalRequiredAndCancel()
    # Now create an approval.
    self.RequestAndGrantHuntApproval(hunt_id)
    # Click on Modify button and check that dialog appears.
    self.Click("css=button[name=ModifyHunt]")
    self._FillModifyHuntDialog()
    # Click on "Proceed" and wait for success label to appear.
    # Also check that "Proceed" button gets disabled.
    self.Click("css=button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Hunt modified successfully!")
    self.assertFalse(self.IsElementPresent("css=button[name=Proceed]"))
    # Click on "Cancel" and check that dialog disappears.
    self.Click("css=button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=.modal-open")
    # View should be refreshed automatically.
    self.WaitUntil(self.IsTextPresent, hunt_id)
    self.WaitUntil(self.IsTextPresent, "4483")
    self.WaitUntil(self.IsTextPresent, "1337s")

  def testDeleteHunt(self):
    # This needs to be created by a different user so we can test the
    # approval dialog.
    hunt_id = self.CreateSampleHunt(stopped=True, creator="random user")
    self._SelectHunt(hunt_id)
    # Click on delete button.
    self.Click("css=button[name=DeleteHunt]")
    self.WaitUntil(self.IsTextPresent, "Delete this hunt")
    self._ExpectApprovalRequiredAndCancel()
    # Now create an approval.
    self.RequestAndGrantHuntApproval(hunt_id)
    # Select a hunt again, as it's deselected after approval dialog
    # disappears. TODO(user): if this behavior is not convenient, fix it.
    self.Click("css=td:contains('%s')" % hunt_id)
    # Click on Delete button and check that dialog appears.
    self.Click("css=button[name=DeleteHunt]")
    self.WaitUntil(self.IsTextPresent, "Delete this hunt")
    # Click on "Proceed" and wait for success label to appear.
    # Also check that "Proceed" button gets disabled.
    self.Click("css=button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Hunt deleted successfully!")
    self.assertFalse(self.IsElementPresent("css=button[name=Proceed]"))
    # Click on "Cancel" and check that dialog disappears.
    self.Click("css=button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=.modal-open")
if __name__ == "__main__":
  # absl entry point: dispatches to the GRR test runner (unittest-based).
  app.run(test_lib.main)
| |
# -*- coding: utf-8 -*-
"""Indexing hdf5 files"""
from __future__ import unicode_literals
# NOTICE: THIS FILE IS ALSO PART OF THE CLIENT. IT SHOULD NOT CONTAIN
# REFERENCES TO THE SERVER OR TWISTED PKG.
ext = '.h5'
import os
try:
from cPickle import dumps, loads
except:
from pickle import dumps, loads
unicode = str
basestring = str
from traceback import format_exc
import tables
from datetime import datetime
import numpy as np
from ..parameters import cfilter
from .. import csutil
from .. import option
from ..csutil import lockme, enc_options, str3
from .. import reference
from .corefile import CoreFile
from .dataops import DataOperator
from . import digisign
from .digisign import list_references
max_string_length = 1000
# To disable @lockme locking:
# lockme=lambda func: func
tables.file._FILE_OPEN_POLICY = 'default'
def pathnode(path):
    """Split a complete path into its group and leaf components.

    Trailing slashes are ignored; returns (group, leaf) where group is ''
    when the path has no parent.
    """
    trimmed = path.rstrip('/')
    group, _, leaf = trimmed.rpartition('/')
    return group, leaf
class SharedFile(CoreFile, DataOperator):
    """Interface for test file access. Versioning.
    TODO: move xmlrpc to server-side OutFile?"""
    # Profiling entry points re-exported for remote control.
    start_profiler = csutil.start_profiler
    stop_profiler = csutil.stop_profiler
    version = None
    """Current file version"""
    conf = False
    """Configuration dictionary"""
def __init__(self, *a, **k):
    """Forward all arguments to CoreFile; start with no configuration loaded."""
    self.conf = False
    CoreFile.__init__(self, *a, **k)
    # Cache of resolved nodes, reset whenever a file is (re)opened.
    self.node_cache = {}
def open_file(self, path=False, uid='', mode='a', title='', header=True, version='', load_conf=True):
    """Open the hdf file in `path` (falling back to self.path).

    Creates the file first when mode == 'w', then reopens it.
    Returns (tables.File, path) on success, False on failure.
    `version` selects the version to activate; '' means auto-detect.
    """
    self.node_cache = {}
    if not path:
        path = self.path
    if not path:
        self.log.debug('No path supplied', path, self.path, uid)
        return False
    # Always open the real, normalized path
    path = os.path.realpath(os.path.normpath(path))
    if mode == 'w':
        self.log.debug('Creating in write mode', path)
        # Create an empty file; it is reopened below with the same mode.
        tables.open_file(path, mode='w', title=title).close()
    if not os.path.exists(path):
        raise RuntimeError("File %s not found." % path)
    try:
        self.log.debug('opening existing file', path, mode, repr(version))
        # Demote concurrent append handles before a read-only open.
        if mode == 'r' and self.highest_mode(path) == 'a':
            self.close_handlers(path)
        self.test = tables.open_file(path, mode=mode)
        self.path = path
    except Exception:  # FIX: was a bare `except:` (also caught SystemExit)
        self.log.error('Error opening file:', format_exc(), path)
        return False
    self.uid = uid
    if header and mode != 'w':
        if not self.has_node('/userdata') and mode != 'r':
            self.create_group('/', 'userdata')
            self.set_attributes('/userdata', attrs={'active_version': ''})
        elif not version:
            version = self.active_version()
            if not self.has_node(version):
                self.log.info('VERSION DOES NOT EXIST', version)
                version = ''
        if self.has_node('/conf'):
            if self.has_node_attr('/conf', 'uid'):
                self.uid = self.get_node_attr('/conf', 'uid')
            if version is not None:  # FIX: was `version != None`
                self.set_version(version, load_conf=load_conf)
        else:
            self.log.info(
                'No configuration object was found', path, version)
        self.header(refresh=False, version=self.get_version())
    if self.conf is False:
        self.conf = option.ConfigurationProxy()
    return self.test, self.path
def writable(self):
    """Return True when an open handle allows writing ('a' or 'r+')."""
    handle = self.test
    return bool(handle) and handle.mode in ('a', 'r+')
def load_conf(self):
    """Rebuild self.conf from the configuration tree stored in the file."""
    tree = self.conf_tree()
    proxy = option.ConfigurationProxy(desc=tree)
    proxy.filename = self.path
    self.conf = proxy
    self.log.debug('load conf', self.conf, len(tree))
    return True
def verify(self):
    """Digitally verify the open file; False when no file is open."""
    return digisign.verify(self.test) if self.test else False
def get_versions(self):
    """List available versions. Returns a dictionary {path: (name,date)}"""
    if not self.test:
        self.log.debug('get_versions: no test defined')
        return {}
    if not self._has_node('/conf'):
        self.log.debug('get_versions: no /conf')
        return {}
    # The empty path denotes the pristine "Original" version.
    v = {'': ('Original', self.test.root.conf.attrs.date)}
    # skip 0 and seek a little farer
    latest = 0
    for node in self.test.list_nodes('/'):
        name = str(node._v_name)
        # Version groups are top-level nodes named /ver_N.
        if not name.startswith('ver_'):
            continue
        ver = int(name.split('_')[-1])
        if ver > latest:
            latest = ver
        v[str(node._v_pathname)] = (node._f_getattr('name'),
                                    node._f_getattr('date'))
    if self.writable():
        # Keep the version counter attribute in sync with what is on disk.
        self.test.root.conf.attrs.versions = latest
    self.log.debug('returning versions', v)
    return v
def get_version_by_name(self, name):
    """Return the path of the version called `name`, or False if missing."""
    for path, info in self.get_versions().items():
        if info[0] == name:
            return path
    return False
def get_latest_version_number(self):
    """Highest numeric suffix among /ver_N paths (0 when only Original)."""
    numbers = [0]
    for key in self.get_versions():
        numbers.append(0 if key == '' else int(key.split('_')[-1]))
    return max(numbers)
def get_versions_by_date(self):
    """Return versions as [path, name, date] lists sorted by creation date.

    FIX: the original called datetime.strptime(e[2]) without the mandatory
    format argument (a TypeError on first use) and never returned the
    sorted list.
    """
    versions = [[key] + list(val) for key, val in self.get_versions().items()]
    # Dates are written by create_version with this exact format.
    versions.sort(key=lambda e: datetime.strptime(e[2], "%H:%M:%S, %d/%m/%Y"))
    return versions
def get_version(self):
    """Current version path; '' stands for the original version."""
    if self.version:
        return self.version
    return ''
def set_version(self, newversion=-1, load_conf=True):
    """Set the current version to `newversion`.

    `newversion` may be a version path ('/ver_3'), '' for the original,
    -1 to auto-select the last used (or latest) version, or a plain
    number (deprecated).
    """
    # Load the last used version
    if newversion == -1:
        found = False
        self._lock.acquire()
        # FIX: release the lock even if a tables call below raises.
        try:
            if '/userdata' in self.test:
                newversion = self._active_version()
                if self._has_node(newversion):
                    self.log.debug('Found active version', newversion)
                    found = True
            if not found:
                # Fall back to the highest recorded version number.
                newversion = getattr(
                    self.test.root.conf.attrs, 'versions', 0) - 1
                if newversion > 0:
                    newversion = '/ver_{}'.format(newversion)
                    if self._has_node(newversion):
                        self.log.debug('Take latest version', newversion)
                        found = True
            if not found:
                newversion = ''
                self.log.debug(
                    'Last version was not found. Taking original')
        finally:
            self._lock.release()
    # Load version by number (deprecated)
    if not isinstance(newversion, basestring):
        newversion = '/ver_{}'.format(newversion)
    if self.version == newversion and len(self.conf):
        self.log.debug('Not changing version!', self.version, newversion)
        return True
    self._change_version(newversion)
    if load_conf:
        self.load_conf()
    self.header(refresh=False)
    return True
def _change_version(self, new_version):
    """Record `new_version` as current; persist it when the file is writable."""
    self.log.debug('Changing version to {}, {} (old was {})'.format(
        repr(new_version),
        type(new_version),
        self.version))
    self.version = str(new_version)
    if not self.writable():
        return False
    self._set_attributes(
        '/userdata', attrs={'active_version': new_version})
    return True
def create_version(self, name=False, overwrite=True):
    """Create a new version with `name`. `overwrite` a previous version with same name."""
    self.reopen(mode='a')
    newversion = False
    if name:
        newversion = self.get_version_by_name(name)
    if newversion and overwrite:
        # Reuse the existing version group instead of creating a new one.
        self.log.debug('Found version', name, 'saved as',
                       newversion, '. Overwriting.')
        latest = int(newversion.split('_')[-1])
        #self.remove_version(newversion, remove_plots=False)
    else:
        latest = self.get_latest_version_number() + 1
        newversion = '/ver_{}'.format(latest)
        if not name:
            name = newversion
    # NOTE(review): under Python 3 this produces a bytes object, which is
    # then stored as the 'name' attribute below — confirm intended.
    name = unicode(name).encode('ascii', 'ignore')
    if not self.has_node('/', newversion[1:]):
        self.log.debug('creating new version', newversion, name)
        self.test.create_group('/', newversion[1:])
    else:
        self.log.debug('using existing version', newversion, name)
    d = datetime.now().strftime("%H:%M:%S, %d/%m/%Y")
    self._set_attributes(newversion, attrs={'name': name, 'date': d})
    self.test.root.conf.attrs.versions = latest
    # Set current version (will be empty until some transparent writing
    # occurs)
    self.version = str(newversion)
    self.log.debug('New version is now active', newversion)
    self._change_version(newversion)
    self.test.flush()
    return newversion
def remove_version(self, version_path, remove_plots=True):
    """Delete version `version_path`; keep its plots when remove_plots is False."""
    self.reopen(mode='a')
    if remove_plots:
        # Drop the whole version group, plots included.
        self.remove_node(version_path, recursive=True)
        self.log.info('Removed version', version_path)
    else:
        # Remove every child except the 'plots' subgroup.
        for node in self.list_nodes():
            if node == 'plots':
                continue
            self.remove_node(version_path + '/' + node)
    if version_path == self.version:
        # The removed version was active: fall back to Original.
        self._change_version('')
    # NOTE(review): decrementing when versions == (removed - 1) looks
    # off-by-one — confirm the intended counter bookkeeping.
    n = int(version_path.split('_')[-1]) - 1
    if self.test.root.conf.attrs.versions == n:
        self.test.root.conf.attrs.versions -= 1
    if self._active_version() == version_path:
        self.log.debug('Resetting active_version to Original')
        self.test.set_node_attr('/userdata', 'active_version', '')
    self.header(refresh=True)
    self.log.debug('Removed version', version_path, n)
    self.flush()
    return True
@lockme()
def get_plots(self, render=False, version=False):
    """List available plots in `version` (current if False).
    Returns a dictionary {path: (name,date,render,render_format)}"""
    r = {}
    if not self.test:
        return r
    plots_path = self._versioned('/plot', version=version)
    if not plots_path in self.test:
        return r
    image = False
    # TODO: read format
    image_format = False
    for node in self.test.list_nodes(plots_path):
        path = plots_path + '/{}/'.format(node._v_name)
        script = self.test.get_node(path + 'script')
        image = False
        if render:
            # Only load the (possibly large) rendered image on request.
            if path + 'render' in self.test:
                image = self._file_node(path + 'render')
        r[node._v_name] = (script.attrs.title, script.attrs.date,
                           image, script.attrs.format)
    return r
def get_plot(self, plot_id):
    """Return (script_text, attributes) for the plot `plot_id`."""
    node_path = self.versioned('/plot') + '/{}/script'.format(plot_id)
    return self.file_node(node_path), self.get_attributes(node_path)
def save_plot(self, text, plot_id=False,
              title=False,
              date=False,
              render=False,
              render_format=False):
    """Save the text of a plot to plot_id, optionally adding a title, date and rendered output"""
    if not self.version:
        # Plots live inside version groups; Original has none.
        self.log.error(
            'Cannot save plots for original version. Please make a new version first.')
        return False
    self.reopen(mode='a')
    plots_path = self.get_version() + '/plot'
    if not plots_path in self.test if False else not self.has_node(plots_path):
        self.create_group(self.versioned('/'), 'plot')
    if not plot_id:
        plot_id = '0'
    # NOTE(review): unreachable — plot_id was just defaulted to '0' above,
    # so get_unique_name is never called. Confirm which default is intended.
    if not plot_id:
        plot_id = self.get_unique_name(plots_path)
    if not title:
        title = plot_id
    if not date:
        date = datetime.now().strftime("%H:%M:%S, %d/%m/%Y")
    base_group = plots_path + '/' + plot_id
    if not self.has_node(base_group):
        self.create_group(plots_path, plot_id)
    text_path = base_group + '/script'
    self.filenode_write(text_path, data=text)
    self.set_attributes(text_path, attrs={'title': title,
                                          'date': date,
                                          'format': render_format})
    if render and render_format:
        # Store the pre-rendered output alongside the script.
        render_path = base_group + '/render'
        self.filenode_write(render_path, data=render)
        self.set_attributes(render_path, attrs={'format': render_format})
    return plot_id, title, date
def getLog(self):
    """Return the acquisition log as text (currently disabled)."""
    # FIXME: show logging
    # NOTE(review): the early return below makes the decoding loop dead code.
    return 'unimplemented'
    txt = ''
    for line in self.test.root.log:
        txt += reference.Binary.decode(line)[1]
        if not txt.endswith('\n'):
            txt += '\n'
    return txt
def conf_tree(self, path=False):
    """Load and unpickle the configuration tree stored at `path`.

    Returns the dict on success; '{}' (a string) when the node is missing
    and False when the payload is not a dict.
    """
    if not path:
        path = self.versioned('/conf')
    self.log.debug('Loading conf', path)
    tree = self.file_node(path)
    if tree in [False, None]:
        self.log.warning('Configuration node file not found!', path)
        return '{}'
    # test
    self.log.debug('loading ', len(tree))
    opt = enc_options.copy()
    if 'encoding' in opt:
        # Python-2 pickles carry latin1-encoded byte strings.
        opt['encoding'] = 'latin1'
    d = loads(tree, **opt)
    if not isinstance(d, dict):
        self.log.debug('Wrong Conf Tree!')
        return False
    self.log.debug('Conf tree length:', len(tree))
    return d
def xmlrpc_conf_tree(self):
    """Return the raw configuration tree, binary-wrapped for xmlrpc."""
    raw = self.file_node(self.versioned('/conf'))
    if raw is False:
        return raw
    return csutil.binfunc(raw)
def save_conf(self, tree=False, writeLevel=3):
    """Saves a new version of the configuration tree"""
    # NOTE(review): writeLevel is currently unused here — confirm whether
    # filtering by write level was intended.
    if not tree:
        if not self.conf:
            self.load_conf()
        tree = self.conf.tree()
    ver = self.get_version()
    self.filenode_write(ver + '/conf', obj=tree)
    if ver != '':
        # Propagate the original /conf attributes onto the versioned copy.
        a = self.get_attributes('/conf')
        self.set_attributes(ver + '/conf', attrs=a)
    self.conf = option.ConfigurationProxy(desc=tree)
    return
def save_data(self, path, data, time_data, opt=False):
    """Write `data` sampled at `time_data` under `path` in the active version.

    Chooses a FixedTimeArray when the time steps are constant, a plain
    Array otherwise. Raises RuntimeError when the Original version is
    active (it is read-only).
    """
    version = self.active_version()
    if version == '':  # FIX: was `version is ''` (identity vs equality)
        raise RuntimeError(
            "Original version is not writable.\nCreate or switch to another version first.")
    path = path.split(':')[-1]
    path = ("/summary/" + path).replace('//', '/')
    vpath = path.split("/")
    parent = "/".join(vpath[0:-1])
    name = vpath[-1]
    newparent = version + parent
    path = newparent + "/" + name
    if not opt:
        # Inherit the column options from the unversioned dataset.
        opt = self.get_attributes(parent + '/' + name)
    opt['handle'] = name
    self.remove_node(path)
    # Detect fixed time
    td = [1, 0]
    if len(time_data) > 10:
        td = np.diff(time_data)
    if max(td) - min(td) > 1e-14:
        # Regular Array
        write_data = np.transpose(np.vstack((time_data, data)))
        array_cls = reference.Array
    else:
        # FixedTimeArray
        write_data = np.transpose(data)
        array_cls = reference.FixedTimeArray
        opt['t0'] = time_data[0]
        opt['dt'] = td.mean()
    dest_path_reference = array_cls(
        self, newparent, opt=opt, with_summary=False)
    dest_path_reference.append(write_data)
    self.flush()
    # Keep the in-memory header cache up to date.
    if path not in self._header[array_cls.__name__]:
        self._header[array_cls.__name__].append(path)
def active_version(self):
    """Version path recorded in /userdata, or '' when unset or unreadable."""
    try:
        return self.get_node_attr('/userdata', 'active_version')
    except Exception:  # FIX: narrowed from a bare `except:`
        return ''
def _active_version(self):
    """Unlocked variant of active_version() for use under self._lock."""
    try:
        return self.test.get_node_attr('/userdata', 'active_version')
    except Exception:  # FIX: narrowed from a bare `except:`
        return ''
def _write_userdata_header(self, h):
    """Save header dict into userdata cache.
    No lock."""
    from time import time
    t0 = time()
    if not self._has_node('/userdata'):
        self.create_group('/', 'userdata')
        self.set_attributes('/userdata', attrs={'active_version': ''})
    # Write each header entry in a separate variable-length-array
    for cls_name in h:
        name = 'header_'+cls_name
        if self._has_node('/userdata', name):
            # Replace any stale cache for this reference class.
            self.test.remove_node('/userdata',name)
        vla = self.test.create_vlarray(where='/userdata',
                                       name=name,
                                       atom=tables.StringAtom(itemsize=1),
                                       title='Header cache for '+cls_name,
                                       filters=cfilter)
        for dsn in h[cls_name]:
            # Each path is stored as a row of single characters.
            vla.append(list(dsn))
        self.log.debug('Wrote cached header class', cls_name, len(h[cls_name]))
    # put only the keys in the header attr
    self.test.set_node_attr('/userdata', 'header', list(h.keys()))
    self.log.debug('Finished writing headers cache', len(h), 1000 * (time() - t0))
def _read_userdata_header(self):
    """Load header dict from userdata cache.
    No lock"""
    from time import time
    t0 = time()
    h = {}
    if not self._has_node('/userdata'):
        self.log.debug('_read_userdata_header: no /userdata')
        return h
    try:
        keys = self.test.get_node_attr('/userdata', 'header')
    except:
        self.log.error(format_exc())
        keys = []
    # Compatibility with old caching mechanism
    if isinstance(keys, dict):
        self.log.debug('Reading old header cache', keys)
        return keys
    for cls_name in keys:
        name = 'header_'+cls_name
        if not self._has_node('/userdata', name):
            self.log.error('Could not find header class', cls_name)
            continue
        n = self._get_node('/userdata', name)
        v = []
        for path in n:
            # Rows are character arrays; re-join them into path strings.
            v.append(''.join(path))
        h[cls_name] = v
        self.log.debug('Loaded header class:',cls_name, len(v))
    self.log.debug('Loaded cached header', len(h), 1000 * (time() - t0))
    return h
@lockme()
def header(self, reference_classes=['Array'], startswith=False, refresh=False, version=False):
    """Returns all available data references.

    `reference_classes` selects which reference kinds to list (False =
    all); `startswith` filters by path prefix; `refresh` forces a rescan
    of the file instead of using the cached header.
    """
    from time import time
    if not version:
        version = self.get_version()
    # Try to read cached header from file
    if not refresh and len(self._header) == 0:
        self._header = self._read_userdata_header()
    # Rebuild the header if empty or refresh
    if refresh or len(self._header) == 0:
        t0 = time()
        self._header = list_references(self.test.root)
        self.log.debug('References',
                       len(self._header),
                       1000 * (time() - t0))
        if self.writable():
            self._write_userdata_header(self._header)
    if reference_classes is False:
        reference_classes = self._header.keys()
    r = []
    for k in reference_classes:
        r += self._header.get(k, [])
    # FIX: the original chained filter() objects and then evaluated
    # `version + el not in r` against one of those iterators — under
    # Python 3 that consumes the iterator being filtered. Materialize
    # each stage as a list (matching the Python-2 semantics).
    if startswith:
        swv = version + startswith
        r = [el for el in r if el.startswith(startswith) or el.startswith(swv)]
    if not version:
        r = [el for el in r if not el.startswith('/ver_')]
    else:
        # Exclude elements with wrong version
        r = [el for el in r
             if el.startswith(version + '/') or not el.startswith('/ver_')]
        # Exclude unversioned elements having a versioned counterpart
        current = set(r)
        r = [el for el in r if version + el not in current]
    return list(r)
def xmlrpc_col(self, *a, **k):
    """Pickled, binary-wrapped proxy for col()."""
    return csutil.binfunc(dumps(self.col(*a, **k)))
def xmlrpc_col_at(self, *a, **k):
    """Pickled, binary-wrapped proxy for col_at()."""
    return csutil.binfunc(dumps(self.col_at(*a, **k)))
@lockme()
def get_decoded(self, path, idx, get):
    """Get index `idx` of node `path` through the getter callable `get`."""
    node = self._get_node(path)
    return get(node, idx)
    @lockme()
    def query_time(self, path, startTime=-1, endTime=-1, step=None, interp=False):
        """Read rows of the array node at `path` whose timestamps fall in
        [`startTime`, `endTime`].

        Non-positive bounds default to the node's first/last timestamp.
        Without `step`, the raw row slice is returned.  With `step`, a
        regular time grid is built; with `interp` as well, values are
        interpolated onto that grid and returned as an (N, 2)
        [time, value] array.
        """
        n = self._get_node(path)
        # TODO: adapt also to other Reference objects
        t = n.cols.t
        # Default the bounds to the full recorded time span.
        if startTime < 0:
            startTime = t[0]
        if endTime <= 0:
            endTime = t[-1]
        if startTime > endTime:
            self.log.error('impossible time frame', startTime, endTime)
            # n.close()
            return []
        # Map requested times onto row indices of the time column.
        si = csutil.find_nearest_val(t, startTime)
        ei = csutil.find_nearest_val(t, endTime)
        self.log.debug(startTime, si, endTime, ei)
        arr = n[si:ei]
        # n.close()
        if step is None:
            return arr
        # Interpolate for time stepping
        st = t[si]
        et = t[ei]
        ts = np.arange(st, et, step)
        self.log.debug('tseq', st, et, step, ts)
        if interp:
            r = self.interpolated_col(
                arr=arr, startIdx=0, endIdx=-1, time_sequence=ts)
            # Pair each grid time with its interpolated value: (N, 2) result.
            r = np.array([ts, r])
            r = r.transpose()
        else:
            # NOTE(review): when step is given but interp is False, the raw
            # slice is returned and ``ts`` is unused -- confirm intended.
            r = arr
        return r
def xmlrpc_query_time(self, *a, **k):
r = self.query_time(*a, **k)
return csutil.binfunc(dumps(r))
def xmlrpc_interpolated_col(self, *a, **k):
r = self.interpolated_col(*a, **k)
return csutil.binfunc(dumps(r))
def instrument_name(self):
return self.get_node_attr('/conf', 'instrument')
def run_scripts(self, instr=None):
"""Re-evaluate scripts"""
if instr is None:
instr = getattr(self.conf, self.instrument_name(), None)
if instr is None:
self.log.debug('Impossible to run scripts: conf is not available.')
return False
if self.conf.kiln is not None:
instr.kiln = self.conf.kiln
# Associate scripts to their output Meta options
instr.outFile = self
instr.distribute_scripts(self)
instr.characterization(period='all')
return True
def copy(self):
return self
def connect(self):
return True
def has_key(self, *a, **k):
return False
def __contains__(self, k):
return False
def decode(self, method):
"""Return if a method name should be decoded client-side"""
return hasattr(self, 'xmlrpc_' + method)
| |
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import matplotlib
# NOTE(review): Matplotlib's canonical backend name is "TkAgg", so this
# case-sensitive comparison never matches and use("TKAgg") runs on every
# import -- presumably harmless since backend names are resolved
# case-insensitively, but confirm.
if matplotlib.get_backend() != "TKAgg":
    matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def save_redo(SpecRecs, inspec):
    """Persist the current specimen interpretations back to *inspec*."""
    print("Saving changes to specimen file")
    table = 'pmag_specimens'
    pmag.magic_write(inspec, SpecRecs, table)
def main():
    """
    NAME
        zeq_magic.py

    DESCRIPTION
        reads in magic_measurements formatted file, makes plots of remanence decay
        during demagnetization experiments. Reads in prior interpretations saved in
        a pmag_specimens formatted file and allows re-interpretations of best-fit lines
        and planes and saves (revised or new) interpretations in a pmag_specimens file.
        interpretations are saved in the coordinate system used. Also allows judicious editting of
        measurements to eliminate "bad" measurements. These are marked as such in the magic_measurements
        input file. they are NOT deleted, just ignored.

    SYNTAX
        zeq_magic.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f MEASFILE: sets magic_measurements format input file, default: magic_measurements.txt
        -fsp SPECFILE: sets pmag_specimens format file with prior interpreations, default: zeq_specimens.txt
        -Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)
        -crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system
        -fsa SAMPFILE: sets er_samples format file with orientation information, default: er_samples.txt
        -spc SPEC plots single specimen SPEC, saves plot with specified format
            with optional -dir settings and quits
        -dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none
            beg: starting step for PCA calculation
            end: ending step for PCA calculation
            [L,P,F]: calculation type for line, plane or fisher mean
            must be used with -spc option
        -fmt FMT: set format of saved plot [png,svg,jpg]
        -A: suppresses averaging of replicate measurements, default is to average
        -sav: saves all plots without review
    SCREEN OUTPUT:
        Specimen, N, a95, StepMin, StepMax, Dec, Inc, calculation type
    """
    # initialize some variables
    doave,e,b=1,0,0 # average replicates, initial end and beginning step
    plots,coord=0,'s'
    noorient=0
    version_num=pmag.get_version()
    verbose=pmagplotlib.verbose
    beg_pca,end_pca,direction_type="","",'l'
    calculation_type,fmt="","svg"
    user,spec_keys,locname="",[],''
    plot_file=""
    sfile=""
    # NOTE(review): plot_file is initialized twice; this second assignment is redundant
    plot_file=""
    PriorRecs=[] # empty list for prior interpretations
    backup=0
    specimen="" # can skip everything and just plot one specimen with bounds e,b
    # ---- parse command-line options ----
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-WD' in sys.argv:
        ind=sys.argv.index('-WD')
        dir_path=sys.argv[ind+1]
    else:
        dir_path='.'
    inspec=dir_path+'/'+'zeq_specimens.txt'
    meas_file,geo,tilt,ask,samp_file=dir_path+'/magic_measurements.txt',0,0,0,dir_path+'/er_samples.txt'
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        meas_file=dir_path+'/'+sys.argv[ind+1]
    if '-fsp' in sys.argv:
        ind=sys.argv.index('-fsp')
        inspec=dir_path+'/'+sys.argv[ind+1]
    if '-fsa' in sys.argv:
        ind=sys.argv.index('-fsa')
        samp_file=dir_path+'/'+sys.argv[ind+1]
        sfile='ok'
    if '-crd' in sys.argv:
        ind=sys.argv.index('-crd')
        coord=sys.argv[ind+1]
        if coord=='g' or coord=='t':
            samp_data,file_type=pmag.magic_read(samp_file)
            if file_type=='er_samples':sfile='ok'
            geo=1
            if coord=='t':tilt=1
    if '-spc' in sys.argv:
        ind=sys.argv.index('-spc')
        specimen=sys.argv[ind+1]
    if '-dir' in sys.argv:
        ind=sys.argv.index('-dir')
        direction_type=sys.argv[ind+1]
        beg_pca=int(sys.argv[ind+2])
        end_pca=int(sys.argv[ind+3])
        if direction_type=='L':calculation_type='DE-BFL'
        if direction_type=='P':calculation_type='DE-BFP'
        if direction_type=='F':calculation_type='DE-FM'
    if '-Fp' in sys.argv:
        ind=sys.argv.index('-Fp')
        plot_file=dir_path+'/'+sys.argv[ind+1]
    if '-A' in sys.argv: doave=0
    if '-sav' in sys.argv:
        plots=1
        verbose=0
    if '-fmt' in sys.argv:
        ind=sys.argv.index('-fmt')
        fmt=sys.argv[ind+1]
    #
    first_save=1
    # read the measurement data and tidy up the method/instrument code strings
    meas_data,file_type=pmag.magic_read(meas_file)
    changeM,changeS=0,0 # check if data or interpretations have changed
    if file_type != 'magic_measurements':
        print(file_type)
        print(file_type,"This is not a valid magic_measurements file ")
        sys.exit()
    for rec in meas_data:
        if "magic_method_codes" not in rec.keys(): rec["magic_method_codes"]=""
        methods=""
        tmp=rec["magic_method_codes"].replace(" ","").split(":")
        for meth in tmp:
            methods=methods+meth+":"
        rec["magic_method_codes"]=methods[:-1] # get rid of annoying spaces in Anthony's export files
        if "magic_instrument_codes" not in rec.keys() :rec["magic_instrument_codes"]=""
    # read prior interpretations (if any) and infer their calculation types
    PriorSpecs=[]
    PriorRecs,file_type=pmag.magic_read(inspec)
    if len(PriorRecs)==0:
        if verbose:print("starting new file ",inspec)
    for Rec in PriorRecs:
        if 'magic_software_packages' not in Rec.keys():Rec['magic_software_packages']=""
        if Rec['er_specimen_name'] not in PriorSpecs:
            if 'specimen_comp_name' not in Rec.keys():Rec['specimen_comp_name']="A"
            PriorSpecs.append(Rec['er_specimen_name'])
        else:
            if 'specimen_comp_name' not in Rec.keys():Rec['specimen_comp_name']="A"
        if "magic_method_codes" in Rec.keys():
            methods=[]
            tmp=Rec["magic_method_codes"].replace(" ","").split(":")
            for meth in tmp:
                methods.append(meth)
            if 'DE-FM' in methods:
                Rec['calculation_type']='DE-FM' # this won't be imported but helps
            if 'DE-BFL' in methods:
                Rec['calculation_type']='DE-BFL'
            if 'DE-BFL-A' in methods:
                Rec['calculation_type']='DE-BFL-A'
            if 'DE-BFL-O' in methods:
                Rec['calculation_type']='DE-BFL-O'
            if 'DE-BFP' in methods:
                Rec['calculation_type']='DE-BFP'
        else:
            Rec['calculation_type']='DE-BFL' # default is to assume a best-fit line
    #
    # get list of unique specimen names
    #
    sids=pmag.get_specs(meas_data)
    #
    # set up plots, angle sets X axis to horizontal, direction_type 'l' is best-fit line
    # direction_type='p' is great circle
    #
    #
    # draw plots for sample s - default is just to step through zijderveld diagrams
    #
    #
    # define figure numbers for equal area, zijderveld,
    # and intensity vs. demagnetiztion step respectively
    ZED={}
    ZED['eqarea'],ZED['zijd'], ZED['demag']=1,2,3
    pmagplotlib.plot_init(ZED['eqarea'],5,5)
    pmagplotlib.plot_init(ZED['zijd'],6,5)
    pmagplotlib.plot_init(ZED['demag'],5,5)
    save_pca=0
    # start at the requested specimen (if -spc) or at the top of the list
    if specimen=="":
        k = 0
    else:
        k=sids.index(specimen)
    angle,direction_type="",""
    setangle=0
    CurrRecs=[]
    # ---- main loop: one pass per specimen ----
    while k < len(sids):
        CurrRecs=[]
        if setangle==0:angle=""
        method_codes,inst_code=[],""
        s=sids[k]
        PmagSpecRec={}
        PmagSpecRec["er_analyst_mail_names"]=user
        PmagSpecRec['magic_software_packages']=version_num
        PmagSpecRec['specimen_description']=""
        PmagSpecRec['magic_method_codes']=""
        if verbose and s!="":print(s, k , 'out of ',len(sids))
        #
        # collect info for the PmagSpecRec dictionary
        #
        s_meas=pmag.get_dictitem(meas_data,'er_specimen_name',s,'T') # fish out this specimen
        s_meas=pmag.get_dictitem(s_meas,'magic_method_codes','Z','has') # fish out zero field steps
        if len(s_meas)>0:
            for rec in s_meas: # fix up a few things for the output record
                PmagSpecRec["magic_instrument_codes"]=rec["magic_instrument_codes"] # copy over instruments
                PmagSpecRec["er_citation_names"]="This study"
                PmagSpecRec["er_specimen_name"]=s
                PmagSpecRec["er_sample_name"]=rec["er_sample_name"]
                PmagSpecRec["er_site_name"]=rec["er_site_name"]
                PmagSpecRec["er_location_name"]=rec["er_location_name"]
                locname=rec['er_location_name']
                if 'er_expedition_name' in rec.keys(): PmagSpecRec["er_expedition_name"]=rec["er_expedition_name"]
                PmagSpecRec["magic_method_codes"]=rec["magic_method_codes"]
                if "magic_experiment_name" not in rec.keys():
                    PmagSpecRec["magic_experiment_names"]=""
                else:
                    PmagSpecRec["magic_experiment_names"]=rec["magic_experiment_name"]
                break
        #
        # find the data from the meas_data file for this specimen
        #
        data,units=pmag.find_dmag_rec(s,meas_data)
        PmagSpecRec["measurement_step_unit"]= units
        u=units.split(":")
        if "T" in units:PmagSpecRec["magic_method_codes"]=PmagSpecRec["magic_method_codes"]+":LP-DIR-AF"
        if "K" in units:PmagSpecRec["magic_method_codes"]=PmagSpecRec["magic_method_codes"]+":LP-DIR-T"
        if "J" in units:PmagSpecRec["magic_method_codes"]=PmagSpecRec["magic_method_codes"]+":LP-DIR-M"
        #
        # find prior interpretation
        #
        if len(CurrRecs)==0: # check if already in
            beg_pca,end_pca="",""
            calculation_type=""
            if inspec !="":
                if verbose: print(" looking up previous interpretations...")
                precs=pmag.get_dictitem(PriorRecs,'er_specimen_name',s,'T') # get all the prior recs with this specimen name
                precs=pmag.get_dictitem(precs,'magic_method_codes','LP-DIR','has') # get the directional data
                PriorRecs=pmag.get_dictitem(PriorRecs,'er_specimen_name',s,'F') # take them all out of prior recs
                # get the ones that meet the current coordinate system
                for prec in precs:
                    if 'specimen_tilt_correction' not in prec.keys() or prec['specimen_tilt_correction']=='-1':
                        crd='s'
                    elif prec['specimen_tilt_correction']=='0':
                        crd='g'
                    elif prec['specimen_tilt_correction']=='100':
                        crd='t'
                    else:
                        crd='?'
                    CurrRec={}
                    for key in prec.keys():CurrRec[key]=prec[key]
                    CurrRecs.append(CurrRec) # put in CurrRecs
                    method_codes= CurrRec["magic_method_codes"].replace(" ","").split(':')
                    calculation_type='DE-BFL'
                    if 'DE-FM' in method_codes: calculation_type='DE-FM'
                    if 'DE-BFP' in method_codes: calculation_type='DE-BFP'
                    if 'DE-BFL-A' in method_codes: calculation_type='DE-BFL-A'
                    if 'specimen_dang' not in CurrRec.keys():
                        if verbose:print('Run mk_redo.py and zeq_magic_redo.py to get the specimen_dang values')
                        CurrRec['specimen_dang']=-1
                    # report the prior interpretation, converting steps to the display unit
                    if calculation_type!='DE-FM' and crd==coord: # not a fisher mean
                        if verbose:print("Specimen N MAD DANG start end dec inc type component coordinates")
                        if units=='K':
                            if verbose:print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec["specimen_dang"]),float(CurrRec["measurement_step_min"])-273,float(CurrRec["measurement_step_max"])-273,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd))
                        elif units=='T':
                            if verbose:print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec["specimen_dang"]),float(CurrRec["measurement_step_min"])*1e3,float(CurrRec["measurement_step_max"])*1e3,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd))
                        elif 'T' in units and 'K' in units:
                            if float(CurrRec['measurement_step_min'])<1.0 :
                                min=float(CurrRec['measurement_step_min'])*1e3
                            else:
                                min=float(CurrRec['measurement_step_min'])-273
                            if float(CurrRec['measurement_step_max'])<1.0 :
                                max=float(CurrRec['measurement_step_max'])*1e3
                            else:
                                max=float(CurrRec['measurement_step_max'])-273
                            if verbose:print('%s %i %7.1f %i %i %7.1f %7.1f %7.1f, %s %s\n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec['specimen_dang']),min,max,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,crd))
                        elif 'J' in units:
                            if verbose:print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec['specimen_dang']),float(CurrRec["measurement_step_min"]),float(CurrRec["measurement_step_max"]),float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd))
                    elif calculation_type=='DE-FM' and crd==coord: # fisher mean
                        if verbose:print("Specimen a95 DANG start end dec inc type component coordinates")
                        if units=='K':
                            if verbose:print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_alpha95"]),float(CurrRec["measurement_step_min"])-273,float(CurrRec["measurement_step_max"])-273,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd))
                        elif units=='T':
                            if verbose:print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_alpha95"]),float(CurrRec["measurement_step_min"])*1e3,float(CurrRec["measurement_step_max"])*1e3,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd))
                        elif 'T' in units and 'K' in units:
                            if float(CurrRec['measurement_step_min'])<1.0 :
                                min=float(CurrRec['measurement_step_min'])*1e3
                            else:
                                min=float(CurrRec['measurement_step_min'])-273
                            if float(CurrRec['measurement_step_max'])<1.0 :
                                max=float(CurrRec['measurement_step_max'])*1e3
                            else:
                                max=float(CurrRec['measurement_step_max'])-273
                            if verbose:print('%s %i %7.1f %i %i %7.1f %7.1f %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_alpha95"]),min,max,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,crd))
                        elif 'J' in units:
                            if verbose:print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec["measurement_step_min"]),float(CurrRec["measurement_step_max"]),float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd))
        if len(CurrRecs)==0:beg_pca,end_pca="",""
        datablock=data
        noskip=1
        # need at least three demag steps to interpret; otherwise skip ahead/back
        if len(datablock) <3:
            noskip=0
            if backup==0:
                k+=1
            else:
                k-=1
                if len(CurrRecs)>0:
                    for rec in CurrRecs:
                        PriorRecs.append(rec)
                    CurrRecs=[]
        else:
            backup=0
        if noskip:
        #
        # find replicate measurements at given treatment step and average them
        #
            # step_meth,avedata=pmag.vspec(data)
            # if len(avedata) != len(datablock):
            #     if doave==1:
            #         method_codes.append("DE-VM")
            #         datablock=avedata
            # #
            # do geo or stratigraphic correction now
            #
            if geo==1:
                #
                # find top priority orientation method
                orient,az_type=pmag.get_orient(samp_data,PmagSpecRec["er_sample_name"])
                if az_type=='SO-NO':
                    if verbose: print("no orientation data for ",s)
                    orient["sample_azimuth"]=0
                    orient["sample_dip"]=0
                    noorient=1
                    method_codes.append("SO-NO")
                    orient["sample_azimuth"]=0
                    orient["sample_dip"]=0
                    orient["sample_bed_dip_azimuth"]=0
                    orient["sample_bed_dip"]=0
                    noorient=1
                    method_codes.append("SO-NO")
                else:
                    noorient=0
                #
                # if stratigraphic selected, get stratigraphic correction
                #
                tiltblock,geoblock=[],[]
                for rec in datablock:
                    d_geo,i_geo=pmag.dogeo(rec[1],rec[2],float(orient["sample_azimuth"]),float(orient["sample_dip"]))
                    geoblock.append([rec[0],d_geo,i_geo,rec[3],rec[4],rec[5],rec[6]])
                    if tilt==1 and "sample_bed_dip" in orient.keys() and float(orient['sample_bed_dip'])!=0:
                        d_tilt,i_tilt=pmag.dotilt(d_geo,i_geo,float(orient["sample_bed_dip_direction"]),float(orient["sample_bed_dip"]))
                        tiltblock.append([rec[0],d_tilt,i_tilt,rec[3],rec[4],rec[5],rec[6]])
            # pick the block matching the requested coordinate system
            if tilt==1: plotblock=tiltblock
            if geo==1 and tilt==0:plotblock=geoblock
            if geo==0 and tilt==0: plotblock=datablock
            #
            # set the end pca point to last point if not set
            if e==0 or e>len(plotblock)-1: e=len(plotblock)-1
            if angle=="": angle=plotblock[0][1] # rotate to NRM declination
            title=s+'_s'
            if geo==1 and tilt==0 and noorient!=1:title=s+'_g'
            if tilt==1 and noorient!=1:title=s+'_t'
            pmagplotlib.plot_zed(ZED,plotblock,angle,title,units)
            if verbose:pmagplotlib.draw_figs(ZED)
            # overlay any prior interpretations on the plots
            if len(CurrRecs)!=0:
                for prec in CurrRecs:
                    if 'calculation_type' not in prec.keys():
                        calculation_type=''
                    else:
                        calculation_type=prec["calculation_type"]
                    direction_type=prec["specimen_direction_type"]
                    if calculation_type !="":
                        beg_pca,end_pca="",""
                        for j in range(len(datablock)):
                            if data[j][0]==float(prec["measurement_step_min"]):beg_pca=j
                            if data[j][0]==float(prec["measurement_step_max"]):end_pca=j
                        if beg_pca=="" or end_pca=="":
                            if verbose:
                                print("something wrong with prior interpretation ")
                            break
                    if calculation_type!="":
                        if beg_pca=="":beg_pca=0
                        if end_pca=="":end_pca=len(plotblock)-1
                        if geo==1 and tilt==0:
                            mpars=pmag.domean(geoblock,beg_pca,end_pca,calculation_type)
                            if mpars["specimen_direction_type"]!="Error":
                                pmagplotlib.plot_dir(ZED,mpars,geoblock,angle)
                                if verbose:pmagplotlib.draw_figs(ZED)
                        if geo==1 and tilt==1:
                            mpars=pmag.domean(tiltblock,beg_pca,end_pca,calculation_type)
                            if mpars["specimen_direction_type"]!="Error":
                                pmagplotlib.plot_dir(ZED,mpars,tiltblock,angle)
                                if verbose:pmagplotlib.draw_figs(ZED)
                        if geo==0 and tilt==0:
                            mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
                            if mpars["specimen_direction_type"]!="Error":
                                pmagplotlib.plot_dir(ZED,mpars,plotblock,angle)
                                if verbose:pmagplotlib.draw_figs(ZED)
            #
            # print out data for this sample to screen
            #
            recnum=0
            for plotrec in plotblock:
                if units=='T' and verbose: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]*1e3," mT",plotrec[3],plotrec[1],plotrec[2],plotrec[6]))
                if units=="K" and verbose: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]-273,' C',plotrec[3],plotrec[1],plotrec[2],plotrec[6]))
                if units=="J" and verbose: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0],' J',plotrec[3],plotrec[1],plotrec[2],plotrec[6]))
                if 'K' in units and 'T' in units:
                    if plotrec[0]>=1. and verbose: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]-273,' C',plotrec[3],plotrec[1],plotrec[2],plotrec[6]))
                    if plotrec[0]<1. and verbose: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]*1e3," mT",plotrec[3],plotrec[1],plotrec[2],plotrec[6]))
                recnum += 1
            # single-specimen mode: save the plots and quit
            if specimen!="":
                if plot_file=="":
                    basename=locname+'_'+s
                else:
                    basename=plot_file
                files={}
                for key in ZED.keys():
                    files[key]=basename+'_'+key+'.'+fmt
                pmagplotlib.save_plots(ZED,files)
                sys.exit()
            else: # interactive
                if plots==0:
                    ans='b'
                    k+=1
                    changeS=0
                    # interactive command loop for this specimen
                    while ans != "":
                        if len(CurrRecs)==0:
                            print("""
      g/b: indicates good/bad measurement. "bad" measurements excluded from calculation
       set s[a]ve plot, [b]ounds for pca and calculate, [p]revious, [s]pecimen,
       change [h]orizontal projection angle, change [c]oordinate systems,
       [e]dit data, [q]uit:
      """)
                        else:
                            print("""
      g/b: indicates good/bad measurement. "bad" measurements excluded from calculation
       set s[a]ve plot, [b]ounds for pca and calculate, [p]revious, [s]pecimen,
       change [h]orizontal projection angle, change [c]oordinate systems,
       [d]elete current interpretation(s), [e]dit data, [q]uit:
      """)
                        ans=input('<Return> for next specimen \n')
                        setangle=0
                        if ans=='d': # delete this interpretation
                            CurrRecs=[]
                            k-=1 # replot same specimen
                            ans=""
                            changeS=1
                        if ans=='q':
                            if changeM==1:
                                ans=input('Save changes to magic_measurements.txt? y/[n] ')
                                if ans=='y':
                                    pmag.magic_write(meas_file,meas_data,'magic_measurements')
                            print("Good bye")
                            sys.exit()
                        if ans=='a':
                            if plot_file=="":
                                basename=locname+'_'+s+'_'
                            else:
                                basename=plot_file
                            files={}
                            for key in ZED.keys():
                                files[key]=basename+'_'+coord+'_'+key+'.'+fmt
                            pmagplotlib.save_plots(ZED,files)
                            ans=""
                        if ans=='p':
                            k-=2
                            ans=""
                            backup=1
                        if ans=='c':
                            k-=1 # replot same block
                            if tilt==0 and geo ==1:print("You are currently viewing geographic coordinates ")
                            if tilt==1 and geo ==1:print("You are currently viewing stratigraphic coordinates ")
                            if tilt==0 and geo ==0: print("You are currently viewing sample coordinates ")
                            print("\n Which coordinate system do you wish to view? ")
                            coord=input(" <Return> specimen, [g] geographic, [t] tilt corrected ")
                            if coord=="g":geo,tilt=1,0
                            if coord=="t":
                                geo=1
                                tilt=1
                            if coord=="":
                                coord='s'
                                geo=0
                                tilt=0
                            if geo==1 and sfile=="":
                                samp_file=input(" Input er_samples file for sample orientations [er_samples.txt] " )
                                if samp_file=="":samp_file="er_samples.txt"
                                samp_data,file_type=pmag.magic_read(samp_file)
                                if file_type != 'er_samples':
                                    print(file_type)
                                    print("This is not a valid er_samples file - coordinate system not changed")
                                else:
                                    sfile="ok"
                            ans=""
                        if ans=='s':
                            keepon=1
                            sample=input('Enter desired specimen name (or first part there of): ')
                            while keepon==1:
                                try:
                                    k =sids.index(sample)
                                    keepon=0
                                except:
                                    tmplist=[]
                                    for qq in range(len(sids)):
                                        if sample in sids[qq]:tmplist.append(sids[qq])
                                    print(sample," not found, but this was: ")
                                    print(tmplist)
                                    sample=input('Select one or try again\n ')
                            angle,direction_type="",""
                            setangle=0
                            ans=""
                        if ans=='h':
                            k-=1
                            angle=input("Enter desired declination for X axis 0-360 ")
                            angle=float(angle)
                            if angle==0:angle=0.001
                            s=sids[k]
                            setangle=1
                            ans=""
                        if ans=='e':
                            k-=1
                            ans=""
                            recnum=0
                            for plotrec in plotblock:
                                if plotrec[0]<=200 and verbose: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f ' % (plotrec[5], recnum,plotrec[0]*1e3," mT",plotrec[3],plotrec[1],plotrec[2]))
                                if plotrec[0]>200 and verbose: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f ' % (plotrec[5], recnum,plotrec[0]-273,' C',plotrec[3],plotrec[1],plotrec[2]))
                                recnum += 1
                            answer=input('Enter index of point to change from bad to good or vice versa: ')
                            try:
                                ind=int(answer)
                                meas_data=pmag.mark_dmag_rec(s,ind,meas_data)
                                changeM=1
                            except:
                                'bad entry, try again'
                        if ans=='b':
                            if end_pca=="":end_pca=len(plotblock)-1
                            if beg_pca=="":beg_pca=0
                            k-=1 # stay on same sample until through
                            GoOn=0
                            while GoOn==0:
                                print('Enter index of first point for pca: ','[',beg_pca,']')
                                answer=input('return to keep default ')
                                if answer != "":
                                    beg_pca=int(answer)
                                print('Enter index of last point for pca: ','[',end_pca,']')
                                answer=input('return to keep default ')
                                try:
                                    end_pca=int(answer)
                                    if plotblock[beg_pca][5]=='b' or plotblock[end_pca][5]=='b':
                                        print("Can't select 'bad' measurement for PCA bounds -try again")
                                        end_pca=len(plotblock)-1
                                        beg_pca=0
                                    elif beg_pca >=0 and beg_pca<=len(plotblock)-2 and end_pca>0 and end_pca<len(plotblock):
                                        GoOn=1
                                    else:
                                        print(beg_pca,end_pca, " are bad entry of indices - try again")
                                        end_pca=len(plotblock)-1
                                        beg_pca=0
                                except:
                                    print(beg_pca,end_pca, " are bad entry of indices - try again")
                                    end_pca=len(plotblock)-1
                                    beg_pca=0
                            GoOn=0
                            while GoOn==0:
                                if calculation_type!="":
                                    print("Prior calculation type = ",calculation_type)
                                ct=input('Enter new Calculation Type: best-fit line, plane or fisher mean [l]/p/f : ' )
                                if ct=="" or ct=="l":
                                    direction_type="l"
                                    calculation_type="DE-BFL"
                                    GoOn=1
                                elif ct=='p':
                                    direction_type="p"
                                    calculation_type="DE-BFP"
                                    GoOn=1
                                elif ct=='f':
                                    direction_type="l"
                                    calculation_type="DE-FM"
                                    GoOn=1
                                else:
                                    print("bad entry of calculation type: try again. ")
                            pmagplotlib.plot_zed(ZED,plotblock,angle,s,units)
                            if verbose:pmagplotlib.draw_figs(ZED)
                            # do the PCA/fisher calculation in the current coordinate system
                            if geo==1 and tilt==0:
                                mpars=pmag.domean(geoblock,beg_pca,end_pca,calculation_type)
                                if mpars['specimen_direction_type']=='Error':break
                                PmagSpecRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
                                PmagSpecRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
                                if "SO-NO" not in method_codes:
                                    PmagSpecRec["specimen_tilt_correction"]='0'
                                    method_codes.append("DA-DIR-GEO")
                                else:
                                    PmagSpecRec["specimen_tilt_correction"]='-1'
                                pmagplotlib.plot_dir(ZED,mpars,geoblock,angle)
                                if verbose:pmagplotlib.draw_figs(ZED)
                            if geo==1 and tilt==1:
                                mpars=pmag.domean(tiltblock,beg_pca,end_pca,calculation_type)
                                if mpars['specimen_direction_type']=='Error':break
                                PmagSpecRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
                                PmagSpecRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
                                if "SO-NO" not in method_codes:
                                    PmagSpecRec["specimen_tilt_correction"]='100'
                                    method_codes.append("DA-DIR-TILT")
                                else:
                                    PmagSpecRec["specimen_tilt_correction"]='-1'
                                pmagplotlib.plot_dir(ZED,mpars,tiltblock,angle)
                                if verbose:pmagplotlib.draw_figs(ZED)
                            if geo==0 and tilt==0:
                                mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
                                if mpars['specimen_direction_type']=='Error':break
                                PmagSpecRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
                                PmagSpecRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
                                PmagSpecRec["specimen_tilt_correction"]='-1'
                                pmagplotlib.plot_dir(ZED,mpars,plotblock,angle)
                                if verbose:pmagplotlib.draw_figs(ZED)
                            PmagSpecRec["measurement_step_min"]='%8.3e ' %(mpars["measurement_step_min"])
                            PmagSpecRec["measurement_step_max"]='%8.3e ' %(mpars["measurement_step_max"])
                            PmagSpecRec["specimen_correction"]='u'
                            PmagSpecRec["specimen_dang"]='%7.1f ' %(mpars['specimen_dang'])
                            print('DANG: ',PmagSpecRec["specimen_dang"])
                            if calculation_type!='DE-FM':
                                PmagSpecRec["specimen_mad"]='%7.1f ' %(mpars["specimen_mad"])
                                PmagSpecRec["specimen_alpha95"]=""
                            else:
                                PmagSpecRec["specimen_alpha95"]='%7.1f ' %(mpars["specimen_alpha95"])
                                PmagSpecRec["specimen_mad"]=""
                            PmagSpecRec["specimen_n"]='%i ' %(mpars["specimen_n"])
                            PmagSpecRec["specimen_direction_type"]=direction_type
                            PmagSpecRec["calculation_type"]=calculation_type # redundant and won't be imported - just for convenience
                            method_codes=PmagSpecRec["magic_method_codes"].split(':')
                            if len(method_codes) != 0:
                                methstring=""
                                for meth in method_codes:
                                    ctype=meth.split('-')
                                    if 'DE' not in ctype:methstring=methstring+ ":" +meth # don't include old direction estimation methods
                                methstring=methstring+':'+calculation_type
                                PmagSpecRec["magic_method_codes"]= methstring.strip(':')
                            print('Method codes: ',PmagSpecRec['magic_method_codes'])
                            if calculation_type!='DE-FM':
                                if units=='K':
                                    print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])-273,float(PmagSpecRec["measurement_step_max"])-273,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                                elif units== 'T':
                                    print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])*1e3,float(PmagSpecRec["measurement_step_max"])*1e3,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                                elif 'T' in units and 'K' in units:
                                    if float(PmagSpecRec['measurement_step_min'])<1.0 :
                                        min=float(PmagSpecRec['measurement_step_min'])*1e3
                                    else:
                                        min=float(PmagSpecRec['measurement_step_min'])-273
                                    if float(PmagSpecRec['measurement_step_max'])<1.0 :
                                        max=float(PmagSpecRec['measurement_step_max'])*1e3
                                    else:
                                        max=float(PmagSpecRec['measurement_step_max'])-273
                                    print('%s %i %7.1f %i %i %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),min,max,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                                else:
                                    print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"]),float(PmagSpecRec["measurement_step_max"]),float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                            else:
                                if 'K' in units:
                                    print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])-273,float(PmagSpecRec["measurement_step_max"])-273,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                                elif 'T' in units:
                                    print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])*1e3,float(PmagSpecRec["measurement_step_max"])*1e3,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                                elif 'T' in units and 'K' in units:
                                    if float(PmagSpecRec['measurement_step_min'])<1.0 :
                                        min=float(PmagSpecRec['measurement_step_min'])*1e3
                                    else:
                                        min=float(PmagSpecRec['measurement_step_min'])-273
                                    if float(PmagSpecRec['measurement_step_max'])<1.0 :
                                        max=float(PmagSpecRec['measurement_step_max'])*1e3
                                    else:
                                        max=float(PmagSpecRec['measurement_step_max'])-273
                                    print('%s %i %7.1f %i %i %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),min,max,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                                else:
                                    print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),float(PmagSpecRec["measurement_step_min"]),float(PmagSpecRec["measurement_step_max"]),float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type))
                            saveit=input("Save this interpretation? [y]/n \n")
                            if saveit!="n":
                                changeS=1
#
#  put in details
#
                                angle,direction_type,setangle="","",0
                                if len(CurrRecs)>0:
                                    replace=input(" [0] add new component, or [1] replace existing interpretation(s) [default is replace] ")
                                    if replace=="1" or replace=="":
                                        CurrRecs=[]
                                        PmagSpecRec['specimen_comp_name']='A'
                                        CurrRecs.append(PmagSpecRec)
                                    else:
                                        print('These are the current component names for this specimen: ')
                                        for trec in CurrRecs:print(trec['specimen_comp_name'])
                                        compnum=input("Enter new component name: ")
                                        PmagSpecRec['specimen_comp_name']=compnum
                                        print("Adding new component: ",PmagSpecRec['specimen_comp_name'])
                                        CurrRecs.append(PmagSpecRec)
                                else:
                                    PmagSpecRec['specimen_comp_name']='A'
                                    CurrRecs.append(PmagSpecRec)
                                k+=1
                                ans=""
                            else:
                                ans=""
                else: # plots=1
                    k+=1
                    files={}
                    # NOTE(review): str.replace returns a new string and the
                    # result is discarded here, so '/' is never actually
                    # removed from locname -- confirm whether an assignment
                    # (locname = locname.replace(...)) was intended.
                    locname.replace('/','-')
                    print(PmagSpecRec)
                    for key in ZED.keys():
                        files[key]="LO:_"+locname+'_SI:_'+PmagSpecRec['er_site_name']+'_SA:_'+PmagSpecRec['er_sample_name']+'_SP:_'+s+'_CO:_'+coord+'_TY:_'+key+'_.'+fmt
                    if pmagplotlib.isServer:
                        black = '#000000'
                        purple = '#800080'
                        titles={}
                        titles['demag']='DeMag Plot'
                        titles['zijd']='Zijderveld Plot'
                        titles['eqarea']='Equal Area Plot'
                        ZED = pmagplotlib.add_borders(ZED,titles,black,purple)
                    pmagplotlib.save_plots(ZED,files)
            # fold this specimen's interpretations back into the master list
            if len(CurrRecs)>0:
                for rec in CurrRecs: PriorRecs.append(rec)
            if changeS==1:
                if len(PriorRecs)>0:
                    save_redo(PriorRecs,inspec)
                else:
                    os.system('rm '+inspec)
            CurrRecs,beg_pca,end_pca=[],"","" # next up
            changeS=0
        else: k+=1 # skip record - not enough data
    if changeM==1:
        pmag.magic_write(meas_file,meas_data,'magic_measurements')
# Standard CLI entry point.
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
"""
Create a batch configuration script to submit to a cluster.
"""
from __future__ import division, absolute_import, print_function

import os
import sys
import argparse
import glob

import yaml
import healpy as hp
import numpy as np

import redmapper
import redmapper.parsl_templates as parsl_templates
def create_batchconfig(filename):
    """Write a template batch configuration file.

    The template contains a single example section (``batchname``) with the
    keys that ``load_batchconfig`` expects; the user is meant to edit it.

    Parameters
    ----------
    filename : `str`
        Path of the configuration file to create.
    """
    template = """
batchname:
  setup: ''
  batch: 'lsf'
  requirements: ''
"""
    with open(filename, 'w') as fh:
        fh.write(template)
def load_batchconfig(filename):
    """
    Load a batch configuration file.

    Every section must define ``batch``; all other keys are filled in with
    defaults when absent.

    Parameters
    ----------
    filename: `str`
        Filename of batch configuration file

    Returns
    -------
    yaml_data: `dict`
        Dict of parameters from configuration file.
    """
    with open(filename) as fh:
        sections = yaml.load(fh, Loader=yaml.SafeLoader)

    # Optional keys and their default values, applied to every section.
    defaults = (('setup', ''),
                ('requirements', ''),
                ('parsl_provider', 'local'),
                ('image', ''),
                ('constraint', ''),
                ('qos', ''))

    for section_name in sections.keys():
        section = sections[section_name]
        if 'batch' not in section:
            raise ValueError("Missing 'batch' key for %s section in %s." % (section_name, filename))
        for key, default in defaults:
            section.setdefault(key, default)

    return sections
# Bootstrap the per-user batch configuration.  On first use, write a template
# file and stop so the user can edit it before running again.
batchconfigfile = os.path.join(os.environ['HOME'], '.redmapper_batch.yml')

if not os.path.isfile(batchconfigfile):
    create_batchconfig(batchconfigfile)
    print("Please edit %s with batch configuration and rerun." % (batchconfigfile))
    # BUG FIX: execution previously fell through and built a job file from
    # the unedited template; the message asks the user to rerun, so exit.
    sys.exit(1)

batchconfig = load_batchconfig(batchconfigfile)

# --batchmode on the command line is only mandatory when more than one
# batch section is defined in the configuration file.
if len(batchconfig) > 1:
    mode_required = True
else:
    mode_required = False
# Command-line interface.  -b/--batchmode is only required when the batch
# configuration file defines more than one section (see mode_required above).
parser = argparse.ArgumentParser(description="Create a batch file for running redmapper codes")
parser.add_argument('-c', '--configfile', action='store', type=str, required=True,
                    help='YAML config file')
parser.add_argument('-r', '--runmode', action='store', type=int, required=True,
                    help='Run mode. 0 is full finder run. 1 is zred run.')
parser.add_argument('-b', '--batchmode', action='store', type=str, required=mode_required,
                    help='Batch mode, defined in ~/.redmapper_batch.yml')
parser.add_argument('-w', '--walltime', action='store', type=int, required=False,
                    help='Wall time (override default)')
parser.add_argument('-n', '--nside', action='store', type=int, required=False,
                    help='Parallelization nside (optional, can use default)')
parser.add_argument('-N', '--nodes', action='store', type=int, required=False,
                    default=2, help='Number of nodes to run (for nersc)')

args = parser.parse_args()

# A single configured section acts as the implicit default batch mode.
if not mode_required and args.batchmode is None:
    batchmode = list(batchconfig.keys())[0]
else:
    batchmode = args.batchmode
# Read in the config file
config = redmapper.Configuration(args.configfile)

# Batch mode splits the sky itself, so the config must not pre-select pixels.
if len(config.hpix) != 0:
    raise ValueError("Cannot run redmapper in batch mode with hpix not an empty list (full sky)")

# Check the nside
nside = args.nside

# Each runmode maps to a job type with its own default walltime (minutes)
# and memory request (MB); nside defaults to 8 everywhere.
if args.runmode == 0:
    # This is a full run
    if nside is None:
        nside = 8
    jobtype = 'run'
    default_walltime = 72*60
    memory = 6000
elif args.runmode == 1:
    # This is a zred run
    if nside is None:
        nside = 8
    jobtype = 'zred'
    default_walltime = 5*60
    memory = 2000
elif args.runmode == 2:
    # This is a runcat run
    if nside is None:
        nside = 8
    jobtype = 'runcat'
    default_walltime = 10*60
    memory = 4000
elif args.runmode == 3:
    # This is a random/zmask run
    if nside is None:
        nside = 8
    jobtype = 'zmask'
    default_walltime = 10*60
    memory = 4000
elif args.runmode == 4:
    # This is a zscan run
    if nside is None:
        nside = 8
    jobtype = 'zscan'
    default_walltime = 10*60
    memory = 4000
else:
    raise RuntimeError("Unsupported runmode: %d" % (args.runmode))

# Command-line walltime overrides the per-runmode default.
if args.walltime is None:
    walltime = default_walltime
else:
    walltime = args.walltime

jobname = '%s_%s' % (config.outbase, jobtype)

# Determine which pixels overlap the galaxy file...
tab = redmapper.Entry.from_fits_file(config.galfile)
theta, phi = hp.pix2ang(tab.nside, tab.hpix)
# Degrade the galaxy-file pixels to the run nside; one job element per pixel.
hpix_run = np.unique(hp.ang2pix(nside, theta, phi))

# Make the batch script in a "jobs" directory
cwd = os.getcwd()
jobpath = os.path.join(cwd, 'jobs')
if not os.path.isdir(jobpath):
    os.makedirs(jobpath)

# Will want to check for previous (failed) jobs
# Count existing job files so the new one gets the next sequence number.
if batchconfig[batchmode]['batch'] == 'parsl':
    test = glob.glob(os.path.join(jobpath, '%s_?.py' % (jobname)))
else:
    test = glob.glob(os.path.join(jobpath, '%s_?.job' % (jobname)))
index = len(test)
need_maskgals = False
# Build the per-pixel command line.  '%%s' leaves a literal '%s' placeholder
# for the healpix pixel number, filled in when the job script is written.
if args.runmode == 0:
    # Run in the directory where the config file is, by default
    run_command = 'redmapper_run_redmapper_pixel.py -c %s -p %%s -n %d -d %s' % (
        (os.path.abspath(args.configfile),
         nside,
         os.path.dirname(os.path.abspath(args.configfile))))
    need_maskgals = True
elif args.runmode == 1:
    run_command = 'redmapper_run_zred_pixel.py -c %s -p %%s -n %d -d %s' % (
        (os.path.abspath(args.configfile),
         nside,
         os.path.dirname(os.path.abspath(args.configfile))))
elif args.runmode == 2:
    run_command = 'redmapper_runcat_pixel.py -c %s -p %%s -n %d -d %s' % (
        (os.path.abspath(args.configfile),
         nside,
         os.path.dirname(os.path.abspath(args.configfile))))
    need_maskgals = True
elif args.runmode == 3:
    run_command = 'redmapper_run_zmask_pixel.py -c %s -p %%s -n %d -d %s' % (
        (os.path.abspath(args.configfile),
         nside,
         os.path.dirname(os.path.abspath(args.configfile))))
    need_maskgals = True
elif args.runmode == 4:
    run_command = 'redmapper_run_zscan_pixel.py -c %s -p %%s -n %d -d %s' % (
        (os.path.abspath(args.configfile),
         nside,
         os.path.dirname(os.path.abspath(args.configfile))))
    need_maskgals = True

if need_maskgals:
    # Check to see if maskgals are there, and generate them if not.
    # Generating here avoids a race where many batch jobs try to create
    # the maskgal file simultaneously.
    if not os.path.isfile(config.maskgalfile):
        print("Did not find maskgalfile %s. Generating now." % (config.maskgalfile))
        mask = redmapper.mask.get_mask(config, include_maskgals=False)
        mask.gen_maskgals(config.maskgalfile)
# Parsl jobs are python scripts; LSF/PBS jobs are shell submission files.
if batchconfig[batchmode]['batch'] == 'parsl':
    jobfile = os.path.join(jobpath, '%s_%d.py' % (jobname, index + 1))
else:
    jobfile = os.path.join(jobpath, '%s_%d.job' % (jobname, index + 1))

with open(jobfile, 'w') as jf:
    # Job-array schedulers get a bash pixel array appended below; parsl
    # embeds the pixel list in the generated python script instead.
    write_jobarray = True
    if (batchconfig[batchmode]['batch'] == 'lsf'):
        # LSF mode: one array element per run pixel.
        jf.write("#BSUB -R '%s'\n" % (batchconfig[batchmode]['requirements']))
        jf.write("#BSUB -R 'rusage[mem=%d]'\n" % (memory))
        jf.write("#BSUB -J %s[1-%d]\n" % (jobname, hpix_run.size))
        jf.write("#BSUB -oo %s\n" % (os.path.join(jobpath, '%s_%%J_%%I.log' % (jobname))))
        jf.write("#BSUB -n 1\n")
        jf.write("#BSUB -W %d\n\n" % (walltime))
        index_string = '${pixarr[LSB_JOBINDEX-1]}'
    elif (batchconfig[batchmode]['batch'] == 'pbs'):
        # PBS mode
        ppn = batchconfig[batchmode]['ppn']
        n_nodes = int(np.ceil(float(hpix_run.size) / float(ppn)))
        jf.write("#PBS -q %s\n" % (batchconfig[batchmode]['queue']))
        jf.write("#PBS -l nodes=%d:ppn=%d\n" % (n_nodes, ppn))
        jf.write("#PBS -l walltime=%d:00:00\n" % (int(walltime / 60)))
        jf.write("#PBS -l mem=%dmb\n" % (memory))
        jf.write("#PBS -j oe\n")
        jf.write('N_CPU=%d\n' % (n_nodes * batchconfig[batchmode]['ppn']))
        # NOTE(review): this branch never assigns index_string, so the
        # "cmd = run_command % (index_string)" line at the bottom raises
        # NameError for PBS jobs.  Confirm the intended PBS pixel-index
        # variable before relying on this path.
    elif (batchconfig[batchmode]['batch'] == 'parsl'):
        write_jobarray = False
        if batchconfig[batchmode]['parsl_provider'] == 'local':
            parsl_config = parsl_templates.PARSL_LOCAL_CONFIG_TEMPLATE
        elif batchconfig[batchmode]['parsl_provider'] == 'slurm':
            parsl_config = parsl_templates.PARSL_SLURM_CONFIG_TEMPLATE.format(
                nodes=args.nodes,
                constraint=batchconfig[batchmode]['constraint'],
                qos=batchconfig[batchmode]['qos'],
                walltime=walltime
            )
        else:
            raise RuntimeError("Invalid parsl_provider (requires either local or slurm).")

        # '{pixel}' is substituted per-pixel inside the parsl run template.
        cmd = run_command % ('{pixel}')
        if batchconfig[batchmode]['image'] != '':
            # We are using a shifter image
            image = batchconfig[batchmode]['image']
            parsl_command = f'shifter --image={image} /bin/bash -c ". /opt/redmapper/startup.sh && {cmd}"'
        else:
            # No shifter image
            parsl_command = cmd

        hpix_run_str = [str(hpix) for hpix in hpix_run]
        hpix_list_str = "[" + ', '.join(hpix_run_str) + "]"

        parsl_script = parsl_templates.PARSL_RUN_TEMPLATE.format(
            parsl_config=parsl_config,
            parsl_command=parsl_command,
            memory=memory,
            hpix_list_str=hpix_list_str,
            jobname=jobname
        )
        jf.write(parsl_script)
    elif (batchconfig[batchmode]['batch'] == 'slurm'):
        raise NotImplementedError("Basic slurm submission not implemented yet. Use parsl")
    else:
        # Nothing else supported
        raise RuntimeError("Only LSF, PBS, parsl/slurm, and parsl/local supported at this time.")

    if write_jobarray:
        # Emit the pixel list as a bash array plus the per-element command.
        jf.write("pixarr=(")
        for hpix in hpix_run:
            jf.write("%d " % (hpix))
        jf.write(")\n\n")
        jf.write("%s\n\n" % (batchconfig[batchmode]['setup']))
        cmd = run_command % (index_string)
        jf.write("%s\n" % (cmd))
| |
# -*- coding: utf-8 -*-
'''
SoftLayer Cloud Module
======================
The SoftLayer cloud module is used to control access to the SoftLayer VPS
system.
Use of this module only requires the ``apikey`` parameter. Set up the cloud
configuration at:
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/softlayer.conf``:
.. code-block:: yaml
my-softlayer-config:
# SoftLayer account api key
user: MYLOGIN
apikey: JVkbSJDGHSDKUKSDJfhsdklfjgsjdkflhjlsdfffhgdgjkenrtuinv
driver: softlayer
The SoftLayer Python Library needs to be installed in order to use the
SoftLayer salt.cloud modules. See: https://pypi.python.org/pypi/SoftLayer
:depends: softlayer
'''
# Import python libs
from __future__ import absolute_import
import logging
import time
# Import salt cloud libs
import salt.utils.cloud
import salt.config as config
from salt.exceptions import SaltCloudSystemExit
# Attempt to import softlayer lib
try:
import SoftLayer
HAS_SLLIBS = True
except ImportError:
HAS_SLLIBS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'softlayer'
# Only load in this module if the SoftLayer configurations are in place
def __virtual__():
    '''
    Check for SoftLayer configurations.

    Only load this module when both the provider configuration and the
    SoftLayer library are available.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # 'apikey' is the only strictly required provider parameter.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey',)
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # HAS_SLLIBS reflects whether the 'SoftLayer' package imported at the top
    # of this module.
    return config.check_driver_dependencies(
        __virtualname__,
        {'softlayer': HAS_SLLIBS}
    )
def script(vm_):
    '''
    Return the script deployment object
    '''
    # Render the deploy script configured for this VM, passing the minion
    # configuration as YAML so the bootstrap can install it.
    deploy_script = salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        salt.utils.cloud.salt_config_to_yaml(
            salt.utils.cloud.minion_config(__opts__, vm_)
        )
    )
    return deploy_script
def get_conn(service='SoftLayer_Virtual_Guest'):
    '''
    Return a conn object for the passed VM data

    :param service: SoftLayer API service name to bind the client to.
    '''
    provider = get_configured_provider()
    username = config.get_cloud_config_value(
        'user', provider, __opts__, search_global=False
    )
    api_key = config.get_cloud_config_value(
        'apikey', provider, __opts__, search_global=False
    )
    client = SoftLayer.Client(username=username, api_key=api_key)
    return client[service]
def avail_locations(call=None):
    '''
    List all available locations
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    # The create-object options include every datacenter template.
    options = get_conn().getCreateObjectOptions()
    return {
        dc['template']['datacenter']['name']: {
            'name': dc['template']['datacenter']['name'],
        }
        for dc in options['datacenters']
    }
def avail_sizes(call=None):
    '''
    Return a dict of all available VM sizes on the cloud provider with
    relevant data. This data is provided in three dicts.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    options = get_conn().getCreateObjectOptions()

    sizes = {
        'block devices': {},
        'memory': {},
        'processors': {},
    }
    # Each option entry is keyed by its billing-item description.
    for entry in options['blockDevices']:
        label = entry['itemPrice']['item']['description']
        sizes['block devices'][label] = {
            'name': label,
            'capacity':
                entry['template']['blockDevices'][0]['diskImage']['capacity'],
        }
    for entry in options['memory']:
        label = entry['itemPrice']['item']['description']
        sizes['memory'][label] = {
            'name': label,
            'maxMemory': entry['template']['maxMemory'],
        }
    for entry in options['processors']:
        label = entry['itemPrice']['item']['description']
        sizes['processors'][label] = {
            'name': label,
            'start cpus': entry['template']['startCpus'],
        }
    return sizes
def avail_images(call=None):
    '''
    Return a dict of all available VM images on the cloud provider.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    images = {}
    options = get_conn().getCreateObjectOptions()
    for entry in options['operatingSystems']:
        description = entry['itemPrice']['item']['description']
        images[description] = {
            'name': description,
            'template': entry['template']['operatingSystemReferenceCode'],
        }
    return images
def list_custom_images(call=None):
    '''
    Return a dict of all custom VM images on the cloud provider.
    '''
    if call != 'function':
        # BUG FIX: this message previously referred to list_vlans (copy-paste
        # from that function), which made the usage error misleading.
        raise SaltCloudSystemExit(
            'The list_custom_images function must be called with -f or --function.'
        )

    ret = {}
    conn = get_conn('SoftLayer_Account')
    response = conn.getBlockDeviceTemplateGroups()
    for image in response:
        # Only template groups promoted to a global identifier are usable
        # as custom images.
        if 'globalIdentifier' not in image:
            continue
        ret[image['name']] = {
            'id': image['id'],
            'name': image['name'],
            'globalIdentifier': image['globalIdentifier'],
        }
        if 'note' in image:
            ret[image['name']]['note'] = image['note']
    return ret
def get_location(vm_=None):
    '''
    Return the location to use, in this order:
        - CLI parameter
        - VM parameter
        - Cloud profile setting
    '''
    # __opts__['location'] (CLI) wins; otherwise look it up on the VM dict,
    # falling back to the provider configuration when no VM is given.
    return __opts__.get(
        'location',
        config.get_cloud_config_value(
            'location',
            vm_ or get_configured_provider(),
            __opts__,
            #default=DEFAULT_LOCATION,
            search_global=False
        )
    )
def create(vm_):
    '''
    Create a single VM from a data dict.

    ``vm_`` is the profile/VM definition assembled by salt-cloud.  Returns
    the bootstrap result merged with the SoftLayer ``createObject`` response,
    or ``False`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'softlayer',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Since using "provider: <provider-engine>" is deprecated, alias provider
    # to use driver: "driver: <provider-engine>"
    if 'provider' in vm_:
        vm_['driver'] = vm_.pop('provider')

    name = vm_['name']
    hostname = name
    domain = config.get_cloud_config_value(
        'domain', vm_, __opts__, default=None
    )
    if domain is None:
        # BUG FIX: this exception was previously instantiated but never
        # raised, so creation silently continued without a domain.
        raise SaltCloudSystemExit(
            'A domain name is required for the SoftLayer driver.'
        )

    if vm_.get('use_fqdn'):
        name = '.'.join([name, domain])
        vm_['name'] = name

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        {
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM {0}'.format(name))
    conn = get_conn()
    kwargs = {
        'hostname': hostname,
        'domain': domain,
        'startCpus': vm_['cpu_number'],
        'maxMemory': vm_['ram'],
        'hourlyBillingFlag': vm_['hourly_billing'],
    }

    local_disk_flag = config.get_cloud_config_value(
        'local_disk', vm_, __opts__, default=False
    )
    kwargs['localDiskFlag'] = local_disk_flag

    if 'image' in vm_:
        kwargs['operatingSystemReferenceCode'] = vm_['image']
        kwargs['blockDevices'] = []
        disks = vm_['disk_size']

        if isinstance(disks, int):
            disks = [str(disks)]
        elif isinstance(disks, str):
            disks = [size.strip() for size in disks.split(',')]

        count = 0
        for disk in disks:
            # device number '1' is reserved for the SWAP disk
            if count == 1:
                count += 1
            block_device = {'device': str(count),
                            'diskImage': {'capacity': str(disk)}}
            kwargs['blockDevices'].append(block_device)
            count += 1

            # Upper bound must be 5 as we're skipping '1' for the SWAP disk ID
            if count > 5:
                # BUG FIX: warning message typo 'More that' -> 'More than'.
                log.warning('More than 5 disks were specified for {0} .'
                            'The first 5 disks will be applied to the VM, '
                            'but the remaining disks will be ignored.\n'
                            'Please adjust your cloud configuration to only '
                            'specify a maximum of 5 disks.'.format(name))
                break

    elif 'global_identifier' in vm_:
        # Provision from a custom image template instead of a stock OS code.
        kwargs['blockDeviceTemplateGroup'] = {
            'globalIdentifier': vm_['global_identifier']
        }

    location = get_location(vm_)
    if location:
        kwargs['datacenter'] = {'name': location}

    private_vlan = config.get_cloud_config_value(
        'private_vlan', vm_, __opts__, default=False
    )
    if private_vlan:
        kwargs['primaryBackendNetworkComponent'] = {
            'networkVlan': {
                'id': private_vlan,
            }
        }

    private_network = config.get_cloud_config_value(
        'private_network', vm_, __opts__, default=False
    )
    if bool(private_network) is True:
        kwargs['privateNetworkOnlyFlag'] = 'True'

    public_vlan = config.get_cloud_config_value(
        'public_vlan', vm_, __opts__, default=False
    )
    if public_vlan:
        kwargs['primaryNetworkComponent'] = {
            'networkVlan': {
                'id': public_vlan,
            }
        }

    max_net_speed = config.get_cloud_config_value(
        'max_net_speed', vm_, __opts__, default=10
    )
    if max_net_speed:
        kwargs['networkComponents'] = [{
            'maxSpeed': int(max_net_speed)
        }]

    post_uri = config.get_cloud_config_value(
        'post_uri', vm_, __opts__, default=None
    )
    if post_uri:
        kwargs['postInstallScriptUri'] = post_uri

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        {'kwargs': kwargs},
        transport=__opts__['transport']
    )

    try:
        response = conn.createObject(kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on SoftLayer\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n{1}'.format(
                name, str(exc)
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    # Manage over the backend (private) address when the guest has no
    # public connectivity or private management was requested.
    ip_type = 'primaryIpAddress'
    private_ssh = config.get_cloud_config_value(
        'private_ssh', vm_, __opts__, default=False
    )
    private_wds = config.get_cloud_config_value(
        'private_windows', vm_, __opts__, default=False
    )
    if private_ssh or private_wds or public_vlan is None or public_vlan is False:
        ip_type = 'primaryBackendIpAddress'

    def wait_for_ip():
        '''
        Wait for the IP address to become available
        '''
        nodes = list_nodes_full()
        if ip_type in nodes[hostname]:
            return nodes[hostname][ip_type]
        time.sleep(1)
        return False

    ip_address = salt.utils.cloud.wait_for_fun(
        wait_for_ip,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )

    if config.get_cloud_config_value('deploy', vm_, __opts__) is not True:
        return show_instance(hostname, call='action')

    SSH_PORT = 22
    WINDOWS_DS_PORT = 445
    managing_port = SSH_PORT
    if config.get_cloud_config_value('windows', vm_, __opts__) or \
            config.get_cloud_config_value('win_installer', vm_, __opts__):
        managing_port = WINDOWS_DS_PORT

    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout', vm_, __opts__, 15 * 60
    )
    connect_timeout = config.get_cloud_config_value(
        'connect_timeout', vm_, __opts__, ssh_connect_timeout
    )
    if not salt.utils.cloud.wait_for_port(ip_address,
                                          port=managing_port,
                                          timeout=connect_timeout):
        raise SaltCloudSystemExit(
            'Failed to authenticate against remote ssh'
        )

    pass_conn = get_conn(service='SoftLayer_Account')
    mask = {
        'virtualGuests': {
            'powerState': '',
            'operatingSystem': {
                'passwords': ''
            },
        },
    }

    def get_credentials():
        '''
        Wait for the password to become available
        '''
        node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
        for node in node_info:
            if node['id'] == response['id'] and \
                    'passwords' in node['operatingSystem'] and \
                    len(node['operatingSystem']['passwords']) > 0:
                return node['operatingSystem']['passwords'][0]['username'], node['operatingSystem']['passwords'][0]['password']
        time.sleep(5)
        return False

    username, passwd = salt.utils.cloud.wait_for_fun(  # pylint: disable=W0633
        get_credentials,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    response['username'] = username
    response['password'] = passwd
    response['public_ip'] = ip_address

    # NOTE(review): ssh_username is computed but not consumed below --
    # presumably bootstrap reads 'ssh_username' from vm_/__opts__ itself;
    # kept as-is pending confirmation.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default=username
    )

    vm_['ssh_host'] = ip_address
    vm_['password'] = passwd
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)

    ret.update(response)

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        {
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        transport=__opts__['transport']
    )

    return ret
def list_nodes_full(mask='mask[id]', call=None):
    '''
    Return a list of the VMs that are on the provider
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    guests = get_conn(service='SoftLayer_Account').getVirtualGuests()
    # Key each guest by its short hostname (strip any domain suffix).
    ret = {guest['hostname'].split('.')[0]: guest for guest in guests}
    salt.utils.cloud.cache_node_list(
        ret, __active_provider_name__.split(':')[0], __opts__)
    return ret
def list_nodes(call=None):
    '''
    Return a list of the VMs that are on the provider
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    nodes = list_nodes_full()
    if 'error' in nodes:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                nodes['error']['Errors']['Error']['Message']
            )
        )

    ret = {}
    for node_name in nodes:
        node = nodes[node_name]
        summary = {
            'id': node['hostname'],
            'ram': node['maxMemory'],
            'cpus': node['maxCpu'],
        }
        # IP addresses and state are only present once provisioning finishes.
        if 'primaryIpAddress' in node:
            summary['public_ips'] = node['primaryIpAddress']
        if 'primaryBackendIpAddress' in node:
            summary['private_ips'] = node['primaryBackendIpAddress']
        if 'status' in node:
            summary['state'] = str(node['status']['name'])
        ret[node_name] = summary
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    '''
    # Field selection comes from the 'query.selection' master/CLI option.
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(), __opts__['query.selection'], call,
    )
def show_instance(name, call=None):
    '''
    Show the details from SoftLayer concerning a guest
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    node = list_nodes_full()[name]
    salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
    return node
def destroy(name, call=None):
    '''
    Destroy a node.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name},
        transport=__opts__['transport']
    )

    # If the VM was created with use_fqdn, the short hostname will be used instead.
    name = name.split('.')[0]
    # Resolve the SoftLayer guest id before deleting.
    node = show_instance(name, call='action')
    conn = get_conn()
    response = conn.deleteObject(id=node['id'])

    salt.utils.cloud.fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        {'name': name},
        transport=__opts__['transport']
    )

    # Drop the minion from the salt-cloud cache when cache updating is on.
    if __opts__.get('update_cachedir', False) is True:
        salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)

    return response
def list_vlans(call=None):
    '''
    List all VLANs associated with the account
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_vlans function must be called with -f or --function.'
        )

    account = get_conn(service='SoftLayer_Account')
    return account.getNetworkVlans()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import struct
import uuid
from cryptography import fernet
import msgpack
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from six.moves import map, urllib
from keystone.auth import plugins as auth_plugins
from keystone.common import utils as ks_utils
from keystone import exception
from keystone.i18n import _, _LI
from keystone.token import provider
from keystone.token.providers.fernet import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Fernet byte indexes as as computed by pypi/keyless_fernet and defined in
# https://github.com/fernet/spec
TIMESTAMP_START = 1
TIMESTAMP_END = 9
class TokenFormatter(object):
    """Packs and unpacks payloads into tokens for transport."""

    @property
    def crypto(self):
        """Return a cryptography instance.

        You can extend this class with a custom crypto @property to provide
        your own token encoding / decoding. For example, using a different
        cryptography library (e.g. ``python-keyczar``) or to meet arbitrary
        security requirements.

        This @property just needs to return an object that implements
        ``encrypt(plaintext)`` and ``decrypt(ciphertext)``.

        """
        keys = utils.load_keys()

        if not keys:
            raise exception.KeysNotFound()

        # MultiFernet encrypts with the first key and tries every loaded key
        # on decrypt, which supports graceful key rotation.
        fernet_instances = [fernet.Fernet(key) for key in keys]
        return fernet.MultiFernet(fernet_instances)

    def pack(self, payload):
        """Pack a payload for transport as a token."""
        # base64 padding (if any) is not URL-safe
        return urllib.parse.quote(self.crypto.encrypt(payload))

    def unpack(self, token):
        """Unpack a token, and validate the payload."""
        # Undo the URL-safe quoting applied by pack() before decrypting.
        token = urllib.parse.unquote(six.binary_type(token))

        try:
            return self.crypto.decrypt(token)
        except fernet.InvalidToken:
            raise exception.ValidationError(
                _('This is not a recognized Fernet token'))

    @classmethod
    def creation_time(cls, fernet_token):
        """Returns the creation time of a valid Fernet token."""
        # tokens may be transmitted as Unicode, but they're just ASCII
        # (pypi/cryptography will refuse to operate on Unicode input)
        fernet_token = six.binary_type(fernet_token)

        # the base64 padding on fernet tokens is made URL-safe
        fernet_token = urllib.parse.unquote(fernet_token)

        # fernet tokens are base64 encoded and the padding made URL-safe
        token_bytes = base64.urlsafe_b64decode(fernet_token)

        # slice into the byte array to get just the timestamp
        timestamp_bytes = token_bytes[TIMESTAMP_START:TIMESTAMP_END]

        # convert those bytes to an integer
        # (it's a 64-bit "unsigned long long int" in C)
        timestamp_int = struct.unpack(">Q", timestamp_bytes)[0]

        # and with an integer, it's trivial to produce a datetime object
        created_at = datetime.datetime.utcfromtimestamp(timestamp_int)

        return created_at

    def create_token(self, user_id, expires_at, audit_ids, methods=None,
                     domain_id=None, project_id=None, trust_id=None,
                     federated_info=None):
        """Given a set of payload attributes, generate a Fernet token."""
        # Select the payload variant matching the supplied scope; precedence
        # here mirrors the dispatch order in validate_token below.
        if trust_id:
            version = TrustScopedPayload.version
            payload = TrustScopedPayload.assemble(
                user_id,
                methods,
                project_id,
                expires_at,
                audit_ids,
                trust_id)
        elif federated_info:
            version = FederatedPayload.version
            payload = FederatedPayload.assemble(
                user_id,
                methods,
                expires_at,
                audit_ids,
                federated_info)
        elif project_id:
            version = ProjectScopedPayload.version
            payload = ProjectScopedPayload.assemble(
                user_id,
                methods,
                project_id,
                expires_at,
                audit_ids)
        elif domain_id:
            version = DomainScopedPayload.version
            payload = DomainScopedPayload.assemble(
                user_id,
                methods,
                domain_id,
                expires_at,
                audit_ids)
        else:
            version = UnscopedPayload.version
            payload = UnscopedPayload.assemble(
                user_id,
                methods,
                expires_at,
                audit_ids)

        # The version is prepended so validate_token can pick the right
        # disassembler after msgpack decoding.
        versioned_payload = (version,) + payload
        serialized_payload = msgpack.packb(versioned_payload)
        token = self.pack(serialized_payload)

        # NOTE(lbragstad): We should warn against Fernet tokens that are over
        # 255 characters in length. This is mostly due to persisting the tokens
        # in a backend store of some kind that might have a limit of 255
        # characters. Even though Keystone isn't storing a Fernet token
        # anywhere, we can't say it isn't being stored somewhere else with
        # those kind of backend constraints.
        if len(token) > 255:
            LOG.info(_LI('Fernet token created with length of %d '
                         'characters, which exceeds 255 characters'),
                     len(token))

        return token

    def validate_token(self, token):
        """Validates a Fernet token and returns the payload attributes."""
        # Convert v2 unicode token to a string
        if not isinstance(token, six.binary_type):
            token = token.encode('ascii')

        serialized_payload = self.unpack(token)
        versioned_payload = msgpack.unpackb(serialized_payload)
        # First element is the payload-variant version; the rest is the data.
        version, payload = versioned_payload[0], versioned_payload[1:]

        # depending on the formatter, these may or may not be defined
        domain_id = None
        project_id = None
        trust_id = None
        federated_info = None

        if version == UnscopedPayload.version:
            (user_id, methods, expires_at, audit_ids) = (
                UnscopedPayload.disassemble(payload))
        elif version == DomainScopedPayload.version:
            (user_id, methods, domain_id, expires_at, audit_ids) = (
                DomainScopedPayload.disassemble(payload))
        elif version == ProjectScopedPayload.version:
            (user_id, methods, project_id, expires_at, audit_ids) = (
                ProjectScopedPayload.disassemble(payload))
        elif version == TrustScopedPayload.version:
            (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
                TrustScopedPayload.disassemble(payload))
        elif version == FederatedPayload.version:
            (user_id, methods, expires_at, audit_ids, federated_info) = (
                FederatedPayload.disassemble(payload))
        else:
            # If the token_format is not recognized, raise ValidationError.
            raise exception.ValidationError(_(
                'This is not a recognized Fernet payload version: %s') %
                version)

        # rather than appearing in the payload, the creation time is encoded
        # into the token format itself
        created_at = TokenFormatter.creation_time(token)
        created_at = ks_utils.isotime(at=created_at, subsecond=True)
        expires_at = timeutils.parse_isotime(expires_at)
        expires_at = ks_utils.isotime(at=expires_at, subsecond=True)

        return (user_id, methods, audit_ids, domain_id, project_id, trust_id,
                federated_info, created_at, expires_at)
class BasePayload(object):
# each payload variant should have a unique version
version = None
@classmethod
def assemble(cls, *args):
"""Assemble the payload of a token.
:param args: whatever data should go into the payload
:returns: the payload of a token
"""
raise NotImplementedError()
@classmethod
def disassemble(cls, payload):
"""Disassemble an unscoped payload into the component data.
:param payload: this variant of payload
:returns: a tuple of the payloads component data
"""
raise NotImplementedError()
@classmethod
def convert_uuid_hex_to_bytes(cls, uuid_string):
"""Compress UUID formatted strings to bytes.
:param uuid_string: uuid string to compress to bytes
:returns: a byte representation of the uuid
"""
# TODO(lbragstad): Wrap this in an exception. Not sure what the case
# would be where we couldn't handle what we've been given but incase
# the integrity of the token has been compromised.
uuid_obj = uuid.UUID(uuid_string)
return uuid_obj.bytes
@classmethod
def convert_uuid_bytes_to_hex(cls, uuid_byte_string):
"""Generate uuid.hex format based on byte string.
:param uuid_byte_string: uuid string to generate from
:returns: uuid hex formatted string
"""
# TODO(lbragstad): Wrap this in an exception. Not sure what the case
# would be where we couldn't handle what we've been given but incase
# the integrity of the token has been compromised.
uuid_obj = uuid.UUID(bytes=uuid_byte_string)
return uuid_obj.hex
@classmethod
def _convert_time_string_to_int(cls, time_string):
"""Convert a time formatted string to a timestamp integer.
:param time_string: time formatted string
:returns: an integer timestamp
"""
time_object = timeutils.parse_isotime(time_string)
return (timeutils.normalize_time(time_object) -
datetime.datetime.utcfromtimestamp(0)).total_seconds()
@classmethod
def _convert_int_to_time_string(cls, time_int):
"""Convert a timestamp integer to a string.
:param time_int: integer representing timestamp
:returns: a time formatted strings
"""
time_object = datetime.datetime.utcfromtimestamp(time_int)
return ks_utils.isotime(time_object, subsecond=True)
    @classmethod
    def attempt_convert_uuid_hex_to_bytes(cls, value):
        """Attempt to convert value to bytes or return value.

        :param value: value to attempt to convert to bytes
        :returns: the uuid value as bytes, or the original value unchanged
            when it is not a valid UUID hex string
        """
        try:
            return cls.convert_uuid_hex_to_bytes(value)
        except ValueError:
            # this might not be a UUID, depending on the situation (i.e.
            # federation)
            return value
    @classmethod
    def attempt_convert_uuid_bytes_to_hex(cls, value):
        """Attempt to convert value to hex or return value.

        :param value: value to attempt to convert to hex
        :returns: the uuid value in hex form, or the original value
            unchanged when it is not a 16-byte uuid representation
        """
        try:
            return cls.convert_uuid_bytes_to_hex(value)
        except ValueError:
            # value was not packed as a uuid; pass it through unchanged
            return value
class UnscopedPayload(BasePayload):
    # Version marker distinguishing unscoped payloads from the other
    # payload variants.
    version = 0

    @classmethod
    def assemble(cls, user_id, methods, expires_at, audit_ids):
        """Assemble the payload of an unscoped token.

        :param user_id: identifier of the user in the token request
        :param methods: list of authentication methods used
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :returns: the payload of an unscoped token
        """
        return (
            cls.attempt_convert_uuid_hex_to_bytes(user_id),
            auth_plugins.convert_method_list_to_integer(methods),
            cls._convert_time_string_to_int(expires_at),
            [provider.random_urlsafe_str_to_bytes(a) for a in audit_ids],
        )

    @classmethod
    def disassemble(cls, payload):
        """Disassemble an unscoped payload into the component data.

        :param payload: the payload of an unscoped token
        :return: a tuple containing the user_id, auth methods, expires_at,
            and audit_ids
        """
        return (
            cls.attempt_convert_uuid_bytes_to_hex(payload[0]),
            auth_plugins.convert_integer_to_method_list(payload[1]),
            cls._convert_int_to_time_string(payload[2]),
            [provider.base64_encode(a) for a in payload[3]],
        )
class DomainScopedPayload(BasePayload):
    # Version marker distinguishing domain-scoped payloads.
    version = 1

    @classmethod
    def assemble(cls, user_id, methods, domain_id, expires_at, audit_ids):
        """Assemble the payload of a domain-scoped token.

        :param user_id: ID of the user in the token request
        :param methods: list of authentication methods used
        :param domain_id: ID of the domain to scope to
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :returns: the payload of a domain-scoped token
        """
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        try:
            b_domain_id = cls.convert_uuid_hex_to_bytes(domain_id)
        except ValueError:
            # the default domain ID is configurable, and probably isn't a UUID
            if domain_id == CONF.identity.default_domain_id:
                b_domain_id = domain_id
            else:
                raise
        expires_at_int = cls._convert_time_string_to_int(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                               audit_ids))
        return (b_user_id, methods, b_domain_id, expires_at_int, b_audit_ids)

    @classmethod
    def disassemble(cls, payload):
        """Disassemble a domain-scoped payload into the component data.

        :param payload: the payload of a domain-scoped token
        :return: a tuple containing the user_id, auth methods, domain_id,
            expires_at_str, and audit_ids
        """
        user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        try:
            domain_id = cls.convert_uuid_bytes_to_hex(payload[2])
        except ValueError:
            # the default domain ID is configurable, and probably isn't a UUID
            if payload[2] == CONF.identity.default_domain_id:
                domain_id = payload[2]
            else:
                raise
        expires_at_str = cls._convert_int_to_time_string(payload[3])
        audit_ids = list(map(provider.base64_encode, payload[4]))
        return (user_id, methods, domain_id, expires_at_str, audit_ids)
class ProjectScopedPayload(BasePayload):
    # Version marker distinguishing project-scoped payloads.
    version = 2

    @classmethod
    def assemble(cls, user_id, methods, project_id, expires_at, audit_ids):
        """Assemble the payload of a project-scoped token.

        :param user_id: ID of the user in the token request
        :param methods: list of authentication methods used
        :param project_id: ID of the project to scope to
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :returns: the payload of a project-scoped token
        """
        return (
            cls.attempt_convert_uuid_hex_to_bytes(user_id),
            auth_plugins.convert_method_list_to_integer(methods),
            cls.attempt_convert_uuid_hex_to_bytes(project_id),
            cls._convert_time_string_to_int(expires_at),
            [provider.random_urlsafe_str_to_bytes(a) for a in audit_ids],
        )

    @classmethod
    def disassemble(cls, payload):
        """Disassemble a project-scoped payload into the component data.

        :param payload: the payload of a project-scoped token
        :return: a tuple containing the user_id, auth methods, project_id,
            expires_at_str, and audit_ids
        """
        return (
            cls.attempt_convert_uuid_bytes_to_hex(payload[0]),
            auth_plugins.convert_integer_to_method_list(payload[1]),
            cls.attempt_convert_uuid_bytes_to_hex(payload[2]),
            cls._convert_int_to_time_string(payload[3]),
            [provider.base64_encode(a) for a in payload[4]],
        )
class TrustScopedPayload(BasePayload):
    # Version marker distinguishing trust-scoped payloads.
    version = 3

    @classmethod
    def assemble(cls, user_id, methods, project_id, expires_at, audit_ids,
                 trust_id):
        """Assemble the payload of a trust-scoped token.

        :param user_id: ID of the user in the token request
        :param methods: list of authentication methods used
        :param project_id: ID of the project to scope to
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :param trust_id: ID of the trust in effect
        :returns: the payload of a trust-scoped token
        """
        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
        # NOTE(review): no fallback is attempted here, unlike user/project
        # IDs — presumably trust IDs are always UUIDs; confirm upstream.
        b_trust_id = cls.convert_uuid_hex_to_bytes(trust_id)
        expires_at_int = cls._convert_time_string_to_int(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                               audit_ids))
        return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids,
                b_trust_id)

    @classmethod
    def disassemble(cls, payload):
        """Disassemble a trust-scoped payload into the component data.

        :param payload: the payload of a trust-scoped token
        :returns: a tuple containing the user_id, auth methods, project_id,
            expires_at_str, audit_ids, and trust_id
        """
        user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        project_id = cls.attempt_convert_uuid_bytes_to_hex(payload[2])
        expires_at_str = cls._convert_int_to_time_string(payload[3])
        audit_ids = list(map(provider.base64_encode, payload[4]))
        trust_id = cls.convert_uuid_bytes_to_hex(payload[5])
        return (user_id, methods, project_id, expires_at_str, audit_ids,
                trust_id)
class FederatedPayload(BasePayload):
    # Version marker distinguishing federated payloads.
    version = 4

    @classmethod
    def assemble(cls, user_id, methods, expires_at, audit_ids, federated_info):
        """Assemble the payload of a federated token.

        :param user_id: ID of the user in the token request
        :param methods: list of authentication methods used
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :param federated_info: dictionary containing group IDs, the identity
                               provider ID, protocol ID, and federated domain
                               ID
        :returns: the payload of a federated token
        """
        def pack_group_ids(group_dict):
            # groups arrive as {'id': ...} dicts; only the id is packed
            return cls.attempt_convert_uuid_hex_to_bytes(group_dict['id'])

        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        methods = auth_plugins.convert_method_list_to_integer(methods)
        b_group_ids = list(map(pack_group_ids, federated_info['group_ids']))
        b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
            federated_info['idp_id'])
        protocol_id = federated_info['protocol_id']
        expires_at_int = cls._convert_time_string_to_int(expires_at)
        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
                               audit_ids))
        return (b_user_id, methods, b_group_ids, b_idp_id, protocol_id,
                expires_at_int, b_audit_ids)

    @classmethod
    def disassemble(cls, payload):
        """Disassemble a federated payload into the component data.

        :param payload: the payload of a federated token
        :return: a tuple containing the user_id, auth methods, expires_at,
            audit_ids, and a dictionary of federated information containing
            the group IDs, the identity provider ID, and the protocol ID
        """
        def unpack_group_ids(group_id_in_bytes):
            # reverse of pack_group_ids: rebuild the {'id': ...} dict
            group_id = cls.attempt_convert_uuid_bytes_to_hex(group_id_in_bytes)
            return {'id': group_id}

        user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        group_ids = list(map(unpack_group_ids, payload[2]))
        idp_id = cls.attempt_convert_uuid_bytes_to_hex(payload[3])
        protocol_id = payload[4]
        expires_at_str = cls._convert_int_to_time_string(payload[5])
        audit_ids = list(map(provider.base64_encode, payload[6]))
        federated_info = dict(group_ids=group_ids, idp_id=idp_id,
                              protocol_id=protocol_id)
        return (user_id, methods, expires_at_str, audit_ids, federated_info)
| |
import asyncio
import pytest
import async_timeout
from grpclib.const import Status
from grpclib.testing import ChannelFor
from grpclib.exceptions import GRPCError
from grpclib.health.check import ServiceCheck, ServiceStatus
from grpclib.health.service import Health
from grpclib.health.v1.health_pb2 import HealthCheckRequest, HealthCheckResponse
from grpclib.health.v1.health_grpc import HealthStub
class Check:
    """Awaitable health check whose outcome is set by the test."""

    # Tests flip this to True/False/None to simulate check results.
    __current_status__ = None

    async def __call__(self):
        return self.__current_status__
SERVICE_NAME = 'namespace.ServiceName'


class Service:
    """Minimal gRPC service exposing a single unimplemented method."""

    async def Foo(self, stream):
        raise NotImplementedError

    def __mapping__(self):
        # Route the fully-qualified method path to its handler.
        path = '/{}/Foo'.format(SERVICE_NAME)
        return {path: self.Foo}
@pytest.mark.asyncio
async def test_check_unknown_service():
    """Check() for an unregistered service must fail with NOT_FOUND."""
    service = Service()
    health = Health({service: []})
    async with ChannelFor([service, health]) as channel:
        stub = HealthStub(channel)
        with pytest.raises(GRPCError) as exc_info:
            await stub.Check(HealthCheckRequest(service='Unknown'))
        assert exc_info.value.status == Status.NOT_FOUND
@pytest.mark.asyncio
async def test_check_zero_checks():
    """A service registered with no checks reports SERVING."""
    service = Service()
    health = Health({service: []})
    async with ChannelFor([service, health]) as channel:
        stub = HealthStub(channel)
        reply = await stub.Check(HealthCheckRequest(service=SERVICE_NAME))
        expected = HealthCheckResponse(status=HealthCheckResponse.SERVING)
        assert reply == expected
@pytest.mark.asyncio
@pytest.mark.parametrize('v1, v2, status', [
    (None, None, HealthCheckResponse.UNKNOWN),
    (True, False, HealthCheckResponse.NOT_SERVING),
    (False, True, HealthCheckResponse.NOT_SERVING),
    (True, True, HealthCheckResponse.SERVING)
])
async def test_check_service_check(v1, v2, status):
    """Check() aggregates the results of multiple ServiceCheck objects.

    NOTE: dropped the unused ``loop`` fixture parameter, for consistency
    with the otherwise-parallel ``test_check_service_status`` below.
    """
    svc = Service()
    c1 = Check()
    c2 = Check()
    # check_ttl=0 forces re-evaluation of the checks on every request.
    health = Health({svc: [
        ServiceCheck(c1, check_ttl=0),
        ServiceCheck(c2, check_ttl=0),
    ]})
    async with ChannelFor([svc, health]) as channel:
        stub = HealthStub(channel)
        c1.__current_status__ = v1
        c2.__current_status__ = v2
        response = await stub.Check(HealthCheckRequest(service=SERVICE_NAME))
        assert response == HealthCheckResponse(status=status)
@pytest.mark.asyncio
@pytest.mark.parametrize('v1, v2, status', [
    (None, None, HealthCheckResponse.UNKNOWN),
    (True, False, HealthCheckResponse.NOT_SERVING),
    (False, True, HealthCheckResponse.NOT_SERVING),
    (True, True, HealthCheckResponse.SERVING)
])
async def test_check_service_status(v1, v2, status):
    """Check() aggregates manually-set ServiceStatus values."""
    service = Service()
    status_1 = ServiceStatus()
    status_2 = ServiceStatus()
    health = Health({service: [status_1, status_2]})
    async with ChannelFor([service, health]) as channel:
        stub = HealthStub(channel)
        status_1.set(v1)
        status_2.set(v2)
        reply = await stub.Check(HealthCheckRequest(service=SERVICE_NAME))
        assert reply == HealthCheckResponse(status=status)
@pytest.mark.asyncio
async def test_watch_unknown_service():
    """Watch() on an unregistered service reports SERVICE_UNKNOWN once."""
    svc = Service()
    health = Health({svc: []})
    async with ChannelFor([svc, health]) as channel:
        stub = HealthStub(channel)
        async with stub.Watch.open() as stream:
            await stream.send_message(HealthCheckRequest(service='Unknown'),
                                      end=True)
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.SERVICE_UNKNOWN,
            )
            # No further updates should arrive for an unknown service; a
            # short timeout confirms the stream stays quiet.
            try:
                with async_timeout.timeout(0.01):
                    assert not await stream.recv_message()
            except asyncio.TimeoutError:
                pass
            await stream.cancel()
@pytest.mark.asyncio
async def test_watch_zero_checks():
    """Watch() on a service with no checks reports SERVING once."""
    svc = Service()
    health = Health({svc: []})
    async with ChannelFor([svc, health]) as channel:
        stub = HealthStub(channel)
        async with stub.Watch.open() as stream:
            await stream.send_message(HealthCheckRequest(service=SERVICE_NAME),
                                      end=True)
            response = await stream.recv_message()
            assert response == HealthCheckResponse(
                status=HealthCheckResponse.SERVING,
            )
            # The status never changes, so no further messages should
            # arrive; a short timeout confirms the stream stays quiet.
            try:
                with async_timeout.timeout(0.01):
                    assert not await stream.recv_message()
            except asyncio.TimeoutError:
                pass
            await stream.cancel()
@pytest.mark.asyncio
async def test_watch_service_check():
    """Watch() streams a new status whenever ServiceCheck results change."""
    svc = Service()
    c1 = Check()
    c2 = Check()
    # Tiny TTL so the checks are re-evaluated almost immediately and
    # status transitions show up on the watch stream quickly.
    health = Health({svc: [
        ServiceCheck(c1, check_ttl=0.001),
        ServiceCheck(c2, check_ttl=0.001),
    ]})
    async with ChannelFor([svc, health]) as channel:
        stub = HealthStub(channel)
        async with stub.Watch.open() as stream:
            await stream.send_message(HealthCheckRequest(service=SERVICE_NAME),
                                      end=True)
            # Neither check has produced a result yet.
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.UNKNOWN,
            )
            # check that there are no unnecessary messages
            try:
                with async_timeout.timeout(0.01):
                    assert not await stream.recv_message()
            except asyncio.TimeoutError:
                pass
            # One passing check is not enough: still NOT_SERVING.
            c1.__current_status__ = True
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.NOT_SERVING,
            )
            # Both checks passing -> SERVING.
            c2.__current_status__ = True
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.SERVING,
            )
            # Any check failing flips the status back to NOT_SERVING.
            c1.__current_status__ = False
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.NOT_SERVING,
            )
            c1.__current_status__ = True
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.SERVING,
            )
            await stream.cancel()
@pytest.mark.asyncio
async def test_watch_service_status():
    """Watch() streams a new status whenever a ServiceStatus is changed."""
    svc = Service()
    s1 = ServiceStatus()
    s2 = ServiceStatus()
    health = Health({svc: [s1, s2]})
    async with ChannelFor([svc, health]) as channel:
        stub = HealthStub(channel)
        async with stub.Watch.open() as stream:
            await stream.send_message(HealthCheckRequest(service=SERVICE_NAME),
                                      end=True)
            # No status has been set yet.
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.UNKNOWN,
            )
            # One passing status is not enough: still NOT_SERVING.
            s1.set(True)
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.NOT_SERVING,
            )
            # Both statuses passing -> SERVING.
            s2.set(True)
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.SERVING,
            )
            # Any status failing flips back to NOT_SERVING.
            s1.set(False)
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.NOT_SERVING,
            )
            s1.set(True)
            assert await stream.recv_message() == HealthCheckResponse(
                status=HealthCheckResponse.SERVING,
            )
            # check that there are no unnecessary messages if status isn't
            # changed
            s1.set(True)
            try:
                with async_timeout.timeout(0.01):
                    assert not await stream.recv_message()
            except asyncio.TimeoutError:
                pass
            await stream.cancel()
| |
import collections
import numpy as np
import openpnm as op
import openpnm.models.physics as pm
from numpy.testing import assert_allclose
from sympy import ln as sym_ln
from sympy import symbols
class GenericSourceTermTest:
    """Tests for the generic source-term models in openpnm.models.physics.

    ``setup_class`` builds a small cubic network with a generic phase and
    physics. Each test attaches a source-term model (plus its symbolic
    twin), runs a reactive transport algorithm, and compares the model's
    reported rate against the same expression evaluated by hand.
    """

    def setup_class(self):
        """Create the shared network/geometry/phase/physics fixtures."""
        self.net = op.network.Cubic(shape=[5, 5, 5])
        Ps = self.net.Ps
        Ts = self.net.Ts
        self.geo = op.geometry.GenericGeometry(network=self.net, pores=Ps,
                                               throats=Ts)
        self.phase = op.phases.GenericPhase(network=self.net)
        self.phys = op.physics.GenericPhysics(network=self.net,
                                              phase=self.phase,
                                              geometry=self.geo)
        self.phys['throat.diffusive_conductance'] = 5e-8
        self.phase['pore.mole_fraction'] = 0.0
        self.BC_pores = np.arange(20, 30)
        self.source_pores = np.arange(55, 85)

    def test_default_values_should_give_zero_rate(self):
        """With default coefficients, every model must report zero rate."""
        sources = ["linear", "power_law", "exponential", "natural_exponential",
                   "logarithm", "natural_logarithm"]
        self.alg = op.algorithms.ReactiveTransport(network=self.net,
                                                   phase=self.phase)
        self.alg.settings.update({'conductance': 'throat.diffusive_conductance',
                                  'quantity': 'pore.mole_fraction'})
        self.alg.set_value_BC(values=0.4, pores=self.BC_pores)
        self.alg.set_source(propname='pore.source_term',
                            pores=self.source_pores)
        # To avoid nans in logarithm-based source term models
        self.phase['pore.mole_fraction'] = 0.1
        for source in sources:
            self.phys.add_model(propname="pore.source_term",
                                model=getattr(pm.generic_source_term, source),
                                X="pore.mole_fraction")
            assert self.phys["pore.source_term.rate"].mean() == 0
            assert self.phys["pore.source_term.S1"].mean() == 0
            assert self.phys["pore.source_term.S2"].mean() == 0

    def test_linear(self):
        """rate = A1*X + A2, checked against the symbolic variant."""
        self.phys['pore.item1'] = 0.5e-11
        self.phys['pore.item2'] = 1.5e-12
        self.phys.add_model(propname='pore.source1',
                            model=pm.generic_source_term.linear,
                            A1='pore.item1',
                            A2='pore.item2',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.phys.add_model(propname='pore.source2',
                            model=pm.generic_source_term.linear_sym,
                            A1='pore.item1',
                            A2='pore.item2',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.alg = op.algorithms.ReactiveTransport(network=self.net,
                                                   phase=self.phase)
        self.alg.settings.update({'conductance': 'throat.diffusive_conductance',
                                  'quantity': 'pore.mole_fraction'})
        self.alg.set_value_BC(values=0.4, pores=self.BC_pores)
        self.alg.set_source(propname='pore.source1',
                            pores=self.source_pores)
        self.alg.run()
        self.phase.update(self.alg.results())
        self.phys.regenerate_models(propnames='pore.source1')
        self.phys.regenerate_models(propnames='pore.source2')
        X = self.phase['pore.mole_fraction']
        r1 = np.round(np.sum(0.5e-11 * X[self.source_pores] + 1.5e-12), 20)
        r2 = np.round(np.sum(self.phys['pore.source1.rate'][self.source_pores]), 20)
        rs = np.round(np.sum(self.phys['pore.source2.rate'][self.source_pores]), 20)
        assert r1 == r2
        assert r1 == rs

    def test_power_law(self):
        """rate = A1*X**A2 + A3, checked against the symbolic variant."""
        self.phys['pore.item1'] = 0.5e-12
        self.phys['pore.item2'] = 2.5
        self.phys['pore.item3'] = -1.4e-11
        self.phys.add_model(propname='pore.source1',
                            model=pm.generic_source_term.power_law,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.phys.add_model(propname='pore.source2',
                            model=pm.generic_source_term.power_law_sym,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.alg = op.algorithms.ReactiveTransport(network=self.net,
                                                   phase=self.phase)
        self.alg.set_value_BC(values=0.4, pores=self.BC_pores)
        self.alg.set_source(propname='pore.source1',
                            pores=self.source_pores)
        self.alg.settings.update({'conductance': 'throat.diffusive_conductance',
                                  'quantity': 'pore.mole_fraction'})
        self.alg.run()
        self.phase.update(self.alg.results())
        self.phys.regenerate_models(propnames='pore.source1')
        self.phys.regenerate_models(propnames='pore.source2')
        X = self.phase['pore.mole_fraction']
        r1 = np.sum(0.5e-12 * X[self.source_pores]**2.5 - 1.4e-11)
        r2 = np.sum(self.phys['pore.source1.rate'][self.source_pores])
        rs = np.sum(self.phys['pore.source2.rate'][self.source_pores])
        assert r1 == r2
        assert r1 == rs

    def test_exponential(self):
        """rate = A1*A2**(A3*X**A4 + A5) + A6, vs the symbolic variant."""
        self.phys['pore.item1'] = 0.8e-11
        self.phys['pore.item2'] = 3
        self.phys['pore.item3'] = 0.5
        self.phys['pore.item4'] = 2
        self.phys['pore.item5'] = -0.34
        self.phys['pore.item6'] = 2e-14
        self.phys.add_model(propname='pore.source1',
                            model=pm.generic_source_term.exponential,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            A6='pore.item6',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.phys.add_model(propname='pore.source2',
                            model=pm.generic_source_term.exponential_sym,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            A6='pore.item6',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.alg = op.algorithms.ReactiveTransport(network=self.net,
                                                   phase=self.phase)
        self.alg.set_value_BC(values=0.4, pores=self.BC_pores)
        self.alg.set_source(propname='pore.source1',
                            pores=self.source_pores)
        self.alg.settings.update({'conductance': 'throat.diffusive_conductance',
                                  'quantity': 'pore.mole_fraction'})
        self.alg.run()
        self.phase.update(self.alg.results())
        self.phys.regenerate_models(propnames='pore.source1')
        self.phys.regenerate_models(propnames='pore.source2')
        X = self.phase['pore.mole_fraction']
        r1 = np.sum(0.8e-11 * 3 ** (0.5 * X[self.source_pores]**2 - 0.34) + 2e-14)
        r2 = np.sum(self.phys['pore.source1.rate'][self.source_pores])
        rs = np.sum(self.phys['pore.source2.rate'][self.source_pores])
        assert_allclose(actual=r2, desired=r1, rtol=1e-7)
        assert_allclose(actual=rs, desired=r1, rtol=1e-7)

    def test_natural_exponential(self):
        """rate = A1*exp(A2*X**A3 + A4) + A5, vs the symbolic variant."""
        self.phys['pore.item1'] = 0.8e-11
        self.phys['pore.item2'] = 0.5
        self.phys['pore.item3'] = 2
        self.phys['pore.item4'] = -0.34
        self.phys['pore.item5'] = 2e-14
        self.phys.add_model(propname='pore.source1',
                            model=pm.generic_source_term.natural_exponential,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.phys.add_model(propname='pore.source2',
                            model=pm.generic_source_term.natural_exponential_sym,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.alg = op.algorithms.ReactiveTransport(network=self.net,
                                                   phase=self.phase)
        self.alg.set_value_BC(values=0.4, pores=self.BC_pores)
        self.alg.set_source(propname='pore.source1',
                            pores=self.source_pores)
        self.alg.settings.update({'conductance': 'throat.diffusive_conductance',
                                  'quantity': 'pore.mole_fraction'})
        self.alg.run()
        self.phase.update(self.alg.results())
        self.phys.regenerate_models(propnames='pore.source1')
        self.phys.regenerate_models(propnames='pore.source2')
        X = self.phase['pore.mole_fraction']
        r1 = np.sum(0.8e-11 * np.exp(0.5 * X[self.source_pores]**2 - 0.34) + 2e-14)
        r2 = np.sum(self.phys['pore.source1.rate'][self.source_pores])
        # BUG FIX: previously re-read 'pore.source1.rate' here, so the
        # symbolic variant was never actually compared.
        rs = np.sum(self.phys['pore.source2.rate'][self.source_pores])
        assert_allclose(actual=r2, desired=r1)
        assert_allclose(actual=rs, desired=r1)

    def test_logarithm(self):
        """rate = A1*log_A2(A3*X**A4 + A5) + A6, vs the symbolic variant."""
        self.phys['pore.item1'] = 0.16e-13
        self.phys['pore.item2'] = 10
        self.phys['pore.item3'] = 4
        self.phys['pore.item4'] = 1.4
        self.phys['pore.item5'] = 0.133
        self.phys['pore.item6'] = -5.1e-13
        self.phys.add_model(propname='pore.source1',
                            model=pm.generic_source_term.logarithm,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            A6='pore.item6',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.phys.add_model(propname='pore.source2',
                            model=pm.generic_source_term.logarithm_sym,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            A6='pore.item6',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.alg = op.algorithms.ReactiveTransport(network=self.net,
                                                   phase=self.phase)
        self.alg.set_value_BC(values=0.4, pores=self.BC_pores)
        self.alg.set_source(propname='pore.source1',
                            pores=self.source_pores)
        self.alg.settings.update({'conductance': 'throat.diffusive_conductance',
                                  'quantity': 'pore.mole_fraction'})
        self.alg.run()
        self.phase.update(self.alg.results())
        self.phys.regenerate_models(propnames='pore.source1')
        self.phys.regenerate_models(propnames='pore.source2')
        X = self.phase['pore.mole_fraction']
        r1 = np.sum(0.16e-13 * np.log(4*X[self.source_pores]**(1.4) + 0.133)
                    / np.log(10) - 5.1e-13)
        r2 = np.sum(self.phys['pore.source1.rate'][self.source_pores])
        rs = np.sum(self.phys['pore.source2.rate'][self.source_pores])
        assert_allclose(actual=r2, desired=r1)
        assert_allclose(actual=rs, desired=r1)

    def test_natural_logarithm(self):
        """rate = A1*ln(A2*X**A3 + A4) + A5, vs the symbolic variant."""
        self.phys['pore.item1'] = 0.16e-14
        self.phys['pore.item2'] = 4
        self.phys['pore.item3'] = 1.4
        self.phys['pore.item4'] = 0.133
        self.phys['pore.item5'] = -5.1e-14
        self.phys.add_model(propname='pore.source1',
                            model=pm.generic_source_term.natural_logarithm,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            X='pore.mole_fraction',
                            regen_mode='on_demand')
        self.phys.add_model(propname='pore.source2',
                            model=pm.generic_source_term.natural_logarithm_sym,
                            A1='pore.item1',
                            A2='pore.item2',
                            A3='pore.item3',
                            A4='pore.item4',
                            A5='pore.item5',
                            X='pore.mole_fraction',
                            regen_mode='normal')
        self.alg = op.algorithms.ReactiveTransport(network=self.net,
                                                   phase=self.phase)
        self.alg.set_value_BC(values=0.4, pores=self.BC_pores)
        self.alg.set_source(propname='pore.source1',
                            pores=self.source_pores)
        self.alg.settings.update({'conductance': 'throat.diffusive_conductance',
                                  'quantity': 'pore.mole_fraction'})
        self.alg.run()
        self.phase.update(self.alg.results())
        self.phys.regenerate_models(propnames='pore.source1')
        self.phys.regenerate_models(propnames='pore.source2')
        X = self.phase['pore.mole_fraction']
        r1 = np.sum(0.16e-14*np.log(4*X[self.source_pores]**1.4 + 0.133) - 5.1e-14)
        r2 = np.sum(self.phys['pore.source1.rate'][self.source_pores])
        # BUG FIX: previously re-read 'pore.source1.rate' here, so the
        # symbolic variant was never actually compared.
        rs = np.sum(self.phys['pore.source2.rate'][self.source_pores])
        assert r1 == r2
        assert r1 == rs

    def test_general_symbolic(self):
        """general_symbolic with a sympy expression matches the builtin model."""
        a, b, c, d, e, x = symbols('a,b,c,d,e,x')
        # natural log function
        y = a*sym_ln(b*x**c + d)+e
        phys = self.phys
        phys['pore.item1'] = 0.16e-14
        phys['pore.item2'] = 4
        phys['pore.item3'] = 1.4
        phys['pore.item4'] = 0.133
        phys['pore.item5'] = -5.1e-14
        phys.add_model(propname='pore.source1',
                       model=pm.generic_source_term.natural_logarithm,
                       A1='pore.item1',
                       A2='pore.item2',
                       A3='pore.item3',
                       A4='pore.item4',
                       A5='pore.item5',
                       X='pore.mole_fraction',
                       regen_mode='normal')
        arg_map = collections.OrderedDict([('a', 'pore.item1'),
                                           ('b', 'pore.item2'),
                                           ('c', 'pore.item3'),
                                           ('d', 'pore.item4'),
                                           ('e', 'pore.item5'),
                                           ('x', 'pore.mole_fraction')])
        phys.add_model(propname='pore.general',
                       model=op.models.physics.generic_source_term.general_symbolic,
                       eqn=y, arg_map=arg_map,
                       regen_mode='normal')
        assert np.allclose(phys['pore.source1.rate'], phys['pore.general.rate'])
        assert np.allclose(phys['pore.source1.S1'], phys['pore.general.S1'])
        assert np.allclose(phys['pore.source1.S2'], phys['pore.general.S2'])
if __name__ == '__main__':
    # Ad-hoc runner: execute every test method in definition order.
    t = GenericSourceTermTest()
    self = t
    t.setup_class()
    for name in t.__dir__():
        if name.startswith('test'):
            print('running test: ' + name)
            getattr(t, name)()
| |
import argparse
import os
import json
import collections
from subprocess import call
# Command-line interface: `cue [-p PROJECT] [-s SECTION] TASK [PROJECT]`
parser = argparse.ArgumentParser(description='CUE!')
options = parser.add_argument_group('Options')
options.add_argument('-p', '--project', required=False,
                     help='Specify the target project for the task')
options.add_argument('-s', '--section', required=False,
                     help='Specify the section for the task')
options.add_argument('task', help='Specify the task')
options.add_argument('project_or_argument', nargs='?',
                     help='Specify the project')

# Global configuration lives in ~/.cue/*.cueconf files.
global_config_dir_path = os.path.join(os.getenv('HOME'), '.cue')
extension = '.cueconf'
args = None
def get_global_conf():
    """Load and validate the merged global configuration from ~/.cue.

    Exits the process if the required 'projects' or 'defaultSection'
    keys are missing.
    """
    cues = get_settings_from_directory(global_config_dir_path)
    if 'projects' not in cues:
        print 'cueconf is missing projects section'
        exit()
    if 'defaultSection' not in cues:
        print 'cueconf is missing defaultSection definition'
        exit()
    return cues
def get_settings_from_directory(directory_path):
if not os.path.exists(directory_path):
print 'directory does not exist (%s)' % directory_path
exit()
cueconf_file_paths = \
[os.path.join(directory_path, file)
for file in os.listdir(directory_path) if file.lower().endswith(extension)]
cues = {}
for cueconf_path in cueconf_file_paths:
if not os.path.isfile(cueconf_path):
print "cueconf doesn't exist " + cueconf_path
exit()
try:
cueconf_contents = json.load(open(cueconf_path))
except:
print '%s is not valid json' % cueconf_path
exit()
cues = recursive_update(cues, cueconf_contents)
return cues
def get_project_conf(global_conf, project_name=None):
cues = None
if project_name:
if project_name not in global_conf['projects']:
print 'Project %s not found' % project_name
exit()
cues = get_settings_from_directory(global_conf['projects'][project_name])
else:
for slug in global_conf['projects']:
project_conf_path = global_conf['projects'][slug]
if os.path.dirname(project_conf_path) in os.getcwd():
cues = get_settings_from_directory(project_conf_path)
if not cues:
print 'Project found but no cueconf files appear to not exist'
exit()
break
if cues:
if 'root_path' not in cues:
print "project_conf missing 'root_path'"
exit()
if 'slug' not in cues:
print "project_conf missing 'slug'"
exit()
if 'name' not in cues:
print "project_conf missing 'name'"
exit()
if 'defaultSection' in cues and cues['defaultSection'] not in cues:
print "project_conf missing '%s'" % cues['defaultSection']
exit()
elif 'defaultSection' not in cues and global_conf['defaultSection'] not in cues:
print "project_conf missing '%s'" % global_config['defaultSection']
exit()
else:
print 'project_conf not found'
exit()
return cues
def recursive_update(d, u):
    """Recursively update a dictionary-like object.

    Nested mappings are merged, iterable values (e.g. lists) found in both
    dicts are concatenated with ``+=``, and everything else is overwritten.

    :param d: the dict updated in place
    :param u: the dict whose entries are merged into ``d``
    :returns: ``d``, updated
    """
    for k, v in u.iteritems():
        if isinstance(v, collections.Mapping):
            r = recursive_update(d.get(k, {}), v)
            d[k] = r
        elif k in d and isinstance(d[k], collections.Iterable) \
                and isinstance(v, collections.Iterable):
            # NOTE(review): strings are also Iterable, so two string values
            # are concatenated rather than replaced — confirm intended.
            d[k] += v
        else:
            d[k] = v
    return d
def register(global_conf, section):
    """Register the current working directory as a cue project.

    Reuses an existing .cueconf in the directory when present; otherwise
    prompts for a name/slug and writes a new project file. Also records
    the project's location in the global ~/.cue directory.

    :param global_conf: the merged global configuration dict
    :param section: section name used to seed a brand-new project file
    :returns: the project configuration dict
    """
    project_conf_dict = get_settings_from_directory(os.getcwd())
    if project_conf_dict:
        name = project_conf_dict['name']
        slug = project_conf_dict['slug']
    else:
        name = raw_input('Name of Project: ')
        slug = raw_input('Project Slug: ')
        default_section = {}
        project_conf_dict = {'name': name,
                             'slug': slug,
                             'root_path': os.getcwd(),
                             section: default_section}
        # Write the freshly created project file into the project dir.
        proj_cue_path = os.path.join(os.getcwd(), extension)
        f = open(proj_cue_path, 'w+')
        json.dump(project_conf_dict, f, indent=4)
        f.close()
    if slug in global_conf['projects']:
        print 'project slug already exists on the system'
        exit()
    # Record the project location in the global ~/.cue registry.
    global_conf['projects'][slug] = os.getcwd()
    f = open(os.path.join(global_config_dir_path, slug + extension), 'w+')
    json.dump({'projects': {slug: os.getcwd()}}, f, indent=4)
    f.close()
    return project_conf_dict
def deregister(global_conf, project_conf):
    """Remove a project's registration from the global configuration.

    :param global_conf: the merged global configuration dict
    :param project_conf: the project configuration dict to deregister
    """
    if project_conf['slug'] not in global_conf['projects']:
        print 'Project %s is not registered' % project_conf['slug']
        exit()
    del global_conf['projects'][project_conf['slug']]
    # Also delete the per-project registry file from ~/.cue.
    os.remove(os.path.join(global_config_dir_path, \
              project_conf['slug'] + extension))
def run_task(section, task_name, global_conf, project_conf):
def exec_task(task, default_flow='next'):
exec_string = None
if isinstance(task, collections.Mapping):
if 'exec' in task:
exec_string = task['exec']
elif isinstance(task, basestring):
if task.startswith(':'):
return run_task(task[1:])
else:
exec_string = task
exit_code = 0
if exec_string:
exit_code = call(exec_string, shell=True)
flow = default_flow
if exit_code != 0:
# error - default flow is stop
flow = 'stop'
if isinstance(task, collections.Mapping) and'onError' in task:
flow = exec_task(task['onError'], default_flow='stop')
else:
# success
if 'flow' in task:
flow = task['flow']
return flow
tasks = None
#Local
if task_name in project_conf[section]:
tasks = project_conf[section][task_name]
#Global
if not tasks:
if 'global' in global_conf[section] and \
task_name in global_conf[section]['global']:
tasks = global_conf[section]['global'][task_name]
#By Group
if not tasks:
for group in global_conf[section]:
if group in project_conf:
tasks = \
global_conf[section][group][project_conf[group]][task_name]
else:
print 'Project missing setting. ' + \
'Please define an entry for %s[%s] = (%s)' % \
(section, group, str(global_conf[section][group].keys()))
if tasks:
break
if not tasks:
print '(%s) tasks not found' % task_name
exit()
if not isinstance(tasks, collections.Iterable) or \
isinstance(tasks, basestring):
tasks = [tasks]
previous_index = 0
current_index = 0
next_index = 1
stop_index = len(tasks)
while True:
flow = exec_task(tasks[current_index])
if flow == 'next':
previous_index = current_index
current_index = next_index
next_index = min(stop_index, current_index + 1)
elif flow == 'previous':
current_index = previous_index
previous_index = max(0, current_index - 1)
next_index = min(stop_index, current_index + 1)
elif flow == 'stop':
current_index = stop_index
next_index = stop_index
previous_index = max(0, current_index - 1)
elif flow.startswith('#'):
current_index = int(flow[1:])
previous_index = max(0, current_index - 1)
next_index = min(stop_index, current_index + 1)
if current_index >= stop_index:
break
if __name__ == '__main__':
    # Entry point: parse CLI args (parser is built earlier in this file),
    # load the global config, then dispatch to register / deregister /
    # run_task.
    args = vars(parser.parse_args())
    global_conf = get_global_conf()
    if not args['project'] and args['project_or_argument']:
        # todo - check to see if this is the name of a project otherwise assume
        # it is a parameter being passed to the commands
        args['project'] = args['project_or_argument']
    # Remember whether the section came from the global default so a
    # project-level defaultSection can still override it below.
    is_section_default = False
    if not args['section']:
        args['section'] = global_conf['defaultSection']
        is_section_default = True
    if args['task'] == 'register':
        project_conf = register(global_conf, args['section'])
    else:
        project_conf = get_project_conf(global_conf, args['project'])
        if is_section_default and 'defaultSection' in project_conf:
            args['section'] = project_conf['defaultSection']
        if args['task'] == 'deregister':
            deregister(global_conf, project_conf)
        else:
            # Any other task name is looked up and executed.
            run_task(args['section'], args['task'], global_conf, project_conf)
| |
"""A wrapper around DBAPI-compliant databases to support iteration
and generator expression syntax for requests, instead of SQL
To get an iterator, initialize a connection to the database, then
set the cursor attribute of the query class to its cursor
Create an instance of Table for the tables you want to use
Then you can use the class query. You create an instance by passing
a generator expression as parameter. This instance translates the
generator expression in an SQL statement ; then you can iterate
on it to get the selected items as objects, dictionaries or lists
Supposing you call this module db_iterator.py, here is an example
of use with sqlite :
from pysqlite2 import dbapi2 as sqlite
from db_iterator import query, Table
conn = sqlite.connect('planes')
query.cursor = conn.cursor()
plane = Table()
countries = Table()
# all the items produced by iteration on query() are instances
# of the Record class
# simple requests
# since no attribute of r is specified in the query, returns a list
# of instances of Record with attributes matching all the field names
print [ r.name for r in query(r for r in plane if r.country == 'France') ]
# this request returns a list of instances of Record with the attribute
# c_country (c.country with the . replaced by _)
print [ country for country in query(c.country for c in countries
if c.continent == 'Europe') ]
# request on two tables
print [r.name for r in query (r for r in plane for c in countries
if r.country == c.country and c.continent == 'Europe')]
"""
import tokenize
import token
import compiler
import types
class ge_visitor:
    """AST visitor that renders a compiler.parse tree back into
    (SQL-flavoured) source text.

    Instances are passed as the visitor argument to
    compiler.walk(tree, visitor); the rendered text accumulates in the
    ``src`` attribute.  Only the node types likely to occur in a
    database query are implemented; the set is easy to extend.
    """

    def __init__(self):
        self.src = ''

    def visitTuple(self, node):
        rendered = [get_source(child) for child in node.nodes]
        self.src += ','.join(rendered)

    def visitList(self, node):
        rendered = [get_source(child) for child in node.nodes]
        self.src += ','.join(rendered)

    def visitMul(self, node):
        factors = '*'.join([get_source(child) for child in node])
        self.src += '(' + factors + ')'

    def visitName(self, node):
        self.src += node.name

    def visitConst(self, node):
        if type(node.value) is str:
            # convert single quotes, SQL-style
            escaped = node.value.replace("'", "''")
            self.src += "'" + escaped + "'"
        else:
            self.src += str(node.value)

    def visitAssName(self, node):
        self.src += node.name

    def visitGetattr(self, node):
        self.src += get_source(node.expr) + '.' + str(node.attrname)

    def visitGenExprFor(self, node):
        self.src += 'for %s in %s ' % (get_source(node.assign),
                                       get_source(node.iter))
        if node.ifs:
            clauses = [get_source(cond) for cond in node.ifs]
            self.src += ' if ' + ''.join(clauses)

    def visitGenExprIf(self, node):
        self.src += get_source(node.test)

    def visitCompare(self, node):
        compiler.walk(node.expr, self)
        self.src += ' '
        for comparison in node.ops:
            operator_text = comparison[0]
            if operator_text == '==':
                # SQL equality uses a single '='
                operator_text = '='
            self.src += operator_text + ' '
            compiler.walk(comparison[1], self)

    def visitAnd(self, node):
        joined = ' AND '.join([get_source(child) for child in node.nodes])
        self.src += '(' + joined + ')'

    def visitOr(self, node):
        joined = ' OR '.join([get_source(child) for child in node.nodes])
        self.src += '(' + joined + ')'

    def visitNot(self, node):
        self.src += '(NOT ' + get_source(node.expr) + ')'
def get_source(node):
    """Render *node* back into source text using a fresh ge_visitor."""
    visitor = ge_visitor()
    compiler.walk(node, visitor)
    return visitor.src
class genExprVisitor:
    """Visitor that collects GeneratorExpression objects from an AST.

    Uses the visitor pattern (see the compiler.visitor module): after
    compiler.walk(tree, visitor), ``GenExprs`` holds one
    GeneratorExpression per generator expression found in the tree.
    """

    def __init__(self):
        self.GenExprs = []

    def visitGenExprInner(self, node):
        collected = GeneratorExpression()
        self.GenExprs.append(collected)
        for child in node.getChildren():
            if child.__class__ is compiler.ast.GenExprFor:
                # one entry per "for ... in ..." clause
                collected.exprfor.append(child)
            else:
                # everything else is the result expression
                collected.result = child
class GeneratorExpression:
    """Holder for the parts of a parsed generator expression.

    ``result`` is the AST of the expression part; ``exprfor`` holds one
    AST node per ``for ... in ...`` clause.
    """

    def __init__(self):
        self.result, self.exprfor = None, []
class Record(object):
    """Generic bag-of-attributes class for database result rows."""
class Table:
"""A basic iterable class to avoid syntax errors"""
def __iter__(self):
return self
class query:
    """Class used for database queries
    Instance is created with query(ge) where ge is a generator
    expression
    The __init__ method builds the SQL select expression matching the
    generator expression
    Iteration on the instance of query yields the items found by
    the SQL select, under the form specified by return_type : an object,
    a dictionary or a list"""
    cursor = None # to be set to the cursor of the connection
    return_type = object # can be set to dict or list
    def __init__(self,s):
        # s is the generator-expression object; its suspended frame gives
        # access to the source file and line where it was written.
        self._iterating = False # used in next()
        # First we must get the source code of the generator expression
        # I use an ugly hack with stack frame attributes and tokenize
        # If there's a cleaner and safer way, please tell me !
        # NOTE(review): this file handle is never used; the tokenize loop
        # below opens the file a second time.
        readline = open(s.gi_frame.f_code.co_filename).readline
        first_line = s.gi_frame.f_code.co_firstlineno
        flag = False
        self.source = '' # the source code
        for t in tokenize.generate_tokens(open(s.gi_frame.f_code.co_filename).readline):
            # check all tokens until the last parenthesis is closed
            t_type,t_string,(r_start,c_start),(r_end,c_end),line = t
            t_name = token.tok_name[t_type]
            if r_start == first_line:
                # start capturing at the 'query' name on the call line
                if t_name == 'NAME' and t_string=="query":
                    flag = True
                    res = t_string
                    start = 0 # number of parenthesis
                    continue
            if flag:
                self.source += ' '+t_string
                if t_name == 'OP':
                    if t_string=='(':
                        start += 1
                    elif t_string == ')':
                        start -= 1
                        if start == 0:
                            # matching close paren of query(...): done
                            break
        # when the source has been found, build an AST tree from it
        ast = compiler.parse(self.source.strip())
        # use a visitor to find the generator expression(s) in the source
        visitor = genExprVisitor()
        compiler.walk(ast,visitor)
        # if there are nested generator expressions, it's too difficult
        # to handle : raise an exception
        if len(visitor.GenExprs)>1:
            raise Exception,'Invalid expression, found more ' \
                'than 1 generator expression'
        ge = visitor.GenExprs[0]
        self.sql = self.build_sql(ge)
    def build_sql(self,ge):
        """ Build the SQL select for the generator expression
        ge is an instance of GeneratorExpression
        The generator expression looks like
        (result) for x1 in table1 [ for x2 in table2] [ if condition ]
        It has 2 attributes :
        - result : an AST tree with the "result" part
        - exprfor : a list of AST trees, one for each "for ... in ..."
        """
        self.res = []
        if ge.result.__class__ is compiler.ast.Tuple:
            # more than one item in result
            self.res = ge.result.getChildren()
        else:
            self.res = [ge.result]
        results = [] # a list of strings = result part of the SQL expression
        for res in self.res:
            # a result can be a stand-alone name, or a "qualified" name,
            # with the table name first (table.field)
            if res.__class__ is compiler.ast.Name:
                results.append((res.name,None))
            elif res.__class__ is compiler.ast.Getattr:
                results.append((get_source(res.expr),res.attrname))
        self.results = results
        # "for x in y" produces an item in the dictionary recdefs :
        # recdef[x] = y
        recdefs = {}
        conditions = []
        for exprfor in ge.exprfor:
            recdefs[get_source(exprfor.assign)] = \
                get_source(exprfor.iter)
            if exprfor.ifs:
                # an AST tree for the condition
                conditions = exprfor.ifs
        # To build objects or dictionaries in the result set, we must
        # know the name of the fields in all the tables used in the
        # query. For this, make a simple select in each table and read
        # the information in cursor.description
        self.names={}
        for rec,table in recdefs.iteritems():
            self.cursor.execute('SELECT * FROM %s' %table)
            self.names[rec] = [ d[0] for d in self.cursor.description ]
        sql_res = [] # the way the field will appear in the SQL string
        rec_fields = [] # the name of the fields in the object or dictionary
        for (n1,n2) in results:
            if n2 is None:
                # "stand-alone" name
                if n1 in recdefs.keys():
                    # the name is a table alias: expand to all its fields
                    sql_res += [ '%s.%s' %(n1,v) for v in self.names[n1] ]
                    rec_fields+=[ v for v in self.names[n1] ]
                else:
                    sql_res.append(n1)
                    rec_fields.append(n1)
            else:
                # "qualified" name, with the table name first
                sql_res.append('%s.%s' %(n1,n2))
                # in the result set, the object will have the attribute
                # table_name (we can't set an attribute table.name, and
                # name alone could be ambiguous
                rec_fields.append('%s_%s' %(n1,n2))
        self.rec_fields = rec_fields
        # now we can build the actual SQL string
        sql = 'SELECT '+ ','.join(sql_res)
        sql += ' FROM '
        froms = []
        for (k,v) in recdefs.iteritems():
            froms.append('%s AS %s ' %(v,k))
        sql += ','.join(froms)
        if conditions:
            sql += 'WHERE '
            for c in conditions:
                sql += get_source(c)
        return sql
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol (would be __next__ in Python 3);
        # lazily executes the SQL on the first call.
        if not self._iterating:
            # begin iteration
            self.cursor.execute(self.sql)
            self._iterating = True
        row = self.cursor.fetchone()
        if row is not None:
            if self.return_type == object:
                # transform list into instance of Record
                # uses the rec_fields computed in build_sql()
                rec = Record()
                rec.__dict__ = dict(zip(self.rec_fields,row))
                return rec
            elif self.return_type == dict:
                return dict(zip(self.rec_fields,row))
            elif self.return_type == list:
                return row
        self._iterating = False
        raise StopIteration
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import os
import mock
import unittest
from contextlib import contextmanager
from shutil import rmtree
from StringIO import StringIO
from tempfile import mkdtemp
from xml.dom import minidom
from eventlet import spawn, Timeout, listen
import simplejson
from swift.common.swob import Request, HeaderKeyDict
import swift.container
from swift.container import server as container_server
from swift.common.utils import normalize_timestamp, mkdirs, public, replication
from test.unit import fake_http_connect
@contextmanager
def save_globals():
    """Restore swift.container.server.http_connect on exit so tests can
    monkey-patch it freely inside the ``with`` block."""
    saved_http_connect = getattr(swift.container.server, 'http_connect',
                                 None)
    try:
        yield True
    finally:
        swift.container.server.http_connect = saved_http_connect
class TestContainerController(unittest.TestCase):
"""Test swift.container.server.ContainerController"""
    def setUp(self):
        """Set up for testing swift.object_server.ObjectController"""
        # mkdtemp() gives a unique parent directory; the mkdirs/rmtree
        # pair below leaves testdir itself absent -- presumably to
        # guarantee a clean path before the device dirs are created.
        self.testdir = os.path.join(mkdtemp(),
                                    'tmp_test_object_server_ObjectController')
        mkdirs(self.testdir)
        rmtree(self.testdir)
        # 'sda1' acts as the storage device for all the requests below.
        mkdirs(os.path.join(self.testdir, 'sda1'))
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
        # mount_check off so a plain directory is accepted as a device
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.object_server.ObjectController"""
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
    def test_acl_container(self):
        """Container read/write ACLs can be set via PUT or POST, show up
        on HEAD, and can be cleared by POSTing empty values."""
        # Ensure no acl by default
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '0'})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('201'))
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assert_('x-container-read' not in response.headers)
        self.assert_('x-container-write' not in response.headers)
        # Ensure POSTing acls works
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': '1', 'X-Container-Read': '.r:*',
                     'X-Container-Write': 'account:user'})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('204'))
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(response.headers.get('x-container-read'), '.r:*')
        self.assertEquals(response.headers.get('x-container-write'),
                          'account:user')
        # Ensure we can clear acls on POST
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': '3', 'X-Container-Read': '',
                     'X-Container-Write': ''})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('204'))
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assert_('x-container-read' not in response.headers)
        self.assert_('x-container-write' not in response.headers)
        # Ensure PUTing acls works
        req = Request.blank(
            '/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '4', 'X-Container-Read': '.r:*',
                     'X-Container-Write': 'account:user'})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('201'))
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(response.headers.get('x-container-read'), '.r:*')
        self.assertEquals(response.headers.get('x-container-write'),
                          'account:user')
    def test_HEAD(self):
        """HEAD reports container byte/object counts, updated after an
        object is PUT into the container."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
                                    'HTTP_X_TIMESTAMP': '0'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(int(response.headers['x-container-bytes-used']), 0)
        self.assertEquals(int(response.headers['x-container-object-count']), 0)
        req2 = Request.blank(
            '/sda1/p/a/c/o', environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '1', 'HTTP_X_SIZE': 42,
                'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x'})
        req2.get_response(self.controller)
        # re-issue the earlier HEAD: stats now reflect the 42-byte object
        response = req.get_response(self.controller)
        self.assertEquals(int(response.headers['x-container-bytes-used']), 42)
        self.assertEquals(int(response.headers['x-container-object-count']), 1)
def test_HEAD_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_HEAD_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_HEAD_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_HEAD_invalid_format(self):
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
req = Request.blank(
'/sda1/p/a/c?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
    def test_PUT(self):
        """First PUT of a container returns 201 (created); a second PUT
        with a newer timestamp returns 202 (accepted)."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
def test_PUT_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', 'X-Size': '0',
'X-Content-Type': 'text/plain', 'X-ETag': 'e'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
    def test_PUT_GET_metadata(self):
        """X-Container-Meta-* headers are stored on PUT, merged across
        requests, only overwritten by newer timestamps, and removed when
        set to an empty value."""
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Container-Meta-Test': 'Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        # Set another metadata header, ensuring old one doesn't disappear
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Container-Meta-Test2': 'Value2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2')
        # Update metadata header
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(3),
                     'X-Container-Meta-Test': 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Send old update to metadata header -- must not win over ts 3
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(2),
                     'X-Container-Meta-Test': 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(4),
                     'X-Container-Meta-Test': ''})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assert_('x-container-meta-test' not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
    def test_PUT_timestamp_not_float(self):
        """A non-numeric X-Timestamp on PUT is rejected with 400."""
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                                    'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': 'not-float'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 400)
def test_PUT_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
    def test_POST_HEAD_metadata(self):
        """X-Container-Meta-* headers set by POST show up on HEAD, obey
        timestamp ordering, and are removed when posted empty."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Container-Meta-Test': 'Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        # Update metadata header
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(3),
                     'X-Container-Meta-Test': 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Send old update to metadata header -- must not win over ts 3
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(2),
                     'X-Container-Meta-Test': 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(4),
                     'X-Container-Meta-Test': ''})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assert_('x-container-meta-test' not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
    def test_POST_timestamp_not_float(self):
        """A non-numeric X-Timestamp on POST is rejected with 400."""
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                                    'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': 'not-float'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 400)
    def test_POST_insufficient_storage(self):
        """POST against a missing device yields 507 when mount_check is
        left at its default."""
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        req = Request.blank(
            '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
                                        'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 507)
    def test_POST_invalid_container_sync_to(self):
        """An x-container-sync-to value that is not a proper URL
        ('192.168.0.1' has no scheme/path) is rejected with 400."""
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        req = Request.blank(
            '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
                                        'HTTP_X_TIMESTAMP': '1'},
            headers={'x-container-sync-to': '192.168.0.1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 400)
    def test_POST_after_DELETE_not_found(self):
        """POST to a container that has been deleted returns 404."""
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '1'})
        resp = req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '2'})
        resp = req.get_response(self.controller)
        # NOTE(review): the POST path carries a trailing slash --
        # presumably routed to the same container; confirm that is
        # intentional.
        req = Request.blank('/sda1/p/a/c/',
                            environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': '3'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
def test_DELETE_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
    def test_DELETE_container_not_found(self):
        """DELETE of a container that was PUT at timestamp 0 reports 404.

        NOTE(review): the container is created with HTTP_X_TIMESTAMP '0';
        presumably a zero put_timestamp leaves the broker considered
        unborn/deleted so the DELETE sees no container -- confirm against
        ContainerController.DELETE.
        """
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
                                                    'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
    def test_PUT_utf8(self):
        """A container name containing non-ASCII UTF-8 can be created."""
        snowman = u'\u2603'
        # paths are handled as UTF-8 encoded byte strings (Python 2)
        container_name = snowman.encode('utf-8')
        req = Request.blank(
            '/sda1/p/a/%s' % container_name, environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
    def test_account_update_mismatched_host_device(self):
        """account_update with one X-Account-Host but two comma-separated
        X-Account-Device entries is rejected with 400."""
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT',
                     'HTTP_X_TIMESTAMP': '1'},
            headers={'X-Timestamp': '0000000001.00000',
                     'X-Account-Host': '127.0.0.1:0',
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1,sda2'})
        broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        # call account_update directly rather than through the WSGI stack
        resp = self.controller.account_update(req, 'a', 'c', broker)
        self.assertEquals(resp.status_int, 400)
    def test_account_update_account_override_deleted(self):
        """With X-Account-Override-Deleted: yes, the account update is
        attempted anyway and the PUT succeeds against the stubbed
        account responses."""
        bindsock = listen(('127.0.0.1', 0))
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT',
                     'HTTP_X_TIMESTAMP': '1'},
            headers={'X-Timestamp': '0000000001.00000',
                     'X-Account-Host': '%s:%s' %
                     bindsock.getsockname(),
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1',
                     'X-Account-Override-Deleted': 'yes'})
        with save_globals():
            # replace the real account connection with a canned 200
            new_connect = fake_http_connect(200, count=123)
            swift.container.server.http_connect = new_connect
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
    def test_PUT_account_update(self):
        """Container PUT forwards an account update to X-Account-Host;
        the account server's response code is passed through (201/404),
        and a 503 from the account server must not raise out of the
        container server."""
        bindsock = listen(('127.0.0.1', 0))

        def accept(return_code, expected_timestamp):
            # Minimal in-line "account server": accept one connection,
            # answer with return_code, and verify the forwarded request
            # line and x-put-timestamp header.  Returns the exception,
            # if any, so the main greenthread can re-raise it.
            try:
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEquals(inc.readline(),
                                      'PUT /sda1/123/a/c HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertEquals(headers['x-put-timestamp'],
                                      expected_timestamp)
            except BaseException as err:
                return err
            return None
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '0000000001.00000',
                     'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 201, '0000000001.00000')
        try:
            with Timeout(3):
                resp = req.get_response(self.controller)
                self.assertEquals(resp.status_int, 201)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'X-Timestamp': '2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        # account server answers 404: passed through to the client
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '0000000003.00000',
                     'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 404, '0000000003.00000')
        try:
            with Timeout(3):
                resp = req.get_response(self.controller)
                self.assertEquals(resp.status_int, 404)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        # account server answers 503: must not raise out of the server
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '0000000005.00000',
                     'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 503, '0000000005.00000')
        got_exc = False
        try:
            with Timeout(3):
                resp = req.get_response(self.controller)
        except BaseException as err:
            got_exc = True
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        self.assert_(not got_exc)
    def test_PUT_reset_container_sync(self):
        """PUT with an unchanged x-container-sync-to keeps the stored
        sync points; PUT with a new sync-to URL resets both to -1."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)
        # simulate sync progress having been recorded
        db.set_x_container_sync_points(123, 456)
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to same value
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to new value
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)
    def test_POST_reset_container_sync(self):
        """POST with an unchanged x-container-sync-to keeps the stored
        sync points; POST with a new sync-to URL resets both to -1."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)
        # simulate sync progress having been recorded
        db.set_x_container_sync_points(123, 456)
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to same value
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to new value
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)
def test_DELETE(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_not_found(self):
# Even if the container wasn't previously heard of, the container
# server will accept the delete and replicate it to where it belongs
# later.
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
    def test_DELETE_object(self):
        # A container holding an object cannot be deleted (409); once the
        # object is removed the container DELETE succeeds and a later GET
        # reports 404.
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Record one object in the container.
        req = Request.blank(
            '/sda1/p/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0',
                     'HTTP_X_SIZE': 1, 'HTTP_X_CONTENT_TYPE': 'text/plain',
                     'HTTP_X_ETAG': 'x'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Container DELETE conflicts while the object still exists.
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '3'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 409)
        # Delete the object, then the container delete goes through.
        req = Request.blank(
            '/sda1/p/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '4'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '5'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        # The container is now gone.
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '6'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
    def test_DELETE_account_update(self):
        # A container DELETE carrying X-Account-Host/Partition/Device must
        # forward an update with x-delete-timestamp to that account server.
        # The container's own status is checked when the account answers
        # 204 and 404, and a 503 from the account must not raise to the
        # client.
        bindsock = listen(('127.0.0.1', 0))
        def accept(return_code, expected_timestamp):
            # Fake account server: serve one connection, answer with
            # return_code, and verify the forwarded request line and the
            # x-delete-timestamp header.  Failures are *returned* rather
            # than raised so the main thread can re-raise them after the
            # container response has been checked.
            try:
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEquals(inc.readline(),
                                      'PUT /sda1/123/a/c HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertEquals(headers['x-delete-timestamp'],
                                      expected_timestamp)
            except BaseException as err:
                return err
            return None
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Account server answers 204: container DELETE succeeds with 204.
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'X-Timestamp': '0000000002.00000',
                     'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 204, '0000000002.00000')
        try:
            with Timeout(3):
                resp = req.get_response(self.controller)
                self.assertEquals(resp.status_int, 204)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Second round: account server answers 404; the container DELETE
        # reports 404 here (the re-create PUT timestamp '2' normalizes to
        # the prior delete time, so the container stays deleted).
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'X-Timestamp': '0000000003.00000',
                     'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 404, '0000000003.00000')
        try:
            with Timeout(3):
                resp = req.get_response(self.controller)
                self.assertEquals(resp.status_int, 404)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '4'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Third round: a 503 from the account server must be swallowed by
        # the container server, not surfaced as an exception.
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'X-Timestamp': '0000000005.00000',
                     'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                     'X-Account-Partition': '123',
                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 503, '0000000005.00000')
        got_exc = False
        try:
            with Timeout(3):
                resp = req.get_response(self.controller)
        except BaseException as err:
            # NOTE(review): this 'err' is immediately rebound by the
            # finally clause below; only got_exc is actually inspected.
            got_exc = True
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        self.assert_(not got_exc)
def test_DELETE_invalid_partition(self):
req = Request.blank(
'/sda1/./a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_GET_over_limit(self):
req = Request.blank(
'/sda1/p/a/c?limit=%d' %
(container_server.CONTAINER_LISTING_LIMIT + 1),
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
    def test_GET_json(self):
        # JSON listings: selected via ?format=json and via Accept
        # negotiation, for both GET and HEAD.
        # make a container
        req = Request.blank(
            '/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT',
                                        'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        # test an empty container
        req = Request.blank(
            '/sda1/p/a/jsonc?format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(simplejson.loads(resp.body), [])
        # fill the container
        for i in range(3):
            req = Request.blank(
                '/sda1/p/a/jsonc/%s' % i, environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain',
                    'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        # test format
        json_body = [{"name": "0",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000"},
                     {"name": "1",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000"},
                     {"name": "2",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000"}]
        req = Request.blank(
            '/sda1/p/a/jsonc?format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(simplejson.loads(resp.body), json_body)
        self.assertEquals(resp.charset, 'utf-8')
        # HEAD advertises the same content type without a body check.
        req = Request.blank(
            '/sda1/p/a/jsonc?format=json',
            environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/json')
        # Accept headers that prefer JSON also select the JSON listing.
        for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
                       '*/*;q=0.9,application/json;q=1.0', 'application/*'):
            req = Request.blank(
                '/sda1/p/a/jsonc',
                environ={'REQUEST_METHOD': 'GET'})
            req.accept = accept
            resp = req.get_response(self.controller)
            self.assertEquals(
                simplejson.loads(resp.body), json_body,
                'Invalid body for Accept: %s' % accept)
            self.assertEquals(
                resp.content_type, 'application/json',
                'Invalid content_type for Accept: %s' % accept)
            req = Request.blank(
                '/sda1/p/a/jsonc',
                environ={'REQUEST_METHOD': 'HEAD'})
            req.accept = accept
            resp = req.get_response(self.controller)
            self.assertEquals(
                resp.content_type, 'application/json',
                'Invalid content_type for Accept: %s' % accept)
    def test_GET_plain(self):
        # Plain-text listings: the default format, also selected by Accept
        # headers; ?format= beats Accept, and unknown formats fall back to
        # plain.
        # make a container
        req = Request.blank(
            '/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'PUT',
                                         'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        # test an empty container
        req = Request.blank(
            '/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        # fill the container
        for i in range(3):
            req = Request.blank(
                '/sda1/p/a/plainc/%s' % i, environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain',
                    'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        # One object name per line is the plain listing format.
        plain_body = '0\n1\n2\n'
        req = Request.blank('/sda1/p/a/plainc',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.body, plain_body)
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank('/sda1/p/a/plainc',
                            environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'text/plain')
        # Accept headers that do not prefer another format keep plain text.
        for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
                       '*/*;q=0.9,application/xml;q=0.8', '*/*',
                       'text/plain,application/xml'):
            req = Request.blank(
                '/sda1/p/a/plainc',
                environ={'REQUEST_METHOD': 'GET'})
            req.accept = accept
            resp = req.get_response(self.controller)
            self.assertEquals(
                resp.body, plain_body,
                'Invalid body for Accept: %s' % accept)
            self.assertEquals(
                resp.content_type, 'text/plain',
                'Invalid content_type for Accept: %s' % accept)
            req = Request.blank(
                '/sda1/p/a/plainc',
                environ={'REQUEST_METHOD': 'GET'})
            req.accept = accept
            resp = req.get_response(self.controller)
            self.assertEquals(
                resp.content_type, 'text/plain',
                'Invalid content_type for Accept: %s' % accept)
        # test conflicting formats
        req = Request.blank(
            '/sda1/p/a/plainc?format=plain',
            environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/json'
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.body, plain_body)
        # test unknown format uses default plain
        req = Request.blank(
            '/sda1/p/a/plainc?format=somethingelse',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.body, plain_body)
    def test_GET_json_last_modified(self):
        # JSON listings must render last_modified with microseconds even
        # when the stored timestamp has none (1.0 vs 1.5 below).
        # make a container
        req = Request.blank(
            '/sda1/p/a/jsonc', environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        # Two objects: one with fractional seconds, one without.
        for i, d in [(0, 1.5), (1, 1.0), ]:
            req = Request.blank(
                '/sda1/p/a/jsonc/%s' % i, environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': d,
                    'HTTP_X_CONTENT_TYPE': 'text/plain',
                    'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        # test format
        # last_modified format must be uniform, even when there are not msecs
        json_body = [{"name": "0",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.500000"},
                     {"name": "1",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000"}, ]
        req = Request.blank(
            '/sda1/p/a/jsonc?format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(simplejson.loads(resp.body), json_body)
        self.assertEquals(resp.charset, 'utf-8')
    def test_GET_xml(self):
        # XML listings: via ?format=xml and via Accept negotiation; a
        # 'text/xml' Accept yields the same body with text/xml type.
        # make a container
        req = Request.blank(
            '/sda1/p/a/xmlc', environ={'REQUEST_METHOD': 'PUT',
                                       'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        # fill the container
        for i in range(3):
            req = Request.blank(
                '/sda1/p/a/xmlc/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain',
                    'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        # Expected serialization of the three objects.
        xml_body = '<?xml version="1.0" encoding="UTF-8"?>\n' \
            '<container name="xmlc">' \
            '<object><name>0</name><hash>x</hash><bytes>0</bytes>' \
            '<content_type>text/plain</content_type>' \
            '<last_modified>1970-01-01T00:00:01.000000' \
            '</last_modified></object>' \
            '<object><name>1</name><hash>x</hash><bytes>0</bytes>' \
            '<content_type>text/plain</content_type>' \
            '<last_modified>1970-01-01T00:00:01.000000' \
            '</last_modified></object>' \
            '<object><name>2</name><hash>x</hash><bytes>0</bytes>' \
            '<content_type>text/plain</content_type>' \
            '<last_modified>1970-01-01T00:00:01.000000' \
            '</last_modified></object>' \
            '</container>'
        # tests
        req = Request.blank(
            '/sda1/p/a/xmlc?format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/xml')
        self.assertEquals(resp.body, xml_body)
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank(
            '/sda1/p/a/xmlc?format=xml',
            environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/xml')
        # Accept headers that prefer XML select the XML listing.
        for xml_accept in (
                'application/xml', 'application/xml;q=1.0,*/*;q=0.9',
                '*/*;q=0.9,application/xml;q=1.0', 'application/xml,text/xml'):
            req = Request.blank(
                '/sda1/p/a/xmlc',
                environ={'REQUEST_METHOD': 'GET'})
            req.accept = xml_accept
            resp = req.get_response(self.controller)
            self.assertEquals(
                resp.body, xml_body,
                'Invalid body for Accept: %s' % xml_accept)
            self.assertEquals(
                resp.content_type, 'application/xml',
                'Invalid content_type for Accept: %s' % xml_accept)
            req = Request.blank(
                '/sda1/p/a/xmlc',
                environ={'REQUEST_METHOD': 'HEAD'})
            req.accept = xml_accept
            resp = req.get_response(self.controller)
            self.assertEquals(
                resp.content_type, 'application/xml',
                'Invalid content_type for Accept: %s' % xml_accept)
        # 'text/xml' keeps the body but reports the text/xml content type.
        req = Request.blank(
            '/sda1/p/a/xmlc',
            environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'text/xml'
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'text/xml')
        self.assertEquals(resp.body, xml_body)
def test_GET_marker(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit with marker
req = Request.blank('/sda1/p/a/c?limit=2&marker=1',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['2', ])
    def test_weird_content_types(self):
        # Non-ASCII and parameterized content types must round-trip through
        # the listing (note the server normalizes 'text/plain; charset=...'
        # by dropping the space after the semicolon).
        snowman = u'\u2603'
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i, ctype in enumerate((snowman.encode('utf-8'),
                                   'text/plain; charset="utf-8"')):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i, environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype,
                    'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        result = [x['content_type'] for x in simplejson.loads(resp.body)]
        self.assertEquals(result, [u'\u2603', 'text/plain;charset="utf-8"'])
    def test_GET_accept_not_valid(self):
        # A malformed Accept header ('application/xml*') yields 406.
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        # Create a second container via account-style PUT headers.
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/xml*'
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 406)
def test_GET_limit(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit
req = Request.blank(
'/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['0', '1'])
def test_GET_prefix(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('a1', 'b1', 'a2', 'b2', 'a3', 'b3'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.body.split(), ['a1', 'a2', 'a3'])
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a/c?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
    def test_GET_delimiter(self):
        # prefix + delimiter collapse object names into subdir entries.
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?prefix=US-&delimiter=-&format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        # Five objects collapse into three sorted subdir entries.
        self.assertEquals(
            simplejson.loads(resp.body),
            [{"subdir": "US-OK-"},
             {"subdir": "US-TX-"},
             {"subdir": "US-UT-"}])
    def test_GET_delimiter_xml(self):
        # Same delimiter collapsing as the JSON case, rendered as XML
        # <subdir> elements.
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?prefix=US-&delimiter=-&format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(
            resp.body, '<?xml version="1.0" encoding="UTF-8"?>'
            '\n<container name="c"><subdir name="US-OK-">'
            '<name>US-OK-</name></subdir>'
            '<subdir name="US-TX-"><name>US-TX-</name></subdir>'
            '<subdir name="US-UT-"><name>US-UT-</name></subdir></container>')
    def test_GET_delimiter_xml_with_quotes(self):
        # Subdir names containing quotes and angle brackets must be
        # correctly escaped in the XML listing (parsed back via minidom).
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a/c/<\'sub\' "dir">/object',
            environ={
                'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                'HTTP_X_SIZE': 0})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?delimiter=/&format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        # Round-trip through an XML parser to verify the escaping.
        dom = minidom.parseString(resp.body)
        self.assert_(len(dom.getElementsByTagName('container')) == 1)
        container = dom.getElementsByTagName('container')[0]
        self.assert_(len(container.getElementsByTagName('subdir')) == 1)
        subdir = container.getElementsByTagName('subdir')[0]
        self.assertEquals(unicode(subdir.attributes['name'].value),
                          u'<\'sub\' "dir">/')
        self.assert_(len(subdir.getElementsByTagName('name')) == 1)
        name = subdir.getElementsByTagName('name')[0]
        self.assertEquals(unicode(name.childNodes[0].data),
                          u'<\'sub\' "dir">/')
    def test_GET_path(self):
        # ?path=US lists only direct children of the pseudo-directory
        # (US/OK and US/TX), not grandchildren like US/TX/B or US/UT/A.
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i in ('US/TX', 'US/TX/B', 'US/OK', 'US/OK/B', 'US/UT/A'):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?path=US&format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(
            simplejson.loads(resp.body),
            [{"name": "US/OK", "hash": "x", "bytes": 0,
              "content_type": "text/plain",
              "last_modified": "1970-01-01T00:00:01.000000"},
             {"name": "US/TX", "hash": "x", "bytes": 0,
              "content_type": "text/plain",
              "last_modified": "1970-01-01T00:00:01.000000"}])
def test_GET_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
    def test_through_call(self):
        # Drive the controller through the raw WSGI interface: a GET for a
        # nonexistent container returns a 404 status line and no errors.
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # Capture the status line and headers passed by the app.
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/sda1/p/a/c',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '404 ')
    def test_through_call_invalid_path(self):
        # A path without the device/partition/account components ('/bob')
        # is answered with a 400 status line through the WSGI interface.
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # Capture the status line and headers passed by the app.
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/bob',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '400 ')
    def test_through_call_invalid_path_utf8(self):
        # A path containing an invalid byte ('\x00') is rejected with a
        # 412 status line through the WSGI interface.
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # Capture the status line and headers passed by the app.
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '\x00',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '412 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_params_format(self):
req = Request.blank(
'/sda1/p/a/c',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'})
req.get_response(self.controller)
for format in ('xml', 'json'):
req = Request.blank('/sda1/p/a/c?format=%s' % format,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
    def test_params_utf8(self):
        # Bad UTF8 sequence, all parameters should cause 400 error
        for param in ('delimiter', 'limit', 'marker', 'path', 'prefix',
                      'end_marker', 'format'):
            req = Request.blank('/sda1/p/a/c?%s=\xce' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 400,
                              "%d on param %s" % (resp.status_int, param))
        # Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
        req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 412,
                          "%d on param delimiter" % (resp.status_int))
        # Create the container so the remaining GETs can succeed.
        req = Request.blank('/sda1/p/a/c',
                            headers={'X-Timestamp': normalize_timestamp(1)},
                            environ={'REQUEST_METHOD': 'PUT'})
        req.get_response(self.controller)
        # Good UTF8 sequence, ignored for limit, doesn't affect other queries
        for param in ('limit', 'marker', 'path', 'prefix', 'end_marker',
                      'format'):
            req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 204,
                              "%d on param %s" % (resp.status_int, param))
def test_put_auto_create(self):
headers = {'x-timestamp': normalize_timestamp(1),
'x-size': '0',
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/.o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_delete_auto_create(self):
headers = {'x-timestamp': normalize_timestamp(1)}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/.c/.o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
    def test_content_type_on_HEAD(self):
        # HEAD must report the listing content type selected by ?format=
        # or by the Accept header, defaulting to text/plain.
        Request.blank('/sda1/p/a/o',
                      headers={'X-Timestamp': normalize_timestamp(1)},
                      environ={'REQUEST_METHOD': 'PUT'}).get_response(
                          self.controller)
        env = {'REQUEST_METHOD': 'HEAD'}
        req = Request.blank('/sda1/p/a/o?format=xml', environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/xml')
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank('/sda1/p/a/o?format=json', environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(resp.charset, 'utf-8')
        # No format and no Accept: plain text is the default.
        req = Request.blank('/sda1/p/a/o', environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank(
            '/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank(
            '/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/xml')
        self.assertEquals(resp.charset, 'utf-8')
    def test_updating_multiple_container_servers(self):
        # A container PUT carrying comma-separated X-Account-Host/Device
        # values must fan out one account update per host, with matching
        # device and identical bookkeeping headers.
        http_connect_args = []
        def fake_http_connect(ipaddr, port, device, partition, method, path,
                              headers=None, query_string=None, ssl=False):
            # Record every connection attempt; return an object that mimics
            # a successful HTTP response.
            class SuccessfulFakeConn(object):
                @property
                def status(self):
                    return 200
                def getresponse(self):
                    return self
                def read(self):
                    return ''
            captured_args = {'ipaddr': ipaddr, 'port': port,
                             'device': device, 'partition': partition,
                             'method': method, 'path': path, 'ssl': ssl,
                             'headers': headers, 'query_string': query_string}
            http_connect_args.append(
                dict((k, v) for k, v in captured_args.iteritems()
                     if v is not None))
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '12345',
                     'X-Account-Partition': '30',
                     'X-Account-Host': '1.2.3.4:5, 6.7.8.9:10',
                     'X-Account-Device': 'sdb1, sdf1'})
        # Swap in the recording stub for the duration of the request.
        orig_http_connect = container_server.http_connect
        try:
            container_server.http_connect = fake_http_connect
            req.get_response(self.controller)
        finally:
            container_server.http_connect = orig_http_connect
        # Sort for deterministic comparison: one update per account host,
        # paired with its corresponding device.
        http_connect_args.sort(key=operator.itemgetter('ipaddr'))
        self.assertEquals(len(http_connect_args), 2)
        self.assertEquals(
            http_connect_args[0],
            {'ipaddr': '1.2.3.4',
             'port': '5',
             'path': '/a/c',
             'device': 'sdb1',
             'partition': '30',
             'method': 'PUT',
             'ssl': False,
             'headers': HeaderKeyDict({
                 'x-bytes-used': 0,
                 'x-delete-timestamp': '0',
                 'x-object-count': 0,
                 'x-put-timestamp': '0000012345.00000',
                 'referer': 'PUT http://localhost/sda1/p/a/c',
                 'user-agent': 'container-server %d' % os.getpid(),
                 'x-trans-id': '-'})})
        self.assertEquals(
            http_connect_args[1],
            {'ipaddr': '6.7.8.9',
             'port': '10',
             'path': '/a/c',
             'device': 'sdf1',
             'partition': '30',
             'method': 'PUT',
             'ssl': False,
             'headers': HeaderKeyDict({
                 'x-bytes-used': 0,
                 'x-delete-timestamp': '0',
                 'x-object-count': 0,
                 'x-put-timestamp': '0000012345.00000',
                 'referer': 'PUT http://localhost/sda1/p/a/c',
                 'user-agent': 'container-server %d' % os.getpid(),
                 'x-trans-id': '-'})})
def test_serv_reserv(self):
# Test replication_server flag was set from configuration file.
container_controller = container_server.ContainerController
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertEquals(container_controller(conf).replication_server, None)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(container_controller(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(container_controller(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE']
for method_name in obj_methods:
method = getattr(self.controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.controller, method_name)
self.assertEquals(method.replication, True)
    def test_correct_allowed_method(self):
        # Test correct work for allowed method using
        # swift.container.server.ContainerController.__call__
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'false'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        method_res = mock.MagicMock()
        # Replace the PUT handler with a @public mock whose returned
        # response object, when invoked as a WSGI app, yields method_res;
        # __call__ returning method_res proves it dispatched to the mock.
        mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
        with mock.patch.object(self.controller, method, new=mock_method):
            response = self.controller.__call__(env, start_response)
            self.assertEqual(response, method_res)
def test_not_allowed_method(self):
    # Verify ContainerController.__call__ rejects a replication-only
    # method when the server runs with replication_server disabled:
    # the handler must NOT be invoked and a 405 body is returned.
    inbuf = StringIO()
    errbuf = StringIO()
    outbuf = StringIO()
    self.controller = container_server.ContainerController(
        {'devices': self.testdir, 'mount_check': 'false',
         'replication_server': 'false'})

    def start_response(*args):
        """Sends args to outbuf"""
        outbuf.writelines(args)

    method = 'PUT'
    # Minimal WSGI environ describing a container PUT request.
    env = {'REQUEST_METHOD': method,
           'SCRIPT_NAME': '',
           'PATH_INFO': '/sda1/p/a/c',
           'SERVER_NAME': '127.0.0.1',
           'SERVER_PORT': '8080',
           'SERVER_PROTOCOL': 'HTTP/1.0',
           'CONTENT_LENGTH': '0',
           'wsgi.version': (1, 0),
           'wsgi.url_scheme': 'http',
           'wsgi.input': inbuf,
           'wsgi.errors': errbuf,
           'wsgi.multithread': False,
           'wsgi.multiprocess': False,
           'wsgi.run_once': False}
    # Expected 405 response body produced by the server for refused verbs.
    answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
              'allowed for this resource.</p></html>']
    # Mark the stub handler as replication-only; with replication_server
    # 'false' __call__ must refuse it even though it is @public.
    mock_method = replication(public(lambda x: mock.MagicMock()))
    with mock.patch.object(self.controller, method, new=mock_method):
        response = self.controller.__call__(env, start_response)
        self.assertEqual(response, answer)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.clique'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import re
import StringIO
import unittest
from grit import clique
from grit import exception
from grit import pseudo
from grit import tclib
from grit import grd_reader
from grit import util
class MessageCliqueUnittest(unittest.TestCase):
  """Tests for clique.UberClique: clique creation and id sharing,
  translation lookup/fallback, best-clique selection, gatherer
  integration, custom types and translateability rules."""

  def testClique(self):
    factory = clique.UberClique()
    msg = tclib.Message(text='Hello USERNAME, how are you?',
                        placeholders=[
                          tclib.Placeholder('USERNAME', '%s', 'Joi')])
    c = factory.MakeClique(msg)
    self.failUnless(c.GetMessage() == msg)
    self.failUnless(c.GetId() == msg.GetId())
    msg_fr = tclib.Translation(text='Bonjour USERNAME, comment ca va?',
                               id=msg.GetId(), placeholders=[
                                 tclib.Placeholder('USERNAME', '%s', 'Joi')])
    msg_de = tclib.Translation(text='Guten tag USERNAME, wie geht es dir?',
                               id=msg.GetId(), placeholders=[
                                 tclib.Placeholder('USERNAME', '%s', 'Joi')])
    c.AddTranslation(msg_fr, 'fr')
    # Adding through the factory should route to the same clique by id.
    factory.FindCliqueAndAddTranslation(msg_de, 'de')
    for lang in ('en', 'fr', 'de'):
      self.failUnless(lang in c.clique)
    self.failUnless(c.MessageForLanguage('fr').GetRealContent() ==
                    msg_fr.GetRealContent())
    # Requesting a missing language with fallback disabled must raise.
    try:
      c.MessageForLanguage('zh-CN', False)
      self.fail('Should have gotten exception')
    except:
      pass
    # With fallback enabled a substitute message is returned instead.
    self.failUnless(c.MessageForLanguage('zh-CN', True) != None)
    rex = re.compile('fr|de|bingo')
    self.failUnless(len(c.AllMessagesThatMatch(rex, False)) == 2)
    self.failUnless(c.AllMessagesThatMatch(rex, True)[pseudo.PSEUDO_LANG] != None)

  def testBestClique(self):
    # When several cliques share an id, BestCliquePerId keeps one per id:
    # a human-written description wins over an 'ID: ...' description,
    # which in turn wins over an empty one.
    factory = clique.UberClique()
    factory.MakeClique(tclib.Message(text='Alfur', description='alfaholl'))
    factory.MakeClique(tclib.Message(text='Alfur', description=''))
    factory.MakeClique(tclib.Message(text='Vaettur', description=''))
    factory.MakeClique(tclib.Message(text='Vaettur', description=''))
    factory.MakeClique(tclib.Message(text='Troll', description=''))
    factory.MakeClique(tclib.Message(text='Gryla', description='ID: IDS_GRYLA'))
    factory.MakeClique(tclib.Message(text='Gryla', description='vondakerling'))
    factory.MakeClique(tclib.Message(text='Leppaludi', description='ID: IDS_LL'))
    factory.MakeClique(tclib.Message(text='Leppaludi', description=''))
    count_best_cliques = 0
    for c in factory.BestCliquePerId():
      count_best_cliques += 1
      msg = c.GetMessage()
      text = msg.GetRealContent()
      description = msg.GetDescription()
      if text == 'Alfur':
        self.failUnless(description == 'alfaholl')
      elif text == 'Gryla':
        self.failUnless(description == 'vondakerling')
      elif text == 'Leppaludi':
        self.failUnless(description == 'ID: IDS_LL')
    # Five distinct message texts -> five best cliques.
    self.failUnless(count_best_cliques == 5)

  def testAllInUberClique(self):
    # Every message gathered from a .grd (inline <message> plus messages
    # found inside structures) must land in the UberClique.
    resources = grd_reader.Parse(util.WrapInputStream(
      StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
</messages>
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="grit/testdata/klonk.rc" />
<structure type="tr_html" name="ID_HTML" file="grit/testdata/simple.html" />
</structures>
</release>
</grit>''')), util.PathFromRoot('.'))
    resources.SetOutputContext('en', {})
    resources.RunGatherers(True)
    content_list = []
    # NOTE: the loop variable shadows the 'clique' module inside this loop.
    for clique_list in resources.UberClique().cliques_.values():
      for clique in clique_list:
        content_list.append(clique.GetMessage().GetRealContent())
    self.failUnless('Hello %s, how are you doing today?' in content_list)
    self.failUnless('Jack "Black" Daniels' in content_list)
    self.failUnless('Hello!' in content_list)

  def testCorrectExceptionIfWrongEncodingOnResourceFile(self):
    '''This doesn't really belong in this unittest file, but what the heck.'''
    # klonk.rc is utf-16; parsing it without encoding="utf-16" must fail
    # with SectionNotFound rather than silently producing garbage.
    resources = grd_reader.Parse(util.WrapInputStream(
      StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" file="grit/testdata/klonk.rc" />
</structures>
</release>
</grit>''')), util.PathFromRoot('.'))
    self.assertRaises(exception.SectionNotFound, resources.RunGatherers, True)

  def testSemiIdenticalCliques(self):
    # Same text, different placeholder *presentation* ('$1' vs '%s'):
    # the ids are equal, so one translation serves both cliques, each
    # rendering it with its own placeholder presentation.
    messages = [
      tclib.Message(text='Hello USERNAME',
                    placeholders=[tclib.Placeholder('USERNAME', '$1', 'Joi')]),
      tclib.Message(text='Hello USERNAME',
                    placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi')]),
    ]
    self.failUnless(messages[0].GetId() == messages[1].GetId())
    # Both of the above would share a translation.
    translation = tclib.Translation(id=messages[0].GetId(),
                                    text='Bonjour USERNAME',
                                    placeholders=[tclib.Placeholder(
                                      'USERNAME', '$1', 'Joi')])
    factory = clique.UberClique()
    cliques = [factory.MakeClique(msg) for msg in messages]
    for clq in cliques:
      clq.AddTranslation(translation, 'fr')
    self.failUnless(cliques[0].MessageForLanguage('fr').GetRealContent() ==
                    'Bonjour $1')
    self.failUnless(cliques[1].MessageForLanguage('fr').GetRealContent() ==
                    'Bonjour %s')

  def testMissingTranslations(self):
    # A fallback request (third arg True) only warns; a non-fallback
    # miss is an error.  Both show up in the report with the message id.
    messages = [ tclib.Message(text='Hello'), tclib.Message(text='Goodbye') ]
    factory = clique.UberClique()
    cliques = [factory.MakeClique(msg) for msg in messages]
    cliques[1].MessageForLanguage('fr', False, True)
    self.failUnless(not factory.HasMissingTranslations())
    cliques[0].MessageForLanguage('de', False, False)
    self.failUnless(factory.HasMissingTranslations())
    report = factory.MissingTranslationsReport()
    self.failUnless(report.count('WARNING') == 1)
    self.failUnless(report.count('8053599568341804890 "Goodbye" fr') == 1)
    self.failUnless(report.count('ERROR') == 1)
    self.failUnless(report.count('800120468867715734 "Hello" de') == 1)

  def testCustomTypes(self):
    factory = clique.UberClique()
    message = tclib.Message(text='Bingo bongo')
    c = factory.MakeClique(message)
    try:
      c.SetCustomType(DummyCustomType())
      self.fail()
    except:
      pass # expected case - 'Bingo bongo' does not start with 'jjj'
    message = tclib.Message(text='jjjBingo bongo')
    c = factory.MakeClique(message)
    # NewClassInstance checks the instance actually subclasses CustomType.
    c.SetCustomType(util.NewClassInstance(
      'grit.clique_unittest.DummyCustomType', clique.CustomType))
    translation = tclib.Translation(id=message.GetId(), text='Bilingo bolongo')
    c.AddTranslation(translation, 'fr')
    # The custom type must have modified the translation to conform.
    self.failUnless(c.MessageForLanguage('fr').GetRealContent().startswith('jjj'))

  def testWhitespaceMessagesAreNontranslateable(self):
    # Messages consisting solely of whitespace are never translateable,
    # even when the clique is created with translateable=True.
    factory = clique.UberClique()
    message = tclib.Message(text=' \t')
    c = factory.MakeClique(message, translateable=True)
    self.failIf(c.IsTranslateable())
    message = tclib.Message(text='\n \n ')
    c = factory.MakeClique(message, translateable=True)
    self.failIf(c.IsTranslateable())
    message = tclib.Message(text='\n hello')
    c = factory.MakeClique(message, translateable=True)
    self.failUnless(c.IsTranslateable())
class DummyCustomType(clique.CustomType):
  """Custom type for testCustomTypes: valid content starts with 'jjj'."""

  def Validate(self, message):
    # A message conforms iff its real content starts with 'jjj'.
    return message.GetRealContent().startswith('jjj')

  def ValidateAndModify(self, lang, translation):
    # Fix: return the validation result instead of silently dropping it
    # (the original computed is_ok and fell off the end, returning None);
    # the CustomType contract reports whether the translation was valid.
    is_ok = self.Validate(translation)
    self.ModifyEachTextPart(lang, translation)
    return is_ok

  def ModifyTextPart(self, lang, text):
    # Prefix each text part so that modified translations validate.
    return 'jjj%s' % text
# Support direct invocation: python clique_unittest.py
if __name__ == '__main__':
  unittest.main()
| |
from __future__ import print_function
import numpy as np
import matplotlib.pylab as plt
from openmdao.api import IndepVarComp, Component, Group, Problem, ExecComp, ScipyOptimizer
class UnderwaterOptimization(Component):
    """
    Notes
    -----
    Estimates submerged tube tunnel cost and pylon material cost.
    Tunnel thickness, pylon radius, and pylon spacing are optimized by
    the problem driver; many parameters are currently taken from
    hyperloop alpha and will eventually come from the mission trajectory.

    Params
    ------
    tube_area : float
        Inner tube area. Default is 3.8013 m**2
    rho_tube : float
        Density of tube material. Default is 7820 kg/m**3
    E_tube : float
        Young's modulus of tube material. Default value is 200e9 Pa
    v_tube : float
        Poisson's ratio of tube material. Default value is .3
    Su_tube : float
        Ultimate strength of tube material. Default value is 152e6 Pa
    sf : float
        Tube safety factor. Default value is 1.5
    g : float
        Gravitational acceleration. Default value is 9.81 m/s**2
    unit_cost_tube : float
        Cost of tube material per unit mass. Default value is .3307 USD/kg
    p_tunnel : float
        Pressure of air in tube. Default value is 100 Pa (the run script
        overrides this to 850 Pa). Value will come from vacuum component
    p_atm : float
        Pressure of atmosphere at the water surface. Default value is
        101.3e3 Pa. Ambient pressure at depth is computed internally as
        p_atm + rho_water*g*depth
    alpha_tube : float
        Coefficient of thermal expansion of tube material. Default value is 0.0
    dT_tube : float
        Tunnel temperature delta vs. a reference temperature. Default value is 0.0
    m_pod : float
        total mass of pod. Default value is 3100 kg. Value will come from
        weight component
    t : float
        Thickness of the tube. Default value is .05 m. Value is optimized
        in problem driver. (The tube radius r is derived internally from
        tube_area rather than passed as a param.)
    rho_pylon : float
        Density of pylon material. Default value is 7820 kg/m**3
    E_pylon : float
        Young's modulus of pylon material. Default value is 200e9 Pa
    v_pylon : float
        Poisson's ratio of pylon material. Default value is .2
    Su_pylon : float
        Ultimate strength of pylon material. Default value is 152e6 Pa
    unit_cost_pylon : float
        Cost of pylon material per unit mass. Default value is .3307 USD/kg
    h : float
        Height of each pylon. Default value is 10 m.
    r_pylon : float
        Radius of each pylon. Default value is .1 m. Value will be
        optimized in problem driver
    vac_weight : float
        Total weight of vacuums. Default value is 1500.0 kg. Value will
        come from vacuum component (currently read but unused in
        solve_nonlinear)
    rho_water : float
        Density of seawater. Default value is 1025 kg/m**3
    depth : float
        Depth of the submerged tube. Default value is 10 m

    Returns
    -------
    m_pylon : float
        mass of individual pylon in kg/pylon
    m_prime: float
        Calculates mass per unit length of tube in kg/m
    von_mises : float
        Von Mises stress in the tube in Pa
    total_material_cost : float
        returns total cost of tube and pylon materials per unit distance in USD/m
    R : float
        Returns vertical component of force on each pylon in N
    delta : float
        Maximum deflection of tube between pylons in m
    dx : float
        outputs distance in between pylons in m
    t_crit :
        Minimum tube thickness to satisfy vacuum tube buckling condition in m

    Notes
    -----
    [1] USA. NASA. Buckling of Thin-Walled Circular Cylinders. N.p.: n.p., n.d. Web. 13 June 2016.
    """

    def __init__(self):
        super(UnderwaterOptimization, self).__init__()

        # --- Tube material properties ---
        self.add_param('rho_tube',
                       val=7820.0,
                       units='kg/m**3',
                       desc='density of steel')
        self.add_param('E_tube',
                       val=200.0 * (10**9),
                       units='Pa',
                       desc='Young\'s Modulus of tube')
        self.add_param('v_tube', val=.3, desc='Poisson\'s ratio of tube')
        self.add_param('Su_tube',
                       val=152.0e6,
                       units='Pa',
                       desc='ultimate strength of tube')
        self.add_param('sf', val=1.5, desc='safety factor')
        self.add_param('g', val=9.81, units='m/s**2', desc='gravity')
        self.add_param('unit_cost_tube',
                       val=.3307,
                       units='USD/kg',
                       desc='cost of tube materials per unit mass')
        self.add_param('p_tunnel',
                       val=100.0,
                       units='Pa',
                       desc='Tunnel Pressure')
        self.add_param('p_atm',
                       val=101300.0,
                       units='Pa',
                       desc='Ambient Pressure')
        self.add_param('alpha_tube',
                       val=0.0,
                       desc='Coefficient of Thermal Expansion of tube')
        self.add_param(
            'dT_tube', val=0.0,
            units='K', desc='Temperature change')
        self.add_param('m_pod', val=3100.0, units='kg', desc='mass of pod')
        self.add_param('tube_area', val=3.8013, units='m**2', desc='inner tube area')
        #self.add_param('r', val=1.1, units='m', desc='inner tube radius')
        self.add_param('t', val=.05, units='m', desc='tube thickness')
        #self.add_param('dx', val = 500.0, units = 'm', desc = 'distance between pylons')

        # --- Pylon material properties ---
        self.add_param('rho_pylon',
                       val=7820.0,
                       units='kg/m**3',
                       desc='density of pylon material')
        self.add_param('E_pylon',
                       val=200 * (10**9),
                       units='Pa',
                       desc='Young\'s Modulus of pylon')
        self.add_param('v_pylon', val=.2, desc='Poisson\'s ratio of pylon')
        self.add_param('Su_pylon',
                       val=152.0 * (10**6),
                       units='Pa',
                       desc='ultimate strength_pylon')
        self.add_param('unit_cost_pylon',
                       val=.3307,
                       units='USD/kg',
                       desc='cost of pylon materials per unit mass')
        self.add_param('h', val=10.0, units='m', desc='height of pylon')
        self.add_param('r_pylon', val=.1, units='m', desc='inner tube radius')
        self.add_param('vac_weight', val=1500.0, units='kg', desc='vacuum weight')
        self.add_param('rho_water', val = 1025.0, units = 'kg/m**3', desc = 'Density of seawater')
        self.add_param('depth', val = 10.0, units = 'm', desc = 'Depth of submerged tube')

        # --- Outputs ---
        self.add_output('m_pylon',
                        val=0.0,
                        units='kg',
                        desc='total mass of the pylon')
        self.add_output('m_prime',
                        val=100.0,
                        units='kg/m',
                        desc='total mass of the tube per unit length')
        self.add_output('von_mises',
                        val=0.0,
                        units='Pa',
                        desc='max Von Mises Stress')
        self.add_output('total_material_cost',
                        val=0.0,
                        units='USD/m',
                        desc='cost of materials')
        self.add_output('R', val=0.0, units='N', desc='Force on pylon')
        self.add_output('delta',
                        val=0.0,
                        units='m',
                        desc='max deflection inbetween pylons')
        self.add_output('dx',
                        val=500.0,
                        units='m',
                        desc='distance between pylons')
        self.add_output('t_crit',
                        val=0.0,
                        units='m',
                        desc='Minimum tunnel thickness for buckling')

    def solve_nonlinear(self, params, unknowns, resids):
        '''total material cost = ($/kg_tunnel)*m_prime + ($/kg_pylon)*m_pylon*(1/dx)
        m_prime = mass of tunnel per unit length = rho_tube*pi*((r+t)^2-r^2)
        m_pylon = mass of single pylon = rho_pylon*pi*(r_pylon^2)*h
        Constraint equations derived from yield on buckling conditions
        '''
        rho_tube = params['rho_tube']
        E_tube = params['E_tube']
        v_tube = params['v_tube']
        alpha_tube = params['alpha_tube']
        dT_tube = params['dT_tube']
        unit_cost_tube = params['unit_cost_tube']
        g = params['g']
        tube_area = params['tube_area']
        #r = params['r']
        t = params['t']
        m_pod = params['m_pod']
        p_tunnel = params['p_tunnel']
        p_atm = params['p_atm']
        Su_pylon = params['Su_pylon']
        sf = params['sf']
        rho_pylon = params['rho_pylon']
        E_pylon = params['E_pylon']
        r_pylon = params['r_pylon']
        unit_cost_pylon = params['unit_cost_pylon']
        h = params['h']
        vac_weight = params['vac_weight']  # NOTE(review): read but never used below — confirm intent
        rho_water = params['rho_water']
        depth = params['depth']

        #Compute intermediate variable
        r = np.sqrt(tube_area/np.pi)  # inner radius derived from inner tube area
        p_ambient = p_atm + rho_water*g*depth  # hydrostatic pressure at depth
        #print(r)

        # Net distributed load per unit length: buoyancy of displaced water
        # minus tube self-weight (positive = net upward for a light tube).
        q = (rho_water*np.pi*((r+t)**2.0)*g) - (rho_tube * np.pi * (((r + t)**2) - (r**2)) * g) #Calculate distributed load
        dp = p_ambient - p_tunnel #Calculate delta pressure
        I_tube = (np.pi / 4.0) * ((
            (r + t)**4) - (r**4)) #Calculate moment of inertia of tube
        m_prime = rho_tube * np.pi * (((r + t)**2) - (r**2)) #Calculate mass per unit length
        # Pylon spacing set by pylon yield under the net distributed load.
        # NOTE(review): unlike the commented-out cost expression below, this
        # spacing does not account for the pod weight m_pod — confirm.
        dx = ((2 * (Su_pylon / sf) * np.pi *(r_pylon**2))) / q #Calculate dx
        M = (q * ((dx**2) / 8.0)) #Calculate max moment
        sig_theta = (dp * r) / t #Calculate hoop stress
        sig_axial = ((dp * r) / (2 * t)) + (
            (M * r) / I_tube
        ) + alpha_tube * E_tube * dT_tube #Calculate axial stress
        von_mises = np.sqrt((((sig_theta**2) + (sig_axial**2) + (
            (sig_axial - sig_theta)**2)) /
                             2.0)) #Calculate Von Mises stress
        m_pylon = rho_pylon * np.pi * (r_pylon**
                                       2) * h #Calculate mass of single pylon

        # unknowns['total_material_cost'] = (unit_cost_tube * (rho_tube * np.pi * ((
        #     (r + t)**2) - (r**2)))) + (unit_cost_pylon * m_pylon * (1 / (
        #         ((2 * (Su_pylon / sf) * np.pi * (r_pylon**2)) - m_pod * g) /
        #         (m_prime * g))))
        unknowns['total_material_cost'] = (unit_cost_tube * (rho_tube * np.pi * ((
            (r + t)**2) - (r**2)))) + (unit_cost_pylon * m_pylon)/dx
        unknowns['m_prime'] = m_prime
        unknowns['von_mises'] = von_mises
        # Max midspan deflection of a uniformly loaded span (5qL^4/384EI).
        unknowns['delta'] = (5.0 * q * (dx**4)) / (384.0 * E_tube * I_tube)
        unknowns['m_pylon'] = m_pylon
        unknowns['R'] = .5 * m_prime * dx * g + .5 * m_pod * g
        unknowns['dx'] = dx
        # Minimum thickness against external-pressure buckling, per ref [1].
        unknowns['t_crit'] = r * ((
            (4.0 * dp * (1.0 - (v_tube**2))) / E_tube)**(1.0 / 3.0))
if __name__ == '__main__':
    # Sweep inner tube area and record the optimized pylon spacing,
    # tube thickness, pylon radius and material cost for each area.
    top = Problem()
    root = top.root = Group()

    # Independent variables fed to the component and constraints.
    # FIX: rho_pylon units were mislabeled 'Pa'; density is kg/m**3.
    params = (
        ('tube_area', 53.134589, {'units': 'm**2'}),
        ('t', 5.0, {'units': 'm'}),
        ('r_pylon', 1.1, {'units': 'm'}),
        ('Su_tube', 152.0e6, {'units': 'Pa'}),
        ('sf', 1.5),
        ('p_ambient', 850.0, {'units': 'Pa'}),
        ('p_tunnel', 101300.0, {'units': 'Pa'}),
        ('v_tube', .3),
        ('rho_tube', 7820.0, {'units': 'kg/m**3'}),
        ('rho_pylon', 2400.0, {'units': 'kg/m**3'}),
        ('Su_pylon', 40.0e6, {'units': 'Pa'}),
        ('E_pylon', 41.0e9, {'units': 'Pa'}),
        ('h', 10.0, {'units': 'm'}),
        ('m_pod', 3100.0, {'units': 'kg'})
    )
    root.add('input_vars', IndepVarComp(params))
    root.add('p', UnderwaterOptimization())
    # Yield-stress constraint for the tube: c1 >= 0.
    root.add('con1', ExecComp('c1 = ((Su_tube/sf) - von_mises)'))
    # Buckling constraint for the tube: thickness above critical, c2 >= 0.
    root.add('con2', ExecComp('c2 = t - t_crit'))

    root.connect('input_vars.tube_area', 'p.tube_area')
    root.connect('input_vars.t', 'p.t')
    root.connect('input_vars.r_pylon', 'p.r_pylon')
    root.connect('input_vars.Su_tube', 'con1.Su_tube')
    root.connect('input_vars.sf', 'con1.sf')
    root.connect('p.von_mises', 'con1.von_mises')
    root.connect('input_vars.t', 'con2.t')
    root.connect('p.t_crit', 'con2.t_crit')

    # Complex-step derivatives for the analysis component.
    root.p.deriv_options['type'] = "cs"
    root.p.deriv_options['step_size'] = 1.0e-10

    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.add_desvar('input_vars.t', lower=.001, scaler=100.0)
    top.driver.add_desvar('input_vars.r_pylon', lower=.001, scaler=1.0)
    top.driver.add_objective('p.total_material_cost', scaler=1.0e-4)
    top.driver.add_constraint('con1.c1', lower=0.0, scaler=1000.0)
    top.driver.add_constraint('con2.c2', lower=0.0)

    top.setup()
    top['p.p_tunnel'] = 850.0
    top['p.h'] = 10.0
    top['p.depth'] = 10.0

    import csv
    # NOTE: hardcoded user-specific output path; adjust before running
    # on another machine.
    f = open('/Users/kennethdecker/Desktop/Paper figures/water_structural_trades.csv', 'wt')
    writer = csv.writer(f)
    writer.writerow(('A_tube', 'dx', 'cost'))

    A_tube = np.linspace(20.0, 50.0, num=30)
    dx = np.zeros((1, len(A_tube)))
    t_tube = np.zeros((1, len(A_tube)))
    r_pylon = np.zeros((1, len(A_tube)))
    cost = np.zeros((1, len(A_tube)))

    for i in range(len(A_tube)):
        top['input_vars.tube_area'] = A_tube[i]
        top.run()
        dx[0, i] = top['p.dx']
        t_tube[0, i] = top['p.t']
        r_pylon[0, i] = top['p.r_pylon']
        cost[0, i] = top['p.total_material_cost']
        print(top['p.r_pylon'])
        print(r_pylon[0, i])
        # FIX: the row write was commented out, leaving an empty CSV;
        # record each sweep point as intended.
        writer.writerow((A_tube[i], dx[0, i], cost[0, i]))
    # FIX: close the output file (it was previously left open).
    f.close()

    # FIX: plt.hold() was deprecated/removed in matplotlib (hold-on is the
    # default behavior) and grid('on') must be grid(True).
    # FIX: label now reflects the actual pod mass configured above.
    line1, = plt.plot(A_tube, dx[0, :], 'b-', linewidth=2.0, label='m_pod = 3100 kg')
    plt.xlabel('Tube Area (m^2)', fontsize=12, fontweight='bold')
    plt.ylabel('Pylon Spacing (m)', fontsize=12, fontweight='bold')
    plt.grid(True)
    plt.show()

    plt.subplot(211)
    line1, = plt.plot(A_tube, t_tube[0, :], 'b-', linewidth=2.0, label='m_pod = 3100 kg')
    plt.ylabel('tube thickness (m)', fontsize=12, fontweight='bold')
    plt.grid(True)
    plt.subplot(212)
    line1, = plt.plot(A_tube, r_pylon[0, :], 'b-', linewidth=2.0, label='m_pod = 3100 kg')
    plt.xlabel('Tube Area (m^2)', fontsize=12, fontweight='bold')
    plt.ylabel('Pylon Radius (m)', fontsize=12, fontweight='bold')
    plt.grid(True)
    plt.show()

    # Summary of the final (largest-area) optimization point.
    print('\n')
    # FIX: message said 'per m' while printing a per-km value.
    print('total material cost is $%6.2f/km' %
          (top['p.total_material_cost'] * (1.0e3)))
    print('pylon radius is %6.3f m' % top['p.r_pylon'])
    print('tube thickness is %6.4f mm' % (top['p.t'] * (1.0e3)))
    print('mass per unit length is %6.2f kg/m' % top['p.m_prime'])
    print('vertical force on each pylon is %6.2f kN' % (top['p.R'] / (1.0e3)))
    print('Von Mises stress is %6.3f MPa' % (top['p.von_mises'] / (1.0e6)))
    print('distance between pylons is %6.2f m' % top['p.dx'])
    print('max deflection is %6.4f mm' % (top['p.delta'] * (1.0e3)))
    print('critical thickness %6.4f' % top['p.t_crit'])
    print('\n')
    print('con1 = %f' % top['con1.c1'])
    print('con2 = %f' % top['con2.c2'])
    if top['con1.c1'] < 0.0:
        print('con1 not satisfied')
    elif top['con2.c2'] < 0.0:
        print('con2 not satisfied')
    else:
        print('Yield constraints are satisfied')
| |
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The element module defines reference elements such as the
:class:`LineReference` and :class:`TriangleReference`, but also more exotic
objects like the :class:`MosaicReference`. A set of (interconnected) elements
together form a :class:`nutils.topology.Topology`. Elements have edges and
children (for refinement), which are in turn elements and map onto self by an
affine transformation. They also have a well defined reference coordinate
system, and provide pointsets for purposes of integration and sampling.
"""
from . import util, numpy, config, numeric, cache, transform, warnings, types, points, _
import re, math, itertools, operator, functools
## REFERENCE ELEMENTS
class Reference(types.Singleton):
'reference element'
__slots__ = 'ndims',
__cache__ = 'connectivity', 'edgechildren', 'ribbons', 'volume', 'centroid', '_linear_bernstein', 'getpoints'
@types.apply_annotations
def __init__(self, ndims:int):
super().__init__()
self.ndims = ndims
@property
def nverts(self):
return len(self.vertices)
__and__ = lambda self, other: self if self == other else NotImplemented
__or__ = lambda self, other: self if self == other else NotImplemented
__rand__ = lambda self, other: self.__and__(other)
__ror__ = lambda self, other: self.__or__(other)
__sub__ = __rsub__ = lambda self, other: self.empty if self == other else NotImplemented
__bool__ = __nonzero__ = lambda self: bool(self.volume)
@property
def empty(self):
return EmptyLike(self)
def __mul__(self, other):
assert isinstance(other, Reference)
return self if not other.ndims else other if not self.ndims else TensorReference(self, other)
def __pow__(self, n):
assert numeric.isint(n) and n >= 0
return getsimplex(0) if n == 0 \
else self if n == 1 \
else self * self**(n-1)
@property
def nedges(self):
return len(self.edge_transforms)
@property
def nchildren(self):
return len(self.child_transforms)
@property
def edges(self):
return list(zip(self.edge_transforms, self.edge_refs))
@property
def children(self):
return list(zip(self.child_transforms, self.child_refs))
@property
def connectivity(self):
# Nested tuple with connectivity information about edges of children:
# connectivity[ichild][iedge] = ioppchild (interface) or -1 (boundary).
connectivity = [-numpy.ones(child.nedges, dtype=int) for child in self.child_refs]
vmap = {}
for ichild, (ctrans, cref) in enumerate(self.children):
for iedge, etrans in enumerate(cref.edge_transforms):
v = ctrans * etrans
try:
jchild, jedge = vmap.pop(v.flipped)
except KeyError:
vmap[v] = ichild, iedge
else:
connectivity[jchild][jedge] = ichild
connectivity[ichild][iedge] = jchild
for etrans, eref in self.edges:
for ctrans in eref.child_transforms:
vmap.pop(etrans * ctrans, None)
assert not any(self.child_refs[ichild].edge_refs[iedge] for ichild, iedge in vmap.values()), 'not all boundary elements recovered'
return tuple(types.frozenarray(c, copy=False) for c in connectivity)
@property
def edgechildren(self):
edgechildren = []
for iedge, (etrans, eref) in enumerate(self.edges):
children = []
for ichild, ctrans in enumerate(eref.child_transforms):
ctrans_, etrans_ = etrans.swapup(ctrans)
ichild_ = self.child_transforms.index(ctrans_)
iedge_ = self.child_refs[ichild].edge_transforms.index(etrans_)
children.append((ichild_, iedge_))
edgechildren.append(types.frozenarray(children))
return tuple(edgechildren)
@property
def ribbons(self):
# tuples of (iedge1,jedge1), (iedge2,jedge2) pairs
assert self.ndims >= 2
transforms = {}
ribbons = []
for iedge1, (etrans1,edge1) in enumerate(self.edges):
if edge1:
for iedge2, (etrans2,edge2) in enumerate(edge1.edges):
if edge2:
key = tuple(sorted(tuple(p) for p in (etrans1 * etrans2).apply(edge2.vertices)))
try:
jedge1, jedge2 = transforms.pop(key)
except KeyError:
transforms[key] = iedge1, iedge2
else:
assert self.edge_refs[jedge1].edge_refs[jedge2] == edge2
ribbons.append(((iedge1,iedge2), (jedge1,jedge2)))
assert not transforms
return tuple(ribbons)
def getischeme(self, ischeme):
ischeme, degree = parse_legacy_ischeme(ischeme)
points = self.getpoints(ischeme, degree)
return points.coords, getattr(points, 'weights', None)
def getpoints(self, ischeme, degree):
raise Exception('unsupported ischeme for {}: {!r}'.format(self.__class__.__name__, ischeme))
def with_children(self, child_refs):
child_refs = tuple(child_refs)
if not any(child_refs):
return self.empty
if child_refs == self.child_refs:
return self
return WithChildrenReference(self, child_refs)
@property
def volume(self):
return self.getpoints('gauss', 1).weights.sum()
@property
def centroid(self):
gauss = self.getpoints('gauss', 1)
return gauss.coords.T.dot(gauss.weights) / gauss.weights.sum()
def trim(self, levels, maxrefine, ndivisions):
'trim element along levelset'
assert len(levels) == self.nvertices_by_level(maxrefine)
return self if not self or numpy.greater_equal(levels, 0).all() \
else self.empty if numpy.less_equal(levels, 0).all() \
else self.with_children(cref.trim(clevels, maxrefine-1, ndivisions)
for cref, clevels in zip(self.child_refs, self.child_divide(levels,maxrefine))) if maxrefine > 0 \
else self.slice(lambda vertices: numeric.dot(numeric.poly_eval(self._linear_bernstein[_], vertices), levels), ndivisions)
@property
def _linear_bernstein(self):
return self.get_poly_coeffs('bernstein', degree=1)
def slice(self, levelfunc, ndivisions):
# slice along levelset by recursing over dimensions
levels = levelfunc(self.vertices)
assert numeric.isint(ndivisions)
assert len(levels) == self.nverts
if numpy.greater_equal(levels, 0).all():
return self
if numpy.less_equal(levels, 0).all():
return self.empty
nbins = 2**ndivisions
if self.ndims == 1:
l0, l1 = levels
xi = numpy.round(l0/(l0-l1) * nbins)
if xi in (0,nbins):
return self.empty if xi == 0 and l1 < 0 or xi == nbins and l0 < 0 else self
v0, v1 = self.vertices
midpoint = v0 + (xi/nbins) * (v1-v0)
refs = [edgeref if levelfunc(edgetrans.apply(numpy.zeros((1,0)))) > 0 else edgeref.empty for edgetrans, edgeref in self.edges]
else:
refs = [edgeref.slice(lambda vertices: levelfunc(edgetrans.apply(vertices)), ndivisions) for edgetrans, edgeref in self.edges]
if sum(ref != baseref for ref, baseref in zip(refs, self.edge_refs)) <= 1:
return self
if sum(bool(ref) for ref in refs) <= 1:
return self.empty
clevel = levelfunc(self.centroid[_])[0]
select = clevel*levels<=0 if clevel!=0 else levels!=0
levels = levels[select]
vertices = self.vertices[select]
xi = numpy.round(levels/(levels-clevel) * nbins)
midpoint = numpy.mean(vertices + (self.centroid-vertices)*(xi/nbins)[:,_], axis=0)
if tuple(refs) == tuple(self.edge_refs):
return self
if not any(refs):
return self.empty
mosaic = MosaicReference(self, refs, midpoint)
return self.empty if mosaic.volume == 0 else mosaic if mosaic.volume < self.volume else self
def cone(self, trans, tip):
return Cone(self, trans, tip)
def check_edges(self, tol=1e-15, print=print):
volume = 0
zero = 0
for trans, edge in self.edges:
if edge:
gauss = edge.getpoints('gauss', 1)
w_normal = gauss.weights[:,_] * trans.ext
zero += w_normal.sum(0)
volume += numeric.contract(trans.apply(gauss.coords), w_normal, axis=0)
if numpy.greater(abs(zero), tol).any():
print('divergence check failed: {} != 0'.format(zero))
if numpy.greater(abs(volume - self.volume), tol).any():
print('divergence check failed: {} != {}'.format(volume, self.volume))
def vertex_cover(self, ctransforms, maxrefine):
if maxrefine < 0:
raise Exception('maxrefine is too low')
npoints = self.nvertices_by_level(maxrefine)
allindices = numpy.arange(npoints)
if len(ctransforms) == 1:
ctrans, = ctransforms
assert not ctrans
return ((), self.getpoints('vertex', maxrefine).coords, allindices),
if maxrefine == 0:
raise Exception('maxrefine is too low')
cbins = [set() for ichild in range(self.nchildren)]
for ctrans in ctransforms:
ichild = self.child_transforms.index(ctrans[0])
cbins[ichild].add(ctrans[1:])
if not all(cbins):
raise Exception('transformations to not form an element cover')
fcache = cache.WrapperCache()
return tuple(((ctrans,) + trans, points, cindices[indices])
for ctrans, cref, cbin, cindices in zip(self.child_transforms, self.child_refs, cbins, self.child_divide(allindices,maxrefine))
for trans, points, indices in fcache[cref.vertex_cover](frozenset(cbin), maxrefine-1))
def __str__(self):
return self.__class__.__name__
__repr__ = __str__
  def get_ndofs(self, degree):
    'Number of degrees of freedom of a degree-``degree`` basis; provided by subclasses.'
    raise NotImplementedError
  def get_poly_coeffs(self, basis, **kwargs):
    'Polynomial coefficient array for the named ``basis``; provided by subclasses.'
    raise NotImplementedError
  def get_edge_dofs(self, degree, iedge):
    'Indices of the basis functions supported on edge ``iedge``; provided by subclasses.'
    raise NotImplementedError
# annotation helper used by types.apply_annotations: strict Reference check
strictreference = types.strict[Reference]
class EmptyLike(Reference):
  'inverse reference element'
  # Zero-volume counterpart of ``baseref``: shares its geometry (vertices,
  # edge/child transforms) but all edge and child references are empty too.
  __slots__ = 'baseref',
  volume = 0
  @property
  def empty(self):
    return self
  @types.apply_annotations
  def __init__(self, baseref:strictreference):
    self.baseref = baseref
    super().__init__(baseref.ndims)
  @property
  def vertices(self):
    return self.baseref.vertices
  @property
  def edge_transforms(self):
    return self.baseref.edge_transforms
  @property
  def edge_refs(self):
    return tuple(eref.empty for eref in self.baseref.edge_refs)
  @property
  def child_transforms(self):
    return self.baseref.child_transforms
  @property
  def child_refs(self):
    return tuple(cref.empty for cref in self.baseref.child_refs)
  # set algebra: empty absorbs '&' and '-', is neutral for '|' and right '-'
  __and__ = __sub__ = lambda self, other: self if other.ndims == self.ndims else NotImplemented
  __or__ = lambda self, other: other if other.ndims == self.ndims else NotImplemented
  __rsub__ = lambda self, other: other if other.ndims == self.ndims else NotImplemented
  def trim(self, levels, maxrefine, ndivisions):
    # trimming an empty element yields the empty element
    return self
  def inside(self, point, eps=0):
    return False
class RevolutionReference(Reference):
  'modify gauss integration to always return a single point'
  # 1D pseudo-element representing a full revolution: quadrature collapses to
  # one point weighted by the full angle 2*pi, and every point is 'inside'.
  __slots__ = ()
  __cache__ = 'getpoints',
  def __init__(self):
    super().__init__(ndims=1)
  @property
  def vertices(self):
    return types.frozenarray([[0.]])
  @property
  def edge_transforms(self): # only used in check_edges
    return transform.Updim(numpy.zeros((1,0)), [-numpy.pi], isflipped=True), transform.Updim(numpy.zeros((1,0)), [+numpy.pi], isflipped=False)
  @property
  def edge_refs(self): # idem edge_transforms
    return PointReference(), PointReference()
  @property
  def child_transforms(self):
    return transform.Identity(1),
  @property
  def child_refs(self):
    # refinement is a no-op: the single child is the element itself
    return self,
  @property
  def simplices(self):
    return (transform.Identity(self.ndims), self),
  def getpoints(self, ischeme, degree):
    # single midpoint carrying the full circumference as weight
    return points.CoordsWeightsPoints([[0.]], [2 * numpy.pi])
  def inside(self, point, eps=0):
    return True
  def nvertices_by_level(self, n):
    return 1
  def child_divide(self, vals, n):
    return vals,
class SimplexReference(Reference):
  'simplex reference'
  # Common base for point/line/triangle/tetrahedron: vertices are the origin
  # plus the unit vectors, edges are (ndims-1)-simplices, and bisection
  # yields 2**ndims self-similar children.
  __slots__ = ()
  __cache__ = 'edge_refs', 'edge_transforms', 'ribbons', '_get_poly_coeffs_bernstein', '_get_poly_coeffs_lagrange', '_integer_barycentric_coordinates'
  @property
  def vertices(self):
    return types.frozenarray(numpy.concatenate([numpy.zeros(self.ndims)[_,:], numpy.eye(self.ndims)], axis=0), copy=False)
  @property
  def edge_refs(self):
    assert self.ndims > 0
    return (getsimplex(self.ndims-1),) * (self.ndims+1)
  @property
  def edge_transforms(self):
    assert self.ndims > 0
    return tuple(transform.SimplexEdge(self.ndims, i) for i in range(self.ndims+1))
  @property
  def child_refs(self):
    return tuple([self] * (2**self.ndims))
  @property
  def child_transforms(self):
    return tuple(transform.SimplexChild(self.ndims, ichild) for ichild in range(2**self.ndims))
  @property
  def ribbons(self):
    # pairs of (edge, edge-of-edge) index tuples that address every
    # codimension-2 face from both adjacent edges (see MosaicReference)
    return tuple(((iedge1,iedge2),(iedge2+1,iedge1)) for iedge1 in range(self.ndims+1) for iedge2 in range(iedge1,self.ndims))
  def getpoints(self, ischeme, degree):
    if ischeme == 'gauss':
      return points.SimplexGaussPoints(self.ndims, degree if numeric.isint(degree) else sum(degree))
    if ischeme == 'vtk':
      return points.SimplexBezierPoints(self.ndims, 2)
    if ischeme == 'vertex':
      # level-``degree`` vertex lattice: 2**degree segments per axis
      return points.SimplexBezierPoints(self.ndims, 2**(degree or 0) + 1)
    if ischeme == 'bezier':
      return points.SimplexBezierPoints(self.ndims, degree)
    return super().getpoints(ischeme, degree)
  @property
  def simplices(self):
    return (transform.Identity(self.ndims), self),
  def get_ndofs(self, degree):
    # binomial(degree+ndims, ndims): dimension of the polynomial space
    prod = lambda start, stop: functools.reduce(operator.mul, range(start, stop), 1)
    return prod(degree+1, degree+1+self.ndims) // prod(1, self.ndims+1)
  def get_poly_coeffs(self, basis, **kwargs):
    # dispatch to _get_poly_coeffs_<basis>
    f = getattr(self, '_get_poly_coeffs_{}'.format(basis), None)
    if f:
      return f(**kwargs)
    else:
      raise ValueError('basis {!r} undefined on {}'.format(basis, type(self).__qualname__))
  def _integer_barycentric_coordinates(self, degree):
    # integer barycentric tuples summing to ``degree``, in dof order
    return tuple(
      (degree-sum(i),*i[::-1])
      for i in itertools.product(*[range(degree+1)]*self.ndims)
      if sum(i) <= degree)
  def _get_poly_coeffs_bernstein(self, degree):
    ndofs = self.get_ndofs(degree)
    if self.ndims == 0:
      return types.frozenarray(numpy.ones((ndofs,), dtype=int), copy=False)
    coeffs = numpy.zeros((ndofs,)+(degree+1,)*self.ndims, dtype=int)
    for i, p in enumerate(self._integer_barycentric_coordinates(degree)):
      p = p[1:]
      for q in itertools.product(*[range(degree+1)]*self.ndims):
        if sum(p+q) <= degree:
          # multinomial expansion of the Bernstein polynomial for dof i
          coeffs[(i,)+tuple(map(operator.add, p, q))] = (-1)**sum(q)*math.factorial(degree)//(math.factorial(degree-sum(p+q))*util.product(map(math.factorial, p+q)))
    assert i == ndofs - 1
    return types.frozenarray(coeffs, copy=False)
  def _get_poly_coeffs_lagrange(self, degree):
    if self.ndims == 0:
      coeffs = numpy.ones((1,))
    elif degree == 0:
      coeffs = numpy.ones((1,*[1]*self.ndims))
    else:
      # invert the matrix of monomials evaluated at the lattice points
      P = numpy.array(tuple(self._integer_barycentric_coordinates(degree)), dtype=int)[:,1:]
      coeffs_ = numpy.linalg.inv(((P[:,_,:]/degree)**P[_,:,:]).prod(-1))
      coeffs = numpy.zeros((len(P),*[degree+1]*self.ndims), dtype=float)
      for i, p in enumerate(P):
        coeffs[(slice(None),*p)] = coeffs_[i]
    return types.frozenarray(coeffs, copy=False)
  def get_edge_dofs(self, degree, iedge):
    # dofs whose iedge-th barycentric coordinate is zero lie on that edge
    return types.frozenarray(tuple(i for i, j in enumerate(self._integer_barycentric_coordinates(degree)) if j[iedge] == 0), dtype=int)
class PointReference(SimplexReference):
  '0D simplex'
  __slots__ = ()
  __cache__ = 'getpoints',
  def __init__(self):
    super().__init__(ndims=0)
  def getpoints(self, ischeme, degree):
    # every scheme degenerates to the single point with unit weight
    return points.CoordsWeightsPoints(numpy.empty([1,0]), [1.])
  def inside(self, point, eps=0):
    return True
  def nvertices_by_level(self, n):
    return 1
  def child_divide(self, vals, n):
    return vals,
class LineReference(SimplexReference):
  '1D simplex'
  __slots__ = '_bernsteincache',
  def __init__(self):
    self._bernsteincache = [] # TEMPORARY
    super().__init__(ndims=1)
  def getpoints(self, ischeme, degree):
    if ischeme == 'uniform':
      # ``degree`` midpoints of equal subintervals
      return points.CoordsUniformPoints(numpy.arange(.5, degree)[:,_] / degree, 1)
    return super().getpoints(ischeme, degree)
  def nvertices_by_level(self, n):
    return 2**n + 1
  def child_divide(self, vals, n):
    # split vertex data over the two halves; the midpoint is shared
    assert n > 0
    assert len(vals) == self.nvertices_by_level(n)
    m = (len(vals)+1) // 2
    return vals[:m], vals[m-1:]
  def inside(self, point, eps=0):
    x, = point
    return -eps <= x <= 1+eps
class TriangleReference(SimplexReference):
  '2D simplex'
  __slots__ = ()
  def __init__(self):
    super().__init__(ndims=2)
  def getpoints(self, ischeme, degree):
    if ischeme == 'uniform':
      # cell centers of a degree x degree grid, with points that fall
      # outside the triangle mirrored back across the diagonal
      p = numpy.arange(1./3, degree) / degree
      C = numpy.empty([2, degree, degree])
      C[0] = p[:,_]
      C[1] = p[_,:]
      coords = C.reshape(2, -1)
      flip = numpy.greater(coords.sum(0), 1)
      coords[:,flip] = 1 - coords[::-1,flip]
      return points.CoordsUniformPoints(coords.T, .5)
    return super().getpoints(ischeme, degree)
  def nvertices_by_level(self, n):
    m = 2**n + 1
    return ((m+1)*m) // 2
  def child_divide(self, vals, n):
    # redistribute triangular-lattice vertex data over the four children
    assert len(vals) == self.nvertices_by_level(n)
    np = 1 + 2**n # points along parent edge
    mp = 1 + 2**(n-1) # points along child edge
    cvals = []
    for i in range(mp):
      j = numpy.arange(mp-i)
      cvals.append([vals[b+a*np-(a*(a-1))//2] for a, b in [(i,j),(i,mp-1+j),(mp-1+i,j),(i+j,mp-1-j)]])
    return numpy.concatenate(cvals, axis=1)
  def inside(self, point, eps=0):
    x, y = point
    return x >= -eps and y >= -eps and 1-x-y >= -eps
class TetrahedronReference(SimplexReference):
  '3D simplex'
  # TETRAHEDRON:
  # c\d
  # a-b
  #
  # EDGES:
  # d\  d\  d\  c\
  # b-c a-c a-b a-b
  # SUBDIVIDED TETRAHEDRON:
  # f\  i\j
  # d-e\g-h
  # a-b-c
  #
  # SUBDIVIDED EDGES:
  # j\    j\    j\    f\
  # h-i\  g-i\  g-h\  d-e\
  # c-e-f a-d-f a-b-c a-b-c
  #
  # CHILDREN:
  # d\g e\h f\i i\j e\g g\h g\i h\i
  # a-b b-c d-e g-h b-d b-e d-e e-g
  __slots__ = ()
  # per child: indices into the 10-point level-1 vertex lattice (a..j above)
  _children_vertices = [0,1,3,6], [1,2,4,7], [3,4,5,8], [6,7,8,9], [1,3,4,6], [1,4,6,7], [3,4,6,8], [4,6,7,8]
  def __init__(self):
    super().__init__(ndims=3)
  def getindices_vertex(self, n):
    # integer (i,j,k) lattice coordinates of the level-n vertex set
    m = 2**n+1
    indis = numpy.arange(m)
    return numpy.array([[i,j,k] for k in indis for j in indis[:m-k] for i in indis[:m-j-k]])
  def nvertices_by_level(self, n):
    m = 2**n+1
    return ((m+2)*(m+1)*m)//6
  def child_divide(self, vals, n):
    # redistribute tetrahedral-lattice vertex data over the eight children
    assert len(vals) == self.nvertices_by_level(n)
    child_indices = self.getindices_vertex(1)
    offset = numpy.array([1,0,0,0])
    linear = numpy.array([[-1,-1,-1],[1,0,0],[0,1,0],[0,0,1]])
    m = 2**n+1
    cvals = []
    for child_ref, child_vertices in zip(self.child_refs,self._children_vertices):
      V = child_indices[child_vertices]
      child_offset = (2**(n-1))*V.T.dot(offset)
      child_linear = V.T.dot(linear)
      original = child_ref.getindices_vertex(n-1)
      transformed = original.dot(child_linear.T) + child_offset
      i, j, k = transformed.T
      # closed-form linear index of lattice point (i,j,k) in the parent
      cvals.append(vals[((k-1)*k*(2*k-1)//6 - (1+2*m)*(k-1)*k//2 + m*(m+1)*k)//2 + ((2*(m-k)+1)*j-j**2)//2 + i])
    return numpy.array(cvals)
class TensorReference(Reference):
  'tensor reference'
  # Cartesian product ref1 x ref2; by construction ref1 is never itself a
  # TensorReference, keeping products right-nested (see __init__/__mul__).
  __slots__ = 'ref1', 'ref2'
  __cache__ = 'vertices', 'edge_transforms', 'ribbons', 'child_transforms', 'getpoints', 'get_poly_coeffs'
  def __init__(self, ref1, ref2):
    assert not isinstance(ref1, TensorReference)
    self.ref1 = ref1
    self.ref2 = ref2
    super().__init__(ref1.ndims + ref2.ndims)
  def __mul__(self, other):
    # re-associate to keep the product right-nested
    assert isinstance(other, Reference)
    return TensorReference(self.ref1, self.ref2 * other)
  @property
  def vertices(self):
    vertices = numpy.empty((self.ref1.nverts, self.ref2.nverts, self.ndims), dtype=float)
    vertices[:,:,:self.ref1.ndims] = self.ref1.vertices[:,_]
    vertices[:,:,self.ref1.ndims:] = self.ref2.vertices[_,:]
    return types.frozenarray(vertices.reshape(self.ref1.nverts*self.ref2.nverts, self.ndims), copy=False)
  @property
  def centroid(self):
    return numpy.concatenate([self.ref1.centroid, self.ref2.centroid])
  def nvertices_by_level(self, n):
    return self.ref1.nvertices_by_level(n) * self.ref2.nvertices_by_level(n)
  def child_divide(self, vals, n):
    # divide along axis 1 then axis 2; children are ordered ref1-major
    np1 = self.ref1.nvertices_by_level(n)
    np2 = self.ref2.nvertices_by_level(n)
    return [v2.swapaxes(0,1).reshape((-1,)+vals.shape[1:])
      for v1 in self.ref1.child_divide(vals.reshape((np1,np2)+vals.shape[1:]), n)
      for v2 in self.ref2.child_divide(v1.swapaxes(0,1), n)]
  def __str__(self):
    return '{}*{}'.format(self.ref1, self.ref2)
  def getpoints(self, ischeme, degree):
    if self.ref1.ndims == 0:
      return self.ref2.getpoints(ischeme, degree)
    if self.ref2.ndims == 0:
      return self.ref1.getpoints(ischeme, degree)
    if ischeme != 'vtk':
      # 'a*b' splits schemes per factor; a tuple degree splits likewise
      ischeme1, ischeme2 = ischeme.split('*', 1) if '*' in ischeme else (ischeme, ischeme)
      degree1 = degree if not isinstance(degree, tuple) else degree[0]
      degree2 = degree if not isinstance(degree, tuple) else degree[1] if len(degree) == 2 else degree[1:]
      return points.TensorPoints(self.ref1.getpoints(ischeme1, degree1), self.ref2.getpoints(ischeme2, degree2))
    # vtk: emit vertices in the ordering vtk expects for quads/hexahedra
    if self.ref1.ndims == self.ref2.ndims == 1:
      coords = numpy.empty([2, 2, 2])
      coords[...,:1] = self.ref1.vertices[:,_]
      coords[0,:,1:] = self.ref2.vertices
      coords[1,:,1:] = self.ref2.vertices[::-1]
    elif self.ref1.ndims <= 1 and self.ref2.ndims >= 1:
      coords = numpy.empty([self.ref1.nverts, self.ref2.nverts, self.ndims])
      coords[...,:self.ref1.ndims] = self.ref1.vertices[:,_]
      coords[...,self.ref1.ndims:] = self.ref2.vertices[_,:]
    elif self.ref1.ndims >= 1 and self.ref2.ndims <= 1:
      coords = numpy.empty([self.ref2.nverts, self.ref1.nverts, self.ndims])
      coords[...,:self.ref1.ndims] = self.ref1.vertices[_,:]
      coords[...,self.ref1.ndims:] = self.ref2.vertices[:,_]
    else:
      raise NotImplementedError
    return points.CoordsPoints(coords.reshape(self.nverts, self.ndims))
  @property
  def edge_transforms(self):
    # edges of ref1 extruded along ref2, followed by edges of ref2
    edge_transforms = []
    if self.ref1.ndims:
      edge_transforms.extend(transform.TensorEdge1(trans1, self.ref2.ndims) for trans1 in self.ref1.edge_transforms)
    if self.ref2.ndims:
      edge_transforms.extend(transform.TensorEdge2(self.ref1.ndims, trans2) for trans2 in self.ref2.edge_transforms)
    return tuple(edge_transforms)
  @property
  def edge_refs(self):
    # ordering matches edge_transforms: ref1 edges first, then ref2 edges
    edge_refs = []
    if self.ref1.ndims:
      edge_refs.extend(edge1 * self.ref2 for edge1 in self.ref1.edge_refs)
    if self.ref2.ndims:
      edge_refs.extend(self.ref1 * edge2 for edge2 in self.ref2.edge_refs)
    return tuple(edge_refs)
  @property
  def ribbons(self):
    if self.ref1.ndims == 0:
      return self.ref2.ribbons
    if self.ref2.ndims == 0:
      return self.ref1.ribbons
    # mixed ribbons: every ref1-edge crossed with every ref2-edge
    ribbons = []
    for iedge1 in range(self.ref1.nedges):
      #iedge = self.ref1.edge_refs[iedge] * self.ref2
      for iedge2 in range(self.ref2.nedges):
        #jedge = self.ref1 * self.ref2.edge_refs[jedge]
        jedge1 = self.ref1.nedges + iedge2
        jedge2 = iedge1
        if self.ref1.ndims > 1:
          iedge2 += self.ref1.edge_refs[iedge1].nedges
        ribbons.append(((iedge1,iedge2),(jedge1,jedge2)))
    # pure ribbons inherited from either factor, offset into this element's
    # edge numbering for the ref2 part
    if self.ref1.ndims >= 2:
      ribbons.extend(self.ref1.ribbons)
    if self.ref2.ndims >= 2:
      ribbons.extend(((iedge1+self.ref1.nedges,iedge2+self.ref1.nedges),
                      (jedge1+self.ref1.nedges,jedge2+self.ref1.nedges)) for (iedge1,iedge2), (jedge1,jedge2) in self.ref2.ribbons)
    return tuple(ribbons)
  @property
  def child_transforms(self):
    return tuple(transform.TensorChild(trans1, trans2) for trans1 in self.ref1.child_transforms for trans2 in self.ref2.child_transforms)
  @property
  def child_refs(self):
    return tuple(child1 * child2 for child1 in self.ref1.child_refs for child2 in self.ref2.child_refs)
  def inside(self, point, eps=0):
    return self.ref1.inside(point[:self.ref1.ndims],eps) and self.ref2.inside(point[self.ref1.ndims:],eps)
  @property
  def simplices(self):
    return tuple((transform.TensorChild(trans1, trans2), TensorReference(simplex1, simplex2)) for trans1, simplex1 in self.ref1.simplices for trans2, simplex2 in self.ref2.simplices)
  def get_ndofs(self, degree):
    return self.ref1.get_ndofs(degree)*self.ref2.get_ndofs(degree)
  def get_poly_coeffs(self, basis, **kwargs):
    return numeric.poly_outer_product(self.ref1.get_poly_coeffs(basis, **kwargs), self.ref2.get_poly_coeffs(basis, **kwargs))
  def get_edge_dofs(self, degree, iedge):
    # tensor dofs are numbered ref1-major: dof = d1 * ndofs2 + d2
    if not numeric.isint(iedge) or iedge < 0 or iedge >= self.nedges:
      raise IndexError('edge index out of range')
    nd2 = self.ref2.get_ndofs(degree)
    if iedge < self.ref1.nedges:
      dofs1 = self.ref1.get_edge_dofs(degree, iedge)
      dofs2 = range(self.ref2.get_ndofs(degree))
    else:
      dofs1 = range(self.ref1.get_ndofs(degree))
      dofs2 = self.ref2.get_edge_dofs(degree, iedge-self.ref1.nedges)
    return types.frozenarray(tuple(d1*nd2+d2 for d1, d2 in itertools.product(dofs1, dofs2)), dtype=int)
  @property
  def _flat_refs(self):
    # yield the non-tensor factors of a (possibly nested) tensor product
    for ref in self.ref1, self.ref2:
      if isinstance(ref, TensorReference):
        yield from ref._flat_refs
      else:
        yield ref
class Cone(Reference):
  'cone'
  # Generalized cone: the convex hull of an edge reference, embedded via the
  # up-dimension transform ``etrans``, and an apex point ``tip``.
  __slots__ = 'edgeref', 'etrans', 'tip', 'extnorm', 'height'
  __cache__ = 'vertices', 'edge_transforms', 'edge_refs', 'volume'
  @types.apply_annotations
  def __init__(self, edgeref, etrans, tip:types.frozenarray):
    assert etrans.fromdims == edgeref.ndims
    assert etrans.todims == len(tip)
    super().__init__(len(tip))
    self.edgeref = edgeref
    self.etrans = etrans
    self.tip = tip
    ext = etrans.ext
    self.extnorm = numpy.linalg.norm(ext)
    # perpendicular distance from tip to the base plane
    self.height = numpy.dot(etrans.offset - tip, ext) / self.extnorm
    assert self.height >= 0, 'tip is positioned at the negative side of edge'
  @property
  def vertices(self):
    return types.frozenarray(numpy.vstack([[self.tip], self.etrans.apply(self.edgeref.vertices)]), copy=False)
  @property
  def edge_transforms(self):
    # base edge first, then one lateral face per edge of the base
    edge_transforms = [self.etrans]
    if self.edgeref.ndims > 0:
      for trans, edge in self.edgeref.edges:
        if edge:
          b = self.etrans.apply(trans.offset)
          A = numpy.hstack([numpy.dot(self.etrans.linear, trans.linear), (self.tip-b)[:,_]])
          newtrans = transform.Updim(A, b, isflipped=self.etrans.isflipped^trans.isflipped^(self.ndims%2==1)) # isflipped logic tested up to 3D
          edge_transforms.append(newtrans)
    else:
      # 1D cone (a segment): the second edge is the tip point itself
      edge_transforms.append(transform.Updim(numpy.zeros((1,0)), self.tip, isflipped=not self.etrans.isflipped))
    return edge_transforms
  @property
  def edge_refs(self):
    edge_refs = [self.edgeref]
    if self.edgeref.ndims > 0:
      # lateral faces are themselves cones over the base's edges
      extrudetrans = transform.Updim(numpy.eye(self.ndims-1)[:,:-1], numpy.zeros(self.ndims-1), isflipped=self.ndims%2==0)
      tip = numpy.array([0]*(self.ndims-2)+[1], dtype=float)
      edge_refs.extend(edge.cone(extrudetrans, tip) for edge in self.edgeref.edge_refs if edge)
    else:
      edge_refs.append(getsimplex(0))
    return edge_refs
  def getpoints(self, ischeme, degree):
    if ischeme == 'gauss':
      if self.nverts == self.ndims+1: # use optimal gauss schemes for simplex-like cones
        trans = transform.Square((self.etrans.apply(self.edgeref.vertices) - self.tip).T, self.tip)
        return points.TransformPoints(getsimplex(self.ndims).getpoints(ischeme, degree), trans)
      # otherwise: base quadrature times a radial rule with Jacobian xi**(ndims-1)
      epoints = self.edgeref.getpoints('gauss', degree)
      tx, tw = points.gauss((degree + self.ndims - 1)//2)
      wx = tx**(self.ndims-1) * tw * self.extnorm * self.height
      return points.CoordsWeightsPoints((tx[:,_,_] * (self.etrans.apply(epoints.coords)-self.tip)[_,:,:] + self.tip).reshape(-1, self.ndims), (epoints.weights[_,:] * wx[:,_]).ravel())
    if ischeme == 'uniform':
      coords = numpy.concatenate([(self.etrans.apply(self.edgeref.getpoints('uniform', i+1).coords) - self.tip) * ((i+.5)/degree) + self.tip for i in range(degree)])
      return points.CoordsUniformPoints(coords, self.volume)
    if ischeme == 'vtk' and self.nverts == 5 and self.ndims==3: # pyramid
      return points.CoordsPoints(self.vertices[[1,2,4,3,0]])
    return points.ConePoints(self.edgeref.getpoints(ischeme, degree), self.etrans, self.tip)
  @property
  def volume(self):
    # base area times height over ndims (pyramid volume formula)
    return self.edgeref.volume * self.extnorm * self.height / self.ndims
  @property
  def simplices(self):
    if self.nverts == self.ndims+1 or self.edgeref.ndims == 2 and self.edgeref.nverts == 4: # simplices and square-based pyramids are ok
      return [(transform.Identity(self.ndims), self)]
    return tuple((transform.Identity(self.ndims), ref.cone(self.etrans*trans,self.tip)) for trans, ref in self.edgeref.simplices)
  def inside(self, point, eps=0):
    # point = etrans.apply(epoint) * xi + tip * (1-xi) => etrans.apply(epoint) = tip + (point-tip) / xi
    xi = numpy.dot(self.etrans.ext, point-self.tip) / numpy.dot(self.etrans.ext, self.etrans.offset-self.tip)
    return 0 < xi <= 1+eps and self.edgeref.inside(numpy.linalg.solve(
      numpy.dot(self.etrans.linear.T, self.etrans.linear),
      numpy.dot(self.etrans.linear.T, self.tip + (point-self.tip)/xi - self.etrans.offset)), eps=eps)
class OwnChildReference(Reference):
  'forward self as child'
  # Wrapper that presents ``baseref`` as its own single child under the
  # identity transform, recursively applied to edges.
  __slots__ = 'baseref', 'child_refs', 'child_transforms'
  def __init__(self, baseref):
    self.baseref = baseref
    self.child_refs = baseref,
    self.child_transforms = transform.Identity(baseref.ndims),
    super().__init__(baseref.ndims)
  @property
  def vertices(self):
    return self.baseref.vertices
  @property
  def edge_transforms(self):
    return self.baseref.edge_transforms
  @property
  def edge_refs(self):
    return [OwnChildReference(edge) for edge in self.baseref.edge_refs]
  def getpoints(self, ischeme, degree):
    return self.baseref.getpoints(ischeme, degree)
  @property
  def simplices(self):
    return self.baseref.simplices
  def get_ndofs(self, degree):
    return self.baseref.get_ndofs(degree)
  def get_poly_coeffs(self, basis, **kwargs):
    return self.baseref.get_poly_coeffs(basis, **kwargs)
  def get_edge_dofs(self, degree, iedge):
    return self.baseref.get_edge_dofs(degree, iedge)
class WithChildrenReference(Reference):
  'base reference with explicit children'
  # ``baseref`` with some children replaced by (possibly trimmed or empty)
  # substitutes; used to represent partially trimmed elements.
  __slots__ = 'baseref', 'child_transforms', 'child_refs'
  __cache__ = '__extra_edges', 'edge_transforms', 'edge_refs', 'connectivity'
  @types.apply_annotations
  def __init__(self, baseref, child_refs:tuple):
    assert len(child_refs) == baseref.nchildren and any(child_refs) and child_refs != baseref.child_refs
    assert all(isinstance(child_ref,Reference) for child_ref in child_refs)
    assert all(child_ref.ndims == baseref.ndims for child_ref in child_refs)
    self.baseref = baseref
    self.child_transforms = baseref.child_transforms
    self.child_refs = child_refs
    super().__init__(baseref.ndims)
  def check_edges(self, tol=1e-15, print=print):
    # verify self as well as every nonempty child
    super().check_edges(tol=tol, print=print)
    for cref in self.child_refs:
      cref.check_edges(tol=tol, print=print)
  @property
  def vertices(self):
    return self.baseref.vertices
  def nvertices_by_level(self, n):
    return self.baseref.nvertices_by_level(n)
  def child_divide(self, vals, n):
    return self.baseref.child_divide(vals, n)
  # set algebra delegates to the children pairwise when both operands share
  # the same base reference; otherwise NotImplemented defers to the operand
  __sub__ = lambda self, other: self.empty if other in (self,self.baseref) else self.baseref.with_children(self_child-other_child for self_child, other_child in zip(self.child_refs, other.child_refs)) if isinstance(other, WithChildrenReference) and other.baseref in (self,self.baseref) else NotImplemented
  __rsub__ = lambda self, other: self.baseref.with_children(other_child - self_child for self_child, other_child in zip(self.child_refs, other.child_refs)) if other == self.baseref else NotImplemented
  __and__ = lambda self, other: self if other == self.baseref else other if isinstance(other,WithChildrenReference) and self == other.baseref else self.baseref.with_children(self_child & other_child for self_child, other_child in zip(self.child_refs, other.child_refs)) if isinstance(other, WithChildrenReference) and other.baseref == self.baseref else NotImplemented
  __or__ = lambda self, other: other if other == self.baseref else self.baseref.with_children(self_child | other_child for self_child, other_child in zip(self.child_refs, other.child_refs)) if isinstance(other, WithChildrenReference) and other.baseref == self.baseref else NotImplemented
  @property
  def __extra_edges(self):
    # (ichild, iedge, ref) triples for edges that became external because a
    # child was trimmed or its neighbour no longer covers the interface
    extra_edges = [(ichild, iedge, cref.edge_refs[iedge])
      for ichild, cref in enumerate(self.child_refs) if cref
      for iedge in range(self.baseref.child_refs[ichild].nedges, cref.nedges)]
    for ichild, edges in enumerate(self.baseref.connectivity):
      cref = self.child_refs[ichild]
      if not cref:
        continue # new child is empty
      for iedge, jchild in enumerate(edges):
        if jchild == -1:
          continue # iedge already is an external boundary
        coppref = self.child_refs[jchild]
        if coppref == self.baseref.child_refs[jchild]:
          continue # opposite is complete, so iedge cannot form a new external boundary
        eref = cref.edge_refs[iedge]
        if coppref: # opposite new child is not empty
          eref -= coppref.edge_refs[self.baseref.connectivity[jchild].index(ichild)]
        if eref:
          extra_edges.append((ichild, iedge, eref))
    return extra_edges
  def subvertex(self, ichild, i):
    # total vertex count over nonempty children plus the index range that
    # child ``ichild`` occupies in the concatenation (empty if it is empty)
    assert 0<=ichild<self.nchildren
    npoints = 0
    for childindex, child in enumerate(self.child_refs):
      if child:
        points = child.getpoints('vertex', i-1).coords
        if childindex == ichild:
          rng = numpy.arange(npoints, npoints+len(points))
        npoints += len(points)
      elif ichild==childindex:
        rng = numpy.array([],dtype=int)
    return npoints, rng
  def getpoints(self, ischeme, degree):
    if ischeme == 'vertex':
      return self.baseref.getpoints(ischeme, degree)
    if ischeme == 'bezier':
      # halve the degree per child and deduplicate shared boundary points
      childpoints = [points.TransformPoints(ref.getpoints('bezier', degree//2+1), trans) for trans, ref in self.children if ref]
      return points.ConcatPoints(childpoints, points.find_duplicates(childpoints))
    return points.ConcatPoints(points.TransformPoints(ref.getpoints(ischeme, degree), trans) for trans, ref in self.children if ref)
  @property
  def simplices(self):
    return [(trans2*trans1, simplex) for trans2, child in self.children for trans1, simplex in (child.simplices if child else [])]
  @property
  def edge_transforms(self):
    return tuple(self.baseref.edge_transforms) \
      + tuple(transform.ScaledUpdim(self.child_transforms[ichild], self.child_refs[ichild].edge_transforms[iedge]) for ichild, iedge, ref in self.__extra_edges)
  @property
  def edge_refs(self):
    # base edges rebuilt from the children's matching edge pieces, followed
    # by the extra (newly external) edges
    refs = []
    for etrans, eref in self.baseref.edges:
      children = []
      if eref:
        for ctrans, cref in eref.children:
          ctrans_, etrans_ = etrans.swapup(ctrans)
          ichild = self.baseref.child_transforms.index(ctrans_)
          cref = self.child_refs[ichild]
          children.append(cref.edge_refs[cref.edge_transforms.index(etrans_)])
      refs.append(eref.with_children(children))
    for ichild, iedge, ref in self.__extra_edges:
      refs.append(OwnChildReference(ref))
    return tuple(refs)
  @property
  def connectivity(self):
    # pad base connectivity with -1 for edges added beyond the base count
    return tuple(types.frozenarray(edges.tolist() + [-1] * (self.child_refs[ichild].nedges - len(edges))) for ichild, edges in enumerate(self.baseref.connectivity))
  def inside(self, point, eps=0):
    return any(cref.inside(ctrans.invapply(point), eps=eps) for ctrans, cref in self.children)
  def get_ndofs(self, degree):
    return self.baseref.get_ndofs(degree)
  def get_poly_coeffs(self, basis, **kwargs):
    return self.baseref.get_poly_coeffs(basis, **kwargs)
  def get_edge_dofs(self, degree, iedge):
    return self.baseref.get_edge_dofs(degree, iedge)
class MosaicReference(Reference):
  'triangulation'
  # Trimmed element represented as a fan of cones from an interior midpoint
  # to the (partially trimmed) edges of ``baseref``.
  __slots__ = 'baseref', '_edge_refs', '_midpoint', 'edge_refs', 'edge_transforms'
  __cache__ = 'vertices', 'subrefs'
  @types.apply_annotations
  def __init__(self, baseref, edge_refs:tuple, midpoint:types.frozenarray):
    assert len(edge_refs) == baseref.nedges
    assert edge_refs != tuple(baseref.edge_refs)
    self.baseref = baseref
    self._edge_refs = edge_refs
    self._midpoint = midpoint
    self.edge_refs = list(edge_refs)
    self.edge_transforms = list(baseref.edge_transforms)
    if baseref.ndims == 1:
      # 1D mosaic: the trim point itself closes the boundary
      assert any(edge_refs) and not all(edge_refs), 'invalid 1D mosaic: exactly one edge should be non-empty'
      iedge, = [i for i, edge in enumerate(edge_refs) if edge]
      self.edge_refs.append(getsimplex(0))
      self.edge_transforms.append(transform.Updim(linear=numpy.zeros((1,0)), offset=midpoint, isflipped=not baseref.edge_transforms[iedge].isflipped))
    else:
      # collect new boundary pieces: edges the trimmed edges gained, plus
      # mismatches between adjacent edges along each ribbon
      newedges = [(etrans1, etrans2, edge) for (etrans1,orig), new in zip(baseref.edges, edge_refs) for etrans2, edge in new.edges[orig.nedges:]]
      for (iedge1,iedge2), (jedge1,jedge2) in baseref.ribbons:
        Ei = edge_refs[iedge1]
        ei = Ei.edge_refs[iedge2]
        Ej = edge_refs[jedge1]
        ej = Ej.edge_refs[jedge2]
        ejsubi = ej - ei
        if ejsubi:
          newedges.append((self.edge_transforms[jedge1], Ej.edge_transforms[jedge2], ejsubi))
        eisubj = ei - ej
        if eisubj:
          newedges.append((self.edge_transforms[iedge1], Ei.edge_transforms[iedge2], eisubj))
      # close the boundary by coning each new piece towards the midpoint
      extrudetrans = transform.Updim(numpy.eye(baseref.ndims-1)[:,:-1], numpy.zeros(baseref.ndims-1), isflipped=baseref.ndims%2==0)
      tip = numpy.array([0]*(baseref.ndims-2)+[1], dtype=float)
      for etrans, trans, edge in newedges:
        b = etrans.apply(trans.offset)
        A = numpy.hstack([numpy.dot(etrans.linear, trans.linear), (midpoint-b)[:,_]])
        newtrans = transform.Updim(A, b, isflipped=etrans.isflipped^trans.isflipped^(baseref.ndims%2==1)) # isflipped logic tested up to 3D
        self.edge_transforms.append(newtrans)
        self.edge_refs.append(edge.cone(extrudetrans, tip))
    super().__init__(baseref.ndims)
  @property
  def vertices(self):
    # unique vertices collected from all nonempty edges
    vertices = []
    for etrans, eref in self.edges:
      if eref:
        for vertex in etrans.apply(eref.vertices):
          if vertex not in vertices:
            vertices.append(vertex)
    return types.frozenarray(vertices)
  def __and__(self, other):
    if other in (self,self.baseref):
      return self
    if isinstance(other, MosaicReference) and other.baseref == self:
      return other
    # mosaics over the same base and midpoint intersect edge by edge
    if isinstance(other, MosaicReference) and self.baseref == other.baseref and numpy.equal(other._midpoint, self._midpoint).all():
      isect_edge_refs = [selfedge & otheredge for selfedge, otheredge in zip(self._edge_refs, other._edge_refs)]
      if not any(isect_edge_refs):
        return self.empty
      return MosaicReference(self.baseref, isect_edge_refs, self._midpoint)
    return NotImplemented
  def __or__(self, other):
    if other in (self,self.baseref):
      return other
    if isinstance(other, MosaicReference) and self.baseref == other.baseref and numpy.equal(other._midpoint, self._midpoint).all():
      union_edge_refs = [selfedge | otheredge for selfedge, otheredge in zip(self._edge_refs, other._edge_refs)]
      if tuple(union_edge_refs) == tuple(self.baseref.edge_refs):
        return self.baseref
      return MosaicReference(self.baseref, union_edge_refs, self._midpoint)
    return NotImplemented
  def __sub__(self, other):
    if other in (self,self.baseref):
      return self.empty
    if isinstance(other, MosaicReference) and other.baseref == self:
      inv_edge_refs = [baseedge - edge for baseedge, edge in zip(self.edge_refs, other._edge_refs)]
      return MosaicReference(self, inv_edge_refs, other._midpoint)
    return NotImplemented
  def __rsub__(self, other):
    if other == self.baseref:
      # complement within the base: flip every edge selection
      inv_edge_refs = [baseedge - edge for baseedge, edge in zip(other.edge_refs, self._edge_refs)]
      return MosaicReference(other, inv_edge_refs, self._midpoint)
    return NotImplemented
  def nvertices_by_level(self, n):
    return self.baseref.nvertices_by_level(n)
  @property
  def subrefs(self):
    # the cone fan: one cone per nonempty (trimmed) base edge
    return [ref.cone(trans,self._midpoint) for trans, ref in zip(self.baseref.edge_transforms, self._edge_refs) if ref]
  @property
  def simplices(self):
    return [simplex for subvol in self.subrefs for simplex in subvol.simplices]
  def getpoints(self, ischeme, degree):
    if ischeme == 'vertex':
      return self.baseref.getpoints(ischeme, degree)
    subpoints = [subvol.getpoints(ischeme, degree) for subvol in self.subrefs]
    dups = points.find_duplicates(subpoints) if ischeme == 'bezier' else ()
    return points.ConcatPoints(subpoints, dups)
  def inside(self, point, eps=0):
    return any(subref.inside(point, eps=eps) for subref in self.subrefs)
  def get_ndofs(self, degree):
    return self.baseref.get_ndofs(degree)
  def get_poly_coeffs(self, basis, **kwargs):
    return self.baseref.get_poly_coeffs(basis, **kwargs)
  def get_edge_dofs(self, degree, iedge):
    return self.baseref.get_edge_dofs(degree, iedge)
## UTILITY FUNCTIONS
def parse_legacy_ischeme(ischeme):
  '''Split a legacy integration scheme string into ``(name, degree)``.

  Each '*'-separated part is a scheme name followed by an optional degree,
  e.g. ``'gauss4'`` -> ``('gauss', 4)``, ``'gauss4*uniform2'`` ->
  ``('gauss*uniform', (4, 2))``; a missing degree maps to ``None``.
  '''
  import ast
  matches = list(map(re.compile('^([a-zA-Z]+)(.*)$').match, ischeme.split('*')))
  assert all(matches), 'cannot parse integration scheme {!r}'.format(ischeme)
  ischeme = '*'.join(match.group(1) for match in matches)
  # ast.literal_eval replaces the original eval: it parses the same numeric
  # literals (ints, floats, None, tuples) without executing arbitrary code.
  degree = ast.literal_eval(','.join(match.group(2) or 'None' for match in matches))
  return ischeme, degree
def getsimplex(ndims):
  'Return a fresh simplex reference of dimension ``ndims`` (0 through 3).'
  simplex_classes = (PointReference, LineReference, TriangleReference, TetrahedronReference)
  return simplex_classes[ndims]()
def index_or_append(items, item):
  'Return the index of ``item`` in ``items``, appending it first if absent.'
  if item in items:
    return items.index(item)
  items.append(item)
  return len(items) - 1
def arglexsort(triangulation):
  'Return the permutation that sorts ``triangulation`` lexicographically, comparing rows as tuples.'
  return numpy.argsort(numeric.asobjvector(tuple(tri) for tri in triangulation))
## ELEMENT
class Element(types.Singleton):
  'element class'
  # Deprecated container pairing a reference with its transformation chain
  # (and an optional opposite chain for interface elements).
  __slots__ = 'reference', 'transform', 'opposite'
  @types.apply_annotations
  def __init__(self, reference:strictreference, trans:transform.canonical, opptrans:transform.canonical=None):
    super().__init__()
    self.reference = reference
    self.transform = trans
    self.opposite = opptrans or trans
    warnings.deprecation('element.Element is deprecated')
  def withopposite(self, opp):
    'Return a copy with the opposite transformation taken from ``opp``.'
    assert isinstance(opp, Element) and opp.reference == self.reference
    return Element(self.reference, self.transform, opp.transform)
  def __mul__(self, other):
    # tensor product of elements; the opposite chain is bifurcated only when
    # exactly one operand is an interface element
    self_is_iface = self.opposite != self.transform
    other_is_iface = other.opposite != other.transform
    trans = transform.Bifurcate(self.transform, other.transform),
    if self_is_iface != other_is_iface:
      opptrans = transform.Bifurcate(self.opposite, other.opposite),
    else:
      opptrans = None
    return Element(self.reference * other.reference, trans, opptrans)
  @property
  def vertices(self):
    return transform.apply(self.transform, self.reference.vertices)
  @property
  def ndims(self):
    return self.reference.ndims
  @property
  def nverts(self):
    return self.reference.nverts
  @property
  def nedges(self):
    return self.reference.nedges
  @property
  def edges(self):
    return [self.edge(i) for i in range(self.nedges)]
  def edge(self, iedge):
    'Return edge ``iedge`` as an Element with extended chains, or None if empty.'
    trans, edge = self.reference.edges[iedge]
    return Element(edge, self.transform + (trans,), self.opposite and self.opposite + (trans,)) if edge else None
  @property
  def children(self):
    return [Element(child, self.transform + (trans,), self.opposite and self.opposite + (trans,))
      for trans, child in self.reference.children if child]
  @property
  def flipped(self):
    # swap the two sides of an interface element
    assert self.opposite, 'element does not define an opposite'
    return Element(self.reference, self.opposite, self.transform)
  @property
  def simplices(self):
    return [Element(reference, self.transform + (trans,), self.opposite and self.opposite + (trans,))
      for trans, reference in self.reference.simplices]
  def __str__(self):
    return 'Element({})'.format(self.vertices)
# Annotation helper: strict type check for parameters expecting an Element.
strictelement = types.strict[Element]
# vim:sw=2:sts=2:et
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Dataset preprocessing and pipeline.
Built for Trembl dataset.
"""
import os
import types
from absl import logging
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from protein_lm import domains
@gin.configurable
def make_protein_domain(include_anomalous_amino_acids=True,
                        include_bos=True,
                        include_eos=True,
                        include_pad=True,
                        include_mask=True,
                        length=1024):
  """Builds a variable-length protein domain with the configured vocabulary.

  Args:
    include_anomalous_amino_acids: Include anomalous amino acids in the vocab.
    include_bos: Include a beginning-of-sequence token.
    include_eos: Include an end-of-sequence token.
    include_pad: Include a padding token.
    include_mask: Include a mask token.
    length: Maximum sequence length of the domain.

  Returns:
    A `domains.VariableLengthDiscreteDomain` over protein tokens.
  """
  vocab = domains.ProteinVocab(
      include_anomalous_amino_acids=include_anomalous_amino_acids,
      include_bos=include_bos,
      include_eos=include_eos,
      include_pad=include_pad,
      include_mask=include_mask)
  return domains.VariableLengthDiscreteDomain(vocab=vocab, length=length)
# Module-level default domain; gin configuration may override the arguments
# of make_protein_domain before this module is imported.
protein_domain = make_protein_domain()
def dataset_from_tensors(tensors):
  """Converts nested tf.Tensors or np.ndarrays to a tf.data.Dataset.

  Args:
    tensors: Nested tensors/arrays; generators and lists are materialized
      into a tuple before slicing.

  Returns:
    A `tf.data.Dataset` yielding slices along the first dimension.
  """
  # Single tuple-form isinstance instead of two chained checks.
  if isinstance(tensors, (types.GeneratorType, list)):
    tensors = tuple(tensors)
  return tf.data.Dataset.from_tensor_slices(tensors)
def _parse_example(value):
  """Parses a serialized tf.Example into a dense int64 `sequence` tensor."""
  parsed = tf.parse_single_example(
      value, features={'sequence': tf.io.VarLenFeature(tf.int64)})
  # VarLenFeature yields a SparseTensor; densify for downstream batching.
  sequence = tf.sparse.to_dense(parsed['sequence'])
  return sequence
@gin.configurable
def get_train_valid_files(directory, num_test_files=10, num_valid_files=1):
  """Given a directory, list files and split into train/valid files.

  Files whose name contains 'tmp' are ignored.  The first `num_test_files`
  files (after sorting) are reserved for testing and excluded from both
  returned lists; the next `num_valid_files` are used for validation and the
  remainder for training.

  Args:
    directory: Directory containing data.
    num_test_files: Number of files to set aside for testing.
    num_valid_files: Number of files to use for validation.
  Returns:
    Tuple of lists of (train files, validation files).
  """
  files = tf.gfile.ListDirectory(directory)
  files = [os.path.join(directory, f) for f in files if 'tmp' not in f]
  files = sorted(files)
  # Skip the first num_test_files files (reserved for testing), take the next
  # num_valid_files for validation, and train on everything after that.
  valid_files = files[num_test_files:num_test_files + num_valid_files]
  train_files = files[num_test_files + num_valid_files:]
  return train_files, valid_files
def _add_eos(seq):
  """Appends the protein vocab's EOS id to the end of `seq`."""
  # TODO(ddohan): Support non-protein domains.
  return tf.concat([seq, [protein_domain.vocab.eos]], axis=-1)
def load_dataset(train_files,
                 test_files,
                 shuffle_buffer=8192,
                 batch_size=32,
                 max_train_length=512,
                 max_eval_length=None):
  """Load data from directory.

  Takes first shard as test split.

  Args:
    train_files: Files to load training data from.
    test_files: Files to load test data from.
    shuffle_buffer: Shuffle buffer size for training.
    batch_size: Batch size.
    max_train_length: Length to crop train sequences to.
    max_eval_length: Length to crop eval sequences to. Defaults to
      max_train_length.

  Returns:
    Tuple of (train dataset, test dataset)
  """
  max_eval_length = max_eval_length or max_train_length
  logging.info('Training on %s shards', len(train_files))
  print('Training on %s shards' % len(train_files))
  print('Test on %s shards' % str(test_files))
  test_ds = tf.data.TFRecordDataset(test_files)
  # Read training data from many files in parallel.
  filenames_dataset = tf.data.Dataset.from_tensor_slices(train_files).shuffle(
      2048)
  train_ds = filenames_dataset.interleave(
      tf.data.TFRecordDataset,
      num_parallel_calls=tf.data.experimental.AUTOTUNE,
      deterministic=False)
  train_ds = train_ds.map(
      _parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  test_ds = test_ds.map(
      _parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  train_ds = batch_ds(
      train_ds,
      batch_size=batch_size,
      shuffle_buffer=shuffle_buffer,
      length=max_train_length)
  test_ds = batch_ds(
      test_ds,
      batch_size=batch_size,
      shuffle_buffer=None,
      length=max_eval_length)
  # Fix: Dataset.prefetch returns a new dataset instead of mutating in place.
  # The previous code discarded the prefetched datasets, so prefetching never
  # took effect.
  train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE)
  test_ds = test_ds.prefetch(tf.data.experimental.AUTOTUNE)
  return train_ds, test_ds
@gin.configurable
def batch_ds(ds,
             length=512,
             batch_size=32,
             shuffle_buffer=8192,
             pack_length=None):
  """Crop, shuffle, and batch a dataset of sequences.

  Args:
    ds: Dataset of variable-length integer sequences.
    length: If set, crop each sequence to at most `length` tokens.
    batch_size: Batch size; incomplete final batches are dropped.
    shuffle_buffer: If set, shuffle with this buffer size each iteration.
    pack_length: If set, append EOS to each sequence and pack tokens
      end-to-end into rows of exactly `pack_length` tokens instead of padding.

  Returns:
    The batched dataset.
  """
  def _crop(x):
    # Truncate to the first `length` tokens; shorter sequences pass through.
    return x[:length]
  if length:
    ds = ds.map(_crop, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  if shuffle_buffer:
    ds = ds.shuffle(buffer_size=shuffle_buffer, reshuffle_each_iteration=True)
  if pack_length:
    logging.info('Packing sequences to length %s', pack_length)
    # Add EOS tokens so sequence boundaries survive packing.
    ds = ds.map(_add_eos, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Pack sequences together by concatenating: flatten to a token stream,
    # then re-batch into fixed-size rows.
    ds = ds.unbatch()
    ds = ds.batch(pack_length)  # Pack length
    ds = ds.batch(batch_size, drop_remainder=True)  # Add batch dimension.
  else:
    # Pad every sequence in the batch to `length` with the vocab pad id.
    ds = ds.padded_batch(
        batch_size,
        padded_shapes=length,
        padding_values=np.array(protein_domain.vocab.pad, dtype=np.int64),
        drop_remainder=True)
  return ds
def _encode_protein(protein_string):
  """Encodes a protein string into a numpy array of vocab ids."""
  encoded = protein_domain.encode([protein_string], pad=False)
  return np.array(encoded)
def _sequence_to_tf_example(sequence):
  """Wraps an integer sequence in a tf.train.Example under key 'sequence'."""
  flat_values = np.array(sequence).reshape(-1)
  sequence_feature = tf.train.Feature(
      int64_list=tf.train.Int64List(value=flat_values))
  return tf.train.Example(
      features=tf.train.Features(feature={'sequence': sequence_feature}))
def _write_tfrecord(sequences, outdir, idx, total):
  """Write iterable of sequences to sstable shard idx/total in outdir."""
  # Shard names are zero-padded to five digits: data-00001-of-00010.
  shard_name = 'data-%0.5d-of-%0.5d' % (idx, total)
  path = os.path.join(outdir, shard_name)
  with tf.io.TFRecordWriter(path) as writer:
    for sequence in sequences:
      writer.write(_sequence_to_tf_example(sequence).SerializeToString())
def csv_to_tfrecord(csv_path, outdir, idx, total):
  """Process csv at `csv_path` to shard idx/total in outdir."""
  with tf.gfile.GFile(csv_path) as f:
    # Lazily encode each row; the second CSV column holds the sequence.
    def encoded_rows():
      for line in f:
        _, seq = line.strip().split(',')
        yield _encode_protein(seq)
    _write_tfrecord(encoded_rows(), outdir, idx, total)
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook, _parse_gcs_url
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class BigQueryOperator(BaseOperator):
    """
    Executes BigQuery SQL queries in a specific BigQuery database
    :param bql: the sql code to be executed
    :type bql: Can receive a str representing a sql statement,
        a list of str (sql statements), or reference to a template file.
        Template reference are recognized by str ending in '.sql'
    :param destination_dataset_table: A dotted
        (<project>.|<project>:)<dataset>.<table> that, if set, will store the results
        of the query.
    :type destination_dataset_table: string
    :param write_disposition: Specifies the action that occurs if the destination table
        already exists. (default: 'WRITE_EMPTY')
    :type write_disposition: string
    :param create_disposition: Specifies whether the job is allowed to create new tables.
        (default: 'CREATE_IF_NEEDED')
    :type create_disposition: string
    :param allow_large_results: Whether to allow large results.
    :type allow_large_results: boolean
    :param flatten_results: If true and query uses legacy SQL dialect, flattens
        all nested and repeated fields in the query results. ``allow_large_results``
        must be ``true`` if this is set to ``false``. For standard SQL queries, this
        flag is ignored and results are never flattened.
    :type flatten_results: boolean
    :param bigquery_conn_id: reference to a specific BigQuery hook.
    :type bigquery_conn_id: string
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: string
    :param udf_config: The User Defined Function configuration for the query.
        See https://cloud.google.com/bigquery/user-defined-functions for details.
    :type udf_config: list
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: boolean
    :param maximum_billing_tier: Positive integer that serves as a multiplier
        of the basic price.
        Defaults to None, in which case it uses the value set in the project.
    :type maximum_billing_tier: integer
    :param maximum_bytes_billed: Limits the bytes billed for this job.
        Queries that will have bytes billed beyond this limit will fail
        (without incurring a charge). If unspecified, this will be
        set to your project default.
    :type maximum_bytes_billed: float
    :param schema_update_options: Allows the schema of the destination
        table to be updated as a side effect of the load job.
    :type schema_update_options: tuple
    :param query_params: a dictionary containing query parameter types and
        values, passed to BigQuery.
    :type query_params: dict
    :param priority: Specifies a priority for the query.
        Defaults to 'INTERACTIVE'.
    :type priority: string
    """
    template_fields = ('bql', 'destination_dataset_table')
    template_ext = ('.sql', )
    ui_color = '#e4f0e8'
    @apply_defaults
    def __init__(self,
                 bql,
                 destination_dataset_table=False,
                 write_disposition='WRITE_EMPTY',
                 allow_large_results=False,
                 flatten_results=False,
                 bigquery_conn_id='bigquery_default',
                 delegate_to=None,
                 udf_config=False,
                 use_legacy_sql=True,
                 maximum_billing_tier=None,
                 maximum_bytes_billed=None,
                 create_disposition='CREATE_IF_NEEDED',
                 schema_update_options=(),
                 query_params=None,
                 priority='INTERACTIVE',
                 *args,
                 **kwargs):
        super(BigQueryOperator, self).__init__(*args, **kwargs)
        self.bql = bql
        self.destination_dataset_table = destination_dataset_table
        self.write_disposition = write_disposition
        self.create_disposition = create_disposition
        self.allow_large_results = allow_large_results
        self.flatten_results = flatten_results
        self.bigquery_conn_id = bigquery_conn_id
        self.delegate_to = delegate_to
        self.udf_config = udf_config
        self.use_legacy_sql = use_legacy_sql
        self.maximum_billing_tier = maximum_billing_tier
        self.maximum_bytes_billed = maximum_bytes_billed
        self.schema_update_options = schema_update_options
        self.query_params = query_params
        # Cursor is created lazily in execute() and kept on the instance so
        # that on_kill() can cancel the running query.
        self.bq_cursor = None
        self.priority = priority
    def execute(self, context):
        # Create the hook/connection/cursor only on the first invocation;
        # subsequent calls reuse the existing cursor.
        if self.bq_cursor is None:
            self.log.info('Executing: %s', self.bql)
            hook = BigQueryHook(
                bigquery_conn_id=self.bigquery_conn_id,
                use_legacy_sql=self.use_legacy_sql,
                delegate_to=self.delegate_to)
            conn = hook.get_conn()
            self.bq_cursor = conn.cursor()
        self.bq_cursor.run_query(
            self.bql,
            destination_dataset_table=self.destination_dataset_table,
            write_disposition=self.write_disposition,
            allow_large_results=self.allow_large_results,
            flatten_results=self.flatten_results,
            udf_config=self.udf_config,
            maximum_billing_tier=self.maximum_billing_tier,
            maximum_bytes_billed=self.maximum_bytes_billed,
            create_disposition=self.create_disposition,
            query_params=self.query_params,
            schema_update_options=self.schema_update_options,
            priority=self.priority)
    def on_kill(self):
        super(BigQueryOperator, self).on_kill()
        # Best-effort cancellation of the in-flight BigQuery job.
        if self.bq_cursor is not None:
            self.log.info('Canceling running query due to execution timeout')
            self.bq_cursor.cancel_query()
class BigQueryCreateEmptyTableOperator(BaseOperator):
    """
    Creates a new, empty table in the specified BigQuery dataset,
    optionally with schema.
    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google cloud storage object name. The object in
    Google cloud storage must be a JSON file with the schema fields in it.
    You can also create a table without schema.
    :param project_id: The project to create the table into.
    :type project_id: string
    :param dataset_id: The dataset to create the table into.
    :type dataset_id: string
    :param table_id: The Name of the table to be created.
    :type table_id: string
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
        **Example**: ::
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
    :type schema_fields: list
    :param gcs_schema_object: Full path to the JSON file containing schema. For
        example: ``gs://test-bucket/dir1/dir2/employee_schema.json``
    :type gcs_schema_object: string
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and expiration as per API specifications.
        Defaults to None (no time partitioning).
        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
    :type time_partitioning: dict
    :param bigquery_conn_id: Reference to a specific BigQuery hook.
    :type bigquery_conn_id: string
    :param google_cloud_storage_conn_id: Reference to a specific Google
        cloud storage hook.
    :type google_cloud_storage_conn_id: string
    :param delegate_to: The account to impersonate, if any. For this to
        work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: string
    **Example (with schema JSON in GCS)**: ::
        CreateTable = BigQueryCreateEmptyTableOperator(
            task_id='BigQueryCreateEmptyTableOperator_task',
            dataset_id='ODS',
            table_id='Employees',
            project_id='internal-gcp-project',
            gcs_schema_object='gs://schema-bucket/employee_schema.json',
            bigquery_conn_id='airflow-service-account',
            google_cloud_storage_conn_id='airflow-service-account'
        )
    **Corresponding Schema file** (``employee_schema.json``): ::
        [
          {
            "mode": "NULLABLE",
            "name": "emp_name",
            "type": "STRING"
          },
          {
            "mode": "REQUIRED",
            "name": "salary",
            "type": "INTEGER"
          }
        ]
    **Example (with schema in the DAG)**: ::
        CreateTable = BigQueryCreateEmptyTableOperator(
            task_id='BigQueryCreateEmptyTableOperator_task',
            dataset_id='ODS',
            table_id='Employees',
            project_id='internal-gcp-project',
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}],
            bigquery_conn_id='airflow-service-account',
            google_cloud_storage_conn_id='airflow-service-account'
        )
    """
    template_fields = ('dataset_id', 'table_id', 'project_id', 'gcs_schema_object')
    ui_color = '#f0eee4'
    @apply_defaults
    def __init__(self,
                 dataset_id,
                 table_id,
                 project_id=None,
                 schema_fields=None,
                 gcs_schema_object=None,
                 time_partitioning=None,
                 bigquery_conn_id='bigquery_default',
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args, **kwargs):
        super(BigQueryCreateEmptyTableOperator, self).__init__(*args, **kwargs)
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.schema_fields = schema_fields
        self.gcs_schema_object = gcs_schema_object
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Fix: the previous ``time_partitioning={}`` default was a mutable
        # default argument shared across every operator instance; use a None
        # sentinel and build a fresh dict per instance instead.
        self.time_partitioning = {} if time_partitioning is None \
            else time_partitioning
    def execute(self, context):
        bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                               delegate_to=self.delegate_to)
        # Schema passed inline takes precedence; otherwise download the JSON
        # schema file from GCS when one was provided.
        if not self.schema_fields and self.gcs_schema_object:
            gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
            gcs_hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to)
            schema_fields = json.loads(gcs_hook.download(
                gcs_bucket,
                gcs_object).decode("utf-8"))
        else:
            schema_fields = self.schema_fields
        conn = bq_hook.get_conn()
        cursor = conn.cursor()
        cursor.create_empty_table(
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            schema_fields=schema_fields,
            time_partitioning=self.time_partitioning
        )
class BigQueryCreateExternalTableOperator(BaseOperator):
    """
    Creates a new external table in the dataset with the data in Google Cloud
    Storage.
    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google cloud storage object name. The object in
    Google cloud storage must be a JSON file with the schema fields in it.
    :param bucket: The bucket to point the external table to.
    :type bucket: string
    :param source_objects: List of Google cloud storage URIs to point table to.
        If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
    :type source_objects: list
    :param destination_project_dataset_table: The dotted (<project>.)<dataset>.<table>
        BigQuery table to load data into. If <project> is not included, project will
        be the project defined in the connection json.
    :type destination_project_dataset_table: string
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
        **Example**: ::
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
        Should not be set when source_format is 'DATASTORE_BACKUP'.
    :type schema_fields: list
    :param schema_object: If set, a GCS object path pointing to a .json file that
        contains the schema for the table.
    :type schema_object: string
    :param source_format: File format of the data.
    :type source_format: string
    :param compression: [Optional] The compression type of the data source.
        Possible values include GZIP and NONE.
        The default value is NONE.
        This setting is ignored for Google Cloud Bigtable,
        Google Cloud Datastore backups and Avro formats.
    :type compression: string
    :param skip_leading_rows: Number of rows to skip when loading from a CSV.
    :type skip_leading_rows: int
    :param field_delimiter: The delimiter to use for the CSV.
    :type field_delimiter: string
    :param max_bad_records: The maximum number of bad records that BigQuery can
        ignore when running the job.
    :type max_bad_records: int
    :param quote_character: The value that is used to quote data sections in a CSV file.
    :type quote_character: string
    :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
    :type allow_quoted_newlines: boolean
    :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
        The missing values are treated as nulls. If false, records with missing trailing
        columns are treated as bad records, and if there are too many bad records, an
        invalid error is returned in the job result. Only applicable to CSV, ignored
        for other formats.
    :type allow_jagged_rows: bool
    :param bigquery_conn_id: Reference to a specific BigQuery hook.
    :type bigquery_conn_id: string
    :param google_cloud_storage_conn_id: Reference to a specific Google
        cloud storage hook.
    :type google_cloud_storage_conn_id: string
    :param delegate_to: The account to impersonate, if any. For this to
        work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: string
    :param src_fmt_configs: configure optional fields specific to the source format.
        Defaults to None (no extra configuration).
    :type src_fmt_configs: dict
    """
    template_fields = ('bucket', 'source_objects',
                       'schema_object', 'destination_project_dataset_table')
    ui_color = '#f0eee4'
    @apply_defaults
    def __init__(self,
                 bucket,
                 source_objects,
                 destination_project_dataset_table,
                 schema_fields=None,
                 schema_object=None,
                 source_format='CSV',
                 compression='NONE',
                 skip_leading_rows=0,
                 field_delimiter=',',
                 max_bad_records=0,
                 quote_character=None,
                 allow_quoted_newlines=False,
                 allow_jagged_rows=False,
                 bigquery_conn_id='bigquery_default',
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 src_fmt_configs=None,
                 *args, **kwargs):
        super(BigQueryCreateExternalTableOperator, self).__init__(*args, **kwargs)
        # GCS config
        self.bucket = bucket
        self.source_objects = source_objects
        self.schema_object = schema_object
        # BQ config
        self.destination_project_dataset_table = destination_project_dataset_table
        self.schema_fields = schema_fields
        self.source_format = source_format
        self.compression = compression
        self.skip_leading_rows = skip_leading_rows
        self.field_delimiter = field_delimiter
        self.max_bad_records = max_bad_records
        self.quote_character = quote_character
        self.allow_quoted_newlines = allow_quoted_newlines
        self.allow_jagged_rows = allow_jagged_rows
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Fix: the previous ``src_fmt_configs={}`` default was a mutable
        # default argument shared across every operator instance; use a None
        # sentinel and build a fresh dict per instance instead.
        self.src_fmt_configs = {} if src_fmt_configs is None else src_fmt_configs
    def execute(self, context):
        bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                               delegate_to=self.delegate_to)
        # Inline schema wins; otherwise fetch the schema JSON from GCS, except
        # for datastore backups which carry their own schema.
        if not self.schema_fields and self.schema_object \
                and self.source_format != 'DATASTORE_BACKUP':
            gcs_hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to)
            schema_fields = json.loads(gcs_hook.download(
                self.bucket,
                self.schema_object).decode("utf-8"))
        else:
            schema_fields = self.schema_fields
        source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
                       for source_object in self.source_objects]
        conn = bq_hook.get_conn()
        cursor = conn.cursor()
        cursor.create_external_table(
            external_project_dataset_table=self.destination_project_dataset_table,
            schema_fields=schema_fields,
            source_uris=source_uris,
            source_format=self.source_format,
            compression=self.compression,
            skip_leading_rows=self.skip_leading_rows,
            field_delimiter=self.field_delimiter,
            max_bad_records=self.max_bad_records,
            quote_character=self.quote_character,
            allow_quoted_newlines=self.allow_quoted_newlines,
            allow_jagged_rows=self.allow_jagged_rows,
            src_fmt_configs=self.src_fmt_configs
        )
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from operator import itemgetter
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from openstack_dashboard import api
# Module-level logger shared by the helpers below.
LOG = logging.getLogger(__name__)
def flavor_list(request):
    """Return all nova flavors for the request, or [] on failure."""
    flavors = []
    try:
        flavors = api.nova.flavor_list(request)
    except Exception:
        exceptions.handle(request,
                          _('Unable to retrieve instance flavors.'))
    return flavors
def sort_flavor_list(request, flavors, with_menu_label=True):
    """Utility method to sort a list of flavors.

    By default, returns the available flavors, sorted by RAM usage
    (ascending). Override these behaviours with a
    ``CREATE_INSTANCE_FLAVOR_SORT`` dict in ``local_settings.py``.
    """
    def get_key(flavor, sort_key):
        # Fall back to sorting by RAM when the configured attribute does not
        # exist on the flavor object.
        try:
            return getattr(flavor, sort_key)
        except AttributeError:
            LOG.warning('Could not find sort key "%s". Using the default '
                        '"ram" instead.', sort_key)
            return getattr(flavor, 'ram')
    try:
        options = settings.CREATE_INSTANCE_FLAVOR_SORT
        sort_key = options.get('key', 'ram')
        rev = options.get('reverse', False)
        if callable(sort_key):
            key = sort_key
        else:
            def key(flavor):
                return get_key(flavor, sort_key)
        ordered = sorted(flavors, key=key, reverse=rev)
        if with_menu_label:
            return [(flavor.id, '%s' % flavor.name) for flavor in ordered]
        return ordered
    except Exception:
        exceptions.handle(request,
                          _('Unable to sort instance flavors.'))
        return []
def server_group_list(request):
    """Return all Nova server groups for the request, or [] on failure."""
    groups = []
    try:
        groups = api.nova.server_group_list(request)
    except Exception:
        exceptions.handle(request,
                          _('Unable to retrieve Nova server groups.'))
    return groups
def network_field_data(request, include_empty_option=False, with_cidr=False,
                       for_launch=False):
    """Returns a list of tuples of all networks.

    Generates a list of networks available to the user (request). And returns
    a list of (id, name) tuples.

    :param request: django http request object
    :param include_empty_option: flag to include a empty tuple in the front of
        the list
    :param with_cidr: flag to include subnets cidr in field name
    :param for_launch: flag to pass ``include_pre_auto_allocate`` to the
        network listing, for the launch-instance workflow
    :return: list of (id, name) tuples
    """
    tenant_id = request.user.tenant_id
    networks = []
    if api.base.is_service_enabled(request, 'network'):
        extra_params = {}
        if for_launch:
            extra_params['include_pre_auto_allocate'] = True
        try:
            networks = api.neutron.network_list_for_tenant(
                request, tenant_id, **extra_params)
        except Exception as e:
            msg = _('Failed to get network list {0}').format(six.text_type(e))
            exceptions.handle(request, msg)
        _networks = []
        for n in networks:
            # Networks without subnets are skipped entirely.
            if not n['subnets']:
                continue
            v = n.name_or_id
            if with_cidr:
                # Display IPv4 CIDRs first, then IPv6, in the choice label.
                cidrs = ([subnet.cidr for subnet in n['subnets']
                          if subnet.ip_version == 4] +
                         [subnet.cidr for subnet in n['subnets']
                          if subnet.ip_version == 6])
                v += ' (%s)' % ', '.join(cidrs)
            _networks.append((n.id, v))
        # Sort choices by their display label.
        networks = sorted(_networks, key=itemgetter(1))
    if not networks:
        if include_empty_option:
            return [("", _("No networks available")), ]
        return []
    if include_empty_option:
        return [("", _("Select Network")), ] + networks
    return networks
def keypair_field_data(request, include_empty_option=False):
    """Returns a list of tuples of all keypairs.

    Generates a list of keypairs available to the user (request). And returns
    a list of (id, name) tuples.

    :param request: django http request object
    :param include_empty_option: flag to include a empty tuple in the front of
        the list
    :return: list of (id, name) tuples
    """
    choices = []
    try:
        choices = [(kp.name, kp.name)
                   for kp in api.nova.keypair_list(request)]
    except Exception:
        exceptions.handle(request, _('Unable to retrieve key pairs.'))
    if not choices:
        if include_empty_option:
            return [("", _("No key pairs available")), ]
        return []
    if include_empty_option:
        return [("", _("Select a key pair")), ] + choices
    return choices
def flavor_field_data(request, include_empty_option=False):
    """Returns a list of tuples of all image flavors.

    Generates a list of image flavors available. And returns a list of
    (id, name) tuples.

    :param request: django http request object
    :param include_empty_option: flag to include a empty tuple in the front of
        the list
    :return: list of (id, name) tuples
    """
    flavors = flavor_list(request)
    if not flavors:
        if include_empty_option:
            return [("", _("No flavors available")), ]
        return []
    choices = sort_flavor_list(request, flavors)
    if include_empty_option:
        return [("", _("Select Flavor")), ] + choices
    return choices
def port_field_data(request, with_network=False):
    """Returns a list of tuples of all ports available for the tenant.

    Generates a list of ports that have no device_owner based on the networks
    available to the tenant doing the request.

    :param request: django http request object
    :param with_network: include network name in field name
    :return: list of (id, name) tuples
    """
    def add_more_info_port_name(port, network):
        # add more info to the port for the display: the port's fixed IP
        # addresses, and optionally the network name.
        port_name = "{} ({})".format(
            port.name_or_id, ",".join(
                [ip['ip_address'] for ip in port['fixed_ips']]))
        if with_network and network:
            port_name += " - {}".format(network.name_or_id)
        return port_name
    ports = []
    if api.base.is_service_enabled(request, 'network'):
        network_list = api.neutron.network_list_for_tenant(
            request, request.user.tenant_id)
        for network in network_list:
            # Only unattached ports (no device_owner) that are not trunk
            # subports are offered.
            ports.extend(
                [(port.id, add_more_info_port_name(port, network))
                 for port in api.neutron.port_list_with_trunk_types(
                     request, network_id=network.id,
                     tenant_id=request.user.tenant_id)
                 if (not port.device_owner and
                     not isinstance(port, api.neutron.PortTrunkSubport))])
        # Sort by display label for a stable ordering.
        ports.sort(key=lambda obj: obj[1])
    return ports
def server_group_field_data(request):
    """Returns a list of tuples of all server groups.

    Generates a list of server groups available. And returns a list of
    (id, name) tuples.

    :param request: django http request object
    :return: list of (id, name) tuples
    """
    groups = server_group_list(request)
    if not groups:
        return [("", _("No server groups available")), ]
    choices = sorted(((sg.id, sg.name) for sg in groups),
                     key=lambda obj: obj[1])
    return [("", _("Select Server Group")), ] + choices
| |
from common_fixtures import * # NOQA
from cattle import ApiError
from test_physical_host import disable_go_machine_service # NOQA
@pytest.fixture(scope='module')
def update_ping_settings(request, super_client):
    # These settings need changed because they control how the logic of the
    # ping handlers behave in cattle. We need to update them so that we can
    # ensure the ping logic will fully run.
    settings = super_client.list_setting()
    originals = []

    def update_setting(new_value, s):
        # Record the setting and its original value so revert_settings can
        # restore it.  Fix: use the parameter ``s`` instead of the enclosing
        # loop variable ``setting``, which only worked because callers passed
        # the current loop value and was a fragile late-binding closure.
        originals.append((s, {'value': s.value}))
        s = super_client.update(s, {'value': new_value})
        wait_setting_active(super_client, s)

    for setting in settings:
        if setting.name == 'agent.ping.resources.every' and setting.value != 1:
            update_setting('1', setting)
        if setting.name == 'agent.resource.monitor.cache.resource.seconds' \
                and setting.value != 0:
            update_setting('0', setting)

    def revert_settings():
        # Restore every modified setting to its pre-test value.
        for s in originals:
            super_client.update(s[0], s[1])

    request.addfinalizer(revert_settings)
def test_machine_lifecycle(super_client, admin_client, admin_account,
                           update_ping_settings):
    """Machine create/activate/remove flow, including simulated agent pings."""
    name = random_str()
    machine = admin_client.create_machine(name=name,
                                          virtualboxConfig={})
    machine = admin_client.wait_success(machine)
    assert machine.state == 'active'
    assert machine.virtualboxConfig is not None
    external_id = super_client.reload(machine).externalId
    assert external_id is not None
    # Create an agent with the externalId specified. The agent simulator will
    # mimic how the go-machine-service would use this external_id to bootstrap
    # an agent onto the physical host with the proper PHYSICAL_HOST_UUID set.
    scope = 'io.cattle.platform.agent.connection.simulator' \
            '.AgentConnectionSimulator'
    uri = 'sim://{}'.format(random_str())
    data = {scope: {}}
    data[scope]['addPhysicalHost'] = True
    data[scope]['externalId'] = external_id
    account_id = get_plain_id(super_client, admin_account)
    data[scope]['agentResourcesAccountId'] = account_id
    data['agentResourcesAccountId'] = account_id
    agent = super_client.create_agent(uri=uri, data=data)
    agent = super_client.wait_success(agent)
    hosts = agent.hosts()
    assert len(hosts) == 1
    host = hosts[0]
    assert host.physicalHostId == machine.id
    assert machine.accountId == host.accountId
    # Need to force a ping because they cause physical hosts to be created
    # under non-machine use cases. Ensures the machine isnt overridden
    ping = one(super_client.list_task, name='agent.ping')
    ping.execute()
    time.sleep(.1)  # The ping needs time to execute
    agent = super_client.reload(agent)
    hosts = agent.hosts()
    assert len(hosts) == 1
    host = hosts[0]
    # After the ping, the host must still map back to the original machine.
    physical_hosts = host.physicalHost()
    assert physical_hosts.id == machine.id
    # Removing the machine cascades to the host created for it.
    machine = admin_client.wait_success(machine.remove())
    assert machine.state == 'removed'
    host = admin_client.wait_success(admin_client.reload(host))
    assert host.state == 'removed'
def _expect_active_machine(admin_client, driver, config_field, config,
                           **extra):
    # Create a machine with a single driver config (plus any extra fields),
    # wait for it to activate, and verify the config round-trips unchanged.
    # Returns the activated host for any driver-specific extra assertions.
    name = "test-%s" % random_str()
    kwargs = {config_field: config}
    kwargs.update(extra)
    host = admin_client.create_machine(name=name, **kwargs)
    host = admin_client.wait_success(host)
    assert host.state == 'active'
    assert config == getattr(host, config_field)
    assert host.driver == driver
    return host


def test_machine_driver_config(admin_client):
    """Each supported machine driver stores its config and reports the
    matching driver name once the machine is active."""
    # virtualbox, with auth fields supplied alongside the driver config
    vbox_config = {
        "memory": "2048",
        "diskSize": "40000",
        "boot2dockerUrl": "http://localhost/random",
    }
    ca = "ca-1"
    key = "key-1"
    host = _expect_active_machine(admin_client, 'virtualbox',
                                  'virtualboxConfig', vbox_config,
                                  authCertificateAuthority=ca,
                                  authKey=key)
    assert ca == host.authCertificateAuthority
    assert key == host.authKey

    digoc_config = {
        "image": "img1",
        "region": "reg1",
        "size": "40000",
        "accessToken": "ac-1",
        "ipv6": True,
        "privateNetworking": True,
        "backups": True
    }
    _expect_active_machine(admin_client, 'digitalocean',
                           'digitaloceanConfig', digoc_config)

    ec2_config = {
        "accessKey": "accesskey1",
        "secretKey": "secretkey1",
        "vpcId": "1234",
        "subnetId": "5678",
        "sessionToken": "sessiontoken1",
        "ami": "ami1",
        "region": "us-east-1",
        "zone": "us-east-1a",
        "securityGroup": "docker-machine",
        "instanceType": "type1",
        "rootSize": "60GB",
        "iamInstanceProfile": "profile1",
    }
    _expect_active_machine(admin_client, 'amazonec2',
                           'amazonec2Config', ec2_config)

    packet_config = {
        "apiKey": "apikey1",
        "projectId": "projectId",
        "os": "centos_7",
        "facilityCode": "ewr1",
        "plan": "baremetal_1",
        "billingCycle": "hourly",
    }
    _expect_active_machine(admin_client, 'packet',
                           'packetConfig', packet_config)

    rackspace_config = {
        "username": "username",
        "apiKey": "apiKey",
        "region": "region",
        "endpointType": "endpointType",
        "imageId": "imageId",
        "flavorId": "flavorId",
        "sshUser": "sshUser",
        "sshPort": "sshPort",
        "dockerInstall": "dockerInstall",
    }
    _expect_active_machine(admin_client, 'rackspace',
                           'rackspaceConfig', rackspace_config)
def test_machine_validation(admin_client):
    """Machine creation must be given exactly one driver config."""
    name = "test-%s" % random_str()

    # Supplying two driver configs at once is rejected.
    rejected = False
    try:
        admin_client.create_machine(name=name,
                                    virtualboxConfig={},
                                    digitaloceanConfig={"accessToken": "a"})
    except ApiError as err:
        rejected = True
        assert err.error.status == 422
        assert err.error.code == 'DriverConfigExactlyOneRequired'
    assert rejected, "Should not have been able to set two drivers."

    # Supplying no driver config at all is rejected as well.
    rejected = False
    try:
        admin_client.create_machine(name=name)
    except ApiError as err:
        rejected = True
        assert err.error.status == 422
        assert err.error.code == 'DriverConfigExactlyOneRequired'
    assert rejected, "Should have been required to set a driver."

    # A second driver config that is explicitly None does not count.
    host = admin_client.create_machine(name=name,
                                       virtualboxConfig={},
                                       digitaloceanConfig=None)
    assert host is not None
def test_digitalocean_config_validation(admin_client):
    """The digitalocean driver config requires an accessToken."""
    name = "test-%s" % random_str()

    # An empty digitaloceanConfig must be rejected with MissingRequired.
    rejected = False
    try:
        admin_client.create_machine(name=name,
                                    digitaloceanConfig={})
    except ApiError as err:
        rejected = True
        assert err.error.status == 422
        assert err.error.code == 'MissingRequired'
    assert rejected, 'Should have got MissingRequired for accessToken'
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import validate_email_add, get_fullname, strip_html, cstr
from frappe.core.doctype.communication.comment import (notify_mentions,
update_comment_in_doc)
from frappe.core.doctype.communication.email import (validate_email,
notify, _notify, update_parent_status)
from frappe.utils.bot import BotReply
from email.utils import parseaddr
from collections import Counter
exclude_from_linked_with = True
class Communication(Document):
    """Communication represents an external communication like Email."""
    # NOTE: the docstring above was previously placed after
    # no_feed_on_delete, which made it a no-op statement (__doc__ was None).

    # Deleting a Communication does not create a feed/activity entry.
    no_feed_on_delete = True

    def validate(self):
        """Fill defaults (owner, user, subject, direction, status, sender)
        and validate the linked reference document."""
        if self.reference_doctype and self.reference_name:
            if not self.reference_owner:
                self.reference_owner = frappe.db.get_value(
                    self.reference_doctype, self.reference_name, "owner")

            # prevent communication against a child table
            if frappe.get_meta(self.reference_doctype).istable:
                frappe.throw(_("Cannot create a {0} against a child document: {1}")
                             .format(_(self.communication_type), _(self.reference_doctype)))

        if not self.user:
            self.user = frappe.session.user

        if not self.subject:
            # fall back to a plain-text preview of the content
            self.subject = strip_html((self.content or "")[:141])

        if not self.sent_or_received:
            self.sent_or_received = "Sent"

        self.set_status()
        self.set_sender_full_name()
        validate_email(self)
        self.set_timeline_doc()

    def after_insert(self):
        """Push realtime events so open clients see the new record."""
        if not (self.reference_doctype and self.reference_name):
            return

        if self.communication_type in ("Communication", "Comment"):
            # send new comment to listening clients
            frappe.publish_realtime('new_communication', self.as_dict(),
                doctype=self.reference_doctype, docname=self.reference_name,
                after_commit=True)

            if self.communication_type == "Comment":
                notify_mentions(self)

        elif self.communication_type in ("Chat", "Notification", "Bot"):
            if self.reference_name == frappe.session.user:
                # addressed to the current user: broadcast to all sessions
                message = self.as_dict()
                message['broadcast'] = True
                frappe.publish_realtime('new_message', message,
                    after_commit=True)
            else:
                # reference_name contains the user who is addressed in the
                # messages' page comment
                frappe.publish_realtime('new_message', self.as_dict(),
                    user=self.reference_name, after_commit=True)

    def on_update(self):
        """Update parent status as `Open` or `Replied`."""
        update_parent_status(self)
        update_comment_in_doc(self)
        self.bot_reply()

    def on_trash(self):
        """Guard auto-generated comments from deletion and notify clients."""
        if (not self.flags.ignore_permissions
                and self.communication_type == "Comment"
                and self.comment_type != "Comment"):
            # prevent deletion of auto-created comments if not
            # ignore_permissions
            frappe.throw(_("Sorry! You cannot delete auto-generated comments"))

        if self.communication_type in ("Communication", "Comment"):
            # send delete comment to listening clients
            frappe.publish_realtime('delete_communication', self.as_dict(),
                doctype=self.reference_doctype, docname=self.reference_name,
                after_commit=True)

    def set_status(self):
        """Derive the initial status; only applies to new documents."""
        if not self.is_new():
            return

        if self.reference_doctype and self.reference_name:
            self.status = "Linked"
        elif self.communication_type == "Communication":
            self.status = "Open"
        else:
            self.status = "Closed"

    def set_sender_full_name(self):
        """Split `sender` into a bare email plus a display name, handling
        the Administrator and Guest pseudo-users specially."""
        if not self.sender_full_name and self.sender:
            if self.sender == "Administrator":
                self.sender_full_name = self.sender
                self.sender = frappe.db.get_value("User", "Administrator",
                    "email")
            elif self.sender == "Guest":
                self.sender_full_name = self.sender
                self.sender = None
            else:
                if self.sent_or_received == 'Sent':
                    validate_email_add(self.sender, throw=True)
                sender_name, sender_email = parseaddr(self.sender)
                if not sender_name:
                    sender_name = get_fullname(sender_email)
                # a full name equal to the address carries no information
                if sender_name == sender_email:
                    sender_name = None
                self.sender = sender_email
                self.sender_full_name = (sender_name
                    or get_fullname(frappe.session.user))

    def get_parent_doc(self):
        """Return (and cache on self) the document referred to by
        `reference_doctype` / `reference_name`, or None if unlinked."""
        if not hasattr(self, "parent_doc"):
            if self.reference_doctype and self.reference_name:
                self.parent_doc = frappe.get_doc(self.reference_doctype,
                    self.reference_name)
            else:
                self.parent_doc = None
        return self.parent_doc

    def set_timeline_doc(self):
        """Set timeline_doctype and timeline_name from the parent
        document's configured timeline field, if any."""
        parent_doc = self.get_parent_doc()
        if (self.timeline_doctype and self.timeline_name) or not parent_doc:
            return

        timeline_field = parent_doc.meta.timeline_field
        if not timeline_field:
            return

        doctype = parent_doc.meta.get_link_doctype(timeline_field)
        name = parent_doc.get(timeline_field)

        if doctype and name:
            self.timeline_doctype = doctype
            self.timeline_name = name

    def send(self, print_html=None, print_format=None, attachments=None,
             send_me_a_copy=False, recipients=None):
        """Send communication via Email.

        :param print_html: Send given value as HTML attachment.
        :param print_format: Attach print format of parent document."""
        self.send_me_a_copy = send_me_a_copy
        self.notify(print_html, print_format, attachments, recipients)

    def notify(self, print_html=None, print_format=None, attachments=None,
               recipients=None, cc=None, fetched_from_email_account=False):
        """Calls a delayed task 'sendmail' that enqueus email in Email
        Queue queue

        :param print_html: Send given value as HTML attachment
        :param print_format: Attach print format of parent document
        :param attachments: A list of filenames that should be attached when
            sending this email
        :param recipients: Email recipients
        :param cc: Send email as CC to
        :param fetched_from_email_account: True when pulling email, the
            notification shouldn't go to the main recipient
        """
        notify(self, print_html, print_format, attachments, recipients, cc,
            fetched_from_email_account)

    def _notify(self, print_html=None, print_format=None, attachments=None,
                recipients=None, cc=None):
        """Synchronous counterpart of `notify` (module-level `_notify`)."""
        _notify(self, print_html, print_format, attachments, recipients, cc)

    def bot_reply(self):
        """If this is a Bot chat message, insert the bot's reply as a
        new Communication and force a commit so the client sees it."""
        if self.comment_type == 'Bot' and self.communication_type == 'Chat':
            reply = BotReply().get_reply(self.content)
            if reply:
                frappe.get_doc({
                    "doctype": "Communication",
                    "comment_type": "Bot",
                    "communication_type": "Bot",
                    "content": cstr(reply),
                    "reference_doctype": self.reference_doctype,
                    "reference_name": self.reference_name
                }).insert()
                frappe.local.flags.commit = True

    def set_delivery_status(self, commit=False):
        '''Look into the status of Email Queue linked to this Communication
        and set the Delivery Status of this Communication'''
        delivery_status = None
        status_counts = Counter(frappe.db.sql_list(
            '''select status from `tabEmail Queue` where communication=%s''',
            self.name))
        # precedence: in-flight > error > expired > sent
        if status_counts.get('Not Sent') or status_counts.get('Sending'):
            delivery_status = 'Sending'
        elif status_counts.get('Error'):
            delivery_status = 'Error'
        elif status_counts.get('Expired'):
            delivery_status = 'Expired'
        elif status_counts.get('Sent'):
            delivery_status = 'Sent'

        if delivery_status:
            self.db_set('delivery_status', delivery_status)

            frappe.publish_realtime('update_communication', self.as_dict(),
                doctype=self.reference_doctype,
                docname=self.reference_name, after_commit=True)

            # for list views and forms
            self.notify_update()

            if commit:
                frappe.db.commit()
def on_doctype_update():
    """Create the secondary indexes `tabCommunication` is queried by,
    including the (reference_doctype, reference_name) pair."""
    index_specs = (
        ["reference_doctype", "reference_name"],
        ["timeline_doctype", "timeline_name"],
        ["link_doctype", "link_name"],
        ["status", "communication_type"],
        ["creation"],
        ["modified"],
        ["message_id(200)"],
    )
    for columns in index_specs:
        frappe.db.add_index("Communication", columns)
def has_permission(doc, ptype, user):
    """Grant read access when the user can read the linked reference or
    timeline document; otherwise fall through (returns None)."""
    if ptype != "read":
        return

    if (doc.reference_doctype and doc.reference_name
            and frappe.has_permission(doc.reference_doctype, ptype="read",
                doc=doc.reference_name)):
        return True

    if (doc.timeline_doctype and doc.timeline_name
            and frappe.has_permission(doc.timeline_doctype, ptype="read",
                doc=doc.timeline_name)):
        return True
| |
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add feature command."""
import unittest
import os.path
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestAddFeature(TestBrokerCommand):
    """Tests for the `add feature` broker command.

    Numbering convention: test_100_* create features of each type,
    test_110_*/test_120_* verify them, test_2xx exercise rejected input
    and confirm nothing was created.
    """

    # --- creation: one feature of each supported type ---

    def test_100_add_host_pre(self):
        command = ["add", "feature", "--feature", "pre_host",
                   "--type", "host", "--comment", "Test comment"]
        self.noouttest(command)

    def test_100_add_host_post(self):
        command = ["add", "feature", "--feature", "post_host",
                   "--type", "host", "--post_personality"]
        self.noouttest(command)

    def test_100_add_hw(self):
        command = ["add", "feature", "--feature", "bios_setup",
                   "--type", "hardware"]
        self.noouttest(command)

    def test_100_add_hw2(self):
        command = ["add", "feature", "--feature", "disable_ht",
                   "--type", "hardware"]
        self.noouttest(command)

    def test_100_add_iface(self):
        command = ["add", "feature", "--feature", "src_route",
                   "--type", "interface"]
        self.noouttest(command)

    # --- verification of the features created above ---

    def test_110_verify_pre(self):
        command = ["show", "feature", "--feature", "pre_host", "--type", "host"]
        out = self.commandtest(command)
        self.matchoutput(out, "Host Feature: pre_host", command)
        self.matchoutput(out, "Template: features/pre_host", command)
        self.matchoutput(out, "Comments: Test comment", command)
        self.matchoutput(out, "Post Personality: False", command)
        self.matchclean(out, "Bound to", command)

    def test_110_verify_post(self):
        command = ["show", "feature", "--feature", "post_host", "--type", "host"]
        out = self.commandtest(command)
        self.matchoutput(out, "Host Feature: post_host", command)
        self.matchoutput(out, "Template: features/post_host", command)
        self.matchoutput(out, "Post Personality: True", command)
        self.matchclean(out, "Comments", command)
        self.matchclean(out, "Bound to", command)

    def test_110_verify_hw(self):
        command = ["show", "feature", "--feature", "bios_setup",
                   "--type", "hardware"]
        out = self.commandtest(command)
        self.matchoutput(out, "Hardware Feature: bios_setup", command)
        self.matchoutput(out, "Template: features/hardware/bios_setup", command)
        self.matchclean(out, "Post Personality", command)
        self.matchclean(out, "Comments", command)
        self.matchclean(out, "Bound to", command)

    def test_110_verify_iface(self):
        command = ["show", "feature", "--feature", "src_route",
                   "--type", "interface"]
        out = self.commandtest(command)
        self.matchoutput(out, "Interface Feature: src_route", command)
        self.matchoutput(out, "Template: features/interface/src_route", command)
        self.matchclean(out, "Post Personality", command)
        self.matchclean(out, "Comments", command)
        self.matchclean(out, "Bound to", command)

    def test_120_show_all(self):
        command = ["show", "feature", "--all"]
        out = self.commandtest(command)
        self.matchoutput(out, "Host Feature: pre_host", command)
        self.matchoutput(out, "Host Feature: post_host", command)
        self.matchoutput(out, "Hardware Feature: bios_setup", command)
        self.matchoutput(out, "Interface Feature: src_route", command)

    # --- rejected input: bad flag combinations and bad feature names ---

    def test_200_post_hw(self):
        command = ["add", "feature", "--feature", "post_hw",
                   "--type", "hardware", "--post_personality"]
        out = self.unimplementederrortest(command)
        self.matchoutput(out, "The post_personality attribute is implemented "
                         "only for host features.", command)

    def test_200_post_iface(self):
        command = ["add", "feature", "--feature", "post_iface",
                   "--type", "interface", "--post_personality"]
        out = self.unimplementederrortest(command)
        self.matchoutput(out, "The post_personality attribute is implemented "
                         "only for host features.", command)

    def test_200_hw_prefix(self):
        command = ["add", "feature", "--feature", "hardware/host",
                   "--type", "host"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "The 'hardware/' and 'interface/' prefixes "
                         "are not available for host features.", command)

    def test_200_iface_prefix(self):
        command = ["add", "feature", "--feature", "interface/host",
                   "--type", "host"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "The 'hardware/' and 'interface/' prefixes "
                         "are not available for host features.", command)

    def test_200_dotdot_begin(self):
        # Use os.path.join() to test the natural path separator of the platform
        path = os.path.join("..", "foo")
        command = ["add", "feature", "--feature", path, "--type", "host"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "Path components in the feature name must not "
                         "start with a dot.", command)

    def test_200_dotdot_middle(self):
        # Use os.path.join() to test the natural path separator of the platform
        path = os.path.join("foo", "..", "bar")
        command = ["add", "feature", "--feature", path, "--type", "host"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "Path components in the feature name must not "
                         "start with a dot.", command)

    def test_200_hidden_begin(self):
        command = ["add", "feature", "--feature", ".foo", "--type", "host"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "Path components in the feature name must not "
                         "start with a dot.", command)

    def test_200_hidden_middle(self):
        command = ["add", "feature", "--feature", "foo/.bar", "--type", "host"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "Path components in the feature name must not "
                         "start with a dot.", command)

    # --- confirm the rejected features were not created ---

    def test_210_verify_post_hw(self):
        command = ["show", "feature", "--feature", "post_hw",
                   "--type", "hardware"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "Hardware Feature post_hw not found.",
                         command)

    def test_210_verify_post_iface(self):
        command = ["show", "feature", "--feature", "post_iface",
                   "--type", "interface"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "Interface Feature post_iface not found.",
                         command)

    def test_210_verify_hw_prefix(self):
        command = ["show", "feature", "--feature", "hardware/host",
                   "--type", "host"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "Host Feature hardware/host not found.",
                         command)

    def test_210_verify_iface_prefix(self):
        command = ["show", "feature", "--feature", "interface/host",
                   "--type", "interface"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "Interface Feature interface/host not found.",
                         command)

    # --- lookups with the wrong type, duplicates, and unknown types ---

    def test_220_type_mismatch(self):
        command = ["show", "feature", "--feature", "bios_setup",
                   "--type", "host"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "Host Feature bios_setup not found.",
                         command)

    def test_230_add_again(self):
        command = ["add", "feature", "--feature", "pre_host", "--type", "host"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "Host Feature pre_host already exists.", command)

    def test_240_add_bad_type(self):
        command = ["add", "feature", "--feature", "bad-type",
                   "--type", "no-such-type"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "Unknown feature type 'no-such-type'. The "
                         "valid values are: hardware, host, interface.",
                         command)

    def test_240_show_bad_type(self):
        command = ["show", "feature", "--feature", "bad-type",
                   "--type", "no-such-type"]
        out = self.badrequesttest(command)
        self.matchoutput(out, "Unknown feature type 'no-such-type'. The "
                         "valid values are: hardware, host, interface.",
                         command)
if __name__ == '__main__':
    # Allow running this module's tests directly, outside the full harness.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestAddFeature)
    unittest.TextTestRunner(verbosity=2).run(suite)
| |
# -*- coding: utf-8 -*-
"""
goodness_of_fit.py
RAPIDpy
Created by Alan D Snow, 2015.
Based on RAPID_Toolbox for ArcMap
License: BSD 3-Clause
"""
from __future__ import print_function
from csv import writer as csvwriter
import numpy as np
from ..dataset import RAPIDDataset
# ------------------------------------------------------------------------------
# statistic functions
# ------------------------------------------------------------------------------
# FUNCTIONS FROM http://pydoc.net/Python/ambhas/0.4.0/ambhas.errlib/
def filter_nan(s, o):
    """
    Drop paired samples where either series contains NaN.

    Flattens both inputs, pairs them element-wise, removes every pair
    with a NaN in it, and returns the cleaned (simulated, observed)
    arrays. Used by the other statistics so they do not emit NaN.
    """
    stacked = np.column_stack((s.flatten(), o.flatten()))
    keep_rows = ~np.isnan(stacked).any(axis=1)
    clean = stacked[keep_rows]
    return clean[:, 0], clean[:, 1]
def pc_bias(s, o):
    """
    Percent Bias

    input:
        s: simulated
        o: observed
    output:
        pc_bias: percent bias (signed, in percent of total observed flow)
    """
    total_residual = np.sum(s - o)
    return 100.0 * total_residual / np.sum(o)
def apb(s, o):
    """
    Absolute Percent Bias

    input:
        s: simulated
        o: observed
    output:
        apb_bias: absolute percent bias
    """
    abs_residual = np.abs(s - o)
    return 100.0 * np.sum(abs_residual) / np.sum(o)
def rmse(s, o):
    """
    Root Mean Squared Error

    input:
        s: simulated
        o: observed
    output:
        rmses: root mean squared error
    """
    squared_error = np.square(s - o)
    return np.sqrt(np.mean(squared_error))
def mae(s, o):
    """
    Mean Absolute Error

    input:
        s: simulated
        o: observed
    output:
        maes: mean absolute error
    """
    absolute_error = np.abs(s - o)
    return np.mean(absolute_error)
def bias(s, o):
    """
    Bias (mean signed difference, simulated minus observed)

    input:
        s: simulated
        o: observed
    output:
        bias: bias
    """
    residual = s - o
    return np.mean(residual)
def NS(s, o):
    """
    Nash Sutcliffe efficiency coefficient

    input:
        s: simulated
        o: observed
    output:
        ns: Nash Sutcliffe efficient coefficient (1 is a perfect fit)
    """
    model_error = np.sum(np.square(s - o))
    obs_variance = np.sum(np.square(o - np.mean(o)))
    return 1 - model_error / obs_variance
def L(s, o, N=5):
    """
    Likelihood

    input:
        s: simulated
        o: observed
        N: shaping exponent (defaults to 5)
    output:
        L: likelihood (1 for a perfect fit, decaying toward 0)
    """
    model_error = np.sum(np.square(s - o))
    obs_variance = np.sum(np.square(o - np.mean(o)))
    return np.exp(-N * model_error / obs_variance)
def correlation(s, o):
    """
    correlation coefficient

    input:
        s: simulated
        o: observed
    output:
        correlation: correlation coefficient (NaN for empty input)
    """
    if s.size == 0:
        # An empty sample has no defined correlation.
        # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
        return np.nan
    return np.corrcoef(o, s)[0, 1]
def index_agreement(s, o):
    """
    index of agreement

    input:
        s: simulated
        o: observed
    output:
        ia: index of agreement (1 is a perfect fit)
    """
    o_mean = np.mean(o)
    numerator = np.sum((o - s) ** 2)
    denominator = np.sum((np.abs(s - o_mean) + np.abs(o - o_mean)) ** 2)
    return 1 - numerator / denominator
def KGE(s, o):
    """
    Kling-Gupta Efficiency

    input:
        s: simulated
        o: observed
    output:
        kge: Kling-Gupta Efficiency
        cc: correlation
        alpha: ratio of the standard deviation
        beta: ratio of the mean
    """
    cc = correlation(s, o)
    alpha = np.std(s) / np.std(o)
    beta = np.sum(s) / np.sum(o)
    # Euclidean distance from the ideal point (1, 1, 1).
    distance = np.sqrt((cc - 1) ** 2 + (alpha - 1) ** 2 + (beta - 1) ** 2)
    return 1 - distance, cc, alpha, beta
# END FUNCTIONS FROM http://pydoc.net/Python/ambhas/0.4.0/ambhas.errlib/
# ------------------------------------------------------------------------------
# Time Series comparison functions
# ------------------------------------------------------------------------------
def find_goodness_of_fit(rapid_qout_file, reach_id_file, observed_file,
                         out_analysis_file, daily=False):
    """
    Finds the goodness of fit comparing observed streamflow in a rapid Qout
    file with simulated flows in a csv file.

    Parameters
    ----------
    rapid_qout_file: str
        Path to the RAPID Qout file.
    reach_id_file: str
        Path to file with river reach IDs associated with the RAPID Qout
        file. It is in the format of the RAPID observed flows reach ID file.
    observed_file: str
        Path to input csv with observed flows corresponding to the
        RAPID Qout. It is in the format of the RAPID observed flows file.
    out_analysis_file: str
        Path to the analysis output csv file.
    daily: bool, optional
        If True and the file is CF-Compliant, it will compare the
        *observed_file* with daily average flow from Qout. Default is False.


    Example with CF-Compliant RAPID Qout file:

    .. code:: python

        import os
        from RAPIDpy.postprocess import find_goodness_of_fit

        INPUT_DATA_PATH = '/path/to/data'
        reach_id_file = os.path.join(INPUT_DATA_PATH, 'obs_reach_id.csv')
        observed_file = os.path.join(INPUT_DATA_PATH, 'obs_flow.csv')

        cf_input_qout_file = os.path.join(COMPARE_DATA_PATH,
                                          'Qout_nasa_lis_3hr_20020830_CF.nc')
        cf_out_analysis_file = \
            os.path.join(OUTPUT_DATA_PATH,
                         'cf_goodness_of_fit_results-daily.csv')
        find_goodness_of_fit(cf_input_qout_file,
                             reach_id_file,
                             observed_file,
                             cf_out_analysis_file,
                             daily=True)
    """
    # One column of reach IDs; ndmin=1 keeps a single ID as an array.
    reach_id_list = np.loadtxt(reach_id_file,
                               delimiter=",", usecols=(0,),
                               ndmin=1, dtype=np.int32)

    # NOTE(review): the dataset handle is not explicitly closed here --
    # presumably RAPIDDataset releases it on garbage collection; confirm.
    data_nc = RAPIDDataset(rapid_qout_file)

    # analyze and write
    # One observed-flow column per reach ID, in the same order.
    observed_table = np.loadtxt(observed_file,
                                ndmin=2, delimiter=",",
                                usecols=tuple(range(reach_id_list.size)))

    with open(out_analysis_file, 'w') as outcsv:
        writer = csvwriter(outcsv)
        writer.writerow(["reach_id",
                         "percent_bias",
                         "abs_percent_bias",
                         "rmse",
                         "mae",
                         "bias",
                         "NSE",
                         "likelihood",
                         "correlation_coeff",
                         "index_agreement",
                         "KGE"])
        for index, reach_id in enumerate(reach_id_list):
            observed_array = observed_table[:, index]
            simulated_array = data_nc.get_qout(reach_id, daily=daily)

            # make sure they are the same length
            simulated_array = simulated_array[:len(observed_array)]
            observed_array = observed_array[:len(simulated_array)]
            # drop pairs where either series is NaN before computing stats
            simulated_array, observed_array = \
                filter_nan(simulated_array, observed_array)
            # one row of statistics per reach
            writer.writerow([reach_id,
                             pc_bias(simulated_array, observed_array),
                             apb(simulated_array, observed_array),
                             rmse(simulated_array, observed_array),
                             mae(simulated_array, observed_array),
                             bias(simulated_array, observed_array),
                             NS(simulated_array, observed_array),
                             L(simulated_array, observed_array),
                             correlation(simulated_array, observed_array),
                             index_agreement(simulated_array, observed_array),
                             KGE(simulated_array, observed_array)[0]])
def find_goodness_of_fit_csv(observed_simulated_file, out_file=None):
    """
    Finds the goodness of fit comparing observed and simulated flows

    In the file, the first column is the observed flows and the
    second column is the simulated flows.

    Example::

        33.5, 77.2
        34.7, 73.0

    Parameters
    ----------
    observed_simulated_file: str
        Path to the csv file with the observed and simulated flows.
    out_file: str, optional
        Path to output file. If not provided, it will print to console.


    Example:

    .. code:: python

        from RAPIDpy.postprocess import find_goodness_of_fit_csv

        find_goodness_of_fit_csv(
            '/united_kingdom-thames/flows_kingston_gage_noah.csv')
    """
    observed_simulated_table = np.loadtxt(observed_simulated_file,
                                          ndmin=2, delimiter=",",
                                          usecols=(0, 1))

    # drop pairs where either column is NaN before computing statistics
    observed_array, simulated_array = \
        filter_nan(observed_simulated_table[:, 0],
                   observed_simulated_table[:, 1])

    # print error indices
    if out_file:
        print_file = open(out_file, 'w')
    else:
        print_file = None

    # try/finally so the output file is closed even if a statistic raises
    # (the original leaked the handle on error).
    try:
        print("\n".join([
            "Percent Bias: {0:.4f}"
            .format(pc_bias(simulated_array, observed_array)),
            "Absolute Percent Bias: {0:.4f}"
            .format(apb(simulated_array, observed_array)),
            "Root Mean Squared Error: {0:.4f}"
            .format(rmse(simulated_array, observed_array)),
            "Mean Absolute Error: {0:.4f}"
            .format(mae(simulated_array, observed_array)),
            "Bias: {0}".format(bias(simulated_array, observed_array)),
            "Nash Sutcliffe efficiency coefficient: {0:.4f}"
            .format(NS(simulated_array, observed_array)),
            "Likelihood: {0:.4f}"
            .format(L(simulated_array, observed_array)),
            "correlation coefficient: {0:.4f}"
            .format(correlation(simulated_array, observed_array)),
            "index of agreement: {0:.4f}"
            .format(index_agreement(simulated_array, observed_array)),
            "Kling-Gupta Efficiency: {0:.4f}"
            .format(KGE(simulated_array, observed_array)[0]),
        ]),
            file=print_file)
    finally:
        if print_file:
            print_file.close()
| |
"""
Unit tests for the frontend code.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import ga4gh.frontend as frontend
import ga4gh.protocol as protocol
import tests.utils as utils
class TestFrontend(unittest.TestCase):
"""
Tests the basic routing and HTTP handling for the Flask app.
"""
exampleUrl = 'www.example.com'
@classmethod
def setUpClass(cls):
config = {
"DATA_SOURCE": "__SIMULATED__",
"SIMULATED_BACKEND_RANDOM_SEED": 1111,
"SIMULATED_BACKEND_NUM_CALLS": 1,
"SIMULATED_BACKEND_VARIANT_DENSITY": 1.0,
"SIMULATED_BACKEND_NUM_VARIANT_SETS": 1,
# "DEBUG" : True
}
frontend.configure(
baseConfig="TestConfig", extraConfig=config)
cls.app = frontend.app.test_client()
@classmethod
def tearDownClass(cls):
cls.app = None
def sendPostRequest(self, path, request):
"""
Sends the specified GA request object and returns the response.
"""
versionedPath = utils.applyVersion(path)
headers = {
'Content-type': 'application/json',
'Origin': self.exampleUrl,
}
return self.app.post(
versionedPath, headers=headers,
data=request.toJsonString())
def sendVariantsSearch(self):
response = self.sendVariantSetsSearch()
variantSets = protocol.SearchVariantSetsResponse().fromJsonString(
response.data).variantSets
request = protocol.SearchVariantsRequest()
request.variantSetIds = [variantSets[0].id]
request.referenceName = "1"
request.start = 0
request.end = 1
return self.sendPostRequest('/variants/search', request)
def sendVariantSetsSearch(self, datasetIds=[""]):
request = protocol.SearchVariantSetsRequest()
request.datasetIds = datasetIds
return self.sendPostRequest('/variantsets/search', request)
def sendCallSetsSearch(self):
response = self.sendVariantSetsSearch()
variantSets = protocol.SearchVariantSetsResponse().fromJsonString(
response.data).variantSets
request = protocol.SearchCallSetsRequest()
request.variantSetIds = [variantSets[0].id]
return self.sendPostRequest('/callsets/search', request)
def sendReadsSearch(self, readGroupIds=None):
if readGroupIds is None:
readGroupIds = ['aReadGroupSet:one']
request = protocol.SearchReadsRequest()
request.readGroupIds = readGroupIds
return self.sendPostRequest('/reads/search', request)
def sendGetRequest(self, path):
versionedPath = utils.applyVersion(path)
headers = {
'Origin': self.exampleUrl,
}
response = self.app.get(versionedPath, headers=headers)
return response
def sendReferencesGet(self, id_=None):
if id_ is None:
id_ = 'simple:simple'
path = "/references/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendReferenceSetsGet(self, id_=None):
if id_ is None:
id_ = 'simple'
path = "/referencesets/{}".format(id_)
response = self.sendGetRequest(path)
return response
def sendReferencesSearch(self):
path = "/references/search"
request = protocol.SearchReferencesRequest()
response = self.sendPostRequest(path, request)
return response
def sendListRequest(self, path, request):
versionedPath = utils.applyVersion(path)
headers = {
'Origin': self.exampleUrl,
}
data = request.toJsonDict()
response = self.app.get(
versionedPath, data=data, headers=headers)
return response
def sendReferenceBasesList(self, id_=None):
if id_ is None:
id_ = 'simple:simple'
path = "/references/{}/bases".format(id_)
request = protocol.ListReferenceBasesRequest()
response = self.sendListRequest(path, request)
return response
def test404sReturnJson(self):
path = utils.applyVersion('/doesNotExist')
response = self.app.get(path)
protocol.GAException.fromJsonString(response.get_data())
self.assertEqual(404, response.status_code)
def testCors(self):
def assertHeaders(response):
self.assertEqual(self.exampleUrl,
response.headers['Access-Control-Allow-Origin'])
self.assertTrue('Content-Type' in response.headers)
assertHeaders(self.sendVariantsSearch())
assertHeaders(self.sendVariantSetsSearch())
assertHeaders(self.sendReadsSearch())
assertHeaders(self.sendReferencesGet())
assertHeaders(self.sendReferenceSetsGet())
assertHeaders(self.sendReferencesSearch())
assertHeaders(self.sendReferenceBasesList())
# TODO: Test other methods as they are implemented
def verifySearchRouting(self, path, getDefined=False):
"""
Verifies that the specified path has the correct routing for a
search command. If getDefined is False we check to see if it
returns the correct status code.
"""
versionedPath = utils.applyVersion(path)
response = self.app.post(versionedPath)
protocol.GAException.fromJsonString(response.get_data())
self.assertEqual(415, response.status_code)
if not getDefined:
getResponse = self.app.get(versionedPath)
protocol.GAException.fromJsonString(getResponse.get_data())
self.assertEqual(405, getResponse.status_code)
# Malformed requests should return 400
for badJson in ["", None, "JSON", "<xml/>", "{]"]:
badResponse = self.app.post(
versionedPath, data=badJson,
headers={'Content-type': 'application/json'})
self.assertEqual(400, badResponse.status_code)
# OPTIONS should return success
self.assertEqual(200, self.app.options(versionedPath).status_code)
def testRouteReferences(self):
referenceId = "aReferenceSet:srsone"
paths = ['/references/{}', '/references/{}/bases']
for path in paths:
path = path.format(referenceId)
versionedPath = utils.applyVersion(path)
self.assertEqual(200, self.app.get(versionedPath).status_code)
referenceSetId = "aReferenceSet"
paths = ['/referencesets/{}']
for path in paths:
path = path.format(referenceSetId)
versionedPath = utils.applyVersion(path)
self.assertEqual(200, self.app.get(versionedPath).status_code)
self.verifySearchRouting('/referencesets/search', True)
self.verifySearchRouting('/references/search', True)
def testRouteCallsets(self):
path = utils.applyVersion('/callsets/search')
self.assertEqual(415, self.app.post(path).status_code)
self.assertEqual(200, self.app.options(path).status_code)
self.assertEqual(405, self.app.get(path).status_code)
def testRouteReads(self):
paths = ['/reads/search', '/readgroupsets/search']
for path in paths:
self.verifySearchRouting(path)
def testRouteVariants(self):
for path in ['/variantsets/search', '/variants/search']:
self.verifySearchRouting(path)
def testRouteIndex(self):
self._routeIndex("/")
def testRouteIndexRedirect(self):
self._routeIndex("/{}".format(protocol.version))
def _routeIndex(self, path):
response = self.app.get(path)
self.assertEqual(200, response.status_code)
self.assertEqual("text/html", response.mimetype)
self.assertGreater(len(response.data), 0)
def testVariantsSearch(self):
response = self.sendVariantsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchVariantsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.variants), 1)
def testVariantSetsSearch(self):
response = self.sendVariantSetsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchVariantSetsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.variantSets), 1)
def testCallSetsSearch(self):
response = self.sendCallSetsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchCallSetsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.callSets), 1)
def testReadsSearch(self):
response = self.sendReadsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.SearchReadsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.alignments), 2)
self.assertEqual(
responseData.alignments[0].id,
"aReadGroupSet:one:simulated0")
self.assertEqual(
responseData.alignments[1].id,
"aReadGroupSet:one:simulated1")
def testWrongVersion(self):
path = '/v0.1.2/variantsets/search'
self.assertEqual(404, self.app.options(path).status_code)
def testCurrentVersion(self):
path = '/{}/variantsets/search'.format(
frontend.Version.currentString)
self.assertEqual(200, self.app.options(path).status_code)
| |
import requests
import re
import ssl
import json
import time
jobStatus = {"Failure": -1, "Pending": 0, "Processing": 1, "Success": 2, "Failure": 3}
def getUrl(url):
    """
    Return an https:// URL for the given hostname or URL.

    Already-https URLs are returned unchanged, http:// URLs are upgraded
    to https, and bare hostnames get an https:// prefix.
    """
    if "https" in url:
        return url
    if "http" in url:
        # Strip the insecure scheme and re-prefix with https.
        # Fixed: the original pattern 'http?:\/\/' made the 'p' optional
        # (also matching 'htt://') and used needless escapes; a raw
        # 'http://' literal is what is actually meant here.
        return "https://{}".format(re.sub(r'http://', '', url))
    return "https://{}".format(url)
def getSession(api_key, secret_key, insecure=False):
    """
    Build a requests.Session preconfigured with token authentication and
    a JSON content type. When insecure is True, urllib3 certificate
    warnings are suppressed.
    """
    session = requests.Session()
    token_header = "Token token=\"{}{}\"".format(api_key, secret_key)
    session.headers.update({
        "Authorization": token_header,
        "Content-Type": "application/json",
    })
    if insecure:
        requests.packages.urllib3.disable_warnings()
    return session
def getNodes(session, url, details=False, group=None):
    """
    Return a list of node objects (by the index endpoint).
    Passing details=True will get all information for each node.
    Alternatively provide a node group (ID or name) to only get those nodes.
    """
    # NOTE(review): despite the docstring, the `group` argument is never
    # used below -- group filtering appears to be unimplemented. TODO confirm.
    nodes = []
    page = 1
    per_page = 100
    done = False
    # Page through the index endpoint until a short page signals the end.
    while not done:
        new_nodes = session.get("{}/api/v2/nodes.json".format(url), params={"page": page, "per_page": per_page}).json()
        nodes += new_nodes
        page += 1
        done = True if len(new_nodes) < per_page else False
    if details:
        # Re-fetch each node individually to obtain its full attribute set.
        detailed_nodes = []
        for node in nodes:
            detailed_nodes.append(session.get("{}/api/v2/nodes/{}.json".format(url, node["id"])).json())
        return detailed_nodes
    return nodes
def getNodeGroups(session, url, details=False):
    """
    Return a list of node group objects (by the index endpoint).
    Passing details=True will get all information for each node group.
    """
    per_page = 100
    groups = []
    page = 1
    while True:
        batch = session.get(
            "{}/api/v2/node_groups.json".format(url),
            params={"page": page, "per_page": per_page}).json()
        groups += batch
        page += 1
        # A short page means we have reached the end of the index.
        if len(batch) < per_page:
            break
    if not details:
        return groups
    detailed_groups = []
    for group in groups:
        detailed_group = session.get(
            "{}/api/v2/node_groups/{}.json".format(url, group["id"])).json()
        # scan_options arrives as a JSON-encoded string; decode it in place.
        detailed_group["scan_options"] = json.loads(detailed_group["scan_options"])
        detailed_groups.append(detailed_group)
    return detailed_groups
def getConnectionManagerGroups(session, url):
    """
    Return a list of connection manager groups (by the index endpoint).
    """
    per_page = 100
    cm_groups = []
    page = 1
    while True:
        batch = session.get(
            "{}/api/v2/connection_manager_groups.json".format(url),
            params={"page": page, "per_page": per_page}).json()
        cm_groups += batch
        page += 1
        # A short page means we have reached the end of the index.
        if len(batch) < per_page:
            break
    return cm_groups
def getNodesInCMGroups(session, url):
    """
    Return a dictionary of connection manager groups and the nodes associated with them:
    * key: CM Group ID
    * value: List of nodes
    """
    nodes = getNodes(session=session, url=url, details=True)
    cm_groups = getConnectionManagerGroups(session=session, url=url)
    result = {group["id"]: [] for group in cm_groups}
    for node in nodes:
        if node["connection_manager_group_id"]:
            # Fixed: previously an unguarded result[...] access raised
            # KeyError when a node referenced a CM group that was missing
            # from the group index; setdefault keeps such nodes instead.
            result.setdefault(node["connection_manager_group_id"], []).append(node)
    return result
def getPolicies(session, url, details=False):
    """
    Return a list of policies (by the index endpoint).
    Passing details=True will get all information for each policy.
    """
    policies = []
    page = 1
    per_page = 50
    done = False
    while not done:
        # Fixed: this previously queried /api/v2/node_groups.json (a
        # copy-paste from getNodeGroups) and so returned node groups
        # instead of policies.
        new_policies = session.get(
            "{}/api/v2/policies.json".format(url),
            params={"page": page, "per_page": per_page}).json()
        policies += new_policies
        page += 1
        done = len(new_policies) < per_page
    if details:
        # Re-fetch each policy individually for its full attribute set.
        detailed_policies = []
        for policy in policies:
            detailed_policies.append(session.get(
                "{}/api/v2/policies/{}.json".format(url, policy["id"])).json())
        return detailed_policies
    return policies
def addPolicy(browser, token, name):
    """
    Create a new policy with the given name.

    Not implemented. The code that previously followed the raise was
    unreachable and referenced an undefined APICall helper, so it has
    been removed.
    """
    raise NotImplementedError
def addNode(session, url, node, verify=True):
    """
    Create a new node from the given node object (a dictionary).
    """
    endpoint = "{}/api/v2/nodes.json".format(url)
    payload = json.dumps({"node": node})
    return session.post(endpoint, params={}, data=payload, verify=verify).json()
def addNodeGroup(browser, token, obj):
    """
    Create a new node group from the given node group object (a dictionary).

    Not implemented. The code that previously followed the raise was
    unreachable and referenced an undefined APICall helper, so it has
    been removed.
    """
    raise NotImplementedError
def getEvents(session, url, view, since=None):
    """
    Return a list of events using the provided view name.
    Optionally provide a datetime.date object in `since` to only return
    events from a certain date.
    """
    events = []
    page = 1
    per_page = 100
    done = False
    while not done:
        params = {"page": page, "per_page": per_page, "view_name": view}
        if since is not None:
            # Fixed: `since` is optional (defaults to None), but strftime
            # was previously called unconditionally, crashing with an
            # AttributeError whenever no date was supplied.
            params["date_from"] = since.strftime('%Y-%m-%d')
        new_events = session.get("{}/api/v2/events.json".format(url), params=params).json()
        events += new_events
        page += 1
        done = len(new_events) < per_page
    return events
def scan(session, url, node=None, group=None, environment=None, wait=False, label=""):
    """
    Scan a node, group, or environment by name. The ID for the object (node, group, or environment) will be found automatically.
    If `wait` is True, then this function will wait for the scan job to complete before returning.
    """
    # NOTE(review): despite the docstring, `wait` is never consulted -- the
    # polling loop below always runs once a job id is found. TODO confirm
    # whether `wait` was meant to gate the loop.
    job = {}
    found_obj = False
    if node:
        nodes = getNodes(session, url)
        for n in nodes:
            # Case-insensitive name match. If several nodes share a name,
            # a scan is started for each and the last job id wins.
            if n["name"].lower() == node.lower():
                found_obj = True
                result = session.post("{}/api/v2/nodes/{}/start_scan.json".format(url, n["id"]), params={"label": label}).json()
                job["id"] = result["job_id"]
    elif group:
        raise NotImplementedError
    elif environment:
        raise NotImplementedError
    else:
        raise AttributeError("One of node, group, or environment must be provided.")
    if found_obj:
        if "id" in job:
            # Seed status as Pending, then poll every 5s until the job
            # leaves the Pending/Processing states (see module jobStatus).
            job["status"] = 0
            while job["status"] in [jobStatus["Pending"], jobStatus["Processing"]]:
                time.sleep(5)
                job = getJob(session, url, job["id"])
        else:
            raise AttributeError("Job ID was not found from the scan job.")
    else:
        raise AttributeError("Object (node, group, or environment) was not found to start the scan.")
    return job
def getJob(session, url, id):
    """Fetch a single job object by its id."""
    endpoint = "{}/api/v2/jobs/{}.json".format(url, id)
    return session.get(endpoint).json()
| |
# Author: Madhumita Subramaniam
from java.util import Arrays, Date
from java.io import IOException
from java.lang import Enum
from org.gluu.oxauth.service.net import HttpService
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import AuthenticationService
from org.gluu.oxauth.service.common import UserService
from org.gluu.oxauth.util import ServerUtil
from org.gluu.util import StringHelper, ArrayHelper
from javax.faces.application import FacesMessage
from org.gluu.jsf2.message import FacesMessages
import base64
import sys
try:
    import json
except ImportError:
    import simplejson as json
import random
class PersonAuthentication(PersonAuthenticationType):
    # Gluu/oxAuth Jython person-authentication script implementing a
    # two-step flow: step 1 is LDAP username/password, step 2 is an SMS
    # one-time passcode delivered and verified via the Stytch API.
    # NOTE(review): the except blocks below call sys.exc_info(); `sys`
    # must be imported at module scope for them to work -- verify.
    def __init__(self, currentTimeMillis):
        self.currentTimeMillis = currentTimeMillis
        self.identity = CdiUtil.bean(Identity)
    def init(self, customScript, configurationAttributes):
        # Load the mandatory Stytch configuration properties; fail fast
        # (return False) if any of them is missing.
        print("Stytch. Initialization")
        if not configurationAttributes.containsKey("SMS_ENDPOINT"):
            print "Stytch. Initialization. Property SMS_ENDPOINT is mandatory"
            return False
        self.SMS_ENDPOINT = configurationAttributes.get("SMS_ENDPOINT").getValue2()
        if not configurationAttributes.containsKey("AUTH_ENDPOINT"):
            print "Stytch. Initialization. Property AUTH_ENDPOINT is mandatory"
            return False
        self.AUTH_ENDPOINT = configurationAttributes.get("AUTH_ENDPOINT").getValue2()
        if not configurationAttributes.containsKey("ENROLL_ENDPOINT"):
            print "Stytch. Initialization. Property ENROLL_ENDPOINT is mandatory"
            return False
        self.ENROLL_ENDPOINT = configurationAttributes.get("ENROLL_ENDPOINT").getValue2()
        if not configurationAttributes.containsKey("PROJECT_ID"):
            print "Stytch. Initialization. Property PROJECT_ID is mandatory"
            return False
        self.PROJECT_ID = configurationAttributes.get("PROJECT_ID").getValue2()
        if not configurationAttributes.containsKey("SECRET"):
            print "Stytch. Initialization. Property SECRET is mandatory"
            return False
        self.SECRET = configurationAttributes.get("SECRET").getValue2()
        print("Stytch Initialized successfully")
        return True
    def destroy(self, configurationAttributes):
        # Nothing to release; always succeeds.
        print("Stytch Destroy")
        print("Stytch Destroyed successfully")
        return True
    def getApiVersion(self):
        # oxAuth custom-script API version implemented by this script.
        return 11
    def getAuthenticationMethodClaims(self, requestParameters):
        return None
    def isValidAuthenticationMethod(self, usageType, configurationAttributes):
        return True
    def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
        return None
    def authenticate(self, configurationAttributes, requestParameters, step):
        # Step 1: password login, mobile-number lookup, then either enroll
        # the phone with Stytch or send a passcode SMS.
        # Step 2: verify the passcode the user typed against Stytch.
        userService = CdiUtil.bean(UserService)
        authenticationService = CdiUtil.bean(AuthenticationService)
        facesMessages = CdiUtil.bean(FacesMessages)
        facesMessages.setKeepMessages()
        session_attributes = self.identity.getSessionId().getSessionAttributes()
        if step == 1:
            print("Stytch Step 1 Password Authentication")
            credentials = self.identity.getCredentials()
            user_name = credentials.getUsername()
            user_password = credentials.getPassword()
            logged_in = False
            if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
                logged_in = authenticationService.authenticate(user_name, user_password)
            if not logged_in:
                return False
            foundUser = None
            try:
                foundUser = authenticationService.getAuthenticatedUser()
            except:
                print("Stytch Error retrieving user {} from LDAP".format(user_name))
                return False
            # Resolve the user's mobile number, preferring employeeNumber,
            # then mobile, then telephoneNumber -- but only when the
            # phoneNumberVerified attribute is truthy.
            mobile_number = None
            try:
                isVerified = foundUser.getAttribute("phoneNumberVerified")
                if isVerified:
                    mobile_number = foundUser.getAttribute("employeeNumber")
                    if not mobile_number:
                        mobile_number = foundUser.getAttribute("mobile")
                    if not mobile_number:
                        mobile_number = foundUser.getAttribute("telephoneNumber")
                    if not mobile_number:
                        facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to determine mobile phone number")
                        print("Stytch Error finding mobile number for user '{}'".format(user_name))
                        return False
            except Exception as e:
                facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to determine mobile phone number")
                print("Stytch Error finding mobile number for {}: {}".format(user_name, e))
                return False
            # NOTE(review): if phoneNumberVerified is falsy, mobile_number
            # stays None and the flow continues below -- TODO confirm this
            # is intended rather than an early failure.
            self.identity.setWorkingParameter("mobile_number", mobile_number)
            self.identity.getSessionId().getSessionAttributes().put("mobile_number", mobile_number)
            mobileDevices = self.getUserAttributeValue(user_name, "oxMobileDevices")
            if mobileDevices is None:
                # enrollment
                print "No phones registered. Adding %s " % mobile_number
                phone_id = self.addUser(mobile_number, user_name)
                if phone_id is not None:
                    self.identity.setWorkingParameter("phone_id", phone_id)
                    print "phone_id to which SMS has been sent: %s" % phone_id
                    return True
                # if enroll is success, send sms and move on to step 2
                else:
                    print "Failed to send sms to user. In the next login attempt, user will be prompted for passcode anyway, so it is safe to return true"
                    return True
                ### end of enrollment
            # already contains registered mobiles
            print "mobileDevices: %s" % mobileDevices
            data = json.loads(mobileDevices)
            # Look for the resolved number among the registered phones;
            # leading '+' signs are ignored when comparing.
            for phone in data['phones']:
                print "phone number : %s " % phone['number']
                print "mobile_number : %s" % mobile_number
                if StringHelper.equals(mobile_number.strip('+'), phone['number'].strip('+')):
                    phone_id = phone['stytch_phone_id']
                    print "phone_id stored in oxMobileDevices: %s " % phone_id
                    if StringHelper.isNotEmptyString(phone_id) :
                        ### authentication
                        self.identity.setWorkingParameter("phone_id", phone_id)
                        phone_id = self.sendPasscodeSMSToUser(mobile_number)
                        print "SendPasscodeSMSToUser: %s " % phone_id
                        # NOTE(review): sendPasscodeSMSToUser is invoked a
                        # second time here, so the user receives two SMS
                        # passcodes -- likely should test the phone_id
                        # captured above instead. TODO confirm.
                        if self.sendPasscodeSMSToUser(mobile_number) is None:
                            facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to send message to mobile phone")
                            return False
                        else:
                            print "SMS sent successfully"
                            return True
                        ### end of authentication
                else:
                    # enrollment.
                    phone_id = self.addUser(mobile_number, user_name)
                    if phone_id is not None:
                        self.identity.setWorkingParameter("phone_id", phone_id)
                        print "phone_id to which SMS has been sent: %s" % phone_id
                        return True
                    # if enroll is success, send sms and move on to step 2
                    else:
                        print "Failed to send sms to user. In the next login attempt, user will be prompted for passcode anyway, so it is safe to return true"
                        return True
                    ### end of enrollment
            return False
        elif step == 2:
            # Step 2: validate the 6-digit passcode typed on the OTP page.
            form_passcode = ServerUtil.getFirstValue(requestParameters, "passcode")
            print("Stytch form_response_passcode: {}".format(str(form_passcode)))
            phone_id = session_attributes.get("phone_id")
            print("Stytch phone_id: {}".format(str(phone_id)))
            if phone_id is None:
                print("Stytch Failed to find phone_id in session")
                return False
            if form_passcode is None:
                print("Stytch Passcode is empty")
                return False
            if len(form_passcode) != 6:
                print("Stytch Passcode from response is not 6 digits: {}".format(form_passcode))
                return False
            #use the phone_id to send the request for authentication
            result = self.verifyPasscode(phone_id, form_passcode)
            if result is False:
                print("Stytch failed, user entered the wrong code! {} ".format(form_passcode))
                facesMessages.add(FacesMessage.SEVERITY_ERROR, "Incorrect SMS code, please try again.")
            else:
                return True
        # Falls through here for unknown steps and for a wrong passcode.
        print("Stytch ERROR: step param not found or != (1|2)")
        return False
    def prepareForStep(self, configurationAttributes, requestParameters, step):
        if step == 1:
            print("Stytch Prepare for Step 1")
            return True
        elif step == 2:
            print("Stytch Prepare for Step 2")
            return True
        return False
    def getExtraParametersForStep(self, configurationAttributes, step):
        # phone_id must survive into step 2's session attributes.
        if step == 2:
            return Arrays.asList("phone_id")
        return None
    def getCountAuthenticationSteps(self, configurationAttributes):
        return 2
    def getPageForStep(self, configurationAttributes, step):
        # Step 2 renders the OTP entry page; step 1 uses the default page.
        if step == 2:
            return "/auth/otp_sms/otp_sms.xhtml"
        return ""
    def getNextStep(self, configurationAttributes, requestParameters, step):
        return -1
    def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
        print "Get external logout URL call"
        return None
    def logout(self, configurationAttributes, requestParameters):
        return True
    def sendPasscodeSMSToUser(self, phoneNumber):
        # POST the phone number to the Stytch SMS endpoint using basic
        # auth (base64 of PROJECT_ID:SECRET). Returns the Stytch phone_id
        # on success, or None on any failure.
        httpService = CdiUtil.bean(HttpService)
        http_client = httpService.getHttpsClient()
        http_client_params = http_client.getParams()
        data = {"phone_number": phoneNumber }
        payload = json.dumps(data)
        encodedString = base64.b64encode((self.PROJECT_ID +":"+self.SECRET).encode('utf-8'))
        headers = { "Accept" : "application/json" }
        try:
            http_service_response = httpService.executePost(http_client, self.SMS_ENDPOINT, encodedString, headers, payload)
            http_response = http_service_response.getHttpResponse()
            print "http_response sendPasscodeSMSToUser%s" % http_response
        except:
            print "Stytch. Exception: sendPasscodeSMSToUser", sys.exc_info()[1]
            return None
        try:
            if not httpService.isResponseStastusCodeOk(http_response):
                print "Stytch. sendPasscodeSMSToUser: %s" % str(http_response.getStatusLine().getStatusCode())
                httpService.consume(http_response)
                return None
            else :
                response_bytes = httpService.getResponseContent(http_response)
                response_string = httpService.convertEntityToString(response_bytes)
                httpService.consume(http_response)
                response = json.loads(response_string)
                phone_id = response["phone_id"]
                return phone_id
        finally:
            http_service_response.closeConnection()
        # Unreachable: both branches above return, but kept as a safety net.
        return None
    def verifyPasscode(self, method_id, code):
        # POST the passcode to the Stytch authenticate endpoint; True only
        # when the service answers with an OK status.
        httpService = CdiUtil.bean(HttpService)
        http_client = httpService.getHttpsClient()
        http_client_params = http_client.getParams()
        data = {"method_id": method_id, "code": code }
        payload = json.dumps(data)
        encodedString = base64.b64encode((self.PROJECT_ID +":"+self.SECRET).encode('utf-8'))
        headers = { "Accept" : "application/json" }
        try:
            http_service_response = httpService.executePost(http_client, self.AUTH_ENDPOINT, encodedString, headers, payload)
            http_response = http_service_response.getHttpResponse()
            print "http_response verifyPasscode - %s" % http_response
        except:
            print "Stytch. Exception: verifyPasscode", sys.exc_info()[1]
            return False
        try:
            if not httpService.isResponseStastusCodeOk(http_response):
                print "Stytch. Verify passcode: ", str(http_response.getStatusLine().getStatusCode())
                httpService.consume(http_response)
                return False
            else :
                print "Stytch. User verified"
                return True
        finally:
            http_service_response.closeConnection()
        # Unreachable: both branches above return, but kept as a safety net.
        return False
    def hasEnrollments(self, configurationAttributes, user):
        return len(self.getNumbers(user)) > 0
    def getNumbers(self, user):
        # Collect the user's distinct mobile attribute values.
        numbers = set()
        tmp = user.getAttributeValues("mobile")
        if tmp:
            for t in tmp:
                numbers.add(t)
        return list(numbers)
    def getUserAttributeValue(self, user_name, attribute_name):
        # Look up a single custom attribute value for the named user;
        # None when the user or the attribute is absent.
        if StringHelper.isEmpty(user_name):
            return None
        userService = CdiUtil.bean(UserService)
        find_user_by_uid = userService.getUser(user_name, attribute_name)
        if find_user_by_uid == None:
            return None
        custom_attribute_value = userService.getCustomAttribute(find_user_by_uid, attribute_name)
        if custom_attribute_value == None:
            return None
        attribute_value = custom_attribute_value.getValue()
        print "Stytch. Get user attribute. User's %s attribute %s value is %s" % (user_name, attribute_name, attribute_value)
        return attribute_value
    def addUser(self, phoneNumber, gluu_user_name):
        # Enroll the phone with Stytch (which also sends the first SMS),
        # then persist the returned ids on the Gluu user's oxMobileDevices
        # attribute. Returns the Stytch phone_id, or None on failure.
        httpService = CdiUtil.bean(HttpService)
        http_client = httpService.getHttpsClient()
        userService = CdiUtil.bean(UserService)
        data = {"phone_number": phoneNumber }
        payload = json.dumps(data)
        encodedString = base64.b64encode((self.PROJECT_ID +":"+self.SECRET).encode('utf-8'))
        headers = { "Accept" : "application/json" }
        try:
            http_service_response = httpService.executePost(http_client, self.ENROLL_ENDPOINT, encodedString, headers, payload)
            http_response = http_service_response.getHttpResponse()
            print "http_response %s addUser" % http_response
        except:
            print "Stytch. Exception: addUser ", sys.exc_info()[1]
            return None
        try:
            responseStatusCode = http_response.getStatusLine().getStatusCode();
            print "Stytch. response: %s " % str(http_response.getStatusLine().getStatusCode())
            if responseStatusCode == 200 or responseStatusCode == 201:
                response_bytes = httpService.getResponseContent(http_response)
                response_string = httpService.convertEntityToString(response_bytes)
                httpService.consume(http_response)
                response = json.loads(response_string)
                phone_id = response["phone_id"]
                user_id = response["user_id"]
                print "phone id %s " % phone_id
                print "user id %s " % user_id
                find_user_by_uid = userService.getUser(gluu_user_name)
                # NOTE(review): this overwrites oxMobileDevices with a
                # single-entry list, discarding previously enrolled phones.
                oxMobileDevices = json.dumps({'phones': [{'nickname': "Stych Credential", 'number': phoneNumber, 'stytch_phone_id': phone_id, 'stytch_user_id':user_id, 'addedOn': Date().getTime()}]})
                userService.setCustomAttribute(find_user_by_uid, "oxMobileDevices", oxMobileDevices)
                updated_user = userService.updateUser(find_user_by_uid)
                if updated_user is not None:
                    return phone_id
                else:
                    print "Stytch. Failed to update user - addUser"
            else:
                print "Stytch. Add user response: ", str(http_response.getStatusLine().getStatusCode())
                httpService.consume(http_response)
            return None
        finally:
            http_service_response.closeConnection()
        # Unreachable: the try block always returns, kept as a safety net.
        return None
| |
from compiler import *
ui_strings = [
("music_volume", "Music Volume:"),
("sound_volume", "Sound Volume:"),
("mouse_sensitivity", "Mouse Sensitivity:"),
("invert_mouse_y_axis", "Invert Mouse Y Axis"),
("enabled", "Enabled"),
("disabled", "Disabled"),
("damage_to_player", "Damage to Player:"),
("reduced_to_1_over_4_easiest", "Reduced to 1/4 (Easiest)"),
("reduced_to_1_over_2_easy", "Reduced to 1/2 (Easy)"),
("damage_to_friends", "Damage to Friends:"),
("reduced_to_1_over_2_easiest", "Reduced to 1/2 (Easiest)"),
("reduced_to_3_over_4_easy", "Reduced to 3/4 (Easy)"),
("normal", "Normal"),
("combat_ai", "Combat AI:"),
("combat_speed", "Combat Speed:"),
("good", "Good"),
("average_caps", "Average"),
("poor", "Poor"),
("faster", "Faster"),
("slower", "Slower"),
("control_block_direction", "Control Block Direction:"),
("automatic_recommended", "Automatic"),
("manual_easy", "Manual (Easy)"),
("manual_hard", "Manual (Hard)"),
("by_mouse_movement", "By mouse movement"),
("control_attack_direction", "Control Attack Direction:"),
("lance_control", "Lance Control:"),
("by_relative_enemy_position", "By relative enemy position"),
("by_inverse_mouse_movement", "By inverse mouse movement"),
("battle_size", "Battle Size:"),
("show_attack_direction", "Show Attack Direction"),
("show_targeting_reticule", "Show Targeting Reticle"),
("show_names_of_friendly_troops", "Show Banners on Friendly Troops"),
("report_damage", "Report Damage"),
("report_shot_difficulty", "Report Shot Difficulty"),
("difficulty_rating_percentage", "Difficulty Rating = %d%%"),
("controls", "Controls"),
("video_options", "Video Options"),
("done", "Done"),
("factions", "Factions"),
("item_itemname", "Item - %s"),
("prop_propname", "Prop - %s"),
("unknown_unknownname", "Unknown - %s"),
("entry_point_entrypointname", "Entry Point %d"),
("passage_menu_item_passagename", "Passage (menu item %d)"),
("plant_plantname", "Plant - %s"),
("export_file_for_character_playername_already_exists_overwrite_it", "Export file for character %s already exists. Overwrite it?"),
("yes", "Yes"),
("no", "No"),
("set_save_file_name", "Enter a name for this save-game:"),
("enter_new_name", "Enter a new name:"),
("export_character", "Export Character"),
("import_character", "Import Character"),
("character_playername_exported_successfully", "Character %s exported successfully."),
("character_playername_imported_successfully", "Character %s imported successfully."),
("unable_to_open_import_file", "Unable to open import file."),
("are_you_sure_you_want_to_import_the_character", "Are you sure you want to import the character?"),
("unable_to_find_character_import_file", "Unable to find character import file."),
("mount_and_blade_is_running_in_trial_mode_please_buy_the_game_for_importing_a_character", "Mount&Blade is running in trial mode. Please buy the game for importing a character."),
("change_skin", "Skin"),
("change_hair", "Hair"),
("change_hair_color", "Hair Color"),
("change_beard", "Beard"),
("tutorial", "Tutorial"),
("tutorial_face_generator", "Adjust your character's face using the buttons and the sliders. To rotate the head, click on it and drag the mouse."),
("restore", "Load"),
("cancel", "Cancel"),
("delete", "Delete"),
("confirm_delete_game", "Are you sure you want to delete this game?"),
("error_removing_file", "Error removing file..."),
("day_datedisplay", "Day %d (%d:%d%d)"),
("reset_changes", "Reset Changes"),
("weapon_proficiencies", "Proficiencies"),
("skills", "Skills"),
("attributes", "Attributes"),
("enter_name_here", "*Enter Name Here*"),
("edit_face", "Click to edit face"),
("statistics", "Statistics"),
("next", "Next"),
("prev", "Prev"),
("learn", "Learn"),
("question_saving_policy", "What will the game's saving policy be?"),
("saving_policy_realistic", "Realistic! No quitting without saving!"),
("saving_policy_nonrealistic", "Allow me to quit without saving."),
("tutorial_character_generation", "Now enter your name and distribute your attribute, skill and weapon points. You can click on various elements on the screen to learn how each one will affect your character."),
("str", "STR"),
("agi", "AGI"),
("int", "INT"),
("cha", "CHA"),
("at_learning_limit", "(At learning limit)"),
("not_enough_skill_points_to_learn", "(Not enough skill points to learn)"),
("strength", "strength"),
("agility", "agility"),
("intelligence", "intelligence"),
("charisma", "charisma"),
("not_enough_attributetype_to_learn_this_skill", "(Not enough %s to learn this skill)"),
("explanation_one_handed_weapon", "Covers usage of one handed swords, axes and blunt weapons."),
("explanation_two_handed_weapon", "Covers usage of two handed swords, great axes and mauls."),
("explanation_polearm", "Covers usage of pole weapons like spears, lances, staffs, etc."),
("explanation_archery", "Covers usage of bows."),
("explanation_crossbow", "Covers usage of crossbows."),
("explanation_throwing", "Covers usage of thrown weapons like javelins, darts, stones etc."),
("explanation_firearms", "Covers usage of pistols and muskets."),
("explanation_strength", "Strength: Every point adds +1 to hit points. The following skills can not be developed beyond 1/3 of Strength: ironflesh, Power-strike, Power-throw, Power-draw."),
("explanation_agility", "Agility: Each point gives five weapon points and slightly increases movement speed. The following skills can not be developed beyond 1/3 of Agility: weapon-master, Shield, Athletics, Riding, Horse archery, Looting."),
("explanation_intelligence", "Intelligence: Every point to intelligence immediately gives one extra skill point. The following skills can not be developed beyond 1/3 of Intelligence: Trainer, Tracking, Tactics, Path finding, Spotting, Inventory Management, Wound treatment, Surgery, First-aid, Engineer, Persuasion."),
("explanation_charisma", "Charisma: Each point increases your party size limit by +1. The following skills can not be developed beyond 1/3 of Charisma: Prisoner Management, Leadership, Trade."),
("level", "Level: %d"),
("xp", "Experience: %d"),
("next_level_at", "Next level at: %d"),
("health_player", "Health: %d/%d"),
("health", "Health: %d"),
("attribute_points", "Attribute points: %d"),
("skill_points", "Skill points: %d"),
("weapon_points", "Weapon points: %d"),
("mission_losses_none", " none."),
("mission_losses_wounded", "wounded :"),
("mission_losses_killed", "killed :"),
("party_losses", "%s : %d wounded --- %d killed of %d."),
("casualties_sustained", "Casualties sustained:"),
("advantage_change", "Advantage change = %c%d "),
("overall_battle_casualties", "Overall battle causalties:"),
("advantage_outnumbered", " You are hopelessly outnumbered."),
("advantage_major_disadvantage", " You have a major disadvantage."),
("advantage_slight_disadvantage", " You are slightly disadvantaged."),
("advantage_balanced", " The situation is balanced."),
("advantage_fair_advantage", " You have a fair advantage for winning."),
("advantage_greatly_favored", " The odds of battle favor you greatly."),
("tactical_advantage", "Tactical advantage: %d (%s)"),
("order_group", "Order group:"),
("question_save_changes", "You have made changes to the objects. Do you want to save changes?"),
("yes_save", "Yes, save"),
("no_discard_changes", "No, discard changes"),
("everyone_control", "Everyone!"),
("everyone_around_control", "Nearby Soldiers!"),
("others_control", "Others!"),
("question_give_up_fight", "Give up the fight?"),
("give_up", "Give up"),
("keep_fighting", "Keep fighting"),
("question_leave_area", "Leave Area"),
("cant_retreat_there_are_enemies_nearby", "Can't retreat. There are enemies nearby!"),
("question_retreat_battle", "Retreat battle?"),
("retreated_battle", "%s has been routed."),
("retreated_battle", "%s has fled from the battlefield."),
("retreat", "Retreat"),
("talk", "Talk"),
("duel", "Duel"),
("mount", "Mount"),
("riding_skill_not_adequate_to_mount", "(Riding skill not adequate to mount)"),
("dismount", "Dismount"),
("exit", "Exit"),
("door_to", "Door to "),
("open", "Open"),
("equip", "Equip"),
("baggage", "Baggage"),
("access_inventory", "Access inventory"),
("chest", "Chest"),
("passage", "Passage"),
("go", "Go"),
("retreat_battle", "Retreat Battle"),
("leave_area", "Leave Area"),
("reports", "Reports"),
("camp", "Camp"),
("terrain", "Terrain"),
("quests", "Notes"),
("inventory", "Inventory"),
("character", "Character"),
("party", "Party"),
("paused", "Paused"),
("click_left_button_to_cancel_wait", "Waiting... (Left click to return)"),
("midnight", "Midnight"),
("late_night", "Late night"),
("dawn", "Dawn"),
("early_morning", "Early morning"),
("morning", "Morning"),
("noon", "Noon"),
("afternoon", "Afternoon"),
("late_afternoon", "Late afternoon"),
("dusk", "Dusk"),
("evening", "Evening"),
("midnight", "Midnight"),
("level_limit_reached", "Level Limit Reached!"),
("explanation_level_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to Level 8. In order to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("time_limit_reached", "Time Limit Reached!"),
("explanation_time_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to 30 game days. In oder to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("target_lost", "Target lost"),
("waiting", "Waiting."),
("travelling_to", "Travelling to "),
("following", "Following "),
("accompanying", "Accompanying "),
("running_from", "Running from "),
("patrolling", "Patrolling"),
("patrolling_around", "Patrolling around "),
("holding", "Holding"),
("travelling", "Travelling"),
("fighting_against", "Fighting against "),
("speed_equals", "Speed = %2.1f"),
("defenders", "Garrison:"),
("prisoners", "Prisoners:"),
("1_hour", "1 hour"),
("n_hours", "%d hours"),
("between_hours", "%d - %d hours"),
("combatants", "Combatants: %d"),
("party_size", "Party size: %d"),
("party_size_between", "Party size: %d - %d"),
("merchant", "Merchant"),
("return", "Return"),
("no_cost", "No cost"),
("rename", "Rename"),
("use", "Use"),
("destroy", "Destroy"),
("destructible_target", "Destructible target"),
("tutorial_inventory", "This is the trade screen. Hold down control key while clicking on an item to quickly purchase or sell it."),
("head_armor", "Head Armor: %d"),
("body_armor", "Body Armor: %d"),
("leg_armor", "Leg Armor: %d"),
("encumbrance", "Encumbrance: %2.1f"),
("you_dont_have_value", "You don't have %s."),
("merchant_cant_afford_value", "%s: I can't afford %s. I have only %s."),
("merchant_pay_whatever", "Allright, just pay whatever you can."),
("merchant_think_of_something_else", "Hmm. Let us think of something else."),
("dumping_value_items", "%d items will be permanently lost, are you sure?"),
("dumping_value_item", "One item will be permanently lost, are you sure?"),
("question_slaughter_food_and_eat", "Slaughter this %s and eat it?"),
("money_value", "Money: %s"),
("dump", "Discard"),
("outfit", "Outfit"),
("arms", "Arms"),
("horse", "Horse"),
("food", "Food"),
("reclaim_your_sold_goods", "Reclaim your sold goods before buying that!"),
("return_your_bought_goods", "Return your bought goods before selling that!"),
("polearm_no_shield", "Polearm (No shield)"),
("polearm", "Polearm"),
("two_handed", "Two-handed"),
("two_handed_one_handed", "Two-handed/One-handed"),
("one_handed", "One-handed"),
("return_price", "Return price: %d"),
("sell_price", "Sell price: %d"),
("reclaim_price", "Reclaim price: %d"),
("buying_price", "Buying price: %d"),
("default_item", "Default item"),
("buying_price_free", "Buying price: Free"),
("weight", "Weight: %2.1f"),
("plus_value_to_head_armor", "+%d to head armor"),
("plus_value_to_body_armor", "+%d to body armor"),
("plus_value_to_leg_armor", "+%d to leg armor"),
("swing", "Swing: %d%s"),
("damage", "Damage: %d%s"),
("thrust", "Thrust: %d%s"),
("accuracy", "Accuracy: %d"),
("speed_rating", "Speed rating: %d"),
("value_to_damage", "%c%d to damage"),
("value_to_morale", "+%1.1f to party morale"),
("resistance", "Resistance: %d"),
("size", "Size: %d"),
("weapon_reach", "Weapon reach: %d"),
("armor", "Armor: %d"),
("speed", "Speed: %d"),
("maneuver", "Maneuver: %d"),
("charge", "Charge: %d"),
("hit_points", "Hit Points: %d/%d"),
("requires_value_difficulty", "Requires %s: %d"),
("bonus_against_shields", "Bonus against shields"),
("cant_be_used_to_block", "Can't be used to block"),
("troop_cant_use_item", "%s: I can't use that item!"),
("notification_riding_skill_not_enough", "Your riding skill is not high enough to mount this horse."),
("notification_requirements_not_met", "You don't have the required skills or attributes for this weapon."),
("notification_payment_value", "You must pay %s."),
("notification_payment_receive_value", "You will receive %s."),
("one_handed_weapons", "One Handed Weapons"),
("two_handed_weapons", "Two Handed Weapons"),
("polearms", "Polearms"),
("archery", "Archery"),
("crossbows", "Crossbows"),
("throwing", "Throwing"),
("firearms", "Firearms"),
("reset", "Reset"),
("release_one", "Release one"),
("move_up", "Move Up"),
("move_down", " Move Down "),
("upgrade_one", "Upgrade one"),
("party_skills", "Party Skills"),
("morale", "Morale: %s"),
("terrible", "Terrible"),
("very_low", "Very low"),
("low", "Low"),
("below_average", "Below average"),
("average", "Average"),
("above_average", "Above average"),
("high", "High"),
("very_high", "Very high"),
("excellent", "Excellent"),
("starving", "Starving! %d%%"),
("weekly_cost_value", "Weekly cost: %s"),
("company", "Company: %d / %d"),
("prisoners_equal_value", "Prisoners: %d / %d"),
("choose_prisoners", "Choose Prisoners"),
("choose_companions", "Choose Companions"),
("rescued_prisoners", "Rescued Prisoners"),
("captured_enemies", "Captured Enemies"),
("disband", "Disband"),
("take_prisoner", "Take prisoner"),
("take_back", "Take back"),
("give", "Give"),
("take", "Take"),
("sell", "Sell"),
("hire", "Hire"),
("notification_cant_hire", "(Can't hire: not enough money)"),
("uncapture", "Release"),
("capture", "Capture"),
("party_capcity_reached", "(Party capacity reached)"),
("all", " all"),
("joining_cost_weekly_wage", "Joining cost: %d, Weekly wage: %d"),
("weekly_wage", "Weekly wage: %d denars"),
("price", "Price: %d"),
("number_ready_to_upgrade", "%d ready to be upgraded."),
("upgrade_to_value", " Upgrade to %s (%dd)"),
("notification_no_slot_for_upgrade", "No slot for upgrading to %s!"),
("shield_broken", "Shield broken."),
("shield_cracked", "Shield cracked."),
("shield_deformed", "Shield deformed."),
("you_hit_a_friendly_troop", "You hit a friendly troop!"),
("hit_shield_on_back", "Hit shield on back!"),
("delivered_couched_lance_damage", "Delivered couched lance damage!"),
("received_couched_lance_damage", "Received couched lance damage!"),
("speed_bonus_plus", "Speed bonus: +%d%%"),
("speed_bonus", "Speed bonus: %d%%"),
("cant_reload_this_weapon_on_horseback", "Can't reload this weapon on horseback."),
("no_more_bolts", "No more bolts..."),
("you_are_not_carrying_any_bolts", "You are not carrying any bolts."),
("no_more_arrows", "No more arrows..."),
("you_are_not_carrying_any_arrows", "You are not carrying any arrows."),
("head_shot", "Head shot!"),
("delivered_number_damage", "Delivered %d damage."),
("delivered_number_damage_to_horse", "Delivered %d damage to horse."),
("horse_charged_for_number_damage", "Horse charged for %d damage."),
("received_number_damage", "Received %d damage."),
("horse_received_number_damage", "Horse received %d damage."),
("value_killed_teammate", "%s has killed a teammate!"),
("horse_fell_dead", "Horse fell dead..."),
("horse_crippled", "Horse crippled..."),
("shot_difficulty", "Shot difficulty: %2.1f"),
("you_have_improved_your_proficiency_in_value_to_number", "You have improved your proficiency in %s to %d."),
("your_proficiency_in_value_has_improved_by_number_to_number", "Your proficiency in %s has improved by +%d to %d."),
("value_killed_by_value", "%s killed by %s."),
("value_fell_dead", "%s fell dead."),
("value_knocked_unconscious_by_value", "%s knocked unconscious by %s."),
("value_fell_unconscious", "%s fell unconscious."),
("troop_routed", "%s has been routed."),
("troop_panicked", "%s has panicked."),
("troop_fled", "%s has fled the battle."),
("you_got_number_experience", "You got %d experience."),
("you_have_advanced_to_level_number", "You have advanced to level %d."),
("value_has_advanced_to_level_number", "%s has advanced to level %d."),
("you_got_value", "You got %s."),
("new_quest_taken", "New quest taken: %s."),
("quest_completed_value", "Quest completed: %s."),
("quest_succeeded_value", "Quest succeeded: %s."),
("quest_failed_value", "Quest failed: %s."),
("quest_concluded_value", "Quest concluded: %s."),
("quest_cancelled_value", "Quest cancelled: %s."),
("lost_value", " (Lost: %s)"),
("items_lost", " (Items lost:"),
("party_has_nothing_to_eat", "Party has nothing to eat!"),
("days_training_is_complete", "Day's training is complete..."),
("total_experience_gained_through_training_number", "Total experience gained through training: %d"),
("some_soldiers_are_ready_to_upgrade", "Some soldiers are ready to upgrade."),
("number_of_companions_exceeds_leadership_limit", " Number of companions exceeds leadership limit."),
("number_of_prisoners_exceeds_prisoner_management_limit", " Number of prisoners exceeds prisoner management limit."),
("party_morale_is_low", " Party morale is low!"),
("and_one_space", " and"),
("has_deserted_the_party", " has deserted the party."),
("have_deserted_the_party", " have deserted the party."),
("weekly_report", "Weekly report"),
("shared_number_experience_within_party", "Shared %d experience within party."),
("got_item_value", "Got item: %s."),
("game_saved_successfully", "Game saved successfully."),
("autosaving", "Autosaving..."),
("quick_saving", "Quick-saving..."),
("cant_quick_save", "Can't Quick-save during battle..."),
("screenshot_taken_to_value", "Screenshot is saved to %s"),
("screenshot_failed", "Can't save screenshot."),
("value_joined_your_party", "%s joined your party."),
("value_joined_party_as_prisoner", "%s joined party as prisoner."),
("value_has_joined_party", "%s has joined party."),
("value_has_been_taken_prisoner", "%s has been taken prisoner."),
("value_left_the_party", "%s left the party."),
("number_values_left_the_party", "%d %s(s) left the party."),
("number_value_left_the_party", "%d %s left the party."),
("your_relations_with_value_has_improved_from_number_to_number", "Your relations with %s has improved from %d to %d."),
("your_relations_with_value_has_deteriorated_from_number_to_number", "Your relations with %s has deteriorated from %d to %d."),
("you_lost_value", "You lost %s."),
("lost_item_value", "Lost item: %s."),
("got_number_value", "Got %d %s."),
("lost_number_value", "Lost %d %s."),
("set_default_keys", "Set default keys"),
("undo_changes", "Undo changes"),
("press_a_key", "Press a key"),
("return_to_game", "Return to Game"),
("options", "Options"),
("save_and_exit", "Save & Exit"),
("save", "Save"),
("save_as", "Save As"),
("quit_without_saving", "Quit without Saving"),
("empty_slot", "Empty Slot"),
("game_saved", "Game saved..."),
("confirm_overwrite", "Savegame for %s will be overwritten. Are you sure?"),
("dynamic_lighting", "Dynamic Lighting"),
("character_shadows", "Character Shadows"),
("grass_density", "Grass Density:"),
("environment_shadows", "Environment Shadows"),
("realistic_shadows_on_plants", "Realistic Shadows on Plants:"),
("particle_systems", "Particle Systems"),
("gamma", "Monitor Gamma:"),
("character_detail", "Character Detail:"),
("character_shadow_detail", "Character Shadow Detail:"),
("blood_stains", "Blood Stains:"),
("on", "On"),
("off", "Off"),
("near_player_only", "Near player only"),
("default", "Default"),
("3d_grass", "3D Grass:"),
("number_of_ragdolls", "Number of Rag Dolls:"),
("number_of_corpses", "Number of Corpses:"),
("unlimited", "Unlimited"),
("anisotropic_filtering", "Anisotropic Filtering"),
("fast_water_reflection", "Fast Water Reflections"),
("maximum_framerate", "Max. Frame-rate:"),
("show_framerate", "Show Frame-rate:"),
("estimated_performance", "Estimated Performance: %d%%"),
("change_graphics_settings_explanation", "Some changes you have made will take effect when you enter a new area."),
("start_tutorial", "Play Tutorial"),
("start_a_new_game", "Start a New Game"),
("restore_a_saved_game", "Load Game"),
("exit_to_windows", "Exit"),
("credits", "Credits"),
("version_value", "v%s"),
("active_quests", "Active Quests"),
("finished_quests", "Finished Quests"),
("given_on_date", "Given on: %s"),
("days_since_given", "Days since given: %d"),
("quest_progression_number", "Quest progression: %d%%"),
("too_many_quests", "Too many quests"),
("ok", "OK"),
("move_forward", "Move Forward"),
("move_backward", "Move Backward"),
("move_left", "Move Left"),
("move_right", "Move Right"),
("action", "Action"),
("jump", "Jump"),
("attack", "Attack"),
("parry_then_attack", "Counter Attack"),
("defend", "Defend"),
("kick", "Kick"),
("equip_weapon_1", "Equip Item 1"),
("equip_weapon_2", "Equip Item 2"),
("equip_weapon_3", "Equip Item 3"),
("equip_weapon_4", "Equip Item 4"),
("equip_next_weapon", "Equip Next Weapon"),
("equip_next_shield", "Equip Next Shield"),
("sheath_weapon", "Sheath Weapon"),
("character_window", "Character Window"),
("inventory_window", "Inventory Window"),
("party_window", "Party Window"),
("quests_window", "Quests Window"),
("game_log_window", "Game Log Window"),
("leave_location_retreat", "Leave Location/Retreat"),
("zoom", "Zoom"),
("view_outfit", "View Outfit"),
("toggle_first_person_view", "Toggle First Person View"),
("view_orders", "View Orders"),
("quick_save", "Quick Save"),
("no_key_assigned", "No key assigned"),
("new_enemies_have_arrived", "New enemies have arrived."),
("reinforcements_have_arrived", "Reinforcements have arrived."),
("report_casualties", "Report Casualties"),
("report_experience", "Report Experience"),
("current_level_value", "Current Level: %d"),
("base_attribute_value", "Base Attribute: %s"),
("battle_controls", "Battle Controls"),
("map_controls", "Map Controls"),
("general_controls", "General Controls"),
("zoom_in", "Zoom In"),
("zoom_out", "Zoom Out"),
("wait", "Wait"),
("take_screenshot", "Take Screenshot"),
("randomize", "Randomize"),
("hint", "Hint"),
("press_left_mouse_button_to_continue", "Press left mouse button to continue..."),
("loot", "Loot"),
("chest", "Chest"),
("cut_short", "c"),
("pierce_short", "p"),
("blunt_short", "b"),
("battle", "Battle"),
("siege", "Siege"),
("troops", "Troops:"),
("loading_module_info_file", "Loading Module Info File..."),
("processing_ini_file", "Processing INI File..."),
("loading_music", "Loading Music..."),
("loading_data", "Loading Data..."),
("loading_setting_data", "Loading Setting Data..."),
("loading_textures", "Loading Textures..."),
("finished", "Finished."),
("creating_game", "Creating Game..."),
("loading_savegame_file", "Loading Savegame File..."),
("loading_map_file", "Loading Map File..."),
("initializing_map", "Initializing Map..."),
("launching_game", "Launching Game..."),
("capital_battle", "BATTLE:"),
("capital_versus", "--VERSUS--"),
("tracks", "Tracks"),
("battleground", "Battleground"),
("order_1", "Select Order 1"),
("order_2", "Select Order 2"),
("order_3", "Select Order 3"),
("order_4", "Select Order 4"),
("order_5", "Select Order 5"),
("order_6", "Select Order 6"),
("order_button_hold_this_position", "Hold this position"),
("order_button_follow_me", "Follow me"),
("order_button_charge", "Charge"),
("order_button_stand_ground", "Stand ground"),
("order_button_retreat", "Retreat"),
("order_button_advance", "Advance ten paces"),
("order_button_fall_back", "Fall back ten paces"),
("order_button_spread_out", "Spread out"),
("order_button_stand_closer", "Stand closer"),
("order_button_mount_horses", "Mount horses"),
("order_button_dismount", "Dismount"),
("order_button_hold_fire", "Hold your fire"),
("order_button_fire_at_will", "Fire at will"),
("order_button_use_blunt_weapons", "Use only blunt weapons"),
("order_button_use_any_weapon", "Use weapons at will"),
("order_button_movement_orders", "Movement orders"),
("order_button_formation_orders", "Formation orders"),
("order_button_fire_orders", "Fire orders"),
("follow_me_e_", "%s, follow me!"),
("charge_e_", "%s, charge!!!"),
("stand_ground_e_", "%s, stand ground!"),
("retreat_e_", "%s, retreat!"),
("mount_horses_e_", "%s, mount horses!"),
("dismount_e_", "%s, dismount!"),
("advance_e_", "%s, advance ten paces!"),
("fall_back_e_", "%s, fall back ten paces!"),
("stand_closer_e_", "%s, stand closer!"),
("spread_out_e_", "%s, spread out!"),
("use_blunt_weapons_e_", "%s, use only blunt weapons!"),
("use_any_weapon_e_", "%s, use weapons at will!"),
("hold_fire_e_", "%s, hold your fire!"),
("fire_at_will_e_", "%s, fire at will!"),
("hold_this_position_e_", "%s, hold this position!"),
("infantry", "Infantry"),
("archers", "Archers"),
("cavalry", "Cavalry"),
("companions", "Companions"),
("everyone_hear_me", "Everyone, hear me!"),
("everyone", "Everyone"),
("everyone_around_me", "Nearby Soldiers"),
("str_hear_me", "%s, hear me!"),
("str_and_str", "%s and %s"),
("str_comma_str", "%s, %s"),
("need_to_learn_prisoner_management", "You need to learn Prisoner Management skill in order to take prisoners."),
("game_log", "Game Log"),
("recent_messages", "Recent Messages"),
("custom_battle", "Custom Battle"),
("player", "Player"),
("value_denars", "%d denars"),
("back", "Back"),
("forward", "Forward"),
("display_on_map", "Show On Map"),
("info_pages", "Game Concepts"),
("troops2", "Characters"),
("locations", "Locations"),
("click_button_to_view_note", "Click on a link to view the notes"),
("this_page_contains_no_information", "This page contains no information"),
("other_pages_that_link_here", "Other pages that link here: "),
("report_is_value_days_old", " (Report is %d days old)"),
("report_is_current", " (Report is current)"),
("button_party_member_healthy_total", "%s (%d/%d)"),
("button_party_member_total", "%s (%d)"),
("button_party_member_hero_percentage_wounded", "%s (%d%% - Wounded)"),
("button_party_member_hero_percentage", "%s (%d%%)"),
("percentage_value", "%d%%"),
("full", "Full"),
("quick", "Quick"),
("none", "None"),
("change", "Change"),
("how_to_change", "How to change this?"),
("change_directx_explanation", "You can change the render method between DirectX 7 and DirectX 9 by clicking on the Configure button at the launch menu that comes up when you first start the game."),
("dropping_picking_up", "Dropping %s; picking up %s."),
("dropping", "Dropping %s."),
("picking_up", "Picking up %s."),
("unable_to_take", "Unable to take that."),
("age", "Age"),
("cannot_be_used_on_horseback", "Cannot be used on horseback"),
("enable_vertex_shaders2", "Render Method:"),
("screen_size2", "Screen Resolution:"),
("use_desktop_resolution2", "Use Desktop Resolution"),
("shadow_quality2", "Shadow Quality:"),
("m_low2", "Low"),
("m_high2", "High"),
("m_ultra_high2", "Ultra High"),
("off2", "Off"),
("group_header", "Class of troop"),
("group_rename", "Rename group"),
("group_1", "Infantry"),
("group_2", "Archers"),
("group_3", "Cavalry"),
("group_4", "Unnamed 1"),
("group_5", "Unnamed 2"),
("group_6", "Unnamed 3"),
("group_7", "Unnamed 4"),
("group_8", "Unnamed 5"),
("group_9", "Unnamed 6"),
("group_rename", "Rename Group"),
("group_close", "Close"),
("party_b_group_information", "%s belongs to %s group"),
("thrown_or_s", "Thrown/%s"),
("ranged_damage", "Ranged: %d%s"),
("overall_quality", "Overall Quality"),
("shader_quality", "Shader Quality:"),
("flora_lod_detail", "Tree Detail:"),
("flora_degrade_distance", "Tree Degrade Distance:"),
("antialiasing", "AntiAliasing:"),
("use_depth_effects", "Use Depth Effects"),
("hdr_mode", "HDR Mode:"),
("autoexpore", "Auto-exposure"),
("choose_profile", "Choose Profile"),
("create", "Create"),
("edit", "Edit"),
("join_game", "Join a Game"),
("host_game", "Host a Game"),
("custom", "Custom"),
("medium", "Medium"),
("male", "Male"),
("female", "Female"),
("gender", "Choose Gender:"),
("edit_profile", "Edit Profile"),
("new_profile", "New Profile"),
("enter_username", "Enter Username:"),
("invalid_username", "Usernames may only contain letters, numbers or _ - * [ ] ~ characters."),
("confirmation", "Are you sure?"),
("multiplayer", "Multiplayer"),
("server_name", "Server"),
("module_name", "Module"),
("game_type", "Game Type"),
("map_name", "Map"),
("ping", "Ping"),
("dedicated", "Dedicated"),
("number_of_players", "Players"),
("password_protected", "Password"),
("connect", "Connect"),
("local_area_network", "Local Area Network"),
("internet", "Internet"),
("favorites", "Favorites"),
("source", "Source:"),
("server_password", "Server Password:"),
("refresh", "Refresh"),
("start_search", "Start Search"),
("add_to_favorites", "Add to Favorites"),
("remove_from_favorites", "Remove from Favorites"),
("use_speedtree", "Use Speedtree"),
("use_instancing", "Use Instancing"),
("error", "Error"),
("error_server_full", "Server is full."),
("error_server_full_for_non_private", "Server is full for players without a private member password."),
("error_server_password_incorrect", "Incorrect password."),
("error_incorrect_serial", "Incorrect serial number."),
("error_incorrect_authorization_key", "Incorrect authorization key."),
("error_banned_from_server", "You are banned from this server."),
("error_username_taken", "Your profile name is used by another player."),
("error_authentication_failed", "Authentication failed."),
("unable_to_connect_to_server", "Unable to connect to server."),
("connection_to_server_is_lost", "Connection to server is lost."),
("kicked_from_server", "Kicked from server."),
("switch_to_module_question", "This server is running another module than the one you are currently running. Do you want Mount&Blade to switch to this module?"),
("download_module_question", "This server is running a module that is not installed on your computer. Would you like to visit the download site for this module now?"),
("download_mb_new_version_question", "This server is running a newer version (%d.%d%d%d) of Mount&Blade than the one you are currently running (%d.%d%d%d). Would you like to visit TaleWorlds download site now?"),
("download_mb_old_version_question", "This server is running an older version (%d.%d%d%d) of Mount&Blade and than the one you are currently running (%d.%d%d%d)."),
("download_module_new_version_question", "This server is running a newer version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d). Would you like to visit the download site for this module now?"),
("download_module_old_version_question", "This server is running an older version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d)."),
("authenticating_with_steam", "Authenticating with Steam..."),
("validating_serial_number", "Validating serial number..."),
("scanning_lan", "Scanning local area network..."),
("retrieving_servers", "Retrieving server list..."),
("shield_size2", "Size: %dx%d"),
("click_to_view_notes", "Click to view notes"),
("retrieving_server_infos", "Retrieving information from servers (%d)..."),
("connecting_to_server", "Connecting to server..."),
("requesting_to_join_the_game", "Requesting to join the game..."),
("loading", "Loading..."),
("group_value_control", "Group %d!"),
("drop_weapon", "Drop Weapon"),
("multiplayer_message_all", "Send Message to Everyone"),
("multiplayer_message_team", "Send Message to Team"),
("command_line", "Command Line"),
("use_ranged_weapon_as_melee", "Toggle Weapon Mode"),
("send_message_all", "Send Message to Everyone"),
("send_message_team", "Send Message to Team"),
("select", "Select"),
("context_menu", "Context Menu"),
("round_starts_in_value_seconds", "Round starts in %d seconds..."),
("watching_value", "Following %s"),
("capital_spec", "SPEC"),
("capital_dead", "DEAD"),
("instancing_error1", "Could not lock Instance Buffer (size: %d), Disabled mesh-instancing (Error Code: %d)"),
("instancing_error2", "Could not fit instanced objects, Disabled mesh-instancing"),
("by_keyboard", "By movement keys"),
("combat_speed_slowest", "Slowest"),
("combat_speed_slower", "Slower"),
("combat_speed_normal", "Normal"),
("combat_speed_faster", "Faster"),
("combat_speed_fastest", "Fastest"),
("module_newer_than_application", "The module you have selected requires a newer version of the game."),
("module_older_than_application", "The module you have selected requires an older version of the game."),
("unbalanced", "Unbalanced"),
("can_crush_through_blocks", "Can crush through blocks"),
("turn_camera_with_horse", "Turn Camera with Horse in First Person:"),
("widescreen_mode_on", "Multiple Screen Mode Enabled"),
("widescreen_mode_off", "Multiple Screen Mode Disabled"),
("notification_cant_upgrade", "(Can't upgrade: not enough money)"),
("turn_never", "Never"),
("turn_ranged_only", "Ranged only"),
("turn_melee_only", "Melee only"),
("turn_always", "Always"),
("general_options", "General Options"),
("vac_enabled", "Valve Anti Cheat Enabled"),
("campaign_ai", "Campaign AI:"),
("downloading_map", "Downloading map (%d KB)"),
("download_completed", "Download completed."),
("server_filter", "Server filter"),
("has_players", "Has players"),
("is_not_full", "Not full"),
("is_password_free", "No password"),
("native_only", "Native only"),
("ping_limit", "Ping limit"),
("filter_info", "%d games and %d players filtered"),
("is_version_compatible", "Compatible with module"),
("ttnet_account", "TTNET Oyun account"),
("username", "Username"),
("password", "Password"),
("error_incorrect_username_or_password", "Incorrect username or password"),
("validating_account", "Validating account..."),
("plase_enter_your_serial_key", "Please enter your serial key"),
("texture_detail2", "Texture Detail:"),
("antialiasing2", "Antialiasing:"),
("napoleonic_key_does_not_exist", "This mod requires the Napoleonic Wars DLC to play!"),
("delete_module_workshop", "Are you sure you want to unsubscribe from this module?"),
("delete_module", "Are you sure you want to delete the module?"),
("delete_native_module", "You cannot delete native mods."),
("incompatible_module", "This server is incompatible with your current module. You can use the configuration utility to change module."),
]
| |
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
# =============standard library imports ========================
from __future__ import absolute_import
from __future__ import print_function
import codecs
import glob
import os
import sys
import time
import serial
from six.moves import range
# =============local library imports ==========================
from .communicator import Communicator, process_response, prep_str, remove_eol_func
def get_ports():
    """Return a list of candidate serial-port device names for this platform.

    Windows: COM1 through COM256.
    POSIX: globs of the usual USB-serial device nodes, with Keyspan-style
    adapters listed first (``/dev/tty.U*`` on macOS, ``/dev/ttyU*`` elsewhere).
    """
    if sys.platform == 'win32':
        return ['COM{}'.format(n + 1) for n in range(256)]

    keyspan_pattern = '/dev/tty.U*' if sys.platform == 'darwin' else '/dev/ttyU*'
    pattern_order = (keyspan_pattern,
                     '/dev/tty.usb*',
                     '/dev/furpi.*',
                     '/dev/pychron.*',
                     '/dev/tty.SLAB*')

    ports = []
    for pattern in pattern_order:
        ports.extend(glob.glob(pattern))
    return ports
class SerialCommunicator(Communicator):
    """
    Base Class for devices that communicate using a rs232 serial port.
    Using Keyspan serial converter is the best option for a Mac
    class is built on top of pyserial. Pyserial is used to create a handle and
    this class uses the handle to read and write.
    handles are created when a serial device is opened
    setup args are loaded using load(). this method should be overwritten to
    load specific items.
    """
    # char_write = False

    # when True, open() falls back to probing every available port for the
    # device (see _find_handle) if the configured port fails validation
    _auto_find_handle = False
    # when True, a port discovered by _find_handle is written back to the
    # configuration file
    _auto_write_handle = False

    # pyserial connection parameters; None means "use pyserial's default"
    baudrate = None
    port = None
    bytesize = None
    parity = None
    stopbits = None
    timeout = None

    # query/response pair used by _find_handle to identify the device
    id_query = ''
    id_response = ''

    read_delay = None  # delay before reading a response (see ask/read helpers)
    read_terminator = None  # byte(s) marking the end of a response
    read_terminator_position = None  # expected index of the terminator, if fixed
    clear_output = False  # flush input/output buffers before each ask()

    _config = None  # config object retained so _find_handle can persist updates

    # attribute names included in the comms report (see _get_report_value)
    _comms_report_attrs = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout')
    @property
    def address(self):
        """Serial port name; serves as this communicator's address."""
        return self.port
def test_connection(self):
return self.handle is not None
    def reset(self):
        """Force-reset the serial connection.

        Closes the handle (if open), briefly reopens it at baudrate 0, then
        restores the original baudrate and re-opens if it was open before.
        Any failure is swallowed and reported as a warning.
        """
        handle = self.handle
        try:
            # remember the current state so it can be restored afterwards
            isopen = handle.isOpen()
            orate = handle.getBaudrate()
            if isopen:
                handle.close()
            # NOTE(review): the open/close cycle at baudrate 0 appears to be
            # used to force the driver to drop and re-establish the line --
            # confirm against the target OS/driver behavior
            handle.setBaudrate(0)
            handle.open()
            time.sleep(0.1)
            handle.close()
            # restore the original baudrate and open state
            handle.setBaudrate(orate)
            if isopen:
                handle.open()
        except Exception:
            self.warning('failed to reset connection')
def close(self):
if self.handle:
self.debug('closing handle {}'.format(self.handle))
self.handle.close()
def load_comdict(self, port, baudrate=9600, bytesize=8, parity=None, stopbits=1):
self.baudrate = baudrate
self.port = port
self.set_parity(parity)
self.set_stopbits(stopbits)
self.bytesize = bytesize
def load(self, config, path):
self.config_path = path
self._config = config
self.set_attribute(config, 'port', 'Communications', 'port')
self.set_attribute(config, 'baudrate', 'Communications', 'baudrate',
cast='int', optional=True)
self.set_attribute(config, 'bytesize', 'Communications', 'bytesize',
cast='int', optional=True)
self.set_attribute(config, 'timeout', 'Communications', 'timeout',
cast='float', optional=True)
self.set_attribute(config, 'clear_output', 'Communications', 'clear_output',
cast='boolean', optional=True)
parity = self.config_get(config, 'Communications', 'parity', optional=True)
self.set_parity(parity)
stopbits = self.config_get(config, 'Communications', 'stopbits', optional=True)
self.set_stopbits(stopbits)
self.set_attribute(config, 'read_delay', 'Communications', 'read_delay',
cast='float', optional=True, default=25)
self.set_attribute(config, 'read_terminator', 'Communications', 'terminator',
optional=True, default=None)
self.set_attribute(config, 'read_terminator_position', 'Communications', 'terminator_position',
optional=True, default=None, cast='int')
self.set_attribute(config, 'write_terminator', 'Communications', 'write_terminator',
optional=True, default=b'\r')
if self.write_terminator == 'CRLF':
self.write_terminator = b'\r\n'
if self.read_terminator == 'CRLF':
self.read_terminator = b'\r\n'
if self.read_terminator == 'ETX':
self.read_terminator = chr(3)
def set_parity(self, parity):
if parity:
self.parity = getattr(serial, 'PARITY_%s' % parity.upper())
def set_stopbits(self, stopbits):
if stopbits:
if stopbits in ('1', 1):
stopbits = 'ONE'
elif stopbits in ('2', 2):
stopbits = 'TWO'
self.stopbits = getattr(serial, 'STOPBITS_{}'.format(stopbits.upper()))
def tell(self, cmd, is_hex=False, info=None, verbose=True, **kw):
"""
"""
if self.handle is None:
if verbose:
info = 'no handle'
self.log_tell(cmd, info)
return
with self._lock:
self._write(cmd, is_hex=is_hex)
if verbose:
self.log_tell(cmd, info)
def read(self, nchars=None, *args, **kw):
"""
"""
with self._lock:
if nchars is not None:
r = self._read_nchars(nchars)
else:
r = self._read_terminator(*args, **kw)
return r
    def ask(self, cmd, is_hex=False, verbose=True, delay=None,
            replace=None, remove_eol=True, info=None, nbytes=None,
            handshake_only=False,
            handshake=None,
            read_terminator=None,
            terminator_position=None,
            nchars=None):
        """Write ``cmd`` to the device and return its response.

        The read strategy is selected in priority order: hex read of
        ``nbytes`` (default 8) when ``is_hex``; handshake read when
        ``handshake`` is given; fixed-length read when ``nchars`` is given;
        otherwise terminator-delimited read. Returns None when there is no
        open handle or the write fails.
        """
        if self.handle is None:
            if verbose:
                x = prep_str(cmd.strip())
                self.info('no handle {}'.format(x))
            return
        if not self.handle.isOpen():
            return
        with self._lock:
            if self.clear_output:
                # discard any stale buffered data before this exchange
                self.handle.flushInput()
                self.handle.flushOutput()
            # _write returns the (possibly terminated) command, or None on failure
            cmd = self._write(cmd, is_hex=is_hex)
            if cmd is None:
                return
            if is_hex:
                if nbytes is None:
                    nbytes = 8
                re = self._read_hex(nbytes=nbytes, delay=delay)
            elif handshake is not None:
                re = self._read_handshake(handshake, handshake_only, delay=delay)
            elif nchars is not None:
                re = self._read_nchars(nchars)
            else:
                re = self._read_terminator(delay=delay,
                                           terminator=read_terminator,
                                           terminator_position=terminator_position)
            # hex responses are raw bytes; stripping EOL would corrupt them
            if remove_eol and not is_hex:
                re = remove_eol_func(re)
            if verbose:
                pre = process_response(re, replace, remove_eol=not is_hex)
                self.log_response(cmd, pre, info)
            # if is_hex:
            #     re = binascii.hexlify(re).decode('utf-8')
            return re
    def open(self, **kw):
        """
        Use pyserial to create a handle connected to port with baudrate.

        Keyword arguments override the corresponding configured attributes.
        Returns True when a handle was created. On failure, falls back to
        probing all ports (_find_handle) if ``_auto_find_handle`` is set.

        default handle parameters
        baudrate=9600
        bytesize=EIGHTBITS
        parity= PARITY_NONE
        stopbits= STOPBITS_ONE
        timeout=None
        """
        port = kw.get('port')
        if port is None:
            port = self.port
            if port is None:
                self.warning('Port not set')
                return False
        # #on windows device handles probably handled differently
        if sys.platform == 'darwin':
            # macOS device nodes carry a /dev/tty. prefix
            port = '/dev/tty.{}'.format(port)
        kw['port'] = port
        # fill in any connection parameter not supplied by the caller from
        # the configured attributes; leave pyserial defaults for None
        for key in ['baudrate', 'bytesize', 'parity', 'stopbits', 'timeout']:
            v = kw.get(key)
            if v is None:
                v = getattr(self, key)
            if v is not None:
                kw[key] = v
        # optional preferences object controlling the auto-find behavior
        pref = kw.pop('prefs', None)
        if pref is not None:
            pref = pref.serial_preference
            self._auto_find_handle = pref.auto_find_handle
            self._auto_write_handle = pref.auto_write_handle
        # assume simulation until a real handle is successfully created
        self.simulation = True
        if self._validate_address(port):
            try_connect = True
            while try_connect:
                try:
                    self.debug('Connection parameters={}'.format(kw))
                    self.handle = serial.Serial(**kw)
                    try_connect = False
                    self.simulation = False
                except serial.serialutil.SerialException:
                    try_connect = False
                    self.debug_exception()
        elif self._auto_find_handle:
            self._find_handle(**kw)
        self.debug('Serial device: {}'.format(self.handle))
        return self.handle is not None  # connected is true if handle is not None
# private
def _get_report_value(self, key):
c, value = super(SerialCommunicator, self)._get_report_value(key)
if self.handle:
value = getattr(self.handle, key)
return c, value
def _find_handle(self, **kw):
found = False
self.simulation = False
self.info('Trying to find correct port')
port = None
for port in get_ports():
self.info('trying port {}'.format(port))
kw['port'] = port
try:
self.handle = serial.Serial(**kw)
except serial.SerialException:
continue
r = self.ask(self.id_query)
# use id_response as a callable to do device specific
# checking
if callable(self.id_response):
if self.id_response(r):
found = True
self.simulation = False
break
if r == self.id_response:
found = True
self.simulation = False
break
if not found:
# update the port
if self._auto_write_handle and port:
# port in form
# /dev/tty.USAXXX1.1
p = os.path.split(port)[-1]
# remove tty.
p = p[4:]
self._config.set('Communication', 'port', )
self.write_configuration(self._config, self.config_path)
self.handle = None
self.simulation = True
def _validate_address(self, port):
"""
use glob to check the avaibable serial ports
valid ports start with /dev/tty.U or /dev/tty.usbmodem
"""
valid = get_ports()
if port in valid:
return True
else:
msg = '{} is not a valid port address'.format(port)
self.warning(msg)
if not valid:
self.warning('No valid ports')
else:
self.warning('======== Valid Ports ========')
for v in valid:
self.warning(v)
self.warning('=============================')
# wmsg = '\n'.join(valid)
# if not globalv.ignore_connection_warnings:
#
# if self.confirmation_dialog('{}\n{}\n\nQuit Pychron?'.format(msg, wmsg),
# title='Quit Pychron'):
# os._exit(0)
def _write(self, cmd, is_hex=False):
"""
use the serial handle to write the cmd to the serial buffer
return True if there is an exception writing cmd
"""
if not self.simulation:
if not isinstance(cmd, bytes):
cmd = bytes(cmd, 'utf-8')
if is_hex:
cmd = codecs.decode(cmd, 'hex')
else:
wt = self.write_terminator
if wt is not None:
if not isinstance(wt, bytes):
wt = bytes(wt, 'utf-8')
cmd += wt
try:
self.handle.write(cmd)
except (serial.serialutil.SerialException, OSError, IOError, ValueError) as e:
self.warning('Serial Communicator write execption: {}'.format(e))
return
return cmd
def _read_nchars(self, n, timeout=1, delay=None):
func = lambda r: self._get_nchars(n, r)
return self._read_loop(func, delay, timeout)
def _read_hex(self, nbytes=8, timeout=1, delay=None):
func = lambda r: self._get_nbytes(nbytes, r)
return self._read_loop(func, delay, timeout)
def _read_handshake(self, handshake, handshake_only, timeout=1, delay=None):
def hfunc(r):
terminated = False
ack, r = self._check_handshake(handshake)
if handshake_only and ack:
r = handshake[0]
terminated = True
elif ack and r is not None:
terminated = True
return r, terminated
return self._read_loop(hfunc, delay, timeout)
def _read_terminator(self, timeout=1, delay=None,
terminator=None, terminator_position=None):
if terminator is None:
terminator = self.read_terminator
if terminator_position is None:
terminator_position = self.read_terminator_position
def func(r):
return self._get_isterminated(r, terminator, terminator_position)
return self._read_loop(func, delay, timeout)
def _get_nbytes(self, nchars, r):
"""
1 byte == 2 chars
"""
handle = self.handle
inw = handle.inWaiting()
c = min(inw, nchars - len(r))
re = handle.read(c)
# print('inw', inw, 'c', c, re)
r += re
# print('r', r, len(r), nchars)
# r += b''.join(map('{:02X}'.format, map(ord, handle.read(c)))))
# print('r', r)
return r[:nchars], len(r) >= nchars
def _get_nchars(self, nchars, r):
handle = self.handle
inw = handle.inWaiting()
c = min(inw, nchars - len(r))
r += handle.read(c)
return r[:nchars], len(r) >= nchars
def _check_handshake(self, handshake_chrs):
ack, nak = handshake_chrs
inw = self.handle.inWaiting()
r = self.handle.read(inw)
if r:
return ack == r[0], r[1:]
return False, None
def _get_isterminated(self, r, terminator=None, pos=None):
terminated = False
try:
inw = self.handle.inWaiting()
r += self.handle.read(inw)
# r += chrs.decode('utf-8')
# print 'inw', inw, r, terminator
if terminator is None:
terminator = (b'\r\x00', b'\r\n', b'\r', b'\n')
if not isinstance(terminator, (list, tuple)):
terminator = (terminator,)
if r and r.strip():
for ti in terminator:
if pos:
t = r[pos] == ti
else:
if isinstance(ti, str):
ti = ti.encode()
t = r.endswith(ti)
if t:
terminated = True
break
except BaseException as e:
self.warning(e)
return r, terminated
def _read_loop(self, func, delay, timeout=1):
if delay is not None:
time.sleep(delay / 1000.)
elif self.read_delay:
time.sleep(self.read_delay / 1000.)
r = b''
st = time.time()
handle = self.handle
ct = time.time()
while ct - st < timeout:
if not handle.isOpen():
break
try:
r, isterminated = func(r)
if isterminated:
break
except (ValueError, TypeError):
pass
time.sleep(0.01)
ct = time.time()
if ct - st > timeout:
l = len(r) if r else 0
self.info('timed out. {}s r={}, len={}'.format(timeout, r, l))
return r
if __name__ == '__main__':
    # Ad hoc manual smoke test: open a hard-coded port and poll the device.
    # Requires real hardware on the named port; not part of any test suite.
    s = SerialCommunicator()
    s.read_delay = 0
    s.port = 'usbmodemfd1221'
    s.open()
    time.sleep(2)
    s.tell('A', verbose=False)
    for i in range(10):
        print('dddd', s.ask('1', verbose=False))
        time.sleep(1)
        # s.tell('ddd', verbose=False)
        # print s.ask('ddd', verbose=False)
# ===================== EOF ==========================================
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCircuitPeeringsOperations(object):
    """ExpressRouteCircuitPeeringsOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2016-12-01".
    """
    # NOTE: AutoRest-generated code -- prefer regenerating over hand-editing.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # pinned API version for every request issued by this operations class
        self.api_version = "2016-12-01"
        self.config = config
    def _delete_initial(
            self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
        """Issue the initial DELETE request of the long-running delete.
        The public ``delete`` method wraps this with an LRO poller."""
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200/202/204 are all valid outcomes for an async delete
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified peering from the specified express route circuit.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # delete has no body to deserialize; only surface the raw response
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # select the polling strategy: ARM polling by default
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}
    def get(
            self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified authorization from the specified express route
        circuit.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ExpressRouteCircuitPeering or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}
    def _create_or_update_initial(
            self, resource_group_name, circuit_name, peering_name, peering_parameters, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PUT request of the long-running create/update.
        The public ``create_or_update`` method wraps this with an LRO poller."""
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200 = updated, 201 = created
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, circuit_name, peering_name, peering_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a peering in the specified express route circuits.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param peering_parameters: Parameters supplied to the create or update
         express route circuit peering operation.
        :type peering_parameters:
         ~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         ExpressRouteCircuitPeering or
         ClientRawResponse<ExpressRouteCircuitPeering> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            peering_parameters=peering_parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # final GET result deserializes to the peering model
            deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # select the polling strategy: ARM polling by default
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}
    def list(
            self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
        """Gets all peerings in a specified express route circuit.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ExpressRouteCircuitPeering
        :rtype:
         ~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeeringPaged[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Callback used by the Paged iterator: first page builds the URL,
            # subsequent pages follow the service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.ExpressRouteCircuitPeeringPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ExpressRouteCircuitPeeringPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'}
| |
# TODO(jiayq): as more and more tests are moving to hypothesis test, we
# can gradually remove this test script. DO NOT ADD MORE TESTS TO THIS
# FILE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import (
brew,
core,
device_checker,
gradient_checker,
model_helper,
test_util,
workspace,
)
from caffe2.python.gradient_checker import NetGradientChecker
from caffe2.proto import caffe2_pb2
import unittest
# Build module-level device/gradient checkers once, depending on whether a
# CUDA device is available at import time.
if workspace.has_gpu_support and workspace.NumCudaDevices() > 0:
    gpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_option.device_type = caffe2_pb2.CUDA
    cpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_checker = device_checker.DeviceChecker(
        0.01, [gpu_device_option]
    )
    # NOTE(review): this rebinds the imported ``device_checker`` *module*
    # name to a DeviceChecker instance; works, but shadows the module.
    device_checker = device_checker.DeviceChecker(
        0.01, [gpu_device_option, cpu_device_option]
    )
    gpu_gradient_checkers = [
        gradient_checker.GradientChecker(
            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
        ),
    ]
    gradient_checkers = [
        gradient_checker.GradientChecker(
            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
        ),
        gradient_checker.GradientChecker(
            0.01, 0.05, cpu_device_option, "cpu_checker_ws"
        ),
    ]
else:
    # CPU-only fallback: the "gpu" checker gets no devices and the gradient
    # checker list only covers the CPU workspace.
    cpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_option = None
    gpu_device_checker = device_checker.DeviceChecker(
        0.01, []
    )
    device_checker = device_checker.DeviceChecker(0.01, [cpu_device_option])
    gradient_checkers = [
        gradient_checker.GradientChecker(
            0.01, 0.05, cpu_device_option, "cpu_checker_ws"
        )
    ]
    gpu_gradient_checkers = []
class TestLRN(test_util.TestCase):
    """Device and gradient checks for the LRN operator in NHWC layout."""

    def setUp(self):
        # (spatial size, channel depth) pairs
        self.test_configs = [(6, 10), (3, 13)]

    def testLRN(self):
        for spatial, channels in self.test_configs:
            op = core.CreateOperator(
                "LRN",
                ["X"],
                ["Y", "Y_scale"],
                size=11,
                alpha=0.001,
                beta=0.5,
                bias=2.0,
                order="NHWC",
            )
            X = np.random.rand(2, spatial, spatial, channels).astype(np.float32)
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestFlatten(test_util.TestCase):
    """Device and gradient checks for the Flatten operator."""

    def testFlatten(self):
        op = core.CreateOperator("Flatten", ["X"], ["Y"])
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
        for checker in gradient_checkers:
            ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
            self.assertTrue(ok)
class TestConcat(test_util.TestCase):
    """Device and gradient checks for Concat in NHWC and NCHW layouts."""

    def setUp(self):
        self.test_configs = [
            # input_size, depth1, depth2, depth3, depth4
            (3, 2, 3, 4, 5),
            (4, 5, 4, 3, 2),
        ]

    def _run_concat(self, order):
        # Build four random inputs whose channel axis depends on the layout,
        # then run the device check plus a gradient check w.r.t. each input.
        for size, d1, d2, d3, d4 in self.test_configs:
            op = core.CreateOperator(
                "Concat",
                ["X1", "X2", "X3", "X4"],
                ["Y", "Y_dims"],
                order=order,
            )
            if order == "NHWC":
                shapes = [(2, size, size, d) for d in (d1, d2, d3, d4)]
            else:
                shapes = [(2, d, size, size) for d in (d1, d2, d3, d4)]
            Xs = [np.random.rand(*shape).astype(np.float32) for shape in shapes]
            for idx in range(4):
                self.assertTrue(device_checker.CheckSimple(op, Xs, [0]))
                for checker in gradient_checkers:
                    ok, _, _ = checker.CheckSimple(op, Xs, idx, [0])
                    self.assertTrue(ok)

    def testConcatNHWC(self):
        self._run_concat("NHWC")

    def testConcatNCHW(self):
        self._run_concat("NCHW")
class TestRelu(test_util.TestCase):
    """Device and gradient checks for Relu over several input shapes."""

    def setUp(self):
        self.test_configs = [
            # input shapes; the zero-sized case stays disabled as before
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 3, 3, 1),
            (2, 3, 3, 1),
            (1, 5, 5, 3),
            (2, 5, 5, 3),
        ]

    def testRelu(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Relu", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32)
            # nudge values away from zero to avoid the gradient kink at 0
            X += 0.01 * np.sign(X)
            X[X == 0] = 0.01
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestTanh(test_util.TestCase):
    """Device and gradient checks for the Tanh operator."""

    def setUp(self):
        self.test_configs = [
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 2, 3, 4),
        ]

    def testTanh(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Tanh", ["X"], ["Y"])
            # center inputs around zero
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestAbs(test_util.TestCase):
    """Device and gradient checks for the Abs operator."""

    def setUp(self):
        self.test_configs = [
            (1, 1),
            (2, 3),
            (2, 3, 4),
            (2, 3, 4, 5),
        ]

    def testAbs(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Abs", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32)
            # keep values away from zero, where |x| is not differentiable
            X += 0.01 * np.sign(X)
            X[X == 0] = 0.01
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestExp(test_util.TestCase):
    """Device and gradient checks for the Exp operator."""

    def setUp(self):
        self.test_configs = [
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 2, 3, 4),
        ]

    def testExp(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Exp", ["X"], ["Y"])
            # center inputs around zero
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestCos(test_util.TestCase):
    """Device and gradient checks for the Cos operator."""

    def setUp(self):
        self.test_configs = [
            (1, 1),
            (2, 3),
            (2, 3, 4),
            (2, 3, 4, 5),
        ]

    def testCos(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Cos", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestSin(test_util.TestCase):
    """Device and gradient checks for the Sin operator."""

    def setUp(self):
        self.test_configs = [
            (1, 1),
            (2, 3),
            (2, 3, 4),
            (2, 3, 4, 5),
        ]

    def testSin(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Sin", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestSigmoid(test_util.TestCase):
    """Device and gradient checks for the Sigmoid operator."""

    def setUp(self):
        self.test_configs = [
            # (0, 1),
            (1, 1),
            (2, 1),
            (1, 2, 3, 4),
        ]

    def testSigmoid(self):
        for shape in self.test_configs:
            op = core.CreateOperator("Sigmoid", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestSum(test_util.TestCase):
    """Device and gradient checks for Sum, in-place and out-of-place."""

    def setUp(self):
        self.test_configs = [
            # ((0, 1), False),
            ((1, 2, 3, 4), True),
            ((1, 2, 3, 4), False),
        ]

    def testSum(self):
        for shape, in_place in self.test_configs:
            # in-place writes the result back into the first input blob
            out_blob = "X1" if in_place else "Y"
            op = core.CreateOperator("Sum", ["X1", "X2"], [out_blob])
            inputs = [
                np.random.rand(*shape).astype(np.float32) - 0.5
                for _ in range(2)
            ]
            self.assertTrue(device_checker.CheckSimple(op, inputs, [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, inputs, 0, [0])
                self.assertTrue(ok)
class TestMakeTwoClass(test_util.TestCase):
    """Device and gradient checks for the MakeTwoClass operator."""

    def setUp(self):
        self.test_configs = [
            # input shapes
            # (0, 1),
            (1,),
            (7,),
            (1, 3),
            (2, 5),
        ]

    def testMakeTwoClass(self):
        for shape in self.test_configs:
            op = core.CreateOperator("MakeTwoClass", ["X"], ["Y"])
            X = np.random.rand(*shape).astype(np.float32)
            # keep probabilities away from 0/1 to avoid gradient problems
            X[X < 0.01] += 0.01
            X[X > 0.99] -= 0.01
            self.assertTrue(device_checker.CheckSimple(op, [X], [0]))
            for checker in gradient_checkers:
                ok, _, _ = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(ok)
class TestNetGradientChecker(test_util.TestCase):
    """End-to-end checks for NetGradientChecker on small hand-built nets."""
    def test_net_gradient_checker(self):
        # FC layer followed by two squared-L2 losses against two constant
        # blobs; the checker numerically verifies d(losses)/dX.
        model = model_helper.ModelHelper(name="test")
        const = model.net.AddExternalInputs("const1", "const2")
        fc = brew.fc(model, dim_in=3, dim_out=4, blob_in="X", blob_out="Y", axis=0)
        dist = [model.net.SquaredL2Distance([fc, c]) for c in const]
        losses = [model.net.AveragedLoss(d) for d in dist]  # using two losses here
        workspace.RunNetOnce(model.param_init_net)
        NetGradientChecker.Check(
            model.net,
            outputs_with_grad=losses,
            input_values={"X": np.array([1, 2, 3], dtype="float32"),
                          const[0]: np.array([1, 1, 1, 1], dtype="float32"),
                          const[1]: np.array([2, 2, 2, 2], dtype="float32")},
            input_to_check="X",
        )
    def test_net_comparison(self):
        # (a + b) * (c + d) == a * c + a * d + b * c + b * d
        # Two algebraically equivalent nets must produce matching outputs
        # and matching gradients for all four inputs.
        net1 = core.Net("net1")
        a, b, c, d = net1.AddExternalInputs("a", "b", "c", "d")
        a_b = net1.Sum([a, b], "a+b")
        c_d = net1.Sum([c, d], "c+d")
        x = net1.Mul([a_b, c_d], "x")
        net2 = core.Net("net2")
        ac = net2.Mul([a, c], "ac")
        ad = net2.Mul([a, d], "ad")
        bc = net2.Mul([b, c], "bc")
        bd = net2.Mul([b, d], "bd")
        y = net2.Sum([ac, ad, bc, bd], "y")
        input_values = {blob: np.array([i], dtype=np.float32)
                        for i, blob in enumerate([a, b, c, d])}
        NetGradientChecker.CompareNets(
            [net1, net2], [[x], [y]], [0],
            inputs_with_grads=[a, b, c, d],
            input_values=input_values,
        )
if __name__ == '__main__':
    # Minimal GlobalInit: just the program name, no extra caffe2 flags.
    workspace.GlobalInit(["python"])
    unittest.main()
| |
################################################################################################
### nlpTranslation
### Richard Mau ALLTEMPS, Richard Boyce PhD
### April 8, 2015
################################################################################################
from xml.etree.ElementTree import Element, SubElement, tostring
from xml.etree import ElementTree
from xml.dom import minidom
import re
import os
import nltk
import csv
import sys
import codecs
### p is abbreviated for precipitant
### o is abbreviated for object
### i is abbreviated for interaction
# Column names for the interaction CSV; order must match the input file.
col = ['fileName', 'pType', 'p', 'pAnnotator', 'pSpanStart', 'pSpanEnd', 'oType', 'o',
       'oAnnotator', 'metabolite', 'oSpanStart', 'oSpanEnd', 'modality', 'iPhraseType',
       'iPhrase', 'iPhraseAnnotator', 'iPhraseSpanStart', 'iPhraseSpanEnd']
### data is stored in an 'off-by-one' fashion due to the Header column names
### so first element is data[1], in other words -1 to index desired element
### then to retrieve a key-value is data[1][0][1] where the second index is the
### column and the third index is the key or value 0,1
### so data[1][0][1] = package-insert-section-1.txt
data = []  # initiated here for compile_data()
################################################################################################
### The Functions
################################################################################################
### @desc: Reads the specific csv file that contains the drug interactions and data[]
### @param:
### @return: the data[][][] list
def compile_data():
try:
csvfile = open('input/all-consensus-interaction-entities-dumped-05162011.csv', 'rb')
except IOError:
print "Error: File not found"
reader = csv.reader(csvfile, delimiter=',')
ctr = 0
for line in reader:
row = []
for i in range(18):
row.append(line[i])
data.append(zip(col, row))
ctr = ctr + 1
csvfile.closed
return data
### @desc: creates a document element for xml
### @param: paramId = the document id which represents the row in the csv file
###         paramOrigId = the origId is the section number from the package file
###         paramText = the full package text (normalized but not stored)
### @return: the document element as document
def create_document(paramId, paramOrigId, paramText):
    # Normalize the text (newlines removed); note the result is not attached
    # to the element -- only id/origId end up as attributes.
    paramText = paramText.replace('\n', '')
    attributes = {'id': paramId, 'origId': paramOrigId}
    return Element('document', attributes)
### @desc: creates a sentence element for xml under the module-level document
### @param: paramId = the sentence id is a combination of document_id + s[i]
###         paramText = the entire sentence
###         i = the iteration for the sentences [0-(len(text)-1)]
### @return: the sentence element as sentence under document
def create_sentence(paramId, paramText, i):
    # Escape literal newlines so they survive as "\n" inside the attribute.
    escaped = paramText.replace('\n', '\\n')
    sentence_id = paramId + '.s' + str(i)
    attributes = {'id': sentence_id,
                  'origId': 's' + str(i),
                  'text': escaped}
    return SubElement(document, 'sentence', attributes)
### @desc: creates an entity element for xml under the module-level sentence
### @param: paramId = combination of sentence_id + e[j]
###         paramText = name of the drug
###         paramOff = the offset in which the drug appears in package text
###         i = the sentence id in which the entity appeared
###         j = the entity id
###         paramType = the type (Active Ingredient/Drug product/Metabolite)
### @return: the entity element as entity under sentence
def create_entity(paramId, paramText, paramOff, i, j, paramType):
    # End of the character span = start offset + drug-name length.
    span_end = str(int(paramOff) + len(paramText))
    suffix = 's' + str(i) + '.e' + str(j)
    attributes = {'id': paramId + '.' + suffix,
                  'origId': suffix,
                  'text': paramText,
                  'charOffset': paramOff + '-' + span_end,
                  'type': paramType}
    return SubElement(sentence, 'entity', attributes)
### @desc: creates a pair element for the xml under the module-level sentence
### @param: paramId = combination of sentence_id + p[n]
###         interaction = (true/false), whether there's an interaction
###         modality = (Negative/Positive)
###         ipt = (Qualitative/Quantitative) interactionPhraseType
###         e1 = acting entity as precipitant
###         e2 = acting entity as object
###         i = sentence number
###         n = pair number
###         p = displayed text of entity as precipitant
###         o = displayed text of entity as object
### @return: the pair element as pair under sentence
def create_pair(paramId, interaction, modality, ipt, e1, e2, i, n, p, o):
    prefix = paramId + '.s' + str(i)
    attributes = {'id': prefix + '.p' + str(n),
                  'object': o,
                  'precipitant': p,
                  'e2': prefix + '.e' + str(e2),
                  'e1': prefix + '.e' + str(e1),
                  'interaction': interaction,  # 'true' or 'false'
                  'modality': modality,
                  'interactionPhraseType': ipt}
    return SubElement(sentence, 'pair', attributes)
### @desc: creates a sentencespan element for xml under the module-level document
### @param: paramId = the sentence id is a combination of document_id + sp[i]
###         paramText = the entire sentence span
###         i = the iteration for the sentencespan [0-(len(text)-1)]
### @return: the sentencespan element as sentence_span under document
def create_sentence_span(paramId, paramText, i):
    # Escape literal newlines so they survive as "\n" inside the attribute.
    escaped = paramText.replace('\n', '\\n')
    attributes = {'id': paramId + '.sp' + str(i),
                  'origId': 'sp' + str(i),
                  'text': escaped}
    return SubElement(document, 'sentencespan', attributes)
### @desc: creates an entity element for xml under the module-level sentencespan
### @param: paramId = combination of sentencespan id + e[j]
###         paramText = name of the drug
###         paramOff = the offset in which the drug appears in package text
###         i = the sentencespan id in which the entity appeared
###         j = the entity id
###         paramType = the type (Active Ingredient/Drug product/Metabolite)
### @return: the entity element as entity under sentencespan
def create_entity_span(paramId, paramText, paramOff, i, j, paramType):
    # End of the character span = start offset + drug-name length.
    span_end = str(int(paramOff) + len(paramText))
    suffix = 'sp' + str(i) + '.e' + str(j)
    attributes = {'id': paramId + '.' + suffix,
                  'origId': suffix,
                  'text': paramText,
                  'charOffset': paramOff + '-' + span_end,
                  'type': paramType}
    return SubElement(sentencespan, 'entity', attributes)
### @desc: creates a pair element for the xml under the module-level sentencespan
### @param: paramId = combination of sentencespan id + p[n]
###         interaction = (true/false), whether there's an interaction
###         modality = (Negative/Positive)
###         ipt = (Qualitative/Quantitative) interactionPhraseType
###         e1 = acting entity as precipitant
###         e2 = acting entity as object
###         i = sentence number
###         n = pair number
###         p = displayed text of entity as precipitant
###         o = displayed text of entity as object
### @return: the pair element as pair under sentencespan
def create_pair_span(paramId, interaction, modality, ipt, e1, e2, i, n, p, o):
    prefix = paramId + '.sp' + str(i)
    attributes = {'id': prefix + '.p' + str(n),
                  'object': o,
                  'precipitant': p,
                  'e2': prefix + '.e' + str(e2),
                  'e1': prefix + '.e' + str(e1),
                  'interaction': interaction,  # 'true' or 'false'
                  'modality': modality,
                  'interactionPhraseType': ipt}
    return SubElement(sentencespan, 'pair', attributes)
### @desc: prints out xml in a readable format
### @param: elem = the element in which to format
### @return: the formatted result (without the XML declaration line)
### NOTE(review): Python 2 only -- under Python 3 tostring()/toprettyxml()
### return bytes here and the str-based replace() below would raise TypeError.
def prettify(elem):
    # Serialize, reparse with minidom, then pretty-print with tab indentation.
    roughString = ElementTree.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(roughString)
    result = reparsed.toprettyxml(indent='\t', encoding='UTF-8')
    # Strip the per-element XML declaration; the caller writes one file-level
    # header itself (see header_xml).
    result = result.replace('<?xml version="1.0" encoding="UTF-8"?>', '')
    return result
### @desc: parses package-insert-section-xxx.txt for text attribute in sentence
### @param: paramText = the raw (byte) text of the package file
### @return: an array of the sentences as sentences
def parse_sentences(paramText):
    # parse the sentences and put into an array
    paramText = paramText.decode('utf8')
    # NLTK's pre-trained Punkt model performs the sentence segmentation.
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    sentences = sent_detector.tokenize(paramText)
    retval = []
    for s in range(len(sentences)):
        sent = sentences[s].split()
        # Heuristic: a "sentence" starting with a lowercase letter is assumed
        # to be a false split, so it is glued back onto the previous sentence.
        # NOTE(review): raises IndexError if a tokenized sentence is empty, or
        # if the very first sentence starts lowercase (pop from empty list).
        if sent[0][0].islower():
            retval.append(retval.pop(len(retval)-1) + sentences[s])
        else:
            retval.append(sentences[s])
    return retval
### @desc: reads the package-insert-section-xxx.txt from the all-statements-combined directory
### @param: the package-insert-section-xxx.txt file
### @return: the text from the file
def read_file(paramFile):
try:
packageFile = codecs.open(os.path.join('input/all-statements-combined', paramFile), 'rb')
except IOError:
print "Error: File not found"
packageText = packageFile.read()
packageFile.closed
return packageText.decode('ascii', 'ignore').encode('utf8', 'replace')
### @desc: sort key for interaction rows
### @param: item = one csv row as a list of (column-name, value) pairs
### @return: the precipitant span start value, i.e. column 4's value
def getKey(item):
    # item[4] is the ('pSpanStart', value) pair; sort on its value part.
    precipitant_span_start = item[4]
    return precipitant_span_start[1]
### @desc: groups interactions from the csv by section number into a list
### @param: paramText = the data[][][] after it has been compiled via compile_data()
### @return: a 4D list indexed by section number (unused slots stay 0)
def entity_array(paramText):
    # Row 0 is the csv header, so grouping starts at index 1. The section
    # number is the first integer embedded in the row's file name.
    grouped = [0] * len(paramText)
    last_section = 0
    bucket = []
    for idx in range(1, len(paramText)):
        section = int(re.search('\d+', paramText[idx][0][1]).group(0))
        if idx == 1 or last_section == section:
            # Same section as the current bucket (or the very first data row):
            # extend the bucket in place.
            bucket.append(paramText[idx])
        else:
            # New section: remember it and start a fresh bucket with this row.
            last_section = section
            bucket = [paramText[idx]]
        grouped[section] = bucket
    return grouped
### @desc: creates the header for the xml file (truncating any previous output)
### @param: paramFile = the xml file name inside output/
### @return:
def header_xml(paramFile):
    xmlFile = codecs.open('output/' + paramFile, 'wb')
    try:
        xmlFile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        xmlFile.write('<documents>')
    finally:
        # BUG FIX: was `xmlFile.closed` -- an attribute read that never closed
        # the file handle.
        xmlFile.close()
### @desc: creates the footer for the xml file (appends the closing root tag)
### @param: paramFile = the xml file name inside output/
### @return:
def closer_xml(paramFile):
    xmlFile = codecs.open('output/' + paramFile, 'ab')
    try:
        xmlFile.write('</documents>')
    finally:
        # BUG FIX: was `xmlFile.closed` -- an attribute read that never closed
        # the file handle.
        xmlFile.close()
### @desc: appends a serialized fragment to the xml file
### @param: paramString = the text to append; paramFile = the xml file name
### @return:
def append_xml(paramString, paramFile):
    xmlFile = codecs.open('output/' + paramFile, 'ab')
    try:
        xmlFile.write(paramString)
    finally:
        # BUG FIX: was `xmlFile.closed` -- an attribute read that never closed
        # the file handle.
        xmlFile.close()
################################################################################################
### Main
################################################################################################
# Overall flow: load the csv annotations, group them by package section, then
# for each package file emit a <document> with <sentence>/<sentencespan>
# elements, their <entity> children and all pairwise <pair> combinations,
# while also writing a tab-separated summary csv.
data = compile_data() # stores all the information from csv file
fileName = sys.argv[1]
docId = 'DBMI.pac'
entityArray = entity_array(data) # stores entites by package file number
### Creates the xml file for argv[1] and erase nlpTranslation
header_xml(fileName)
with open('output/nlpTranslation.csv', 'wb') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter='\t')
    spamwriter.writerow(['FileName', 'Precipitant Type', 'Precipitant', 'Precipitant Annotator', 'Precipitant Span Start', 'Precipitant SpanEnd', 'Object Type', 'Object',
                         'Object Annotator', 'Metabolite', 'Object Span Start', 'Object Span End', 'Modality', 'Interaction Phrase Type',
                         'Interaction Phrase', 'Interaction Phrase Annotator', 'Interaction Phrase Span Start', 'Interaction Phrase Span End'])
### Iterates through each package-insert-section in the 'all-statements-combined' file
for m in range(1,len(data)):
    secNo = re.search('\d+', data[m][0][1]).group(0)
    docOrigId = 'pac' + secNo
    packageText = read_file(data[m][0][1])
    text = parse_sentences(packageText) # stores the entire text from file as a list of sentences
    newText = [] # store the entire text from file as a list of sentence spans of 3 sentences
    ### if same document from the csv file then continue under current document element
    # NOTE: prevSecNo is first assigned at the bottom of this loop body; the
    # `m == 1` short-circuit keeps the first comparison from raising NameError.
    if m == 1 or secNo != prevSecNo:
        document = create_document(docId + secNo, docOrigId, ''.join(map(str, text)))
        entityLen = len(entityArray[int(secNo)])
        # head/nextHead track the character-offset window of the current sentence.
        head = 0
        nextHead = 0
        ### Iterates through each sentence in text
        for i in range(len(text)):
            sentence = create_sentence(docId + secNo, text[i], i)
            entCount = 0
            entList = []
            entPList = []
            entOList = []  # NOTE(review): populated nowhere, never read below
            nextHead += len(text[i]) + 1
            ### Append to the newText for sentence span
            if ((i+1) % 3) == 0 and i != 0: # sentences broken into 3s
                newText.append(text[i-2]+ ' ' + text[i-1]+ ' ' +text[i])
            elif len(text) == 1 and i == 0:
                newText.append(text[i])
            elif len(text) < 3 and i == 1: # text is 2 in length
                newText.append(text[i-1] + ' ' + text[i])
            elif (len(text) > 3) and (len(text) % 3 == len(text) - i):
                # Trailing remainder of 1 or 2 sentences after the groups of 3.
                if len(text) % 3 == 1:
                    newText.append(text[i])
                elif len(text) % 3 == 2:
                    newText.append(text[i] + ' ' + text[i+1])
            ### Iterate through each interaction in the package-insert-section
            sortedEntArray = sorted(entityArray[int(secNo)], key=getKey) # sort before entering entity array
            for j in range(entityLen):
                curEnt = sortedEntArray[j]
                currentO = int(curEnt[10][1])
                currentP = int(curEnt[4][1])
                ### Measure if the interaction is in the current sentence and create entity if not created
                if (head <= currentP and currentP < nextHead):
                    entPList.append(curEnt)
                    # Deduplicate entities by their displayed text before
                    # emitting precipitant (cols 1-4) and object (cols 6-10).
                    if curEnt[2][1] not in entList:
                        entList.append(curEnt[2][1])
                        entity = create_entity(docId + secNo, curEnt[2][1], curEnt[4][1], i, entCount, curEnt[1][1])
                        entCount += 1
                    if curEnt[7][1] not in entList:
                        entList.append(curEnt[7][1])
                        entity = create_entity(docId + secNo, curEnt[7][1], curEnt[10][1], i, entCount, curEnt[6][1])
                        entCount += 1
                    else:
                        pass
                else:
                    pass
            ### Cartesian Product of all the entities created and create pairs
            pair = 0
            for n in range(entCount):
                e1 = entList[n]
                for p in range(entCount):
                    match = 0
                    e2 = entList[p]
                    for q in range(len(entPList)):
                        e3 = entPList[q]
                        if e1 == e3[2][1] and e2 == e3[7][1] and n != p:
                            match = 1
                            create_pair(docId+secNo, 'true', e3[12][1], e3[13][1], n, p, i, pair, e3[2][1], e3[7][1])
                            pair += 1
                            ### Write to csv file
                            with open('output/nlpTranslation.csv', 'ab') as csvfile:
                                spamwriter = csv.writer(csvfile, delimiter='\t')
                                spam = [e3[0][1], e3[1][1], e3[2][1], e3[3][1], e3[4][1], e3[5][1], e3[6][1],
                                        e3[7][1], e3[8][1], e3[9][1], e3[10][1], e3[11][1], e3[12][1], e3[13][1], text[i], e3[15][1], e3[16][1], e3[17][1]]
                                spamwriter.writerow(spam)
                    if match == 0 and n != p:
                        create_pair(docId+secNo, 'false', '', '', n, p, i, pair, '', '')
                        pair +=1
            head = nextHead - 1
        ### End of sentence loop
        ### Beginning of sentence span. Iterate through each sentence span
        head = 0
        nextHead = 0
        for i in range(len(newText)):
            sentencespan = create_sentence_span(docId + secNo, newText[i].replace('\n', '\\n'), i)
            entCount = 0
            entList = []
            entPList = []
            entOList = []
            nextHead += len(newText[i]) + 2
            ### Iterate through each interaction in the package-insert-section
            sortedEntArray = sorted(entityArray[int(secNo)], key=getKey) # sort before entering entity array
            for j in range(entityLen):
                curEnt = sortedEntArray[j]
                currentO = int(curEnt[10][1])
                currentP = int(curEnt[4][1])
                ### Measure if the interaction is in the current sentence and create entity if not created
                if (head <= currentP and currentP < nextHead):
                    entPList.append(curEnt)
                    if curEnt[2][1] not in entList:
                        entList.append(curEnt[2][1])
                        entity = create_entity_span(docId + secNo, curEnt[2][1], curEnt[4][1], i, entCount, curEnt[1][1])
                        entCount += 1
                    if curEnt[7][1] not in entList:
                        entList.append(curEnt[7][1])
                        entity = create_entity_span(docId + secNo, curEnt[7][1], curEnt[10][1], i, entCount, curEnt[6][1])
                        entCount += 1
                    else:
                        pass
                else:
                    pass
            ### Cartesian Product of all the entities created and create pairs
            pair = 0
            for n in range(len(entList)):
                e1 = entList[n]
                for p in range(len(entList)):
                    match = 0
                    e2 = entList[p]
                    for q in range(len(entPList)):
                        e3 = entPList[q]
                        if e1 == e3[2][1] and e2 == e3[7][1] and n != p:
                            match = 1
                            create_pair_span(docId+secNo, 'true', e3[12][1], e3[13][1], n, p, i, pair, e3[2][1], e3[7][1])
                            pair += 1
                    if match == 0 and n != p:
                        create_pair_span(docId+secNo, 'false', '', '', n, p, i, pair, '', '')
                        pair +=1
            head = nextHead - 2
        ### End of sentence span loop
        prevSecNo = secNo
        append_xml(prettify(document), fileName)
        del(document)
        ### End of document
    # If the same document, then skip
    else:
        prevSecNo = secNo
closer_xml(fileName)
### End of Main
| |
#Embedded file name: ACEStream\Video\Ogg.pyo
import sys
import os
from cStringIO import StringIO
DEBUG = False
def is_ogg(name):
    """Return True if `name` looks like an Ogg container file by suffix."""
    # Same suffix list as before: the last three deliberately keep the
    # original's missing leading dot, so e.g. 'xogm' also matches.
    return name.endswith(('.ogg', '.ogv', 'ogm', 'oga', 'ogx'))
def ogg_grab_page(input, checkcrc = False):
    # Read one Ogg page from `input` (a file-like object positioned at a page
    # boundary) and return (isheader, header_bytes, body_bytes). `isheader` is
    # True when the page's first packet byte marks a Vorbis/Theora/FLAC header.
    # Field reads below follow the fixed 27-byte Ogg page header layout,
    # followed by the variable-length segment table.
    capture_pattern = input.read(4)
    stream_structure_version = input.read(1)
    header_type_flag = input.read(1)
    granule_position = input.read(8)
    bitstream_serial_number = input.read(4)
    page_sequence_number = input.read(4)
    CRC_checksum = input.read(4)
    number_page_segments = input.read(1)
    segment_table = input.read(ord(number_page_segments))
    header_size = ord(number_page_segments) + 27
    # The body size is the sum of all lacing values in the segment table.
    segment_size = 0
    for i in range(0, ord(number_page_segments)):
        segment_size += ord(segment_table[i])
    page_size = header_size + segment_size
    if capture_pattern != 'OggS':
        raise ValueError('Header does not start with OggS')
    if page_size > 65307:
        # 65307 is the maximum legal Ogg page size (27 + 255 + 255*255).
        raise ValueError('Page too big')
    if DEBUG:
        print >> sys.stderr, 'ogg: type', ord(header_type_flag)
    header = capture_pattern + stream_structure_version + header_type_flag + granule_position + bitstream_serial_number + page_sequence_number + CRC_checksum + number_page_segments + segment_table
    body = input.read(page_size - header_size)
    if checkcrc:
        import binascii
        import socket
        # Recompute the CRC over the page with the checksum field zeroed, as
        # the Ogg spec requires, and compare hex strings in network byte order.
        crcheader = capture_pattern + stream_structure_version + header_type_flag + granule_position + bitstream_serial_number + page_sequence_number + '\x00\x00\x00\x00' + number_page_segments + segment_table
        crcpage = crcheader + body
        newcrc = ogg_crc(crcpage)
        newcrcnbo = socket.htonl(newcrc) & 4294967295L
        newcrcstr = '%08x' % newcrcnbo
        oldcrcstr = binascii.hexlify(CRC_checksum)
        if DEBUG:
            print >> sys.stderr, 'ogg: CRC exp', oldcrcstr, 'got', newcrcstr
        if oldcrcstr != newcrcstr:
            raise ValueError('Page fails CRC check')
    # First byte of the body is the packet type: odd values 1/3/5 are Vorbis
    # headers, 0x80-0x82 Theora headers, 0x7f the FLAC-in-Ogg header.
    header_type = body[0]
    isheader = False
    if header_type == '\x01' or header_type == '\x03' or header_type == '\x05':
        isheader = True
        vorbis_grab_header(StringIO(body))
    elif header_type == '\x80' or header_type == '\x81' or header_type == '\x82':
        isheader = True
        theora_grab_header(StringIO(body))
    elif header_type == '\x7f':
        isheader = True
        flac_grab_header(StringIO(body))
    return (isheader, header, body)
def vorbis_grab_header(input):
    # Debug-only helper: identify which Vorbis header packet (ident/comment/
    # setup) `input` contains and log it to stderr. No-op unless DEBUG is set.
    if DEBUG:
        header_type = input.read(1)
        if header_type == '\x01':
            codec = input.read(6)
            print >> sys.stderr, 'ogg: Got vorbis ident header', codec
        elif header_type == '\x03':
            print >> sys.stderr, 'ogg: Got vorbis comment header'
        elif header_type == '\x05':
            print >> sys.stderr, 'ogg: Got vorbis setup header'
def theora_grab_header(input):
    # Debug-only helper: identify which Theora header packet (ident/comment/
    # setup) `input` contains and log it to stderr. No-op unless DEBUG is set.
    if DEBUG:
        header_type = input.read(1)
        if header_type == '\x80':
            codec = input.read(6)
            print >> sys.stderr, 'ogg: Got theora ident header', codec
        elif header_type == '\x81':
            print >> sys.stderr, 'ogg: Got theora comment header'
        elif header_type == '\x82':
            print >> sys.stderr, 'ogg: Got theora setup header'
def flac_grab_header(input):
    # Debug-only helper: log the FLAC-in-Ogg ident header (packet type 0x7f)
    # to stderr. No-op unless DEBUG is set.
    if DEBUG:
        header_type = input.read(1)
        if header_type == '\x7f':
            codec = input.read(4)
            print >> sys.stderr, 'ogg: Got flac ident header', codec
def makeCRCTable(idx):
    """Compute lookup-table entry `idx` for the Ogg CRC-32.

    Polynomial 0x04C11DB7, no bit reflection, per the Ogg page checksum.
    Runs byte value `idx` through 8 rounds of polynomial division.
    """
    r = idx << 24
    for i in range(8):
        # 0x80000000 / 0x7fffffff / 0x04C11DB7 replace the Python-2-only
        # literals 2147483648L / 2147483647 / 79764919 so this module also
        # parses on Python 3; the values are identical.
        if r & 0x80000000 != 0:
            r = (r & 0x7fffffff) << 1 ^ 0x04C11DB7
        else:
            r = (r & 0x7fffffff) << 1
    return r

# Precomputed 256-entry table, one entry per possible input byte value.
CRCTable = [ makeCRCTable(i) for i in range(256) ]

def ogg_crc(src):
    """Return the Ogg page CRC of `src`, a string iterated one char at a time."""
    crc = 0
    for c in src:
        # Table-driven update: shift the top byte out (0xffffff mask keeps the
        # accumulator 32-bit) and fold in the next input byte.
        crc = (crc & 16777215) << 8 ^ CRCTable[crc >> 24 ^ ord(c)]
    return crc
OGGMAGIC_TDEF = 0
OGGMAGIC_FIRSTPAGE = 1
OGGMAGIC_REST_OF_INPUT = 2
class OggMagicLiveStream:
    """File-like wrapper for a live Ogg P2P stream.

    Serves, in order: (1) the saved Ogg header pages from the torrent
    definition, (2) the remainder of the first page found in the live input,
    (3) the raw live input. `self.mode` tracks which phase `read()` is in,
    using the OGGMAGIC_* constants defined above.
    """
    def __init__(self, tdef, input):
        self.tdef = tdef
        self.input = input
        self.firstpagestream = None
        self.mode = OGGMAGIC_TDEF
        self.find_first_page()
    def find_first_page(self):
        # Buffer up to one maximum-size page (+4 bytes of capture pattern)
        # from the live input and position firstpagestream at the first
        # 'OggS' capture pattern found in it.
        nwant = 65311
        firstpagedata = ''
        while len(firstpagedata) < nwant:
            print >> sys.stderr, 'OggMagicLiveStream: Reading first page, avail', self.input.available()
            data = self.input.read(nwant)
            firstpagedata += data
            # BUG: should be `len(firstpagedata) < nwant`; as written it
            # compares str to int and calls len() on the resulting bool, which
            # raises TypeError whenever the input returns no data.
            if len(data) == 0 and len(firstpagedata < nwant):
                raise ValueError('OggMagicLiveStream: Could not get max. page bytes')
        self.firstpagestream = StringIO(firstpagedata)
        # Scan byte-by-byte for the 'OggS' page boundary, rewinding after
        # each probe so no bytes are lost.
        while True:
            char = self.firstpagestream.read(1)
            if len(char) == 0:
                break
            if char == 'O':
                rest = self.firstpagestream.read(3)
                if rest == 'ggS':
                    print >> sys.stderr, 'OggMagicLiveStream: Found page'
                    self.firstpagestream.seek(-4, os.SEEK_CUR)
                    break
                else:
                    self.firstpagestream.seek(-3, os.SEEK_CUR)
        if len(char) == 0:
            raise ValueError('OggMagicLiveStream: could not find start-of-page in P2P-stream')
    def read(self, numbytes = None):
        # Serve data for the current phase, advancing mode as each source is
        # exhausted. Only bounded reads are supported.
        if numbytes is None:
            raise ValueError("OggMagicLiveStream: don't support read all")
        if self.mode == OGGMAGIC_TDEF:
            data = self.tdef.get_live_ogg_headers()
            if DEBUG:
                print >> sys.stderr, 'OggMagicLiveStream: Writing TDEF', len(data)
            if len(data) > numbytes:
                raise ValueError('OggMagicLiveStream: Not implemented, Ogg headers too big, need more code')
            self.mode = OGGMAGIC_FIRSTPAGE
            return data
        if self.mode == OGGMAGIC_FIRSTPAGE:
            data = self.firstpagestream.read(numbytes)
            if DEBUG:
                print >> sys.stderr, 'OggMagicLiveStream: Writing 1st remain', len(data)
            if len(data) == 0:
                # Buffered first page drained; fall through to the live input.
                self.mode = OGGMAGIC_REST_OF_INPUT
                return self.input.read(numbytes)
            else:
                return data
        elif self.mode == OGGMAGIC_REST_OF_INPUT:
            data = self.input.read(numbytes)
            return data
    def seek(self, offset, whence = None):
        # Only a rewind to offset 0 is supported: restart header playback and
        # re-locate the first page in the live input.
        print >> sys.stderr, 'OggMagicLiveStream: SEEK CALLED', offset, whence
        if offset == 0:
            if self.mode != OGGMAGIC_TDEF:
                self.mode = OGGMAGIC_TDEF
                self.find_first_page()
        else:
            raise ValueError("OggMagicLiveStream doens't support seeking other than to beginning")
    def close(self):
        self.input.close()
    def available(self):
        # Size is unknown for a live stream.
        return -1
# Manual test: copy the header pages from libre.ogg, then splice in stroom.ogg
# starting at its first page boundary, writing the result to new.ogg.
if __name__ == '__main__':
    header_pages = []
    # Collect all leading header pages from libre.ogg.
    f = open('libre.ogg', 'rb')
    while True:
        isheader, header, body = ogg_grab_page(f)
        if not isheader:
            break
        else:
            header_pages.append((header, body))
    f.close()
    # Scan stroom.ogg byte-by-byte for its first 'OggS' page boundary.
    g = open('stroom.ogg', 'rb')
    while True:
        char = g.read(1)
        if len(char) == 0:
            break
        if char == 'O':
            rest = g.read(3)
            if rest == 'ggS':
                print >> sys.stderr, 'Found page'
                g.seek(-4, os.SEEK_CUR)
                isheader, pheader, pbody = ogg_grab_page(g)
                break
            else:
                g.seek(-3, os.SEEK_CUR)
    # If a page was found, write headers + that page + the rest of the stream.
    if len(char) > 0:
        h = open('new.ogg', 'wb')
        for header, body in header_pages:
            h.write(header)
            h.write(body)
        h.write(pheader)
        h.write(pbody)
        while True:
            data = g.read(65536)
            if len(data) == 0:
                break
            else:
                h.write(data)
        h.close()
    g.close()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines NeuMF model for NCF framework.
Some abbreviations used in the code base:
NeuMF: Neural Matrix Factorization
NCF: Neural Collaborative Filtering
GMF: Generalized Matrix Factorization
MLP: Multi-Layer Perceptron
GMF applies a linear kernel to model the latent feature interactions, and MLP
uses a nonlinear kernel to learn the interaction function from data. NeuMF model
is a fused model of GMF and MLP to better model the complex user-item
interactions, and unifies the strengths of linearity of MF and non-linearity of
MLP for modeling the user-item latent structures.
In NeuMF model, it allows GMF and MLP to learn separate embeddings, and combine
the two models by concatenating their last hidden layer.
"""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from typing import Any, Dict, Text
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import ncf_common
from official.recommendation import stat_utils
def sparse_to_dense_grads(grads_and_vars):
  """Densify any sparse gradients in a list of (gradient, variable) pairs.

  Gradients represented as tf.IndexedSlices are materialized as dense
  Tensors via tf.convert_to_tensor; gradients that are already dense
  Tensors pass through unchanged. For the small embeddings used by this
  model, applying dense gradients with the AdamOptimizer is faster than
  applying sparse gradients.

  Args:
    grads_and_vars: A list of (gradient, variable) tuples. Each gradient can
      be a Tensor or an IndexedSlices.

  Returns:
    The same list of (gradient, variable) pairs, except each IndexedSlices
    gradient is converted to a Tensor.
  """
  dense_pairs = []
  for gradient, variable in grads_and_vars:
    # convert_to_tensor leaves Tensors alone and densifies IndexedSlices.
    dense_pairs.append((tf.convert_to_tensor(gradient), variable))
  return dense_pairs
def neumf_model_fn(features, labels, mode, params):
  """Model Function for NeuMF estimator.

  Builds the NeuMF Keras model over the user/item feature columns and returns
  an EstimatorSpec for EVAL or TRAIN mode (other modes are not implemented).
  """
  if params.get("use_seed"):
    # Deterministic runs when requested by the caller.
    tf.set_random_seed(stat_utils.random_int32())
  users = features[movielens.USER_COLUMN]
  items = features[movielens.ITEM_COLUMN]
  user_input = tf.keras.layers.Input(tensor=users)
  item_input = tf.keras.layers.Input(tensor=items)
  logits = construct_model(user_input, item_input, params).output
  # Softmax with the first column of zeros is equivalent to sigmoid.
  softmax_logits = ncf_common.convert_to_softmax_logits(logits)
  if mode == tf.estimator.ModeKeys.EVAL:
    duplicate_mask = tf.cast(features[rconst.DUPLICATE_MASK], tf.float32)
    return _get_estimator_spec_with_metrics(
        logits,
        softmax_logits,
        duplicate_mask,
        params["num_neg"],
        params["match_mlperf"],
        use_tpu_spec=params["use_tpu"])
  elif mode == tf.estimator.ModeKeys.TRAIN:
    labels = tf.cast(labels, tf.int32)
    # Mask out padding points so they contribute no loss.
    valid_pt_mask = features[rconst.VALID_POINT_MASK]
    optimizer = tf.compat.v1.train.AdamOptimizer(
        learning_rate=params["learning_rate"],
        beta1=params["beta1"],
        beta2=params["beta2"],
        epsilon=params["epsilon"])
    if params["use_tpu"]:
      optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
    loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(
        labels=labels,
        logits=softmax_logits,
        weights=tf.cast(valid_pt_mask, tf.float32)
    )
    # Named identity so the loss can be found by logging hooks.
    tf.identity(loss, name="cross_entropy")
    global_step = tf.compat.v1.train.get_global_step()
    tvars = tf.compat.v1.trainable_variables()
    gradients = optimizer.compute_gradients(
        loss, tvars, colocate_gradients_with_ops=True)
    # Dense gradients apply faster than sparse ones for these small embeddings.
    gradients = sparse_to_dense_grads(gradients)
    minimize_op = optimizer.apply_gradients(
        gradients, global_step=global_step, name="train")
    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    train_op = tf.group(minimize_op, update_ops)
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
  else:
    raise NotImplementedError
def _strip_first_and_last_dimension(x, batch_size):
  """Take row 0 of `x` and flatten it to a rank-1 tensor of length batch_size."""
  first_row = x[0, :]
  return tf.reshape(first_row, (batch_size,))
def construct_model(user_input: tf.Tensor, item_input: tf.Tensor,
                    params: Dict[Text, Any]) -> tf.keras.Model:
  """Initialize NeuMF model.

  Builds the fused GMF + MLP network: a shared embedding table per side is
  sliced into an MF part (element-wise multiplied) and an MLP part (fed
  through dense ReLU layers), then both are concatenated into the final
  single-logit prediction layer.

  Args:
    user_input: keras input layer for users
    item_input: keras input layer for items
    params: Dict of hyperparameters.
  Raises:
    ValueError: if the first model layer is not even.
  Returns:
    model: a keras Model for computing the logits
  """
  num_users = params["num_users"]
  num_items = params["num_items"]
  model_layers = params["model_layers"]
  mf_regularization = params["mf_regularization"]
  mlp_reg_layers = params["mlp_reg_layers"]
  mf_dim = params["mf_dim"]
  # The first MLP layer is split evenly between the user and item embeddings,
  # so it must be even.
  if model_layers[0] % 2 != 0:
    raise ValueError("The first layer size should be multiple of 2!")
  # Initializer for embedding layers
  embedding_initializer = "glorot_uniform"
  # The first mf_dim columns of the shared embedding are the MF portion...
  def mf_slice_fn(x):
    x = tf.squeeze(x, [1])
    return x[:, :mf_dim]
  # ...and the remaining columns are the MLP portion.
  def mlp_slice_fn(x):
    x = tf.squeeze(x, [1])
    return x[:, mf_dim:]
  # It turns out to be significantly more efficient to store the MF and MLP
  # embedding portions in the same table, and then slice as needed.
  embedding_user = tf.keras.layers.Embedding(
      num_users,
      mf_dim + model_layers[0] // 2,
      embeddings_initializer=embedding_initializer,
      embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization),
      input_length=1,
      name="embedding_user")(
          user_input)
  embedding_item = tf.keras.layers.Embedding(
      num_items,
      mf_dim + model_layers[0] // 2,
      embeddings_initializer=embedding_initializer,
      embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization),
      input_length=1,
      name="embedding_item")(
          item_input)
  # GMF part
  mf_user_latent = tf.keras.layers.Lambda(
      mf_slice_fn, name="embedding_user_mf")(embedding_user)
  mf_item_latent = tf.keras.layers.Lambda(
      mf_slice_fn, name="embedding_item_mf")(embedding_item)
  # MLP part
  mlp_user_latent = tf.keras.layers.Lambda(
      mlp_slice_fn, name="embedding_user_mlp")(embedding_user)
  mlp_item_latent = tf.keras.layers.Lambda(
      mlp_slice_fn, name="embedding_item_mlp")(embedding_item)
  # Element-wise multiply
  mf_vector = tf.keras.layers.multiply([mf_user_latent, mf_item_latent])
  # Concatenation of two latent features
  mlp_vector = tf.keras.layers.concatenate([mlp_user_latent, mlp_item_latent])
  num_layer = len(model_layers)  # Number of layers in the MLP
  # Layer 0 is the concatenated embedding itself; dense layers start at 1.
  for layer in xrange(1, num_layer):
    model_layer = tf.keras.layers.Dense(
        model_layers[layer],
        kernel_regularizer=tf.keras.regularizers.l2(mlp_reg_layers[layer]),
        activation="relu")
    mlp_vector = model_layer(mlp_vector)
  # Concatenate GMF and MLP parts
  predict_vector = tf.keras.layers.concatenate([mf_vector, mlp_vector])
  # Final prediction layer
  logits = tf.keras.layers.Dense(
      1, activation=None, kernel_initializer="lecun_uniform",
      name=movielens.RATING_COLUMN)(predict_vector)
  # Print model topology.
  model = tf.keras.models.Model([user_input, item_input], logits)
  model.summary()
  sys.stdout.flush()
  return model
def _get_estimator_spec_with_metrics(logits: tf.Tensor,
                                     softmax_logits: tf.Tensor,
                                     duplicate_mask: tf.Tensor,
                                     num_training_neg: int,
                                     match_mlperf: bool = False,
                                     use_tpu_spec: bool = False):
  """Build an EVAL-mode EstimatorSpec carrying the HR/NDCG metrics.

  Delegates loss/metric construction to compute_eval_loss_and_metrics_helper
  and wraps the result in either a TPUEstimatorSpec or a plain EstimatorSpec.
  """
  (cross_entropy,
   metric_fn,
   in_top_k,
   ndcg,
   metric_weights) = compute_eval_loss_and_metrics_helper(
       logits,
       softmax_logits,
       duplicate_mask,
       num_training_neg,
       match_mlperf)

  # TPU evaluation wires the metric function plus its tensor arguments through
  # `eval_metrics`; the regular Estimator takes already-built metric ops.
  if use_tpu_spec:
    return tf.estimator.tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=cross_entropy,
        eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights]))

  return tf.estimator.EstimatorSpec(
      mode=tf.estimator.ModeKeys.EVAL,
      loss=cross_entropy,
      eval_metric_ops=metric_fn(in_top_k, ndcg, metric_weights)
  )
def compute_eval_loss_and_metrics_helper(logits: tf.Tensor,
softmax_logits: tf.Tensor,
duplicate_mask: tf.Tensor,
num_training_neg: int,
match_mlperf: bool = False):
"""Model evaluation with HR and NDCG metrics.
The evaluation protocol is to rank the test interacted item (truth items)
among the randomly chosen 999 items that are not interacted by the user.
The performance of the ranked list is judged by Hit Ratio (HR) and Normalized
Discounted Cumulative Gain (NDCG).
For evaluation, the ranked list is truncated at 10 for both metrics. As such,
the HR intuitively measures whether the test item is present on the top-10
list, and the NDCG accounts for the position of the hit by assigning higher
scores to hits at top ranks. Both metrics are calculated for each test user,
and the average scores are reported.
If `match_mlperf` is True, then the HR and NDCG computations are done in a
slightly unusual way to match the MLPerf reference implementation.
Specifically, if the evaluation negatives contain duplicate items, it will be
treated as if the item only appeared once. Effectively, for duplicate items in
a row, the predicted score for all but one of the items will be set to
-infinity
For example, suppose we have that following inputs:
logits_by_user: [[ 2, 3, 3],
[ 5, 4, 4]]
items_by_user: [[10, 20, 20],
[30, 40, 40]]
# Note: items_by_user is not explicitly present. Instead the relevant \
information is contained within `duplicate_mask`
top_k: 2
Then with match_mlperf=True, the HR would be 2/2 = 1.0. With
match_mlperf=False, the HR would be 1/2 = 0.5. This is because each user has
predicted scores for only 2 unique items: 10 and 20 for the first user, and 30
and 40 for the second. Therefore, with match_mlperf=True, it's guaranteed the
first item's score is in the top 2. With match_mlperf=False, this function
would compute the first user's first item is not in the top 2, because item 20
has a higher score, and item 20 occurs twice.
Args:
logits: A tensor containing the predicted logits for each user. The shape of
logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),) Logits for a
user are grouped, and the last element of the group is the true element.
softmax_logits: The same tensor, but with zeros left-appended.
duplicate_mask: A vector with the same shape as logits, with a value of 1 if
the item corresponding to the logit at that position has already appeared
for that user.
num_training_neg: The number of negatives per positive during training.
match_mlperf: Use the MLPerf reference convention for computing rank.
Returns:
cross_entropy: the loss
metric_fn: the metrics function
in_top_k: hit rate metric
ndcg: ndcg metric
metric_weights: metric weights
"""
in_top_k, ndcg, metric_weights, logits_by_user = compute_top_k_and_ndcg(
logits, duplicate_mask, match_mlperf)
# Examples are provided by the eval Dataset in a structured format, so eval
# labels can be reconstructed on the fly.
eval_labels = tf.reshape(shape=(-1,), tensor=tf.one_hot(
tf.zeros(shape=(logits_by_user.shape[0],), dtype=tf.int32) +
rconst.NUM_EVAL_NEGATIVES, logits_by_user.shape[1], dtype=tf.int32))
eval_labels_float = tf.cast(eval_labels, tf.float32)
# During evaluation, the ratio of negatives to positives is much higher
# than during training. (Typically 999 to 1 vs. 4 to 1) By adjusting the
# weights for the negative examples we compute a loss which is consistent with
# the training data. (And provides apples-to-apples comparison)
negative_scale_factor = num_training_neg / rconst.NUM_EVAL_NEGATIVES
example_weights = (
(eval_labels_float + (1 - eval_labels_float) * negative_scale_factor) *
(1 + rconst.NUM_EVAL_NEGATIVES) / (1 + num_training_neg))
# Tile metric weights back to logit dimensions
expanded_metric_weights = tf.reshape(tf.tile(
metric_weights[:, tf.newaxis], (1, rconst.NUM_EVAL_NEGATIVES + 1)), (-1,))
# ignore padded examples
example_weights *= tf.cast(expanded_metric_weights, tf.float32)
cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
logits=softmax_logits, labels=eval_labels, weights=example_weights)
  def metric_fn(top_k_tensor, ndcg_tensor, weight_tensor):
    # Build the eval metric dict: streaming (weighted) means of the per-user
    # hit-rate indicator and NDCG value. `weight_tensor` zeroes out examples
    # whose weight is False/0 — presumably padded users; confirm against the
    # metric_weights computation in compute_top_k_and_ndcg.
    return {
        rconst.HR_KEY: tf.compat.v1.metrics.mean(top_k_tensor,
                                                 weights=weight_tensor,
                                                 name=rconst.HR_METRIC_NAME),
        rconst.NDCG_KEY: tf.compat.v1.metrics.mean(ndcg_tensor,
                                                   weights=weight_tensor,
                                                   name=rconst.NDCG_METRIC_NAME)
    }
return cross_entropy, metric_fn, in_top_k, ndcg, metric_weights
def compute_top_k_and_ndcg(logits: tf.Tensor,
                           duplicate_mask: tf.Tensor,
                           match_mlperf: bool = False):
  """Compute inputs of metric calculation.

  Args:
    logits: A tensor containing the predicted logits for each user. The shape
      of logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),). Logits
      for a user are grouped, and the last element of the group is the true
      element. (The code below extracts the true item from row position
      NUM_EVAL_NEGATIVES, i.e. the last column after reshaping.)
    duplicate_mask: A vector with the same shape as logits, with a value of 1
      if the item corresponding to the logit at that position has already
      appeared for that user.
    match_mlperf: Use the MLPerf reference convention for computing rank.

  Returns:
    in_top_k, ndcg and metric_weights, all of which have size
    (num_users_in_batch,), and logits_by_user which has size
    (num_users_in_batch, (rconst.NUM_EVAL_NEGATIVES + 1)).
  """
  # Group each user's (1 + NUM_EVAL_NEGATIVES) candidate logits into one row.
  logits_by_user = tf.reshape(logits, (-1, rconst.NUM_EVAL_NEGATIVES + 1))
  duplicate_mask_by_user = tf.cast(
      tf.reshape(duplicate_mask, (-1, rconst.NUM_EVAL_NEGATIVES + 1)),
      logits_by_user.dtype)

  if match_mlperf:
    # Set duplicate logits to the min value for that dtype. The MLPerf
    # reference dedupes during evaluation, so duplicates can never outrank
    # the true item.
    logits_by_user *= (1 - duplicate_mask_by_user)
    logits_by_user += duplicate_mask_by_user * logits_by_user.dtype.min

  # Determine the location of the true item (last column) after each row's
  # elements are sorted in descending order of logit.
  sort_indices = tf.argsort(
      logits_by_user, axis=1, direction="DESCENDING")

  # Use matrix multiplication to extract the position of the true item from
  # the tensor of sorted indices. This approach is chosen because both GPUs
  # and TPUs perform matrix multiplications very quickly. This is similar to
  # np.argwhere. However this is a special case because the target will only
  # appear in sort_indices once.
  one_hot_position = tf.cast(tf.equal(sort_indices, rconst.NUM_EVAL_NEGATIVES),
                             tf.int32)
  sparse_positions = tf.multiply(
      one_hot_position, tf.range(logits_by_user.shape[1])[tf.newaxis, :])
  position_vector = tf.reduce_sum(sparse_positions, axis=1)

  # position_vector is the 0-based rank of the true item; HR@K counts ranks
  # strictly below TOP_K, and NDCG for a single relevant item is
  # log(2) / log(rank + 2) == 1 / log2(rank + 2).
  in_top_k = tf.cast(tf.less(position_vector, rconst.TOP_K), tf.float32)
  ndcg = tf.math.log(2.) / tf.math.log(
      tf.cast(position_vector, tf.float32) + 2)
  ndcg *= in_top_k

  # Padded rows are detected by every negative slot being flagged as a
  # duplicate (row sum == NUM_EVAL_NEGATIVES), which yields weight False.
  # Presumably only the true-item slot is unflagged for padded users —
  # verify against the eval input pipeline that builds duplicate_mask.
  metric_weights = tf.not_equal(tf.reduce_sum(duplicate_mask_by_user, axis=1),
                                rconst.NUM_EVAL_NEGATIVES)

  return in_top_k, ndcg, metric_weights, logits_by_user
# NOTE(review): the lines previously here ("Subsets and Splits", dataset-viewer
# boilerplate) were non-code web-scrape residue and a syntax error in this
# Python module; removed.