repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
    """unittest.TestCase augmented with indent-insensitive comparison helpers."""

    def StripLeadingWhitespace(self, lines):
        """Strip leading whitespace in each line in 'lines'."""
        stripped = (line.lstrip() for line in lines.split('\n'))
        return '\n'.join(stripped)

    def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
        """Specialized assert that ignores the indent level."""
        self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
    """Tests for the per-method output of gmock_class._GenerateMethods."""

    def GenerateMethodSource(self, cpp_source):
        """Convert C++ source to Google Mock output source lines."""
        method_source_lines = []
        # <test> is a pseudo-filename, it is not read or written.
        builder = ast.BuilderFromSource(cpp_source, '<test>')
        ast_list = list(builder.Generate())
        # Only the first top-level AST node (the class under test) is used.
        gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
        return '\n'.join(method_source_lines)

    def testSimpleMethod(self):
        source = """
class Foo {
public:
virtual int Bar();
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testSimpleConstructorsAndDestructor(self):
        source = """
class Foo {
public:
Foo();
Foo(int x);
Foo(const Foo& f);
Foo(Foo&& f);
~Foo();
virtual int Bar() = 0;
};
"""
        # The constructors and destructor should be ignored.
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testVirtualDestructor(self):
        source = """
class Foo {
public:
virtual ~Foo();
virtual int Bar() = 0;
};
"""
        # The destructor should be ignored.
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testExplicitlyDefaultedConstructorsAndDestructor(self):
        source = """
class Foo {
public:
Foo() = default;
Foo(const Foo& f) = default;
Foo(Foo&& f) = default;
~Foo() = default;
virtual int Bar() = 0;
};
"""
        # The constructors and destructor should be ignored.
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testExplicitlyDeletedConstructorsAndDestructor(self):
        source = """
class Foo {
public:
Foo() = delete;
Foo(const Foo& f) = delete;
Foo(Foo&& f) = delete;
~Foo() = delete;
virtual int Bar() = 0;
};
"""
        # The constructors and destructor should be ignored.
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testSimpleOverrideMethod(self):
        source = """
class Foo {
public:
int Bar() override;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testSimpleConstMethod(self):
        source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
            self.GenerateMethodSource(source))

    def testExplicitVoid(self):
        source = """
class Foo {
public:
virtual int Bar(void);
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint(void));',
            self.GenerateMethodSource(source))

    def testStrangeNewlineInParameter(self):
        source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD1(Bar,\nvoid(int a));',
            self.GenerateMethodSource(source))

    def testDefaultParameters(self):
        source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
        # Parameter names are dropped when any default value is present.
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nvoid(int, char));',
            self.GenerateMethodSource(source))

    def testMultipleDefaultParameters(self):
        source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nvoid(int, char));',
            self.GenerateMethodSource(source))

    def testRemovesCommentsWhenDefaultsArePresent(self):
        source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nvoid(int, char));',
            self.GenerateMethodSource(source))

    def testDoubleSlashCommentsInParameterListAreRemoved(self):
        source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
            self.GenerateMethodSource(source))

    def testCStyleCommentsInParameterListAreNotRemoved(self):
        # NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
        # comments. Also note that C style comments after the last parameter
        # are still elided.
        source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
            self.GenerateMethodSource(source))

    def testArgsOfTemplateTypes(self):
        source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\n'
            'int(const vector<int>& v, map<int, string>* output));',
            self.GenerateMethodSource(source))

    def testReturnTypeWithOneTemplateArg(self):
        source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
            self.GenerateMethodSource(source))

    def testReturnTypeWithManyTemplateArgs(self):
        source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
        # Comparing the comment text is brittle - we'll think of something
        # better in case this gets annoying, but for now let's keep it simple.
        self.assertEqualIgnoreLeadingWhitespace(
            '// The following line won\'t really compile, as the return\n'
            '// type has multiple template arguments. To fix it, use a\n'
            '// typedef for the return type.\n'
            'MOCK_METHOD0(Bar,\nmap<int, string>());',
            self.GenerateMethodSource(source))

    def testSimpleMethodInTemplatedClass(self):
        source = """
template<class T>
class Foo {
public:
virtual int Bar();
};
"""
        # Templated classes get the _T mock-macro variants.
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0_T(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testPointerArgWithoutNames(self):
        source = """
class Foo {
virtual int Bar(C*);
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD1(Bar,\nint(C*));',
            self.GenerateMethodSource(source))

    def testReferenceArgWithoutNames(self):
        source = """
class Foo {
virtual int Bar(C&);
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD1(Bar,\nint(C&));',
            self.GenerateMethodSource(source))

    def testArrayArgWithoutNames(self):
        source = """
class Foo {
virtual int Bar(C[]);
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD1(Bar,\nint(C[]));',
            self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
    """Tests for the whole-file output of gmock_class._GenerateMocks."""

    def GenerateMocks(self, cpp_source):
        """Convert C++ source to complete Google Mock output source."""
        # <test> is a pseudo-filename, it is not read or written.
        filename = '<test>'
        builder = ast.BuilderFromSource(cpp_source, filename)
        ast_list = list(builder.Generate())
        lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
        return '\n'.join(lines)

    def testNamespaces(self):
        source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
        # Only the namespaces enclosing the mocked class are reproduced;
        # sibling namespace Bar (forward declaration only) is dropped.
        expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))

    def testClassWithStorageSpecifierMacro(self):
        source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
        expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))

    def testTemplatedForwardDeclaration(self):
        source = """
template <class T> class Forward; // Forward declaration should be ignored.
class Test {
public:
virtual void Foo();
};
"""
        expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))

    def testTemplatedClass(self):
        source = """
template <typename S, typename T>
class Test {
public:
virtual void Foo();
};
"""
        # Template parameters are renamed to the canonical T0, T1, ...
        expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))

    def testTemplateInATemplateTypedef(self):
        source = """
class Test {
public:
typedef std::vector<std::list<int>> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
        expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))

    def testTemplateInATemplateTypedefWithComma(self):
        source = """
class Test {
public:
typedef std::function<void(
const vector<std::list<int>>&, int> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
        expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))
# Allow running the tests directly: python gmock_class_test.py
if __name__ == '__main__':
    unittest.main()
| 11,356 | 24.293987 | 78 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/cpp/utils.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys

# Set to True to see the start/end token indices.
# NOTE: the shipped default is False; checking this in as True was a
# debugging leftover that made every AST node's string form include
# token indices (see Node._StringHelper in ast.py).
DEBUG = False
def ReadFile(filename, print_error=True):
    """Returns the contents of a file.

    Args:
      filename: path of the file to read.
      print_error: if True, print a diagnostic message when the file
        cannot be read.

    Returns:
      The file's contents as a string, or None on IOError.
    """
    try:
        # 'with' guarantees the file is closed even if read() raises,
        # replacing the original explicit try/finally close() pair.
        with open(filename) as fp:
            return fp.read()
    except IOError:
        if print_error:
            print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
        return None
| 1,153 | 26.47619 | 74 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/cpp/__init__.py | 0 | 0 | 0 | py | |
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/cpp/ast.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
# Compatibility shims: define builtins that very old Pythons lack.
if not hasattr(builtins, 'reversed'):
    # Support Python 2.3 and earlier.
    def reversed(seq):
        for i in range(len(seq)-1, -1, -1):
            yield seq[i]

if not hasattr(builtins, 'next'):
    # Support Python 2.5 and earlier.
    def next(obj):
        return obj.next()
# C++ access-specifier levels, in declaration order.
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)

# Bit flags describing properties of a parsed function/method; combined
# into the 'modifiers' bitmask on Function/Method nodes.
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100

"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""

# Sentinel token type/name used to signal namespace exit in the token stream.
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
    """Base AST node: a [start, end) span of token offsets in the source."""

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def IsDeclaration(self):
        """Returns bool if this node is a declaration."""
        return False

    def IsDefinition(self):
        """Returns bool if this node is a definition."""
        return False

    def IsExportable(self):
        """Returns bool if this node exportable from a header file."""
        return False

    def Requires(self, node):
        """Does this AST node require the definition of the node passed in?"""
        return False

    def XXX__str__(self):
        # Intentionally not named __str__; subclasses provide their own.
        return self._StringHelper(self.__class__.__name__, '')

    def _StringHelper(self, name, suffix):
        # With utils.DEBUG on, include the start/end token indices.
        if utils.DEBUG:
            return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
        return '%s(%s)' % (name, suffix)

    def __repr__(self):
        return str(self)
class Define(Node):
    """A preprocessor #define: macro name plus its replacement text."""

    def __init__(self, start, end, name, definition):
        Node.__init__(self, start, end)
        self.name = name
        self.definition = definition

    def __str__(self):
        return self._StringHelper(self.__class__.__name__,
                                  '%s %s' % (self.name, self.definition))
class Include(Node):
    """An #include directive; 'system' is True for <...> style includes."""

    def __init__(self, start, end, filename, system):
        Node.__init__(self, start, end)
        self.filename = filename
        self.system = system

    def __str__(self):
        # Render system includes as <file>, local includes as "file".
        if self.system:
            template = '<%s>'
        else:
            template = '"%s"'
        return self._StringHelper(self.__class__.__name__,
                                  template % self.filename)
class Goto(Node):
    """A goto statement; 'label' is the jump target."""

    def __init__(self, start, end, label):
        Node.__init__(self, start, end)
        self.label = label

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
    """A generic expression statement; 'expr' holds its raw tokens."""

    def __init__(self, start, end, expr):
        Node.__init__(self, start, end)
        self.expr = expr

    def Requires(self, node):
        # TODO(nnorwitz): impl.
        return False

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
    """A return statement; the returned expression tokens are in self.expr."""
    pass
class Delete(Expr):
    """A delete statement; the operand tokens are in self.expr."""
    pass
class Friend(Expr):
    """A friend declaration; remembers the namespace it appeared in."""

    def __init__(self, start, end, expr, namespace):
        Expr.__init__(self, start, end, expr)
        # Copy the namespace stack so later mutation doesn't affect this node.
        self.namespace = namespace[:]
class Using(Node):
    """A using declaration/directive; 'names' holds its tokens."""

    def __init__(self, start, end, names):
        Node.__init__(self, start, end)
        self.names = names

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
    """A single formal parameter: name, Type, and optional default tokens."""

    def __init__(self, start, end, name, parameter_type, default):
        Node.__init__(self, start, end)
        self.name = name
        self.type = parameter_type
        self.default = default

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def __str__(self):
        description = '%s %s' % (self.type, self.name)
        if self.default:
            default_text = ''.join([token.name for token in self.default])
            description += ' = ' + default_text
        return self._StringHelper(self.__class__.__name__, description)
class _GenericDeclaration(Node):
    """Base for declarations that carry a name and a namespace path."""

    def __init__(self, start, end, name, namespace):
        Node.__init__(self, start, end)
        self.name = name
        # Copy so later namespace-stack mutation doesn't affect this node.
        self.namespace = namespace[:]

    def FullName(self):
        """Return the name qualified with its namespace, e.g. 'a::b::Name'."""
        if self.namespace and self.namespace[-1]:
            return '::'.join(self.namespace) + '::' + self.name
        return self.name

    def _TypeStringHelper(self, suffix):
        # Anonymous namespace entries (None/empty) render as '<anonymous>'.
        if self.namespace:
            names = [n or '<anonymous>' for n in self.namespace]
            suffix += ' in ' + '::'.join(names)
        return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
    """A variable declaration, e.g. 'int x = 3;'."""

    def __init__(self, start, end, name, var_type, initial_value, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.type = var_type
        self.initial_value = initial_value

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def ToString(self):
        """Return a string that tries to reconstitute the variable decl."""
        text = '%s %s' % (self.type, self.name)
        if self.initial_value:
            return text + ' = ' + self.initial_value
        return text

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
    """A typedef; 'alias' holds the tokens of the aliased type."""

    def __init__(self, start, end, name, alias, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.alias = alias

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        wanted = node.name
        for token in self.alias:
            if token is not None and token.name == wanted:
                return True
        return False

    def __str__(self):
        return self._TypeStringHelper('%s, %s' % (self.name, self.alias))
class _NestedType(_GenericDeclaration):
    """Base for brace-enclosed type definitions (unions and enums)."""

    def __init__(self, start, end, name, fields, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.fields = fields

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def __str__(self):
        return self._TypeStringHelper('%s, {%s}' % (self.name, self.fields))
class Union(_NestedType):
    """A union definition; 'fields' holds its members."""
    pass
class Enum(_NestedType):
    """An enum definition; 'fields' holds its enumerators."""
    pass
class Class(_GenericDeclaration):
    """A class declaration or definition (bases, template params, body)."""

    def __init__(self, start, end, name, bases, templated_types, body, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.bases = bases
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        # A forward declaration has neither base classes nor a body.
        return self.bases is None and self.body is None

    def IsDefinition(self):
        return not self.IsDeclaration()

    def IsExportable(self):
        return not self.IsDeclaration()

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        # TODO(nnorwitz): bases are tokens, do name comparison.
        for token_list in (self.bases or []):
            for token in token_list:
                if token.name == node.name:
                    return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        display_name = self.name
        if self.templated_types:
            display_name += '<%s>' % self.templated_types
        suffix = '%s, %s, %s' % (display_name, self.bases, self.body)
        return self._TypeStringHelper(suffix)
class Struct(Class):
    """A struct; identical to Class apart from the keyword."""
    pass
class Function(_GenericDeclaration):
    """A free-function declaration or definition."""

    def __init__(self, start, end, name, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        # Convert raw token sequences into typed AST objects up front.
        converter = TypeConverter(namespace)
        self.return_type = converter.CreateReturnType(return_type)
        self.parameters = converter.ToParameters(parameters)
        self.modifiers = modifiers
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        return self.body is None

    def IsDefinition(self):
        return self.body is not None

    def IsExportable(self):
        # 'static' functions and anything inside an anonymous namespace
        # (a None entry in the namespace stack) are not exportable.
        if self.return_type and 'static' in self.return_type.modifiers:
            return False
        return None not in self.namespace

    def Requires(self, node):
        # TODO(nnorwitz): parameters are tokens, do name comparison.
        for parameter in (self.parameters or []):
            if parameter.name == node.name:
                return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        # TODO(nnorwitz): add templated_types.
        suffix = ('%s %s(%s), 0x%02x, %s' %
                  (self.return_type, self.name, self.parameters,
                   self.modifiers, self.body))
        return self._TypeStringHelper(suffix)
class Method(Function):
    """A Function declared inside a class; remembers the enclosing class."""

    def __init__(self, start, end, name, in_class, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        Function.__init__(self, start, end, name, return_type, parameters,
                          modifiers, templated_types, body, namespace)
        # TODO(nnorwitz): in_class could also be a namespace which can
        # mess up finding functions properly.
        self.in_class = in_class
class Type(_GenericDeclaration):
    """Type used for any variable (eg class, primitive, struct, etc)."""

    def __init__(self, start, end, name, templated_types, modifiers,
                 reference, pointer, array):
        """
        Args:
          name: str name of main type
          templated_types: [Class (Type?)] template type info between <>
          modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
          reference, pointer, array: bools
        """
        _GenericDeclaration.__init__(self, start, end, name, [])
        self.templated_types = templated_types
        # With no explicit name, the last modifier keyword doubles as it.
        if not name and modifiers:
            self.name = modifiers.pop()
        self.modifiers = modifiers
        self.reference = reference
        self.pointer = pointer
        self.array = array

    def __str__(self):
        pieces = []
        if self.modifiers:
            pieces.append(' '.join(self.modifiers))
            pieces.append(' ')
        pieces.append(str(self.name))
        if self.templated_types:
            pieces.append('<%s>' % self.templated_types)
        if self.reference:
            pieces.append('&')
        if self.pointer:
            pieces.append('*')
        if self.array:
            pieces.append('[]')
        return self._TypeStringHelper(''.join(pieces))

    # By definition, Is* are always False. A Type can only exist in
    # some sort of variable declaration, parameter, or return value.
    def IsDeclaration(self):
        return False

    def IsDefinition(self):
        return False

    def IsExportable(self):
        return False
class TypeConverter(object):
    # Converts raw token sequences (from the tokenizer) into typed AST
    # helper objects: Type, Parameter, and return-type Type instances.

    def __init__(self, namespace_stack):
        self.namespace_stack = namespace_stack

    def _GetTemplateEnd(self, tokens, start):
        # 'start' points just past an opening '<'. Returns (tokens inside
        # the matching <...> pair, index just past the closing '>').
        count = 1
        end = start
        while 1:
            token = tokens[end]
            end += 1
            if token.name == '<':
                count += 1
            elif token.name == '>':
                count -= 1
                if count == 0:
                    break
        return tokens[start:end-1], end

    def ToType(self, tokens):
        """Convert [Token,...] to [Class(...), ] useful for base classes.

        For example, code like class Foo : public Bar<x, y> { ... };
        the "Bar<x, y>" portion gets converted to an AST.

        Returns:
          [Class(...), ...]
        """
        result = []
        name_tokens = []
        reference = pointer = array = False

        def AddType(templated_types):
            # Partition tokens into name and modifier tokens.
            names = []
            modifiers = []
            for t in name_tokens:
                if keywords.IsKeyword(t.name):
                    modifiers.append(t.name)
                else:
                    names.append(t.name)
            name = ''.join(names)
            if name_tokens:
                result.append(Type(name_tokens[0].start, name_tokens[-1].end,
                                   name, templated_types, modifiers,
                                   reference, pointer, array))
            del name_tokens[:]

        i = 0
        end = len(tokens)
        while i < end:
            token = tokens[i]
            if token.name == '<':
                new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
                AddType(self.ToType(new_tokens))
                # If there is a comma after the template, we need to consume
                # that here otherwise it becomes part of the name.
                i = new_end
                reference = pointer = array = False
            elif token.name == ',':
                AddType([])
                reference = pointer = array = False
            elif token.name == '*':
                pointer = True
            elif token.name == '&':
                reference = True
            elif token.name == '[':
                pointer = True
            elif token.name == ']':
                pass
            else:
                name_tokens.append(token)
            i += 1
        if name_tokens:
            # No '<' in the tokens, just a simple name and no template.
            AddType([])
        return result

    def DeclarationToParts(self, parts, needs_name_removed):
        # Split one declaration's tokens into
        # (name, type_name, templated_types, modifiers, default, other_tokens).
        name = None
        default = []
        if needs_name_removed:
            # Handle default (initial) values properly.
            for i, t in enumerate(parts):
                if t.name == '=':
                    default = parts[i+1:]
                    name = parts[i-1].name
                    if name == ']' and parts[i-2].name == '[':
                        name = parts[i-3].name
                        i -= 1
                    parts = parts[:i-1]
                    break
            else:
                # No '=' found: the declared name is the trailing NAME token.
                if parts[-1].token_type == tokenize.NAME:
                    name = parts.pop().name
                else:
                    # TODO(nnorwitz): this is a hack that happens for code like
                    # Register(Foo<T>); where it thinks this is a function call
                    # but it's actually a declaration.
                    name = '???'
        modifiers = []
        type_name = []
        other_tokens = []
        templated_types = []
        i = 0
        end = len(parts)
        while i < end:
            p = parts[i]
            if keywords.IsKeyword(p.name):
                modifiers.append(p.name)
            elif p.name == '<':
                templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
                templated_types = self.ToType(templated_tokens)
                i = new_end - 1
                # Don't add a spurious :: to data members being initialized.
                next_index = i + 1
                if next_index < end and parts[next_index].name == '::':
                    i += 1
            elif p.name in ('[', ']', '='):
                # These are handled elsewhere.
                other_tokens.append(p)
            elif p.name not in ('*', '&', '>'):
                # Ensure that names have a space between them.
                if (type_name and type_name[-1].token_type == tokenize.NAME and
                        p.token_type == tokenize.NAME):
                    type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
                type_name.append(p)
            else:
                other_tokens.append(p)
            i += 1
        type_name = ''.join([t.name for t in type_name])
        return name, type_name, templated_types, modifiers, default, other_tokens

    def ToParameters(self, tokens):
        # Convert a function's flat parameter-list tokens into [Parameter,...].
        if not tokens:
            return []
        result = []
        name = type_name = ''
        type_modifiers = []
        pointer = reference = array = False
        first_token = None
        default = []

        def AddParameter(end):
            if default:
                del default[0]  # Remove flag.
            parts = self.DeclarationToParts(type_modifiers, True)
            (name, type_name, templated_types, modifiers,
             unused_default, unused_other_tokens) = parts
            parameter_type = Type(first_token.start, first_token.end,
                                  type_name, templated_types, modifiers,
                                  reference, pointer, array)
            p = Parameter(first_token.start, end, name,
                          parameter_type, default)
            result.append(p)

        template_count = 0
        for s in tokens:
            if not first_token:
                first_token = s
            if s.name == '<':
                template_count += 1
            elif s.name == '>':
                template_count -= 1
            if template_count > 0:
                # Inside a template argument list: commas are not
                # parameter separators here.
                type_modifiers.append(s)
                continue
            if s.name == ',':
                AddParameter(s.start)
                # Reset all per-parameter state for the next parameter.
                name = type_name = ''
                type_modifiers = []
                pointer = reference = array = False
                first_token = None
                default = []
            elif s.name == '*':
                pointer = True
            elif s.name == '&':
                reference = True
            elif s.name == '[':
                array = True
            elif s.name == ']':
                pass  # Just don't add to type_modifiers.
            elif s.name == '=':
                # Got a default value. Add any value (None) as a flag.
                default.append(None)
            elif default:
                default.append(s)
            else:
                type_modifiers.append(s)
        AddParameter(tokens[-1].end)
        return result

    def CreateReturnType(self, return_type_seq):
        # Build a Type for a return-type token sequence; None if empty.
        if not return_type_seq:
            return None
        start = return_type_seq[0].start
        end = return_type_seq[-1].end
        _, name, templated_types, modifiers, default, other_tokens = \
            self.DeclarationToParts(return_type_seq, False)
        names = [n.name for n in other_tokens]
        reference = '&' in names
        pointer = '*' in names
        array = '[' in names
        return Type(start, end, name, templated_types, modifiers,
                    reference, pointer, array)

    def GetTemplateIndices(self, names):
        # names is a list of strings.
        start = names.index('<')
        end = len(names) - 1
        while end > 0:
            if names[end] == '>':
                break
            end -= 1
        return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
  """Parse one declaration/definition starting at token.

  Returns an AST node, or None when the construct produces nothing
  (e.g. an unhandled preprocessor directive or a skipped #if 0 block).
  """
  if token.token_type == tokenize.NAME:
    if (keywords.IsKeyword(token.name) and
        not keywords.IsBuiltinType(token.name)):
      # Non-type keywords dispatch to the matching handle_* method.
      method = getattr(self, 'handle_' + token.name)
      return method()
    elif token.name == self.in_class_name_only:
      # The token name is the same as the class, must be a ctor if
      # there is a paren.  Otherwise, it's the return type.
      # Peek ahead to get the next token to figure out which.
      next = self._GetNextToken()
      self._AddBackToken(next)
      if next.token_type == tokenize.SYNTAX and next.name == '(':
        return self._GetMethod([token], FUNCTION_CTOR, None, True)
      # Fall through--handle like any other method.

    # Handle data or function declaration/definition.
    syntax = tokenize.SYNTAX
    temp_tokens, last_token = \
        self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
    temp_tokens.insert(0, token)
    if last_token.name == '(':
      # If there is an assignment before the paren,
      # this is an expression, not a method.
      expr = bool([e for e in temp_tokens if e.name == '='])
      if expr:
        new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
        temp_tokens.append(last_token)
        temp_tokens.extend(new_temp)
        # Synthesize a ';' terminator so the data path below runs.
        last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)

    if last_token.name == '[':
      # Handle array, this isn't a method, unless it's an operator.
      # TODO(nnorwitz): keep the size somewhere.
      # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
      temp_tokens.append(last_token)
      if temp_tokens[-2].name == 'operator':
        temp_tokens.append(self._GetNextToken())
      else:
        temp_tokens2, last_token = \
            self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
        temp_tokens.extend(temp_tokens2)

    if last_token.name == ';':
      # Handle data, this isn't a method.
      parts = self.converter.DeclarationToParts(temp_tokens, True)
      (name, type_name, templated_types, modifiers, default,
       unused_other_tokens) = parts

      t0 = temp_tokens[0]
      names = [t.name for t in temp_tokens]
      if templated_types:
        # Strip the <...> span out of the flat name sequence.
        start, end = self.converter.GetTemplateIndices(names)
        names = names[:start] + names[end:]
      default = ''.join([t.name for t in default])
      return self._CreateVariable(t0, name, type_name, modifiers,
                                  names, templated_types, default)
    if last_token.name == '{':
      # Push everything back (minus the leading keyword) and let the
      # keyword's handler parse the braced construct.
      self._AddBackTokens(temp_tokens[1:])
      self._AddBackToken(last_token)
      method_name = temp_tokens[0].name
      method = getattr(self, 'handle_' + method_name, None)
      if not method:
        # Must be declaring a variable.
        # TODO(nnorwitz): handle the declaration.
        return None
      return method()
    return self._GetMethod(temp_tokens, 0, None, False)
  elif token.token_type == tokenize.SYNTAX:
    if token.name == '~' and self.in_class:
      # Must be a dtor (probably not in method body).
      token = self._GetNextToken()
      # self.in_class can contain A::Name, but the dtor will only
      # be Name.  Make sure to compare against the right value.
      if (token.token_type == tokenize.NAME and
          token.name == self.in_class_name_only):
        return self._GetMethod([token], FUNCTION_DTOR, None, True)
    # TODO(nnorwitz): handle a lot more syntax.
  elif token.token_type == tokenize.PREPROCESSOR:
    # TODO(nnorwitz): handle more preprocessor directives.
    # token starts with a #, so remove it and strip whitespace.
    name = token.name[1:].lstrip()
    if name.startswith('include'):
      # Remove "include".
      name = name[7:].strip()
      assert name
      # Handle #include \<newline> "header-on-second-line.h".
      if name.startswith('\\'):
        name = name[1:].strip()
      assert name[0] in '<"', token
      assert name[-1] in '>"', token
      system = name[0] == '<'
      filename = name[1:-1]
      return Include(token.start, token.end, filename, system)
    if name.startswith('define'):
      # Remove "define".
      name = name[6:].strip()
      assert name
      value = ''
      # Split the macro name from its value at the first whitespace.
      for i, c in enumerate(name):
        if c.isspace():
          value = name[i:].lstrip()
          name = name[:i]
          break
      return Define(token.start, token.end, name, value)
    if name.startswith('if') and name[2:3].isspace():
      condition = name[3:].strip()
      if condition.startswith('0') or condition.startswith('(0)'):
        self._SkipIf0Blocks()
  return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necesary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
  """Consume tokens through the #endif matching an already-seen #if 0."""
  depth = 1
  while depth:
    token = self._GetNextToken()
    # Only preprocessor directives can change the nesting depth.
    if token.token_type != tokenize.PREPROCESSOR:
      continue
    directive = token.name[1:].lstrip()
    if directive.startswith('endif'):
      depth -= 1
    elif directive.startswith('if'):
      depth += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
  """Yield tokens up to AND including the balanced close_paren.

  Assumes the opening char was already consumed by the caller; the
  final yielded token is the matching close_paren.
  """
  if GetNextToken is None:
    GetNextToken = self._GetNextToken
  depth = 1
  while True:
    token = GetNextToken()
    if token.token_type == tokenize.SYNTAX:
      if token.name == open_paren:
        depth += 1
      elif token.name == close_paren:
        depth -= 1
        if depth == 0:
          break
    yield token
  # Also yield the final closing token.
  yield token
def _GetParameters(self):
  # Tokens between the already-consumed '(' and its matching ')',
  # including the ')' itself.
  return self._GetMatchingChar('(', ')')

def GetScope(self):
  # Tokens between the already-consumed '{' and its matching '}',
  # including the '}' itself.
  return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
  """Push one token back so a later _GetNextToken() returns it.

  Stream tokens are marked WHENCE_QUEUE and parked at the far end of
  the queue so their order relative to already-queued tokens is kept;
  queue tokens go to the pop() end.
  """
  if token.whence == tokenize.WHENCE_STREAM:
    token.whence = tokenize.WHENCE_QUEUE
    self.token_queue.insert(0, token)
  else:
    assert token.whence == tokenize.WHENCE_QUEUE, token
    self.token_queue.append(token)
def _AddBackTokens(self, tokens):
  """Push back a sequence of tokens, preserving their read order.

  Same whence protocol as _AddBackToken, applied to the whole batch;
  the batch is assumed to be uniformly stream- or queue-originated
  (only the last token's whence is checked).
  """
  if tokens:
    if tokens[-1].whence == tokenize.WHENCE_STREAM:
      for token in tokens:
        token.whence = tokenize.WHENCE_QUEUE
      self.token_queue[:0] = reversed(tokens)
    else:
      assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
      self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
  """Returns ([tokens], next_token_info).

  Reads a (possibly qualified, possibly templated) identifier either
  from the live token stream or, if seq is given, from that sequence.
  The returned next_token is the first token NOT part of the name.
  """
  GetNextToken = self._GetNextToken
  if seq is not None:
    it = iter(seq)
    GetNextToken = lambda: next(it)
  next_token = GetNextToken()
  tokens = []
  last_token_was_name = False
  while (next_token.token_type == tokenize.NAME or
         (next_token.token_type == tokenize.SYNTAX and
          next_token.name in ('::', '<'))):
    # Two NAMEs in a row means the identifier should terminate.
    # It's probably some sort of variable declaration.
    if last_token_was_name and next_token.token_type == tokenize.NAME:
      break
    last_token_was_name = next_token.token_type == tokenize.NAME
    tokens.append(next_token)
    # Handle templated names.
    if next_token.name == '<':
      tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
      # The '<...>' span counts as part of the name just read.
      last_token_was_name = True
    next_token = GetNextToken()
  return tokens, next_token
def GetMethod(self, modifiers, templated_types):
  """Parse a method/function whose return type starts the stream."""
  signature_tokens = self._GetTokensUpTo(tokenize.SYNTAX, '(')
  assert len(signature_tokens) >= 1
  return self._GetMethod(signature_tokens, modifiers, templated_types,
                         False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
               get_paren):
  """Parse a method/function body or declaration into an AST node.

  Args:
    return_type_and_name: tokens read so far (return type then name).
    modifiers: FUNCTION_* bit flags accumulated by the caller.
    templated_types: dict from handle_template, or None.
    get_paren: if True, the '(' has not been consumed yet (ctor/dtor).

  Returns a Method, Function, or (for function-pointer data members)
  a VariableDeclaration node.
  """
  template_portion = None
  if get_paren:
    token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX, token
    if token.name == '<':
      # Handle templatized dtors.
      template_portion = [token]
      template_portion.extend(self._GetMatchingChar('<', '>'))
      token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX, token
    assert token.name == '(', token

  name = return_type_and_name.pop()
  # Handle templatized ctors.
  if name.name == '>':
    index = 1
    while return_type_and_name[index].name != '<':
      index += 1
    template_portion = return_type_and_name[index:] + [name]
    del return_type_and_name[index:]
    name = return_type_and_name.pop()
  elif name.name == ']':
    # The trailing tokens were 'operator [ ]': fuse them into one name.
    rt = return_type_and_name
    assert rt[-1].name == '[', return_type_and_name
    assert rt[-2].name == 'operator', return_type_and_name
    name_seq = return_type_and_name[-2:]
    del return_type_and_name[-2:]
    name = tokenize.Token(tokenize.NAME, 'operator[]',
                          name_seq[0].start, name.end)
    # Get the open paren so _GetParameters() below works.
    unused_open_paren = self._GetNextToken()

  # TODO(nnorwitz): store template_portion.
  return_type = return_type_and_name
  # 'indices' is only used as a source-position anchor (start/end).
  indices = name
  if return_type:
    indices = return_type[0]

  # Force ctor for templatized ctors.
  if name.name == self.in_class and not modifiers:
    modifiers |= FUNCTION_CTOR
  parameters = list(self._GetParameters())
  del parameters[-1]  # Remove trailing ')'.

  # Handling operator() is especially weird.
  if name.name == 'operator' and not parameters:
    token = self._GetNextToken()
    assert token.name == '(', token
    parameters = list(self._GetParameters())
    del parameters[-1]  # Remove trailing ')'.

  # Consume trailing modifiers: const, throw(...), override, macros...
  token = self._GetNextToken()
  while token.token_type == tokenize.NAME:
    modifier_token = token
    token = self._GetNextToken()
    if modifier_token.name == 'const':
      modifiers |= FUNCTION_CONST
    elif modifier_token.name == '__attribute__':
      # TODO(nnorwitz): handle more __attribute__ details.
      modifiers |= FUNCTION_ATTRIBUTE
      assert token.name == '(', token
      # Consume everything between the (parens).
      unused_tokens = list(self._GetMatchingChar('(', ')'))
      token = self._GetNextToken()
    elif modifier_token.name == 'throw':
      modifiers |= FUNCTION_THROW
      assert token.name == '(', token
      # Consume everything between the (parens).
      unused_tokens = list(self._GetMatchingChar('(', ')'))
      token = self._GetNextToken()
    elif modifier_token.name == 'override':
      modifiers |= FUNCTION_OVERRIDE
    elif modifier_token.name == modifier_token.name.upper():
      # HACK(nnorwitz): assume that all upper-case names
      # are some macro we aren't expanding.
      modifiers |= FUNCTION_UNKNOWN_ANNOTATION
    else:
      self.HandleError('unexpected token', modifier_token)

  assert token.token_type == tokenize.SYNTAX, token
  # Handle ctor initializers.
  if token.name == ':':
    # TODO(nnorwitz): anything else to handle for initializer list?
    while token.name != ';' and token.name != '{':
      token = self._GetNextToken()

  # Handle pointer to functions that are really data but look
  # like method declarations.
  if token.name == '(':
    if parameters[0].name == '*':
      # name contains the return type.
      name = parameters.pop()
      # parameters contains the name of the data.
      modifiers = [p.name for p in parameters]
      # Already at the ( to open the parameter list.
      function_parameters = list(self._GetMatchingChar('(', ')'))
      del function_parameters[-1]  # Remove trailing ')'.
      # TODO(nnorwitz): store the function_parameters.
      token = self._GetNextToken()
      assert token.token_type == tokenize.SYNTAX, token
      assert token.name == ';', token
      return self._CreateVariable(indices, name.name, indices.name,
                                  modifiers, '', None)
    # At this point, we got something like:
    #  return_type (type::*name_)(params);
    # This is a data member called name_ that is a function pointer.
    # With this code: void (sq_type::*field_)(string&);
    # We get: name=void return_type=[] parameters=sq_type ... field_
    # TODO(nnorwitz): is return_type always empty?
    # TODO(nnorwitz): this isn't even close to being correct.
    # Just put in something so we don't crash and can move on.
    real_name = parameters[-1]
    modifiers = [p.name for p in self._GetParameters()]
    del modifiers[-1]  # Remove trailing ')'.
    return self._CreateVariable(indices, real_name.name, indices.name,
                                modifiers, '', None)

  if token.name == '{':
    body = list(self.GetScope())
    del body[-1]  # Remove trailing '}'.
  else:
    body = None
    if token.name == '=':
      token = self._GetNextToken()

      if token.name == 'default' or token.name == 'delete':
        # Ignore explicitly defaulted and deleted special members
        # in C++11.
        token = self._GetNextToken()
      else:
        # Handle pure-virtual declarations.
        assert token.token_type == tokenize.CONSTANT, token
        assert token.name == '0', token
        modifiers |= FUNCTION_PURE_VIRTUAL
        token = self._GetNextToken()

    if token.name == '[':
      # TODO(nnorwitz): store tokens and improve parsing.
      # template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
      tokens = list(self._GetMatchingChar('[', ']'))
      token = self._GetNextToken()

    assert token.name == ';', (token, return_type_and_name, parameters)

  # Looks like we got a method, not a function.
  if len(return_type) > 2 and return_type[-1].name == '::':
    return_type, in_class = \
        self._GetReturnTypeAndClassName(return_type)
    return Method(indices.start, indices.end, name.name, in_class,
                  return_type, parameters, modifiers, templated_types,
                  body, self.namespace_stack)
  return Function(indices.start, indices.end, name.name, return_type,
                  parameters, modifiers, templated_types, body,
                  self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
  """Split 'ReturnType Class::' tokens into (return_type, class_name).

  Splitting the return type from the class name in a method
  can be tricky.  For example, Return::Type::Is::Hard::To::Find().
  Where is the return type and where is the class name?
  The heuristic used is to pull the last name as the class name.
  This includes all the templated type info.
  TODO(nnorwitz): if there is only One name like in the
  example above, punt and assume the last bit is the class name.
  """
  # Ignore a :: prefix, if exists so we can find the first real name.
  i = 0
  if token_seq[0].name == '::':
    i = 1
  # Ignore a :: suffix, if exists.
  end = len(token_seq) - 1
  if token_seq[end-1].name == '::':
    end -= 1

  # Make a copy of the sequence so we can append a sentinel
  # value. This is required for GetName will has to have some
  # terminating condition beyond the last name.
  seq_copy = token_seq[i:end]
  seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
  names = []
  while i < end:
    # Iterate through the sequence parsing out each name.
    new_name, next = self.GetName(seq_copy[i:])
    assert new_name, 'Got empty new_name, next=%s' % next
    # We got a pointer or ref.  Add it to the name.
    if next and next.token_type == tokenize.SYNTAX:
      new_name.append(next)
    names.append(new_name)
    i += len(new_name)

  # Now that we have the names, it's time to undo what we did.
  # Remove the sentinel value.
  names[-1].pop()
  # Flatten the token sequence for the return type.
  return_type = [e for seq in names[:-1] for e in seq]
  # The class name is the last name.
  class_name = names[-1]
  return return_type, class_name
# Builtin-type keywords produce nothing on their own; they are consumed
# as part of a declaration by _GenerateOne / _GetMethod.
def handle_bool(self):
  pass

def handle_char(self):
  pass

def handle_int(self):
  pass

def handle_long(self):
  pass

def handle_short(self):
  pass

def handle_double(self):
  pass

def handle_float(self):
  pass

def handle_void(self):
  pass

def handle_wchar_t(self):
  pass

def handle_unsigned(self):
  pass

def handle_signed(self):
  pass
def _GetNestedType(self, ctor):
  """Parse a union/enum (or similar nested type) declaration.

  Args:
    ctor: node factory (e.g. Union, Enum) with the signature
      ctor(start, end, name, fields, namespace_stack).

  Returns the constructed node, or a VariableDeclaration when the
  type is used directly to declare a variable.
  """
  name = None
  name_tokens, token = self.GetName()
  if name_tokens:
    name = ''.join([t.name for t in name_tokens])

  # Handle forward declarations.
  if token.token_type == tokenize.SYNTAX and token.name == ';':
    return ctor(token.start, token.end, name, None,
                self.namespace_stack)

  if token.token_type == tokenize.NAME and self._handling_typedef:
    self._AddBackToken(token)
    return ctor(token.start, token.end, name, None,
                self.namespace_stack)

  # Must be the type declaration.
  fields = list(self._GetMatchingChar('{', '}'))
  del fields[-1]  # Remove trailing '}'.
  if token.token_type == tokenize.SYNTAX and token.name == '{':
    next = self._GetNextToken()
    new_type = ctor(token.start, token.end, name, fields,
                    self.namespace_stack)
    # A name means this is an anonymous type and the name
    # is the variable declaration.
    if next.token_type != tokenize.NAME:
      return new_type
    name = new_type
    token = next

  # Must be variable declaration using the type prefixed with keyword.
  assert token.token_type == tokenize.NAME, token
  return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
  """Parse a struct definition, declaration, or struct-typed variable."""
  # Special case the handling typedef/aliasing of structs here.
  # It would be a pain to handle in the class code.
  name_tokens, var_token = self.GetName()
  if name_tokens:
    next_token = self._GetNextToken()
    # var_token may be '*'/'&' (pointer/ref declarator) or a NAME.
    is_syntax = (var_token.token_type == tokenize.SYNTAX and
                 var_token.name[0] in '*&')
    is_variable = (var_token.token_type == tokenize.NAME and
                   next_token.name == ';')
    variable = var_token
    if is_syntax and not is_variable:
      variable = next_token
      temp = self._GetNextToken()
      if temp.token_type == tokenize.SYNTAX and temp.name == '(':
        # Handle methods declared to return a struct.
        t0 = name_tokens[0]
        # Synthesize a 'struct' token positioned just before the name.
        struct = tokenize.Token(tokenize.NAME, 'struct',
                                t0.start-7, t0.start-2)
        type_and_name = [struct]
        type_and_name.extend(name_tokens)
        type_and_name.extend((var_token, next_token))
        return self._GetMethod(type_and_name, 0, None, False)
      assert temp.name == ';', (temp, name_tokens, var_token)
    if is_syntax or (is_variable and not self._handling_typedef):
      modifiers = ['struct']
      type_name = ''.join([t.name for t in name_tokens])
      position = name_tokens[0]
      return self._CreateVariable(position, variable.name, type_name,
                                  modifiers, var_token.name, None)
    name_tokens.extend((var_token, next_token))
    self._AddBackTokens(name_tokens)
  else:
    self._AddBackToken(var_token)
  return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
  # Unions parse like any other nested type.
  return self._GetNestedType(Union)

def handle_enum(self):
  # Enums parse like any other nested type.
  return self._GetNestedType(Enum)
def handle_auto(self):
  # TODO(nnorwitz): warn about using auto?  Probably not since it
  # will be reclaimed and useful for C++0x.
  pass

# Storage-class and cv qualifiers carry no structure of their own;
# they are consumed as part of the surrounding declaration.
def handle_register(self):
  pass

def handle_const(self):
  pass

def handle_inline(self):
  pass

def handle_extern(self):
  pass

def handle_static(self):
  pass
def handle_virtual(self):
  """Parse a virtual method (including virtual dtors)."""
  # What follows must be a method.
  token = token2 = self._GetNextToken()
  if token.name == 'inline':
    # HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
    token2 = self._GetNextToken()
  if token2.token_type == tokenize.SYNTAX and token2.name == '~':
    return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
  assert token.token_type == tokenize.NAME or token.name == '::', token
  return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')  # )
  return_type_and_name.insert(0, token)
  if token2 is not token:
    # 'inline' was skipped; re-insert the real first token after it.
    return_type_and_name.insert(1, token2)
  return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
                         None, False)
def handle_volatile(self):
  pass

def handle_mutable(self):
  pass

# Access-specifier keywords just update the running visibility used
# while parsing the enclosing class body.
def handle_public(self):
  assert self.in_class
  self.visibility = VISIBILITY_PUBLIC

def handle_protected(self):
  assert self.in_class
  self.visibility = VISIBILITY_PROTECTED

def handle_private(self):
  assert self.in_class
  self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
  """Consume a friend declaration through its semicolon."""
  tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
  assert tokens
  first = tokens[0]
  return Friend(first.start, first.end, tokens, self.namespace_stack)
# Cast expressions and 'new' only occur inside expressions/bodies,
# which this declaration-level parser does not model.
def handle_static_cast(self):
  pass

def handle_const_cast(self):
  pass

def handle_dynamic_cast(self):
  pass

def handle_reinterpret_cast(self):
  pass

def handle_new(self):
  pass
def handle_delete(self):
  """Consume a delete expression through its semicolon."""
  tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
  assert tokens
  first = tokens[0]
  return Delete(first.start, first.end, tokens)
def handle_typedef(self):
  """Parse a typedef (including typedef'd structs/enums/unions)."""
  token = self._GetNextToken()
  if (token.token_type == tokenize.NAME and
      keywords.IsKeyword(token.name)):
    # Token must be struct/enum/union/class.
    method = getattr(self, 'handle_' + token.name)
    self._handling_typedef = True
    tokens = [method()]
    self._handling_typedef = False
  else:
    tokens = [token]

  # Get the remainder of the typedef up to the semi-colon.
  tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))

  # TODO(nnorwitz): clean all this up.
  assert tokens
  name = tokens.pop()
  # 'indices' is only used as a source-position anchor.
  indices = name
  if tokens:
    indices = tokens[0]
  if not indices:
    indices = token
  if name.name == ')':
    # HACK(nnorwitz): Handle pointers to functions "properly".
    if (len(tokens) >= 4 and
        tokens[1].name == '(' and tokens[2].name == '*'):
      tokens.append(name)
      name = tokens[3]
  elif name.name == ']':
    # HACK(nnorwitz): Handle arrays properly.
    if len(tokens) >= 2:
      tokens.append(name)
      name = tokens[1]

  new_type = tokens
  if tokens and isinstance(tokens[0], tokenize.Token):
    new_type = self.converter.ToType(tokens)[0]
  return Typedef(indices.start, indices.end, name.name,
                 new_type, self.namespace_stack)
def handle_typeid(self):
  pass  # Not needed yet.

def handle_typename(self):
  pass  # Not needed yet.
def _GetTemplatedTypes(self):
  """Parse 'name[ = default]' pairs inside template<...>.

  Returns a dict mapping parameter name -> (type_name, default),
  where either element may be None.
  """
  result = {}
  tokens = list(self._GetMatchingChar('<', '>'))
  len_tokens = len(tokens) - 1   # Ignore trailing '>'.
  i = 0
  while i < len_tokens:
    key = tokens[i].name
    i += 1
    # Skip keywords ('typename', 'class', ...) and separators.
    if keywords.IsKeyword(key) or key == ',':
      continue
    type_name = default = None
    if i < len_tokens:
      i += 1
      if tokens[i-1].name == '=':
        assert i < len_tokens, '%s %s' % (i, tokens)
        default, unused_next_token = self.GetName(tokens[i:])
        i += len(default)
      else:
        if tokens[i-1].name != ',':
          # We got something like: Type variable.
          # Re-adjust the key (variable) and type_name (Type).
          key = tokens[i-1].name
          type_name = tokens[i-2]
    result[key] = (type_name, default)
  return result
def handle_template(self):
  """Parse a template declaration (class, struct, friend, or function)."""
  token = self._GetNextToken()
  assert token.token_type == tokenize.SYNTAX, token
  assert token.name == '<', token
  templated_types = self._GetTemplatedTypes()
  # TODO(nnorwitz): for now, just ignore the template params.
  token = self._GetNextToken()
  if token.token_type == tokenize.NAME:
    if token.name == 'class':
      return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
    elif token.name == 'struct':
      return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
    elif token.name == 'friend':
      return self.handle_friend()
  self._AddBackToken(token)
  # Peek ahead to distinguish a templated function from a variable.
  tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
  tokens.append(last)
  self._AddBackTokens(tokens)
  if last.name == '(':
    return self.GetMethod(FUNCTION_NONE, templated_types)
  # Must be a variable definition.
  return None
def handle_true(self):
  pass  # Nothing to do.

def handle_false(self):
  pass  # Nothing to do.

def handle_asm(self):
  pass  # Not needed yet.
def handle_class(self):
  # 'class' members default to private visibility.
  return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
  """Parse the base-class list after ':'; return (bases, '{' token)."""
  # Get base classes.
  bases = []
  while 1:
    token = self._GetNextToken()
    assert token.token_type == tokenize.NAME, token
    # TODO(nnorwitz): store kind of inheritance...maybe.
    if token.name not in ('public', 'protected', 'private'):
      # If inheritance type is not specified, it is private.
      # Just put the token back so we can form a name.
      # TODO(nnorwitz): it would be good to warn about this.
      self._AddBackToken(token)
    else:
      # Check for virtual inheritance.
      token = self._GetNextToken()
      if token.name != 'virtual':
        self._AddBackToken(token)
      else:
        # TODO(nnorwitz): store that we got virtual for this base.
        pass
    base, next_token = self.GetName()
    bases_ast = self.converter.ToType(base)
    assert len(bases_ast) == 1, bases_ast
    bases.append(bases_ast[0])
    assert next_token.token_type == tokenize.SYNTAX, next_token
    if next_token.name == '{':
      token = next_token
      break
    # Support multiple inheritance.
    assert next_token.name == ',', next_token
  return bases, token
def _GetClass(self, class_type, visibility, templated_types):
  """Parse a class/struct after its keyword has been consumed.

  Args:
    class_type: node factory (Class or Struct).
    visibility: default member visibility for the body.
    templated_types: template parameters, or None.

  Returns a class_type node, or a VariableDeclaration when the class
  is used inline to declare data.
  """
  class_name = None
  class_token = self._GetNextToken()
  if class_token.token_type != tokenize.NAME:
    assert class_token.token_type == tokenize.SYNTAX, class_token
    token = class_token
  else:
    # Skip any macro (e.g. storage class specifiers) after the
    # 'class' keyword.
    next_token = self._GetNextToken()
    if next_token.token_type == tokenize.NAME:
      self._AddBackToken(next_token)
    else:
      self._AddBackTokens([class_token, next_token])
    name_tokens, token = self.GetName()
    class_name = ''.join([t.name for t in name_tokens])
  bases = None
  if token.token_type == tokenize.SYNTAX:
    if token.name == ';':
      # Forward declaration.
      return class_type(class_token.start, class_token.end,
                        class_name, None, templated_types, None,
                        self.namespace_stack)
    if token.name in '*&':
      # Inline forward declaration.  Could be method or data.
      name_token = self._GetNextToken()
      next_token = self._GetNextToken()
      if next_token.name == ';':
        # Handle data
        modifiers = ['class']
        return self._CreateVariable(class_token, name_token.name,
                                    class_name,
                                    modifiers, token.name, None)
      else:
        # Assume this is a method.
        tokens = (class_token, token, name_token, next_token)
        self._AddBackTokens(tokens)
        return self.GetMethod(FUNCTION_NONE, None)
    if token.name == ':':
      bases, token = self._GetBases()

  body = None
  if token.token_type == tokenize.SYNTAX and token.name == '{':
    assert token.token_type == tokenize.SYNTAX, token
    assert token.name == '{', token

    # Recursively parse the class body with a nested builder.
    ast = AstBuilder(self.GetScope(), self.filename, class_name,
                     visibility, self.namespace_stack)
    body = list(ast.Generate())

    if not self._handling_typedef:
      token = self._GetNextToken()
      if token.token_type != tokenize.NAME:
        assert token.token_type == tokenize.SYNTAX, token
        assert token.name == ';', token
      else:
        # 'class Foo {...} var;' -- the class is the variable's type.
        new_class = class_type(class_token.start, class_token.end,
                               class_name, bases, None,
                               body, self.namespace_stack)

        modifiers = []
        return self._CreateVariable(class_token,
                                    token.name, new_class,
                                    modifiers, token.name, None)
  else:
    if not self._handling_typedef:
      self.HandleError('non-typedef token', token)
    self._AddBackToken(token)

  return class_type(class_token.start, class_token.end, class_name,
                    bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
  """Parse a namespace; its contents are re-queued for normal parsing."""
  token = self._GetNextToken()
  # Support anonymous namespaces.
  name = None
  if token.token_type == tokenize.NAME:
    name = token.name
    token = self._GetNextToken()
  self.namespace_stack.append(name)
  assert token.token_type == tokenize.SYNTAX, token
  # Create an internal token that denotes when the namespace is complete.
  internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
                                  None, None)
  internal_token.whence = token.whence
  if token.name == '=':
    # TODO(nnorwitz): handle aliasing namespaces.
    name, next_token = self.GetName()
    assert next_token.name == ';', next_token
    self._AddBackToken(internal_token)
  else:
    assert token.name == '{', token
    tokens = list(self.GetScope())
    # Replace the trailing } with the internal namespace pop token.
    tokens[-1] = internal_token
    # Handle namespace with nothing in it.
    self._AddBackTokens(tokens)
  return None
def handle_using(self):
  """Consume a using declaration/directive through its semicolon."""
  tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
  assert tokens
  first = tokens[0]
  return Using(first.start, first.end, tokens)
def handle_explicit(self):
  assert self.in_class
  # Nothing much to do.
  # TODO(nnorwitz): maybe verify the method name == class name.
  # This must be a ctor.
  return self.GetMethod(FUNCTION_CTOR, None)

def handle_this(self):
  pass  # Nothing to do.

def handle_operator(self):
  # Pull off the next token(s?) and make that part of the method name.
  pass
def handle_sizeof(self):
  pass

def handle_case(self):
  pass

def handle_switch(self):
  pass

def handle_default(self):
  # 'default:' inside a switch -- consume the ':' so it is not
  # misparsed as something else.
  token = self._GetNextToken()
  assert token.token_type == tokenize.SYNTAX
  assert token.name == ':'

def handle_if(self):
  pass

def handle_else(self):
  pass
def handle_return(self):
  """Parse a return statement into a Return node."""
  tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
  if not tokens:
    # 'return;' with no expression: anchor at the current token.
    cur = self.current_token
    return Return(cur.start, cur.end, None)
  first = tokens[0]
  return Return(first.start, first.end, tokens)
def handle_goto(self):
  """Parse a goto statement; the sole token is the target label."""
  tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
  assert len(tokens) == 1, str(tokens)
  label = tokens[0]
  return Goto(label.start, label.end, label.name)
def handle_try(self):
  pass  # Not needed yet.

def handle_catch(self):
  pass  # Not needed yet.

def handle_throw(self):
  pass  # Not needed yet.

def handle_while(self):
  pass

def handle_do(self):
  pass

def handle_for(self):
  pass

def handle_break(self):
  # Discard through the terminating ';'.
  self._IgnoreUpTo(tokenize.SYNTAX, ';')

def handle_continue(self):
  # Discard through the terminating ';'.
  self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
  """Utility method that returns an AstBuilder from source code.

  Args:
    source: 'C++ source code'
    filename: 'file1'

  Returns:
    AstBuilder
  """
  return AstBuilder(tokenize.GetTokens(source), filename)
def PrintIndentifiers(filename, should_print):
  """Prints all identifiers for a C++ source file.

  Args:
    filename: 'file1'
    should_print: predicate with signature: bool Function(token)
  """
  source = utils.ReadFile(filename, False)
  if source is None:
    sys.stderr.write('Unable to find: %s\n' % filename)
    return

  #print('Processing %s' % actual_filename)
  builder = BuilderFromSource(source, filename)
  try:
    for node in builder.Generate():
      if should_print(node):
        print(node.name)
  except KeyboardInterrupt:
    return
  except Exception:
    # Best-effort: parse errors on odd inputs are expected and ignored.
    # (Was a bare 'except:', which also swallowed SystemExit and
    # GeneratorExit; narrowed to Exception.)
    pass
def PrintAllIndentifiers(filenames, should_print):
  """Prints all identifiers for each C++ source file in filenames.

  Args:
    filenames: ['file1', 'file2', ...]
    should_print: predicate with signature: bool Function(token)
  """
  # Delegate each file to the single-file printer.
  for source_path in filenames:
    PrintIndentifiers(source_path, should_print)
def main(argv):
  """Parse each file named on the command line and dump its AST (if DEBUG).

  Args:
    argv: full argument vector; argv[0] is the program name.
  """
  for filename in argv[1:]:
    source = utils.ReadFile(filename)
    if source is None:
      continue

    print('Processing %s' % filename)
    builder = BuilderFromSource(source, filename)
    try:
      entire_ast = filter(None, builder.Generate())
    except KeyboardInterrupt:
      return
    except Exception:
      # Already printed a warning, print the traceback and continue.
      # (Was a bare 'except:', which also caught SystemExit; narrowed.)
      traceback.print_exc()
    else:
      if utils.DEBUG:
        for ast in entire_ast:
          print(ast)
if __name__ == '__main__':
  # Standard CLI entry point: parse each file named on the command line.
  main(sys.argv)
| 62,773 | 35.201845 | 82 | py |
spegg | spegg-master/tests/googletest/googlemock/scripts/generator/cpp/tokenize.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenize C++ source code."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
from cpp import utils
if not hasattr(builtins, 'set'):
  # Nominal support for Python 2.3.
  from sets import Set as set

# Add $ as a valid identifier char since so much code uses it.
_letters = 'abcdefghijklmnopqrstuvwxyz'
VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$')
HEX_DIGITS = set('0123456789abcdefABCDEF')
INT_OR_FLOAT_DIGITS = set('01234567890eE-+')


# C++0x string prefixes.
_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR'))


# Token types.
UNKNOWN = 'UNKNOWN'
SYNTAX = 'SYNTAX'
CONSTANT = 'CONSTANT'
NAME = 'NAME'
PREPROCESSOR = 'PREPROCESSOR'

# Where the token originated from.  This can be used for backtracking.
# It is always set to WHENCE_STREAM in this code.
WHENCE_STREAM, WHENCE_QUEUE = range(2)
class Token(object):
  """Data container to represent a C++ token.

  Tokens can be identifiers, syntax char(s), constants, or
  pre-processor directives.

  start contains the index of the first char of the token in the source
  end contains the index of the last char of the token in the source
  """

  def __init__(self, token_type, name, start, end):
    self.token_type = token_type
    self.name = name
    self.start = start
    self.end = end
    # Tokens start out stream-originated; consumers flip this to
    # WHENCE_QUEUE when a token is pushed back for re-reading.
    self.whence = WHENCE_STREAM

  def __str__(self):
    if not utils.DEBUG:
      return 'Token(%r)' % self.name
    # Debug form includes the source span.
    return 'Token(%r, %s, %s)' % (self.name, self.start, self.end)

  __repr__ = __str__
def _GetString(source, start, i):
i = source.find('"', i+1)
while source[i-1] == '\\':
# Count the trailing backslashes.
backslash_count = 1
j = i - 2
while source[j] == '\\':
backslash_count += 1
j -= 1
# When trailing backslashes are even, they escape each other.
if (backslash_count % 2) == 0:
break
i = source.find('"', i+1)
return i + 1
def _GetChar(source, start, i):
# NOTE(nnorwitz): may not be quite correct, should be good enough.
i = source.find("'", i+1)
while source[i-1] == '\\':
# Need to special case '\\'.
if (i - 2) > start and source[i-2] == '\\':
break
i = source.find("'", i+1)
# Try to handle unterminated single quotes (in a #if 0 block).
if i < 0:
i = start
return i + 1
def GetTokens(source):
  """Returns a sequence of Tokens.

  Scans C++ source text left to right, classifying each token as NAME,
  SYNTAX, CONSTANT or PREPROCESSOR.  Whitespace and comments are skipped
  and never yielded.

  Args:
    source: string of C++ source code.

  Yields:
    Token that represents the next token in the source.
  """
  # Cache various valid character sets for speed.
  valid_identifier_chars = VALID_IDENTIFIER_CHARS
  hex_digits = HEX_DIGITS
  int_or_float_digits = INT_OR_FLOAT_DIGITS
  int_or_float_digits2 = int_or_float_digits | set('.')

  # Only ignore errors while in a #if 0 block.
  ignore_errors = False
  count_ifs = 0

  i = 0
  end = len(source)

  while i < end:
    # Skip whitespace.
    while i < end and source[i].isspace():
      i += 1
    if i >= end:
      return

    token_type = UNKNOWN
    start = i
    c = source[i]
    if c.isalpha() or c == '_':              # Find a string token.
      token_type = NAME
      while source[i] in valid_identifier_chars:
        i += 1
      # String and character constants can look like a name if
      # they are something like L"".
      if (source[i] == "'" and (i - start) == 1 and
          source[start:i] in 'uUL'):
        # u, U, and L are valid C++0x character prefixes.
        token_type = CONSTANT
        i = _GetChar(source, start, i)
      elif source[i] == "'" and source[start:i] in _STR_PREFIXES:
        token_type = CONSTANT
        i = _GetString(source, start, i)
    elif c == '/' and source[i+1] == '/':    # Find // comments.
      i = source.find('\n', i)
      if i == -1:  # Handle EOF.
        i = end
      continue
    elif c == '/' and source[i+1] == '*':    # Find /* comments. */
      i = source.find('*/', i) + 2
      continue
    elif c in ':+-<>&|*=':                   # : or :: (plus other chars).
      token_type = SYNTAX
      i += 1
      new_ch = source[i]
      # Two-char operators (::, ++, --, <<, &&, ||, ->, +=, ==, ...)
      # are emitted as one token; ">>" is kept as two for templates.
      if new_ch == c and c != '>':           # Treat ">>" as two tokens.
        i += 1
      elif c == '-' and new_ch == '>':
        i += 1
      elif new_ch == '=':
        i += 1
    elif c in '()[]{}~!?^%;/.,':             # Handle single char tokens.
      token_type = SYNTAX
      i += 1
      if c == '.' and source[i].isdigit():
        # A leading dot starts a float constant (e.g. ".5f").
        token_type = CONSTANT
        i += 1
        while source[i] in int_or_float_digits:
          i += 1
        # Handle float suffixes.
        for suffix in ('l', 'f'):
          if suffix == source[i:i+1].lower():
            i += 1
            break
    elif c.isdigit():                        # Find integer.
      token_type = CONSTANT
      if c == '0' and source[i+1] in 'xX':
        # Handle hex digits.
        i += 2
        while source[i] in hex_digits:
          i += 1
      else:
        while source[i] in int_or_float_digits2:
          i += 1
      # Handle integer (and float) suffixes.
      for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'):
        size = len(suffix)
        if suffix == source[i:i+size].lower():
          i += size
          break
    elif c == '"':                           # Find string.
      token_type = CONSTANT
      i = _GetString(source, start, i)
    elif c == "'":                           # Find char.
      token_type = CONSTANT
      i = _GetChar(source, start, i)
    elif c == '#':                           # Find pre-processor command.
      token_type = PREPROCESSOR
      got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace()
      if got_if:
        count_ifs += 1
      elif source[i:i+6] == '#endif':
        count_ifs -= 1
        if count_ifs == 0:
          ignore_errors = False

      # TODO(nnorwitz): handle preprocessor statements (\ continuations).
      while 1:
        i1 = source.find('\n', i)
        i2 = source.find('//', i)
        i3 = source.find('/*', i)
        i4 = source.find('"', i)
        # NOTE(nnorwitz): doesn't handle comments in #define macros.
        # Get the first important symbol (newline, comment, EOF/end).
        i = min([x for x in (i1, i2, i3, i4, end) if x != -1])

        # Handle #include "dir//foo.h" properly.
        if source[i] == '"':
          i = source.find('"', i+1) + 1
          assert i > 0
          continue
        # Keep going if end of the line and the line ends with \.
        if not (i == i1 and source[i-1] == '\\'):
          if got_if:
            # "#if 0" (or "#if (0)") starts a block whose bogus tokens
            # should be tolerated until the matching #endif.
            condition = source[start+4:i].lstrip()
            if (condition.startswith('0') or
                condition.startswith('(0)')):
              ignore_errors = True
          break
        i += 1
    elif c == '\\':                          # Handle \ in code.
      # This is different from the pre-processor \ handling.
      i += 1
      continue
    elif ignore_errors:
      # The tokenizer seems to be in pretty good shape.  This
      # raise is conditionally disabled so that bogus code
      # in an #if 0 block can be handled.  Since we will ignore
      # it anyways, this is probably fine.  So disable the
      # exception and return the bogus char.
      i += 1
    else:
      sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' %
                       ('?', i, c, source[i-10:i+10]))
      raise RuntimeError('unexpected token')

    if i <= 0:
      print('Invalid index, exiting now.')
      return
    yield Token(token_type, source[start:i], start, i)
if __name__ == '__main__':
  def main(argv):
    """Driver mostly for testing purposes: tokenize each file and dump tokens."""
    for filename in argv[1:]:
      source = utils.ReadFile(filename)
      if source is None:
        continue
      for token in GetTokens(source):
        print('%-12s: %s' % (token.token_type, token.name))
        # print('\r%6.2f%%' % (100.0 * index / token.end),)
      sys.stdout.write('\n')

  main(sys.argv)
| 9,752 | 32.864583 | 79 | py |
deephyper | deephyper-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import os
import platform
import sys
from shutil import rmtree
from setuptools import Command, setup
# path of the directory where this file is located
here = os.path.abspath(os.path.dirname(__file__))
# query platform informations, e.g. 'macOS-12.0.1-arm64-arm-64bit'
platform_infos = platform.platform()
# What packages are required for this module to be executed?
REQUIRED = [
"ConfigSpace>=0.4.20",
"dm-tree",
"Jinja2<3.1",
"numpy", # ==1.19.4", # working with 1.20.1
"pandas>=0.24.2",
"packaging",
"parse",
"scikit-learn>=0.23.1",
"scipy>=1.7",
"tqdm>=4.64.0",
"pyyaml",
]
# !Requirements for Neural Architecture Search (NAS)
REQUIRED_NAS = ["networkx", "pydot"]
REQUIRED_NAS_PLATFORM = {
"default": ["tensorflow>=2.0.0", "tensorflow_probability"],
"macOS-arm64": ["tensorflow_probability~=0.14"],
}
# if "macOS" in platform_infos and "arm64" in platform_infos:
# REQUIRED_NAS = REQUIRED_NAS + REQUIRED_NAS_PLATFORM["macOS-arm64"]
# else: # x86_64
REQUIRED_NAS = REQUIRED_NAS + REQUIRED_NAS_PLATFORM["default"]
# !Requirements for Pipeline Optimization for ML (popt)
REQUIRED_POPT = ["xgboost"]
# !Requirements for Automated Deep Ensemble with Uncertainty Quantification (AutoDEUQ)
REQUIRED_AUTODEUQ = REQUIRED_NAS + ["ray[default]>=1.3.0"]
# !Transfer Learning for Bayesian Optimization with SVD
REQUIRED_TL_SDV = ["sdv>=0.17.1"]
# What packages are optional?
EXTRAS = {
"autodeuq": REQUIRED_AUTODEUQ, # automated deep ensemble with uncertainty quantification
"automl": ["xgboost"], # for automl with scikit-learn
"jax-cpu": ["jax[cpu]>=0.3.25", "numpyro[cpu]"],
"jax-cuda": ["jax[cuda]>=0.3.25", "numpyro[cuda]"],
"hps": [], # hyperparameter search (already the base requirements)
"nas": REQUIRED_NAS, # neural architecture search
"hps-tl": REQUIRED_TL_SDV, # transfer learning for bayesian optimization,
"mpi": ["mpi4py>=3.1.3"],
"ray": ["ray[default]>=1.3.0"],
"redis": ["redis[hiredis]"],
"dev": [
# Test
"codecov",
"pytest",
"pytest-cov",
# Packaging
"twine",
# Formatter and Linter
"black==22.6.0",
"flake8==5.0.4",
"pre-commit",
"rstcheck",
# Documentation
"GitPython",
"ipython",
"nbsphinx",
"Sphinx~=3.5.4",
"sphinx-book-theme==0.3.2",
"sphinx-copybutton",
"sphinx-gallery",
"sphinx_lfs_content",
"sphinx-togglebutton",
],
"analytics": [
"altair",
"jupyter",
"jupyter_contrib_nbextensions>=0.5.1",
"nbconvert<6",
"streamlit",
"streamlit-aggrid",
"tinydb",
],
"hvd": ["horovod>=0.21.3", "mpi4py>=3.0.0"],
}
# Default dependencies for DeepHyper
DEFAULT_DEPENDENCIES = REQUIRED[:]
DEFAULT_DEPENDENCIES += EXTRAS["nas"]
DEFAULT_DEPENDENCIES += EXTRAS["autodeuq"]
DEFAULT_DEPENDENCIES += EXTRAS["hps-tl"]
DEFAULT_DEPENDENCIES += EXTRAS["jax-cpu"]
EXTRAS["default"] = DEFAULT_DEPENDENCIES
# Useful commands to build/upload the wheel to PyPI
class UploadCommand(Command):
    """Custom ``setup.py upload`` command: build the package and push it to PyPI."""

    description = "Build and publish the package."
    user_options = []

    @staticmethod
    def status(s):
        """Print a message in bold."""
        print(f"\033[1m{s}\033[0m")

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        except OSError:
            # Nothing to clean up on a fresh checkout.
            pass
        self.status("Building Source and Wheel (universal) distribution…")
        os.system(f"{sys.executable} setup.py sdist bdist_wheel --universal")
        self.status("Uploading the package to PyPI via Twine…")
        os.system("twine upload dist/*")
        sys.exit()
class TestUploadCommand(Command):
    """Custom ``setup.py testupload`` command: build and push the package to Test PyPI."""

    description = "Build and publish the package to test.pypi."
    user_options = []

    @staticmethod
    def status(s):
        """Print a message in bold."""
        print(f"\033[1m{s}\033[0m")

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        except OSError:
            # No previous build directory to remove.
            pass
        self.status("Building Source and Wheel (universal) distribution…")
        os.system(f"{sys.executable} setup.py sdist bdist_wheel --universal")
        self.status("Uploading the package to PyPI via Twine…")
        os.system("twine upload --repository-url https://test.pypi.org/legacy/ dist/*")
        sys.exit()
class TestInstallCommand(Command):
    """Custom ``setup.py testinstall`` command: pip-install deephyper from Test PyPI."""

    description = "Install deephyper from TestPyPI."
    user_options = []

    @staticmethod
    def status(s):
        """Print a message in bold."""
        print(f"\033[1m{s}\033[0m")

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        self.status("Downloading the package from Test PyPI and installing it")
        os.system("pip install --index-url https://test.pypi.org/simple/ deephyper")
        sys.exit()
# Where the magic happens: register the runtime dependencies, the optional
# extras and the custom commands defined above.
setup(
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    cmdclass={
        "upload": UploadCommand,
        "testupload": TestUploadCommand,
        "testinstall": TestInstallCommand,
    },
)
| 5,761 | 25.552995 | 93 | py |
deephyper | deephyper-master/examples/plot_from_serial_to_parallel_hyperparameter_search.py | # -*- coding: utf-8 -*-
"""
From Serial to Parallel Evaluations
===================================
**Author(s)**: Romain Egele.
This example demonstrates the advantages of parallel evaluations over serial evaluations. We start by defining an artificial black-box ``run``-function by using the Ackley function:
.. image:: https://www.sfu.ca/~ssurjano/ackley.png
:width: 400
:alt: Ackley Function in 2D
We will use the ``time.sleep`` function to simulate a budget of 2 seconds of execution on average, which helps illustrate the advantage of parallel evaluations. The ``@profile`` decorator is useful to collect the starting/ending times of the ``run``-function execution, which lets us know exactly when we are inside the black-box. When using this decorator, the ``run``-function will return a dictionary with 2 new keys ``"timestamp_start"`` and ``"timestamp_end"``. The ``run``-function is defined in a separate module because of the "multiprocessing" backend that we are using in this example.
.. literalinclude:: ../../examples/black_box_util.py
:language: python
After defining the black-box we can continue with the definition of our main script:
"""
import black_box_util as black_box
# %%
# Then we define the variable(s) we want to optimize. For this problem we optimize Ackley in a 2-dimensional search space; the true minimum is located at ``(0, 0)``.
from deephyper.problem import HpProblem
nb_dim = 2
problem = HpProblem()
for i in range(nb_dim):
problem.add_hyperparameter((-32.768, 32.768), f"x{i}")
problem
# %%
# Then we define the serial search by creating a ``"serial"``-evaluator and we execute the search with a fixed time-budget of 2 minutes (i.e., 120 seconds).
if __name__ == "__main__":
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
from deephyper.search.hps import CBO
# we give a budget of 2 minutes for each search
timeout = 120
serial_evaluator = Evaluator.create(
black_box.run_ackley,
method="serial",
method_kwargs={"callbacks": [TqdmCallback()]},
)
results = {}
serial_search = CBO(problem, serial_evaluator, random_state=42)
results["serial"] = serial_search.search(timeout=timeout)
# %%
# After executing the serial search for 2 minutes, we can create a parallel search which uses the ``"process"``-evaluator and defines 5 parallel workers. The search is also executed for 2 minutes.
if __name__ == "__main__":
parallel_evaluator = Evaluator.create(
black_box.run_ackley,
method="process",
method_kwargs={"num_workers": 5, "callbacks": [TqdmCallback()]},
)
parallel_search = CBO(problem, parallel_evaluator, random_state=42)
results["parallel"] = parallel_search.search(timeout=timeout)
# %%
# Finally, we plot the results from the collected DataFrame. The execution time is used as the x-axis, which helps us visualize the advantages of the parallel search.
if __name__ == "__main__":
import matplotlib.pyplot as plt
plt.figure()
for strategy, df in results.items():
plt.scatter(df.timestamp_end, df.objective, label=strategy)
plt.plot(df.timestamp_end, df.objective.cummax())
plt.xlabel("Time (sec.)")
plt.ylabel("Objective")
plt.grid()
plt.legend()
plt.show()
| 3,321 | 39.512195 | 590 | py |
deephyper | deephyper-master/examples/plot_transfer_learning_for_hps.py | # -*- coding: utf-8 -*-
"""
Transfer Learning for Hyperparameter Search
===========================================
**Author(s)**: Romain Egele.
In this example we present how to apply transfer-learning for hyperparameter search. Let's assume you have a bunch of similar tasks, for example the search of neural network hyperparameters for different datasets. You can easily imagine that close choices of hyperparameters can perform well on these different datasets, even if some light additional tuning can help improve the performance. Therefore, you can perform an expensive search once, then reuse the explored set of hyperparameters of this search and bias the following search with it. Here, we will use a cheap-to-compute and easy-to-understand example where we maximize the :math:`f(x) = -\sum_{i=0}^{n-1} x_i^2` function. In this case the size of the problem can be defined by the variable :math:`n`. We will start by optimizing the small-size problem where :math:`n=1`, then apply transfer-learning from it to optimize the larger-size problem where :math:`n=2`, and visualize the difference if we were not to apply transfer-learning on this larger problem instance.
Let us start by defining the run-functions of the small and large scale problems:
"""
# %%
import functools
def run(config: dict, N: int) -> float:
    """Objective to maximize: the negated sum of squares of x0..x{N-1}."""
    squared_terms = (config[f"x{i}"] ** 2 for i in range(N))
    return -sum(squared_terms)
run_small = functools.partial(run, N=1)
run_large = functools.partial(run, N=2)
# %%
# Then, we can define the hyperparameter problem space based on :math:`n`
from deephyper.problem import HpProblem
N = 1
problem_small = HpProblem()
for i in range(N):
problem_small.add_hyperparameter((-10.0, 10.0), f"x{i}")
problem_small
# %%
N = 2
problem_large = HpProblem()
for i in range(N):
problem_large.add_hyperparameter((-10.0, 10.0), f"x{i}")
problem_large
# %%
# Then, we define setup the search and execute it:
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
from deephyper.search.hps import CBO
results = {}
max_evals = 20
evaluator_small = Evaluator.create(
run_small, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
)
search_small = CBO(problem_small, evaluator_small, random_state=42)
results["Small"] = search_small.search(max_evals)
# %%
evaluator_large = Evaluator.create(
run_large, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
)
search_large = CBO(problem_large, evaluator_large, random_state=42)
results["Large"] = search_large.search(max_evals)
# %%
evaluator_large_tl = Evaluator.create(
run_large, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
)
search_large_tl = CBO(problem_large, evaluator_large_tl, random_state=42)
search_large_tl.fit_generative_model(results["Small"])
results["Large+TL"] = search_large_tl.search(max_evals)
# %%
# Finally, we compare the results and quickly see that transfer-learning provided a considerable speed-up for the search:
import matplotlib.pyplot as plt
plt.figure()
for strategy, df in results.items():
x = [i for i in range(len(df))]
plt.scatter(x, df.objective, label=strategy, alpha=0.5)
plt.plot(x, df.objective.cummax(), alpha=0.5)
plt.xlabel("Time (sec.)")
plt.ylabel("Objective")
plt.grid()
plt.legend()
plt.show()
| 3,293 | 36.431818 | 1,014 | py |
deephyper | deephyper-master/examples/black_box_util.py | """Set of Black-Box functions useful to build examples.
"""
import time
import numpy as np
from deephyper.evaluator import profile
def ackley(x, a=20, b=0.2, c=2 * np.pi):
    """Ackley benchmark function; its global minimum is 0 at the origin.

    y = -a*exp(-b*sqrt(mean(x^2))) - exp(mean(cos(c*x))) + a + e
    """
    dim = len(x)
    sum_sq = np.sum(x**2)
    sum_cos = np.sum(np.cos(c * x))
    exp_term = -a * np.exp(-b * np.sqrt(sum_sq / dim))
    cos_term = -np.exp(sum_cos / dim)
    return exp_term + cos_term + a + np.exp(1)
@profile
def run_ackley(config, sleep_loc=2, sleep_scale=0.5):
    """Black-box objective: negated Ackley value with a simulated delay."""
    # Simulate the latency of an expensive black-box evaluation.
    if sleep_loc > 0:
        delay = np.random.normal(loc=sleep_loc, scale=sleep_scale)
        time.sleep(max(delay, 0))
    values = np.array([config[k] for k in config if "x" in k])
    values = np.asarray_chkfinite(values)  # ValueError if any NaN or Inf
    # Negated because the search maximizes the objective.
    return -ackley(values)
| 820 | 26.366667 | 68 | py |
deephyper | deephyper-master/examples/plot_notify_failures_hyperparameter_search.py | # -*- coding: utf-8 -*-
"""
Notify Failures in Hyperparameter optimization
==============================================
**Author(s)**: Romain Egele.
This example demonstrates how to handle failure of objectives in hyperparameter search. In many cases, such as software auto-tuning (where we minimize the run-time of a software application), some configurations can create run-time errors and therefore no scalar objective is returned. A default choice could be to return in this case the worst-case objective if known, and it can be done inside the ``run``-function. Other possibilities are to ignore these configurations or to replace them with the running mean/min objective. To illustrate such a use-case we define an artificial ``run``-function which will fail when one of its input parameters is greater than 0.5. To define a failure, it is possible to return a "string" value with ``"F"`` as prefix such as:
"""
def run(config: dict) -> float:
    """Return the objective ``x``, or the failure marker when ``y`` > 0.5."""
    failed = config["y"] > 0.5
    return "F_postfix" if failed else config["x"]
# %%
# Then, we define the corresponding hyperparameter problem where ``x`` is the value to maximize and ``y`` is a value impacting the appearance of failures.
from deephyper.problem import HpProblem
problem = HpProblem()
problem.add_hyperparameter([1, 2, 4, 8, 16, 32], "x")
problem.add_hyperparameter((0.0, 1.0), "y")
print(problem)
# %%
# Then, we define a centralized Bayesian optimization (CBO) search (i.e., master-worker architecture) which uses the Random-Forest regressor as default surrogate model. We will compare the ``ignore`` strategy which filters-out failed configurations, the ``mean`` strategy which replaces a failure by the running mean of collected objectives and the ``min`` strategy which replaces by the running min of collected objectives.
from deephyper.search.hps import CBO
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
results = {}
max_evals = 30
for failure_strategy in ["ignore", "mean", "min"]:
# for failure_strategy in ["min"]:
print(f"Executing failure strategy: {failure_strategy}")
evaluator = Evaluator.create(
run, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
)
search = CBO(
problem,
evaluator,
filter_failures=failure_strategy,
log_dir=f"search_{failure_strategy}",
random_state=42,
)
results[failure_strategy] = search.search(max_evals)
# %%
# Finally we plot the collected results
import matplotlib.pyplot as plt
import numpy as np
plt.figure()
for i, (failure_strategy, df) in enumerate(results.items()):
plt.subplot(3, 1, i + 1)
if df.objective.dtype != np.float64:
x = np.arange(len(df))
mask_failed = np.where(df.objective.str.startswith("F"))[0]
mask_success = np.where(~df.objective.str.startswith("F"))[0]
x_success, x_failed = x[mask_success], x[mask_failed]
y_success = df["objective"][mask_success].astype(float)
plt.scatter(x_success, y_success, label=failure_strategy)
plt.scatter(x_failed, np.zeros(x_failed.shape), marker="v", color="red")
plt.xlabel(r"Iterations")
plt.ylabel(r"Objective")
plt.legend()
plt.show()
| 3,235 | 42.146667 | 760 | py |
deephyper | deephyper-master/examples/plot_profile_worker_utilization.py | # -*- coding: utf-8 -*-
"""
Profile the Worker Utilization
==============================
**Author(s)**: Romain Egele.
This example demonstrates the advantages of parallel evaluations over serial evaluations. We start by defining an artificial black-box ``run``-function by using the Ackley function:
.. image:: https://www.sfu.ca/~ssurjano/ackley.png
:width: 400
:alt: Ackley Function in 2D
We will use the ``time.sleep`` function to simulate a budget of 2 seconds of execution on average, which helps illustrate the advantage of parallel evaluations. The ``@profile`` decorator is useful to collect the starting/ending times of the ``run``-function execution, which lets us know exactly when we are inside the black-box. This decorator is necessary when profiling the worker utilization. When using this decorator, the ``run``-function will return a dictionary with 2 new keys ``"timestamp_start"`` and ``"timestamp_end"``. The ``run``-function is defined in a separate module because of the "multiprocessing" backend that we are using in this example.
.. literalinclude:: ../../examples/black_box_util.py
:language: python
:emphasize-lines: 19-28
:linenos:
After defining the black-box we can continue with the definition of our main script:
"""
import black_box_util as black_box
# %%
# Then we define the variable(s) we want to optimize. For this problem we optimize Ackley in a 2-dimensional search space; the true minimum is located at ``(0, 0)``.
from deephyper.problem import HpProblem
nb_dim = 2
problem = HpProblem()
for i in range(nb_dim):
problem.add_hyperparameter((-32.768, 32.768), f"x{i}")
problem
# %%
# Then we define a parallel search.
if __name__ == "__main__":
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
from deephyper.search.hps import CBO
timeout = 20
num_workers = 4
results = {}
evaluator = Evaluator.create(
black_box.run_ackley,
method="process",
method_kwargs={
"num_workers": num_workers,
"callbacks": [TqdmCallback()],
},
)
search = CBO(problem, evaluator, random_state=42)
results = search.search(timeout=timeout)
# %%
# Finally, we plot the results from the collected DataFrame.
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
    def compile_profile(df):
        """Take the results dataframe as input and return the number of jobs running at a given timestamp."""
        # Build (timestamp, +1/-1) events: +1 when a job starts, -1 when it ends.
        history = []
        for _, row in df.iterrows():
            history.append((row["timestamp_start"], 1))
            history.append((row["timestamp_end"], -1))
        # Process events in chronological order.
        history = sorted(history, key=lambda v: v[0])
        nb_workers = 0
        timestamp = [0]
        n_jobs_running = [0]
        # The running sum of the events is the number of concurrently
        # running jobs at each event time.
        for time, incr in history:
            nb_workers += incr
            timestamp.append(time)
            n_jobs_running.append(nb_workers)
        return timestamp, n_jobs_running
plt.figure()
plt.subplot(2, 1, 1)
plt.scatter(results.timestamp_end, results.objective)
plt.plot(results.timestamp_end, results.objective.cummax())
plt.xlabel("Time (sec.)")
plt.ylabel("Objective")
plt.grid()
plt.subplot(2, 1, 2)
x, y = compile_profile(results)
y = np.asarray(y) / num_workers * 100
plt.step(
x,
y,
where="pre",
)
plt.ylim(0, 100)
plt.xlabel("Time (sec.)")
plt.ylabel("Worker Utilization (%)")
plt.tight_layout()
plt.show()
| 3,542 | 31.805556 | 657 | py |
deephyper | deephyper-master/tests/conftest.py | import pytest
# -- Control skipping of tests according to command line option
def pytest_addoption(parser):
    """Register the ``--run`` option used to select which test marks execute."""
    parser.addoption("--run", default="fast,hps", help="Select tests to run.")
def pytest_collection_modifyitems(config, items):
    """Skip any collected test whose marks are not all in the ``--run`` selection.

    Fixes: a lambda was assigned to a name (PEP 8 / E731) and the skip reason
    used an f-string with no placeholders; both replaced with plain forms.
    """
    selected_marks = set(config.getoption("--run").split(","))
    skip_mark = pytest.mark.skip(reason="need --run with compatible marks to run")
    for item in items:
        item_marks = {m.name for m in item.iter_markers()}
        # Keep an item only when every one of its marks was selected.
        if not item_marks.issubset(selected_marks):
            item.add_marker(skip_mark)
# -- Incremental testing - test steps
def pytest_runtest_makereport(item, call):
    """Record a failing step on its parent for "incremental" test classes."""
    if "incremental" not in item.keywords:
        return
    if call.excinfo is not None:
        # Remember the failed step so later steps can be xfailed.
        item.parent._previousfailed = item
def pytest_runtest_setup(item):
    """Xfail an "incremental" step when an earlier step already failed."""
    if "incremental" not in item.keywords:
        return
    previousfailed = getattr(item.parent, "_previousfailed", None)
    if previousfailed is not None:
        pytest.xfail("previous test failed (%s)" % previousfailed.name)
| 1,138 | 28.973684 | 83 | py |
deephyper | deephyper-master/tests/test_quickstart.py | import pytest
def run(job):
    """Objective: evaluate the selected function at the suggested parameters."""
    # The suggested parameters are accessible in job.parameters (dict).
    params = job.parameters
    x, b = params["x"], params["b"]
    if params["function"] == "linear":
        y = x + b
    elif params["function"] == "cubic":
        y = x**3 + b
    # The search maximizes this returned value.
    return y
@pytest.mark.hps
def test_quickstart(tmp_path):
    """End-to-end smoke test: run a small CBO search over a mixed search space."""
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO
    from deephyper.evaluator import Evaluator
    # define the variable you want to optimize
    problem = HpProblem()
    problem.add_hyperparameter((-10.0, 10.0), "x")  # real parameter
    problem.add_hyperparameter((0, 10), "b")  # discrete parameter
    problem.add_hyperparameter(["linear", "cubic"], "function")  # categorical parameter
    # define the evaluator to distribute the computation
    evaluator = Evaluator.create(
        run,
        method="process",
        method_kwargs={
            "num_workers": 2,
        },
    )
    # define your search and execute it
    search = CBO(problem, evaluator, log_dir=tmp_path, random_state=42)
    results = search.search(max_evals=100)
    print(results)
    # With x in [-10, 10], the cubic branch can reach 10**3 + b, so the best
    # objective found should exceed 1000 in magnitude.
    assert abs(results.objective.max()) > 1000
    assert "p:x" in results.columns
    assert "p:b" in results.columns
    assert "p:function" in results.columns
    assert len(results) == 100
if __name__ == "__main__":
    # Allow running this test directly, writing search logs to the current directory.
    test_quickstart(".")
| 1,432 | 25.537037 | 88 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_space.py | import pytest
import numbers
import numpy as np
import os
import yaml
from tempfile import NamedTemporaryFile
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises_regex
from deephyper.skopt import Optimizer
from deephyper.skopt.space import Space
from deephyper.skopt.space import Real
from deephyper.skopt.space import Integer
from deephyper.skopt.space import Categorical
from deephyper.skopt.space import check_dimension as space_check_dimension
from deephyper.skopt.utils import normalize_dimensions
def check_dimension(Dimension, vals, random_val):
    """Check equality semantics and a seeded sample of a Dimension type."""
    dim = Dimension(*vals)
    # Identical bounds compare equal; shifting either bound breaks equality.
    assert_equal(dim, Dimension(*vals))
    assert dim != Dimension(vals[0], vals[1] + 1)
    assert dim != Dimension(vals[0] + 1, vals[1])
    sample = dim.rvs(random_state=1)
    if isinstance(sample, list):
        sample = np.array(sample)
    assert_equal(sample, random_val)
def check_categorical(vals, random_val):
    """Check Categorical equality semantics and a seeded sample."""
    cat = Categorical(vals)
    # Identical category tuples compare equal; a different tail breaks equality.
    assert_equal(cat, Categorical(vals))
    assert cat != Categorical(vals[:-1] + ("zzz",))
    assert_equal(cat.rvs(random_state=1), random_val)
def check_limits(value, low, high):
    """Assert that every entry of ``value`` lies within [low, high]."""
    arr = np.array(value) if isinstance(value, list) else value
    assert np.all(low <= arr)
    assert np.all(arr <= high)
@pytest.mark.hps
def test_dimensions():
    """Smoke-test Real/Integer dimensions and Categorical over several value types."""
    dimension_cases = [
        (Real, (1.0, 4.0), 2.251066014107722),
        (Real, (1, 4), 2.251066014107722),
        (Integer, (1, 4), 2),
        (Integer, (1.0, 4.0), 2),
        (Integer, (1, 4), 2),
    ]
    for dim_cls, bounds, expected in dimension_cases:
        check_dimension(dim_cls, bounds, expected)
    categorical_cases = [
        (("a", "b", "c", "d"), "b"),
        ((1.0, 2.0, 3.0, 4.0), 2.0),
        ((1, 2, 3, 4), 2),
    ]
    for categories, expected in categorical_cases:
        check_categorical(categories, expected)
@pytest.mark.hps
def test_real_log_sampling_in_bounds():
    """A log-uniform Real round-trips values at (and just below) its upper bound."""
    dim = Real(low=1, high=32, prior="log-uniform", transform="normalize")
    # round trip a value that is within the bounds of the space
    #
    # x = dim.inverse_transform(dim.transform(31.999999999999999))
    for n in (32.0, 31.999999999999999):
        round_tripped = dim.inverse_transform(dim.transform([n]))
        assert np.allclose([n], round_tripped)
        assert n in dim
        assert round_tripped in dim
@pytest.mark.hps
def test_real():
    """Uniform and log-uniform Real: samples stay in bounds, transforms round-trip."""
    a = Real(1, 25)
    for i in range(50):
        r = a.rvs(random_state=i)
        check_limits(r, 1, 25)
        assert r in a
    random_values = a.rvs(random_state=0, n_samples=10)
    assert len(random_values) == 10
    # With no prior, transform/inverse_transform are the identity.
    assert_array_equal(a.transform(random_values), random_values)
    assert_array_equal(a.inverse_transform(random_values), random_values)
    log_uniform = Real(10**-5, 10**5, prior="log-uniform")
    assert log_uniform != Real(10**-5, 10**5)
    for i in range(50):
        random_val = log_uniform.rvs(random_state=i)
        check_limits(random_val, 10**-5, 10**5)
    random_values = log_uniform.rvs(random_state=0, n_samples=10)
    assert len(random_values) == 10
    # The log-uniform transform is log10 of the raw sample.
    transformed_vals = log_uniform.transform(random_values)
    assert_array_equal(transformed_vals, np.log10(random_values))
    assert_array_equal(log_uniform.inverse_transform(transformed_vals), random_values)
@pytest.mark.hps
def test_real_bounds():
    """Membership of a Real interval is inclusive of both endpoints."""
    # should give same answer as using check_limits() but this is easier
    # to read
    a = Real(1.0, 2.1)
    assert 0.99 not in a
    assert 1.0 in a
    assert 2.09 in a
    assert 2.1 in a
    # The next representable float above the upper bound is excluded.
    assert np.nextafter(2.1, 3.0) not in a
@pytest.mark.hps
def test_integer():
    """Integer dimension: samples stay in bounds and transforms are the identity."""
    a = Integer(1, 10)
    for i in range(50):
        r = a.rvs(random_state=i)
        assert 1 <= r
        assert 11 >= r
        assert r in a
    random_values = a.rvs(random_state=0, n_samples=10)
    assert_array_equal(random_values.shape, (10))
    assert_array_equal(a.transform(random_values), random_values)
    assert_array_equal(a.inverse_transform(random_values), random_values)
@pytest.mark.hps
def test_categorical_transform():
    """Categorical.transform one-hot encodes; inverse_transform decodes back."""
    categories = ["apple", "orange", "banana", None, True, False, 3]
    cat = Categorical(categories)
    # One one-hot column per category, in declaration order.
    apple = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    orange = [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    banana = [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]
    none = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
    true = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
    false = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    three = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
    assert_equal(cat.transformed_size, 7)
    assert_equal(cat.transformed_size, cat.transform(["apple"]).size)
    assert_array_equal(
        cat.transform(categories), [apple, orange, banana, none, true, false, three]
    )
    assert_array_equal(cat.transform(["apple", "orange"]), [apple, orange])
    assert_array_equal(cat.transform(["apple", "banana"]), [apple, banana])
    assert_array_equal(cat.inverse_transform([apple, orange]), ["apple", "orange"])
    assert_array_equal(cat.inverse_transform([apple, banana]), ["apple", "banana"])
    ent_inverse = cat.inverse_transform(
        [apple, orange, banana, none, true, false, three]
    )
    assert_array_equal(ent_inverse, categories)
@pytest.mark.hps
def test_categorical_transform_binary():
    """A two-category Categorical encodes as a single 0/1 column, not one-hot."""
    categories = ["apple", "orange"]
    cat = Categorical(categories)
    apple = [0.0]
    orange = [1.0]
    assert_equal(cat.transformed_size, 1)
    assert_equal(cat.transformed_size, cat.transform(["apple"]).size)
    assert_array_equal(cat.transform(categories), [apple, orange])
    assert_array_equal(cat.transform(["apple", "orange"]), [apple, orange])
    assert_array_equal(cat.inverse_transform([apple, orange]), ["apple", "orange"])
    ent_inverse = cat.inverse_transform([apple, orange])
    assert_array_equal(ent_inverse, categories)
@pytest.mark.hps
def test_categorical_repr():
    """repr lists short category tuples in full and elides long ones with '...'."""
    small_cat = Categorical([1, 2, 3, 4, 5])
    assert small_cat.__repr__() == "Categorical(categories=(1, 2, 3, 4, 5), prior=None)"
    big_cat = Categorical([1, 2, 3, 4, 5, 6, 7, 8])
    assert (
        big_cat.__repr__()
        == "Categorical(categories=(1, 2, 3, ..., 6, 7, 8), prior=None)"
    )
@pytest.mark.hps
def test_space_consistency():
# Reals (uniform)
s1 = Space([Real(0.0, 1.0)])
s2 = Space([Real(0.0, 1.0)])
s3 = Space([Real(0, 1)])
s4 = Space([(0.0, 1.0)])
s5 = Space([(0.0, 1.0, "uniform")])
s6 = Space([(0, 1.0)])
s7 = Space([(np.float64(0.0), 1.0)])
s8 = Space([(0, np.float64(1.0))])
a1 = s1.rvs(n_samples=10, random_state=0)
a2 = s2.rvs(n_samples=10, random_state=0)
a3 = s3.rvs(n_samples=10, random_state=0)
a4 = s4.rvs(n_samples=10, random_state=0)
a5 = s5.rvs(n_samples=10, random_state=0)
assert_equal(s1, s2)
assert_equal(s1, s3)
assert_equal(s1, s4)
assert_equal(s1, s5)
assert_equal(s1, s6)
assert_equal(s1, s7)
assert_equal(s1, s8)
assert_array_equal(a1, a2)
assert_array_equal(a1, a3)
assert_array_equal(a1, a4)
assert_array_equal(a1, a5)
# Reals (log-uniform)
s1 = Space([Real(10**-3.0, 10**3.0, prior="log-uniform", base=10)])
s2 = Space([Real(10**-3.0, 10**3.0, prior="log-uniform", base=10)])
s3 = Space([Real(10**-3, 10**3, prior="log-uniform", base=10)])
s4 = Space([(10**-3.0, 10**3.0, "log-uniform", 10)])
s5 = Space([(np.float64(10**-3.0), 10**3.0, "log-uniform", 10)])
a1 = s1.rvs(n_samples=10, random_state=0)
a2 = s2.rvs(n_samples=10, random_state=0)
a3 = s3.rvs(n_samples=10, random_state=0)
a4 = s4.rvs(n_samples=10, random_state=0)
assert_equal(s1, s2)
assert_equal(s1, s3)
assert_equal(s1, s4)
assert_equal(s1, s5)
assert_array_equal(a1, a2)
assert_array_equal(a1, a3)
assert_array_equal(a1, a4)
# Integers
s1 = Space([Integer(1, 5)])
s2 = Space([Integer(1.0, 5.0)])
s3 = Space([(1, 5)])
s4 = Space([(np.int64(1.0), 5)])
s5 = Space([(1, np.int64(5.0))])
a1 = s1.rvs(n_samples=10, random_state=0)
a2 = s2.rvs(n_samples=10, random_state=0)
a3 = s3.rvs(n_samples=10, random_state=0)
assert_equal(s1, s2)
assert_equal(s1, s3)
assert_equal(s1, s4)
assert_equal(s1, s5)
assert_array_equal(a1, a2)
assert_array_equal(a1, a3)
# Integers (log-uniform)
s1 = Space([Integer(16, 512, prior="log-uniform", base=2)])
s2 = Space([Integer(16.0, 512.0, prior="log-uniform", base=2)])
s3 = Space([(16, 512, "log-uniform", 2)])
s4 = Space([(np.int64(16.0), 512, "log-uniform", 2)])
s5 = Space([(16, np.int64(512.0), "log-uniform", 2)])
a1 = s1.rvs(n_samples=10, random_state=0)
a2 = s2.rvs(n_samples=10, random_state=0)
a3 = s3.rvs(n_samples=10, random_state=0)
assert_equal(s1, s2)
assert_equal(s1, s3)
assert_equal(s1, s4)
assert_equal(s1, s5)
assert_array_equal(a1, a2)
assert_array_equal(a1, a3)
# Categoricals
s1 = Space([Categorical(["a", "b", "c"])])
s2 = Space([Categorical(["a", "b", "c"])])
s3 = Space([["a", "b", "c"]])
a1 = s1.rvs(n_samples=10, random_state=0)
a2 = s2.rvs(n_samples=10, random_state=0)
a3 = s3.rvs(n_samples=10, random_state=0)
assert_equal(s1, s2)
assert_array_equal(a1, a2)
assert_equal(s1, s3)
assert_array_equal(a1, a3)
s1 = Space([(True, False)])
s2 = Space([Categorical([True, False])])
s3 = Space([np.array([True, False])])
assert s1 == s2 == s3
# Categoricals Integer
s1 = Space([Categorical([1, 2, 3])])
s2 = Space([Categorical([1, 2, 3])])
s3 = Space([[1, 2, 3]])
a1 = s1.rvs(n_samples=10, random_state=0)
a2 = s2.rvs(n_samples=10, random_state=0)
a3 = s3.rvs(n_samples=10, random_state=0)
assert_equal(s1, s2)
assert_array_equal(a1, a2)
assert_equal(s1, s3)
assert_array_equal(a1, a3)
s1 = Space([(True, False)])
s2 = Space([Categorical([True, False])])
s3 = Space([np.array([True, False])])
assert s1 == s2 == s3
@pytest.mark.hps
def test_space_api():
space = Space(
[(0.0, 1.0), (-5, 5), ("a", "b", "c"), (1.0, 5.0, "log-uniform"), ("e", "f")]
)
cat_space = Space([(1, "r"), (1.0, "r")])
assert isinstance(cat_space.dimensions[0], Categorical)
assert isinstance(cat_space.dimensions[1], Categorical)
assert_equal(len(space.dimensions), 5)
assert isinstance(space.dimensions[0], Real)
assert isinstance(space.dimensions[1], Integer)
assert isinstance(space.dimensions[2], Categorical)
assert isinstance(space.dimensions[3], Real)
assert isinstance(space.dimensions[4], Categorical)
samples = space.rvs(n_samples=10, random_state=0)
assert_equal(len(samples), 10)
assert_equal(len(samples[0]), 5)
assert isinstance(samples, list)
for n in range(4):
assert isinstance(samples[n], list)
assert isinstance(samples[0][0], numbers.Real)
assert isinstance(samples[0][1], numbers.Integral)
assert isinstance(samples[0][2], str)
assert isinstance(samples[0][3], numbers.Real)
assert isinstance(samples[0][4], str)
samples_transformed = space.transform(samples)
assert_equal(samples_transformed.shape[0], len(samples))
assert_equal(samples_transformed.shape[1], 1 + 1 + 3 + 1 + 1)
# our space contains mixed types, this means we can't use
# `array_allclose` or similar to check points are close after a round-trip
# of transformations
for orig, round_trip in zip(samples, space.inverse_transform(samples_transformed)):
assert space.distance(orig, round_trip) < 1.0e-8
samples = space.inverse_transform(samples_transformed)
assert isinstance(samples[0][0], numbers.Real)
assert isinstance(samples[0][1], numbers.Integral)
assert isinstance(samples[0][2], str)
assert isinstance(samples[0][3], numbers.Real)
assert isinstance(samples[0][4], str)
for b1, b2 in zip(
space.bounds,
[
(0.0, 1.0),
(-5, 5),
np.asarray(["a", "b", "c"]),
(1.0, 5.0),
np.asarray(["e", "f"]),
],
):
assert_array_equal(b1, b2)
for b1, b2 in zip(
space.transformed_bounds,
[
(0.0, 1.0),
(-5, 5),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 1.0),
(np.log10(1.0), np.log10(5.0)),
(0.0, 1.0),
],
):
assert_array_equal(b1, b2)
@pytest.mark.hps
def test_space_from_space():
    """A Space built from another Space instance must compare equal to it."""
    dimensions = [
        (0.0, 1.0),
        (-5, 5),
        ("a", "b", "c"),
        (1.0, 5.0, "log-uniform"),
        ("e", "f"),
    ]
    original = Space(dimensions)
    rebuilt = Space(original)
    assert_equal(original, rebuilt)
@pytest.mark.hps
def test_constant_property():
    """Single-value dimensions are flagged constant and counted by the Space."""
    space = Space(
        [(0.0, 1.0), (1,), ("a", "b", "c"), (1.0, 5.0, "log-uniform"), ("e",)]
    )
    # Dimensions 1 and 4 are one-element tuples, i.e. constants.
    assert space.n_constant_dimensions == 2
    for i in [1, 4]:
        assert space.dimensions[i].is_constant
    for i in [0, 2, 3]:
        assert not space.dimensions[i].is_constant
@pytest.mark.hps
def test_set_get_transformer():
# can you pass a Space instance to the Space constructor?
space = Space(
[(0.0, 1.0), (-5, 5), ("a", "b", "c"), (1.0, 5.0, "log-uniform"), ("e", "f")]
)
transformer = space.get_transformer()
assert_array_equal(
["identity", "identity", "onehot", "identity", "onehot"], transformer
)
space.set_transformer("normalize")
transformer = space.get_transformer()
assert_array_equal(["normalize"] * 5, transformer)
space.set_transformer(transformer)
assert_array_equal(transformer, space.get_transformer())
space.set_transformer_by_type("label", Categorical)
assert space.dimensions[2].transform(["a"]) == [0]
@pytest.mark.hps
def test_normalize():
    """Under the "normalize" transformer the lower corner of the space maps
    to the zero vector and transform/inverse_transform round-trip exactly."""
    space = Space(
        [(0.0, 1.0), (-5, 5), ("a", "b", "c"), (1.0, 5.0, "log-uniform"), ("e", "f")]
    )
    space.set_transformer("normalize")
    # X is the lower bound of every dimension (first category for categoricals).
    X = [[0.0, -5, "a", 1.0, "e"]]
    Xt = np.zeros((1, 5))
    assert_array_equal(space.transform(X), Xt)
    assert_array_equal(space.inverse_transform(Xt), X)
    assert_array_equal(space.inverse_transform(space.transform(X)), X)
@pytest.mark.hps
def test_normalize_types():
# can you pass a Space instance to the Space constructor?
space = Space([(0.0, 1.0), Integer(-5, 5, dtype=int), (True, False)])
space.set_transformer("normalize")
X = [[0.0, -5, False]]
Xt = np.zeros((1, 3))
assert_array_equal(space.transform(X), Xt)
assert_array_equal(space.inverse_transform(Xt), X)
assert_array_equal(space.inverse_transform(space.transform(X)), X)
assert isinstance(space.inverse_transform(Xt)[0][0], float)
assert isinstance(space.inverse_transform(Xt)[0][1], int)
assert isinstance(space.inverse_transform(Xt)[0][2], (np.bool_, bool))
@pytest.mark.hps
def test_normalize_real():
    """Round-trip and range checks for Real dimensions under the
    "normalize" transform: uniform and log-uniform priors, plus dtype
    preservation for float, "float64" and np.float64."""
    a = Real(2.0, 30.0, transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 2, 30)
    rng = np.random.RandomState(0)
    X = rng.randn(100)
    # Rescale the samples into [2, 30] so they lie inside the bounds.
    X = 28 * (X - X.min()) / (X.max() - X.min()) + 2
    # Check transformed values are in [0, 1]
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform
    assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)
    # log-uniform prior
    a = Real(10**2.0, 10**4.0, prior="log-uniform", transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 10**2, 10**4)
    rng = np.random.RandomState(0)
    X = np.clip(10**3 * rng.randn(100), 10**2.0, 10**4.0)
    # Check transform
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform
    assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)
    a = Real(0, 1, transform="normalize", dtype=float)
    for i in range(50):
        check_limits(a.rvs(random_state=i), 0, 1)
    assert_array_equal(a.transformed_bounds, (0, 1))
    X = rng.rand()
    # Check transformed values are in [0, 1]
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform preserves the plain-float dtype.
    X_orig = a.inverse_transform(a.transform(X))
    assert isinstance(X_orig, float)
    assert_array_equal(X_orig, X)
    # dtype given as a string alias.
    a = Real(0, 1, transform="normalize", dtype="float64")
    X = np.float64(rng.rand())
    X_orig = a.inverse_transform(a.transform(X))
    assert isinstance(X_orig, np.float64)
    # dtype given as the numpy type object. (A third, byte-identical
    # repetition of the "float64"-string case was removed here.)
    a = Real(0, 1, transform="normalize", dtype=np.float64)
    X = np.float64(rng.rand())
    X_orig = a.inverse_transform(a.transform(X))
    assert isinstance(X_orig, np.float64)
@pytest.mark.hps
def test_normalize_integer():
    """Range, round-trip and dtype checks for Integer dimensions under the
    "normalize" transform, for uniform and log-uniform priors."""
    # NOTE(review): a later function in this module re-uses this test's name,
    # shadowing this definition so pytest never collects it (flake8 F811);
    # one of the two should be renamed.
    a = Integer(2, 30, transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 2, 30)
    assert_array_equal(a.transformed_bounds, (0, 1))
    rng = np.random.RandomState(0)
    X = rng.randint(2, 31, dtype=np.int64)
    # Check transformed values are in [0, 1]
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform
    X_orig = a.inverse_transform(a.transform(X))
    assert isinstance(X_orig, np.int64)
    assert_array_equal(X_orig, X)
    a = Integer(2, 30, transform="normalize", dtype=int)
    X = rng.randint(2, 31, dtype=int)
    # Check inverse transform
    X_orig = a.inverse_transform(a.transform(X))
    assert isinstance(X_orig, int)
    a = Integer(2, 30, transform="normalize", dtype="int")
    X = rng.randint(2, 31, dtype=int)
    # Check inverse transform
    X_orig = a.inverse_transform(a.transform(X))
    assert isinstance(X_orig, int)
    # log-uniform prior with base 2 must still round-trip exactly.
    a = Integer(2, 30, prior="log-uniform", base=2, transform="normalize", dtype=int)
    for i in range(50):
        check_limits(a.rvs(random_state=i), 2, 30)
    assert_array_equal(a.transformed_bounds, (0, 1))
    X = rng.randint(2, 31, dtype=int)
    # Check transformed values are in [0, 1]
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform
    X_orig = a.inverse_transform(a.transform(X))
    assert isinstance(X_orig, int)
    assert_array_equal(X_orig, X)
@pytest.mark.hps
def test_normalize_categorical():
categories = ["cat", "dog", "rat"]
a = Categorical(categories, transform="normalize")
for i in range(len(categories)):
assert a.rvs(random_state=i)[0] in categories
assert a.inverse_transform([0.0]) == [categories[0]]
assert a.inverse_transform([0.5]) == [categories[1]]
assert a.inverse_transform([1.0]) == [categories[2]]
assert_array_equal(categories, a.inverse_transform([0.0, 0.5, 1]))
categories = [1, 2, 3]
a = Categorical(categories, transform="normalize")
assert_array_equal(categories, np.sort(np.unique(a.rvs(100, random_state=1))))
assert_array_equal(categories, a.inverse_transform([0.0, 0.5, 1.0]))
categories = [1.0, 2.0, 3.0]
a = Categorical(categories, transform="normalize")
assert_array_equal(categories, np.sort(np.unique(a.rvs(100, random_state=1))))
assert_array_equal(categories, a.inverse_transform([0.0, 0.5, 1.0]))
categories = [1, 2, 3]
a = Categorical(categories, transform="string")
a.set_transformer("normalize")
assert_array_equal(categories, np.sort(np.unique(a.rvs(100, random_state=1))))
assert_array_equal(categories, a.inverse_transform([0.0, 0.5, 1.0]))
@pytest.mark.hps
def test_normalize_integer_dtypes():
    """Round-trip transform/inverse_transform for every supported integer
    dtype, given either as a string alias or as the numpy/builtin type.

    Renamed from ``test_normalize_integer``: this function previously
    redefined (and therefore shadowed) the earlier test of that name, so
    pytest only ever collected one of the two (flake8 F811).
    """
    # dtype spelled as a string alias.
    for dtype in [
        "int",
        "int8",
        "int16",
        "int32",
        "int64",
        "uint8",
        "uint16",
        "uint32",
        "uint64",
    ]:
        a = Integer(2, 30, transform="normalize", dtype=dtype)
        for X in range(2, 31):
            X_orig = a.inverse_transform(a.transform(X))
            assert_array_equal(X_orig, X)
    # dtype given as the actual type object; the round-trip must also
    # preserve the type, not just the value.
    for dtype in [
        int,
        np.int8,
        np.int16,
        np.int32,
        np.int64,
        np.uint8,
        np.uint16,
        np.uint32,
        np.uint64,
    ]:
        a = Integer(2, 30, transform="normalize", dtype=dtype)
        for X in range(2, 31):
            X_orig = a.inverse_transform(a.transform(X))
            assert_array_equal(X_orig, X)
            assert isinstance(X_orig, dtype)
def check_valid_transformation(klass):
    """Helper: *klass* accepts the two valid transform names and rejects
    anything else with a descriptive ValueError."""
    for transform in ("normalize", "identity"):
        assert klass(2, 30, transform=transform)
    assert_raises_regex(
        ValueError,
        "should be 'normalize' or 'identity'",
        klass,
        2,
        30,
        transform="not a valid transform name",
    )
@pytest.mark.hps
def test_valid_transformation():
check_valid_transformation(Integer)
check_valid_transformation(Real)
@pytest.mark.hps
def test_invalid_dimension():
assert_raises_regex(
ValueError, "has to be a list or tuple", space_check_dimension, "23"
)
# single value fixes dimension of space
space_check_dimension((23,))
@pytest.mark.hps
def test_categorical_identity():
    """The 'identity' transform must be a no-op in both directions."""
    categories = ["cat", "dog", "rat"]
    dim = Categorical(categories, transform="identity")
    samples = dim.rvs(100)
    # Every drawn value is one of the declared categories.
    assert all([t in categories for t in dim.rvs(100)])
    transformed = dim.transform(samples)
    assert_array_equal(transformed, samples)
    assert_array_equal(samples, dim.inverse_transform(transformed))
@pytest.mark.hps
def test_categorical_string():
categories = [1, 2, 3]
categories_transformed = ["1", "2", "3"]
cat = Categorical(categories, transform="string")
samples = cat.rvs(100)
assert all([t in categories for t in cat.rvs(100)])
transformed = cat.transform(samples)
assert all([t in categories_transformed for t in transformed])
assert_array_equal(samples, cat.inverse_transform(transformed))
@pytest.mark.hps
def test_categorical_distance():
    """Categorical distance is the discrete metric: 0 iff equal, else 1."""
    categories = ["car", "dog", "orange"]
    cat = Categorical(categories)
    for first in categories:
        for second in categories:
            expected = 0 if first == second else 1
            assert cat.distance(first, second) == expected
@pytest.mark.hps
def test_integer_distance():
    """Integer distance is the absolute difference between the two values."""
    dim = Integer(1, 10)
    for value in range(1, 11):
        assert_equal(dim.distance(4, value), abs(4 - value))
@pytest.mark.hps
def test_integer_distance_out_of_range():
ints = Integer(1, 10)
assert_raises_regex(
RuntimeError, "compute distance for values within", ints.distance, 11, 10
)
@pytest.mark.hps
def test_real_distance_out_of_range():
    """distance() must refuse values outside the dimension's bounds."""
    # Local renamed from the misleading `ints` — this is a Real dimension.
    reals = Real(1, 10)
    assert_raises_regex(
        RuntimeError, "compute distance for values within", reals.distance, 11, 10
    )
@pytest.mark.hps
def test_real_distance():
reals = Real(1, 10)
for i in range(1, 10 + 1):
assert_equal(reals.distance(4.1234, i), abs(4.1234 - i))
@pytest.mark.parametrize(
    "dimension, bounds",
    [(Real, (2, 1)), (Integer, (2, 1)), (Real, (2, 2)), (Integer, (2, 2))],
)
def test_dimension_bounds(dimension, bounds):
    """A lower bound >= upper bound must raise ValueError at construction."""
    with pytest.raises(ValueError) as exc:
        # Dropped the unused local binding; only the raise matters here.
        dimension(*bounds)
    assert "has to be less than the upper bound " in exc.value.args[0]
@pytest.mark.parametrize(
"dimension, name",
[
(Real(1, 2, name="learning_rate"), "learning_rate"),
(Integer(1, 100, name="n_trees"), "n_trees"),
(Categorical(["red, blue"], name="colors"), "colors"),
],
)
def test_dimension_name(dimension, name):
assert dimension.name == name
def test_dimension_name_invalid():
    """Non-string names raise ValueError; named dimensions are addressable
    on a Space by name, index, or any mix of the two.

    Renamed from ``test_dimension_name``: it previously redefined (and
    shadowed) the parametrized test of the same name above (flake8 F811).
    """
    notnames = [1, 1.0, True]
    for n in notnames:
        with pytest.raises(ValueError) as exc:
            Real(1, 2, name=n)
        # Fixed the expected message: the adjacent string literals used to
        # concatenate to "...string orNone." (missing space), which could
        # never equal the actual error message (cf. the assertion in
        # test_dimension_with_invalid_names).
        assert "Dimension's name must be either string or None." == exc.value.args[0]
    s = Space(
        [
            Real(1, 2, name="a"),
            Integer(1, 100, name="b"),
            Categorical(["red, blue"], name="c"),
        ]
    )
    # Lookup by name returns (index, dimension); multiple keys return a list.
    assert s["a"] == (0, s.dimensions[0])
    assert s["a", "c"] == [(0, s.dimensions[0]), (2, s.dimensions[2])]
    assert s[["a", "c"]] == [(0, s.dimensions[0]), (2, s.dimensions[2])]
    assert s[("a", "c")] == [(0, s.dimensions[0]), (2, s.dimensions[2])]
    assert s[0] == (0, s.dimensions[0])
    assert s[0, "c"] == [(0, s.dimensions[0]), (2, s.dimensions[2])]
    assert s[0, 2] == [(0, s.dimensions[0]), (2, s.dimensions[2])]
@pytest.mark.parametrize(
    "dimension", [Real(1, 2), Integer(1, 100), Categorical(["red, blue"])]
)
def test_dimension_name_none(dimension):
    """Dimensions built without an explicit name default to name=None."""
    assert dimension.name is None
@pytest.mark.hps
def test_space_from_yaml():
with NamedTemporaryFile(delete=False) as tmp:
tmp.write(
b"""
Space:
- Real:
low: 0.0
high: 1.0
- Integer:
low: -5
high: 5
- Categorical:
categories:
- a
- b
- c
- Real:
low: 1.0
high: 5.0
prior: log-uniform
- Categorical:
categories:
- e
- f
"""
)
tmp.flush()
space = Space(
[
(0.0, 1.0),
(-5, 5),
("a", "b", "c"),
(1.0, 5.0, "log-uniform"),
("e", "f"),
]
)
space2 = Space.from_yaml(tmp.name)
assert_equal(space, space2)
tmp.close()
os.unlink(tmp.name)
@pytest.mark.parametrize("name", [1, 1.0, True])
def test_dimension_with_invalid_names(name):
    """Non-string, non-None names must raise ValueError with the exact message."""
    with pytest.raises(ValueError) as exc:
        Real(1, 2, name=name)
    assert "Dimension's name must be either string or None." == exc.value.args[0]
@pytest.mark.hps
def test_purely_categorical_space():
    """An all-Categorical search space must support ask/tell.

    Regression test for the bug in scikit-optimize issue #908; make sure
    it doesn't come back.
    """
    dims = [Categorical(["a", "b", "c"]), Categorical(["A", "B", "C"])]
    optimizer = Optimizer(dims, n_initial_points=2, random_state=3)
    for _ in range(2):
        x = optimizer.ask()
        # before the fix this call raised an exception
        optimizer.tell(x, np.random.uniform())
@pytest.mark.hps
def test_partly_categorical_space():
    """is_partly_categorical is True if any dimension is Categorical;
    is_categorical requires all of them to be."""
    dims = Space([Categorical(["a", "b", "c"]), Categorical(["A", "B", "C"])])
    assert dims.is_partly_categorical
    # Mixed space: partly categorical, but not fully categorical.
    dims = Space([Categorical(["a", "b", "c"]), Integer(1, 2)])
    assert dims.is_partly_categorical
    assert not dims.is_categorical
    dims = Space([Integer(1, 2), Integer(1, 2)])
    assert not dims.is_partly_categorical
@pytest.mark.hps
def test_normalize_bounds():
bounds = [(-999, 189000), Categorical((True, False))]
space = Space(normalize_dimensions(bounds))
for a in np.linspace(1e-9, 0.4999, 1000):
x = space.inverse_transform([[a, a]])
check_limits(x[0][0], -999, 189000)
y = space.transform(x)
check_limits(y, 0.0, 1.0)
for a in np.linspace(0.50001, 1e-9 + 1.0, 1000):
x = space.inverse_transform([[a, a]])
check_limits(x[0][0], -999, 189000)
y = space.transform(x)
check_limits(y, 0.0, 1.0)
| 27,583 | 31.186698 | 88 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_optimizer.py | import numpy as np
import pytest
from sklearn.multioutput import MultiOutputRegressor
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from deephyper.skopt import gp_minimize
from deephyper.skopt import forest_minimize
from deephyper.skopt.benchmarks import bench1, bench1_with_time
from deephyper.skopt.benchmarks import branin
from deephyper.skopt.learning import ExtraTreesRegressor, RandomForestRegressor
from deephyper.skopt.learning import GradientBoostingQuantileRegressor
from deephyper.skopt.optimizer import Optimizer
from scipy.optimize import OptimizeResult
TREE_REGRESSORS = (
ExtraTreesRegressor(random_state=2),
RandomForestRegressor(random_state=2),
GradientBoostingQuantileRegressor(random_state=2),
)
ACQ_FUNCS_PS = ["EIps", "PIps"]
ACQ_FUNCS_MIXED = ["EI", "EIps"]
ESTIMATOR_STRINGS = [
"GP",
"RF",
"ET",
"GBRT",
"DUMMY",
"gp",
"rf",
"et",
"gbrt",
"dummy",
]
@pytest.mark.hps
def test_multiple_asks():
    """Repeated ask() calls without an intervening tell() are idempotent."""
    # calling ask() multiple times without a tell() inbetween should
    # be a "no op"
    base_estimator = ExtraTreesRegressor(random_state=2)
    opt = Optimizer(
        [(-2.0, 2.0)], base_estimator, n_initial_points=1, acq_optimizer="sampling"
    )
    opt.run(bench1, n_iter=3)
    # tell() computes the next point ready for the next call to ask()
    # hence there are three after three iterations
    assert_equal(len(opt.models), 3)
    assert_equal(len(opt.Xi), 3)
    opt.ask()
    # ask() must not have grown the model or observation lists.
    assert_equal(len(opt.models), 3)
    assert_equal(len(opt.Xi), 3)
    assert_equal(opt.ask(), opt.ask())
    # update_next() recomputes the suggestion; repeated ask() is still stable.
    opt.update_next()
    assert_equal(opt.ask(), opt.ask())
@pytest.mark.hps
def test_model_queue_size():
# Check if model_queue_size limits the model queue size
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(-2.0, 2.0)],
base_estimator,
n_initial_points=1,
acq_optimizer="sampling",
model_queue_size=2,
)
opt.run(bench1, n_iter=3)
# tell() computes the next point ready for the next call to ask()
# hence there are three after three iterations
assert_equal(len(opt.models), 2)
assert_equal(len(opt.Xi), 3)
opt.ask()
assert_equal(len(opt.models), 2)
assert_equal(len(opt.Xi), 3)
assert_equal(opt.ask(), opt.ask())
@pytest.mark.hps
def test_invalid_tell_arguments():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(-2.0, 2.0)], base_estimator, n_initial_points=1, acq_optimizer="sampling"
)
# can't have single point and multiple values for y
assert_raises(ValueError, opt.tell, [1.0], [1.0, 1.0])
@pytest.mark.hps
def test_invalid_tell_arguments_list():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(-2.0, 2.0)], base_estimator, n_initial_points=1, acq_optimizer="sampling"
)
assert_raises(ValueError, opt.tell, [[1.0], [2.0]], [1.0, None])
@pytest.mark.hps
def test_bounds_checking_1D():
low = -2.0
high = 2.0
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(low, high)], base_estimator, n_initial_points=1, acq_optimizer="sampling"
)
assert_raises(ValueError, opt.tell, [high + 0.5], 2.0)
assert_raises(ValueError, opt.tell, [low - 0.5], 2.0)
# feed two points to tell() at once
assert_raises(ValueError, opt.tell, [high + 0.5, high], (2.0, 3.0))
assert_raises(ValueError, opt.tell, [low - 0.5, high], (2.0, 3.0))
@pytest.mark.hps
def test_bounds_checking_2D():
low = -2.0
high = 2.0
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(low, high), (low + 4, high + 4)],
base_estimator,
n_initial_points=1,
acq_optimizer="sampling",
)
assert_raises(ValueError, opt.tell, [high + 0.5, high + 4.5], 2.0)
assert_raises(ValueError, opt.tell, [low - 0.5, low - 4.5], 2.0)
# first out, second in
assert_raises(ValueError, opt.tell, [high + 0.5, high + 0.5], 2.0)
assert_raises(ValueError, opt.tell, [low - 0.5, high + 0.5], 2.0)
@pytest.mark.hps
def test_bounds_checking_2D_multiple_points():
low = -2.0
high = 2.0
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(low, high), (low + 4, high + 4)],
base_estimator,
n_initial_points=1,
acq_optimizer="sampling",
)
# first component out, second in
assert_raises(
ValueError,
opt.tell,
[(high + 0.5, high + 0.5), (high + 0.5, high + 0.5)],
[2.0, 3.0],
)
assert_raises(
ValueError,
opt.tell,
[(low - 0.5, high + 0.5), (low - 0.5, high + 0.5)],
[2.0, 3.0],
)
@pytest.mark.hps
def test_dimension_checking_1D():
low = -2
high = 2
opt = Optimizer([(low, high)])
with pytest.raises(ValueError) as e:
# within bounds but one dimension too high
opt.tell([low + 1, low + 1], 2.0)
assert "Dimensions of point " in str(e.value)
@pytest.mark.hps
def test_dimension_checking_2D():
low = -2
high = 2
opt = Optimizer([(low, high), (low, high)])
# within bounds but one dimension too little
with pytest.raises(ValueError) as e:
opt.tell(
[
low + 1,
],
2.0,
)
assert "Dimensions of point " in str(e.value)
# within bounds but one dimension too much
with pytest.raises(ValueError) as e:
opt.tell([low + 1, low + 1, low + 1], 2.0)
assert "Dimensions of point " in str(e.value)
@pytest.mark.hps
def test_dimension_checking_2D_multiple_points():
low = -2
high = 2
opt = Optimizer([(low, high), (low, high)])
# within bounds but one dimension too little
with pytest.raises(ValueError) as e:
opt.tell(
[
[
low + 1,
],
[low + 1, low + 2],
[low + 1, low + 3],
],
2.0,
)
assert "dimensions as the space" in str(e.value)
# within bounds but one dimension too much
with pytest.raises(ValueError) as e:
opt.tell(
[[low + 1, low + 1, low + 1], [low + 1, low + 2], [low + 1, low + 3]], 2.0
)
assert "dimensions as the space" in str(e.value)
@pytest.mark.hps
def test_returns_result_object():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(-2.0, 2.0)], base_estimator, n_initial_points=1, acq_optimizer="sampling"
)
result = opt.tell([1.5], 2.0)
assert isinstance(result, OptimizeResult)
assert_equal(len(result.x_iters), len(result.func_vals))
assert_equal(np.min(result.func_vals), result.fun)
@pytest.mark.hps
@pytest.mark.parametrize("base_estimator", TREE_REGRESSORS)
def test_acq_optimizer(base_estimator):
with pytest.raises(ValueError) as e:
Optimizer(
[(-2.0, 2.0)],
base_estimator=base_estimator,
n_initial_points=1,
acq_optimizer="lbfgs",
)
assert "should run with acq_optimizer='sampling'" in str(e.value)
@pytest.mark.parametrize("base_estimator", TREE_REGRESSORS)
@pytest.mark.parametrize("acq_func", ACQ_FUNCS_PS)
def test_acq_optimizer_with_time_api(base_estimator, acq_func):
opt = Optimizer(
[
(-2.0, 2.0),
],
base_estimator=base_estimator,
acq_func=acq_func,
acq_optimizer="sampling",
n_initial_points=2,
)
x1 = opt.ask()
opt.tell(x1, (bench1(x1), 1.0))
x2 = opt.ask()
res = opt.tell(x2, (bench1(x2), 2.0))
# x1 and x2 are random.
assert x1 != x2
assert len(res.models) == 1
assert_array_equal(res.func_vals.shape, (2,))
assert_array_equal(res.log_time.shape, (2,))
# x3 = opt.ask()
with pytest.raises(TypeError) as e:
opt.tell(x2, bench1(x2))
@pytest.mark.hps
@pytest.mark.parametrize("acq_func", ACQ_FUNCS_MIXED)
def test_optimizer_copy(acq_func):
# Checks that the base estimator, the objective and target values
# are copied correctly.
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer(
[(-2.0, 2.0)],
base_estimator,
acq_func=acq_func,
n_initial_points=1,
acq_optimizer="sampling",
)
# run three iterations so that we have some points and objective values
if "ps" in acq_func:
opt.run(bench1_with_time, n_iter=3)
else:
opt.run(bench1, n_iter=3)
opt_copy = opt.copy()
copied_estimator = opt_copy.base_estimator_
if "ps" in acq_func:
assert isinstance(copied_estimator, MultiOutputRegressor)
# check that the base_estimator is not wrapped multiple times
is_multi = isinstance(copied_estimator.estimator, MultiOutputRegressor)
assert not is_multi
else:
assert not isinstance(copied_estimator, MultiOutputRegressor)
assert_array_equal(opt_copy.Xi, opt.Xi)
assert_array_equal(opt_copy.yi, opt.yi)
@pytest.mark.parametrize("base_estimator", ESTIMATOR_STRINGS)
def test_exhaust_initial_calls(base_estimator):
# check a model is fitted and used to make suggestions after we added
# at least n_initial_points via tell()
opt = Optimizer(
[(-2.0, 2.0)],
base_estimator,
n_initial_points=2,
acq_optimizer="sampling",
random_state=1,
)
x0 = opt.ask() # random point
x1 = opt.ask() # random point
assert x0 != x1
# first call to tell()
r1 = opt.tell(x1, 3.0)
assert len(r1.models) == 0
x2 = opt.ask() # random point
assert x1 != x2
# second call to tell()
r2 = opt.tell(x2, 4.0)
if base_estimator.lower() == "dummy":
assert len(r2.models) == 0
else:
assert len(r2.models) == 1
# this is the first non-random point
x3 = opt.ask()
assert x2 != x3
x4 = opt.ask()
r3 = opt.tell(x3, 1.0)
# no new information was added so should be the same, unless we are using
# the dummy estimator which will forever return random points and never
# fits any models
if base_estimator.lower() == "dummy":
assert x3 != x4
assert len(r3.models) == 0
else:
assert x3 == x4
assert len(r3.models) == 2
@pytest.mark.hps
def test_optimizer_base_estimator_string_invalid():
with pytest.raises(ValueError) as e:
Optimizer([(-2.0, 2.0)], base_estimator="rtr", n_initial_points=1)
assert "'RF', 'ET', 'GP', 'GBRT' or 'DUMMY'" in str(e.value)
@pytest.mark.hps
@pytest.mark.parametrize("base_estimator", ESTIMATOR_STRINGS)
def test_optimizer_base_estimator_string_smoke(base_estimator):
opt = Optimizer(
[(-2.0, 2.0)], base_estimator=base_estimator, n_initial_points=2, acq_func="EI"
)
opt.run(func=lambda x: x[0] ** 2, n_iter=3)
@pytest.mark.hps
def test_optimizer_base_estimator_string_smoke_njobs():
opt = Optimizer(
[(-2.0, 2.0)],
base_estimator="GBRT",
n_initial_points=1,
acq_func="EI",
n_jobs=-1,
)
opt.run(func=lambda x: x[0] ** 2, n_iter=3)
def test_defaults_are_equivalent():
# check that the defaults of Optimizer reproduce the defaults of
# gp_minimize
space = [(-5.0, 10.0), (0.0, 15.0)]
# opt = Optimizer(space, 'ET', acq_func="EI", random_state=1)
opt = Optimizer(space, random_state=1)
for n in range(12):
x = opt.ask()
res_opt = opt.tell(x, branin(x))
# res_min = forest_minimize(branin, space, n_calls=12, random_state=1)
res_min = gp_minimize(branin, space, n_calls=12, random_state=1)
assert res_min.space == res_opt.space
# tolerate small differences in the points sampled
assert np.allclose(res_min.x_iters, res_opt.x_iters) # , atol=1e-5)
assert np.allclose(res_min.x, res_opt.x) # , atol=1e-5)
res_opt2 = opt.get_result()
assert np.allclose(res_min.x_iters, res_opt2.x_iters) # , atol=1e-5)
assert np.allclose(res_min.x, res_opt2.x) # , atol=1e-5)
@pytest.mark.hps
def test_dimensions_names():
from deephyper.skopt.space import Real, Categorical, Integer
# create search space and optimizer
space = [
Real(0, 1, name="real"),
Categorical(["a", "b", "c"], name="cat"),
Integer(0, 1, name="int"),
]
opt = Optimizer(space, n_initial_points=2)
# result of the optimizer missing dimension names
result = opt.tell([(0.5, "a", 0.5)], [3])
names = []
for d in result.space.dimensions:
names.append(d.name)
assert len(names) == 3
assert "real" in names
assert "cat" in names
assert "int" in names
assert None not in names
@pytest.mark.hps
def test_categorical_only():
from deephyper.skopt.space import Categorical
cat1 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
cat2 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
opt = Optimizer([cat1, cat2])
for n in range(15):
x = opt.ask()
res = opt.tell(x, 12 * n)
assert len(res.x_iters) == 15
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
cat3 = Categorical(["2", "3", "4", "5", "6", "7", "8", "9", "10", "11"])
cat4 = Categorical(["2", "3", "4", "5", "6", "7", "8", "9", "10", "11"])
opt = Optimizer([cat3, cat4])
for n in range(15):
x = opt.ask()
res = opt.tell(x, 12 * n)
assert len(res.x_iters) == 15
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
def test_categorical_only2():
from numpy import linalg
from deephyper.skopt.space import Categorical
from deephyper.skopt.learning import GaussianProcessRegressor
space = [Categorical([1, 2, 3]), Categorical([4, 5, 6])]
opt = Optimizer(
space,
base_estimator=GaussianProcessRegressor(alpha=1e-7),
acq_optimizer="lbfgs",
n_initial_points=10,
n_jobs=2,
)
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
opt.tell(next_x, [linalg.norm(x) for x in next_x])
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
opt.tell(next_x, [linalg.norm(x) for x in next_x])
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
| 14,307 | 28.501031 | 87 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_gp_opt.py | import numpy as np
from numpy.testing import assert_array_equal
import pytest
from deephyper.skopt import gp_minimize
from deephyper.skopt.benchmarks import bench1
from deephyper.skopt.benchmarks import bench2
from deephyper.skopt.benchmarks import bench3
from deephyper.skopt.benchmarks import bench4
from deephyper.skopt.benchmarks import branin
from deephyper.skopt.space.space import Real, Categorical, Space
from deephyper.skopt.utils import cook_estimator
def check_minimize(
    func,
    y_opt,
    bounds,
    acq_optimizer,
    acq_func,
    margin,
    n_calls,
    n_initial_points=10,
    init_gen="random",
):
    """Run gp_minimize on ``func`` and assert the best value found is
    within ``margin`` of the known optimum ``y_opt``.

    ``random_state=1`` and ``noise=1e-10`` keep the run essentially
    deterministic so the margin check is reproducible.
    """
    r = gp_minimize(
        func,
        bounds,
        acq_optimizer=acq_optimizer,
        acq_func=acq_func,
        n_initial_points=n_initial_points,
        n_calls=n_calls,
        random_state=1,
        initial_point_generator=init_gen,
        noise=1e-10,
    )
    assert r.fun < y_opt + margin
SEARCH = ["sampling", "lbfgs"]
ACQUISITION = ["LCB", "EI"]
INITGEN = ["random", "lhs", "halton", "hammersly", "sobol"]


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench1(search, acq):
    """bench1: best value must land within 0.05 of the optimum 0.0."""
    check_minimize(bench1, 0.0, [(-2.0, 2.0)], search, acq, 0.05, 20)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("search", ["sampling"])
@pytest.mark.parametrize("acq", ["LCB"])
@pytest.mark.parametrize("initgen", INITGEN)
def test_gp_minimize_bench1_initgen(search, acq, initgen):
    """bench1 is solved with every supported initial point generator."""
    check_minimize(bench1, 0.0, [(-2.0, 2.0)], search, acq, 0.05, 20, init_gen=initgen)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench2(search, acq):
    """bench2: best value must land within 0.05 of the optimum -5."""
    check_minimize(bench2, -5, [(-6.0, 6.0)], search, acq, 0.05, 20)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench3(search, acq):
    """bench3: best value must land within 0.05 of the optimum -0.9."""
    check_minimize(bench3, -0.9, [(-2.0, 2.0)], search, acq, 0.05, 20)


@pytest.mark.hps
@pytest.mark.parametrize("search", ["sampling"])
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench4(search, acq):
    """bench4 on a categorical space, with a generous margin of 1.05."""
    # this particular random_state picks "2" twice so we can make an extra
    # call to the objective without repeating options
    check_minimize(bench4, 0, [("-2", "-1", "0", "1", "2")], search, acq, 1.05, 20)
@pytest.mark.hps
def test_n_jobs():
    """gp_minimize must visit exactly the same points with n_jobs=1 and 2."""
    # Identical settings for both runs; only n_jobs differs.
    shared = dict(
        acq_optimizer="lbfgs",
        acq_func="EI",
        n_calls=4,
        n_initial_points=2,
        random_state=1,
        noise=1e-10,
    )
    r_single = gp_minimize(bench3, [(-2.0, 2.0)], **shared)
    r_double = gp_minimize(bench3, [(-2.0, 2.0)], n_jobs=2, **shared)
    assert_array_equal(r_single.x_iters, r_double.x_iters)
@pytest.mark.hps
def test_gpr_default():
    """Smoke test that gp_minimize does not fail for default values."""
    dims = ((-5.0, 10.0), (0.0, 15.0))
    gp_minimize(branin, dims, n_initial_points=2, n_calls=2)
@pytest.mark.hps
def test_use_given_estimator():
    """An explicitly supplied base_estimator must override the default:
    the fitted model keeps the estimator's noise, not the call's."""
    domain = [(1.0, 2.0), (3.0, 4.0)]
    noise_correct = 1e5
    noise_fake = 1e-10

    estimator = cook_estimator("GP", domain, noise=noise_correct)
    res = gp_minimize(
        branin,
        domain,
        n_calls=4,
        n_initial_points=2,
        base_estimator=estimator,
        noise=noise_fake,
    )

    assert res["models"][-1].noise == noise_correct


@pytest.mark.hps
def test_use_given_estimator_with_max_model_size():
    """Same as above, but with model_queue_size=1: only the most recent
    model is retained and it still carries the estimator's noise."""
    domain = [(1.0, 2.0), (3.0, 4.0)]
    noise_correct = 1e5
    noise_fake = 1e-10

    estimator = cook_estimator("GP", domain, noise=noise_correct)
    res = gp_minimize(
        branin,
        domain,
        n_calls=4,
        n_initial_points=2,
        base_estimator=estimator,
        noise=noise_fake,
        model_queue_size=1,
    )

    assert len(res["models"]) == 1
    assert res["models"][-1].noise == noise_correct
@pytest.mark.hps
def test_categorical_integer():
    """A single-category integer dimension must round-trip unchanged."""

    def noisy_objective(params):
        # Objective value is irrelevant; only x_iters bookkeeping matters.
        return np.random.uniform()

    dims = [[1]]
    res = gp_minimize(noisy_objective, dims, n_calls=2, n_initial_points=2, random_state=1)
    assert res.x_iters[0][0] == dims[0][0]
@pytest.mark.parametrize("initgen", INITGEN)
def test_mixed_categoricals(initgen):
    """Optimize over two categorical dimensions plus one real dimension."""
    space = Space(
        [
            Categorical(name="x", categories=["1", "2", "3"]),
            Categorical(name="y", categories=[4, 5, 6]),
            Real(name="z", low=1.0, high=5.0),
        ]
    )

    def objective(param_list):
        x, y, z = param_list
        return int(x) + y * z

    res = gp_minimize(
        objective, space, n_calls=20, random_state=1, initial_point_generator=initgen
    )
    assert res["x"] in [["1", 4, 1.0], ["2", 4, 1.0]]


@pytest.mark.parametrize("initgen", INITGEN)
def test_mixed_categoricals2(initgen):
    """Optimize over categorical dimensions only."""
    space = Space(
        [
            Categorical(name="x", categories=["1", "2", "3"]),
            Categorical(name="y", categories=[4, 5, 6]),
        ]
    )

    def objective(param_list):
        x, y = param_list
        return int(x) + y

    res = gp_minimize(
        objective, space, n_calls=12, random_state=1, initial_point_generator=initgen
    )
    assert res["x"] == ["1", 4]
| 5,703 | 26.291866 | 87 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_gpr.py | import pytest
import numpy as np
from deephyper.skopt.learning import GaussianProcessRegressor
@pytest.mark.hps
def test_gpr_uses_noise():
    """With noise='gaussian' the regressor uses a WhiteKernel, so the
    predictive variance stays strictly positive even on training points."""
    X = np.random.normal(size=[100, 2])
    Y = np.random.normal(size=[100])

    model = GaussianProcessRegressor(noise="gaussian")
    model.fit(X, Y)
    _, sigma = model.predict(X[0:1], return_cov=True)
    assert sigma > 0
| 431 | 23 | 61 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_common.py | from functools import partial
from itertools import product
import numpy as np
from scipy.optimize import OptimizeResult
import pytest
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from deephyper.skopt import dummy_minimize
from deephyper.skopt import gp_minimize
from deephyper.skopt import forest_minimize
from deephyper.skopt import gbrt_minimize
from deephyper.skopt.benchmarks import branin
from deephyper.skopt.benchmarks import bench1
from deephyper.skopt.benchmarks import bench4
from deephyper.skopt.benchmarks import bench5
from deephyper.skopt.callbacks import DeltaXStopper
from deephyper.skopt.space import Space
# dummy_minimize does not support same parameters so
# treated separately
MINIMIZERS = [gp_minimize]
ACQUISITION = ["LCB", "PI", "EI"]
ACQ_FUNCS_PS = ["PIps", "EIps"]
for est, acq in product(["ET", "RF"], ACQUISITION):
    MINIMIZERS.append(partial(forest_minimize, base_estimator=est, acq_func=acq))
for acq in ACQUISITION:
    MINIMIZERS.append(partial(gbrt_minimize, acq_func=acq))


def check_minimizer_api(result, n_calls, n_models=None):
    """Assert that *result* (produced on branin) exposes a consistent API."""
    assert isinstance(result.space, Space)

    if n_models is not None:
        assert_equal(len(result.models), n_models)

    assert_equal(len(result.x_iters), n_calls)
    assert_array_equal(result.func_vals.shape, (n_calls,))

    assert isinstance(result.x, list)
    assert_equal(len(result.x), 2)

    assert isinstance(result.x_iters, list)
    # Each visited point is a 2-D list and its stored value matches branin.
    for point, value in zip(result.x_iters, result.func_vals):
        assert isinstance(point, list)
        assert_equal(len(point), 2)
        assert isinstance(value, float)
        assert_almost_equal(value, branin(point))

    assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
    assert_almost_equal(result.fun, branin(result.x))

    assert isinstance(result.specs, dict)
    assert "args" in result.specs
    assert "function" in result.specs


def check_minimizer_bounds(result, n_calls):
    """No evaluated point may fall outside the branin search box."""
    eps = 10e-9  # check for assert_array_less OR equal
    upper = np.tile([10 + eps, 15 + eps], (n_calls, 1))
    lower = np.tile([-5 - eps, 0 - eps], (n_calls, 1))
    assert_array_less(result.x_iters, upper)
    assert_array_less(lower, result.x_iters)
def check_result_callable(res):
    """Callback: the intermediate result object must stay self-consistent
    at every call."""
    assert isinstance(res, OptimizeResult)
    assert_equal(len(res.x_iters), len(res.func_vals))
    assert_equal(np.min(res.func_vals), res.fun)


def call_single(res):
    """No-op callback used to exercise the single-callable code path."""
    pass
@pytest.mark.hps
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("call", [call_single, [call_single, check_result_callable]])
def test_minimizer_api_dummy_minimize(verbose, call):
    """dummy_minimize supports fewer parameters and fits no models."""
    n_calls = 7
    res = dummy_minimize(
        branin,
        [(-5.0, 10.0), (0.0, 15.0)],
        n_calls=n_calls,
        random_state=1,
        verbose=verbose,
        callback=call,
    )

    assert res.models == []
    check_minimizer_api(res, n_calls)
    check_minimizer_bounds(res, n_calls)

    with pytest.raises(ValueError):
        dummy_minimize(lambda x: x, [[-5, 10]])
@pytest.mark.hps_slow_test
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("call", [call_single, [call_single, check_result_callable]])
@pytest.mark.parametrize("minimizer", MINIMIZERS)
def test_minimizer_api(verbose, call, minimizer):
    """Every model-based minimizer exposes the common result API."""
    n_calls = 7
    n_initial_points = 3
    # One model per non-random call, plus the final refit.
    n_models = n_calls - n_initial_points + 1

    res = minimizer(
        branin,
        [(-5.0, 10.0), (0.0, 15.0)],
        n_initial_points=n_initial_points,
        n_calls=n_calls,
        random_state=1,
        verbose=verbose,
        callback=call,
    )

    check_minimizer_api(res, n_calls, n_models)
    check_minimizer_bounds(res, n_calls)

    with pytest.raises(ValueError):
        minimizer(lambda x: x, [[-5, 10]])


@pytest.mark.hps
@pytest.mark.parametrize("minimizer", MINIMIZERS)
def test_minimizer_api_random_only(minimizer):
    """With only random evaluations no surrogate model should be fitted."""
    n_calls = 5
    res = minimizer(
        branin,
        [(-5.0, 10.0), (0.0, 15.0)],
        n_initial_points=5,
        n_calls=n_calls,
        random_state=1,
    )
    check_minimizer_api(res, n_calls)
    check_minimizer_bounds(res, n_calls)
@pytest.mark.hps_slow_test
@pytest.mark.parametrize("minimizer", MINIMIZERS)
def test_fixed_random_states(minimizer):
    """Two runs with the same seed must be exactly reproducible; otherwise
    some random state is not being threaded through correctly."""
    n_calls = 4
    n_initial_points = 2

    first = minimizer(
        branin,
        [(-5.0, 10.0), (0.0, 15.0)],
        n_calls=n_calls,
        n_initial_points=n_initial_points,
        random_state=1,
    )
    second = minimizer(
        branin,
        [(-5.0, 10.0), (0.0, 15.0)],
        n_calls=n_calls,
        n_initial_points=n_initial_points,
        random_state=1,
    )

    assert_array_almost_equal(first.x_iters, second.x_iters)
    assert_array_almost_equal(first.func_vals, second.func_vals)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("minimizer", MINIMIZERS)
def test_minimizer_with_space(minimizer):
    """Passing a Space instance must be equivalent to passing raw bounds."""
    n_calls = 4
    n_initial_points = 2

    space_result = minimizer(
        branin,
        Space([(-5.0, 10.0), (0.0, 15.0)]),
        n_calls=n_calls,
        n_initial_points=n_initial_points,
        random_state=1,
    )
    check_minimizer_api(space_result, n_calls)
    check_minimizer_bounds(space_result, n_calls)

    plain_result = minimizer(
        branin,
        [(-5.0, 10.0), (0.0, 15.0)],
        n_calls=n_calls,
        n_initial_points=n_initial_points,
        random_state=1,
    )

    assert_array_almost_equal(space_result.x_iters, plain_result.x_iters)
    assert_array_almost_equal(space_result.func_vals, plain_result.func_vals)
@pytest.mark.hps_slow_test
@pytest.mark.parametrize("n_initial_points", [0, 1, 2, 3, 4])
@pytest.mark.parametrize(
    "optimizer_func", [gp_minimize, forest_minimize, gbrt_minimize]
)
def test_init_vals_and_models(n_initial_points, optimizer_func):
    """Number of fitted models when both x0 and y0 seed the run."""
    space = [(-5.0, 10.0), (0.0, 15.0)]
    x0 = [[1, 2], [3, 4], [5, 6]]
    y0 = list(map(branin, x0))
    n_calls = 7

    optimizer = partial(optimizer_func, n_initial_points=n_initial_points)
    res = optimizer(branin, space, x0=x0, y0=y0, random_state=0, n_calls=n_calls)
    assert_equal(len(res.models), n_calls - n_initial_points + 1)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("n_initial_points", [0, 1, 2, 3, 4])
@pytest.mark.parametrize(
    "optimizer_func", [gp_minimize, forest_minimize, gbrt_minimize]
)
def test_init_points_and_models(n_initial_points, optimizer_func):
    """Number of fitted models when only x0 (no y0) seeds the run."""
    space = [(-5.0, 10.0), (0.0, 15.0)]
    x0 = [[1, 2], [3, 4], [5, 6]]
    n_calls = 7

    optimizer = partial(optimizer_func, n_initial_points=n_initial_points)
    res = optimizer(branin, space, x0=x0, random_state=0, n_calls=n_calls)
    assert_equal(len(res.models), n_calls - len(x0) - n_initial_points + 1)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("n_initial_points", [2, 5])
@pytest.mark.parametrize(
    "optimizer_func", [gp_minimize, forest_minimize, gbrt_minimize]
)
def test_init_vals(n_initial_points, optimizer_func):
    """All supported ways of passing initial values behave consistently."""
    space = [(-5.0, 10.0), (0.0, 15.0)]
    x0 = [[1, 2], [3, 4], [5, 6]]
    n_calls = len(x0) + n_initial_points + 1

    optimizer = partial(optimizer_func, n_initial_points=n_initial_points)
    check_init_vals(optimizer, branin, space, x0, n_calls)
@pytest.mark.hps
def test_init_vals_dummy_minimize():
    """dummy_minimize supports user-supplied initial values too."""
    space = [(-5.0, 10.0), (0.0, 15.0)]
    x0 = [[1, 2], [3, 4], [5, 6]]
    check_init_vals(dummy_minimize, branin, space, x0, 10)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize(
    "optimizer",
    [
        dummy_minimize,
        partial(gp_minimize, n_initial_points=3),
        partial(forest_minimize, n_initial_points=3),
        partial(gbrt_minimize, n_initial_points=3),
    ],
)
def test_categorical_init_vals(optimizer):
    """Initial values work on a purely categorical space (bench4)."""
    space = [("-2", "-1", "0", "1", "2")]
    x0 = [["0"], ["1"], ["2"]]
    check_init_vals(optimizer, bench4, space, x0, 6)


@pytest.mark.hps_slow_test
@pytest.mark.parametrize(
    "optimizer",
    [
        dummy_minimize,
        partial(gp_minimize, n_initial_points=2),
        partial(forest_minimize, n_initial_points=2),
        partial(gbrt_minimize, n_initial_points=2),
    ],
)
def test_mixed_spaces(optimizer):
    """Initial values work on a mixed categorical/real space (bench5)."""
    space = [("-2", "-1", "0", "1", "2"), (-2.0, 2.0)]
    x0 = [["0", 2.0], ["1", 1.0], ["2", 1.0]]
    check_init_vals(optimizer, bench5, space, x0, 5)
def check_init_vals(optimizer, func, space, x0, n_calls):
    """Exercise every supported way of seeding *optimizer* with initial
    points: a point list with evaluations, a point list alone, a single
    point, and a single point with its evaluation. Finally checks that
    mismatched x0/y0 lengths raise ValueError."""
    y0 = list(map(func, x0))

    # Points supplied with their evaluations are reused verbatim and
    # extend the run by n_calls further evaluations.
    res = optimizer(func, space, x0=x0, y0=y0, random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[: len(x0)], x0)
    assert_array_equal(res.func_vals[: len(y0)], y0)
    assert_equal(len(res.x_iters), len(x0) + n_calls)
    assert_equal(len(res.func_vals), len(x0) + n_calls)

    # Points supplied without evaluations count against the call budget.
    res = optimizer(func, space, x0=x0, random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[: len(x0)], x0)
    assert_array_equal(res.func_vals[: len(y0)], y0)
    assert_equal(len(res.x_iters), n_calls)
    assert_equal(len(res.func_vals), n_calls)

    # A single point may be passed instead of a list of points.
    res = optimizer(func, space, x0=x0[0], random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[0], x0[0])
    assert_array_equal(res.func_vals[0], y0[0])
    assert_equal(len(res.x_iters), n_calls)
    assert_equal(len(res.func_vals), n_calls)

    # A single point plus its single evaluation also works.
    res = optimizer(func, space, x0=x0[0], y0=y0[0], random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[0], x0[0])
    assert_array_equal(res.func_vals[0], y0[0])
    assert_equal(len(res.x_iters), 1 + n_calls)
    assert_equal(len(res.func_vals), 1 + n_calls)

    # Mismatched numbers of points and evaluations must raise.
    assert_raises(ValueError, dummy_minimize, func, space, x0=x0, y0=[1])
@pytest.mark.hps
@pytest.mark.parametrize("minimizer", MINIMIZERS)
def test_invalid_n_calls_arguments(minimizer):
    """Inconsistent call/initial-point budgets must raise ValueError."""
    bounds = [(-5.0, 10.0), (0.0, 15.0)]

    with pytest.raises(ValueError):
        minimizer(branin, bounds, n_calls=0, random_state=1)

    with pytest.raises(ValueError):
        minimizer(branin, bounds, n_initial_points=0, random_state=1)

    # n_calls >= n_initial_points
    with pytest.raises(ValueError):
        minimizer(branin, bounds, n_calls=1, n_initial_points=10, random_state=1)

    # n_calls >= n_initial_points + len(x0)
    with pytest.raises(ValueError):
        minimizer(
            branin,
            bounds,
            n_calls=1,
            x0=[[-1, 2], [-3, 3], [2, 5]],
            random_state=1,
            n_initial_points=7,
        )

    # n_calls >= n_initial_points, even when x0 comes with y0
    with pytest.raises(ValueError):
        minimizer(
            branin,
            bounds,
            n_calls=1,
            x0=[[-1, 2], [-3, 3], [2, 5]],
            y0=[2.0, 3.0, 5.0],
            random_state=1,
            n_initial_points=7,
        )
@pytest.mark.hps
@pytest.mark.parametrize("minimizer", MINIMIZERS)
def test_repeated_x(minimizer):
    """Re-evaluating an already-seen point must emit a UserWarning.

    ``pytest.warns(None)`` was deprecated in pytest 6.2 and removed in
    pytest 7 (it now raises), so warnings are captured with
    ``warnings.catch_warnings(record=True)`` instead.
    """
    import warnings

    # Continuous dimension: x0 exhausts the space of distinct optima.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        minimizer(
            lambda x: x[0],
            dimensions=[[0, 1]],
            x0=[[0], [1]],
            n_initial_points=0,
            n_calls=3,
        )
    user_warnings = [w for w in record if issubclass(w.category, UserWarning)]
    assert user_warnings
    assert any("has been evaluated at" in str(w.message) for w in user_warnings)

    # Categorical dimension: only two candidate points exist.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        minimizer(
            bench4,
            dimensions=[("0", "1")],
            x0=[["0"], ["1"]],
            n_calls=3,
            n_initial_points=0,
        )
    user_warnings = [w for w in record if issubclass(w.category, UserWarning)]
    assert user_warnings
    assert any("has been evaluated at" in str(w.message) for w in user_warnings)
@pytest.mark.hps
@pytest.mark.parametrize("minimizer", MINIMIZERS)
def test_consistent_x_iter_dimensions(minimizer):
    """All entries of x_iters must share the same dimensionality."""
    # Two-dimensional problem. bench1 is a 1-D function, but here the
    # objective value is irrelevant — only x_iters bookkeeping is checked.
    res = minimizer(
        bench1,
        dimensions=[(0, 1), (2, 3)],
        x0=[[0, 2], [1, 2]],
        n_calls=3,
        n_initial_points=0,
    )
    assert len({len(point) for point in res.x_iters}) == 1
    assert len(res.x_iters[0]) == 2

    # One-dimensional problem.
    res = minimizer(
        bench1, dimensions=[(0, 1)], x0=[[0], [1]], n_calls=3, n_initial_points=0
    )
    assert len({len(point) for point in res.x_iters}) == 1
    assert len(res.x_iters[0]) == 1

    # x0 points whose shape does not match the space must be rejected.
    with pytest.raises(RuntimeError):
        minimizer(
            bench1, dimensions=[(0, 1)], x0=[[0, 1]], n_calls=3, n_initial_points=0
        )

    with pytest.raises(RuntimeError):
        minimizer(bench1, dimensions=[(0, 1)], x0=[0, 1], n_calls=3, n_initial_points=0)
@pytest.mark.hps_slow_test
@pytest.mark.parametrize("minimizer", [gp_minimize, forest_minimize, gbrt_minimize])
def test_early_stopping_delta_x(minimizer):
    """DeltaXStopper halts the run before the call budget is exhausted."""
    n_calls = 11
    res = minimizer(
        bench1,
        callback=DeltaXStopper(0.1),
        dimensions=[(-1.0, 1.0)],
        x0=[[-0.1], [0.1], [-0.9]],
        n_calls=n_calls,
        n_initial_points=0,
        random_state=1,
    )
    assert len(res.x_iters) < n_calls


@pytest.mark.hps_slow_test
@pytest.mark.parametrize("minimizer", [gp_minimize, forest_minimize, gbrt_minimize])
def test_early_stopping_delta_x_empty_result_object(minimizer):
    """DeltaXStopper copes with being handed an empty results object,
    e.g. at the very start of the optimization loop."""
    n_calls = 15
    res = minimizer(
        bench1,
        callback=DeltaXStopper(0.1),
        dimensions=[(-1.0, 1.0)],
        n_calls=n_calls,
        n_initial_points=2,
        random_state=1,
    )
    assert len(res.x_iters) < n_calls


@pytest.mark.parametrize("acq_func", ACQ_FUNCS_PS)
@pytest.mark.parametrize("minimizer", [gp_minimize, forest_minimize, gbrt_minimize])
def test_per_second_api(acq_func, minimizer):
    """*ps acquisition functions record one runtime entry per call."""

    def bench1_with_time(x):
        # Return (objective value, pseudo evaluation time).
        return bench1(x), np.abs(x[0])

    n_calls = 3
    res = minimizer(
        bench1_with_time,
        [(-2.0, 2.0)],
        acq_func=acq_func,
        n_calls=n_calls,
        n_initial_points=2,
        random_state=1,
    )
    assert len(res.log_time) == n_calls
| 15,995 | 30.612648 | 88 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_parallel_cl.py | """This script contains set of functions that test parallel optimization with
skopt, where constant liar parallelization strategy is used.
"""
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from deephyper.skopt.space import Real
from deephyper.skopt import Optimizer
from deephyper.skopt.benchmarks import branin
import deephyper.skopt.learning as sol
from scipy.spatial.distance import pdist
import pytest
# list of all strategies for parallelization
supported_strategies = ["cl_min", "cl_mean", "cl_max"]
# test one acq function that incorporates the runtime, and one that does not
supported_acq_functions = ["EI", "EIps"]
# Extract available surrogates, so that new ones are used automatically
available_surrogates = [
    getattr(sol, name)
    for name in sol.__all__
    if "GradientBoostingQuantileRegressor" not in name
]  # excluding the GradientBoostingQuantileRegressor, will open issue later

n_steps = 5  # number of steps to test the algorithms with
n_points = 4  # number of points to evaluate at a single step
# n_steps x n_points > n_random_restarts should hold


@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
@pytest.mark.parametrize("acq_func", supported_acq_functions)
def test_constant_liar_runs(strategy, surrogate, acq_func):
    """The optimizer runs properly through the random initialization
    phase and beyond, for every (strategy, surrogate, acq_func) combo.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the constant-liar strategy to use during optimization.
    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_func=acq_func,
        acq_optimizer="sampling",
        random_state=0,
    )

    # Invalid ask() arguments are rejected up front.
    assert_raises(ValueError, optimizer.ask, {"strategy": "cl_maen"})
    assert_raises(ValueError, optimizer.ask, {"n_points": "0"})
    assert_raises(ValueError, optimizer.ask, {"n_points": 0})

    for _ in range(n_steps):
        batch = optimizer.ask(n_points=n_points, strategy=strategy)
        # Exactly n_points suggestions must come back.
        assert_equal(len(batch), n_points)
        if "ps" in acq_func:
            # *ps acquisition functions expect (value, runtime) pairs.
            optimizer.tell(batch, [[branin(p), 1.1] for p in batch])
        else:
            optimizer.tell(batch, [branin(p) for p in batch])
@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
def test_all_points_different(strategy, surrogate):
    """The parallel optimizer never proposes (near-)duplicate points
    within a single batch.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the constant-liar strategy to use during optimization.
    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=1,
    )
    tolerance = 1e-3  # distance above which points are assumed same
    for _ in range(n_steps):
        batch = optimizer.ask(n_points, strategy)
        optimizer.tell(batch, [branin(p) for p in batch])
        assert all(pdist(batch) > tolerance)
@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
def test_same_set_of_points_ask(strategy, surrogate):
    """Two consecutive ``ask`` calls (without an intervening ``tell``)
    must return the same batch of points.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the constant-liar strategy to use during optimization.
    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=2,
    )
    for _ in range(n_steps):
        first_batch = optimizer.ask(n_points, strategy)
        second_batch = optimizer.ask(n_points, strategy)
        optimizer.tell(first_batch, [branin(p) for p in first_batch])
        assert_equal(first_batch, second_batch)
@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
def test_reproducible_runs(strategy, surrogate):
    """Two optimizer runs with identical seeds yield identical batches."""
    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=1,
    )
    recorded = []
    for _ in range(n_steps):
        batch = optimizer.ask(n_points, strategy)
        recorded.append(batch)
        optimizer.tell(batch, [branin(p) for p in batch])

    # A fresh optimizer with the same seeds must replay `recorded` exactly.
    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=1,
    )
    for step in range(n_steps):
        batch = optimizer.ask(n_points, strategy)
        assert recorded[step] == batch
        optimizer.tell(batch, [branin(p) for p in batch])
| 5,448 | 31.825301 | 79 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_deprecation.py | from functools import partial
from itertools import product
import pytest
from deephyper.skopt import gp_minimize
from deephyper.skopt import forest_minimize
from deephyper.skopt import gbrt_minimize
from deephyper.skopt import Optimizer
from deephyper.skopt.learning import ExtraTreesRegressor
# dummy_minimize does not support same parameters so
# treated separately
MINIMIZERS = [gp_minimize]
ACQUISITION = ["LCB", "PI", "EI"]
for est, acq in product(["ET", "RF"], ACQUISITION):
    MINIMIZERS.append(partial(forest_minimize, base_estimator=est, acq_func=acq))
for acq in ACQUISITION:
    MINIMIZERS.append(partial(gbrt_minimize, acq_func=acq))


def test_n_random_starts_Optimizer():
    """``n_random_starts`` was renamed in v0.4 and must warn when used."""
    et = ExtraTreesRegressor(random_state=2)
    with pytest.deprecated_call():
        Optimizer([(0, 1.0)], et, n_random_starts=10, acq_optimizer="sampling")
| 896 | 28.9 | 81 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_plots.py | """Scikit-optimize plotting tests."""
import numpy as np
import pytest
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from numpy.testing import assert_array_almost_equal
from deephyper.skopt.space import Integer, Categorical
from deephyper.skopt import plots, gp_minimize
import matplotlib.pyplot as plt
from deephyper.skopt.benchmarks import bench3
from deephyper.skopt import expected_minimum, expected_minimum_random_sampling
from deephyper.skopt.plots import _evaluate_min_params, partial_dependence
from deephyper.skopt.plots import partial_dependence_1D, partial_dependence_2D
from deephyper.skopt import Optimizer
def save_axes(ax, filename):
    """Save matplotlib axes `ax` to an image `filename`."""
    figure = plt.gcf()
    figure.add_axes(ax)
    figure.savefig(filename)
@pytest.mark.hps_slow_test
def test_plots_work():
    """Basic smoke tests to make sure plotting doesn't crash."""
    SPACE = [
        Integer(1, 20, name="max_depth"),
        Integer(2, 100, name="min_samples_split"),
        Integer(5, 30, name="min_samples_leaf"),
        Integer(1, 30, name="max_features"),
        Categorical(["gini", "entropy"], name="criterion"),
        Categorical(list("abcdefghij"), name="dummy"),
    ]

    def objective(params):
        clf = DecisionTreeClassifier(
            random_state=3,
            **{dim.name: val for dim, val in zip(SPACE, params) if dim.name != "dummy"}
        )
        # `return_X_y` must be passed by keyword: positional booleans were
        # deprecated in scikit-learn 0.23 and removed in 1.1.
        return -np.mean(cross_val_score(clf, *load_breast_cancer(return_X_y=True)))

    res = gp_minimize(objective, SPACE, n_calls=10, random_state=3)

    # Points used to probe the partial-dependence helpers.
    x = [
        [11, 52, 8, 14, "entropy", "f"],
        [14, 90, 10, 2, "gini", "a"],
        [7, 90, 6, 14, "entropy", "f"],
    ]
    samples = res.space.transform(x)

    # 1-D partial dependence along the first (integer) dimension.
    xi_ = [1.0, 10.5, 20.0]
    yi_ = [-0.9240883492576596, -0.9240745890422687, -0.9240586402439884]
    xi, yi = partial_dependence_1D(res.space, res.models[-1], 0, samples, n_points=3)
    assert_array_almost_equal(xi, xi_)
    assert_array_almost_equal(yi, yi_, 2)

    # 1-D partial dependence along the categorical "criterion" dimension.
    xi_ = [0, 1]
    yi_ = [-0.9241087603770617, -0.9240188905968352]
    xi, yi = partial_dependence_1D(res.space, res.models[-1], 4, samples, n_points=3)
    assert_array_almost_equal(xi, xi_)
    assert_array_almost_equal(yi, yi_, 2)

    # 2-D partial dependence over dimensions 0 and 4.
    xi_ = [0, 1]
    yi_ = [1.0, 10.5, 20.0]
    zi_ = [
        [-0.92412562, -0.92403575],
        [-0.92411186, -0.92402199],
        [-0.92409591, -0.92400604],
    ]
    xi, yi, zi = partial_dependence_2D(
        res.space, res.models[-1], 0, 4, samples, n_points=3
    )
    assert_array_almost_equal(xi, xi_)
    assert_array_almost_equal(yi, yi_)
    assert_array_almost_equal(zi, zi_, 2)

    # Both expected-minimum estimators agree under the same seed.
    x_min, f_min = expected_minimum_random_sampling(res, random_state=1)
    x_min2, f_min2 = expected_minimum(res, random_state=1)
    assert x_min == x_min2
    assert f_min == f_min2

    plots.plot_convergence(res)
    plots.plot_evaluations(res)
    plots.plot_objective(res)
    plots.plot_objective(res, dimensions=["a", "b", "c", "d", "e", "f"])
    plots.plot_objective(res, minimum="expected_minimum_random")
    plots.plot_objective(
        res, sample_source="expected_minimum_random", n_minimum_search=10000
    )
    plots.plot_objective(res, sample_source="result")
    plots.plot_regret(res)
    plots.plot_objective_2D(res, 0, 4)
    plots.plot_histogram(res, 0, 4)
    # TODO: Compare plots to known good results?
    # Look into how matplotlib does this.
@pytest.mark.hps_slow_test
def test_plots_work_without_cat():
    """Basic smoke tests to make sure plotting doesn't crash
    (all-integer search space, no categorical dimensions)."""
    SPACE = [
        Integer(1, 20, name="max_depth"),
        Integer(2, 100, name="min_samples_split"),
        Integer(5, 30, name="min_samples_leaf"),
        Integer(1, 30, name="max_features"),
    ]

    def objective(params):
        clf = DecisionTreeClassifier(
            random_state=3,
            **{dim.name: val for dim, val in zip(SPACE, params) if dim.name != "dummy"}
        )
        # `return_X_y` must be passed by keyword: positional booleans were
        # deprecated in scikit-learn 0.23 and removed in 1.1.
        return -np.mean(cross_val_score(clf, *load_breast_cancer(return_X_y=True)))

    res = gp_minimize(objective, SPACE, n_calls=10, random_state=3)

    plots.plot_convergence(res)
    plots.plot_evaluations(res)
    plots.plot_objective(res)
    plots.plot_objective(res, minimum="expected_minimum")
    plots.plot_objective(res, sample_source="expected_minimum", n_minimum_search=10)
    plots.plot_objective(res, sample_source="result")
    plots.plot_regret(res)
    # TODO: Compare plots to known good results?
    # Look into how matplotlib does this.
@pytest.mark.hps
def test_evaluate_min_params():
    """_evaluate_min_params resolves each supported `params` option to the
    corresponding point: 'result', an explicit point, 'expected_minimum'
    and 'expected_minimum_random'."""
    res = gp_minimize(
        bench3,
        [(-2.0, 2.0)],
        x0=[0.0],
        noise=1e-8,
        n_calls=8,
        # `n_random_starts` is deprecated (renamed in v0.4); it simply set
        # n_initial_points, so this is the equivalent non-warning spelling.
        n_initial_points=3,
        random_state=1,
    )

    x_min, f_min = expected_minimum(res, random_state=1)
    x_min2, f_min2 = expected_minimum_random_sampling(
        res, n_random_starts=1000, random_state=1
    )
    plots.plot_gaussian_process(res)

    assert _evaluate_min_params(res, params="result") == res.x
    assert _evaluate_min_params(res, params=[1.0]) == [1.0]
    assert _evaluate_min_params(res, params="expected_minimum", random_state=1) == x_min
    assert (
        _evaluate_min_params(
            res, params="expected_minimum", n_minimum_search=20, random_state=1
        )
        == x_min
    )
    assert (
        _evaluate_min_params(
            res, params="expected_minimum_random", n_minimum_search=1000, random_state=1
        )
        == x_min2
    )
def test_names_dimensions():
    """Smoke test: plot_objective works on a freshly built ask/tell result."""

    def objective(x, noise_level=0.1):
        return (
            np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))
            + np.random.randn() * noise_level
        )

    opt = Optimizer([(-2.0, 2.0)], n_initial_points=2)

    for _ in range(3):
        suggestion = opt.ask()
        res = opt.tell(suggestion, objective(suggestion))

    plots.plot_objective(res)
| 6,050 | 31.88587 | 88 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_benchmarks.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
from deephyper.skopt.benchmarks import branin
from deephyper.skopt.benchmarks import hart6
@pytest.mark.hps
def test_branin():
    """branin evaluates to its known minimum at all three minimizers."""
    minimizers = np.asarray([(-np.pi, 12.275), (+np.pi, 2.275), (9.42478, 2.475)])
    values = np.asarray([branin(xstar) for xstar in minimizers])
    expected = np.array([0.397887] * minimizers.shape[0])
    assert_array_almost_equal(values, expected)


@pytest.mark.hps
def test_hartmann6():
    """hart6 evaluates to its known minimum at the global minimizer."""
    assert_almost_equal(
        hart6((0.20169, 0.15001, 0.476874, 0.275332, 0.311652, 0.6573)),
        -3.32237,
        decimal=5,
    )
| 700 | 25.961538 | 78 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_sampler.py | import pytest
import numbers
import numpy as np
import os
import yaml
from tempfile import NamedTemporaryFile
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from scipy import spatial
from deephyper.skopt import Optimizer
from deephyper.skopt.space import Space
from deephyper.skopt.space import Real
from deephyper.skopt.space import Integer
from deephyper.skopt.space import Categorical
from deephyper.skopt.space import check_dimension as space_check_dimension
from deephyper.skopt.sampler.sobol import _bit_lo0, _bit_hi1
from deephyper.skopt.sampler.halton import _van_der_corput_samples, _create_primes
from deephyper.skopt.sampler import Hammersly, Halton, Lhs, Sobol, Grid
from deephyper.skopt.sampler import InitialPointGenerator
from deephyper.skopt.sampler.grid import _create_uniform_grid_include_border
from deephyper.skopt.sampler.grid import _create_uniform_grid_exclude_border
from deephyper.skopt.sampler.grid import _quadrature_combine
from deephyper.skopt.sampler.grid import _create_uniform_grid_only_border
from deephyper.skopt.utils import cook_initial_point_generator
LHS_TYPE = ["classic", "centered"]
CRITERION = ["maximin", "ratio", "correlation", None]
SAMPLER = ["lhs", "halton", "sobol", "hammersly", "grid"]


@pytest.mark.hps
def test_lhs_centered():
    """Centered LHS: 3 samples on the 3-D unit cube sum to exactly 4.5
    (each coordinate is a stratum midpoint 1/6, 3/6 or 5/6)."""
    sampler = Lhs(lhs_type="centered")
    samples = sampler.generate([(0.0, 1.0)] * 3, 3)
    assert_almost_equal(np.sum(samples), 4.5)
@pytest.mark.parametrize("sampler", SAMPLER)
def test_sampler(sampler):
    """Every cooked sampler yields the requested number of points and stays in-domain.

    Fix: the parametrized argument was misspelled ``samlper``; renamed to
    ``sampler`` in both the decorator and the signature (internal only, no
    caller depends on the name).
    """
    generator = cook_initial_point_generator(sampler)
    assert isinstance(generator, InitialPointGenerator)

    # Requested count and dimensionality are honored.
    points = generator.generate([(0.0, 1.0)] * 2, 200)
    assert len(points) == 200
    assert len(points[0]) == 2

    # Categorical-only space.
    points = generator.generate([("a", "b", "c")], 3)
    assert points[0][0] in ["a", "b", "c"]

    # Mixed categorical / integer space, for one and several samples.
    for count in (1, 3):
        points = generator.generate([("a", "b", "c"), (0, 1)], count)
        assert points[0][0] in ["a", "b", "c"]
        assert points[0][1] in [0, 1]
@pytest.mark.parametrize("lhs_type", LHS_TYPE)
@pytest.mark.parametrize("criterion", CRITERION)
def test_lhs_criterion(lhs_type, criterion):
    """Every LHS type/criterion combination samples numeric and categorical spaces."""
    sampler = Lhs(lhs_type=lhs_type, criterion=criterion, iterations=100)

    points = sampler.generate([(0.0, 1.0)] * 2, 200)
    assert len(points) == 200
    assert len(points[0]) == 2

    points = sampler.generate([("a", "b", "c")], 3)
    assert points[0][0] in ["a", "b", "c"]

    for count in (1, 3):
        points = sampler.generate([("a", "b", "c"), (0, 1)], count)
        assert points[0][0] in ["a", "b", "c"]
        assert points[0][1] in [0, 1]
def test_lhs_pdist():
    """The maximin criterion strictly increases the minimum pairwise distance."""
    n_dim, n_samples = 2, 20

    baseline = Lhs()._lhs_normalized(n_dim, n_samples, 0)
    baseline_dists = spatial.distance.pdist(np.array(baseline), "euclidean")

    optimized = Lhs(criterion="maximin", iterations=100).generate(
        [(0.0, 1.0)] * n_dim, n_samples, random_state=0
    )
    optimized_dists = spatial.distance.pdist(np.array(optimized), "euclidean")

    assert np.min(optimized_dists) > np.min(baseline_dists)
@pytest.mark.parametrize("criterion", CRITERION)
def test_lhs_random_state(criterion):
    """Identical seeds must produce identical LHS designs, with and without a criterion."""
    n_dim, n_samples = 2, 20

    plain = Lhs()
    assert_array_equal(
        plain._lhs_normalized(n_dim, n_samples, 0),
        plain._lhs_normalized(n_dim, n_samples, 0),
    )

    sampler = Lhs(criterion=criterion, iterations=100)
    bounds = [(0.0, 1.0)] * n_dim
    first = sampler.generate(bounds, n_samples, random_state=0)
    second = sampler.generate(bounds, n_samples, random_state=0)
    assert_array_equal(first, second)
@pytest.mark.hps
def test_bit():
    """Check lowest-zero-bit and highest-one-bit positions for integers 1..10."""
    lo0_expected = [2, 1, 3, 1, 2, 1, 4, 1, 2, 1]
    for value, expected in zip(range(1, 11), lo0_expected):
        assert _bit_lo0(value) == expected

    hi1_expected = [1, 2, 2, 3, 3, 3, 3, 4, 4, 4]
    for value, expected in zip(range(1, 11), hi1_expected):
        assert _bit_hi1(value) == expected
@pytest.mark.hps
def test_sobol():
    """The first seven unscrambled 3-d Sobol points match the canonical sequence."""
    expected = [
        [0.0, 0.0, 0.0],
        [0.5, 0.5, 0.5],
        [0.75, 0.25, 0.75],
        [0.25, 0.75, 0.25],
        [0.375, 0.375, 0.625],
        [0.875, 0.875, 0.125],
        [0.625, 0.125, 0.375],
    ]
    sobol = Sobol()
    for index, point in enumerate(expected):
        x, seed = sobol._sobol(3, index)
        assert_array_equal(x, point)
@pytest.mark.hps
def test_generate():
    """Sobol.generate returns the canonical sequence and honors set_params."""
    sobol = Sobol(randomize=False)
    points = np.array(sobol.generate([(0.0, 1.0)] * 3, 4))

    expected = [
        [0.0, 0.0, 0.0],
        [0.5, 0.5, 0.5],
        [0.75, 0.25, 0.75],
        [0.25, 0.75, 0.25],
    ]
    for row, want in enumerate(expected):
        assert_array_equal(points[row, :], want)

    sobol.set_params(skip=2)
    assert sobol.skip == 2
    assert isinstance(sobol, InitialPointGenerator)
@pytest.mark.hps
def test_van_der_corput():
    """Van der Corput samples match hand-computed values in bases 10 and 2."""
    expected_base10 = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.01, 0.11]
    assert_array_equal(
        _van_der_corput_samples(range(12), number_base=10), expected_base10
    )

    expected_base2 = [0.0, 0.5, 0.25, 0.75, 0.125, 0.625, 0.375, 0.875, 0.0625]
    assert_array_equal(_van_der_corput_samples(range(9), number_base=2), expected_base2)
@pytest.mark.hps
def test_halton():
    """Halton samples match van der Corput in 1-d and known values in 2-d."""
    halton = Halton()
    one_d = halton.generate([(0.0, 1.0)], 9)
    assert_array_almost_equal(
        np.array(one_d).flatten(), _van_der_corput_samples(range(9), number_base=2)
    )

    halton = Halton()
    two_d = halton.generate([(0.0, 1.0)] * 2, 6)
    expected = np.array(
        [
            [0, 0],
            [1 / 2, 1 / 3],
            [1 / 4, 2 / 3],
            [3 / 4, 1 / 9],
            [1 / 8, 4 / 9],
            [5 / 8, 7 / 9],
        ]
    )
    assert_array_almost_equal(two_d, expected)

    # With a bounded random skip, the sequence is a shifted version of the above.
    halton = Halton(min_skip=0, max_skip=3)
    skipped = halton.generate([(0.0, 1.0)] * 2, 4, random_state=12345)
    assert_array_almost_equal(skipped, expected[2:])

    samples = halton.generate([(0.0, 1.0)] * 2, 200)
    assert len(samples) == 200
    assert len(samples[0]) == 2
@pytest.mark.hps
def test_hammersly():
    """Hammersley points match known 2-d values and honor the sample count."""
    hammersly = Hammersly()
    points = hammersly.generate([(0.0, 1.0)] * 2, 4)
    expected = np.array([[0, 0], [1 / 2, 0.25], [1 / 4, 0.5], [3 / 4, 0.75]])
    assert_almost_equal(points, expected)

    points = hammersly.generate([(0.0, 1.0)] * 2, 200)
    assert len(points) == 200
    assert len(points[0]) == 2
@pytest.mark.hps
def test_primes():
    """_create_primes returns every prime up to and including the bound."""
    cases = [
        (1, []),
        (2, [2]),
        (3, [2, 3]),
        (20, [2, 3, 5, 7, 11, 13, 17, 19]),
    ]
    for bound, primes in cases:
        assert_array_equal(_create_primes(bound), primes)
@pytest.mark.hps
def test_quadrature_combine():
    """Combining a scalar grid with a 2-d grid yields their cartesian product."""
    scalars = [1, 2]
    pairs = [[4, 4], [5, 6]]
    expected = [[1, 4, 4], [1, 5, 6], [2, 4, 4], [2, 5, 6]]
    assert_array_equal(_quadrature_combine([scalars, pairs]), expected)
@pytest.mark.hps
def test_uniform_grid():
    """Check the three 1-d/2-d uniform-grid builders against hand-computed grids.

    ``exclude_border`` places n points strictly inside (0, 1);
    ``include_border`` spaces n points from 0 to 1 inclusive;
    ``only_border`` keeps only points lying on the hypercube boundary.
    """
    # --- 1-d grids with 2, 3 and 5 points per dimension ---
    x = _create_uniform_grid_exclude_border(1, 2)
    assert_array_equal(x, [[1.0 / 3.0], [2.0 / 3.0]])
    x = _create_uniform_grid_include_border(1, 2)
    assert_array_equal(x, [[0.0], [1.0]])
    x = _create_uniform_grid_only_border(1, 2)
    assert_array_equal(x, [[0.0], [1.0]])
    x = _create_uniform_grid_exclude_border(1, 3)
    assert_array_equal(x, [[1.0 / 4.0], [2.0 / 4.0], [3.0 / 4.0]])
    x = _create_uniform_grid_include_border(1, 3)
    assert_array_equal(x, [[0.0 / 2.0], [1.0 / 2.0], [2.0 / 2.0]])
    x = _create_uniform_grid_only_border(1, 3)
    assert_array_equal(x, [[0.0 / 2.0], [1.0 / 2.0], [2.0 / 2.0]])
    x = _create_uniform_grid_exclude_border(1, 5)
    assert_array_equal(
        x, [[1.0 / 6.0], [2.0 / 6.0], [3.0 / 6.0], [4.0 / 6.0], [5.0 / 6.0]]
    )
    x = _create_uniform_grid_include_border(1, 5)
    assert_array_equal(
        x, [[0.0 / 4.0], [1.0 / 4.0], [2.0 / 4.0], [3.0 / 4.0], [4.0 / 4.0]]
    )
    x = _create_uniform_grid_only_border(1, 5)
    assert_array_equal(
        x, [[0.0 / 4.0], [1.0 / 4.0], [2.0 / 4.0], [3.0 / 4.0], [4.0 / 4.0]]
    )
    # --- 2-d grids: cartesian products of the 1-d grids ---
    x = _create_uniform_grid_exclude_border(2, 2)
    assert_array_equal(
        x,
        [
            [1.0 / 3.0, 1.0 / 3.0],
            [1.0 / 3.0, 2.0 / 3.0],
            [2.0 / 3.0, 1.0 / 3.0],
            [2.0 / 3.0, 2.0 / 3.0],
        ],
    )
    x = _create_uniform_grid_include_border(2, 2)
    assert_array_equal(x, [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    x = _create_uniform_grid_only_border(2, 3)
    assert_array_equal(
        x, [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [1.0, 0.0], [1.0, 0.5], [1.0, 1.0]]
    )
    # --- invalid arguments (non-positive dims / too few points) must raise ---
    assert_raises(AssertionError, _create_uniform_grid_exclude_border, 1, 0)
    assert_raises(AssertionError, _create_uniform_grid_exclude_border, 0, 1)
    assert_raises(AssertionError, _create_uniform_grid_include_border, 1, 0)
    assert_raises(AssertionError, _create_uniform_grid_include_border, 0, 1)
    assert_raises(AssertionError, _create_uniform_grid_only_border, 1, 1)
    assert_raises(AssertionError, _create_uniform_grid_only_border, 0, 2)
@pytest.mark.hps
def test_grid():
    """Grid sampler honors the requested sample count for several configurations."""
    bounds = [(0.0, 1.0)] * 2
    configurations = [
        Grid(),
        Grid(border="include"),
        Grid(use_full_layout=False),
        Grid(use_full_layout=True, append_border="include"),
    ]
    for grid in configurations:
        samples = grid.generate(bounds, 200)
        assert len(samples) == 200
        assert len(samples[0]) == 2
| 10,894 | 25.444175 | 83 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_dummy_opt.py | import pytest
from deephyper.skopt import dummy_minimize
from deephyper.skopt.benchmarks import bench1
from deephyper.skopt.benchmarks import bench2
from deephyper.skopt.benchmarks import bench3
def check_minimize(func, y_opt, dimensions, margin, n_calls):
    """Run dummy_minimize and require the best value to be within margin of y_opt."""
    result = dummy_minimize(func, dimensions, n_calls=n_calls, random_state=1)
    assert result.fun < y_opt + margin
@pytest.mark.hps_slow_test
def test_dummy_minimize():
    """Random search gets close to the optimum on three 1-d benchmarks."""
    cases = [
        (bench1, 0.0, [(-2.0, 2.0)]),
        (bench2, -5, [(-6.0, 6.0)]),
        (bench3, -0.9, [(-2.0, 2.0)]),
    ]
    for func, y_opt, dims in cases:
        check_minimize(func, y_opt, dims, 0.05, 100)
@pytest.mark.hps
def test_dummy_categorical_integer():
    """A single-choice categorical dimension is returned verbatim."""

    def constant_objective(params):
        return 0

    dims = [[1]]
    result = dummy_minimize(constant_objective, dims, n_calls=1, random_state=1)
    assert result.x_iters[0][0] == dims[0][0]
| 812 | 28.035714 | 73 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_forest_opt.py | from functools import partial
from sklearn.tree import DecisionTreeClassifier
import pytest
from deephyper.skopt import gbrt_minimize
from deephyper.skopt import forest_minimize
from deephyper.skopt.benchmarks import bench1
from deephyper.skopt.benchmarks import bench2
from deephyper.skopt.benchmarks import bench3
from deephyper.skopt.benchmarks import bench4
# (name, minimizer) pairs covering both forest variants and gradient-boosted trees.
MINIMIZERS = [
    ("ET", partial(forest_minimize, base_estimator="ET")),
    ("RF", partial(forest_minimize, base_estimator="RF")),
    ("gbrt", gbrt_minimize),
]
@pytest.mark.hps
@pytest.mark.parametrize("base_estimator", [42, DecisionTreeClassifier()])
def test_forest_minimize_api(base_estimator):
    """forest_minimize rejects unknown estimator names and non-regressor objects."""
    # An unknown string value is refused.
    with pytest.raises(ValueError):
        forest_minimize(lambda x: 0.0, [], base_estimator="abc")
    # An int or a classifier is neither a valid name nor a regressor.
    with pytest.raises(ValueError):
        forest_minimize(lambda x: 0.0, [], base_estimator=base_estimator)
def check_minimize(
    minimizer,
    func,
    y_opt,
    dimensions,
    margin,
    n_calls,
    n_initial_points=10,
    x0=None,
    n_jobs=1,
):
    """Run *minimizer* under three seeds; each best value must be within margin of y_opt."""
    for seed in range(3):
        result = minimizer(
            func,
            dimensions,
            n_calls=n_calls,
            random_state=seed,
            n_initial_points=n_initial_points,
            x0=x0,
            n_jobs=n_jobs,
        )
        assert result.fun < y_opt + margin
@pytest.mark.hps_slow_test
@pytest.mark.parametrize("name, minimizer", MINIMIZERS)
def test_tree_based_minimize(name, minimizer):
    """Each tree-based minimizer converges on four standard benchmarks."""
    check_minimize(minimizer, bench1, 0.05, [(-2.0, 2.0)], 0.05, 25, 5)
    # XXX: We supply points at the edge of the search
    # space as an initial point to the minimizer.
    # This makes sure that the RF model can find the minimum even
    # if all the randomly sampled points are one side of the
    # the minimum, since for a decision tree any point greater than
    # max(sampled_points) would give a constant value.
    X0 = [[-5.6], [-5.8], [5.8], [5.6]]
    check_minimize(minimizer, bench2, -4.7, [(-6.0, 6.0)], 0.1, 20, 10, X0)
    check_minimize(minimizer, bench3, -0.4, [(-2.0, 2.0)], 0.05, 10, 5)
    check_minimize(minimizer, bench4, 1.0, [("-2", "-1", "0", "1", "2")], 0.05, 5, 1)
@pytest.mark.hps_slow_test
def test_tree_based_minimize_n_jobs():
    """forest_minimize also converges when evaluating with two parallel jobs."""
    check_minimize(forest_minimize, bench1, 0.05, [(-2.0, 2.0)], 0.05, 25, 5, n_jobs=2)
@pytest.mark.hps
def test_categorical_integer():
    """A single-choice categorical dimension is returned verbatim."""

    def constant_objective(params):
        return 0

    dims = [[1]]
    result = forest_minimize(
        constant_objective, dims, n_calls=1, random_state=1, n_initial_points=1
    )
    assert result.x_iters[0][0] == dims[0][0]
| 2,602 | 29.267442 | 87 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_transformers.py | import pytest
import numbers
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises_regex
from deephyper.skopt.space import LogN, Normalize
from deephyper.skopt.space.transformers import StringEncoder, LabelEncoder, Identity
@pytest.mark.hps
def test_logn2_integer():
    """A base-2 log transform round-trips every integer in 2..30."""
    transformer = LogN(2)
    for value in range(2, 31):
        recovered = transformer.inverse_transform(transformer.transform(value))
        assert_array_equal(int(np.round(recovered)), value)
@pytest.mark.hps
def test_logn10_integer():
    """A base-10 log transform round-trips every integer in 2..30.

    Bug fix: this test previously instantiated ``LogN(2)`` (copy-paste from
    ``test_logn2_integer``) and therefore never exercised base 10.
    """
    transformer = LogN(10)
    for value in range(2, 31):
        recovered = transformer.inverse_transform(transformer.transform(value))
        assert_array_equal(int(np.round(recovered)), value)
@pytest.mark.hps
def test_integer_encoder():
    """LabelEncoder maps values to 0..n-1 and back, fit explicitly or via __init__."""
    codes = [0, 1, 2]
    for values in ([1, 5, 9], ["a", "b", "c"]):
        fitted = LabelEncoder()
        fitted.fit(values)
        assert_array_equal(fitted.transform(values), codes)
        assert_array_equal(fitted.inverse_transform(codes), values)

        constructed = LabelEncoder(values)
        assert_array_equal(constructed.transform(values), codes)
        assert_array_equal(constructed.inverse_transform(codes), values)
@pytest.mark.hps
def test_string_encoder():
    """StringEncoder stringifies values on transform and recovers them on inverse."""
    encoder = StringEncoder()
    cases = [
        ([1, 5, 9], ["1", "5", "9"]),
        (["a", True, 1], ["a", "True", "1"]),
        (["a", "b", "c"], ["a", "b", "c"]),
    ]
    for values, encoded in cases:
        encoder.fit(values)
        assert_array_equal(encoder.transform(values), encoded)
        assert_array_equal(encoder.inverse_transform(encoded), values)
@pytest.mark.hps
def test_identity_encoder():
    """Identity leaves every input unchanged in both directions."""
    encoder = Identity()
    for values in (
        [1, 5, 9, 9, 5, 1],
        ["a", True, 1, "a", True, 1],
        ["a", "b", "c", "a", "b", "c"],
    ):
        encoder.fit(values)
        assert_array_equal(encoder.transform(values), values)
        assert_array_equal(encoder.inverse_transform(values), values)
@pytest.mark.hps
def test_normalize_integer():
    """Integer Normalize snaps values near the bounds and rejects values too far out."""
    transformer = Normalize(1, 20, is_int=True)

    # Within rounding distance of a bound -> clipped into the unit interval.
    for value, expected in [(19.8, 1.0), (20.2, 1.0), (1.2, 0.0), (0.9, 0.0)]:
        assert transformer.transform(value) == expected
    # More than half a unit outside -> rejected.
    for value in (20.6, 0.4):
        assert_raises(ValueError, transformer.transform, value)

    # inverse_transform rounds back onto the integer bounds.
    assert transformer.inverse_transform(0.99) == 20
    assert transformer.inverse_transform(0.01) == 1
    assert_raises(ValueError, transformer.inverse_transform, 1.0 + 1e-6)
    assert_raises(ValueError, transformer.transform, 0.0 - 1e-6)

    zero_based = Normalize(0, 20, is_int=True)
    assert zero_based.transform(-0.2) == 0.0
    assert_raises(ValueError, zero_based.transform, -0.6)
@pytest.mark.hps
def test_normalize():
    """Continuous Normalize maps the bounds to [0, 1] exactly and rejects values outside."""
    transformer = Normalize(1, 20, is_int=False)
    assert transformer.transform(20.0) == 1.0
    assert transformer.transform(1.0) == 0.0
    # NOTE(review): the last transform check mirrors the integer test and may
    # have been intended as inverse_transform(0.0 - 1e-6) — confirm.
    for value in (20.0 + 1e-6, 1.0 - 1e-6, 0.0 - 1e-6):
        assert_raises(ValueError, transformer.transform, value)
    assert_raises(ValueError, transformer.inverse_transform, 1.0 + 1e-6)
| 4,002 | 32.082645 | 84 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_acquisition.py | import numpy as np
import pytest
from scipy import optimize
from sklearn.multioutput import MultiOutputRegressor
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from deephyper.skopt.acquisition import _gaussian_acquisition
from deephyper.skopt.acquisition import gaussian_acquisition_1D
from deephyper.skopt.acquisition import gaussian_ei
from deephyper.skopt.acquisition import gaussian_lcb
from deephyper.skopt.acquisition import gaussian_pi
from deephyper.skopt.learning import GaussianProcessRegressor
from deephyper.skopt.learning.gaussian_process.kernels import Matern
from deephyper.skopt.learning.gaussian_process.kernels import WhiteKernel
from deephyper.skopt.space import Space
from deephyper.skopt.utils import cook_estimator
class ConstSurrogate:
    """Surrogate stub predicting a constant mean of 0 and a std of 1 per row."""

    def predict(self, X, return_std=True):
        n_rows = np.array(X).shape[0]
        return np.zeros(n_rows), np.ones(n_rows)
# This is used to test that given constant acquisition values at
# different points, acquisition functions "EIps" and "PIps"
# prefer candidate points that take lesser time.
# The second estimator mimics the GP regressor that is fit on
# the log of the input.
class ConstantGPRSurrogate(object):
    """Two-output surrogate stub used by the per-second acquisition tests."""

    def __init__(self, space):
        # Search space handed to cook_estimator when fit() builds the GP.
        self.space = space

    def fit(self, X, y):
        """
        The first estimator returns a constant value.
        The second estimator is a gaussian process regressor that
        models the logarithm of the time.
        """
        # NOTE: ``y`` is unused; the time model is fit on log(X) directly.
        X = np.array(X)
        gpr = cook_estimator("GP", self.space, normalize_y=False)
        gpr.fit(X, np.log(np.ravel(X)))
        self.estimators_ = []
        self.estimators_.append(ConstSurrogate())
        self.estimators_.append(gpr)
        return self
@pytest.mark.hps
def test_acquisition_ei_correctness():
    """EI for a constant N(0,1) surrogate at y_opt=-0.5 equals 0.1977966 everywhere."""
    points = 10 * np.ones((4, 2))
    values = gaussian_ei(points, ConstSurrogate(), -0.5, xi=0.0)
    assert_array_almost_equal(values, [0.1977966] * 4)
@pytest.mark.hps
def test_acquisition_pi_correctness():
    """PI for a constant N(0,1) surrogate at y_opt=-0.5 equals 0.308538 everywhere."""
    points = 10 * np.ones((4, 2))
    values = gaussian_pi(points, ConstSurrogate(), -0.5, xi=0.0)
    assert_array_almost_equal(values, [0.308538] * 4)
@pytest.mark.hps
def test_acquisition_variance_correctness():
    """With kappa='inf', LCB reduces to the negative predicted std (-1 here)."""
    points = 10 * np.ones((4, 2))
    values = gaussian_lcb(points, ConstSurrogate(), kappa="inf")
    assert_array_almost_equal(values, [-1.0] * 4)
@pytest.mark.hps
def test_acquisition_lcb_correctness():
    """LCB with kappa=0.3 equals mean - 0.3*std = -0.3 for the constant surrogate."""
    points = 10 * np.ones((4, 2))
    values = gaussian_lcb(points, ConstSurrogate(), kappa=0.3)
    assert_array_almost_equal(values, [-0.3] * 4)
@pytest.mark.hps
def test_acquisition_api():
    """Acquisition functions return one value per row and reject 1-d candidate arrays."""
    rng = np.random.RandomState(0)
    X, y = rng.randn(10, 2), rng.randn(10)
    model = GaussianProcessRegressor()
    model.fit(X, y)

    for acquisition in (gaussian_ei, gaussian_lcb, gaussian_pi):
        assert_array_equal(acquisition(X, model).shape, 10)
        assert_raises(ValueError, acquisition, rng.rand(10), model)
def check_gradient_correctness(X_new, model, acq_func, y_opt):
    """Compare the analytic acquisition gradient against a finite-difference estimate.

    Parameters
    ----------
    X_new : array of shape (n_dims,)
        Point at which the gradient is checked.
    model : fitted estimator
        Surrogate passed to ``gaussian_acquisition_1D``.
    acq_func : str
        Acquisition function name (e.g. "LCB", "PI", "EI").
    y_opt : float
        Best observed objective value.
    """
    analytic_grad = gaussian_acquisition_1D(X_new, model, y_opt, acq_func)[1]

    # Idiom fix (PEP 8 / E731): use a def instead of assigning a lambda to a name.
    def acquisition_value(x):
        return gaussian_acquisition_1D(x, model, y_opt, acq_func=acq_func)[0]

    num_grad = optimize.approx_fprime(X_new, acquisition_value, 1e-5)
    assert_array_almost_equal(analytic_grad, num_grad, 3)
@pytest.mark.hps
def test_acquisition_gradient():
    """Analytic gradients of LCB/PI/EI agree with numeric ones for a Matern+White GP."""
    rng = np.random.RandomState(0)
    X, y = rng.randn(20, 5), rng.randn(20)
    X_new = rng.randn(5)

    gpr = GaussianProcessRegressor(kernel=Matern() + WhiteKernel())
    gpr.fit(X, y)

    for acq_func in ("LCB", "PI", "EI"):
        check_gradient_correctness(X_new, gpr, acq_func, np.max(y))
@pytest.mark.hps
def test_acquisition_gradient_cookbook():
    """Gradient check also passes for the cookbook-configured GP estimator."""
    rng = np.random.RandomState(0)
    X, y = rng.randn(20, 5), rng.randn(20)
    X_new = rng.randn(5)

    gpr = cook_estimator("GP", Space(((-5.0, 5.0),)), random_state=0)
    gpr.fit(X, y)

    for acq_func in ("LCB", "PI", "EI"):
        check_gradient_correctness(X_new, gpr, acq_func, np.max(y))
@pytest.mark.hps
@pytest.mark.parametrize("acq_func", ["EIps", "PIps"])
def test_acquisition_per_second(acq_func):
    """Per-second acquisitions prefer faster points and divide out predicted time."""
    X = np.reshape(np.linspace(4.0, 8.0, 10), (-1, 1))
    y = np.vstack((np.ones(10), np.ravel(np.log(X)))).T
    cgpr = ConstantGPRSurrogate(Space(((1.0, 9.0),)))
    cgpr.fit(X, y)

    # With a constant objective prediction, cheaper (faster) candidates score higher.
    X_pred = np.reshape(np.linspace(1.0, 11.0, 20), (-1, 1))
    vals = _gaussian_acquisition(X_pred, cgpr, y_opt=1.0, acq_func=acq_func)
    for fast, slow in zip(range(5), range(1, 6)):
        assert vals[slow] > vals[fast]

    # The per-second value equals the plain value divided by the predicted time (= X).
    acq_wo_time = _gaussian_acquisition(
        X, cgpr.estimators_[0], y_opt=1.2, acq_func=acq_func[:2]
    )
    acq_with_time = _gaussian_acquisition(X, cgpr, y_opt=1.2, acq_func=acq_func)
    assert_array_almost_equal(acq_wo_time / acq_with_time, np.ravel(X), 2)
def test_gaussian_acquisition_check_inputs():
    """_gaussian_acquisition rejects 1-d candidate arrays with a clear message."""
    model = ConstantGPRSurrogate(Space(((1.0, 9.0),)))
    with pytest.raises(ValueError) as excinfo:
        _gaussian_acquisition(np.arange(1, 5), model)
    assert "it must be 2-dimensional" in excinfo.value.args[0]
@pytest.mark.hps
@pytest.mark.parametrize("acq_func", ["EIps", "PIps"])
def test_acquisition_per_second_gradient(acq_func):
    """Per-second acquisition gradients agree with finite-difference estimates."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    # Keep the second output large so mean_grad and std_grad stay away from zero.
    y = np.vstack((X[:, 0], np.abs(X[:, 0]) ** 3)).T

    for X_new in [rng.randn(10), rng.randn(10)]:
        gpr = cook_estimator("GP", Space(((-5.0, 5.0),)), random_state=0)
        multi_output = MultiOutputRegressor(gpr)
        multi_output.fit(X, y)
        check_gradient_correctness(X_new, multi_output, acq_func, 1.5)
| 5,933 | 32.150838 | 80 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_callbacks.py | import pytest
import numpy as np
import os
from collections import namedtuple
from deephyper.skopt import dummy_minimize
from deephyper.skopt import gp_minimize
from deephyper.skopt.benchmarks import bench1
from deephyper.skopt.benchmarks import bench3
from deephyper.skopt.callbacks import TimerCallback
from deephyper.skopt.callbacks import DeltaYStopper
from deephyper.skopt.callbacks import DeadlineStopper
from deephyper.skopt.callbacks import CheckpointSaver
from deephyper.skopt.callbacks import HollowIterationsStopper
from deephyper.skopt.callbacks import ThresholdStopper
from deephyper.skopt.utils import load
@pytest.mark.hps
def test_timer_callback():
    """TimerCallback records a positive duration for each optimizer iteration."""
    timer = TimerCallback()
    dummy_minimize(bench1, [(-1.0, 1.0)], callback=timer, n_calls=10)
    assert len(timer.iter_time) <= 10
    assert sum(timer.iter_time) > 0.0
@pytest.mark.hps
def test_deltay_stopper():
    """DeltaYStopper fires when the n_best best values are within delta of each other."""
    stopper = DeltaYStopper(0.2, 3)
    Result = namedtuple("Result", ["func_vals"])

    assert stopper(Result([0, 1, 2, 3, 4, 0.1, 0.19]))
    assert not stopper(Result([0, 1, 2, 3, 4, 0.1]))
    # Fewer results than n_best: decision deferred.
    assert stopper(Result([0, 1])) is None
@pytest.mark.hps
def test_threshold_stopper():
    """ThresholdStopper fires once any observed value reaches the threshold."""
    stopper = ThresholdStopper(3.0)
    Result = namedtuple("Result", ["func_vals"])

    assert not stopper(Result([3.1, 4, 4.6, 100]))
    assert stopper(Result([3.0, 3, 2.9, 0, 0.0]))
@pytest.mark.hps
def test_deadline_stopper():
    """DeadlineStopper halts after one iteration on a tiny budget, never on a large one."""
    tight = DeadlineStopper(0.0001)
    gp_minimize(bench3, [(-1.0, 1.0)], callback=tight, n_calls=10, random_state=1)
    assert len(tight.iter_time) == 1
    assert np.sum(tight.iter_time) > tight.total_time

    generous = DeadlineStopper(60)
    gp_minimize(bench3, [(-1.0, 1.0)], callback=generous, n_calls=10, random_state=1)
    assert len(generous.iter_time) == 10
    assert np.sum(generous.iter_time) < generous.total_time
@pytest.mark.hps
def test_hollow_iterations_stopper():
    """HollowIterationsStopper fires after n_iterations without sufficient improvement."""
    Result = namedtuple("Result", ["func_vals"])
    hollow = HollowIterationsStopper(3, 0)
    # will run at least n_iterations + 1 times
    assert not hollow(Result([10, 11, 12]))
    assert hollow(Result([10, 11, 12, 13]))
    # a tie is not enough
    assert hollow(Result([10, 11, 12, 10]))
    # every time we make a new min, we then have n_iterations rounds to beat it
    assert not hollow(Result([10, 9, 8, 7, 7, 7]))
    assert hollow(Result([10, 9, 8, 7, 7, 7, 7]))
    # with a nonzero threshold, improvements must beat the best by at least 1.1
    hollow = HollowIterationsStopper(3, 1.1)
    assert not hollow(Result([10, 11, 12, 8.89]))
    assert hollow(Result([10, 11, 12, 8.9]))
    # individual improvement below threshold contribute
    assert hollow(Result([10, 9.9, 9.8, 9.7]))
    assert not hollow(Result([10, 9.5, 9, 8.5, 8, 7.5]))
    # end-to-end: larger thresholds cause earlier termination of gp_minimize
    hollow = HollowIterationsStopper(3, 0)
    result = gp_minimize(
        bench3, [(-1.0, 1.0)], callback=hollow, n_calls=100, random_state=1
    )
    assert len(result.func_vals) == 10
    hollow = HollowIterationsStopper(3, 0.1)
    result = gp_minimize(
        bench3, [(-1.0, 1.0)], callback=hollow, n_calls=100, random_state=1
    )
    assert len(result.func_vals) == 5
    hollow = HollowIterationsStopper(3, 0.2)
    result = gp_minimize(
        bench3, [(-1.0, 1.0)], callback=hollow, n_calls=100, random_state=1
    )
    assert len(result.func_vals) == 4
@pytest.mark.hps
def test_checkpoint_saver():
    """CheckpointSaver writes a checkpoint file that reloads to the same result.

    Robustness fix: cleanup now runs in a ``finally`` block, so a failing
    assertion no longer leaves ``test_checkpoint.pkl`` behind on disk.
    """
    checkpoint_path = "./test_checkpoint.pkl"
    if os.path.isfile(checkpoint_path):
        os.remove(checkpoint_path)
    try:
        checkpoint_saver = CheckpointSaver(checkpoint_path, compress=9)
        result = dummy_minimize(
            bench1, [(-1.0, 1.0)], callback=checkpoint_saver, n_calls=10
        )
        assert os.path.exists(checkpoint_path)
        assert load(checkpoint_path).x == result.x
    finally:
        if os.path.isfile(checkpoint_path):
            os.remove(checkpoint_path)
| 3,800 | 29.902439 | 85 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_searchcv.py | """Test scikit-optimize based implementation of hyperparameter
search with interface similar to those of GridSearchCV
"""
import pytest
from sklearn.datasets import load_iris, make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.base import clone
from sklearn.base import BaseEstimator
from scipy.stats import rankdata
import numpy as np
from numpy.testing import assert_array_equal
from deephyper.skopt.space import Real, Categorical, Integer
from deephyper.skopt import BayesSearchCV
def _fit_svc(n_jobs=1, n_points=1, cv=None):
    """
    Utility function to fit a larger classification task with SVC.

    Runs BayesSearchCV over an SVC twice with the same random state, checking
    both that the tuned model scores well and that the two runs agree.
    """
    X, y = make_classification(
        n_samples=1000,
        n_features=20,
        n_redundant=0,
        n_informative=18,
        random_state=1,
        n_clusters_per_class=1,
    )
    opt = BayesSearchCV(
        SVC(),
        {
            "C": Real(1e-3, 1e3, prior="log-uniform"),
            "gamma": Real(1e-3, 1e1, prior="log-uniform"),
            "degree": Integer(1, 3),
        },
        n_jobs=n_jobs,
        n_iter=11,
        n_points=n_points,
        cv=cv,
        random_state=42,
    )
    opt.fit(X, y)
    assert opt.score(X, y) > 0.9
    # A second, identically-seeded search must reproduce the same score.
    opt2 = BayesSearchCV(
        SVC(),
        {
            "C": Real(1e-3, 1e3, prior="log-uniform"),
            "gamma": Real(1e-3, 1e1, prior="log-uniform"),
            "degree": Integer(1, 3),
        },
        n_jobs=n_jobs,
        n_iter=11,
        n_points=n_points,
        cv=cv,
        random_state=42,
    )
    opt2.fit(X, y)
    assert opt.score(X, y) == opt2.score(X, y)
def test_raise_errors():
    """BayesSearchCV rejects empty and malformed search spaces at construction."""
    # An empty search space is invalid.
    with pytest.raises(ValueError):
        BayesSearchCV(SVC(), {})
    # A bare string is not a valid dimension definition.
    with pytest.raises(ValueError):
        BayesSearchCV(SVC(), {"C": "1 ... 100.0"})
    # A list mixing non-space entries is a type error.
    with pytest.raises(TypeError):
        BayesSearchCV(SVC(), ["C", (1.0, 1)])
@pytest.mark.parametrize("surrogate", ["gp", None])
@pytest.mark.parametrize("n_jobs", [1, -1])  # test sequential and parallel
@pytest.mark.parametrize("n_points", [1, 3])  # test query of multiple points
def test_searchcv_runs(surrogate, n_jobs, n_points, cv=None):
    """
    Test whether the cross validation search wrapper around sklearn
    models runs properly with available surrogates and with single
    or multiple workers and different number of parameter settings
    to ask from the optimizer in parallel.

    Parameters
    ----------
    * `surrogate` [str or None]:
        A class of the scikit-optimize surrogate used. None means
        to use default surrogate.
    * `n_jobs` [int]:
        Number of parallel processes to use for computations.
    """
    # NOTE(review): passing True positionally to load_iris is deprecated in
    # newer scikit-learn; prefer load_iris(return_X_y=True) — confirm version.
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )
    # create an instance of a surrogate if it is not a string
    if surrogate is not None:
        optimizer_kwargs = {"base_estimator": surrogate}
    else:
        optimizer_kwargs = None
    opt = BayesSearchCV(
        SVC(),
        {
            "C": Real(1e-6, 1e6, prior="log-uniform"),
            "gamma": Real(1e-6, 1e1, prior="log-uniform"),
            "degree": Integer(1, 8),
            "kernel": Categorical(["linear", "poly", "rbf"]),
        },
        n_jobs=n_jobs,
        n_iter=11,
        n_points=n_points,
        cv=cv,
        optimizer_kwargs=optimizer_kwargs,
    )
    opt.fit(X_train, y_train)
    # this normally does not hold only if something is wrong
    # with the optimization procedure as such
    assert opt.score(X_test, y_test) > 0.9
@pytest.mark.hps_slow_test
def test_parallel_cv():
    """The search also works when cross-validation runs with parallel jobs."""
    for n_jobs in (1, 2):
        _fit_svc(n_jobs=n_jobs, cv=5)
def test_searchcv_runs_multiple_subspaces():
    """
    Test whether the BayesSearchCV runs without exceptions when
    multiple subspaces are given.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )
    # used to try different model classes
    pipe = Pipeline([("model", SVC())])
    # single categorical value of 'model' parameter sets the model class
    lin_search = {
        "model": Categorical([LinearSVC()]),
        "model__C": Real(1e-6, 1e6, prior="log-uniform"),
    }
    dtc_search = {
        "model": Categorical([DecisionTreeClassifier()]),
        "model__max_depth": Integer(1, 32),
        "model__min_samples_split": Real(1e-3, 1.0, prior="log-uniform"),
    }
    svc_search = {
        "model": Categorical([SVC()]),
        "model__C": Real(1e-6, 1e6, prior="log-uniform"),
        "model__gamma": Real(1e-6, 1e1, prior="log-uniform"),
        "model__degree": Integer(1, 8),
        "model__kernel": Categorical(["linear", "poly", "rbf"]),
    }
    # subspaces are given as (space, n_iter) pairs or a bare space dict
    opt = BayesSearchCV(pipe, [(lin_search, 1), (dtc_search, 1), svc_search], n_iter=2)
    opt.fit(X_train, y_train)
    # test if all subspaces are explored
    total_evaluations = len(opt.cv_results_["mean_test_score"])
    assert total_evaluations == 1 + 1 + 2, "Not all spaces were explored!"
    assert len(opt.optimizer_results_) == 3
    assert isinstance(opt.optimizer_results_[0].x[0], LinearSVC)
    assert isinstance(opt.optimizer_results_[1].x[0], DecisionTreeClassifier)
    assert isinstance(opt.optimizer_results_[2].x[0], SVC)
def test_searchcv_sklearn_compatibility():
    """
    Test whether the BayesSearchCV is compatible with base sklearn methods
    such as clone, set_params, get_params.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )
    # used to try different model classes
    pipe = Pipeline([("model", SVC())])
    # single categorical value of 'model' parameter sets the model class
    lin_search = {
        "model": Categorical([LinearSVC()]),
        "model__C": Real(1e-6, 1e6, prior="log-uniform"),
    }
    dtc_search = {
        "model": Categorical([DecisionTreeClassifier()]),
        "model__max_depth": Integer(1, 32),
        "model__min_samples_split": Real(1e-3, 1.0, prior="log-uniform"),
    }
    svc_search = {
        "model": Categorical([SVC()]),
        "model__C": Real(1e-6, 1e6, prior="log-uniform"),
        "model__gamma": Real(1e-6, 1e1, prior="log-uniform"),
        "model__degree": Integer(1, 8),
        "model__kernel": Categorical(["linear", "poly", "rbf"]),
    }
    opt = BayesSearchCV(pipe, [(lin_search, 1), svc_search], n_iter=2)
    # a clone must carry the same parameter names and types
    opt_clone = clone(opt)
    params, params_clone = opt.get_params(), opt_clone.get_params()
    assert params.keys() == params_clone.keys()
    for param, param_clone in zip(params.items(), params_clone.items()):
        assert param[0] == param_clone[0]
        assert isinstance(param[1], type(param_clone[1]))
    # set_params on the original must not affect the clone
    opt.set_params(search_spaces=[(dtc_search, 1)])
    opt.fit(X_train, y_train)
    opt_clone.fit(X_train, y_train)
    total_evaluations = len(opt.cv_results_["mean_test_score"])
    total_evaluations_clone = len(opt_clone.cv_results_["mean_test_score"])
    # test if expected number of subspaces is explored
    assert total_evaluations == 1
    assert total_evaluations_clone == 1 + 2
def test_searchcv_reproducibility():
    """
    Test whether results of BayesSearchCV can be reproduced with a fixed
    random state.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )
    random_state = 42
    opt = BayesSearchCV(
        SVC(random_state=random_state),
        {
            "C": Real(1e-6, 1e6, prior="log-uniform"),
            "gamma": Real(1e-6, 1e1, prior="log-uniform"),
            "degree": Integer(1, 8),
            "kernel": Categorical(["linear", "poly", "rbf"]),
        },
        n_iter=11,
        random_state=random_state,
    )
    opt.fit(X_train, y_train)
    best_est = opt.best_estimator_
    optim_res = opt.optimizer_results_[0].x
    # a cloned, identically-seeded search must find the same best estimator
    opt2 = clone(opt).fit(X_train, y_train)
    best_est2 = opt2.best_estimator_
    optim_res2 = opt2.optimizer_results_[0].x
    assert getattr(best_est, "C") == getattr(best_est2, "C")
    assert getattr(best_est, "gamma") == getattr(best_est2, "gamma")
    assert getattr(best_est, "degree") == getattr(best_est2, "degree")
    assert getattr(best_est, "kernel") == getattr(best_est2, "kernel")
    # dict is sorted by alphabet, so x = [C, degree, gamma, kernel]
    assert optim_res[0] == getattr(best_est, "C")
    assert optim_res[2] == getattr(best_est, "gamma")
    assert optim_res[1] == getattr(best_est, "degree")
    assert optim_res[3] == getattr(best_est, "kernel")
    assert optim_res2[0] == getattr(best_est, "C")
    assert optim_res2[2] == getattr(best_est, "gamma")
    assert optim_res2[1] == getattr(best_est, "degree")
    assert optim_res2[3] == getattr(best_est, "kernel")
@pytest.mark.hps
def test_searchcv_rank():
    """
    Test that rank_test_score and rank_train_score in ``cv_results_`` match
    ``scipy.stats.rankdata`` applied to the corresponding mean scores.

    (The previous docstring was a copy-paste from the reproducibility test.)
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )
    random_state = 42
    opt = BayesSearchCV(
        SVC(random_state=random_state),
        {
            "C": Real(1e-6, 1e6, prior="log-uniform"),
            "gamma": Real(1e-6, 1e1, prior="log-uniform"),
            "degree": Integer(1, 8),
            "kernel": Categorical(["linear", "poly", "rbf"]),
        },
        n_iter=11,
        random_state=random_state,
        return_train_score=True,
    )
    opt.fit(X_train, y_train)
    results = opt.cv_results_
    # higher score -> lower (better) rank, hence the negation before rankdata
    test_rank = np.asarray(
        rankdata(-np.array(results["mean_test_score"]), method="min"), dtype=np.int32
    )
    train_rank = np.asarray(
        rankdata(-np.array(results["mean_train_score"]), method="min"), dtype=np.int32
    )
    assert_array_equal(np.array(results["rank_test_score"]), test_rank)
    assert_array_equal(np.array(results["rank_train_score"]), train_rank)
def test_searchcv_refit():
    """
    Test that a ``BayesSearchCV`` with ``refit=True``, warm-started with
    the best estimator of a previous identical search, still reaches a
    good test score after fitting.

    Note: the docstring previously claimed this tested reproducibility —
    a copy-paste from an earlier test — which was misleading.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )
    random_state = 42
    opt = BayesSearchCV(
        SVC(random_state=random_state),
        {
            "C": Real(1e-6, 1e6, prior="log-uniform"),
            "gamma": Real(1e-6, 1e1, prior="log-uniform"),
            "degree": Integer(1, 8),
            "kernel": Categorical(["linear", "poly", "rbf"]),
        },
        n_iter=11,
        random_state=random_state,
    )
    opt2 = BayesSearchCV(
        SVC(random_state=random_state),
        {
            "C": Real(1e-6, 1e6, prior="log-uniform"),
            "gamma": Real(1e-6, 1e1, prior="log-uniform"),
            "degree": Integer(1, 8),
            "kernel": Categorical(["linear", "poly", "rbf"]),
        },
        n_iter=11,
        random_state=random_state,
        refit=True,
    )
    opt.fit(X_train, y_train)
    opt2.best_estimator_ = opt.best_estimator_
    opt2.fit(X_train, y_train)
    # This normally holds; a failure here indicates something is wrong
    # with the optimization procedure as such.
    assert opt2.score(X_test, y_test) > 0.9
def test_searchcv_callback():
    """Check that the callback passed to ``fit`` is invoked once per
    iteration and can interrupt the search loop early."""
    X, y = load_iris(True)
    opt = BayesSearchCV(
        DecisionTreeClassifier(),
        {
            "max_depth": [3],  # additional test for single dimension
            "min_samples_split": Real(0.1, 0.9),
        },
        n_iter=5,
    )
    total_iterations = [0]

    def callback(opt_result):
        # Count iterations; returning True stops the optimization, which
        # we request after the third call.
        total_iterations[0] += 1
        return total_iterations[0] > 2

    opt.fit(X, y, callback=callback)

    assert total_iterations[0] == 3
    # The final model must have been fit despite the early interruption.
    opt.score(X, y)
def test_searchcv_total_iterations():
    """Check the ``total_iterations`` counting property of BayesSearchCV."""
    search_spaces = [
        ({"max_depth": (1, 32)}, 10),  # explicit budget: 10 iterations
        {"min_samples_split": Real(0.1, 0.9)},  # falls back to n_iter (5)
    ]
    opt = BayesSearchCV(DecisionTreeClassifier(), search_spaces, n_iter=5)
    assert opt.total_iterations == 10 + 5
def test_search_cv_internal_parameter_types():
    """Check that parameters passed by ``BayesSearchCV`` to its estimator
    are native Python types — float, int, str."""

    class TypeCheckEstimator(BaseEstimator):
        # Estimator whose ``fit`` asserts that every parameter it received
        # is a plain Python type (not e.g. a numpy scalar).

        def __init__(self, float_param=0.0, int_param=0, str_param=""):
            self.float_param = float_param
            self.int_param = int_param
            self.str_param = str_param

        def fit(self, X, y):
            assert isinstance(self.float_param, float)
            assert isinstance(self.int_param, int)
            assert isinstance(self.str_param, str)
            return self

        def score(self, X, y):
            return np.random.uniform()

    # Example configuration that used to trigger the type issue.
    X, y = make_classification(10, 4)
    model = BayesSearchCV(
        estimator=TypeCheckEstimator(),
        search_spaces={
            "float_param": [0.0, 1.0],
            "int_param": [0, 10],
            "str_param": ["one", "two", "three"],
        },
        n_iter=11,
    )
    model.fit(X, y)
| 13,971 | 28.352941 | 87 | py |
deephyper | deephyper-master/tests/deephyper/skopt/test_utils.py | import pytest
import tempfile
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
import numpy as np
from deephyper.skopt import gp_minimize, forest_minimize
from deephyper.skopt import load
from deephyper.skopt import dump
from deephyper.skopt import expected_minimum, expected_minimum_random_sampling
from deephyper.skopt.benchmarks import bench1
from deephyper.skopt.benchmarks import bench3
from deephyper.skopt.learning import ExtraTreesRegressor
from deephyper.skopt import Optimizer
from deephyper.skopt import Space
from deephyper.skopt.space import Dimension
from deephyper.skopt.utils import point_asdict
from deephyper.skopt.utils import point_aslist
from deephyper.skopt.utils import dimensions_aslist
from deephyper.skopt.utils import has_gradients
from deephyper.skopt.utils import cook_estimator
from deephyper.skopt.utils import normalize_dimensions
from deephyper.skopt.utils import use_named_args
from deephyper.skopt.utils import check_list_types
from deephyper.skopt.utils import check_dimension_names
from deephyper.skopt.space import Real, Integer, Categorical
def check_optimization_results_equality(res_1, res_2):
    """Assert that two optimization result objects expose the same keys
    and carry the same main optimization outcome."""
    # Both results must have exactly the same set of keys.
    assert_equal(sorted(res_1.keys()), sorted(res_2.keys()))
    # Shallow comparison of the main optimization fields.
    for attr in ("x", "x_iters", "fun", "func_vals"):
        assert_array_equal(getattr(res_1, attr), getattr(res_2, attr))
@pytest.mark.hps
def test_dump_and_load():
    """Round-trip a result object through dump/load, with and without
    storing the objective function."""
    res = gp_minimize(
        bench3,
        [(-2.0, 2.0)],
        x0=[0.0],
        acq_func="LCB",
        n_calls=2,
        n_random_starts=1,
        random_state=1,
    )

    # Normal dump/load: the objective travels with the result.
    with tempfile.TemporaryFile() as f:
        dump(res, f)
        f.seek(0)
        res_loaded = load(f)
    check_optimization_results_equality(res, res_loaded)
    assert "func" in res_loaded.specs["args"]

    # Dump while explicitly excluding the objective function.
    with tempfile.TemporaryFile() as f:
        dump(res, f, store_objective=False)
        f.seek(0)
        res_loaded = load(f)
    check_optimization_results_equality(res, res_loaded)
    assert "func" not in res_loaded.specs["args"]

    # Same, after deleting the objective from the result object itself.
    del res.specs["args"]["func"]
    with tempfile.TemporaryFile() as f:
        dump(res, f, store_objective=False)
        f.seek(0)
        res_loaded = load(f)
    check_optimization_results_equality(res, res_loaded)
    assert "func" not in res_loaded.specs["args"]
@pytest.mark.hps
def test_dump_and_load_optimizer():
    """Check that a fitted Optimizer can be serialized and re-read."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )
    opt.run(bench1, n_iter=3)

    with tempfile.TemporaryFile() as f:
        dump(opt, f)
        f.seek(0)
        load(f)
@pytest.mark.hps
def test_expected_minimum():
    """``expected_minimum`` must be deterministic for a fixed seed and,
    since noise is ~0, must not exceed the observed minimum."""
    res = gp_minimize(
        bench3,
        [(-2.0, 2.0)],
        x0=[0.0],
        noise=1e-8,
        n_calls=8,
        n_random_starts=3,
        random_state=1,
    )

    x_min, f_min = expected_minimum(res, random_state=1)
    x_min2, f_min2 = expected_minimum(res, random_state=1)

    assert f_min <= res.fun  # holds because the noise level is ~0
    # Two calls with the same seed give the same answer.
    assert x_min == x_min2
    assert f_min == f_min2
@pytest.mark.hps
def test_expected_minimum_random_sampling():
    """``expected_minimum_random_sampling`` must be deterministic for a
    fixed seed and, since noise is ~0, must not exceed the observed
    minimum."""
    res = gp_minimize(
        bench3,
        [(-2.0, 2.0)],
        x0=[0.0],
        noise=1e-8,
        n_calls=8,
        n_random_starts=3,
        random_state=1,
    )

    x_min, f_min = expected_minimum_random_sampling(res, random_state=1)
    x_min2, f_min2 = expected_minimum_random_sampling(res, random_state=1)

    assert f_min <= res.fun  # holds because the noise level is ~0
    # Two calls with the same seed give the same answer.
    assert x_min == x_min2
    assert f_min == f_min2
@pytest.mark.hps
def test_dict_list_space_representation():
    """
    Check that converting a point between its dictionary and list
    representations is a lossless round trip.
    """
    chef_space = {
        "Cooking time": (0, 1200),  # in minutes
        "Main ingredient": [
            "cheese",
            "cherimoya",
            "chicken",
            "chard",
            "chocolate",
            "chicory",
        ],
        "Secondary ingredient": ["love", "passion", "dedication"],
        "Cooking temperature": (-273.16, 10000.0),  # in Celsius
    }

    opt = Optimizer(dimensions=dimensions_aslist(chef_space))
    point = opt.ask()

    # list -> dict -> list must give back the original point
    round_trip = point_aslist(chef_space, point_asdict(chef_space, point))
    assert_equal(point, round_trip)
@pytest.mark.hps
@pytest.mark.parametrize(
    "estimator, gradients",
    zip(["GP", "RF", "ET", "GBRT", "DUMMY"], [True, False, False, False, False]),
)
def test_has_gradients(estimator, gradients):
    """Only the GP surrogate is expected to provide gradients."""
    space = Space([(-2.0, 2.0)])
    model = cook_estimator(estimator, space=space)
    assert has_gradients(model) == gradients
@pytest.mark.hps
def test_categorical_gp_has_gradients():
    """A GP over a purely categorical space must not report gradients."""
    space = Space([("a", "b")])
    gp = cook_estimator("GP", space=space)
    assert not has_gradients(gp)
@pytest.mark.hps
def test_normalize_dimensions_all_categorical():
    """A space built only from categorical dimensions stays categorical."""
    space = normalize_dimensions((["a", "b", "c"], ["1", "2", "3"]))
    assert space.is_categorical
@pytest.mark.hps
def test_categoricals_mixed_types():
    """Transforming then inverse-transforming a point in a space mixing
    int, str and bool categories must recover the original point."""
    domain = [[1, 2, 3, 4], ["a", "b", "c"], [True, False]]
    point = [1, "a", True]
    space = normalize_dimensions(domain)
    assert space.inverse_transform(space.transform([point])) == [point]
@pytest.mark.hps
@pytest.mark.parametrize(
    "dimensions, normalizations",
    [
        (((1, 3), (1.0, 3.0)), ("normalize", "normalize")),
        (((1, 3), ("a", "b", "c")), ("normalize", "onehot")),
    ],
)
def test_normalize_dimensions(dimensions, normalizations):
    """Numeric dimensions get 'normalize'; categoricals get 'onehot'."""
    space = normalize_dimensions(dimensions)
    for dim, expected in zip(space, normalizations):
        assert dim.transform_ == expected
@pytest.mark.hps
@pytest.mark.parametrize(
    "dimension, name",
    [
        (Real(1, 2, name="learning rate"), "learning rate"),
        (Integer(1, 100, name="no of trees"), "no of trees"),
        (Categorical(["red, blue"], name="colors"), "colors"),
    ],
)
def test_normalize_dimensions_name(dimension, name):
    """``normalize_dimensions`` must preserve user-given dimension names.

    This test was previously also named ``test_normalize_dimensions``,
    which redefined (and therefore hid) the parametrized test of the same
    name above, so pytest silently never collected the earlier one. It is
    renamed here so both tests run.
    """
    space = normalize_dimensions([dimension])
    assert space.dimensions[0].name == name
@pytest.mark.hps
def test_use_named_args():
    """
    Check the ``@use_named_args`` function wrapper.

    The decorator lets an objective declared with named arguments be
    called by the optimizers, which pass a single list of values. The
    optimizers themselves are not used here; their calling convention is
    simulated directly.
    """
    # Search-space dimensions; each must carry a name.
    dimensions = [
        Real(name="foo", low=0.0, high=1.0),
        Real(name="bar", low=0.0, high=1.0),
        Real(name="baz", low=0.0, high=1.0),
    ]

    # Values the wrapped objective will be called with.
    default_parameters = [0.5, 0.6, 0.8]

    @use_named_args(dimensions=dimensions)
    def func(foo, bar, baz):
        # The wrapper must have mapped positions to the right names.
        assert foo == default_parameters[0]
        assert bar == default_parameters[1]
        assert baz == default_parameters[2]
        # Return some objective value.
        return foo**2 + bar**4 + baz**8

    # The wrapped objective must accept the point as a keyword argument
    # named ``x`` or positionally, as a plain list or a numpy array.
    for point in (default_parameters, np.array(default_parameters)):
        res = func(x=point)
        assert isinstance(res, float)
        res = func(point)
        assert isinstance(res, float)
@pytest.mark.hps
def test_space_names_in_use_named_args():
    """Dimension names must survive a full ``gp_minimize`` run."""
    space = [Integer(250, 2000, name="n_estimators")]

    @use_named_args(space)
    def objective(n_estimators):
        return n_estimators

    res = gp_minimize(objective, space, n_calls=10, random_state=0)
    best_params = dict(zip((s.name for s in res.space), res.x))
    assert "n_estimators" in best_params
    assert res.space.dimensions[0].name == "n_estimators"
@pytest.mark.hps
def test_check_dimension_names():
    """``check_dimension_names`` passes when every dimension is named and
    raises ``ValueError`` as soon as one is anonymous."""
    named_dimensions = [
        Real(name="foo", low=0.0, high=1.0),
        Real(name="bar", low=0.0, high=1.0),
        Real(name="baz", low=0.0, high=1.0),
    ]
    check_dimension_names(named_dimensions)

    # Appending a dimension without a name must make the check fail.
    assert_raises(
        ValueError, check_dimension_names, named_dimensions + [Real(-1, 1)]
    )
@pytest.mark.hps
def test_check_list_types():
    """``check_list_types`` accepts a homogeneous list and raises
    ``ValueError`` when an element has the wrong type."""
    dims = [
        Real(name="foo", low=0.0, high=1.0),
        Real(name="bar", low=0.0, high=1.0),
        Real(name="baz", low=0.0, high=1.0),
    ]
    check_list_types(dims, Dimension)

    # A string is not a Dimension, so the check must fail.
    assert_raises(ValueError, check_list_types, dims + ["test"], Dimension)
| 9,985 | 30.304075 | 83 | py |
deephyper | deephyper-master/tests/deephyper/evaluator/test_evaluator.py | import unittest
from collections import Counter
import pandas as pd
import pytest
def run(job, y=0):
    """Toy run-function: the objective is the job's ``x`` shifted by ``y``."""
    return job["x"] + y
def run_many_results(job, y=0):
    """Toy run-function returning the dict output format: ``x`` as the
    objective and ``y`` echoed back in the metadata."""
    return {"objective": job["x"], "metadata": {"y": y}}
class TestEvaluator(unittest.TestCase):
    """Tests for the Evaluator interface: creation, run-function return
    formats, and the different execution backends."""

    @pytest.mark.fast
    @pytest.mark.hps
    def test_import(self):
        """The Evaluator class must be importable."""
        from deephyper.evaluator import Evaluator

    @pytest.mark.fast
    @pytest.mark.hps
    def test_wrong_evaluator(self):
        """An unknown method name must raise ValueError."""
        from deephyper.evaluator import Evaluator

        with pytest.raises(ValueError):
            evaluator = Evaluator.create(
                run,
                method="threadPool",
                method_kwargs={
                    "num_workers": 1,
                },
            )

    @pytest.mark.fast
    @pytest.mark.hps
    def test_run_function_standards(self):
        """Check every supported run-function return format and the
        columns of the dumped ``results.csv`` for each of them."""
        from deephyper.evaluator import SerialEvaluator

        configs = [{"x": i} for i in range(10)]

        # float for single objective optimization
        def run(config):
            return 42.0

        evaluator = SerialEvaluator(run)
        evaluator.submit(configs)
        evaluator.gather(type="ALL")
        evaluator.dump_evals()
        results = pd.read_csv("results.csv")
        assert all(
            results.columns
            == [
                "p:x",
                "objective",
                "job_id",
                "m:timestamp_submit",
                "m:timestamp_gather",
            ]
        )
        assert len(results) == 10
        assert results["objective"][0] == 42.0

        # str with "F" prefix for failed evaluation
        def run(config):
            return "F_out_of_memory"

        evaluator = SerialEvaluator(run)
        evaluator.submit(configs)
        evaluator.gather(type="ALL")
        evaluator.dump_evals()
        results = pd.read_csv("results.csv")
        assert all(
            results.columns
            == [
                "p:x",
                "objective",
                "job_id",
                "m:timestamp_submit",
                "m:timestamp_gather",
            ]
        )
        assert len(results) == 10
        assert results["objective"][0] == "F_out_of_memory"

        # dict
        def run(config):
            return {"objective": 42.0}

        evaluator = SerialEvaluator(run)
        evaluator.submit(configs)
        evaluator.gather(type="ALL")
        evaluator.dump_evals()
        results = pd.read_csv("results.csv")
        assert all(
            results.columns
            == [
                "p:x",
                "objective",
                "job_id",
                "m:timestamp_submit",
                "m:timestamp_gather",
            ]
        )
        assert len(results) == 10
        assert results["objective"][0] == 42.0

        # dict with additional information
        def run(config):
            return {
                "objective": 42.0,
                "metadata": {"num_epochs_trained": 25, "num_parameters": 420000},
            }

        evaluator = SerialEvaluator(run)
        evaluator.submit(configs)
        evaluator.gather(type="ALL")
        evaluator.dump_evals()
        results = pd.read_csv("results.csv")
        print(results)
        assert all(
            results.columns
            == [
                "p:x",
                "objective",
                "job_id",
                "m:timestamp_submit",
                "m:timestamp_gather",
                "m:num_epochs_trained",
                "m:num_parameters",
            ]
        )
        assert len(results) == 10
        assert results["objective"][0] == 42.0
        assert results["m:num_epochs_trained"][0] == 25
        assert results["m:num_parameters"][0] == 420000

        # dict with reserved keywords (when @profile decorator is used)
        from deephyper.evaluator import profile

        @profile
        def run(config):
            return 42.0

        evaluator = SerialEvaluator(run)
        evaluator.submit(configs)
        evaluator.gather(type="ALL")
        evaluator.dump_evals()
        results = pd.read_csv("results.csv")
        assert all(
            results.columns
            == [
                "p:x",
                "objective",
                "job_id",
                "m:timestamp_submit",
                "m:timestamp_gather",
                "m:timestamp_start",
                "m:timestamp_end",
            ]
        )
        assert len(results) == 10
        assert results["objective"][0] == 42.0

        # combine previous the two previous tests
        @profile
        def run(config):
            return {
                "objective": 42.0,
                "metadata": {
                    "num_epochs_trained": 25,
                    "num_parameters": 420000,
                },
            }

        evaluator = SerialEvaluator(run)
        evaluator.submit(configs)
        evaluator.gather(type="ALL")
        evaluator.dump_evals()
        results = pd.read_csv("results.csv")
        assert all(
            results.columns
            == [
                "p:x",
                "objective",
                "job_id",
                "m:timestamp_submit",
                "m:timestamp_gather",
                "m:timestamp_start",
                "m:timestamp_end",
                "m:num_epochs_trained",
                "m:num_parameters",
            ]
        )
        assert len(results) == 10
        assert results["objective"][0] == 42.0
        assert results["m:num_epochs_trained"][0] == 25
        assert results["m:num_parameters"][0] == 420000

        # tuple of float for multi-objective optimization (will appear as "objective_0" and "objective_1" in the resulting dataframe)
        def run(config):
            if config["x"] < 5:
                return 42.0, 0.42
            else:
                return "F_out_of_memory"

        evaluator = SerialEvaluator(run)
        evaluator.num_objective = 2
        evaluator.submit(configs)
        evaluator.gather(type="ALL")
        evaluator.dump_evals()
        results = pd.read_csv("results.csv")
        assert all(
            results.columns
            == [
                "p:x",
                "objective_0",
                "objective_1",
                "job_id",
                "m:timestamp_submit",
                "m:timestamp_gather",
            ]
        )
        assert len(results) == 10
        counter = Counter(results["objective_0"])
        # 5 configs succeed (x < 5) and 5 fail.
        assert counter["42.0"] == 5 and counter["F_out_of_memory"] == 5

    def execute_evaluator(self, method):
        """Run the generic submit/gather checks for one backend method."""
        from deephyper.evaluator import Evaluator

        # without kwargs
        method_kwargs = {"num_workers": 1}
        if method == "ray":
            import os

            HERE = os.path.dirname(os.path.abspath(__file__))
            method_kwargs["ray_kwargs"] = {"runtime_env": {"working_dir": HERE}}

        evaluator = Evaluator.create(run, method=method, method_kwargs=method_kwargs)
        configs = [{"x": i} for i in range(10)]
        evaluator.submit(configs)
        jobs = evaluator.gather("ALL")
        jobs.sort(key=lambda j: j.config["x"])
        for config, job in zip(configs, jobs):
            assert config["x"] == job.config["x"]
            assert config["x"] == job.objective
        evaluator.submit(configs)
        jobs = evaluator.gather("BATCH", size=1)
        assert 1 <= len(jobs) and len(jobs) <= len(configs)

        # with kwargs
        evaluator = Evaluator.create(
            run,
            method=method,
            method_kwargs={"num_workers": 1, "run_function_kwargs": {"y": 1}},
        )
        configs = [{"x": i} for i in range(10)]
        evaluator.submit(configs)
        jobs = evaluator.gather("ALL")
        jobs.sort(key=lambda j: j.config["x"])
        for config, job in zip(configs, jobs):
            assert config["x"] == job.config["x"]
            # y=1 was forwarded to the run-function, shifting the objective
            assert job.objective == config["x"] + 1
        evaluator.submit(configs)
        jobs = evaluator.gather("BATCH", size=1)
        assert 1 <= len(jobs) and len(jobs) <= len(configs)

        # many results
        evaluator = Evaluator.create(
            run_many_results,
            method=method,
            method_kwargs={
                "num_workers": 1,
            },
        )
        configs = [{"x": i} for i in range(10)]
        evaluator.submit(configs)
        jobs = evaluator.gather("ALL")
        jobs.sort(key=lambda j: j.config["x"])
        for config, job in zip(configs, jobs):
            assert config["x"] == job.config["x"]
            assert type(job.output) is dict
            assert job.objective == config["x"]
            assert job.metadata["y"] == 0

    @pytest.mark.fast
    @pytest.mark.hps
    def test_serial(self):
        """Generic checks with the serial backend."""
        self.execute_evaluator("serial")

    @pytest.mark.fast
    @pytest.mark.hps
    def test_thread(self):
        """Generic checks with the thread-pool backend."""
        self.execute_evaluator("thread")

    @pytest.mark.fast
    @pytest.mark.hps
    def test_process(self):
        """Generic checks with the process-pool backend."""
        self.execute_evaluator("process")

    @pytest.mark.fast
    @pytest.mark.hps
    @pytest.mark.ray
    def test_ray(self):
        """Generic checks with the Ray backend; skipped if Ray is absent."""
        try:
            self.execute_evaluator("ray")
        except ModuleNotFoundError as e:
            e_str = str(e)
            # Only swallow the error when Ray itself is missing.
            if not ("ray" in e_str):
                raise e
if __name__ == "__main__":
    # Allow running this module directly for a quick manual check.
    test = TestEvaluator()
    test.test_thread()
| 9,383 | 28.142857 | 133 | py |
deephyper | deephyper-master/tests/deephyper/evaluator/test_distributed_evaluator.py | """
mpirun -np 2 python test_distributed_evaluator.py
"""
import os
import sys
import time
PYTHON = sys.executable
SCRIPT = os.path.abspath(__file__)
import pytest
import deephyper.test
def run(config):
    """Toy run-function returning the caller's rank; rank 1 sleeps so the
    ranks finish at different times."""
    rank = config["r"]
    if rank == 1:
        time.sleep(2)
    print(f"r={rank}")
    return config["r"]
def _test_mpi_distributed_evaluator():
    """Body of the MPI distributed-evaluator check (executed under mpirun)."""
    from deephyper.evaluator._serial import SerialEvaluator
    from deephyper.evaluator._distributed import distributed

    evaluator = distributed(backend="mpi")(SerialEvaluator)(run)
    configs = [{"i": i, "r": evaluator.rank} for i in range(1)]

    # Synchronous sharing: every rank must end up seeing all results.
    evaluator.submit(configs)
    local_results, other_results = evaluator.gather("ALL", sync_communication=True)
    results = local_results + other_results
    if evaluator.rank == 0:
        print(f"results={results}", flush=True)
    assert len(results) == evaluator.size * len(configs)

    # Asynchronous sharing: the slow rank (1) gathers last and sees
    # everything, while the other ranks may still miss its contribution.
    evaluator.submit(configs)
    local_results, other_results = evaluator.gather("ALL")
    results = local_results + other_results
    print(f"r={evaluator.rank} -> {len(results)}")
    if evaluator.rank != 1:
        assert len(results) <= (evaluator.size - 1) * len(configs)
    else:
        assert len(results) == (evaluator.size) * len(configs)
@pytest.mark.fast
@pytest.mark.hps
@pytest.mark.mpi
def test_mpi_distributed_evaluator():
    # Launch the real check in a separate 4-rank MPI job; the subprocess
    # re-enters this script and dispatches on the trailing argv entry.
    command = f"mpirun -np 4 {PYTHON} {SCRIPT} _test_mpi_distributed_evaluator"
    result = deephyper.test.run(command, live_output=False)
if __name__ == "__main__":
    # Dispatch to the function named on the command line (used by mpirun).
    func = sys.argv[-1]
    func = globals()[func]
    func()
| 1,630 | 24.092308 | 83 | py |
deephyper | deephyper-master/tests/deephyper/evaluator/test_mpi_comm_evaluator.py | import os
import sys
import time
import pytest
PYTHON = sys.executable
SCRIPT = os.path.abspath(__file__)
import deephyper.test
from deephyper.evaluator import Evaluator
def run(config):
    """Toy run-function: jobs with id > 3 sleep so they can be left
    pending by a partial gather; the objective is ``x``."""
    jid = config["job_id"]
    print(f"job {jid}...")
    if jid > 3:
        time.sleep(2)
    print(f"job {jid} done!", flush=True)
    return config["x"]
def _test_mpicomm_evaluator():
    """Test the MPICommEvaluator"""
    configs = [{"x": i} for i in range(8)]
    start = time.time()

    with Evaluator.create(
        run, method="mpicomm", method_kwargs={"abort_on_exit": False}
    ) as evaluator:
        # Only the root rank gets a non-None evaluator.
        if evaluator is not None:
            print(configs)
            evaluator.submit(configs)
            results = evaluator.gather(type="BATCH", size=4)
            print("gather", flush=True)
            # The 4 fast jobs (x < 4, no sleep) should come back first.
            objectives = sorted(job.result for job in results)
            assert objectives == list(range(4))

    duration = time.time() - start
    print("duration:", duration)
@pytest.mark.fast
@pytest.mark.hps
@pytest.mark.mpi
def test_mpicomm_evaluator():
    # Launch the real check in a separate 4-rank MPI job; the subprocess
    # re-enters this script and dispatches on the trailing argv entry.
    command = f"mpirun -np 4 {PYTHON} {SCRIPT} _test_mpicomm_evaluator"
    result = deephyper.test.run(command, live_output=False)
if __name__ == "__main__":
    # Dispatch to the function named on the command line (used by mpirun).
    func = sys.argv[-1]
    func = globals()[func]
    func()
| 1,299 | 22.214286 | 71 | py |
deephyper | deephyper-master/tests/deephyper/evaluator/test_decorator.py | import unittest
import pytest
from deephyper.evaluator import profile
@profile
def run_profile(config):
    # Trivial run-function; @profile wraps the return value in a dict and
    # adds start/end timestamps to its metadata.
    return config["x"]
@pytest.mark.fast
@pytest.mark.hps
class TestDecorator(unittest.TestCase):
    def test_profile(self):
        """The @profile decorator must report start/end timestamps in the
        metadata and keep the original return value as the objective."""
        output = run_profile({"x": 0})
        metadata = output["metadata"]
        assert "timestamp_end" in metadata
        assert "timestamp_start" in metadata
        assert output["objective"] == 0
| 424 | 18.318182 | 54 | py |
deephyper | deephyper-master/tests/deephyper/evaluator/test_queued_evaluator.py | import pytest
import unittest
def run(config, dequed=None):
    """Toy run-function combining ``x`` with the first queued resource.

    ``dequed`` is always supplied by the queued evaluator; the ``None``
    default only exists to match the expected run-function signature.
    """
    first_resource = dequed[0]
    return config["x"] + first_resource
class TestQueuedEvaluator(unittest.TestCase):
    """Tests for the ``queued(...)`` evaluator wrapper: queue items are
    popped one per task and recycled in order."""

    @pytest.mark.fast
    @pytest.mark.hps
    def test_queued_serial_evaluator(self):
        """Queue cycling with the serial evaluator."""
        from deephyper.evaluator import SerialEvaluator, queued

        QueuedSerialEvaluator = queued(
            SerialEvaluator
        )  # returns class of type Queued{evaluator_class}

        evaluator = QueuedSerialEvaluator(
            run,
            num_workers=1,
            # queued arguments
            queue=[1, 2, 3, 4],
            queue_pop_per_task=1,
        )

        assert evaluator.num_workers == 1
        assert list(evaluator.queue) == [1, 2, 3, 4]
        assert evaluator.queue_pop_per_task == 1

        results = []
        for i in range(8):
            evaluator.submit([{"x": 0}])
            jobs = evaluator.gather("ALL")
            results.append(jobs[0].objective)

        # The 4-item queue cycles twice over 8 submissions.
        assert results == [1, 2, 3, 4, 1, 2, 3, 4]

    @pytest.mark.fast
    @pytest.mark.hps
    @pytest.mark.ray
    def test_queued_ray_evaluator(self):
        """Same queue-cycling check with the Ray evaluator; the test is a
        no-op when Ray is not installed."""
        try:
            import os
            import sys

            HERE = os.path.dirname(os.path.abspath(__file__))

            from deephyper.evaluator import RayEvaluator, queued

            QueuedRayEvaluator = queued(
                RayEvaluator
            )  # returns class of type Queued{evaluator_class}

            evaluator = QueuedRayEvaluator(
                run,
                num_cpus=1,
                num_cpus_per_task=1,
                num_workers=1,
                ray_kwargs={"runtime_env": {"working_dir": HERE}},
                # queued arguments
                queue=[1, 2, 3, 4],
                queue_pop_per_task=1,
            )

            assert evaluator.num_workers == 1
            assert list(evaluator.queue) == [1, 2, 3, 4]
            assert evaluator.queue_pop_per_task == 1

            results = []
            for i in range(8):
                evaluator.submit([{"x": 0}])
                jobs = evaluator.gather("ALL")
                results.append(jobs[0].result)

            assert results == [1, 2, 3, 4, 1, 2, 3, 4]
        except ImportError as e:
            e_str = str(e)
            # Only swallow the error when RayEvaluator itself is missing.
            if not ("RayEvaluator" in e_str):
                raise e
| 2,317 | 26.270588 | 66 | py |
deephyper | deephyper-master/tests/deephyper/evaluator/storage/test_memory_storage.py | import unittest
import pytest
from deephyper.evaluator import Evaluator, RunningJob
from deephyper.evaluator.storage import MemoryStorage
def run_0(job: RunningJob) -> dict:
    # Objective is the job's ``x``; the id() of the storage object is
    # reported so tests can check whether the run-function executed with
    # the same storage instance as the caller (in-process) or a copy.
    return {
        "objective": job.parameters["x"],
        "metadata": {"storage_id": id(job.storage)},
    }
@pytest.mark.fast
@pytest.mark.hps
class TestMemoryStorage(unittest.TestCase):
    """Tests for the in-memory storage backend."""

    def test_basic(self):
        """Exercise search/job creation, id layout, and store/load of
        inputs, outputs and metadata."""
        # Creation of the database
        storage = MemoryStorage()

        # Ids are "<search>.<job>" strings, counting from 0.
        search_id0 = storage.create_new_search()
        job_id0 = storage.create_new_job(search_id0)
        job_id1 = storage.create_new_job(search_id=search_id0)
        self.assertEqual(search_id0, "0")
        self.assertEqual(job_id0, "0.0")
        self.assertEqual(job_id1, "0.1")

        search_id1 = storage.create_new_search()
        job_id0 = storage.create_new_job(search_id1)
        job_id1 = storage.create_new_job(search_id=search_id1)
        self.assertEqual(search_id1, "1")
        self.assertEqual(job_id0, "1.0")
        self.assertEqual(job_id1, "1.1")

        # Check available ids
        search_ids = storage.load_all_search_ids()
        self.assertEqual(search_ids, ["0", "1"])
        job_ids = storage.load_all_job_ids(search_id0)
        self.assertEqual(job_ids, ["0.0", "0.1"])

        # Store/Load
        # Job is empty
        job_id0_data = storage.load_job(job_id0)
        self.assertIn("in", job_id0_data)
        self.assertIn("out", job_id0_data)
        self.assertIn("metadata", job_id0_data)

        # Storing inputs of job
        storage.store_job_in(job_id0, args=(1, 2), kwargs={"foo": 0})
        job_id0_data = storage.load_job(job_id0)
        self.assertIn("args", job_id0_data["in"])
        self.assertIn("kwargs", job_id0_data["in"])
        self.assertEqual(job_id0_data["in"]["args"], (1, 2))
        self.assertEqual(job_id0_data["in"]["kwargs"], {"foo": 0})

        # Storing outputs of job
        # (the previously loaded snapshot must not see the new value)
        storage.store_job_out(job_id0, 0)
        self.assertIs(None, job_id0_data["out"])
        job_id0_data = storage.load_job(job_id0)
        self.assertEqual(0, job_id0_data["out"])

        # Storing metadata of job
        storage.store_job_metadata(job_id0, "timestamp", 10)
        self.assertEqual(job_id0_data["metadata"], {})
        job_id0_data = storage.load_job(job_id0)
        self.assertEqual(job_id0_data["metadata"], {"timestamp": 10})

    def test_with_evaluator(self):
        """Serial/thread evaluators share the storage object in-process;
        the process evaluator works on a pickled copy."""
        storage = MemoryStorage()

        # serial evaluator
        evaluator = Evaluator.create(
            run_0, method="serial", method_kwargs={"storage": storage}
        )
        evaluator.submit([{"x": 0}])
        job_done = evaluator.gather("ALL")[0]
        assert job_done.metadata["storage_id"] == id(storage)

        # thread evaluator
        evaluator = Evaluator.create(
            run_0, method="thread", method_kwargs={"storage": storage}
        )
        evaluator.submit([{"x": 0}])
        job_done = evaluator.gather("ALL")[0]
        assert job_done.metadata["storage_id"] == id(storage)

        # process evaluator
        evaluator = Evaluator.create(
            run_0, method="process", method_kwargs={"storage": storage}
        )
        evaluator.submit([{"x": 0}])
        job_done = evaluator.gather("ALL")[0]
        assert job_done.metadata["storage_id"] != id(storage)
if __name__ == "__main__":
    # Allow running this module directly for a quick manual check.
    test = TestMemoryStorage()
    test.test_with_evaluator()
| 3,427 | 32.281553 | 71 | py |
deephyper | deephyper-master/tests/deephyper/evaluator/storage/test_redis_storage.py | import unittest
import pytest
from deephyper.evaluator import Evaluator, RunningJob
def run_0(job: RunningJob) -> dict:
    # Reconnect inside the worker if the (possibly copied) storage is not
    # connected, write one metadata entry through it, and report the id()
    # of the storage object so tests can tell shared from copied storage.
    if not (job.storage.connected):
        job.storage.connect()
    job.storage.store_job_metadata(job.id, "foo", 0)
    return {
        "objective": job.parameters["x"],
        "metadata": {"storage_id": id(job.storage)},
    }
@pytest.mark.fast
@pytest.mark.hps
@pytest.mark.redis
class TestRedisStorage(unittest.TestCase):
    """Tests for the Redis storage backend (requires a running server)."""

    def test_basic(self):
        """Same store/load contract as the in-memory storage, backed by
        Redis; the database is flushed first."""
        from deephyper.evaluator.storage._redis_storage import RedisStorage

        # Creation of the database
        storage = RedisStorage()
        storage.connect()
        storage._redis.flushdb()  # empty the db before using it

        search_id0 = storage.create_new_search()
        job_id0 = storage.create_new_job(search_id0)
        job_id1 = storage.create_new_job(search_id=search_id0)
        self.assertEqual(search_id0, "0")
        self.assertEqual(job_id0, "0.0")
        self.assertEqual(job_id1, "0.1")

        search_id1 = storage.create_new_search()
        job_id0 = storage.create_new_job(search_id1)
        job_id1 = storage.create_new_job(search_id=search_id1)
        self.assertEqual(search_id1, "1")
        self.assertEqual(job_id0, "1.0")
        self.assertEqual(job_id1, "1.1")

        # Check available ids
        search_ids = storage.load_all_search_ids()
        self.assertEqual(search_ids, ["0", "1"])
        job_ids = storage.load_all_job_ids(search_id0)
        self.assertEqual(job_ids, ["0.0", "0.1"])

        # Store/Load
        # Job is empty
        job_id0_data = storage.load_job(job_id0)
        self.assertIn("in", job_id0_data)
        self.assertIn("out", job_id0_data)
        self.assertIn("metadata", job_id0_data)

        # Storing inputs of job
        storage.store_job_in(job_id0, args=(1, 2), kwargs={"foo": 0})
        job_id0_data = storage.load_job(job_id0)
        self.assertIn("args", job_id0_data["in"])
        self.assertIn("kwargs", job_id0_data["in"])
        # the serialization round trip turns the tuple into a list
        self.assertEqual(job_id0_data["in"]["args"], [1, 2])
        self.assertEqual(job_id0_data["in"]["kwargs"], {"foo": 0})

        # Storing outputs of job
        # (the previously loaded snapshot must not see the new value)
        storage.store_job_out(job_id0, 0)
        self.assertIs(None, job_id0_data["out"])
        job_id0_data = storage.load_job(job_id0)
        self.assertEqual(0, job_id0_data["out"])

        # Storing metadata of job
        storage.store_job_metadata(job_id0, "timestamp", 10)
        self.assertEqual(job_id0_data["metadata"], {})
        job_id0_data = storage.load_job(job_id0)
        self.assertEqual(job_id0_data["metadata"], {"timestamp": 10})

    def test_with_evaluator(self):
        """Serial/thread evaluators share the storage object in-process;
        the process evaluator works on a copy; metadata written by the
        run-function must be visible through the search's storage."""
        from deephyper.evaluator.storage._redis_storage import RedisStorage

        storage = RedisStorage()
        storage.connect()
        storage._redis.flushdb()

        # serial evaluator
        evaluator = Evaluator.create(
            run_0, method="serial", method_kwargs={"storage": storage}
        )
        evaluator.submit([{"x": 0}])
        job_done = evaluator.gather("ALL")[0]
        assert job_done.metadata["storage_id"] == id(storage)

        # thread evaluator
        evaluator = Evaluator.create(
            run_0, method="thread", method_kwargs={"storage": storage}
        )
        evaluator.submit([{"x": 0}])
        job_done = evaluator.gather("ALL")[0]
        assert job_done.metadata["storage_id"] == id(storage)

        # process evaluator
        evaluator = Evaluator.create(
            run_0, method="process", method_kwargs={"storage": storage}
        )
        evaluator.submit([{"x": 0}])
        job_done = evaluator.gather("ALL")[0]
        assert job_done.metadata["storage_id"] != id(storage)

        data = evaluator._storage.load_search(evaluator._search_id)
        assert data[0]["metadata"]["foo"] == 0
if __name__ == "__main__":
    # Allow running this module directly for a quick manual check.
    test = TestRedisStorage()
    test.test_with_evaluator()
| 3,926 | 32.853448 | 75 | py |
deephyper | deephyper-master/tests/deephyper/stopper/test__median_stopper.py | import pytest
import numpy as np
from deephyper.evaluator import RunningJob
from deephyper.problem import HpProblem
from deephyper.search.hps import CBO
from deephyper.stopper import MedianStopper
def run(job: RunningJob) -> dict:
    """Simulated training loop: the objective grows by ``x`` at each of up
    to 50 steps, and the attached stopper may interrupt it early."""
    assert isinstance(job.stopper, MedianStopper)

    max_budget = 50
    objective = 0
    for budget in range(1, max_budget + 1):
        objective += job["x"]
        job.record(budget, objective)
        if job.stopped():
            break

    return {
        "objective": job.objective,
        "metadata": {"budget": budget, "stopped": budget < max_budget},
    }
@pytest.mark.fast
@pytest.mark.hps
def test_median_stopper(tmp_path):
    """A median stopper plugged into CBO must prune some evaluations
    (total consumed budget below the maximum) while at least one run
    reaches the full 50 steps."""
    # define the variable you want to optimize
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    stopper = MedianStopper(max_steps=50, min_steps=1)
    search = CBO(
        problem,
        run,
        surrogate_model="DUMMY",
        stopper=stopper,
        random_state=42,
        log_dir=tmp_path,
    )
    results = search.search(max_evals=30)

    for column in ("m:budget", "m:stopped", "p:x", "objective"):
        assert column in results.columns

    budgets = np.sort(np.unique(results["m:budget"].to_numpy())).tolist()
    assert max(budgets) == 50
    assert len(budgets) > 1
    # pruning must have saved budget compared to 30 full-length runs
    assert results["m:budget"].sum() < 50 * 30
if __name__ == "__main__":
    # Run locally with logs written to the current directory.
    test_median_stopper(tmp_path=".")
| 1,491 | 22.3125 | 75 | py |
deephyper | deephyper-master/tests/deephyper/stopper/test__sha_stopper.py | import unittest
import pytest
import numpy as np
from deephyper.evaluator import RunningJob
from deephyper.problem import HpProblem
from deephyper.search.hps import CBO
from deephyper.stopper import SuccessiveHalvingStopper
def run(job: RunningJob) -> dict:
    """Toy multi-fidelity run-function: the objective grows linearly with budget.

    The search must attach a ``SuccessiveHalvingStopper`` to each job; the
    loop stops early as soon as the stopper decides to prune this evaluation.
    """
    assert isinstance(job.stopper, SuccessiveHalvingStopper)

    total_steps = 50
    score = 0
    step = 0
    while step < total_steps:
        step += 1
        score += job["x"]
        job.record(step, score)  # report the intermediate objective
        if job.stopped():
            break

    metadata = {"budget": step, "stopped": step < total_steps}
    return {"objective": job.objective, "metadata": metadata}
@pytest.mark.fast
@pytest.mark.hps
def test_successive_halving_stopper(tmp_path):
    """Budgets observed with SHA must follow the reduction-factor rungs."""
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    search = CBO(
        problem,
        run,
        surrogate_model="DUMMY",
        stopper=SuccessiveHalvingStopper(max_steps=50, reduction_factor=3),
        random_state=42,
        log_dir=tmp_path,
    )
    results = search.search(max_evals=30)

    for column in ("m:budget", "m:stopped", "p:x", "objective"):
        assert column in results.columns

    rungs = np.sort(np.unique(results["m:budget"].to_numpy())).tolist()
    # rungs at powers of the reduction factor (1, 3, 9) plus the full budget
    assert rungs == [1, 3, 9, 50]
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_successive_halving_stopper(tmp_path=".")
| 1,502 | 22.857143 | 75 | py |
deephyper | deephyper-master/tests/deephyper/stopper/test__idle_stopper.py | import pytest
import numpy as np
from deephyper.evaluator import RunningJob
from deephyper.problem import HpProblem
from deephyper.search.hps import CBO
from deephyper.stopper import IdleStopper
def run(job: RunningJob) -> dict:
    """Toy multi-fidelity run-function used with the ``IdleStopper``.

    The idle stopper never prunes, so each job is expected to consume the
    full 50-step budget.
    """
    assert isinstance(job.stopper, IdleStopper)

    total_steps = 50
    score = 0
    step = 0
    while step < total_steps:
        step += 1
        score += job["x"]
        job.record(step, score)  # report the intermediate objective
        if job.stopped():
            break

    metadata = {"budget": step, "stopped": step < total_steps}
    return {"objective": job.objective, "metadata": metadata}
@pytest.mark.fast
@pytest.mark.hps
def test_idle_stopper(tmp_path):
    """With the idle stopper every evaluation runs its full 50-step budget."""
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    search = CBO(
        problem,
        run,
        surrogate_model="DUMMY",
        stopper=IdleStopper(max_steps=50),
        random_state=42,
        log_dir=tmp_path,
    )
    results = search.search(max_evals=30)

    for column in ("m:budget", "m:stopped", "p:x", "objective"):
        assert column in results.columns

    budgets = np.sort(np.unique(results["m:budget"].to_numpy())).tolist()
    # no pruning ever happens: the only observed budget is the maximum
    assert budgets == [50]
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_idle_stopper(tmp_path=".")
| 1,390 | 21.435484 | 75 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_node.py | import unittest
import pytest
@pytest.mark.fast
@pytest.mark.nas
class NodeTest(unittest.TestCase):
    """Behavior of mirror and mime nodes in the NAS search-space graph."""

    def test_mirror_node(self):
        """A mirror node reuses exactly the op selected on its source node."""
        import tensorflow as tf

        from deephyper.nas.node import MirrorNode, VariableNode
        from deephyper.nas.operation import operation

        Dense = operation(tf.keras.layers.Dense)

        source = VariableNode()
        first_op = Dense(10)
        source.add_op(first_op)
        source.add_op(Dense(20))

        mirror = MirrorNode(source)

        # selecting an op on the source selects the same op on the mirror
        source.set_op(0)
        assert source.op == first_op
        assert mirror.op == first_op

    def test_mime_node(self):
        """A mime node selects its own op at the index chosen on its source."""
        import tensorflow as tf

        from deephyper.nas.node import MimeNode, VariableNode
        from deephyper.nas.operation import operation

        Dense = operation(tf.keras.layers.Dense)

        source = VariableNode()
        first_op = Dense(10)
        source.add_op(first_op)
        source.add_op(Dense(20))

        mime = MimeNode(source)
        mimed_op = Dense(30)
        mime.add_op(mimed_op)
        mime.add_op(Dense(40))

        # the mime follows the *index* of the source's selection
        source.set_op(0)
        assert source.op == first_op
        assert mime.op == mimed_op
| 1,118 | 21.836735 | 63 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_dense_skipco_factory.py | import pytest
@pytest.mark.fast
@pytest.mark.nas
def test_search_space():
    """Smoke test: build ``DenseSkipCoSpace`` and sample one model from it."""
    from deephyper.nas.spacelib.tabular import DenseSkipCoSpace

    search_space = DenseSkipCoSpace(input_shape=(10,), output_shape=(1,)).build()
    search_space.sample()
| 243 | 21.181818 | 74 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_one_layer_factory.py | import pytest
@pytest.mark.fast
@pytest.mark.nas
def test_search_space():
    """Smoke test: build ``OneLayerSpace`` and sample one model from it."""
    from deephyper.nas.spacelib.tabular import OneLayerSpace

    search_space = OneLayerSpace(input_shape=(10,), output_shape=(1,)).build()
    search_space.sample()
| 237 | 20.636364 | 71 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_new_api.py | import pytest
@pytest.mark.fast
@pytest.mark.nas
def test_basic_space(verbose=0):
    """Define a minimal ``KSearchSpace`` subclass and sample two models."""
    import tensorflow as tf

    from deephyper.nas import KSearchSpace
    from deephyper.nas.node import VariableNode, ConstantNode
    from deephyper.nas.operation import operation, Identity

    Dense = operation(tf.keras.layers.Dense)

    class BasicSpace(KSearchSpace):
        def __init__(self, input_shape, output_shape, batch_size=None, *args, **kwargs):
            super().__init__(
                input_shape, output_shape, batch_size=batch_size, *args, **kwargs
            )

        def build(self):
            source = self.input[0]

            # one variable node choosing among Identity and Dense(1..999)
            hidden = VariableNode()
            hidden.add_op(Identity())
            for units in range(1, 1000):
                hidden.add_op(Dense(units))
            self.connect(source, hidden)

            head = ConstantNode(Dense(self.output_shape[0]))
            self.connect(hidden, head)

    space = BasicSpace(input_shape=(1,), output_shape=(1,))
    space.build()

    # sample once from an explicit choice vector, once at random
    model_1 = space.sample([1])
    if verbose:
        model_1.summary()
    model_2 = space.sample()
    if verbose:
        model_2.summary()
| 1,177 | 24.608696 | 88 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_trainer_keras_regressor.py | import unittest
import pytest
@pytest.mark.slow
@pytest.mark.nas
class TrainerKerasRegressorTest(unittest.TestCase):
    """End-to-end checks of ``BaseTrainer`` on small linear-regression problems.

    Each test resolves a benchmark ``Problem``, builds/samples a model from its
    search space, trains for two epochs and checks that ``train()`` returns a
    history dict instead of the ``sys.float_info.max`` failure sentinel.
    """

    def test_trainer_regressor_train_valid_with_one_input(self):
        """Training with a single ndarray input."""
        import sys
        from random import random

        import deephyper.core.utils
        import numpy as np
        from deephyper.nas.trainer import BaseTrainer
        from deephyper.test.nas.linearReg.problem import Problem

        config = Problem.space
        config["hyperparameters"]["num_epochs"] = 2

        # Resolve the dotted-path entries of the problem into callables.
        load_data = deephyper.core.utils.load_attr(config["load_data"]["func"])
        config["load_data"]["func"] = load_data
        config["search_space"]["class"] = deephyper.core.utils.load_attr(
            config["search_space"]["class"]
        )

        # Loading data
        kwargs = config["load_data"].get("kwargs")
        (tX, ty), (vX, vy) = load_data() if kwargs is None else load_data(**kwargs)

        # Set data shape: interested in the shape of the data, not its length.
        input_shape = np.shape(tX)[1:]
        output_shape = np.shape(ty)[1:]
        config["data"] = {"train_X": tX, "train_Y": ty, "valid_X": vX, "valid_Y": vy}

        # Build the search space and select a random architecture from it.
        search_space = config["search_space"]["class"](
            input_shape, output_shape, **config["search_space"]["kwargs"]
        ).build()
        arch_seq = [random() for i in range(search_space.num_nodes)]
        search_space.set_ops(arch_seq)
        search_space.plot("trainer_keras_regressor_test.dot")

        if config.get("preprocessing") is not None:
            preprocessing = deephyper.core.utils.load_attr(
                config["preprocessing"]["func"]
            )
            config["preprocessing"]["func"] = preprocessing
        else:
            config["preprocessing"] = None

        model = search_space.create_model()
        trainer = BaseTrainer(config=config, model=model)

        res = trainer.train()
        assert res != sys.float_info.max and type(res) is dict

    def test_trainer_regressor_train_valid_with_multiple_ndarray_inputs(self):
        """Training with a list of ndarray inputs."""
        import sys
        from random import random

        import deephyper.core.utils
        import numpy as np
        from deephyper.nas.trainer import BaseTrainer
        from deephyper.test.nas.linearRegMultiInputs.problem import Problem

        config = Problem.space
        config["hyperparameters"]["num_epochs"] = 2

        # Resolve the dotted-path entries of the problem into callables.
        load_data = deephyper.core.utils.load_attr(config["load_data"]["func"])
        config["load_data"]["func"] = load_data
        config["search_space"]["class"] = deephyper.core.utils.load_attr(
            config["search_space"]["class"]
        )

        # Loading data
        kwargs = config["load_data"].get("kwargs")
        (tX, ty), (vX, vy) = load_data() if kwargs is None else load_data(**kwargs)

        # Set data shape: one input shape per input array (not their lengths).
        input_shape = [np.shape(itX)[1:] for itX in tX]
        output_shape = np.shape(ty)[1:]
        config["data"] = {"train_X": tX, "train_Y": ty, "valid_X": vX, "valid_Y": vy}

        search_space = config["search_space"]["class"](
            input_shape, output_shape, **config["search_space"]["kwargs"]
        ).build()
        arch_seq = [random() for i in range(search_space.num_nodes)]
        search_space.set_ops(arch_seq)
        search_space.plot("trainer_keras_regressor_test.dot")

        if config.get("preprocessing") is not None:
            preprocessing = deephyper.core.utils.load_attr(
                config["preprocessing"]["func"]
            )
            config["preprocessing"]["func"] = preprocessing
        else:
            config["preprocessing"] = None

        model = search_space.create_model()
        trainer = BaseTrainer(config=config, model=model)

        res = trainer.train()
        assert res != sys.float_info.max and type(res) is dict

    def test_trainer_regressor_train_valid_with_multiple_generator_inputs(self):
        """Training with inputs provided through data generators.

        Fix: the original imported ``linearReg.problem.Problem`` and then
        immediately shadowed it with ``linearRegMultiInputsGen.Problem``;
        the dead import was removed.
        """
        import sys

        from deephyper.nas.run._util import get_search_space, load_config, setup_data
        from deephyper.nas.trainer import BaseTrainer
        from deephyper.test.nas.linearRegMultiInputsGen import Problem

        config = Problem.space
        load_config(config)
        input_shape, output_shape = setup_data(config)
        search_space = get_search_space(config, input_shape, output_shape, 42)

        config["hyperparameters"]["num_epochs"] = 2

        model = search_space.sample()
        trainer = BaseTrainer(config=config, model=model)
        res = trainer.train()
        assert res != sys.float_info.max and type(res) is dict
# Allow running the multiple-ndarray-inputs case directly as a script.
if __name__ == "__main__":
    test = TrainerKerasRegressorTest()
    test.test_trainer_regressor_train_valid_with_multiple_ndarray_inputs()
| 4,879 | 33.125874 | 85 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_keras_search_space.py | import unittest
import pytest
@pytest.mark.nas
class TestKSearchSpace(unittest.TestCase):
    """Build small ``KSearchSpace`` subclasses and sample Keras models from them."""

    def test_create(self):
        """A space with a single variable node builds and samples."""
        import tensorflow as tf

        from deephyper.nas import KSearchSpace
        from deephyper.nas.node import VariableNode
        from deephyper.nas.operation import operation

        Dense = operation(tf.keras.layers.Dense)

        class TestSpace(KSearchSpace):
            def __init__(self, input_shape, output_shape):
                super().__init__(input_shape, output_shape)

            def build(self):
                node = VariableNode()
                self.connect(self.input_nodes[0], node)
                node.add_op(Dense(1))
                return self

        TestSpace((5,), (1,)).build().sample()

    def test_create_more_nodes(self):
        """Two chained variable nodes build and sample."""
        import tensorflow as tf

        from deephyper.nas import KSearchSpace
        from deephyper.nas.node import VariableNode
        from deephyper.nas.operation import operation

        Dense = operation(tf.keras.layers.Dense)

        class TestSpace(KSearchSpace):
            def __init__(self, input_shape, output_shape):
                super().__init__(input_shape, output_shape)

            def build(self):
                hidden = VariableNode()
                self.connect(self.input_nodes[0], hidden)
                hidden.add_op(Dense(10))

                head = VariableNode()
                head.add_op(Dense(1))
                self.connect(hidden, head)
                return self

        TestSpace((5,), (1,)).build().sample()

    def test_create_multiple_inputs_with_one_vnode(self):
        """Several inputs are concatenated before a single variable node."""
        import tensorflow as tf

        from deephyper.nas import KSearchSpace
        from deephyper.nas.node import ConstantNode, VariableNode
        from deephyper.nas.operation import operation, Concatenate

        Dense = operation(tf.keras.layers.Dense)

        class TestSpace(KSearchSpace):
            def __init__(self, input_shape, output_shape):
                super().__init__(input_shape, output_shape)

            def build(self):
                merge = ConstantNode()
                merge.set_op(Concatenate(self, self.input_nodes))

                node = VariableNode()
                self.connect(merge, node)
                node.add_op(Dense(1))
                return self

        TestSpace([(5,), (5,)], (1,)).build().sample()
| 2,481 | 28.2 | 66 | py |
deephyper | deephyper-master/tests/deephyper/nas/run/test_single_loss.py | import pytest
import numpy as np
def load_data(dim=10, size=100000, prop=0.80):
    """Generate a random regression dataset for the linear function ``sum(x_i)``.

    Fix: the original docstring claimed the target was ``-sum(x_i)`` while the
    code computes ``+sum``; the hard-coded ``size`` and ``prop`` were also
    promoted to keyword parameters (defaults preserve the old behavior).

    Args:
        dim (int): number of input features per sample.
        size (int): total number of samples to generate.
        prop (float): fraction of the samples used for training; the rest
            forms the validation split.

    Return:
        Tuple of Numpy arrays: ``(train_X, train_y), (valid_X, valid_y)``.
    """
    rs = np.random.RandomState(42)  # fixed seed for reproducibility
    a, b = 0, 100
    d = b - a
    # inputs uniform in [a, b); targets are the row sums
    x = np.array([a + rs.random(dim) * d for _ in range(size)])
    y = np.array([[np.sum(v)] for v in x])
    sep_index = int(prop * size)
    train_X = x[:sep_index]
    train_y = y[:sep_index]
    valid_X = x[sep_index:]
    valid_y = y[sep_index:]
    print(f"train_X shape: {np.shape(train_X)}")
    print(f"train_y shape: {np.shape(train_y)}")
    print(f"valid_X shape: {np.shape(valid_X)}")
    print(f"valid_y shape: {np.shape(valid_y)}")
    return (train_X, train_y), (valid_X, valid_y)
@pytest.mark.nas
def test_single_loss():
    """Train a one-layer model on the toy regression problem with one loss."""
    from deephyper.nas.run import run_base_trainer
    from deephyper.nas.spacelib.tabular import OneLayerSpace
    from deephyper.problem import NaProblem

    problem = NaProblem()
    problem.load_data(load_data)
    problem.search_space(OneLayerSpace)
    problem.hyperparameters(
        batch_size=100, learning_rate=0.1, optimizer="adam", num_epochs=1
    )
    problem.loss("mse")
    problem.metrics(["r2"])
    problem.objective("val_r2")

    config = problem.space
    config["hyperparameters"]["verbose"] = 1
    config["arch_seq"] = [0.5]  # baseline architecture choice
    run_base_trainer(config)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_single_loss()
| 1,507 | 23.721311 | 74 | py |
deephyper | deephyper-master/tests/deephyper/nas/run/test_multi_loss.py | import pytest
import numpy as np
def load_data(dim=100, size=100000, prop=0.80):
    """Generate a random dataset for a supervised auto-encoder.

    The targets are a list ``[y, X]``: the scalar regression target
    ``sum(x_i)`` plus the inputs themselves as reconstruction target.
    Fix: the original docstring claimed ``-sum(x_i)`` while the code computes
    ``+sum``; ``size`` and ``prop`` were promoted to keyword parameters
    (defaults preserve the old behavior).

    Args:
        dim (int): number of input features per sample.
        size (int): total number of samples to generate.
        prop (float): fraction of the samples used for training.

    Return:
        ``(train_X, [train_y, train_X]), (valid_X, [valid_y, valid_X])``.
    """
    rs = np.random.RandomState(42)  # fixed seed for reproducibility
    a, b = 0, 100
    d = b - a
    # inputs uniform in [a, b); scalar target is the row sum
    x = np.array([a + rs.random(dim) * d for _ in range(size)])
    y = np.array([[np.sum(v)] for v in x])
    sep_index = int(prop * size)
    train_X = x[:sep_index]
    train_y = y[:sep_index]
    valid_X = x[sep_index:]
    valid_y = y[sep_index:]
    print(f"train_X shape: {np.shape(train_X)}")
    print(f"train_y shape: [{np.shape(train_X)}, {np.shape(train_y)}]")
    print(f"valid_X shape: {np.shape(valid_X)}")
    print(f"valid_y shape: [{np.shape(valid_X)}, {np.shape(valid_y)}]")
    return (train_X, [train_y, train_X]), (valid_X, [valid_y, valid_X])
@pytest.mark.nas
def test_multi_loss():
    """Train a supervised auto-encoder with two weighted losses."""
    from deephyper.nas.run import run_base_trainer
    from deephyper.nas.spacelib.tabular import SupervisedRegAutoEncoderSpace
    from deephyper.problem import NaProblem

    problem = NaProblem()
    problem.load_data(load_data)
    problem.search_space(SupervisedRegAutoEncoderSpace, num_layers=10)
    problem.hyperparameters(
        batch_size=100, learning_rate=0.1, optimizer="adam", num_epochs=1
    )
    # two heads: supervised output (weight 0) and reconstruction (weight 1)
    problem.loss(
        loss={"output_0": "mse", "output_1": "mse"},
        loss_weights={"output_0": 0.0, "output_1": 1.0},
    )
    problem.metrics({"output_0": ["r2", "mse"], "output_1": "mse"})
    problem.objective("val_output_0_r2")

    config = problem.space
    config["hyperparameters"]["verbose"] = 1
    config["arch_seq"] = [0.5] * 19  # baseline architecture choice
    run_base_trainer(config)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_multi_loss()
| 1,794 | 27.046875 | 76 | py |
deephyper | deephyper-master/tests/deephyper/nas/run/test_single_loss_multi_var.py | import pytest
import numpy as np
def load_data(dim=10, size=100000, prop=0.80):
    """Generate a random regression dataset with a two-column target.

    Each target row is ``[sum(x), -sum(x)]``. Fix: the docstring previously
    only mentioned ``-sum(x_i)``; ``size`` and ``prop`` were promoted to
    keyword parameters (defaults preserve the old behavior).

    Args:
        dim (int): number of input features per sample.
        size (int): total number of samples to generate.
        prop (float): fraction of the samples used for training.

    Return:
        Tuple of Numpy arrays: ``(train_X, train_y), (valid_X, valid_y)``.
    """
    rs = np.random.RandomState(42)  # fixed seed for reproducibility
    a, b = 0, 100
    d = b - a
    # inputs uniform in [a, b); two targets per sample: +/- the row sum
    x = np.array([a + rs.random(dim) * d for _ in range(size)])
    y = np.array([[np.sum(v), -np.sum(v)] for v in x])
    sep_index = int(prop * size)
    train_X = x[:sep_index]
    train_y = y[:sep_index]
    valid_X = x[sep_index:]
    valid_y = y[sep_index:]
    print(f"train_X shape: {np.shape(train_X)}")
    print(f"train_y shape: {np.shape(train_y)}")
    print(f"valid_X shape: {np.shape(valid_X)}")
    print(f"valid_y shape: {np.shape(valid_y)}")
    return (train_X, train_y), (valid_X, valid_y)
@pytest.mark.nas
def test_single_loss_multi_var():
    """Train a one-layer model with a single loss over a two-column target."""
    from deephyper.nas.run import run_base_trainer
    from deephyper.nas.spacelib.tabular import OneLayerSpace
    from deephyper.problem import NaProblem

    problem = NaProblem()
    problem.load_data(load_data)
    problem.search_space(OneLayerSpace)
    problem.hyperparameters(
        batch_size=100, learning_rate=0.1, optimizer="adam", num_epochs=1
    )
    problem.loss("mse")
    problem.metrics(["r2"])
    problem.objective("val_r2")

    config = problem.space
    config["hyperparameters"]["verbose"] = 1
    config["arch_seq"] = [0.5]  # baseline architecture choice
    run_base_trainer(config)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_single_loss_multi_var()
| 1,540 | 23.854839 | 74 | py |
deephyper | deephyper-master/tests/deephyper/problem/test_problem.py | import unittest
import pytest
@pytest.mark.hps
class HpProblemTest(unittest.TestCase):
    """Tests of hyperparameter declaration on ``HpProblem``."""

    def test_add_good_dim(self):
        """Each supported value form maps onto the matching ConfigSpace class."""
        import ConfigSpace as cs
        import ConfigSpace.hyperparameters as csh
        from deephyper.problem import HpProblem
        pb = HpProblem()
        # (int, int) tuple -> uniform integer
        p0 = pb.add_hyperparameter((-10, 10), "p0")
        p0_csh = csh.UniformIntegerHyperparameter(
            name="p0", lower=-10, upper=10, log=False
        )
        assert p0 == p0_csh
        # (int, int, "log-uniform") -> log-scaled uniform integer
        p1 = pb.add_hyperparameter((1, 100, "log-uniform"), "p1")
        p1_csh = csh.UniformIntegerHyperparameter(
            name="p1", lower=1, upper=100, log=True
        )
        assert p1 == p1_csh
        # (float, float) tuple -> uniform float
        p2 = pb.add_hyperparameter((-10.0, 10.0), "p2")
        p2_csh = csh.UniformFloatHyperparameter(
            name="p2", lower=-10.0, upper=10.0, log=False
        )
        assert p2 == p2_csh
        # (float, float, "log-uniform") -> log-scaled uniform float
        p3 = pb.add_hyperparameter((1.0, 100.0, "log-uniform"), "p3")
        p3_csh = csh.UniformFloatHyperparameter(
            name="p3", lower=1.0, upper=100.0, log=True
        )
        assert p3 == p3_csh
        # list of ints/floats -> ordinal hyperparameter
        p4 = pb.add_hyperparameter([1, 2, 3, 4], "p4")
        p4_csh = csh.OrdinalHyperparameter(name="p4", sequence=[1, 2, 3, 4])
        assert p4 == p4_csh
        p5 = pb.add_hyperparameter([1.0, 2.0, 3.0, 4.0], "p5")
        p5_csh = csh.OrdinalHyperparameter(name="p5", sequence=[1.0, 2.0, 3.0, 4.0])
        assert p5 == p5_csh
        # list of strings -> categorical hyperparameter
        p6 = pb.add_hyperparameter(["cat0", "cat1"], "p6")
        p6_csh = csh.CategoricalHyperparameter(name="p6", choices=["cat0", "cat1"])
        assert p6 == p6_csh
        # {"mu", "sigma"} dict with ints -> normal integer
        p7 = pb.add_hyperparameter({"mu": 0, "sigma": 1}, "p7")
        p7_csh = csh.NormalIntegerHyperparameter(name="p7", mu=0, sigma=1)
        assert p7 == p7_csh
        # bounded normal integers require ConfigSpace > 0.4.20
        if cs.__version__ > "0.4.20":
            p8 = pb.add_hyperparameter(
                {"mu": 0, "sigma": 1, "lower": -5, "upper": 5}, "p8"
            )
            p8_csh = csh.NormalIntegerHyperparameter(
                name="p8", mu=0, sigma=1, lower=-5, upper=5
            )
            assert p8 == p8_csh
        # {"mu", "sigma"} dict with floats -> normal float
        p9 = pb.add_hyperparameter({"mu": 0.0, "sigma": 1.0}, "p9")
        p9_csh = csh.NormalFloatHyperparameter(name="p9", mu=0, sigma=1)
        assert p9 == p9_csh

    def test_kwargs(self):
        """``add_hyperparameter`` also accepts keyword arguments."""
        from deephyper.problem import HpProblem
        pb = HpProblem()
        pb.add_hyperparameter(value=(-10, 10), name="dim0")

    def test_dim_with_wrong_name(self):
        """A non-string dimension name raises ``SpaceDimNameOfWrongType``."""
        from deephyper.core.exceptions.problem import SpaceDimNameOfWrongType
        from deephyper.problem import HpProblem
        pb = HpProblem()
        with pytest.raises(SpaceDimNameOfWrongType):
            pb.add_hyperparameter((-10, 10), 0)

    def test_config_space_hp(self):
        """ConfigSpace hyperparameter objects can be added directly in bulk."""
        import ConfigSpace.hyperparameters as csh
        from deephyper.problem import HpProblem
        alpha = csh.UniformFloatHyperparameter(name="alpha", lower=0, upper=1)
        beta = csh.UniformFloatHyperparameter(name="beta", lower=0, upper=1)
        pb = HpProblem()
        pb.add_hyperparameters([alpha, beta])
@pytest.mark.nas
class TestNaProblem(unittest.TestCase):
    """Validation logic of ``NaProblem`` definitions."""

    def test_search_space(self):
        """``search_space`` rejects a non-class argument."""
        from deephyper.nas.spacelib.tabular import OneLayerSpace
        from deephyper.problem import NaProblem

        problem = NaProblem()
        with pytest.raises(TypeError):
            problem.search_space(space_class="a")
        problem.search_space(OneLayerSpace)

    def test_full_problem(self):
        """A full problem only accepts objectives derived from its loss/metrics."""
        from deephyper.core.exceptions.problem import NaProblemError
        from deephyper.nas.preprocessing import minmaxstdscaler
        from deephyper.nas.spacelib.tabular import OneLayerSpace
        from deephyper.problem import NaProblem

        problem = NaProblem()

        def load_data(prop):
            return ([[10]], [1]), ([10], [1])

        problem.load_data(load_data, prop=1.0)
        problem.preprocessing(minmaxstdscaler)
        problem.search_space(OneLayerSpace)
        problem.hyperparameters(
            batch_size=64,
            learning_rate=0.001,
            optimizer="adam",
            num_epochs=10,
            loss_metric="mse",
        )

        # "r2" cannot be an objective before it is declared as a metric
        with pytest.raises(NaProblemError):
            problem.objective("r2")

        problem.loss("mse")
        problem.metrics(["r2"])

        # every declared loss/metric (and its val_ variant) is a valid objective
        for objective in ("loss", "val_loss", "r2", "val_r2"):
            problem.objective(objective)
| 4,405 | 30.248227 | 84 | py |
deephyper | deephyper-master/tests/deephyper/keras/layers/padding_test.py | import pytest
@pytest.mark.fast
@pytest.mark.nas
def test_padding_layer():
    """``Padding([[1, 1]])`` adds one element on each side of the last axis."""
    import numpy as np
    import tensorflow as tf

    from deephyper.keras.layers import Padding

    model = tf.keras.Sequential()
    model.add(Padding([[1, 1]]))

    batch = np.random.random((3, 1))
    assert np.shape(batch) == (3, 1)

    padded = model.predict(batch, batch_size=1)
    assert np.shape(padded) == (3, 3)
| 450 | 20.47619 | 46 | py |
deephyper | deephyper-master/tests/deephyper/search/hps/test_dbo_max_evals.py | import os
import sys
import pytest
# Interpreter and absolute path of this script, used to re-launch it under mpirun.
PYTHON = sys.executable
SCRIPT = os.path.abspath(__file__)
import deephyper.test
def _test_dbo_max_evals(tmp_path):
    """Worker executed under ``mpirun``: check DBO collects >= ``max_evals`` results.

    Optimizes the d-dimensional Ackley function with a distributed Bayesian
    optimization search; rank 0 asserts the result count and prints it on a
    ``DEEPHYPER-OUTPUT:`` line parsed by the parent test process.

    Fix: the original recorded ``t1 = time.time()`` (and imported ``time``)
    without ever using it; the dead timing code was removed.
    """
    import numpy as np

    from deephyper.problem import HpProblem
    from deephyper.search.hps import MPIDistributedBO

    d = 10
    domain = (-32.768, 32.768)
    hp_problem = HpProblem()
    for i in range(d):
        hp_problem.add_hyperparameter(domain, f"x{i}")

    def ackley(x, a=20, b=0.2, c=2 * np.pi):
        # Ackley benchmark function.
        d = len(x)
        s1 = np.sum(x**2)
        s2 = np.sum(np.cos(c * x))
        term1 = -a * np.exp(-b * np.sqrt(s1 / d))
        term2 = -np.exp(s2 / d)
        return term1 + term2 + a + np.exp(1)

    def run(job):
        config = job.parameters
        x = np.array([config[f"x{i}"] for i in range(d)])
        x = np.asarray_chkfinite(x)  # ValueError if any NaN or Inf
        # negated so that smaller Ackley values score higher
        return -ackley(x)

    search = MPIDistributedBO(
        hp_problem,
        run,
        log_dir=tmp_path,
    )

    max_evals = 40
    if search.rank == 0:
        results = search.search(max_evals=max_evals)
    else:
        search.search(max_evals=max_evals)
    search.comm.Barrier()
    if search.rank == 0:
        assert len(results) >= max_evals
        print("DEEPHYPER-OUTPUT:", float(len(results)))
@pytest.mark.fast
@pytest.mark.hps
@pytest.mark.mpi
@pytest.mark.redis
def test_dbo_timeout(tmp_path):
    """Relaunch this file under ``mpirun`` and check >= 40 results are produced.

    NOTE(review): despite its name (likely copy-pasted from the timeout test),
    this test exercises the ``max_evals`` stopping criterion.
    """
    command = f"mpirun -np 4 {PYTHON} {SCRIPT} _test_dbo_max_evals {tmp_path}"
    result = deephyper.test.run(command, live_output=False)
    val = deephyper.test.parse_result(result.stdout)
    assert int(val) >= 40
# Dispatch for mpirun: ``python <this_file> <function_name> <tmp_path>``.
if __name__ == "__main__":
    func = sys.argv[-2]
    func = globals()[func]
    func(sys.argv[-1])
| 1,760 | 23.458333 | 78 | py |
deephyper | deephyper-master/tests/deephyper/search/hps/test__cbo_mpi.py | import os
import sys
import pytest
# Interpreter and absolute path of this script, used to re-launch it under mpirun.
PYTHON = sys.executable
SCRIPT = os.path.abspath(__file__)
import deephyper.test
def _test_mpi_timeout(tmp_path):
    """Test if the timeout condition is working properly when the run-function runs indefinitely."""
    import time
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO
    from deephyper.evaluator import Evaluator
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")
    def run(config):
        # Busy-loop forever so that only the search timeout can stop the job.
        while True:
            1 + 1
            time.sleep(0.1)
        return config["x"]  # unreachable: kept to mimic a normal run-function
    with Evaluator.create(run, method="mpicomm") as evaluator:
        # ``evaluator`` is truthy only on the head rank of the MPI communicator.
        if evaluator:
            # NOTE(review): ``run`` (not ``evaluator``) is passed to CBO here —
            # confirm this is intended.
            search = CBO(
                problem, run, random_state=42, surrogate_model="DUMMY", log_dir=tmp_path
            )
            t1 = time.time()
            search.search(timeout=1)
            # The search should have been interrupted after 1 second.
    # The following must be placed after exiting the context manager.
    if evaluator:
        duration = time.time() - t1
        print(f"DEEPHYPER-OUTPUT: {duration}")
@pytest.mark.fast
@pytest.mark.hps
@pytest.mark.mpi
def test_mpi_timeout(tmp_path):
    """Launch the MPI worker and check the search stopped within ~1 second."""
    command = f"mpirun -np 4 {PYTHON} {SCRIPT} _test_mpi_timeout {tmp_path}"
    output = deephyper.test.run(command, live_output=False)
    measured_duration = deephyper.test.parse_result(output.stdout)
    assert measured_duration < 2
# Dispatch for mpirun: ``python <this_file> <function_name> <tmp_path>``.
if __name__ == "__main__":
    func = sys.argv[-2]
    func = globals()[func]
    func(sys.argv[-1])
| 1,516 | 25.614035 | 100 | py |
deephyper | deephyper-master/tests/deephyper/search/hps/test_parallel_cbo_manual.py | import os
import shutil
import sys
import pytest
# Interpreter and absolute path of this script, used to re-launch it under mpirun.
PYTHON = sys.executable
SCRIPT = os.path.abspath(__file__)
import deephyper.test
def _test_parallel_cbo_manual():
    """Manually coordinate one CBO search per MPI rank sharing a Redis storage.

    Rank 0 creates the search (and owns the results file); the other ranks
    join the same search via its broadcast ``search_id``.
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    from deephyper.problem import HpProblem
    def run(job):
        return -(job.parameters["x"] ** 2)
    from deephyper.search.hps import CBO
    from deephyper.evaluator import SerialEvaluator
    from deephyper.evaluator.storage import RedisStorage
    problem = HpProblem()
    problem.add_hyperparameter((-10.0, 10.0), "x")
    storage = RedisStorage()
    storage.connect()
    storage._redis.flushdb()  # start from a clean database shared by all ranks
    search_id = None
    if rank == 0:
        # The head rank creates the search and therefore the search id.
        evaluator = SerialEvaluator(run, storage=storage)
        search = CBO(problem, evaluator, random_state=42)
        search_id = search.search_id
        print(f"{search_id}")
    search_id = comm.bcast(search_id)
    print(f"rank={rank} - search_id={search_id}")
    if rank > 0:
        # Worker ranks attach to the existing search through its id.
        evaluator = SerialEvaluator(run, storage=storage, search_id=search_id)
        def dumps_evals(*args, **kwargs):
            pass
        # Only rank 0 should write results.csv: disable dumping on workers.
        evaluator.dump_evals = dumps_evals
        search = CBO(problem, evaluator, random_state=42)
    comm.Barrier()
    if rank == 0:
        results = search.search(max_evals=20)
    else:
        search.search(max_evals=20)
    comm.Barrier()
    if rank == 0:
        print(f"{len(results)} results")
        print(results.objective.tolist())
    comm.Barrier()
@pytest.mark.fast
@pytest.mark.hps
@pytest.mark.mpi
@pytest.mark.redis
def test_dbo_timeout():
    """Launch the manual parallel-CBO worker under ``mpirun``.

    NOTE(review): the name looks copy-pasted from the timeout test; the timing
    assertions below are commented out, so this currently only checks that
    the MPI run completes without error.
    """
    command = f"time mpirun -np 4 {PYTHON} {SCRIPT} _test_parallel_cbo_manual"
    result = deephyper.test.run(command, live_output=False)
    result = result.stderr.replace("\n", "").split(" ")
    # i = result.index("sys")
    # t = float(result[i - 1])
    # assert t < 3
# Dispatch for mpirun: ``python <this_file> <function_name>``.
if __name__ == "__main__":
    func = sys.argv[-1]
    func = globals()[func]
    func()
| 1,971 | 22.2 | 78 | py |
deephyper | deephyper-master/tests/deephyper/search/hps/test_dbo_timeout.py | import os
import shutil
import sys
import pytest
# Interpreter and absolute path of this script, used to re-launch it under mpirun.
PYTHON = sys.executable
SCRIPT = os.path.abspath(__file__)
import deephyper.test
def _test_dbo_timeout():
    """Worker executed under ``mpirun``: check DBO stops close to its timeout.

    Every rank runs a distributed Bayesian optimization of the Ackley
    function; rank 0 asserts the measured duration and cleans up the log
    directory.
    """
    import time
    import numpy as np
    from deephyper.problem import HpProblem
    from deephyper.search.hps import MPIDistributedBO
    d = 10
    domain = (-32.768, 32.768)
    hp_problem = HpProblem()
    for i in range(d):
        hp_problem.add_hyperparameter(domain, f"x{i}")
    def ackley(x, a=20, b=0.2, c=2 * np.pi):
        # Ackley benchmark function.
        d = len(x)
        s1 = np.sum(x**2)
        s2 = np.sum(np.cos(c * x))
        term1 = -a * np.exp(-b * np.sqrt(s1 / d))
        term2 = -np.exp(s2 / d)
        y = term1 + term2 + a + np.exp(1)
        return y
    def run(job):
        config = job.parameters
        x = np.array([config[f"x{i}"] for i in range(d)])
        x = np.asarray_chkfinite(x)  # ValueError if any NaN or Inf
        return -ackley(x)  # negated so that smaller Ackley values score higher
    log_dir = "log-dbo"
    search = MPIDistributedBO(
        hp_problem,
        run,
        log_dir=log_dir,
    )
    timeout = 2
    if search.rank == 0:
        t1 = time.time()
        results = search.search(timeout=timeout)
        duration = time.time() - t1
        # the search must not overshoot the timeout by more than ~1 second
        assert duration < timeout + 1
    else:
        search.search(timeout=timeout)
    search.comm.Barrier()
    if search.rank == 0:
        print("\n", results)
        shutil.rmtree(log_dir)  # remove the log directory created by the search
@pytest.mark.fast
@pytest.mark.hps
@pytest.mark.mpi
@pytest.mark.redis
def test_dbo_timeout():
    """Run the worker under ``time mpirun`` and bound the reported sys time."""
    command = f"time mpirun -np 4 {PYTHON} {SCRIPT} _test_dbo_timeout"
    completed = deephyper.test.run(command, live_output=False)
    tokens = completed.stderr.replace("\n", "").split(" ")
    # ``time`` reports "... <seconds> sys"; read the value preceding "sys"
    sys_seconds = float(tokens[tokens.index("sys") - 1])
    assert sys_seconds < 3
# Dispatch for mpirun: ``python <this_file> <function_name>``.
if __name__ == "__main__":
    func = sys.argv[-1]
    func = globals()[func]
    func()
| 1,836 | 22.551282 | 70 | py |
deephyper | deephyper-master/tests/deephyper/search/hps/test__cbo.py | import pytest
@pytest.mark.hps
def test_cbo_random_seed(tmp_path):
    """Two CBO searches with the same seed must sample identical configurations."""
    import numpy as np

    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    def sampled_configs(run_function):
        """Run a fresh seeded search and return the sampled "x" values."""
        search = CBO(
            problem,
            Evaluator.create(run_function, method="serial"),
            random_state=42,
            surrogate_model="DUMMY",
            log_dir=tmp_path,
        )
        return search.search(max_evals=4)[["p:x"]].to_numpy()

    # single-objective run-function
    def run(config):
        return config["x"]

    assert np.array_equal(sampled_configs(run), sampled_configs(run))

    # multi-objective run-function
    def run_multi(config):
        return config["x"], config["x"]

    assert np.array_equal(sampled_configs(run_multi), sampled_configs(run_multi))
@pytest.mark.hps
def test_sample_types(tmp_path):
    """Hyperparameter values must keep their declared types in the run-function."""
    import numpy as np

    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()
    problem.add_hyperparameter((0, 10), "x_int")
    problem.add_hyperparameter((0.0, 10.0), "x_float")
    problem.add_hyperparameter([0, "1", 2.0], "x_cat")

    def run(config):
        assert np.issubdtype(type(config["x_int"]), np.integer)
        assert np.issubdtype(type(config["x_float"]), float)

        # the categorical dimension mixes int, str and float choices
        category = config["x_cat"]
        if category == 0:
            assert np.issubdtype(type(category), np.integer)
        elif category == "1":
            assert type(category) is str or type(category) is np.str_
        else:
            assert np.issubdtype(type(category), float)
        return 0

    for surrogate in ("DUMMY", "RF"):
        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model=surrogate,
            log_dir=tmp_path,
        ).search(10)
@pytest.mark.hps
def test_sample_types_no_cat(tmp_path):
    """Int and float dimensions keep their types (no categorical dimension)."""
    import numpy as np

    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()
    problem.add_hyperparameter((0, 10), "x_int")
    problem.add_hyperparameter((0.0, 10.0), "x_float")

    def run(config):
        assert np.issubdtype(type(config["x_int"]), np.integer)
        assert np.issubdtype(type(config["x_float"]), float)
        return 0

    for surrogate in ("DUMMY", "RF"):
        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model=surrogate,
            log_dir=tmp_path,
        ).search(10)
@pytest.mark.hps
def test_gp(tmp_path):
    """The GP surrogate must handle float, int and categorical dimensions."""
    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    def launch(problem, run_function):
        """Run a 10-evaluation GP-based search on the given problem."""
        CBO(
            problem,
            Evaluator.create(run_function, method="serial"),
            random_state=42,
            surrogate_model="GP",
            log_dir=tmp_path,
        ).search(10)

    # float hyperparameter
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")
    launch(problem, lambda config: config["x"])

    # int hyperparameter
    problem = HpProblem()
    problem.add_hyperparameter((0, 10), "x")
    launch(problem, lambda config: config["x"])

    # categorical hyperparameter
    problem = HpProblem()
    problem.add_hyperparameter([f"{i}" for i in range(10)], "x")
    launch(problem, lambda config: int(config["x"]))
@pytest.mark.hps
def test_sample_types_conditional(tmp_path):
    """Conditional hyperparameters keep integer types in the active branch."""
    import ConfigSpace as cs
    import numpy as np

    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()

    # one categorical "choice" activates exactly one of the integer dimensions
    choice = problem.add_hyperparameter(name="choice", value=["choice1", "choice2"])
    x1_int = problem.add_hyperparameter(name="x1_int", value=(1, 10))
    x2_int = problem.add_hyperparameter(name="x2_int", value=(1, 10))
    problem.add_condition(cs.EqualsCondition(x1_int, choice, "choice1"))
    problem.add_condition(cs.EqualsCondition(x2_int, choice, "choice2"))

    def run(config):
        active = "x1_int" if config["choice"] == "choice1" else "x2_int"
        assert np.issubdtype(type(config[active]), np.integer)
        return 0

    CBO(
        problem,
        Evaluator.create(run, method="serial"),
        random_state=42,
        surrogate_model="DUMMY",
        log_dir=tmp_path,
    ).search(10)
@pytest.mark.hps
def test_timeout(tmp_path):
    """The search ``timeout`` must interrupt a run-function that never returns.

    NOTE: the run-function deliberately swallows *every* exception (bare
    ``except``) and then sleeps; the 1.5 s bound checks the timeout still
    brings the search down promptly.
    """
    import time
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")
    def run(config):
        try:
            # simulate working thread
            while True:
                1 + 1
        except: # simulate the catching of any exception here
            time.sleep(2)
        return config["x"]
    search = CBO(
        problem, run, random_state=42, surrogate_model="DUMMY", log_dir=tmp_path
    )
    t1 = time.time()
    result = search.search(timeout=1)
    duration = time.time() - t1
    assert duration < 1.5
@pytest.mark.hps
def test_initial_points(tmp_path):
    """The first evaluated configuration must be the user-provided initial
    point (here, the problem's default configuration)."""
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    # The objective simply echoes the sampled value of "x".
    run = lambda config: config["x"]

    search = CBO(
        problem,
        run,
        initial_points=[problem.default_configuration],
        random_state=42,
        surrogate_model="DUMMY",
        log_dir=tmp_path,
    )
    results = search.search(10)

    assert len(results) == 10
    # Row 0 corresponds to the initial point, so its objective is the default "x".
    assert results.loc[0, "objective"] == problem.default_configuration["x"]
@pytest.mark.hps
def test_cbo_checkpoint_restart(tmp_path):
    """CBO can (1) resume a paused search in-place and (2) warm-start a new
    search from the results of a previous one via ``fit_surrogate``."""
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    run = lambda config: config["x"]

    def make_search():
        # Both searches are configured identically.
        return CBO(
            problem,
            run,
            initial_points=[problem.default_configuration],
            random_state=42,
            surrogate_model="DUMMY",
            log_dir=tmp_path,
        )

    # Pause/continue: calling ``search`` twice accumulates evaluations.
    search_a = make_search()
    results_a = search_a.search(4)
    assert len(results_a) == 4
    assert len(search_a.search(6)) == 10

    # Restart: a fresh search reloads previous results as a checkpoint.
    search_b = make_search()
    search_b.fit_surrogate(results_a)
    assert len(search_b.search(6)) == 6


if __name__ == "__main__":
    test_cbo_checkpoint_restart(tmp_path=".")
| 8,589 | 23.403409 | 83 | py |
deephyper | deephyper-master/tests/deephyper/search/nas/test_random_mpicomm.py | import os
import sys
import time
import pytest
# Interpreter and absolute path of this script, used to re-launch it
# under ``mpirun`` from the pytest entry point below.
PYTHON = sys.executable
SCRIPT = os.path.abspath(__file__)
import deephyper.test
def _test_random_search_mpicomm():
    """Body of the MPI test, executed on every rank.

    Example:
        mpirun -np 4 python test_random_mpicomm.py
    """
    from deephyper.evaluator import Evaluator
    from deephyper.nas.run import run_debug_slow
    from deephyper.search.nas import Random
    from deephyper.test.nas import linearReg

    with Evaluator.create(run_debug_slow, method="mpicomm") as evaluator:
        # Only the rank that owns the evaluator runs the search.
        if evaluator:
            search = Random(
                linearReg.Problem,
                evaluator,
                log_dir="log-random-mpicomm",
                random_state=42,
            )
            start = time.time()
            results = search.search(timeout=2)
            elapsed = time.time() - start
            # At least one evaluation completed, and the timeout was honored.
            assert len(results) >= 1
            assert elapsed < 3
@pytest.mark.slow
@pytest.mark.nas
@pytest.mark.mpi
def test_mpicomm_evaluator():
    """Launch the MPI variant of the random search in a subprocess.

    The command re-executes this very script under ``mpirun`` with the name
    of the function to run as the last argument (dispatched by the
    ``__main__`` block of this script).

    Fix: the previously-bound ``result`` local was never used and is removed.
    """
    command = f"mpirun -np 4 {PYTHON} {SCRIPT} _test_random_search_mpicomm"
    # NOTE(review): the return value was unused; presumably
    # ``deephyper.test.run`` reports failures itself — confirm.
    deephyper.test.run(command, live_output=True)
if __name__ == "__main__":
    # Dispatch: the last CLI argument names the function to execute
    # (used by the mpirun command built in the test above).
    func_name = sys.argv[-1]
    globals()[func_name]()
| 1,206 | 21.773585 | 75 | py |
deephyper | deephyper-master/tests/deephyper/search/nas/test_agebo.py | import unittest
import pytest
@pytest.mark.slow
@pytest.mark.nas
class AgEBOTest(unittest.TestCase):
    """Tests for the AgEBO (aging evolution + Bayesian optimization) search."""

    def test_agebo_without_hp(self):
        """AgEBO requires a hyperparameter space: a pure NAS problem fails."""
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import AgEBO
        from deephyper.test.nas import linearReg

        # ValueError: No hyperparameter space was defined for this problem
        with pytest.raises(ValueError):
            AgEBO(
                linearReg.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )

    def test_agebo_with_hp(self):
        """Two AgEBO runs with the same seed must sample identical configs."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import AgEBO
        from deephyper.test.nas import linearRegHybrid

        columns = ["p:arch_seq", "p:batch_size", "p:learning_rate", "p:optimizer"]

        def sample():
            # Fresh evaluator + search, fixed seed.
            search = AgEBO(
                linearRegHybrid.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )
            return search.search(max_evals=4)[columns].to_numpy()

        assert np.array_equal(sample(), sample())
| 1,697 | 29.321429 | 84 | py |
deephyper | deephyper-master/tests/deephyper/search/nas/test_random.py | import unittest
import pytest
@pytest.mark.slow
@pytest.mark.nas
class RandomTest(unittest.TestCase):
    """Reproducibility tests for the random NAS search."""

    def test_random_search(self):
        """Two seeded runs on a pure NAS problem sample the same architectures."""
        import numpy as np
        import deephyper.test.nas.linearReg as linearReg
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import Random

        def sample():
            evaluator = Evaluator.create(run_debug_arch, method="serial")
            search = Random(linearReg.Problem, evaluator, random_state=42)
            return search.search(max_evals=4)["p:arch_seq"].to_numpy()

        assert np.array_equal(sample(), sample())

    def test_random_search_with_hp(self):
        """Same check on a hybrid (NAS + hyperparameter) problem."""
        import numpy as np
        import deephyper.test.nas.linearRegHybrid as linearRegHybrid
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import Random

        columns = ["p:arch_seq", "p:batch_size", "p:learning_rate", "p:optimizer"]

        def sample():
            evaluator = Evaluator.create(run_debug_arch, method="serial")
            search = Random(linearRegHybrid.Problem, evaluator, random_state=42)
            return search.search(max_evals=4)[columns].to_numpy()

        assert np.array_equal(sample(), sample())
if __name__ == "__main__":
    # Allow running this test directly without pytest.
    RandomTest().test_random_search()
| 1,847 | 30.322034 | 85 | py |
deephyper | deephyper-master/tests/deephyper/search/nas/test_regevomixed.py | import unittest
import pytest
@pytest.mark.slow
@pytest.mark.nas
class RegevoMixedTest(unittest.TestCase):
    """Reproducibility tests for the mixed regularized-evolution search."""

    def test_regovomixed_without_hp(self):
        """Two seeded runs on a pure NAS problem sample the same architectures."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import RegularizedEvolutionMixed
        from deephyper.test.nas import linearReg

        def sample():
            search = RegularizedEvolutionMixed(
                linearReg.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )
            return search.search(max_evals=4)

        res1 = sample()
        print(res1)  # kept for inspection when run directly
        res2 = sample()
        assert np.array_equal(
            res1[["p:arch_seq"]].to_numpy(), res2[["p:arch_seq"]].to_numpy()
        )

    def test_regevomixed_with_hp(self):
        """Same check on a hybrid (NAS + hyperparameter) problem."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import RegularizedEvolutionMixed
        from deephyper.test.nas import linearRegHybrid

        def sample():
            search = RegularizedEvolutionMixed(
                linearRegHybrid.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )
            return search.search(max_evals=4)[["p:arch_seq"]].to_numpy()

        assert np.array_equal(sample(), sample())
if __name__ == "__main__":
    # Allow running this test directly without pytest.
    RegevoMixedTest().test_regovomixed_without_hp()
| 2,048 | 29.132353 | 84 | py |
deephyper | deephyper-master/tests/deephyper/search/nas/test_nas.py | import pytest
import unittest
@pytest.mark.slow
@pytest.mark.nas
class TestNeuralArchitectureSearchAlgorithms(unittest.TestCase):
    """Smoke tests: every NAS search must honor the ``max_evals`` criterion."""

    def evaluate_search(self, search_cls, problem):
        """Run ``search_cls`` on ``problem`` and check the budget is respected."""
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch

        # Test the "max_evals" stopping criterion.
        evaluator = Evaluator.create(run_debug_arch, method="serial")
        search = search_cls(problem, evaluator)
        results = search.search(max_evals=10)
        self.assertEqual(len(results), 10)

        # NOTE: the "timeout" stopping criterion is currently not exercised
        # here (the corresponding checks are disabled).

    def test_random(self):
        from deephyper.search.nas import Random
        from deephyper.test.nas.linearReg import Problem as linear_reg_problem
        from deephyper.test.nas.linearRegHybrid import (
            Problem as linear_reg_hybrid_problem,
        )

        self.evaluate_search(Random, linear_reg_problem)
        self.evaluate_search(Random, linear_reg_hybrid_problem)

    def test_regevo(self):
        from deephyper.search.nas import RegularizedEvolution
        from deephyper.test.nas.linearReg import Problem as linear_reg_problem

        self.evaluate_search(RegularizedEvolution, linear_reg_problem)

    def test_regevomixed(self):
        from deephyper.search.nas import RegularizedEvolutionMixed
        from deephyper.test.nas.linearReg import Problem as linear_reg_problem
        from deephyper.test.nas.linearRegHybrid import (
            Problem as linear_reg_hybrid_problem,
        )

        self.evaluate_search(RegularizedEvolutionMixed, linear_reg_problem)
        self.evaluate_search(RegularizedEvolutionMixed, linear_reg_hybrid_problem)

    def test_ambsmixed(self):
        from deephyper.search.nas import AMBSMixed
        from deephyper.test.nas.linearReg import Problem as linear_reg_problem
        from deephyper.test.nas.linearRegHybrid import (
            Problem as linear_reg_hybrid_problem,
        )

        self.evaluate_search(AMBSMixed, linear_reg_problem)
        self.evaluate_search(AMBSMixed, linear_reg_hybrid_problem)

    def test_agebo(self):
        from deephyper.search.nas import AgEBO
        from deephyper.test.nas.linearReg import Problem as linear_reg_problem
        from deephyper.test.nas.linearRegHybrid import (
            Problem as linear_reg_hybrid_problem,
        )

        # A pure NAS problem has no hyperparameter space: AgEBO must reject it.
        with pytest.raises(ValueError):
            self.evaluate_search(AgEBO, linear_reg_problem)
        self.evaluate_search(AgEBO, linear_reg_hybrid_problem)
| 2,944 | 36.278481 | 82 | py |
deephyper | deephyper-master/tests/deephyper/search/nas/test_ambsmixed.py | import unittest
import pytest
@pytest.mark.slow
@pytest.mark.nas
class AgEBOTest(unittest.TestCase):
    """Reproducibility tests for the AMBSMixed search.

    NOTE(review): the class name says "AgEBO" but both tests target
    ``AMBSMixed`` — likely a copy/paste leftover. Renaming would change
    test collection, so the name is kept as-is.
    """

    def test_ambsmixed_without_hp(self):
        """Two seeded runs on a pure NAS problem sample the same architectures."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import AMBSMixed
        from deephyper.test.nas import linearReg

        def sample():
            search = AMBSMixed(
                linearReg.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )
            return search.search(max_evals=4)[["p:arch_seq"]].to_numpy()

        assert np.array_equal(sample(), sample())

    def test_ambsmixed_with_hp(self):
        """Same check on a hybrid (NAS + hyperparameter) problem."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import AMBSMixed
        from deephyper.test.nas import linearRegHybrid

        def sample():
            search = AMBSMixed(
                linearRegHybrid.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )
            return search.search(max_evals=4)[["p:arch_seq"]].to_numpy()

        assert np.array_equal(sample(), sample())
| 1,803 | 29.066667 | 84 | py |
deephyper | deephyper-master/tests/deephyper/search/nas/test_regevo.py | import unittest
import pytest
@pytest.mark.slow
@pytest.mark.nas
class RegevoTest(unittest.TestCase):
    """Tests for the (pure NAS) regularized-evolution search."""

    def test_regovo_with_hp(self):
        """A hybrid problem (with hyperparameters) must be rejected, since
        RegularizedEvolution only supports pure NAS problems."""
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import RegularizedEvolution
        from deephyper.test.nas import linearRegHybrid

        with pytest.raises(ValueError):
            RegularizedEvolution(
                linearRegHybrid.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )

    def test_regevo_without_hp(self):
        """Two seeded runs on a pure NAS problem sample the same architectures."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import RegularizedEvolution
        from deephyper.test.nas import linearReg

        def sample():
            search = RegularizedEvolution(
                linearReg.Problem,
                Evaluator.create(run_debug_arch, method="serial"),
                random_state=42,
            )
            return search.search(max_evals=4)[["p:arch_seq"]].to_numpy()

        assert np.array_equal(sample(), sample())
| 1,545 | 30.55102 | 84 | py |
deephyper | deephyper-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import git
import sphinx_book_theme
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "DeepHyper"
copyright = "2018-2022, Argonne"
author = "Argonne"
# The short X.Y version is read from the package source without importing it.
# Fix: the path literal carried a pointless ``f`` prefix (no placeholders).
about = {}
with open("../deephyper/__version__.py") as f:
    exec(f.read(), about)
version = about["__version__"]
# The full version, including alpha/beta/rc tags.
# BUG FIX: the branch previously tested ``about["__version__"]`` (which is
# never empty here) instead of ``about["__version_suffix__"]``, so the
# suffix was unconditionally appended. Branch on the suffix instead.
if about["__version_suffix__"] == "":
    release = f'v{about["__version__"]}'
else:
    release = f'v{about["__version__"]}-{about["__version_suffix__"]}'
# PULL Tutorials: select which branch of the tutorials repo to embed.
branch_name_map = {"master": "main", "latest": "main", "develop": "develop"}

if os.environ.get("READTHEDOCS"):
    # On Read the Docs the built version is exposed via the environment.
    doc_version = os.environ["READTHEDOCS_VERSION"]
else:
    # Locally, use the name of the currently checked-out git branch.
    github_repo = git.Repo(search_parent_directories=True)
    doc_version = github_repo.active_branch.name

tutorial_branch = branch_name_map.get(doc_version, "develop")

tutorials_github_link = "https://github.com/deephyper/tutorials.git"
tutorials_dest_dir = "tutorials"
def pull_tutorials(github_link, dest_dir, tutorial_branch):
    """Fetch a shallow clone of the tutorials repository into ``dest_dir``.

    Args:
        github_link: URL of the git repository to clone.
        dest_dir: local directory receiving the clone (wiped first).
        tutorial_branch: branch to check out.

    Fix: replaces shell f-strings passed to ``os.system`` with
    ``shutil.rmtree`` and an argument-list ``subprocess.run``, avoiding
    shell-quoting issues with the interpolated values.
    """
    # Local imports keep the module-level import block untouched.
    import shutil
    import subprocess

    # Equivalent of ``rm -rf dest_dir`` (ignore_errors mirrors ``-f``).
    shutil.rmtree(dest_dir, ignore_errors=True)
    # Shallow, single-branch clone (return code intentionally not checked,
    # matching the previous ``os.system`` behavior).
    subprocess.run(
        [
            "git",
            "clone",
            "--depth=1",
            f"--branch={tutorial_branch}",
            github_link,
            dest_dir,
        ],
        check=False,
    )
    # Drop git metadata so only the tutorial content is embedded in the docs.
    shutil.rmtree(os.path.join(dest_dir, ".git"), ignore_errors=True)


pull_tutorials(tutorials_github_link, tutorials_dest_dir, tutorial_branch)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"nbsphinx",
"sphinx_book_theme",
"sphinx_copybutton",
"sphinx_gallery.gen_gallery",
"sphinx_lfs_content",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.githubpages",
"sphinx.ext.ifconfig",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
autosummary_generate = True
autosummary_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = [
"_templates",
os.path.join(sphinx_book_theme.get_html_theme_path(), "components"),
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = {".rst": "restructuredtext"}
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [
"_build",
"_templates",
"Thumbs.db",
".DS_Store",
"examples/*.ipynb",
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_theme_path = [sphinx_book_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_logo = "_static/logo/medium.png"
html_theme_options = {
# header settings
"repository_url": "https://github.com/deephyper/deephyper",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"repository_branch": "develop",
"path_to_docs": "docs",
"use_download_button": True,
# sidebar settings
"show_navbar_depth": 1,
"logo_only": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "deephyperdoc"
# CopyButton Settings
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "deephyper.tex", "deephyper Documentation", "ArgonneMCS", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "deephyper", "deephyper Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"deephyper",
"deephyper Documentation",
author,
"Automated Machine Learning Software for HPC",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# makes sphinx do a mock import of mpi4py so it’s not broken when you try to do auto-docs and import mpi4py
autodoc_mock_imports = [
"horovod",
"joblib",
"matplotlib",
"mpi4py",
"nbformat",
"networkx",
"ray",
"redis",
"sklearn",
"skopt",
"tensorflow_probability",
"tensorflow",
"tqdm",
"xgboost",
]
autosummary_mock_imports = autodoc_mock_imports
# Remove <BLANKLINE>
trim_doctest_flags = True
# Sphinx Gallery
sphinx_gallery_conf = {
"examples_dirs": "../examples", # path to your example scripts
"gallery_dirs": "examples", # path to where to save gallery generated output
"filename_pattern": "/plot_",
"ignore_pattern": r"_util\.py",
}
def setup(app):
    """Sphinx extension hook: register the project's custom CSS and JS."""
    app.add_css_file("custom.css")
    app.add_js_file("custom.js")
| 8,809 | 27.79085 | 107 | py |
deephyper | deephyper-master/docs/examples/plot_from_serial_to_parallel_hyperparameter_search.py | # -*- coding: utf-8 -*-
"""
From Serial to Parallel Evaluations
===================================
**Author(s)**: Romain Egele.
This example demonstrates the advantages of parallel evaluations over serial evaluations. We start by defining an artificial black-box ``run``-function by using the Ackley function:
.. image:: https://www.sfu.ca/~ssurjano/ackley.png
:width: 400
:alt: Ackley Function in 2D
We will use the ``time.sleep`` function to simulate an average execution budget of 2 seconds, which helps illustrate the advantage of parallel evaluations. The ``@profile`` decorator is useful to collect the starting/ending times of the ``run``-function execution, which lets us know exactly when we are inside the black-box. When using this decorator, the ``run``-function will return a dictionary with 2 new keys ``"timestamp_start"`` and ``"timestamp_end"``. The ``run``-function is defined in a separate module because of the "multiprocessing" backend that we are using in this example.
.. literalinclude:: ../../examples/black_box_util.py
:language: python
After defining the black-box we can continue with the definition of our main script:
"""
import black_box_util as black_box
# %%
# Then we define the variable(s) we want to optimize. For this problem we optimize Ackley in a 2-dimensional search space; the true minimum is located at ``(0, 0)``.
from deephyper.problem import HpProblem

# 2D Ackley: each coordinate lives in the function's standard domain.
nb_dim = 2
problem = HpProblem()
for i in range(nb_dim):
    problem.add_hyperparameter((-32.768, 32.768), f"x{i}")

# Last expression: rendered by sphinx-gallery.
problem
# %%
# Then we define the serial search by creating a ``"serial"``-evaluator and we execute the search with a fixed time budget of 2 minutes (i.e., 120 seconds).
if __name__ == "__main__":
    from deephyper.evaluator import Evaluator
    from deephyper.evaluator.callback import TqdmCallback
    from deephyper.search.hps import CBO

    # Same wall-clock budget (seconds) for every search in this example.
    timeout = 120

    results = {}

    # Baseline: a single sequential worker.
    serial_evaluator = Evaluator.create(
        black_box.run_ackley,
        method="serial",
        method_kwargs={"callbacks": [TqdmCallback()]},
    )
    serial_search = CBO(problem, serial_evaluator, random_state=42)
    results["serial"] = serial_search.search(timeout=timeout)
# %%
# After, executing the serial-search for 2 minutes we can create a parallel search which uses the ``"process"``-evaluator and defines 5 parallel workers. The search is also executed for 2 minutes.
if __name__ == "__main__":
    # Same search, but with 5 workers evaluating configurations in parallel.
    parallel_evaluator = Evaluator.create(
        black_box.run_ackley,
        method="process",
        method_kwargs={"num_workers": 5, "callbacks": [TqdmCallback()]},
    )
    parallel_search = CBO(problem, parallel_evaluator, random_state=42)
    results["parallel"] = parallel_search.search(timeout=timeout)
# %%
# Finally, we plot the results from the collected DataFrame. The execution time is used as the x-axis, which helps us visualize the advantages of the parallel search.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Objective vs. completion time: the parallel search reaches good
    # objectives sooner thanks to its higher evaluation throughput.
    plt.figure()
    for strategy, df in results.items():
        plt.scatter(df.timestamp_end, df.objective, label=strategy)
        plt.plot(df.timestamp_end, df.objective.cummax())
    plt.xlabel("Time (sec.)")
    plt.ylabel("Objective")
    plt.grid()
    plt.legend()
    plt.show()
| 3,321 | 39.512195 | 590 | py |
deephyper | deephyper-master/docs/examples/plot_transfer_learning_for_hps.py | # -*- coding: utf-8 -*-
"""
Transfer Learning for Hyperparameter Search
===========================================
**Author(s)**: Romain Egele.
In this example we present how to apply transfer-learning for hyperparameter search. Let's assume you have a bunch of similar tasks, for example the search of neural networks hyperparameters for different datasets. You can easily imagine that close choices of hyperparameters can perform well on these different datasets, even if some light additional tuning can help improve the performance. Therefore, you can perform an expensive search once and then reuse the explored set of hyperparameters of this search to bias the following search. Here, we will use a cheap to compute and easy to understand example where we maximize the :math:`f(x) = -\sum_{i=0}^{n-1} x_i^2` function. In this case the size of the problem can be defined by the variable :math:`n`. We will start by optimizing the small-size problem where :math:`n=1`, then apply transfer-learning from it to optimize the larger-size problem where :math:`n=2`, and visualize the difference compared to not applying transfer-learning on this larger problem instance.
Let us start by defining the run-functions of the small and large scale problems:
"""
# %%
import functools
def run(config: dict, N: int) -> float:
    """Negated sum of squares over the first *N* coordinates of *config*."""
    return -sum(config[f"x{i}"] ** 2 for i in range(N))


# Pre-bind the problem size for the small and large instances.
run_small = functools.partial(run, N=1)
run_large = functools.partial(run, N=2)
# %%
# Then, we can define the hyperparameter problem space based on :math:`n`
from deephyper.problem import HpProblem

# Small instance: a single decision variable in [-10, 10].
N = 1
problem_small = HpProblem()
for i in range(N):
    problem_small.add_hyperparameter((-10.0, 10.0), f"x{i}")

problem_small
# %%
# Large instance: two decision variables in the same range.
N = 2
problem_large = HpProblem()
for i in range(N):
    problem_large.add_hyperparameter((-10.0, 10.0), f"x{i}")

problem_large
# %%
# Then, we define setup the search and execute it:
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
from deephyper.search.hps import CBO

results = {}
max_evals = 20

# Search on the small problem; its results seed the transfer-learning run.
evaluator_small = Evaluator.create(
    run_small, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
)
search_small = CBO(problem_small, evaluator_small, random_state=42)
results["Small"] = search_small.search(max_evals)
# %%
# Search on the large problem, from scratch (no transfer-learning).
evaluator_large = Evaluator.create(
    run_large, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
)
search_large = CBO(problem_large, evaluator_large, random_state=42)
results["Large"] = search_large.search(max_evals)
# %%
# Search on the large problem, warm-started from the small-problem results.
evaluator_large_tl = Evaluator.create(
    run_large, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
)
search_large_tl = CBO(problem_large, evaluator_large_tl, random_state=42)
search_large_tl.fit_generative_model(results["Small"])
results["Large+TL"] = search_large_tl.search(max_evals)
# %%
# Finally, we compare the results and quickly see that transfer-learning provided a significant speed-up for the search:
import matplotlib.pyplot as plt

plt.figure()
for strategy, df in results.items():
    # One point per completed evaluation, in completion order.
    iterations = list(range(len(df)))
    plt.scatter(iterations, df.objective, label=strategy, alpha=0.5)
    plt.plot(iterations, df.objective.cummax(), alpha=0.5)
# Fix: the x-axis carries the evaluation index, not wall-clock time, so the
# previous "Time (sec.)" label was wrong.
plt.xlabel("Iterations")
plt.ylabel("Objective")
plt.grid()
plt.legend()
plt.show()
| 3,293 | 36.431818 | 1,014 | py |
deephyper | deephyper-master/docs/examples/plot_notify_failures_hyperparameter_search.py | # -*- coding: utf-8 -*-
"""
Notify Failures in Hyperparameter optimization
==============================================
**Author(s)**: Romain Egele.
This example demonstrates how to handle failure of objectives in hyperparameter search. In many cases, such as software auto-tuning (where we minimize the run-time of a software application), some configurations can create run-time errors and therefore no scalar objective is returned. A default choice could be to return the worst-case objective in this case, if known, and it can be done inside the ``run``-function. Other possibilities are to ignore these configurations or to replace them with the running mean/min objective. To illustrate such a use-case we define an artificial ``run``-function which will fail when one of its input parameters is greater than 0.5. To define a failure, it is possible to return a "string" value with ``"F"`` as prefix such as:
"""
def run(config: dict) -> float:
    """Toy objective: report a failure whenever ``y`` exceeds 0.5."""
    if config["y"] > 0.5:
        # A string starting with "F" tells DeepHyper the evaluation failed.
        return "F_postfix"
    return config["x"]
# %%
# Then, we define the corresponding hyperparameter problem where ``x`` is the value to maximize and ``y`` is a value impacting the appearance of failures.
from deephyper.problem import HpProblem

problem = HpProblem()
# "x" is the value to maximize; "y" drives the probability of failure.
problem.add_hyperparameter([1, 2, 4, 8, 16, 32], "x")
problem.add_hyperparameter((0.0, 1.0), "y")
print(problem)
# %%
# Then, we define a centralized Bayesian optimization (CBO) search (i.e., master-worker architecture) which uses the Random-Forest regressor as default surrogate model. We will compare the ``ignore`` strategy which filters-out failed configurations, the ``mean`` strategy which replaces a failure by the running mean of collected objectives and the ``min`` strategy which replaces by the running min of collected objectives.
from deephyper.search.hps import CBO
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback

results = {}
max_evals = 30
# Run one search per failure-handling strategy and keep each result table.
for failure_strategy in ["ignore", "mean", "min"]:
    print(f"Executing failure strategy: {failure_strategy}")
    evaluator = Evaluator.create(
        run, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}
    )
    search = CBO(
        problem,
        evaluator,
        filter_failures=failure_strategy,
        log_dir=f"search_{failure_strategy}",
        random_state=42,
    )
    results[failure_strategy] = search.search(max_evals)
# %%
# Finally we plot the collected results
import matplotlib.pyplot as plt
import numpy as np

plt.figure()
for k, (failure_strategy, df) in enumerate(results.items()):
    plt.subplot(3, 1, k + 1)
    if df.objective.dtype != np.float64:
        # A non-float dtype means some objectives are "F..." failure markers.
        positions = np.arange(len(df))
        is_failure = df.objective.str.startswith("F")
        idx_failed = np.where(is_failure)[0]
        idx_success = np.where(~is_failure)[0]
        plt.scatter(
            positions[idx_success],
            df["objective"][idx_success].astype(float),
            label=failure_strategy,
        )
        # Failures are drawn at objective 0 as red downward triangles.
        plt.scatter(
            positions[idx_failed],
            np.zeros(idx_failed.shape),
            marker="v",
            color="red",
        )
    plt.xlabel(r"Iterations")
    plt.ylabel(r"Objective")
    plt.legend()
plt.show()
| 3,235 | 42.146667 | 760 | py |
deephyper | deephyper-master/docs/examples/plot_profile_worker_utilization.py | # -*- coding: utf-8 -*-
"""
Profile the Worker Utilization
==============================
**Author(s)**: Romain Egele.
This example demonstrates the advantages of parallel evaluations over serial evaluations. We start by defining an artificial black-box ``run``-function by using the Ackley function:
.. image:: https://www.sfu.ca/~ssurjano/ackley.png
:width: 400
:alt: Ackley Function in 2D
We will use the ``time.sleep`` function to simulate an average execution budget of 2 seconds, which helps illustrate the advantage of parallel evaluations. The ``@profile`` decorator is useful to collect the starting/ending times of the ``run``-function execution, which lets us know exactly when we are inside the black-box. This decorator is necessary when profiling the worker utilization. When using this decorator, the ``run``-function will return a dictionary with 2 new keys ``"timestamp_start"`` and ``"timestamp_end"``. The ``run``-function is defined in a separate module because of the "multiprocessing" backend that we are using in this example.
.. literalinclude:: ../../examples/black_box_util.py
:language: python
:emphasize-lines: 19-28
:linenos:
After defining the black-box we can continue with the definition of our main script:
"""
import black_box_util as black_box

# %%
# Then we define the variable(s) we want to optimize. For this problem we
# optimize Ackley in a 2-dimensional search space, the true minimum is
# located at ``(0, 0)``.
from deephyper.problem import HpProblem

nb_dim = 2

problem = HpProblem()
for i in range(nb_dim):
    problem.add_hyperparameter((-32.768, 32.768), f"x{i}")
problem  # bare expression: sphinx-gallery renders the problem's repr

# %%
# Then we define a parallel search.
if __name__ == "__main__":
    from deephyper.evaluator import Evaluator
    from deephyper.evaluator.callback import TqdmCallback
    from deephyper.search.hps import CBO

    timeout = 20  # seconds of search budget
    num_workers = 4  # parallel worker processes
    results = {}

    evaluator = Evaluator.create(
        black_box.run_ackley,
        method="process",
        method_kwargs={
            "num_workers": num_workers,
            "callbacks": [TqdmCallback()],  # progress bar over evaluations
        },
    )
    search = CBO(problem, evaluator, random_state=42)
    results = search.search(timeout=timeout)
# %%
# Finally, we plot the results from the collected DataFrame.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np

    def compile_profile(df):
        """Take the results dataframe as input and return the number of jobs running at a given timestamp."""
        history = []

        for _, row in df.iterrows():
            history.append((row["timestamp_start"], 1))  # a job starts: +1 busy worker
            history.append((row["timestamp_end"], -1))  # a job ends: -1 busy worker

        history = sorted(history, key=lambda v: v[0])
        nb_workers = 0
        timestamp = [0]  # seed at t=0 with 0 jobs running
        n_jobs_running = [0]
        # NOTE: loop variable "time" shadows the stdlib module name (not imported here).
        for time, incr in history:
            nb_workers += incr
            timestamp.append(time)
            n_jobs_running.append(nb_workers)

        return timestamp, n_jobs_running

    plt.figure()

    # Top: objective over time, with the running best as a line.
    plt.subplot(2, 1, 1)
    plt.scatter(results.timestamp_end, results.objective)
    plt.plot(results.timestamp_end, results.objective.cummax())
    plt.xlabel("Time (sec.)")
    plt.ylabel("Objective")
    plt.grid()

    # Bottom: percentage of busy workers over time.
    plt.subplot(2, 1, 2)
    x, y = compile_profile(results)
    y = np.asarray(y) / num_workers * 100

    plt.step(
        x,
        y,
        where="pre",
    )
    plt.ylim(0, 100)
    plt.xlabel("Time (sec.)")
    plt.ylabel("Worker Utilization (%)")
    plt.tight_layout()
    plt.show()
| 3,542 | 31.805556 | 657 | py |
deephyper | deephyper-master/deephyper/__version__.py | VERSION = (0, 5, 0)
__version__ = ".".join(map(str, VERSION))
# alpha/beta/rc tags
__version_suffix__ = ""
| 109 | 14.714286 | 41 | py |
deephyper | deephyper-master/deephyper/__init__.py | """
DeepHyper is a distributed machine learning (`AutoML <https://en.wikipedia.org/wiki/Automated_machine_learning>`_) package for automating the development of deep neural networks for scientific applications. It can run on a single laptop as well as on 1,000 of nodes.
It comprises different tools such as:
* Optimizing hyper-parameters for a given black-box function.
* Neural architecture search to discover high-performing deep neural network with variable operations and connections.
* Automated machine learning, to easily experiment many learning algorithms from Scikit-Learn.
DeepHyper provides an infrastructure that targets experimental research in NAS and HPS methods, scalability, and portability across diverse supercomputers.
It comprises three main modules:
* :mod:`deephyper.problem`: Tools for defining neural architecture and hyper-parameter search problems.
* :mod:`deephyper.evaluator` : A simple interface to dispatch model evaluation tasks. Implementations range from `process` for laptop experiments to `ray` for large-scale runs on HPC systems.
* :mod:`deephyper.search`: Search methods for NAS and HPS. By extending the generic `Search` class, one can easily add new NAS or HPS methods to DeepHyper.
DeepHyper installation requires **Python >= 3.7**.
"""
import warnings

from deephyper.__version__ import __version__, __version_suffix__  # noqa: F401

name = "deephyper"
version = __version__

# Suppress warnings from deephyper.skopt using deprecated sklearn API
for _message in (
    "sklearn.externals.joblib is deprecated",
    "the sklearn.metrics.scorer module",
):
    warnings.filterwarnings("ignore", category=FutureWarning, message=_message)
deephyper | deephyper-master/deephyper/core/parser.py | import argparse
import inspect
from inspect import signature
def add_arguments_from_signature(parser, obj, prefix="", exclude=()):
    """Add arguments to ``parser`` based on ``obj``'s keyword parameters.

    :meta private:

    Args:
        parser (ArgumentParser): the argument parser to which we want to add arguments.
        obj (type): the class (or callable) whose constructor signature provides
            the parameters.
        prefix (str): optional prefix for the generated option names, e.g.
            ``prefix="ray"`` turns parameter ``num_cpus`` into ``--ray-num-cpus``.
        exclude (sequence): parameter names to skip (e.g. already added from a
            base class). The default is an immutable tuple: the previous mutable
            ``[]`` default was shared across calls.

    Returns:
        list: names of the parameters for which an argument was added.
    """
    sig = signature(obj)
    prefix = f"{prefix}-" if prefix else ""

    added_arguments = []
    for p_name, p in sig.parameters.items():
        if p_name in exclude:
            continue
        # Only positional-or-keyword parameters map naturally to CLI options.
        # Use the public inspect.Parameter API instead of inspect._POSITIONAL_OR_KEYWORD.
        if p.kind is not inspect.Parameter.POSITIONAL_OR_KEYWORD:
            continue

        arg_format = f"--{prefix}{p_name.replace('_', '-')}"
        arg_kwargs = {"help": ""}

        # When annotated, use the annotation to type-convert the CLI string.
        if p.annotation is not inspect.Parameter.empty:
            arg_kwargs["type"] = p.annotation
            arg_kwargs["help"] += f"Type[{p.annotation.__name__}]. "

        # A parameter without a default becomes a required argument.
        if p.default is not inspect.Parameter.empty:
            arg_kwargs["default"] = p.default
            arg_kwargs["help"] += f"Defaults to '{str(p.default)}'. "
        else:
            arg_kwargs["required"] = True

        parser.add_argument(arg_format, **arg_kwargs)
        added_arguments.append(p_name)

    return added_arguments
def str2bool(v):
    """Parse an argparse value into a boolean.

    :meta private:

    Accepts real booleans unchanged; otherwise matches common textual
    spellings case-insensitively, and raises for anything else.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {"yes", "true", "t", "y", "1"}:
        return True
    if lowered in {"no", "false", "f", "n", "0"}:
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
| 1,822 | 28.885246 | 99 | py |
deephyper | deephyper-master/deephyper/core/__init__.py | 0 | 0 | 0 | py | |
deephyper | deephyper-master/deephyper/core/cli/_new_problem.py | """
Create a DeepHyper Problem
--------------------------
Command line to create a new problem sub-package in a DeepHyper projet package.
It can be used with:
.. code-block:: console
$ deephyper new-problem hps problem_name
"""
import glob
import os
import pathlib
from jinja2 import Template
def add_subparser(subparsers):
    """Register the ``new-problem`` sub-command on ``subparsers``.

    :meta private:
    """
    subparser = subparsers.add_parser(
        "new-problem",
        help="Create a default hyperparameter or neural architecture search experiment.",
    )
    subparser.add_argument(
        "mode", type=str, choices=["nas", "hps"], help="NAS or HPS problem"
    )
    subparser.add_argument(
        "name", type=str, help="Name of the problem directory to create"
    )
    subparser.set_defaults(func=main)
def main(mode, name, *args, **kwargs):
    """Create a new problem sub-package in the current DeepHyper project.

    :meta private:

    Args:
        mode (str): either ``"nas"`` or ``"hps"``.
        name (str): name of the problem directory to create; must be a valid
            Python identifier without ``/``.

    Raises:
        FileNotFoundError: if the project markers (``setup.py``/``.deephyper``)
            are missing from the parent directory.
        ValueError: if ``name`` is not a valid problem name.
        FileExistsError: if the problem directory already exists.
    """
    prob_name = name
    current_path = os.getcwd()
    # NOTE(review): the markers are checked in the *parent* of the current
    # directory (we are expected to be inside the project package folder).
    project_path = os.path.dirname(current_path)

    # Explicit exceptions instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip these checks.
    if not os.path.exists(os.path.join(project_path, "setup.py")):
        raise FileNotFoundError("No setup.py in current directory")
    if not os.path.exists(os.path.join(project_path, ".deephyper")):
        raise FileNotFoundError("Not inside a deephyper project directory")
    if "/" in prob_name:
        raise ValueError('Problem name must not contain "/"')
    if not prob_name.isidentifier():
        raise ValueError(f"{prob_name} is not a valid Python identifier")

    # Create the sub-package: directory + empty __init__.py.
    pathlib.Path(prob_name).mkdir(parents=False, exist_ok=False)
    with open(os.path.join(prob_name, "__init__.py"), "w"):
        pass

    render_files(mode, prob_name)
def render_files(mode, prob_name):
    """Render the Jinja templates for ``mode`` into the new problem folder.

    :meta private:

    Args:
        mode (str): either ``"nas"`` or ``"hps"``; selects the template folder.
        prob_name (str): name of the destination problem directory.
    """
    package = os.path.basename(os.getcwd())
    print("DeepHyper project detected: ", package)

    templates_pattern = os.path.join(
        os.path.dirname(__file__), "templates", mode, "*.tmpl"
    )
    for template_name in glob.glob(templates_pattern):
        # Use a context manager: the previous `open(...).read()` leaked the
        # file handle.
        with open(template_name) as template_file:
            template = Template(template_file.read())

        # Drop the ".tmpl" suffix by slicing. The previous
        # `rstrip(".tmpl")` strips a *character set*, which would mangle
        # names such as "setup.tmpl" into "setu".
        py_name = os.path.basename(template_name)[: -len(".tmpl")]
        with open(os.path.join(prob_name, py_name), "w") as fp:
            fp.write(
                template.render(
                    pckg=package,
                    pb_folder=prob_name,
                )
            )
            print(" creating ", fp.name)
| 2,325 | 27.024096 | 89 | py |
deephyper | deephyper-master/deephyper/core/cli/_start_project.py | """
Start a DeepHyper Project
-------------------------
Command line to create a new DeepHyper project package. The package is automatically installed to the current virtual Python environment.
It can be used with:
.. code-block:: console
$ deephyper start-project project_name
"""
import os
import pathlib
import subprocess
def add_subparser(subparsers):
    """Register the ``start-project`` sub-command on ``subparsers``.

    :meta private:
    """
    subparser = subparsers.add_parser(
        "start-project",
        help="Set up a new DeepHyper project as a Python package.",
    )
    subparser.add_argument("path", type=str, help="Path to the new project directory.")
    subparser.set_defaults(func=main)
def main(path, *args, **kwargs):
    """Create a new DeepHyper project package at ``path`` and pip-install it.

    :meta private:

    Args:
        path (str): path of the project directory to create; its basename
            becomes the package name.
    """
    import sys  # local import: only needed to locate the current interpreter

    path = os.path.abspath(path)
    project_name = os.path.basename(path)
    path_pkg = os.path.join(path, project_name)

    # Create the package layout: <path>/<project_name>/ with an __init__.py,
    # plus a minimal setup.py and a ".deephyper" marker at the project root.
    pathlib.Path(path_pkg).mkdir(parents=True, exist_ok=False)
    with open(os.path.join(path, "setup.py"), "w") as fp:
        fp.write(
            f"from setuptools import setup, find_packages\n\nsetup(\n name='{project_name}',\n packages=find_packages(),\n install_requires=[]\n)"
        )
    with open(os.path.join(path_pkg, "__init__.py"), "w"):
        pass
    with open(os.path.join(path, ".deephyper"), "w"):
        pass

    # Install in editable mode with the *current* interpreter: a bare "pip"
    # executable may belong to a different Python environment.
    result = subprocess.run(
        [sys.executable, "-m", "pip", "install", "-e", f"{path}"],
        stdout=subprocess.PIPE,
    )
    if result.returncode != 0:
        print(
            f"The package could not be installed automatically! Export the following in your environment to access the package from anywhere:\n"
            f"export PYTHONPATH={path}:$PYTHONPATH"
        )
    else:
        print(result.stdout.decode("utf-8"), end="")
| 1,811 | 29.2 | 155 | py |
deephyper | deephyper-master/deephyper/core/cli/_hps.py | """
Hyperparameter Search
---------------------
Command line to execute hyperparameter search.
.. code-block:: bash
$ deephyper hps ambs --help
usage: deephyper hps ambs [-h] --problem PROBLEM --evaluator EVALUATOR [--random-state RANDOM_STATE] [--log-dir LOG_DIR] [--verbose VERBOSE] [--surrogate-model SURROGATE_MODEL] [--acq-func ACQ_FUNC]
[--kappa KAPPA] [--xi XI] [--liar-strategy LIAR_STRATEGY] [--n-jobs N_JOBS] [--max-evals MAX_EVALS] [--timeout TIMEOUT] --run-function RUN_FUNCTION [--num-workers NUM_WORKERS]
[--callbacks CALLBACKS] [--ray-address RAY_ADDRESS] [--ray-password RAY_PASSWORD] [--ray-num-cpus RAY_NUM_CPUS] [--ray-num-gpus RAY_NUM_GPUS]
[--ray-num-cpus-per-task RAY_NUM_CPUS_PER_TASK] [--ray-num-gpus-per-task RAY_NUM_GPUS_PER_TASK] [--ray-ray-kwargs RAY_RAY_KWARGS]
optional arguments:
-h, --help show this help message and exit
--problem PROBLEM
--evaluator EVALUATOR
--random-state RANDOM_STATE
Type[int]. Defaults to 'None'.
--log-dir LOG_DIR Type[str]. Defaults to '.'.
--verbose VERBOSE Type[int]. Defaults to '0'.
--surrogate-model SURROGATE_MODEL
Type[str]. Defaults to 'RF'.
--acq-func ACQ_FUNC Type[str]. Defaults to 'LCB'.
--kappa KAPPA Type[float]. Defaults to '1.96'.
--xi XI Type[float]. Defaults to '0.001'.
--liar-strategy LIAR_STRATEGY
Type[str]. Defaults to 'cl_min'.
--n-jobs N_JOBS Type[int]. Defaults to '1'.
--max-evals MAX_EVALS
Type[int]. Defaults to '-1' when an number of evaluations is not imposed.
--timeout TIMEOUT Type[int]. Number of seconds before killing the search. Defaults to 'None' when a time budget is not imposed.
--run-function RUN_FUNCTION
--num-workers NUM_WORKERS
Type[int]. Defaults to '1'.
--callbacks CALLBACKS
Defaults to 'None'.
--ray-address RAY_ADDRESS
Type[str]. Defaults to 'None'.
--ray-password RAY_PASSWORD
Type[str]. Defaults to 'None'.
--ray-num-cpus RAY_NUM_CPUS
Type[int]. Defaults to 'None'.
--ray-num-gpus RAY_NUM_GPUS
Type[int]. Defaults to 'None'.
--ray-num-cpus-per-task RAY_NUM_CPUS_PER_TASK
Type[float]. Defaults to '1'.
--ray-num-gpus-per-task RAY_NUM_GPUS_PER_TASK
Type[float]. Defaults to 'None'.
--ray-ray-kwargs RAY_RAY_KWARGS
Type[dict]. Defaults to '{}'.
"""
import argparse
import logging
import sys
from deephyper.core.parser import add_arguments_from_signature
from deephyper.core.utils import load_attr
from deephyper.evaluator import EVALUATORS, Evaluator
# Mapping from CLI sub-command name to the dotted path of the search class.
HPS_SEARCHES = {
    "ambs": "deephyper.search.hps.AMBS",
    "cbo": "deephyper.search.hps.CBO",
}
def build_parser_from(cls):
    """Build an ``argparse`` parser for the search class ``cls``.

    The parser exposes the constructor arguments of ``cls``, the arguments
    of the ``Search.search`` interface, and the options of every available
    evaluator backend (prefixed with the backend name).

    :meta private:
    """
    parser = argparse.ArgumentParser(conflict_handler="resolve")

    # Arguments of the specific search class constructor.
    add_arguments_from_signature(parser, cls)

    # Arguments of the Search.search interface.
    parser.add_argument(
        "--max-evals",
        default=-1,
        type=int,
        help="Type[int]. Defaults to '-1' when an number of evaluations is not imposed.",
    )
    parser.add_argument(
        "--timeout",
        default=None,
        type=int,
        help="Type[int]. Number of seconds before killing the search. Defaults to 'None' when a time budget is not imposed.",
    )

    # Arguments shared by all evaluators, then one prefixed group per backend.
    shared_arguments = add_arguments_from_signature(parser, Evaluator)

    for method_name, attr_name in EVALUATORS.items():
        try:
            method_cls = load_attr(f"deephyper.evaluator.{attr_name}")
            add_arguments_from_signature(
                parser, method_cls, prefix=method_name, exclude=shared_arguments
            )
        except ModuleNotFoundError:
            # Some evaluator backends are optional dependencies.
            pass

    return parser
def add_subparser(parsers):
    """Register the ``hps`` sub-command and one sub-parser per search.

    :meta private:
    """
    parser = parsers.add_parser(
        "hps", help="Command line to run hyperparameter search."
    )
    subparsers = parser.add_subparsers()

    for name, module_attr in HPS_SEARCHES.items():
        search_cls = load_attr(module_attr)
        parent_parser = build_parser_from(search_cls)
        subparser = subparsers.add_parser(
            name=name, parents=[parent_parser], conflict_handler="resolve"
        )
        subparser.set_defaults(func=main)
def main(**kwargs):
    """Entry point of ``deephyper hps <search>``: build the evaluator and run the search.

    :meta private:
    """
    sys.path.insert(0, ".")

    if kwargs["verbose"]:
        logging.basicConfig(filename="deephyper.log", level=logging.INFO)

    # sys.argv[2] is the search sub-command name (e.g. "cbo").
    search_name = sys.argv[2]

    # load search class
    logging.info(f"Loading the search '{search_name}'...")
    search_cls = load_attr(HPS_SEARCHES[search_name])

    # load problem
    logging.info("Loading the problem...")
    problem = load_attr(kwargs.pop("problem"))

    # load run function
    logging.info("Loading the run-function...")
    run_function = load_attr(kwargs.pop("run_function"))

    # filter arguments from evaluator class signature
    logging.info("Loading the evaluator...")
    evaluator_method = kwargs.pop("evaluator")
    base_arguments = ["num_workers", "callbacks"]
    evaluator_kwargs = {k: kwargs.pop(k) for k in base_arguments}

    # remove the arguments from unused evaluator
    # NOTE(review): the substring test ("method in k") also pops any option
    # whose name merely *contains* a backend name -- confirm this is intended.
    for method in EVALUATORS.keys():
        evaluator_method_kwargs = {
            k[len(evaluator_method) + 1 :]: kwargs.pop(k)
            for k in kwargs.copy()
            if method in k
        }
        if method == evaluator_method:
            evaluator_kwargs = {**evaluator_kwargs, **evaluator_method_kwargs}

    # create evaluator
    logging.info(
        f"Evaluator(method={evaluator_method}, method_kwargs={evaluator_kwargs}"
    )
    evaluator = Evaluator.create(
        run_function, method=evaluator_method, method_kwargs=evaluator_kwargs
    )
    logging.info(f"Evaluator has {evaluator.num_workers} workers available.")

    # filter arguments from search class signature
    # remove keys in evaluator_kwargs
    kwargs = {k: v for k, v in kwargs.items() if k not in evaluator_kwargs}
    max_evals = kwargs.pop("max_evals")
    timeout = kwargs.pop("timeout")

    # TODO: How about checkpointing and transfer learning?

    # execute the search
    # remaining kwargs are for the search
    logging.info(f"Evaluator has {evaluator.num_workers} workers available.")
    search = search_cls(problem, evaluator, **kwargs)
    search.search(max_evals=max_evals, timeout=timeout)
| 6,917 | 34.659794 | 203 | py |
deephyper | deephyper-master/deephyper/core/cli/_nodelist.py | import sys
import socket
def _theta_nodelist(node_str):
# string like: 1001-1005,1030,1034-1200
node_ids = []
ranges = node_str.split(",")
lo = None
hi = None
for node_range in ranges:
lo, *hi = node_range.split("-")
lo = int(lo)
if hi:
hi = int(hi[0])
node_ids.extend(list(range(lo, hi + 1)))
else:
node_ids.append(lo)
return [f"nid{node_id:05d}" for node_id in node_ids]
def expand_nodelist(system, node_str):
    """Expand a compact node string into a list of hostnames.

    Args:
        system (str): target system name.
            NOTE(review): currently unused -- the dispatch is done on the
            local hostname instead; confirm whether it is still needed.
        node_str (str): compact node list (e.g. ``"1001-1005,1030"``).

    Returns:
        list: the expanded Theta node names when running on a host whose
        name contains ``"theta"``; otherwise ``[node_str]`` unchanged.
    """
    hostname = socket.gethostname()
    if "theta" in hostname:
        node_list = _theta_nodelist(node_str)
    else:
        node_list = [node_str]
    return node_list
if __name__ == "__main__":
    # CLI usage: python _nodelist.py <system> <node_str>
    # Prints the expanded list in a shell-friendly form, e.g. "[nid00001 nid00002]".
    if len(sys.argv) != 3:
        raise ValueError("No argument provided")
    node_list = expand_nodelist(sys.argv[1], sys.argv[2])
    node_list = str(node_list).replace(", ", " ").replace("'", "")
    print(node_list)
| 938 | 22.475 | 66 | py |
deephyper | deephyper-master/deephyper/core/cli/__init__.py | # for the documentation
from . import _cli, _hps, _nas, _new_problem, _start_project

# The CLI command modules, in documentation order.
commands = [_cli, _hps, _nas, _new_problem, _start_project]

# The package documentation is the concatenation of each command's docstring.
__doc__ = ""
for _command in commands:
    __doc__ += _command.__doc__
| 205 | 19.6 | 60 | py |
deephyper | deephyper-master/deephyper/core/cli/_nas.py | """
Neural Architecture Search
--------------------------
Command line to execute neural architecture search or joint hyperparameter and neural architecture search.
.. code-block:: bash
$ deephyper nas regevo --help
usage: deephyper nas regevo [-h] --problem PROBLEM --evaluator EVALUATOR [--random-state RANDOM_STATE] [--log-dir LOG_DIR] [--verbose VERBOSE] [--population-size POPULATION_SIZE] [--sample-size SAMPLE_SIZE]
[--max-evals MAX_EVALS] [--timeout TIMEOUT] --run-function RUN_FUNCTION [--num-workers NUM_WORKERS] [--callbacks CALLBACKS] [--ray-address RAY_ADDRESS]
[--ray-password RAY_PASSWORD] [--ray-num-cpus RAY_NUM_CPUS] [--ray-num-gpus RAY_NUM_GPUS] [--ray-num-cpus-per-task RAY_NUM_CPUS_PER_TASK]
[--ray-num-gpus-per-task RAY_NUM_GPUS_PER_TASK] [--ray-ray-kwargs RAY_RAY_KWARGS]
optional arguments:
-h, --help show this help message and exit
--problem PROBLEM
--evaluator EVALUATOR
--random-state RANDOM_STATE
Type[int]. Defaults to 'None'.
--log-dir LOG_DIR Type[str]. Defaults to '.'.
--verbose VERBOSE Type[int]. Defaults to '0'.
--population-size POPULATION_SIZE
Type[int]. Defaults to '100'.
--sample-size SAMPLE_SIZE
Type[int]. Defaults to '10'.
--max-evals MAX_EVALS
Type[int]. Defaults to '-1' when an number of evaluations is not imposed.
--timeout TIMEOUT Type[int]. Number of seconds before killing the search. Defaults to 'None' when a time budget is not imposed.
--run-function RUN_FUNCTION
--num-workers NUM_WORKERS
Type[int]. Defaults to '1'.
--callbacks CALLBACKS
Defaults to 'None'.
--ray-address RAY_ADDRESS
Type[str]. Defaults to 'None'.
--ray-password RAY_PASSWORD
Type[str]. Defaults to 'None'.
--ray-num-cpus RAY_NUM_CPUS
Type[int]. Defaults to 'None'.
--ray-num-gpus RAY_NUM_GPUS
Type[int]. Defaults to 'None'.
--ray-num-cpus-per-task RAY_NUM_CPUS_PER_TASK
Type[float]. Defaults to '1'.
--ray-num-gpus-per-task RAY_NUM_GPUS_PER_TASK
Type[float]. Defaults to 'None'.
--ray-ray-kwargs RAY_RAY_KWARGS
Type[dict]. Defaults to '{}'.
"""
import argparse
import sys
import logging
from deephyper.core.parser import add_arguments_from_signature
from deephyper.evaluator import EVALUATORS, Evaluator
from deephyper.core.utils import load_attr
# Mapping from CLI sub-command name to the dotted path of the search class.
NAS_SEARCHES = {
    "random": "deephyper.search.nas._random.Random",
    "regevo": "deephyper.search.nas._regevo.RegularizedEvolution",
    "agebo": "deephyper.search.nas._agebo.AgEBO",
    "ambsmixed": "deephyper.search.nas._ambsmixed.AMBSMixed",
    "regevomixed": "deephyper.search.nas._regevomixed.RegularizedEvolutionMixed",
}
def build_parser_from(cls):
    """Build an ``argparse`` parser for the search class ``cls``.

    The parser exposes the constructor arguments of ``cls``, the arguments
    of the ``Search.search`` interface, and the options of every available
    evaluator backend (prefixed with the backend name).

    :meta private:
    """
    parser = argparse.ArgumentParser(conflict_handler="resolve")

    # Arguments of the specific search class constructor.
    add_arguments_from_signature(parser, cls)

    # Arguments of the Search.search interface.
    parser.add_argument(
        "--max-evals",
        default=-1,
        type=int,
        help="Type[int]. Defaults to '-1' when an number of evaluations is not imposed.",
    )
    parser.add_argument(
        "--timeout",
        default=None,
        type=int,
        help="Type[int]. Number of seconds before killing the search. Defaults to 'None' when a time budget is not imposed.",
    )

    # Arguments shared by all evaluators, then one prefixed group per backend.
    shared_arguments = add_arguments_from_signature(parser, Evaluator)

    for method_name, attr_name in EVALUATORS.items():
        try:
            method_cls = load_attr(f"deephyper.evaluator.{attr_name}")
            add_arguments_from_signature(
                parser, method_cls, prefix=method_name, exclude=shared_arguments
            )
        except ModuleNotFoundError:
            # Some evaluator backends are optional dependencies.
            pass

    return parser
def add_subparser(parsers):
    """Register the ``nas`` sub-command and one sub-parser per search.

    :meta private:
    """
    parser = parsers.add_parser(
        "nas", help="Command line to run neural architecture search."
    )
    subparsers = parser.add_subparsers()

    for name, module_attr in NAS_SEARCHES.items():
        search_cls = load_attr(module_attr)
        parent_parser = build_parser_from(search_cls)
        subparser = subparsers.add_parser(
            name=name, parents=[parent_parser], conflict_handler="resolve"
        )
        subparser.set_defaults(func=main)
def main(**kwargs):
    """Entry point of ``deephyper nas <search>``: build the evaluator and run the search.

    :meta private:
    """
    sys.path.insert(0, ".")

    if kwargs["verbose"]:
        logging.basicConfig(filename="deephyper.log", level=logging.INFO)

    # sys.argv[2] is the search sub-command name (e.g. "regevo").
    search_name = sys.argv[2]

    # load search class
    logging.info(f"Loading the search '{search_name}'...")
    search_cls = load_attr(NAS_SEARCHES[search_name])

    # load problem
    logging.info("Loading the problem...")
    problem = load_attr(kwargs.pop("problem"))

    # load run function
    logging.info("Loading the run-function...")
    run_function = load_attr(kwargs.pop("run_function"))

    # filter arguments from evaluator class signature
    logging.info("Loading the evaluator...")
    evaluator_method = kwargs.pop("evaluator")
    base_arguments = ["num_workers", "callbacks"]
    evaluator_kwargs = {k: kwargs.pop(k) for k in base_arguments}

    # NOTE(review): the substring test ("method in k") also pops any option
    # whose name merely *contains* a backend name -- confirm this is intended.
    for method in EVALUATORS.keys():
        evaluator_method_kwargs = {
            k[len(evaluator_method) + 1 :]: kwargs.pop(k)
            for k in kwargs.copy()
            if method in k
        }
        if method == evaluator_method:
            evaluator_kwargs = {**evaluator_kwargs, **evaluator_method_kwargs}

    # create evaluator
    logging.info(
        f"Evaluator(method={evaluator_method}, method_kwargs={evaluator_kwargs}"
    )
    evaluator = Evaluator.create(
        run_function, method=evaluator_method, method_kwargs=evaluator_kwargs
    )
    logging.info(f"Evaluator has {evaluator.num_workers} workers available.")

    # filter arguments from search class signature
    # remove keys in evaluator_kwargs
    kwargs = {k: v for k, v in kwargs.items() if k not in evaluator_kwargs}
    max_evals = kwargs.pop("max_evals")
    timeout = kwargs.pop("timeout")

    # TODO: How about checkpointing and transfer learning?

    # execute the search
    # remaining kwargs are for the search
    logging.info("Starting the search")
    search = search_cls(problem, evaluator, **kwargs)
    search.search(max_evals=max_evals, timeout=timeout)
| 6,849 | 34.677083 | 210 | py |
deephyper | deephyper-master/deephyper/core/cli/_cli.py | """DeepHyper command line interface.
It can be used in the shell with:
.. code-block:: console
$ deephyper --help
usage: deephyper [-h] {hps,nas,new-problem,ray-cluster,ray-submit,start-project} ...
DeepHyper command line.
positional arguments:
{hps,nas,new-problem,ray-cluster,ray-submit,start-project}
hps Command line to run hyperparameter search.
nas Command line to run neural architecture search.
new-problem Tool to init an hyper-parameter search package or a neural architecture search problem folder.
start-project Set up a new project folder for DeepHyper benchmarks
optional arguments:
-h, --help show this help message and exit
"""
import argparse
from deephyper.core.cli import _hps, _nas, _new_problem, _start_project
def create_parser():
    """Assemble the top-level ``deephyper`` argument parser.

    :meta private:
    """
    parser = argparse.ArgumentParser(description="DeepHyper command line.")
    subparsers = parser.add_subparsers()

    # Each sub-module registers its own sub-command on the parser.
    _hps.add_subparser(subparsers)  # hyperparameter search
    _nas.add_subparser(subparsers)  # neural architecture search
    _new_problem.add_subparser(subparsers)
    _start_project.add_subparser(subparsers)

    return parser
def main():
    """Entry point of the ``deephyper`` command.

    :meta private:
    """
    parser = create_parser()
    args = parser.parse_args()

    # Without a sub-command, argparse does not set "func": show the help.
    if not hasattr(args, "func"):
        parser.print_help()
        return

    kwargs = vars(args)
    func = kwargs.pop("func")
    func(**kwargs)
| 1,600 | 23.257576 | 122 | py |
deephyper | deephyper-master/deephyper/core/cli/_cobalt_nodelist.py | import os
# Adapted from 'get_job_nodelist()' found in the following project:
# https://github.com/argonne-lcf/balsam/blob/main/balsam/platform/compute_node/alcf_thetaknl_node.py
def nodelist():
    """Print the compute nodes allocated in the current job context.

    Reads the compact range string from ``$COBALT_PARTNAME`` (e.g.
    ``"1001-1005,1030,1034-1200"``: comma-separated single ids or inclusive
    ``lo-hi`` ranges) and prints the expanded list of node hostnames
    (``"nid"`` + zero-padded 5-digit id).

    :meta private:
    """
    node_str = os.environ["COBALT_PARTNAME"]

    node_ids = []
    for node_range in node_str.split(","):
        bounds = node_range.split("-")
        start = int(bounds[0])
        if len(bounds) > 1:
            stop = int(bounds[1])
            node_ids.extend(range(start, stop + 1))
        else:
            node_ids.append(start)

    print([f"nid{node_id:05d}" for node_id in node_ids])
if __name__ == "__main__":
    # Executed directly (e.g. from a Cobalt job script) to print the node list.
    nodelist()
| 796 | 23.90625 | 100 | py |
deephyper | deephyper-master/deephyper/core/analytics/_topk.py | """
Top-K Configuration
-------------------
A command line to extract the top-k best configuration from a DeepHyper execution.
It can be used with:
.. code-block:: console
$ deephyper-analytics --help
usage: deephyper-analytics topk [-h] [-k K] [-o OUTPUT] path
positional arguments:
path Path to the input CSV file.
optional arguments:
-h, --help show this help message and exit
-k K Number of best configurations to output in decreasing order of best objective.
-o OUTPUT, --output OUTPUT
Path to the output file.
An example usage is:
.. code-block:: console
$ deephyper-analytics topk combo_8gpu_8_agebo/infos/results.csv -k 2
'0':
arch_seq: '[229, 0, 22, 1, 1, 53, 29, 1, 119, 1, 0, 116, 123, 1, 273, 0, 1, 388]'
batch_size: 59
elapsed_sec: 10259.2741303444
learning_rate: 0.0001614947
loss: log_cosh
objective: 0.9236862659
optimizer: adam
patience_EarlyStopping: 22
patience_ReduceLROnPlateau: 10
'1':
arch_seq: '[229, 0, 22, 0, 1, 235, 29, 1, 313, 1, 0, 116, 123, 1, 37, 0, 1, 388]'
batch_size: 51
elapsed_sec: 8818.2674164772
learning_rate: 0.0001265946
loss: mae
objective: 0.9231553674
optimizer: nadam
patience_EarlyStopping: 23
patience_ReduceLROnPlateau: 14
An ``--output`` argument is also available to save the output in a YAML, JSON or CSV format.
"""
import json
import pandas as pd
import yaml
from deephyper.core.exceptions import DeephyperRuntimeError
def add_subparser(subparsers):
    """Register the ``topk`` sub-command on ``subparsers``.

    :meta private:

    Returns:
        tuple: ``(subparser_name, function_to_call)`` consumed by the
        analytics dispatcher.
    """
    parser = subparsers.add_parser("topk", help="Print the top-k configurations.")

    parser.add_argument("path", type=str, help="Path to the input CSV file.")
    parser.add_argument(
        "-k",
        type=int,
        default=1,
        required=False,
        help="Number of best configurations to output in decreasing order of best objective.",
    )
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        required=False,
        default="",
        help="Path to the output file.",
    )

    return "topk", main
def output_best_configuration_from_csv(
    path: str, output: str, k: int, **kwargs
) -> None:
    """Output the configuration based on the maximal objective found in the CSV input file.

    :meta private:

    Args:
        path (str): Path of the CSV input file.
        output (str): Path of the output file ending in (.csv|.yaml|.json).
        k (int): Number of configuration to output.

    Raises:
        DeephyperRuntimeError: if the input file is not a ``.csv`` file.
    """
    input_extension = path.split(".")[-1]
    # Guard clause: only CSV inputs are supported.
    if input_extension != "csv":
        raise DeephyperRuntimeError(
            f"The specified input file extension '{input_extension}' is not supported."
        )
    df = pd.read_csv(path)
    output_best_configuration_from_df(df, output, k)
def output_best_configuration_from_df(
    df: pd.DataFrame, output: str, k: int, **kwargs
) -> None:
    """Output the ``k`` configurations with the highest objective from ``df``.

    :meta private:

    Args:
        df (pd.DataFrame): a Pandas DataFrame with an ``objective`` column.
            (The annotation previously read ``str``, contradicting the
            docstring and the actual usage.)
        output (str): Path of the output file ending in (.csv|.yaml|.json);
            an empty string prints the result as YAML on stdout instead.
        k (int): Number of configurations to output.

    Raises:
        DeephyperRuntimeError: if the output extension is not supported.
    """
    ranked = df.sort_values(by=["objective"], ascending=False, ignore_index=True)
    topk = ranked.iloc[:k]

    if len(output) == 0:
        # No output file requested: pretty-print to stdout as YAML.
        print(yaml.dump(json.loads(topk.to_json(orient="index"))))
        return

    output_extension = output.split(".")[-1]
    if output_extension == "yaml":
        with open(output, "w") as f:
            yaml.dump(json.loads(topk.to_json(orient="index")), f)
    elif output_extension == "csv":
        topk.to_csv(output)
    elif output_extension == "json":
        topk.to_json(output, orient="index")
    else:
        raise DeephyperRuntimeError(
            f"The specified output extension is not supported: {output_extension}"
        )
def main(*args, **kwargs):
    """Entry point of ``deephyper-analytics topk``; forwards the CLI kwargs.

    :meta private:
    """
    output_best_configuration_from_csv(**kwargs)
| 4,269 | 27.278146 | 104 | py |
deephyper | deephyper-master/deephyper/core/analytics/_dashboard.py | """
Dashboard
---------
A tool to open an interactive dashboard in the browser to help analyse DeepHyper results.
It can be used such as:
.. code-block:: console
$ deephyper-analytics dashboard --database db.json
Then an interactive dashboard will appear in your browser.
"""
import os
import subprocess
HERE = os.path.dirname(os.path.abspath(__file__))
def add_subparser(subparsers):
    """Register the ``dashboard`` sub-command on ``subparsers``.

    :meta private:

    Returns:
        tuple: ``(subparser_name, function_to_call)`` consumed by the
        analytics dispatcher.
    """
    parser = subparsers.add_parser(
        "dashboard", help="Open a dashboard in the browser."
    )
    parser.add_argument(
        "-d",
        "--database",
        default="~/.deephyper/db.json",
        help="Path to the default database used for the dashboard.",
    )
    return "dashboard", main
def main(database, *args, **kwargs):
    """Launch the Streamlit dashboard app against ``database``.

    :meta private:

    Args:
        database (str): path to the TinyDB JSON database to display.
    """
    path_st_app = os.path.join(HERE, "dashboard", "_views.py")
    database = os.path.abspath(database)
    # the "--" is a posix standard to separate streamlit arguments from other arguments
    # which are forwarded to the launched script
    subprocess.run(
        ["streamlit", "run", path_st_app, "--", database],
    )
| 1,222 | 22.075472 | 89 | py |
deephyper | deephyper-master/deephyper/core/analytics/_analytics.py | """Analytics command line interface for DeepHyper.
It can be used with:
.. code-block:: console
$ deephyper-analytics --help
Command line to analysis the outputs produced by DeepHyper.
positional arguments:
{dashboard,notebook,quickplot,topk}
Kind of analytics.
dashboard Open a DeepHyper dashboard in the browser.
quickplot Tool to generate a quick 2D plot from file.
topk Print the top-k configurations.
optional arguments:
-h, --help show this help message and exit
"""
import argparse
import sys
from deephyper.core.analytics import _topk, _quick_plot, _dashboard, _db_manager
def create_parser():
    """Build the ``deephyper-analytics`` parser and its command mapping.

    :meta private:

    Returns:
        tuple: ``(parser, mapping)`` where ``mapping`` maps a sub-command
        name to the function implementing it.
    """
    parser = argparse.ArgumentParser(
        description="Command line to analysis the outputs produced by DeepHyper."
    )
    subparsers = parser.add_subparsers(help="Kind of analytics.")

    mapping = {}
    for module in (_dashboard, _db_manager, _quick_plot, _topk):
        name, func = module.add_subparser(subparsers)
        mapping[name] = func

    return parser, mapping
def main():
    """Entry point of ``deephyper-analytics``.

    :meta private:
    """
    parser, mapping = create_parser()
    args = parser.parse_args()
    # Dispatch on the sub-command name (first CLI argument after the program).
    mapping[sys.argv[1]](**vars(args))
| 1,364 | 22.947368 | 81 | py |
deephyper | deephyper-master/deephyper/core/analytics/__init__.py | # for the documentation
from . import _topk, _quick_plot, _dashboard
from ._db_manager import DBManager, Query

# The analytics command modules, in documentation order.
commands = [_topk, _quick_plot, _dashboard]

__all__ = ["DBManager", "Query"]

# The package documentation is the concatenation of each command's docstring.
__doc__ = "Provides command lines tools to visualize results from DeepHyper.\n\n"
for _command in commands:
    __doc__ += _command.__doc__
| 317 | 25.5 | 81 | py |
deephyper | deephyper-master/deephyper/core/analytics/_db_manager.py | """
Database
---------
A tool to interact with a database of Deephyper results.
To view the database run:
.. code-block:: console
$ deephyper-analytics database --view '' --database db.json
To add an entry to the database run:
.. code-block:: console
$ deephyper-analytics database --add $log_dir --database db.json
To delete an entry from the database run:
.. code-block:: console
$ deephyper-analytics database --delete $id --database db.json
"""
import json
import os
import abc
import getpass
import platform
import subprocess
import pandas as pd
import yaml
from datetime import datetime
from typing import Union
from tinydb import TinyDB
from deephyper.core.utils._files import ensure_dh_folder_exists
class DBManager(abc.ABC):
    """Database Manager, for saving DeepHyper experiments and accessing/modifying the resulting databases.

    The database is a TinyDB JSON document; each record ("experiment") holds
    a ``metadata`` section and a ``data`` section with the search results.

    Example Usage:

    >>> dbm = DBManager(username="Bob", path="path/to/db.json")

    Args:
        username (str, optional): the name of the user accessing the database. Defaults to ``getpass.getuser()``.
        path (str, optional): the path to the database. Defaults to ``"db.json"`` inside the DeepHyper folder created by ``ensure_dh_folder_exists()``.
    """
    def __init__(
        self,
        username: str = None,
        path: str = None,
    ) -> None:
        if path is None:
            path = os.path.join(ensure_dh_folder_exists(), "db.json")
        self._username = username if username else getpass.getuser()
        # TinyDB keeps the whole database in a single JSON file at `path`.
        self._db = TinyDB(path)
    def _get_pip_env(self, pip_versions=True):
        """Return the pip environment as a list of package records.

        ``pip_versions`` may be: a path to a JSON file produced by
        ``pip list --format json`` (loaded as-is), a truthy value (run
        ``pip list`` now and capture its output), or a falsy value
        (return ``None`` -- no environment is recorded).
        """
        if isinstance(pip_versions, str):
            with open(pip_versions, "r") as file:
                pip_env = json.load(file)
        elif pip_versions:
            pip_list_com = subprocess.run(
                ["pip", "list", "--format", "json"], stdout=subprocess.PIPE
            )
            pip_env = json.loads(pip_list_com.stdout.decode("utf-8"))
        else:
            return None
        return pip_env
    def add(
        self,
        log_dir: str,
        label: str = None,
        description: str = None,
        pip_versions: Union[str, bool] = True,
        metadata: dict = None,
    ) -> int:
        """Adds an experiment to the database.

        Reads ``context.yaml`` and ``results.csv`` from ``log_dir`` and stores
        them in a new record together with user-provided metadata.

        Example Usage:

        >>> dbm = DBManager(username="Bob", path="path/to/db.json")
        >>> metadata = {"machine": "ThetaGPU", "n_nodes": 4, "num_gpus_per_node": 8}
        >>> dbm.add("path/to/search/log_dir/", label="exp_101", description="The experiment 101", metadata=metadata)

        Args:
            log_dir (str): the path to the search's logging directory.
            label (str, optional): the label wished for the experiment. Defaults to None.
            description (str, optional): the description wished for the experiment. Defaults to None.
            pip_versions (str or bool, optional): a boolean for which ``False`` means that we don't store any pip version checkpoint, and ``True`` that we store the current pip version checkpoint ; or the path to a ``.json`` file corresponding to the output of ``pip list --format json``. Defaults to True.
            metadata (dict, optional): a dictionary of metadata. When the same key is found in the default `default_metadata` and the passed `metadata` then the values from `default_metadata` are overriden by `metadata` values. Defaults to None.

        Returns:
            int: the TinyDB document id of the inserted experiment.
        """
        context_path = os.path.join(log_dir, "context.yaml")
        with open(context_path, "r") as file:
            context = yaml.load(file, Loader=yaml.SafeLoader)
        results_path = os.path.join(log_dir, "results.csv")
        # Column-oriented dict so the record serializes cleanly to JSON.
        results = pd.read_csv(results_path, index_col=0).to_dict(orient="list")
        logs = []
        for log_file in context.get("logs", []):
            logging_path = os.path.join(log_dir, log_file)
            # NOTE(review): open file objects are appended (never closed or
            # read here) -- this leaks file handles and is not JSON
            # serializable; confirm intent (see TODO below).
            logs.append(open(logging_path, "r"))
        metadata = metadata if metadata else {}
        experiment = {
            "metadata": {
                "label": label,
                "description": description,
                "user": self._username,
                "add_date": str(datetime.now()),
                "env": self._get_pip_env(pip_versions=pip_versions),
                "search": context.get("search", None),
                # User metadata last so it can override the defaults above.
                **metadata,
            },
            "data": {
                "search": {
                    "calls": context.get("calls", None),
                    "results": results,
                },
                "logging": logs,  # TODO: how to save files as str ?
            },
        }
        return self._db.insert(experiment)
    def get(self, cond=None, exp_id=None):
        """Retrieve the desired experiment from the database.

        Either ``cond`` (a TinyDB query) or ``exp_id`` (a document id) must be
        provided; ``cond`` takes precedence when both are given.

        Example Usage:

        >>> dbm = DBManager(username="Bob", path="path/to/db.json")
        >>> dbm.get(23)

        Args:
            cond (tinydb.Query): a search condition.
            exp_id (int): index of the record to retrieve.

        Returns:
            (list|dict): the retrieved documents in the database, with the
            document id copied into an ``"id"`` key.
        """
        if cond is None:
            exp = self._db.get(doc_id=exp_id)
            if exp is not None:
                # Copy to a plain dict and expose the doc id as "id".
                exp = dict(exp)
                exp["id"] = exp_id
            return exp
        else:
            docs = self._db.search(cond=cond)
            for i, doc in enumerate(docs):
                doc_id = doc.doc_id
                docs[i] = dict(doc)
                docs[i]["id"] = doc_id
            return docs
    def delete(self, ids: list):
        """Deletes an experiment from the database.

        Example Usage:

        >>> dbm = DBManager(username="Bob", path="path/to/db.json")
        >>> dbm.delete([23, 16])

        Args:
            ids (list): indexes of the records to delete.
        """
        self._db.remove(doc_ids=ids)
    def list(self):
        """Returns an iterator over the records stored in the database.

        Each yielded record is a plain ``dict`` with the TinyDB document id
        exposed under the ``"id"`` key.

        Example Usage:

        >>> dbm = DBManager(username="Bob", path="path/to/db.json")
        >>> experiments = dbm.list()
        >>> for exp in experiments:
        >>>     ...
        """
        for exp in self._db:
            doc_id = exp.doc_id
            exp = dict(exp)
            exp["id"] = doc_id
            yield (exp)
def add_subparser(subparsers):
    """Register the ``database`` sub-command on *subparsers*.

    Returns the sub-command name and the function to call when it is selected.

    :meta private:
    """
    name = "database"
    subparser = subparsers.add_parser(name, help="Interact with a database.")
    # The username defaults to the current OS user.
    default_user = getpass.getuser()
    subparser.add_argument(
        "-u",
        "--username",
        type=str,
        default=default_user,
        help=f"Username used to interact with the database. Defaults to '{default_user}'.",
    )
    subparser.add_argument(
        "-d",
        "--database",
        type=str,
        default="~/.deephyper/db.json",
        help="Path to the default database used for the dashboard. Defaults to '~/.deephyper/db.json'.",
    )
    subparser.add_argument(
        "-A",
        "--add",
        type=str,
        default=None,
        help="Add an entry to the database. A path to the folder containing the results is expected.",
    )
    subparser.add_argument(
        "-D",
        "--delete",
        type=int,
        default=None,
        help="Delete an entry from the database. The 'id' of the entry to delete is expected.",
    )
    subparser.add_argument(
        "-v",
        "--view",
        type=str,
        default=None,
        help="Print a view of the database based on filtered labels.",
    )
    return name, main
def main(username, database, add, delete, view, **kwargs):
    """Entry point of the ``database`` sub-command.

    Performs exactly one action: add an experiment (``--add``), delete an
    entry by id (``--delete``), or print a filtered view (``--view``).

    Args:
        username (str): user recorded in the database entries.
        database (str): path to the TinyDB JSON file; ``~`` is expanded.
        add (str): path of a logging directory to add, or ``None``.
        delete (int): id of the entry to delete, or ``None``.
        view (str): label filter (empty string matches all), or ``None``.

    :meta private:
    """
    # BUG FIX: the default path is "~/.deephyper/db.json" but "~" was never
    # expanded, so os.path.exists always failed and TinyDB targeted a literal
    # "~" directory. Expand the user's home first.
    database = os.path.expanduser(database)
    if not (os.path.exists(database)):
        print(f"[DBManager] Creating new database: {database}")
    dbm = DBManager(username=username, path=database)
    if add:
        input_logs = os.path.abspath(add)
        print(f"[DBManager] Adding '{input_logs}'")
        # Record where the entry was created, for later filtering.
        metadata = {
            "hostname": platform.node(),
            "platform": platform.platform(),
            "processor": platform.processor(),
        }
        entry_id = dbm.add(
            input_logs, label=os.path.basename(input_logs), metadata=metadata
        )
        print(f"[DBManager] Entry added with 'id={entry_id}'")
    elif delete is not None:
        # `is not None` rather than truthiness so an id of 0 is still honored.
        entry_id = delete
        print(f"[DBManager] Deleting: {entry_id}")
        dbm.delete([entry_id])
        print("[DBManager] Entry deleted")
    elif view is not None:
        search_value = view
        data_summary = []
        for exp_data in dbm.list():
            label = exp_data["metadata"]["label"]
            # An empty filter matches every entry; otherwise substring match.
            if (search_value == "") or (search_value in label):
                data_summary.append(
                    {
                        "id": exp_data["id"],
                        "date created": exp_data["metadata"]["add_date"],
                        "label": exp_data["metadata"]["label"],
                    }
                )
        if len(data_summary) > 0:
            df = pd.DataFrame(data=data_summary)
            print(df)
        else:
            print("[DBManager] No entry found!")
| 9,098 | 30.814685 | 306 | py |
deephyper | deephyper-master/deephyper/core/analytics/_quick_plot.py | """
Quick Plot
----------
A tool to have quick and simple visualization from your data.
It can be use such as:
.. code-block:: console
$ deephyper-analytics quickplot nas_big_data/combo/exp_sc21/combo_1gpu_8_age/infos/results.csv
$ deephyper-analytics quickplot save/history/*.json --xy time val_r2
$ deephyper-analytics quickplot save/history/*.json --xy epochs val_r2
"""
import json
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from deephyper.core.exceptions import DeephyperRuntimeError
# Figure geometry: fixed width, height chosen for a golden-ratio aspect.
width = 8
height = width / 1.618
fontsize = 18
# NOTE: mutating the global rcParams at import time affects every matplotlib
# figure created by this process, not only the plots from this module.
matplotlib.rcParams.update(
    {
        "font.size": fontsize,
        "figure.figsize": (width, height),
        "figure.facecolor": "white",
        "savefig.dpi": 72,
        "figure.subplot.bottom": 0.125,
        "figure.edgecolor": "white",
        "xtick.labelsize": fontsize,
        "ytick.labelsize": fontsize,
    }
)
def add_subparser(subparsers):
    """Register the ``quickplot`` sub-command on *subparsers*.

    Returns the sub-command name and the function to call when it is selected.

    :meta private:
    """
    name = "quickplot"
    quickplot_parser = subparsers.add_parser(
        name, help="Tool to generate a quick 2D plot from file."
    )
    # One or more input files (CSV or JSON).
    quickplot_parser.add_argument("path", nargs="+", type=str)
    # Optional pair of series names to plot against each other.
    quickplot_parser.add_argument(
        "--xy",
        metavar="xy",
        type=str,
        nargs=2,
        default=[],
        help="name of x y variables in the CSV file.",
    )
    return name, main
def plot_for_single_csv(path: str, xy: list):
    """Generate a scatter plot from a single CSV file.

    :meta private:

    Args:
        path (str): path to the CSV file.
        xy (list): either empty (defaults to ``["elapsed_sec", "objective"]``)
            or exactly two column names ``[x, y]``.

    Raises:
        DeephyperRuntimeError: if ``xy`` has a length other than 0 or 2.
    """
    if not xy:
        x_col, y_col = "elapsed_sec", "objective"
    elif len(xy) == 2:
        x_col, y_col = xy
    else:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy elapsed_sec objective'"
        )
    data = pd.read_csv(path)
    plt.figure()
    plt.scatter(data[x_col], data[y_col], s=5, alpha=1.0)
    plt.xlabel(x_col)
    plt.ylabel(y_col)
    plt.grid()
    plt.tight_layout()
    plt.show()
def plot_for_single_json(path: str, xy: list):
    """Plot one training history stored as a JSON file.

    :meta private:

    Args:
        path (str): path to the JSON history file.
        xy (list): either empty (defaults to ``["epochs", "val_loss"]``) or
            exactly two series names ``[x, y]``.

    Raises:
        DeephyperRuntimeError: if ``xy`` has a length other than 0 or 2.
    """
    if not xy:
        x_name, y_name = "epochs", "val_loss"
    elif len(xy) == 2:
        x_name, y_name = xy
    else:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy epochs val_loss'"
        )
    with open(path, "r") as stream:
        history = json.load(stream)
    series = history[y_name]
    # "epochs" is synthetic: use the index of each measurement as abscissa.
    if x_name == "epochs":
        abscissa = list(range(len(series)))
    else:
        abscissa = history[x_name]
    plt.figure()
    plt.plot(abscissa, series)
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    plt.grid()
    plt.tight_layout()
    plt.show()
def plot_multiple_training(path: list, ylabel: str):
    """Overlay the *ylabel* curve of several JSON training histories on the
    current matplotlib figure.

    :meta private:

    Args:
        path (list): paths of the JSON history files.
        ylabel (str): name of the series to plot from each history.
    """
    for history_file in path:
        with open(history_file, "r") as stream:
            history = json.load(stream)
        curve = history[ylabel]
        # One line per history, indexed by epoch number.
        plt.plot(list(range(len(curve))), curve)
    plt.xlabel("Epochs")
def plot_multiple_objective_wrp_time(path: list, ylabel: str):
    """Scatter the best *ylabel* value of each history against the timestamp
    encoded in its filename.

    :meta private:

    Args:
        path (list): paths of the JSON history files; each filename must end
            with ``..._%d-%b-%Y_%H-%M-%S.json``.
        ylabel (str): name of the series whose maximum is plotted.
    """
    timestamps = []
    best_values = []
    for history_file in path:
        with open(history_file, "r") as stream:
            history = json.load(stream)
        # Strip the ".json" suffix and rebuild the date from the last two
        # "_"-separated fields of the filename.
        stamp = "_".join(history_file[:-5].split("_")[-2:])
        timestamps.append(
            datetime.strptime(stamp, "%d-%b-%Y_%H-%M-%S").timestamp()
        )
        best_values.append(max(history[ylabel]))
    plt.scatter(timestamps, best_values)
    plt.xlabel("Time")
def plot_for_multiple_json(path: list, xy: list):
    """Compare several JSON training histories on a single figure.

    :meta private:

    Args:
        path (list): paths of the JSON history files.
        xy (list): either empty (defaults to ``["epochs", "val_loss"]``) or
            exactly two names; the x name must be ``"epochs"`` or ``"time"``.

    Raises:
        DeephyperRuntimeError: if ``xy`` has a length other than 0 or 2.
    """
    if not xy:
        x_name, y_name = "epochs", "val_loss"
    elif len(xy) == 2:
        x_name, y_name = xy
    else:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy epochs val_loss'"
        )
    plt.figure()
    # Only two x-axes are understood; any other name leaves the figure empty.
    if x_name == "epochs":
        plot_multiple_training(path, y_name)
    elif x_name == "time":
        plot_multiple_objective_wrp_time(path, y_name)
    plt.ylabel(y_name)
    plt.grid()
    plt.tight_layout()
    plt.show()
def main(path: list, xy: list, *args, **kwargs):
    """Entry point of the ``quickplot`` sub-command: route the input files to
    the matching plotting helper based on their file extension.

    :meta private:
    """
    def extension(file_path):
        # Everything after the last dot; a missing dot yields the whole name.
        return file_path.split(".")[-1]
    if len(path) == 1:
        only = path[0]
        ext = extension(only)
        if ext == "csv":
            plot_for_single_csv(only, xy)
        elif ext == "json":
            plot_for_single_json(only, xy)
        else:
            raise DeephyperRuntimeError(
                f"Extension of input file '{ext}' is not yet supported."
            )
    elif all(extension(p) == "csv" for p in path):
        # Comparing multiple results.csv files (different search experiments)
        raise DeephyperRuntimeError(
            "Comparison of multiple experiments is not yet supported."
        )
    elif all(extension(p) == "json" for p in path):
        # Comparing multiple history.json files (different neural networks)
        plot_for_multiple_json(path, xy)
    else:
        raise DeephyperRuntimeError(
            "Multiple input files should all have the same extension '.csv' or '.json'"
        )
| 5,701 | 22.561983 | 118 | py |
deephyper | deephyper-master/deephyper/core/analytics/dashboard/_pyplot.py | import json
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import streamlit as st
from deephyper.core.exceptions import DeephyperRuntimeError
# Figure geometry: fixed width, height chosen for a golden-ratio aspect.
width = 8
height = width / 1.618
fontsize = 18
# NOTE: mutating the global rcParams at import time affects every matplotlib
# figure created by this process, not only the plots from this module.
matplotlib.rcParams.update(
    {
        "font.size": fontsize,
        "figure.figsize": (width, height),
        "figure.facecolor": "white",
        "savefig.dpi": 72,
        "figure.subplot.bottom": 0.125,
        "figure.edgecolor": "white",
        "xtick.labelsize": fontsize,
        "ytick.labelsize": fontsize,
    }
)
def to_max(l):
    """Return the running maximum of ``l``.

    Each output element is the maximum of all input elements up to and
    including that position, e.g. ``[3, 1, 4]`` -> ``[3, 3, 4]``.

    Args:
        l (list): a list of mutually comparable values; may be empty
            (the original implementation raised ``IndexError`` on ``[]``).

    Returns:
        list: the cumulative maxima, same length as ``l``.
    """
    running = []
    best = None
    for e in l:
        # The first element seeds the maximum; afterwards keep the larger.
        best = e if best is None else max(best, e)
        running.append(best)
    return running
def plot_single_line(df, x_label, y_label):
    """Render a scatter of ``df[y_label]`` against ``df[x_label]`` into the
    Streamlit page, then release the matplotlib figure."""
    figure = plt.figure()
    plt.scatter(df[x_label], df[y_label])
    # Title-case the raw column names for the axis labels.
    plt.xlabel(x_label.title())
    plt.ylabel(y_label.title())
    plt.grid()
    plt.tight_layout()
    st.pyplot(figure)
    plt.close()
def plot_single_line_improvement(df, x_label, y_label):
    """Render the running best of ``df[y_label]`` against ``df[x_label]``
    into the Streamlit page, then release the matplotlib figure."""
    figure = plt.figure()
    # Cumulative maximum so the curve is monotonically non-decreasing.
    best_so_far = to_max(df[y_label].tolist())
    plt.plot(df[x_label], best_so_far)
    plt.xlabel(x_label.title())
    plt.ylabel(y_label.title())
    plt.grid()
    plt.tight_layout()
    st.pyplot(figure)
    plt.close()
def plot_for_single_json(path: str, xy: list):
    """Plot one training history stored as a JSON file.

    :meta private:

    Args:
        path (str): path to the JSON history file.
        xy (list): either empty (defaults to ``["epochs", "val_loss"]``) or
            exactly two series names ``[x, y]``.

    Raises:
        DeephyperRuntimeError: if ``xy`` has a length other than 0 or 2.
    """
    if not xy:
        x_name, y_name = "epochs", "val_loss"
    elif len(xy) == 2:
        x_name, y_name = xy
    else:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy epochs val_loss'"
        )
    with open(path, "r") as stream:
        history = json.load(stream)
    series = history[y_name]
    # "epochs" is synthetic: use the index of each measurement as abscissa.
    if x_name == "epochs":
        abscissa = list(range(len(series)))
    else:
        abscissa = history[x_name]
    plt.figure()
    plt.plot(abscissa, series)
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    plt.grid()
    plt.tight_layout()
    plt.show()
def plot_multiple_training(path: list, ylabel: str):
    """Overlay the *ylabel* curve of several JSON training histories on the
    current matplotlib figure.

    :meta private:

    Args:
        path (list): paths of the JSON history files.
        ylabel (str): name of the series to plot from each history.
    """
    for history_file in path:
        with open(history_file, "r") as stream:
            history = json.load(stream)
        curve = history[ylabel]
        # One line per history, indexed by epoch number.
        plt.plot(list(range(len(curve))), curve)
    plt.xlabel("Epochs")
def plot_multiple_objective_wrp_time(path: list, ylabel: str):
    """Scatter the best *ylabel* value of each history against the timestamp
    encoded in its filename.

    :meta private:

    Args:
        path (list): paths of the JSON history files; each filename must end
            with ``..._%d-%b-%Y_%H-%M-%S.json``.
        ylabel (str): name of the series whose maximum is plotted.
    """
    timestamps = []
    best_values = []
    for history_file in path:
        with open(history_file, "r") as stream:
            history = json.load(stream)
        # Strip the ".json" suffix and rebuild the date from the last two
        # "_"-separated fields of the filename.
        stamp = "_".join(history_file[:-5].split("_")[-2:])
        timestamps.append(
            datetime.strptime(stamp, "%d-%b-%Y_%H-%M-%S").timestamp()
        )
        best_values.append(max(history[ylabel]))
    plt.scatter(timestamps, best_values)
    plt.xlabel("Time")
def plot_for_multiple_json(path: list, xy: list):
    """Compare several JSON training histories on a single figure.

    :meta private:

    Args:
        path (list): paths of the JSON history files.
        xy (list): either empty (defaults to ``["epochs", "val_loss"]``) or
            exactly two names; the x name must be ``"epochs"`` or ``"time"``.

    Raises:
        DeephyperRuntimeError: if ``xy`` has a length other than 0 or 2.
    """
    if not xy:
        x_name, y_name = "epochs", "val_loss"
    elif len(xy) == 2:
        x_name, y_name = xy
    else:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy epochs val_loss'"
        )
    plt.figure()
    # Only two x-axes are understood; any other name leaves the figure empty.
    if x_name == "epochs":
        plot_multiple_training(path, y_name)
    elif x_name == "time":
        plot_multiple_objective_wrp_time(path, y_name)
    plt.ylabel(y_name)
    plt.grid()
    plt.tight_layout()
    plt.show()
| 3,507 | 19.045714 | 84 | py |
deephyper | deephyper-master/deephyper/core/analytics/dashboard/_views.py | import abc
import os
import sys
import altair as alt
import pandas as pd
import streamlit as st
from deephyper.core.analytics import DBManager
from st_aggrid import AgGrid, GridOptionsBuilder, ColumnsAutoSizeMode
class View(abc.ABC):
    """Abstract base of every dashboard component: subclasses render
    themselves into the Streamlit page via :meth:`show`."""
    @abc.abstractmethod
    def show(self):
        ...
class Dashboard(View):
    """Top-level view rendering the whole DeepHyper dashboard page.

    Args:
        database_path (str): path to the TinyDB JSON database shown by
            default in the source selector.
    """
    def __init__(self, database_path="~/.deephyper/db.json") -> None:
        super().__init__()
        self.database_path = database_path
    def show(self):
        """Render the page: source selector, experiment table, then the
        charts selected in the sidebar."""
        st.title("DeepHyper Dashboard")
        st.sidebar.header("Settings")
        source_selection = SourceSelection(self.database_path)
        with st.sidebar.expander("Experiments Source"):
            source_selection.show()
        # No valid database loaded yet: nothing more to render.
        if not (source_selection.dbm):
            return
        experiment_selection = ExperimentSelection(source_selection.dbm)
        experiment_selection.show()
        if len(experiment_selection.data) == 0:
            st.warning("No experiments selected!")
        else:
            if len(experiment_selection.data) == 1:
                # There is only 1 experiment to visualize
                charts = {
                    "Table": CSVView,
                    "Scatter": ScatterPlotView,
                    "Search Trajectory": SearchTrajectoryPlotView,
                    "Utilization": ProfilePlotView,
                }
                default_charts = ["Table", "Scatter"]
                exp = experiment_selection.data[0]
                # Convert the stored column-dict back to a DataFrame once, so
                # every chart can consume it directly.
                exp["data"]["search"]["results"] = pd.DataFrame(
                    exp["data"]["search"]["results"]
                ).reset_index()
            else:
                # There are multiple experiments to compare
                charts = {
                    "Table": CSVView,
                    "Scatter": ScatterPlotView,
                    "Search Trajectory": SearchTrajectoryPlotView,
                    "Utilization": ProfilePlotView,
                }
                default_charts = ["Search Trajectory"]
                exp = experiment_selection.data
                for i in range(len(exp)):
                    exp[i]["data"]["search"]["results"] = pd.DataFrame(
                        exp[i]["data"]["search"]["results"]
                    ).reset_index()
            with st.sidebar.expander("Charts Selection"):
                selected_charts = st.multiselect(
                    "Charts", options=charts.keys(), default=default_charts
                )
            st.sidebar.markdown("""---""")
            st.sidebar.header("Options")
            # Instantiate and render each selected chart in order.
            for name in selected_charts:
                chart = charts[name]
                chart(exp).show()
class SourceSelection(View):
    """Select the source database.

    After :meth:`show`, ``self.dbm`` is a :class:`DBManager` when the typed
    path exists, otherwise ``None``.
    """
    def __init__(self, path) -> None:
        super().__init__()
        self.path = path
        self.dbm = None
    def show(self):
        """Render a text input for the database path and open it if valid."""
        st.markdown("The database should be a `json` file.")
        self.path = st.text_input("Enter the database path", value=self.path)
        if self.path is not None and len(self.path) > 0:
            if not (os.path.exists(self.path)):
                st.warning("File not found!")
            else:
                self.dbm = DBManager(path=self.path)
class ExperimentSelection(View):
    """Interactive table (AgGrid) to pick one or several experiments.

    After :meth:`show`, ``self.data`` holds the full records of the
    checked rows (fetched back from the database by id).
    """
    def __init__(self, dbm):
        self.dbm = dbm
        # Selected experiment records, refreshed on every show().
        self.selection = {}
        self.data = []
        self.selection_mode = "single"
    def show(self):
        """Render the experiments table and collect the user's selection."""
        # main column
        st.header("Available Experiments")
        self.selection_mode = st.radio(
            "Selection Mode", ["single", "multiple"], index=0, horizontal=True
        )
        with st.spinner(text="In progress..."):
            # One summary row per experiment stored in the database.
            data_summary = []
            for exp_data in self.dbm.list():
                data_summary.append(
                    {
                        "id": exp_data["id"],
                        "date_created": exp_data["metadata"]["add_date"],
                        "label": exp_data["metadata"]["label"],
                        "num_workers": exp_data["metadata"]["search"]["num_workers"],
                        "num_evaluations": len(
                            next(iter(exp_data["data"]["search"]["results"].values()))
                        ),
                    }
                )
            if len(data_summary) > 0:
                df = pd.DataFrame(data=data_summary)
                # Full example
                # https://github.com/PablocFonseca/streamlit-aggrid-examples/blob/main/main_example.py
                options_builder = GridOptionsBuilder.from_dataframe(df)
                options_builder.configure_selection(
                    self.selection_mode, use_checkbox=True
                )
                options_builder.configure_column(
                    "date_created",
                    type=["dateColumnFilter", "customDateTimeFormat"],
                    custom_format_string="yyyy-MM-dd HH:mm",
                    pivot=True,
                )
                # Labels can be edited in-place in the grid.
                options_builder.configure_column("label", editable=True)
                grid_options = options_builder.build()
                grid_response = AgGrid(
                    df,
                    grid_options,
                    theme="streamlit",
                    data_return_mode="filtered",
                    width="100%",
                    fit_columns_on_grid_load=False,
                    columns_auto_size_mode=ColumnsAutoSizeMode.FIT_CONTENTS,
                )
                df = grid_response["data"]
                # Fetch the full record of each checked row from the database.
                self.data = [
                    self.dbm.get(exp_id=int(row["id"]))
                    for row in grid_response["selected_rows"]
                ]
                # Propagate any label edited in the grid (in memory only).
                for exp_data, row in zip(self.data, grid_response["selected_rows"]):
                    exp_data["metadata"]["label"] = row["label"]
class DataView(View):
    """For experiments displaying data (e.g., plots and tables)"""
    def __init__(self, name, data):
        super().__init__()
        # `name` is the section header; `data` is one experiment dict (single
        # mode) or a list of experiment dicts (comparison mode).
        self.name = name
        self.data = data
        # A list means several experiments were selected for comparison.
        self.is_single = not (isinstance(data, list))
class CSVView(DataView):
    """Raw results table(s) of the selected experiment(s)."""
    def __init__(self, data) -> None:
        super().__init__("Table", data)
    def show(self):
        """Render one dataframe (single mode) or one collapsible dataframe
        per experiment, capped at 5 to keep the page responsive."""
        st.header(self.name)
        if self.is_single:
            st.dataframe(self.data["data"]["search"]["results"])
        else:
            for i in range(min(len(self.data), 5)):
                with st.expander(
                    f"[{self.data[i]['id']}] {self.data[i]['metadata']['label']}"
                ):
                    st.dataframe(self.data[i]["data"]["search"]["results"])
class ScatterPlotView(DataView):
    """Interactive Altair scatter plot of the results, with axis and color
    selectors in the sidebar."""
    def __init__(self, data):
        super().__init__("Scatter Plot", data)
    def show(self):
        """Render the sidebar options, then the scatter chart."""
        if self.is_single:
            df = self.data["data"]["search"]["results"]
        else:
            # Concatenate every experiment's results, tagged by label so the
            # points can be colored per experiment.
            df = []
            for i in range(len(self.data)):
                df_i = self.data[i]["data"]["search"]["results"]
                df_i["label"] = self.data[i]["metadata"]["label"]
                df.append(df_i)
            df = pd.concat(df, axis=0)
        columns = list(df.columns)
        # Options for the plot
        with st.sidebar.expander(self.name):
            # Prefer a timestamp column as the default x-axis when available.
            if "timestamp_end" in columns:
                x_idx = columns.index("timestamp_end")
            elif "timestamp_gather" in columns:
                x_idx = columns.index("timestamp_gather")
            else:
                x_idx = columns.index("index")
            x_axis = st.selectbox(
                label="X-axis",
                options=columns,
                index=x_idx,
                key=f"{self.name}:x-axis:selectbox",
            )
            col1, col2 = st.columns(2)
            x_axis_max = col1.number_input(
                "X-axis Max",
                value=df[x_axis].max(),
                key=f"{self.name}:x-axis max:number_input",
            )
            x_axis_min = col2.number_input(
                "X-axis Min",
                value=df[x_axis].min(),
                key=f"{self.name}:x-axis min:number_input",
            )
            domain_x = [x_axis_min, x_axis_max]
            y_axis = st.selectbox(
                label="Y-axis",
                options=columns,
                index=columns.index("objective"),
                key=f"{self.name}:y-axis:selectbox",
            )
            col1, col2 = st.columns(2)
            y_axis_max = col1.number_input(
                "Y-axis Max",
                value=df[y_axis].max(),
                key=f"{self.name}:y-axis max:number_input",
            )
            y_axis_min = col2.number_input(
                "Y-axis Min",
                value=df[y_axis].min(),
                key=f"{self.name}:y-axis min:number_input",
            )
            domain_y = [y_axis_min, y_axis_max]
            # Color by objective (single mode) or by experiment label.
            color_var = st.selectbox(
                label="Color",
                options=columns,
                index=columns.index("objective")
                if self.is_single
                else columns.index("label"),
            )
        st.header(self.name)
        c = (
            alt.Chart(df)
            .mark_circle(size=60)
            .encode(
                x=alt.X(
                    x_axis,
                    title=x_axis.replace("_", " ").title(),
                    scale=alt.Scale(domain=domain_x),
                ),
                y=alt.Y(
                    y_axis,
                    title=y_axis.replace("_", " ").title(),
                    scale=alt.Scale(domain=domain_y),
                ),
                color=color_var,
                tooltip=columns,
            )
            .interactive()
        )
        st.altair_chart(c, use_container_width=True)
class SearchTrajectoryPlotView(DataView):
    """Line chart of the running best objective ("max objective") over time,
    one line per experiment in comparison mode."""
    def __init__(self, data):
        super().__init__("Search Trajectory", data)
    def show(self):
        """Render the sidebar options, compute the cumulative max, then the
        line chart."""
        if self.is_single:
            df = self.data["data"]["search"]["results"]
            columns = list(df.columns)
        else:
            # Concatenate every experiment's results, tagged by label.
            df = []
            for i in range(len(self.data)):
                df_i = self.data[i]["data"]["search"]["results"]
                df_i["label"] = self.data[i]["metadata"]["label"]
                df.append(df_i)
            df = pd.concat(df, axis=0)
            columns = list(df.columns)
            # "label" is a grouping key, not a selectable axis.
            columns.remove("label")
        # Options for the plot
        with st.sidebar.expander(self.name):
            # Prefer a timestamp column as the default x-axis when available.
            if "timestamp_end" in columns:
                x_idx = columns.index("timestamp_end")
            elif "timestamp_gather" in columns:
                x_idx = columns.index("timestamp_gather")
            else:
                x_idx = columns.index("index")
            x_axis = st.selectbox(
                label="X-axis",
                options=columns,
                index=x_idx,
                key=f"{self.name}:x-axis:selectbox",
            )
            col1, col2 = st.columns(2)
            x_axis_max = col1.number_input(
                "X-axis Max",
                value=df[x_axis].max(),
                key=f"{self.name}:x-axis max:number_input",
            )
            x_axis_min = col2.number_input(
                "X-axis Min",
                value=df[x_axis].min(),
                key=f"{self.name}:x-axis min:number_input",
            )
            domain_x = [x_axis_min, x_axis_max]
            y_axis = "max objective"
            # The cumulative max must follow the chosen x ordering, per
            # experiment in comparison mode.
            if self.is_single:
                df = df.sort_values(x_axis)
                df["max objective"] = df["objective"].cummax()
            else:
                df = df.sort_values(["label", x_axis])
                df[y_axis] = df.groupby(["label"])["objective"].transform("cummax")
            col1, col2 = st.columns(2)
            y_axis_max = col1.number_input(
                "Y-axis Max",
                value=df[y_axis].max(),
                key=f"{self.name}:y-axis max:number_input",
            )
            y_axis_min = col2.number_input(
                "Y-axis Min",
                value=df[y_axis].min(),
                key=f"{self.name}:y-axis min:number_input",
            )
            domain_y = [y_axis_min, y_axis_max]
        st.header(self.name)
        encode_kwargs = {}
        if not (self.is_single):
            encode_kwargs["color"] = "label"
            encode_kwargs["tooltip"] = "label"
        c = (
            alt.Chart(df)
            .mark_line(interpolate="basis")
            .encode(
                x=alt.X(
                    x_axis,
                    title=x_axis.replace("_", " ").title(),
                    scale=alt.Scale(domain=domain_x),
                ),
                y=alt.Y(
                    y_axis,
                    title=y_axis.replace("_", " ").title(),
                    scale=alt.Scale(domain=domain_y),
                ),
                **encode_kwargs,
            )
            .interactive()
        )
        st.altair_chart(c, use_container_width=True)
class ProfilePlotView(DataView):
    """Worker-utilization profile: a step chart of how many jobs were active
    over time, plus the percentage of utilization over the selected window."""
    def __init__(self, data):
        super().__init__("Utilization", data)
        # Which pair of timestamp columns defines a job's activity interval;
        # overwritten by the sidebar selectbox in show().
        self.profile_type = "start/end"
    def get_profile(self, df, num_workers=0):
        """Compute the step profile of concurrently active jobs.

        Builds a sorted event list (+1 at each job start, -1 at each job end)
        and accumulates it into a (t, n_processes) DataFrame.

        Args:
            df: data frame on which to compute the profile.
            num_workers: number of workers to use for normalization. if 0 then it is ignored.
        """
        # profile_type = "submit/gather"
        # profile_type = "start/end"
        if self.profile_type == "submit/gather":
            column_start = "timestamp_submit"
            column_end = "timestamp_gather"
        else:
            column_start = "timestamp_start"
            column_end = "timestamp_end"
        hist = []
        for _, row in df.iterrows():
            hist.append((row[column_start], 1))
            hist.append((row[column_end], -1))
        n_processes = 0
        # The profile always starts at (t=0, 0 active jobs).
        profile_dict = dict(t=[0], n_processes=[0])
        for e in sorted(hist):
            t, incr = e
            n_processes += incr
            profile_dict["t"].append(t)
            profile_dict["n_processes"].append(n_processes)
        profile = pd.DataFrame(profile_dict)
        if num_workers > 0:
            profile["n_processes"] = profile["n_processes"] / num_workers
        return profile
    def get_perc_util(self, profile, num_workers):
        """Return the average utilization in [0, 1]: the time-integral of the
        step profile divided by (total time * num_workers)."""
        csum = 0
        for i in profile.index[:-1]:
            csum += (profile.loc[i + 1, "t"] - profile.loc[i, "t"]) * profile.loc[
                i, "n_processes"
            ]
        perc_util = csum / (profile["t"].iloc[-1] * num_workers)
        return perc_util
    def show(self):
        """Render the sidebar options, the utilization summary and the step
        chart (one line per experiment in comparison mode)."""
        if self.is_single:
            df = self.data["data"]["search"]["results"]
            metadata = self.data["metadata"]
            columns = list(df.columns)
        else:
            # df = []
            # columns = None
            # for i in range(len(self.data)):
            #     df_i = self.data[i]["data"]["search"]["results"]
            #     label = self.data[i]["metadata"]["label"]
            #     df.append((label, df_i))
            #     if columns is None:
            #         columns = set(df_i.columns)
            #     else:
            #         columns = columns.intersection(set(df_i.columns))
            # columns = list(columns)
            # Concatenate every experiment's results, tagged by database id
            # so they can be grouped back per experiment below.
            df = []
            metadata = {}
            for i in range(len(self.data)):
                df_i = self.data[i]["data"]["search"]["results"]
                df_i["db:id"] = self.data[i]["id"]
                metadata[self.data[i]["id"]] = self.data[i]["metadata"]
                df.append(df_i)
            df = pd.concat(df, axis=0)
            columns = list(df.columns)
        # Options for the plot
        with st.sidebar.expander(self.name):
            normalize = st.checkbox("Normalize by number of workers")
            x_idx = None
            # Offer only the profile types whose columns are present.
            profile_types = []
            if "timestamp_end" in columns:
                x_idx = columns.index("timestamp_end")
                profile_types.append("start/end")
            if "timestamp_gather" in columns:
                if x_idx is None:
                    x_idx = columns.index("timestamp_gather")
                profile_types.append("submit/gather")
            if len(profile_types) == 0:
                st.warning(
                    "Nothing to display as no profiling information provided (e.g., timestamp_submit/gather, timestamp_start/end!)"
                )
                return
            self.profile_type = st.selectbox(
                label="Type of profile",
                options=profile_types,
                index=0,
                key=f"{self.name}:type of profile:selectbox",
            )
            col1, col2 = st.columns(2)
            x_axis = columns[x_idx]
            x_axis_max = col1.number_input(
                "X-axis Max",
                value=df[x_axis].max(),
                key=f"{self.name}:x-axis max:number_input",
            )
            x_axis_min = col2.number_input(
                "X-axis Min",
                value=df[x_axis].min(),
                key=f"{self.name}:x-axis min:number_input",
            )
            domain_x = [x_axis_min, x_axis_max]
        y_label = {
            "start/end": "# Jobs Running",
            "submit/gather": "# Jobs Pending",
        }[self.profile_type]
        if self.is_single:
            num_workers = metadata["search"]["num_workers"]
            if normalize:
                profiles = self.get_profile(df, num_workers)
            else:
                profiles = self.get_profile(df)
            # Restrict the profile to the user-selected time window.
            profiles = profiles[
                (x_axis_min <= profiles["t"]) & (profiles["t"] <= x_axis_max)
            ]
            utilization = self.get_perc_util(
                profiles, 1 if normalize else num_workers
            )
        else:
            profiles = []
            utilization = []
            # One profile and one utilization value per experiment.
            for i, (exp_id, df_group) in enumerate(df.groupby("db:id")):
                num_workers = metadata[exp_id]["search"]["num_workers"]
                label = metadata[exp_id]["label"]
                if normalize:
                    profiles_i = self.get_profile(df_group, num_workers)
                else:
                    profiles_i = self.get_profile(df_group)
                profiles_i["label"] = metadata[exp_id]["label"]
                profiles_i = profiles_i[
                    (x_axis_min <= profiles_i["t"])
                    & (profiles_i["t"] <= x_axis_max)
                ]
                utilization_i = self.get_perc_util(
                    profiles_i, 1 if normalize else num_workers
                )
                profiles.append(profiles_i)
                utilization.append((label, utilization_i))
            profiles = pd.concat(profiles, axis=0)
        st.header(self.name)
        # Utilization summary; "$...$" renders as LaTeX in Streamlit markdown.
        if self.is_single:
            st.markdown(
                f"{utilization*100:.2f}%"
                + " of utilization between $t_{min}="
                + f"${x_axis_min:.0f}"
                + " and $t_{max}"
                + f"=${x_axis_max:.0f}"
            )
        else:
            text = ""
            for label, util in utilization:
                text += (
                    f"* __{label}__: {util*100:.2f}%"
                    + " of utilization between $t_{min}="
                    + f"${x_axis_min:.0f}"
                    + " and $t_{max}"
                    + f"=${x_axis_max:.0f}\n"
                )
            st.markdown(text)
        encode_kwargs = {}
        if not (self.is_single):
            encode_kwargs["color"] = "label"
        c = (
            alt.Chart(profiles)
            .mark_line(interpolate="step-after")
            .encode(
                x=alt.X(
                    "t",
                    title="Time (sec.)",
                    scale=alt.Scale(domain=domain_x),
                ),
                y=alt.Y(
                    "n_processes",
                    title=y_label,
                    # scale=alt.Scale(domain=domain_y),
                ),
                **encode_kwargs,
            )
            .interactive()
        )
        st.altair_chart(c, use_container_width=True)
def main(database_path):
    """Configure the page layout and render the dashboard rooted at
    *database_path*."""
    st.set_page_config(layout="wide")
    Dashboard(database_path).show()
if __name__ == "__main__":
    # Launched via Streamlit: the database path is the first CLI argument.
    database_path = sys.argv[1]
    main(database_path)
| 20,546 | 31.927885 | 131 | py |
deephyper | deephyper-master/deephyper/core/analytics/dashboard/__init__.py | 0 | 0 | 0 | py | |
deephyper | deephyper-master/deephyper/core/exceptions/loading.py | """Exceptions related with imports of modules/attributes/scripts.
"""
from deephyper.core.exceptions import DeephyperError
class GenericLoaderError(DeephyperError):
    """Raised when the generic_loader function is failing.

    Args:
        target: the module/script the attribute was loaded from.
        attr: the attribute that failed to load.
        error_source: the underlying error message.
        custom_msg (str, optional): extra context prepended to the message.
    """
    def __init__(self, target, attr, error_source, custom_msg=""):
        self.target = target
        self.attr = attr
        self.error_source = error_source
        self.custom_msg = custom_msg
    def __str__(self):
        # BUG FIX: the user-facing message read "cannot be importe from";
        # corrected to "cannot be imported from".
        error = (
            f"{self.error_source}\n"
            f"{self.custom_msg}"
            f"The attribute '{self.attr}' cannot be imported from '{self.target}'."
        )
        return error
| 655 | 28.818182 | 82 | py |
deephyper | deephyper-master/deephyper/core/exceptions/problem.py | """Exceptions related with problem definition.
"""
from deephyper.core.exceptions import DeephyperError
class SpaceDimNameOfWrongType(DeephyperError):
    """Raised when a dimension name of the space is not a string.

    Args:
        value: the offending dimension name.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        message = f"Dimension name: '{self.value}' is of type == {type(self.value)} when should be 'str'!"
        return message
# ! NaProblemErrors
class NaProblemError(DeephyperError):
    """Raise when an error occurs in a NaProblem instance.

    Args:
        msg (str): the full error message, returned as-is by ``str(exc)``.
    """
    def __init__(self, msg: str):
        self.msg = msg
    def __str__(self):
        # The caller provides the complete message.
        return self.msg
class SearchSpaceBuilderIsNotCallable(NaProblemError):
    """Raised when a search space builder is not a callable.

    Args:
        parameter: the non-callable object passed as a search space builder.
    """
    def __init__(self, parameter):
        self.parameter = parameter
    def __str__(self):
        # BUG FIX: the original did `raise f"..."` inside __str__, which
        # raises a TypeError ("exceptions must derive from BaseException")
        # instead of producing the message. __str__ must return the string.
        return f"The search space builder {self.parameter} should be a callable when it is not!"
class SearchSpaceBuilderMissingParameter(NaProblemError):
    """Raised when a callable creating a Structure lacks a required parameter.

    Args:
        missing_parameter (str): name of the missing parameter.
    """
    def __init__(self, missing_parameter):
        self.missing_parameter = missing_parameter
    def __str__(self):
        message = f"The callable which creates a Structure is missing a '{self.missing_parameter}' parameter!"
        return message
class SearchSpaceBuilderMissingDefaultParameter(NaProblemError):
    """Raised when a parameter of a search space builder is missing a default value.

    Args:
        parameter: the parameter that lacks a default value.
    """
    def __init__(self, parameter):
        self.parameter = parameter
    def __str__(self):
        message = f"The parameter {self.parameter} must have a default value!"
        return message
class ProblemPreprocessingIsNotCallable(NaProblemError):
    """Raised when the preprocessing parameter is not callable.

    Args:
        parameter: the non-callable object passed as preprocessing.
    """
    def __init__(self, parameter):
        self.parameter = parameter
    def __str__(self):
        message = f"The parameter {self.parameter} must be a callable."
        return message
class ProblemLoadDataIsNotCallable(NaProblemError):
    """Raised when the load_data parameter is not callable.

    Args:
        parameter: the non-callable object passed as load_data.
    """
    def __init__(self, parameter):
        self.parameter = parameter
    def __str__(self):
        message = f"The parameter {self.parameter} must be a callable."
        return message
class WrongProblemObjective(NaProblemError):
    """Raised when the objective parameter is neither a callable nor a string.

    Args:
        objective: the invalid objective.
        possible_names (optional): valid objective names, appended to the
            message when provided.
    """
    def __init__(self, objective, possible_names=None):
        self.objective = objective
        self.possible_names = possible_names
    def __str__(self):
        fragments = [f"The objective: {str(self.objective)} is not valid."]
        if self.possible_names is not None:
            fragments.append(f"Possible objectives are: {self.possible_names}")
        return " ".join(fragments)
| 2,811 | 28.291667 | 107 | py |
deephyper | deephyper-master/deephyper/core/exceptions/__init__.py | """Deephyper exceptions
"""
# ! Root exceptions
class DeephyperError(Exception):
    """Root deephyper exception: base class of all DeepHyper-specific errors."""
class DeephyperRuntimeError(RuntimeError):
    """Raised when an error is detected in deephyper and that doesn't fall in any of the other categories. The associated value is a string indicating what precisely went wrong."""
class SearchTerminationError(RuntimeError):
    """Raised when a search receives SIGALARM (e.g., on search timeout)."""
class RunFunctionError(RuntimeError):
    """Raised when error occurs in run-function.

    Args:
        msg (str, optional): the error message. Defaults to ``None``.
    """
    def __init__(self, msg: str = None) -> None:
        self.msg = msg
    def __str__(self) -> str:
        # BUG FIX: __str__ must return a str; returning None (when no message
        # was given) raised "TypeError: __str__ returned non-string" whenever
        # the exception was printed or logged.
        return self.msg if self.msg is not None else ""
class MissingRequirementError(RuntimeError):
    """Raised when a requirement is not installed properly."""
| 770 | 23.870968 | 180 | py |
deephyper | deephyper-master/deephyper/core/exceptions/nas/space.py | from deephyper.core.exceptions.nas import NASError
class WrongSequenceToSetOperations(NASError):
    """Raised when a sequence of actions does not have the same length as the
    number of variable nodes of the search_space.

    Args:
        sequence_given: the invalid sequence received.
        sequence_valid: a sequence of the expected length.
    """
    def __init__(self, sequence_given, sequence_valid):
        self.sequence_given = sequence_given
        self.sequence_valid = sequence_valid
    def __str__(self):
        given_len = len(self.sequence_given)
        valid_len = len(self.sequence_valid)
        return f"Wrong sequence given: '{self.sequence_given}' of length {given_len} when a valid sequence should be of length {valid_len}"
class StructureHasACycle(NASError):
    """Raised when a search_space is containing a cycle."""

    def __init__(self, msg):
        # Free-form description of where the cycle was detected.
        self.msg = msg

    def __str__(self):
        return self.msg
class InputShapeOfWrongType(NASError):
    """Raised when an input shape of a search_space is of a wrong type.

    Attributes:
        input_shape: the value whose type is neither ``tuple`` nor
            ``list(tuple)``.
    """

    def __init__(self, input_shape):
        self.input_shape = input_shape

    def __str__(self):
        # Bug fix: the message was built as a bare expression and never
        # returned, so ``str(exc)`` raised
        # ``TypeError: __str__ returned non-string (type NoneType)``.
        return f"input_shape must be either a 'tuple' or a 'list(tuple)' but it is of type '{type(self.input_shape)}'!"
class NodeAlreadyAdded(NASError):
    """Raised when a node has already been added in a search_space.

    Attributes:
        node: the duplicate node.
    """

    def __init__(self, node):
        self.node = node

    def __str__(self):
        return f"The node '{str(self.node)}' has already been added to the search_space."
class WrongOutputShape(NASError):
    """Raised when the output shape of the model generated by a search_space
    does not match the expected shape.

    Attributes:
        tensor_shape: shape actually produced by the model.
        expected_shape: shape the search space requires.
    """

    def __init__(self, tensor_shape, expected_shape):
        self.tensor_shape = tensor_shape
        self.expected_shape = expected_shape

    def __str__(self):
        return (
            f"The output tensor of shape {self.tensor_shape} "
            f"doesn't match the expected shape {self.expected_shape}!"
        )
| 1,838 | 30.706897 | 169 | py |
deephyper | deephyper-master/deephyper/core/exceptions/nas/__init__.py | """Neural architecture search exceptions.
"""
from deephyper.core.exceptions import DeephyperError
# Base class for all neural-architecture-search exceptions; catch this to
# handle any NAS-specific error.
class NASError(DeephyperError):
    """Root neural architecture search exception."""
| 187 | 19.888889 | 52 | py |
deephyper | deephyper-master/deephyper/core/utils/_timeout.py | import multiprocessing
import multiprocessing.pool
from deephyper.core.exceptions import SearchTerminationError
def terminate_on_timeout(timeout, func, *args, **kwargs):
    """Run ``func`` in a worker thread and bound its execution time.

    Args:
        timeout (float): maximum number of seconds to wait for ``func``.
        func (callable): the function to execute.
        *args: positional arguments forwarded to ``func``.
        **kwargs: keyword arguments forwarded to ``func``.

    Returns:
        the value returned by ``func``.

    Raises:
        SearchTerminationError: when ``func`` does not finish within
            ``timeout`` seconds.
    """
    worker_pool = multiprocessing.pool.ThreadPool(processes=1)
    async_result = worker_pool.apply_async(func, args, kwargs)
    worker_pool.close()
    try:
        # Blocks at most ``timeout`` seconds; re-raises any exception from func.
        return async_result.get(timeout)
    except multiprocessing.TimeoutError:
        raise SearchTerminationError(f"Search timeout expired after: {timeout}")
    finally:
        worker_pool.terminate()
| 608 | 29.45 | 103 | py |
deephyper | deephyper-master/deephyper/core/utils/_import.py | import importlib
def load_attr(str_full_module):
    """Load an attribute from its dotted module path.

    Args:
        str_full_module (str): string of the form ``{module_name}.{attr}``.
            Any non-string value is returned unchanged (assumed to already be
            the attribute).

    Returns:
        Any: the attribute.
    """
    # ``isinstance`` instead of ``type(...) == str`` so str subclasses work too.
    if isinstance(str_full_module, str):
        # Split on the LAST dot: everything before is the module path,
        # everything after is the attribute name.
        str_module, _, str_attr = str_full_module.rpartition(".")
        module = importlib.import_module(str_module)
        return getattr(module, str_attr)
    return str_full_module
| 525 | 24.047619 | 75 | py |
deephyper | deephyper-master/deephyper/core/utils/_files.py | import pathlib
def ensure_dh_folder_exists():
    """Creates a ``".deephyper"`` directory in the user home directory.

    Returns:
        str: POSIX path of the ``.deephyper`` directory.
    """
    target = pathlib.Path.home() / ".deephyper"
    # exist_ok: a second call is a no-op; parents=False assumes home exists.
    target.mkdir(parents=False, exist_ok=True)
    return target.as_posix()
| 292 | 28.3 | 74 | py |
deephyper | deephyper-master/deephyper/core/utils/__init__.py | from ._import import load_attr
__all__ = ["load_attr"]
| 56 | 13.25 | 30 | py |
deephyper | deephyper-master/deephyper/core/utils/_introspection.py | import inspect
import json
def _get_init_param_names(cls):
    """Return the sorted names of ``cls.__init__`` parameters, excluding
    ``self`` and any ``*args``/``**kwargs``."""
    init = cls.__init__
    # ``object.__init__`` takes no meaningful parameters: nothing to report.
    if init is object.__init__:
        return []
    # Introspect the constructor signature for plain (positional-or-keyword)
    # parameters only.
    signature = inspect.signature(init)
    names = [
        param.name
        for param in signature.parameters.values()
        if param.kind == param.POSITIONAL_OR_KEYWORD and param.name != "self"
    ]
    return sorted(names)
def get_init_params(obj):
    """Get the raw parameters of an object.

    For each ``__init__`` parameter name, the value is looked up on the
    instance as ``_{name}`` first, then ``{name}``, then in the object's
    ``_init_params`` dict (``None`` when absent everywhere).

    Args:
        obj (any): The object of which we want to know the ``__init__`` arguments.

    Returns:
        params (dict): Parameter names mapped to their values.
    """
    fallback = getattr(obj, "_init_params", dict())

    params = dict()
    for name in _get_init_param_names(obj):
        if hasattr(obj, f"_{name}"):
            params[name] = getattr(obj, f"_{name}")
        elif hasattr(obj, name):
            params[name] = getattr(obj, name)
        else:
            params[name] = fallback.get(name, None)
    return params
return params
def get_init_params_as_json(obj):
    """Get the parameters of an object in a json format.

    Values are made JSON-safe via ``to_json()`` when available, otherwise by a
    ``json.dumps``/``json.loads`` round trip; non-serializable values become
    the string ``"NA"``. Keys containing ``"__"`` and the ``"self"`` entry are
    skipped.

    Args:
        obj (any): The object of which we want to know the ``__init__`` arguments.

    Returns:
        params (dict): Parameter names mapped to their values.
    """
    if hasattr(obj, "_init_params"):
        # Bug fix: copy before removing "self" — the previous implementation
        # popped from ``obj._init_params`` in place, mutating the object's own
        # state as a side effect of this read-only query.
        base_init_params = dict(obj._init_params)
        base_init_params.pop("self", None)
    else:
        base_init_params = dict()

    params = dict()
    for k, v in base_init_params.items():
        if "__" in k:
            continue
        if hasattr(v, "to_json"):
            params[k] = v.to_json()
        else:
            try:
                # Round-trip through JSON to guarantee serializability.
                params[k] = json.loads(json.dumps(v))
            except Exception:
                params[k] = "NA"
    return params
| 2,257 | 28.324675 | 82 | py |
deephyper | deephyper-master/deephyper/skopt/callbacks.py | """Monitor and influence the optimization procedure via callbacks.
Callbacks are callables which are invoked after each iteration of the optimizer
and are passed the results "so far". Callbacks can monitor progress, or stop
the optimization early by returning `True`.
"""
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
from time import time
import numpy as np
from deephyper.skopt.utils import dump
def check_callback(callback):
    """
    Check if callback is a callable or a list of callables.

    Returns a list of callbacks: ``[]`` for ``None``, ``[callback]`` for a
    single callable, the list itself when every element is callable.
    Raises ``ValueError`` otherwise.
    """
    # Guard clauses instead of nested if/else.
    if callback is None:
        return []
    if isinstance(callback, Callable):
        return [callback]
    if isinstance(callback, list) and all(isinstance(c, Callable) for c in callback):
        return callback
    raise ValueError(
        "callback should be either a callable or " "a list of callables."
    )
class VerboseCallback(object):
    """
    Callback to control the verbosity.

    Parameters
    ----------
    n_init : int, optional
        Number of points provided by the user which are yet to be
        evaluated. This is equal to `len(x0)` when `y0` is None

    n_random : int, optional
        Number of points randomly chosen.

    n_total : int
        Total number of func calls.

    Attributes
    ----------
    iter_no : int
        Number of iterations of the optimization routine.
    """

    def __init__(self, n_total, n_init=0, n_random=0):
        self.n_init = n_init
        self.n_random = n_random
        self.n_total = n_total
        self.iter_no = 1
        # Wall-clock start of the current iteration; reset at the end of
        # each __call__.
        self._start_time = time()
        self._print_info(start=True)

    def _print_info(self, start=True):
        # Print the status line for the current iteration. The wording
        # depends on whether the point being evaluated is user-provided
        # (iter_no <= n_init), random (the next n_random iterations), or
        # chosen by the search (all later iterations).
        iter_no = self.iter_no
        if start:
            status = "started"
            eval_status = "Evaluating function"
            search_status = "Searching for the next optimal point."
        else:
            status = "ended"
            eval_status = "Evaluation done"
            search_status = "Search finished for the next optimal point."

        if iter_no <= self.n_init:
            print(
                "Iteration No: %d %s. %s at provided point."
                % (iter_no, status, eval_status)
            )

        elif self.n_init < iter_no <= (self.n_random + self.n_init):
            print(
                "Iteration No: %d %s. %s at random point."
                % (iter_no, status, eval_status)
            )

        else:
            print("Iteration No: %d %s. %s" % (iter_no, status, search_status))

    def __call__(self, res):
        """
        Parameters
        ----------
        res : `OptimizeResult`, scipy object
            The optimization as a OptimizeResult object.
        """
        time_taken = time() - self._start_time
        # Close out the iteration that just finished...
        self._print_info(start=False)

        curr_y = res.func_vals[-1]
        curr_min = res.fun

        print("Time taken: %0.4f" % time_taken)
        print("Function value obtained: %0.4f" % curr_y)
        print("Current minimum: %0.4f" % curr_min)

        # ...then announce the next one, unless the budget is exhausted.
        self.iter_no += 1
        if self.iter_no <= self.n_total:
            self._print_info(start=True)
            self._start_time = time()
class TimerCallback(object):
    """
    Log the elapsed time between each iteration of the minimization loop.

    The time for each iteration is stored in the `iter_time` attribute which
    you can inspect after the minimization has completed.

    Attributes
    ----------
    iter_time : list, shape (n_iter,)
        `iter_time[i-1]` gives the time taken to complete iteration `i`
    """

    def __init__(self):
        # Timestamp of the previous call (or of construction).
        self._time = time()
        self.iter_time = []

    def __call__(self, res):
        """
        Parameters
        ----------
        res : `OptimizeResult`, scipy object
            The optimization as a OptimizeResult object.
        """
        self.iter_time.append(time() - self._time)
        self._time = time()
class EarlyStopper(object):
    """Decide to continue or not given the results so far.

    The optimization procedure will be stopped if the callback returns True.
    """

    def __call__(self, result):
        """Delegate the stop decision to ``_criterion``.

        Parameters
        ----------
        result : `OptimizeResult`, scipy object
            The optimization as a OptimizeResult object.
        """
        return self._criterion(result)

    def _criterion(self, result):
        """Compute the decision to stop or not.

        Subclasses implement their decision logic here.

        Parameters
        ----------
        result : `OptimizeResult`, scipy object
            The optimization as a OptimizeResult object.

        Returns
        -------
        decision : boolean or None
            True/False if the criterion can make a decision, `None` when
            there is not enough data yet to decide.
        """
        raise NotImplementedError(
            "The _criterion method should be implemented"
            " by subclasses of EarlyStopper."
        )
class DeltaXStopper(EarlyStopper):
    """Stop the optimization when ``|x1 - x2| < delta``

    If the last two positions at which the objective has been evaluated
    are less than `delta` apart stop the optimization procedure.
    """

    def __init__(self, delta):
        # Bug fix: the original called ``super(EarlyStopper, self).__init__()``
        # which starts the MRO lookup *after* EarlyStopper (skipping it); the
        # zero-argument form resolves from this class, matching the other
        # stoppers in this module.
        super().__init__()
        self.delta = delta

    def _criterion(self, result):
        # Not enough points yet to compare: no decision (None).
        if len(result.x_iters) < 2:
            return None
        return (
            result.space.distance(result.x_iters[-2], result.x_iters[-1])
            < self.delta
        )
class DeltaYStopper(EarlyStopper):
    """Stop the optimization if the `n_best` minima are within `delta`

    Stop the optimizer if the absolute difference between the `n_best`
    objective values is less than `delta`.
    """

    def __init__(self, delta, n_best=5):
        # Bug fix: the original called ``super(EarlyStopper, self).__init__()``
        # which starts the MRO lookup *after* EarlyStopper (skipping it); the
        # zero-argument form resolves from this class, matching the other
        # stoppers in this module.
        super().__init__()
        self.delta = delta
        self.n_best = n_best

    def _criterion(self, result):
        # Fewer than n_best evaluations: no decision yet (None).
        if len(result.func_vals) < self.n_best:
            return None
        func_vals = np.sort(result.func_vals)
        worst = func_vals[self.n_best - 1]
        best = func_vals[0]

        # worst is always larger, so no need for abs()
        return worst - best < self.delta
class HollowIterationsStopper(EarlyStopper):
    """
    Stop if the improvement over the last n iterations is below a threshold.
    """

    def __init__(self, n_iterations, threshold=0):
        super().__init__()
        self.n_iterations = n_iterations
        # Stored as a magnitude: a negative threshold means the same bound.
        self.threshold = abs(threshold)

    def _criterion(self, result):
        history = result.func_vals
        if len(history) <= self.n_iterations:
            return False
        # Running minimum of the objective; compare the best value
        # n_iterations ago against the best value now.
        running_min = np.minimum.accumulate(history)
        improvement = running_min[-self.n_iterations - 1] - running_min[-1]
        return improvement <= self.threshold
class DeadlineStopper(EarlyStopper):
    """
    Stop the optimization before running out of a fixed budget of time.

    Attributes
    ----------
    iter_time : list, shape (n_iter,)
        `iter_time[i-1]` gives the time taken to complete iteration `i`

    Parameters
    ----------
    total_time : float
        fixed budget of time (seconds) that the optimization must
        finish within.
    """

    def __init__(self, total_time):
        super().__init__()
        self._time = time()
        self.iter_time = []
        self.total_time = total_time

    def _criterion(self, result):
        # Record how long the iteration that just finished took.
        self.iter_time.append(time() - self._time)
        self._time = time()

        if not result.x_iters:
            return None
        # Stop when the remaining budget could not fit another iteration
        # as slow as the slowest one observed so far.
        remaining = self.total_time - np.sum(self.iter_time)
        return remaining <= np.max(self.iter_time)
class ThresholdStopper(EarlyStopper):
    """
    Stop the optimization when the objective value is lower
    than the given threshold.
    """

    def __init__(self, threshold: float) -> None:
        # Bug fix: the original called ``super(EarlyStopper, self).__init__()``
        # which starts the MRO lookup *after* EarlyStopper (skipping it); the
        # zero-argument form resolves from this class, matching the other
        # stoppers in this module.
        super().__init__()
        self.threshold = threshold

    def _criterion(self, result) -> bool:
        # True as soon as ANY observed objective value is <= threshold.
        return np.any([val <= self.threshold for val in result.func_vals])
class CheckpointSaver(object):
    """
    Save current state after each iteration with :class:`deephyper.skopt.dump`.

    Examples
    --------
    >>> import deephyper.skopt
    >>> def obj_fun(x):
    ...     return x[0]**2
    >>> checkpoint_callback = deephyper.skopt.callbacks.CheckpointSaver("./result.pkl")
    >>> deephyper.skopt.gp_minimize(obj_fun, [(-2, 2)], n_calls=10,
    ...                   callback=[checkpoint_callback]) # doctest: +SKIP

    Parameters
    ----------
    checkpoint_path : string
        location where checkpoint will be saved to;

    dump_options : string
        options to pass on to `deephyper.skopt.dump`, like `compress=9`
    """

    def __init__(self, checkpoint_path, **dump_options):
        self.checkpoint_path = checkpoint_path
        # Extra keyword arguments forwarded verbatim to ``dump`` on each call.
        self.dump_options = dump_options

    def __call__(self, res):
        """
        Parameters
        ----------
        res : `OptimizeResult`, scipy object
            The optimization as a OptimizeResult object.
        """
        # Overwrites the checkpoint file on every iteration.
        dump(res, self.checkpoint_path, **self.dump_options)
| 9,497 | 27.183976 | 87 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.