| hexsha (string, 40) | size (int64, 3–1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3–972) | max_stars_repo_name (string, 6–130) | max_stars_repo_head_hexsha (string, 40–78) | max_stars_repo_licenses (list, 1–10) | max_stars_count (int64, 1–191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 3–972) | max_issues_repo_name (string, 6–130) | max_issues_repo_head_hexsha (string, 40–78) | max_issues_repo_licenses (list, 1–10) | max_issues_count (int64, 1–116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 3–972) | max_forks_repo_name (string, 6–130) | max_forks_repo_head_hexsha (string, 40–78) | max_forks_repo_licenses (list, 1–10) | max_forks_count (int64, 1–105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 3–1.03M) | avg_line_length (float64, 1.13–941k) | max_line_length (int64, 2–941k) | alphanum_fraction (float64, 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
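The three trailing columns are derived from the content column; a minimal sketch of how they can be recomputed (plain Python, an approximation only, since the original pipeline may count newlines and trailing blank lines differently):

def derived_columns(content: str):
    # Average and maximum line length over the file, plus the share of alphanumeric characters.
    lengths = [len(line) for line in content.splitlines()] or [0]
    avg_line_length = sum(lengths) / len(lengths)
    max_line_length = max(lengths)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction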
f230de5d04d6eda92e826261aa15d43ce3b3f7f6
| 34
|
py
|
Python
|
src/python/flask/__init__.py
|
computer-geek64/guinn
|
11e10a9fbf1f99fd0ff8e15d7a812679ae7015f4
|
[
"MIT"
] | 2
|
2020-06-25T00:06:38.000Z
|
2020-09-11T18:59:45.000Z
|
src/python/flask/__init__.py
|
computer-geek64/guinn
|
11e10a9fbf1f99fd0ff8e15d7a812679ae7015f4
|
[
"MIT"
] | 20
|
2020-06-25T00:16:35.000Z
|
2020-06-25T19:24:14.000Z
|
src/python/flask/__init__.py
|
computer-geek64/guinn
|
11e10a9fbf1f99fd0ff8e15d7a812679ae7015f4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# __init__.py
| 8.5
| 18
| 0.676471
|
462d3a04d5c9ea54d7c66ecfdba721f8eef129a6
| 8,667
|
py
|
Python
|
ambari-server/src/test/python/TestStackSelect.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 1,664
|
2015-01-03T09:35:21.000Z
|
2022-03-31T04:55:24.000Z
|
ambari-server/src/test/python/TestStackSelect.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 3,018
|
2015-02-19T20:16:10.000Z
|
2021-11-13T20:47:48.000Z
|
ambari-server/src/test/python/TestStackSelect.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 1,673
|
2015-01-06T14:14:42.000Z
|
2022-03-31T07:22:30.000Z
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import patch
from mock.mock import MagicMock
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions import stack_select
from resource_management.libraries.script import Script
from unittest import TestCase
Logger.initialize_logger()
class TestStackSelect(TestCase):
def test_missing_role_information_throws_exception(self):
"""
Tests that missing the service & role throws an exception
:return:
"""
version = "2.5.9.9-9999"
command_json = TestStackSelect._get_incomplete_cluster_simple_upgrade_json()
Script.config = command_json
self.assertRaises(Fail, stack_select.select_packages, version)
@patch.object(stack_select, "get_supported_packages")
@patch("resource_management.libraries.functions.stack_select.select")
def test_select_package_for_standard_orchestration(self, stack_select_select_mock, get_supported_packages_mock):
"""
Tests that packages are selected for a standard upgrade orchestration
:return:
"""
get_supported_packages_mock.return_value = TestStackSelect._get_supported_packages()
version = "2.5.9.9-9999"
command_json = TestStackSelect._get_cluster_simple_upgrade_json()
Script.config = dict()
Script.config.update(command_json)
Script.config.update( { "configurations" : { "cluster-env" : {} }, "clusterLevelParams": {} } )
Script.config["configurations"]["cluster-env"]["stack_packages"] = self._get_stack_packages()
Script.config["clusterLevelParams"] = { "stack_name" : "HDP" }
stack_select.select_packages(version)
self.assertEqual(len(stack_select_select_mock.call_args_list), 2)
self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-master", version))
self.assertEqual(stack_select_select_mock.call_args_list[1][0], ("foo-client", version))
@patch.object(stack_select, "get_supported_packages")
@patch("resource_management.libraries.functions.stack_select.select")
def test_select_package_for_patch_orchestration(self, stack_select_select_mock, get_supported_packages_mock):
"""
Tests that only the patched packages are selected for PATCH/MAINT orchestration
:return:
"""
get_supported_packages_mock.return_value = TestStackSelect._get_supported_packages()
version = "2.5.9.9-9999"
command_json = TestStackSelect._get_cluster_simple_upgrade_json()
command_json["upgradeSummary"]["orchestration"] = "PATCH"
Script.config = dict()
Script.config.update(command_json)
Script.config.update( { "configurations" : { "cluster-env" : {} }, "clusterLevelParams": {} } )
Script.config["configurations"]["cluster-env"]["stack_packages"] = self._get_stack_packages()
Script.config["clusterLevelParams"] = { "stack_name" : "HDP" }
stack_select.select_packages(version)
self.assertEqual(len(stack_select_select_mock.call_args_list), 1)
self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-master", version))
stack_select_select_mock.reset_mock()
command_json["upgradeSummary"]["orchestration"] = "MAINT"
stack_select.select_packages(version)
self.assertEqual(len(stack_select_select_mock.call_args_list), 1)
self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-master", version))
@patch.object(stack_select, "get_supported_packages")
@patch("resource_management.libraries.functions.stack_select.select")
def test_legacy_package_fallback(self, stack_select_select_mock, get_supported_packages_mock):
"""
Tests that if the package specified by the JSON isn't supported by the stack-select tool,
then the fallback legacy value is used.
:return:
"""
get_supported_packages_mock.return_value = ["foo-legacy"]
version = "2.5.9.9-9999"
command_json = TestStackSelect._get_cluster_simple_upgrade_json()
Script.config = dict()
Script.config.update(command_json)
Script.config.update( { "configurations" : { "cluster-env" : {} }, "clusterLevelParams": {} } )
Script.config["configurations"]["cluster-env"]["stack_packages"] = self._get_stack_packages_with_legacy()
Script.config["clusterLevelParams"] = { "stack_name" : "HDP" }
stack_select.select_packages(version)
self.assertEqual(len(stack_select_select_mock.call_args_list), 1)
self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-legacy", version))
@staticmethod
def _get_incomplete_cluster_simple_upgrade_json():
"""
A command missing the role and service name during an upgrade.
:return:
"""
return {
"roleCommand":"ACTIONEXECUTE",
"clusterLevelParams": {
"stack_name": "HDP",
"stack_version": "2.4",
},
"commandParams": {
"source_stack": "2.4",
"target_stack": "2.5",
"upgrade_direction": "upgrade",
"version": "2.5.9.9-9999"
},
"upgradeSummary": {
"services":{
"HDFS":{
"sourceRepositoryId":1,
"sourceStackId":"HDP-2.4",
"sourceVersion":"2.4.0.0-1234",
"targetRepositoryId":2,
"targetStackId":"HDP-2.5",
"targetVersion":"2.5.9.9-9999"
}
},
"direction":"UPGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"associatedStackId":"HDP-2.5",
"associatedVersion":"2.5.9.9-9999",
"isDowngradeAllowed": True,
"isSwitchBits": False
}
}
@staticmethod
def _get_cluster_simple_upgrade_json():
"""
A restart command during an upgrade.
:return:
"""
return {
"roleCommand":"ACTIONEXECUTE",
"serviceName": "FOO_SERVICE",
"role": "FOO_MASTER",
"clusterLevelParams": {
"stack_name": "HDP",
"stack_version": "2.4",
},
"commandParams": {
"source_stack": "2.4",
"target_stack": "2.5",
"upgrade_direction": "upgrade",
"version": "2.5.9.9-9999"
},
"upgradeSummary": {
"services":{
"HDFS":{
"sourceRepositoryId":1,
"sourceStackId":"HDP-2.4",
"sourceVersion":"2.4.0.0-1234",
"targetRepositoryId":2,
"targetStackId":"HDP-2.5",
"targetVersion":"2.5.9.9-9999"
}
},
"direction":"UPGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"associatedStackId":"HDP-2.5",
"associatedVersion":"2.5.9.9-9999",
"isDowngradeAllowed": True,
"isSwitchBits": False
}
}
@staticmethod
def _get_stack_packages():
import json
return json.dumps( {
"HDP": {
"stack-select": {
"FOO_SERVICE": {
"FOO_MASTER": {
"STACK-SELECT-PACKAGE": "foo-master",
"INSTALL": [
"foo-master",
"foo-client"
],
"PATCH": [
"foo-master"
],
"STANDARD": [
"foo-master",
"foo-client"
]
}
}
}
}
} )
@staticmethod
def _get_stack_packages_with_legacy():
import json
return json.dumps( {
"HDP": {
"stack-select": {
"FOO_SERVICE": {
"FOO_MASTER": {
"LEGACY":"foo-legacy",
"STACK-SELECT-PACKAGE": "foo-master",
"INSTALL": [
"foo-master"
],
"PATCH": [
"foo-master"
],
"STANDARD": [
"foo-master"
]
}
}
}
}
} )
@staticmethod
def _get_supported_packages():
return ["foo-master", "foo-client"]
| 32.460674
| 114
| 0.641975
|
734cbb20862fe502328a3173591901167ca62ca8
| 8,513
|
py
|
Python
|
tests/test_package_tools.py
|
eyllanesc/bincrafters-package-tools
|
110a6715b2bfa2099b6f1c1ce555941035efecbd
|
[
"MIT"
] | null | null | null |
tests/test_package_tools.py
|
eyllanesc/bincrafters-package-tools
|
110a6715b2bfa2099b6f1c1ce555941035efecbd
|
[
"MIT"
] | null | null | null |
tests/test_package_tools.py
|
eyllanesc/bincrafters-package-tools
|
110a6715b2bfa2099b6f1c1ce555941035efecbd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import platform
import pytest
from bincrafters import build_shared
from bincrafters import build_template_boost_default
from bincrafters import build_template_boost_header_only
from bincrafters import build_template_default
from bincrafters import build_template_header_only
from bincrafters import build_template_installer
@pytest.fixture(autouse=True)
def set_matrix_variables():
if platform.system() == "Linux":
os.environ["CONAN_GCC_VERSIONS"] = "7"
elif platform.system() == "Windows":
os.environ["CONAN_VISUAL_VERSIONS"] = "15"
elif platform.system() == "Darwin":
os.environ["CONAN_APPLE_CLANG_VERSIONS"] = "9.0"
@pytest.fixture()
def set_minimal_build_environment():
os.environ["CONAN_ARCHS"] = "x86_64"
os.environ["CONAN_BUILD_TYPES"] = "Release"
yield
del os.environ["CONAN_ARCHS"]
del os.environ["CONAN_BUILD_TYPES"]
@pytest.fixture()
def set_upload_when_stable_false():
os.environ["CONAN_UPLOAD_ONLY_WHEN_STABLE"] = "0"
yield
del os.environ["CONAN_UPLOAD_ONLY_WHEN_STABLE"]
@pytest.fixture()
def set_upload_address():
os.environ["CONAN_UPLOAD"] = "https://api.bintray.com/conan/foo/bar@False@remotefoo"
yield
del os.environ["CONAN_UPLOAD"]
@pytest.fixture()
def set_remote_address():
os.environ["CONAN_REMOTES"] = "https://api.bintray.com/conan/foo/bar@False@remotefoo"
yield
del os.environ["CONAN_REMOTES"]
@pytest.fixture()
def set_multi_remote_address():
os.environ["CONAN_REMOTES"] = "https://api.bintray.com/conan/foo/bar,https://api.bintray.com/conan/qux/baz"
yield
del os.environ["CONAN_REMOTES"]
@pytest.fixture()
def set_mixed_remote_address():
os.environ["CONAN_REMOTES"] = "https://api.bintray.com/conan/foo/bar@False@remotefoo,https://api.bintray.com/conan/qux/baz"
yield
del os.environ["CONAN_REMOTES"]
def test_build_template_boost_default():
builder = build_template_boost_default.get_builder()
for settings, options, env_vars, build_requires, reference in builder.items:
assert "foobar:shared" in options
assert "boost_*:shared" in options
if platform.system() == "Darwin":
assert "x86_64" == settings['arch']
if platform.system() == "Linux":
assert 8 == len(builder.items)
elif platform.system() == "Windows":
assert 6 == len(builder.items)
elif platform.system() == "Darwin":
assert 4 == len(builder.items)
assert False == builder.upload_only_when_stable
def test_build_template_default():
builder = build_template_default.get_builder()
for settings, options, env_vars, build_requires, reference in builder.items:
assert "foobar:shared" in options
if platform.system() == "Darwin":
assert "x86_64" == settings['arch']
if platform.system() == "Linux":
assert 4 == len(builder.items)
elif platform.system() == "Windows":
assert 6 == len(builder.items)
elif platform.system() == "Darwin":
assert 4 == len(builder.items)
assert True == builder.upload_only_when_stable
def test_build_template_default_minimal(set_minimal_build_environment):
builder = build_template_default.get_builder()
for settings, options, env_vars, build_requires, reference in builder.items:
assert "foobar:shared" in options
assert "x86_64" == settings['arch']
if platform.system() == "Linux":
assert 2 == len(builder.items)
elif platform.system() == "Windows":
assert 3 == len(builder.items)
elif platform.system() == "Darwin":
assert 2 == len(builder.items)
def test_build_template_default_non_pure_c():
builder = build_template_default.get_builder(pure_c=False)
for settings, options, env_vars, build_requires, reference in builder.items:
assert "foobar:shared" in options
assert "x86_64" == settings['arch']
if platform.system() == "Linux":
assert 8 == len(builder.items)
elif platform.system() == "Windows":
assert 6 == len(builder.items)
elif platform.system() == "Darwin":
assert 4 == len(builder.items)
def test_build_shared():
builder = build_shared.get_builder()
assert 0 == len(builder.items)
def test_build_template_installer():
builder = build_template_installer.get_builder()
assert 0 == len(builder.items)
def test_build_header_only():
builder = build_template_header_only.get_builder()
for settings, options, env_vars, build_requires, reference in builder.items:
assert 0 == len(options)
assert 1 == len(builder.items)
def test_build_boost_header_only():
builder = build_template_boost_header_only.get_builder()
for settings, options, env_vars, build_requires, reference in builder.items:
assert 0 == len(options)
assert 1 == len(builder.items)
assert builder.upload_only_when_stable == False
def test_get_os():
expected_os = "Macos" if platform.system() == "Darwin" else platform.system()
assert expected_os == build_shared.get_os()
def test_ci_is_running():
expected = True if os.getenv("CI", None) is not None else False
assert expected == build_shared.is_ci_running()
def test_build_policy_not_set():
builder = build_template_default.get_builder()
assert None == builder.build_policy
def test_build_policy_set_in_args():
builder = build_template_default.get_builder(build_policy='missing')
assert 'missing' == builder.build_policy
def test_build_policy_set_header_only():
builder = build_template_header_only.get_builder(build_policy='missing')
assert 'missing' == builder.build_policy
def test_upload_only_when_stable_builder(set_upload_when_stable_false):
builder = build_template_default.get_builder()
assert False == builder.upload_only_when_stable
def test_upload_only_when_stable_parameter(set_upload_when_stable_false):
builder = build_template_default.get_builder(upload_only_when_stable=True)
assert True == builder.upload_only_when_stable
def test_upload_only_when_stable_header_only(set_upload_when_stable_false):
builder = build_template_header_only.get_builder()
assert False == builder.upload_only_when_stable
def test_format_upload(set_upload_address):
builder = build_template_default.get_builder()
assert "remotefoo" == builder.remotes_manager.upload_remote_name
assert "remotefoo" == builder.remotes_manager._upload.name
assert "https://api.bintray.com/conan/foo/bar" == builder.remotes_manager._upload.url
assert 'False' == builder.remotes_manager._upload.use_ssl
def test_format_remote(set_remote_address):
builder = build_template_default.get_builder()
remote = builder.remotes_manager._remotes[0]
assert 1 == len(builder.remotes_manager._remotes)
assert "remotefoo" == remote.name
assert "https://api.bintray.com/conan/foo/bar" == remote.url
assert False == remote.use_ssl
def test_format_multi_remotes(set_multi_remote_address):
builder = build_template_default.get_builder()
assert 2 == len(builder.remotes_manager._remotes)
remote = builder.remotes_manager._remotes[0]
assert "remotefoo" == remote.name
assert "https://api.bintray.com/conan/foo/bar" == remote.url
assert remote.use_ssl
remote = builder.remotes_manager._remotes[1]
assert "remote1" == remote.name
assert "https://api.bintray.com/conan/qux/baz" == remote.url
assert True == remote.use_ssl
def test_format_mixed_remotes(set_mixed_remote_address):
builder = build_template_default.get_builder()
assert 2 == len(builder.remotes_manager._remotes)
remote = builder.remotes_manager._remotes[0]
assert "remotefoo" == remote.name
assert "https://api.bintray.com/conan/foo/bar" == remote.url
assert False == remote.use_ssl
remote = builder.remotes_manager._remotes[1]
assert "remote1" == remote.name
assert "https://api.bintray.com/conan/qux/baz" == remote.url
assert True == remote.use_ssl
def test_default_remote_address(set_upload_address):
builder = build_template_default.get_builder()
assert 2 == len(builder.remotes_manager._remotes)
remote = builder.remotes_manager._remotes[0]
assert "remotefoo" == remote.name
assert "https://api.bintray.com/conan/foo/bar" == remote.url
remote = builder.remotes_manager._remotes[1]
assert "upload_repo" == remote.name
assert "https://api.bintray.com/conan/bincrafters/public-conan" == remote.url
| 34.188755
| 127
| 0.725009
|
f49b7032f77f990fe80cee042cdf6d03aaf85a37
| 1,449
|
py
|
Python
|
src/medius/mediuspackets/getworldsecuritylevel.py
|
Dnawrkshp/robo
|
5ba20305f7a0c9dfe03229e0d49bde416bea6353
|
[
"MIT"
] | null | null | null |
src/medius/mediuspackets/getworldsecuritylevel.py
|
Dnawrkshp/robo
|
5ba20305f7a0c9dfe03229e0d49bde416bea6353
|
[
"MIT"
] | null | null | null |
src/medius/mediuspackets/getworldsecuritylevel.py
|
Dnawrkshp/robo
|
5ba20305f7a0c9dfe03229e0d49bde416bea6353
|
[
"MIT"
] | null | null | null |
from enums.enums import MediusEnum, CallbackStatus, WorldSecurityLevelType
from utils import utils
from medius.mediuspackets.getworldsecuritylevelresponse import GetWorldSecurityLevelResponseSerializer
class GetWorldSecurityLevelSerializer:
data_dict = [
{'name': 'mediusid', 'n_bytes': 2, 'cast': None},
{'name': 'message_id', 'n_bytes': MediusEnum.MESSAGEID_MAXLEN, 'cast': None},
{'name': 'session_key', 'n_bytes': MediusEnum.SESSIONKEY_MAXLEN, 'cast': None},
{'name': 'buf', 'n_bytes': 2, 'cast': None},
{'name': 'world_id', 'n_bytes': 4, 'cast': utils.bytes_to_int_little},
{'name': 'app_type', 'n_bytes': 4, 'cast': utils.bytes_to_int_little}
]
class GetWorldSecurityLevelHandler:
def process(self, serialized, monolith, con):
game = monolith.get_client_manager().get_game(serialized['world_id'])
if game is None:
return [GetWorldSecurityLevelResponseSerializer.build(
serialized['message_id'],
CallbackStatus.NO_RESULT,
0,
0,
0
)]
return [GetWorldSecurityLevelResponseSerializer.build(
serialized['message_id'],
CallbackStatus.SUCCESS,
serialized['world_id'],
serialized['app_type'],
WorldSecurityLevelType.WORLD_SECURITY_NONE # TODO: implement games with password protection
)]
| 42.617647
| 103
| 0.638371
|
fe197ec5cd8ecc39a29a9c2308fbdb6174de608d
| 127
|
py
|
Python
|
tests/regression/RandomReg_10/ws_RandomReg_10_SVR_poly_mssql_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 1
|
2019-07-09T14:45:18.000Z
|
2019-07-09T14:45:18.000Z
|
tests/regression/RandomReg_10/ws_RandomReg_10_SVR_poly_mssql_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 5
|
2017-11-13T13:35:37.000Z
|
2021-11-11T12:57:20.000Z
|
tests/regression/RandomReg_10/ws_RandomReg_10_SVR_poly_mssql_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T15:05:33.000Z
|
2021-09-19T15:05:33.000Z
|
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("SVR_poly" , "RandomReg_10" , "mssql")
| 25.4
| 66
| 0.795276
|
78e0c68b9fb9df6388d7ae28d1cf4b382e73c027
| 3,810
|
py
|
Python
|
biped/include/left_arm.py
|
DIvyanshu-Goel/Biped
|
883d3e889319d612f3bf56efda856db25c68adb7
|
[
"CC0-1.0"
] | null | null | null |
biped/include/left_arm.py
|
DIvyanshu-Goel/Biped
|
883d3e889319d612f3bf56efda856db25c68adb7
|
[
"CC0-1.0"
] | null | null | null |
biped/include/left_arm.py
|
DIvyanshu-Goel/Biped
|
883d3e889319d612f3bf56efda856db25c68adb7
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import rospy
import time
import numpy as np
from std_msgs.msg import *
from math import *
from dynamixel_msgs.msg import JointState
from biped.msg import *
from biped.srv import *
#for details on motor ids see Data_Server.py
start_pos = [0, 0 ,0 ,0];
motorid_LA = [4,5,6,7];
update_rate = 50;
###########################################################################################################################
def left_arm(goal_pos,time_limit):
global start_pos;
motorLA1_response = motor_data_client(motorid_LA[0]);
motorLA2_response = motor_data_client(motorid_LA[1]);
motorLA3_response = motor_data_client(motorid_LA[2]);
motorLA4_response = motor_data_client(motorid_LA[3]);
start_pos = [motorLA1_response.current_pos,motorLA2_response.current_pos,motorLA3_response.current_pos,motorLA4_response.current_pos];
curr_pos = start_pos;
#handlers for motor publishers
LA1 = rospy.Publisher('/LA1_controller/command', Float64, queue_size=10);
LA2 = rospy.Publisher('/LA2_controller/command', Float64, queue_size=10);
LA3 = rospy.Publisher('/LA3_controller/command', Float64, queue_size=10);
LA4 = rospy.Publisher('/LA4_controller/command', Float64, queue_size=10);
#initialize node for the specific subpart
#rospy.init_node('Left_arm_node', anonymous=True);
rate = rospy.Rate(update_rate) # 50hz update rate
time.sleep(0.05); # make the system sleep a while
time_count = 0 ;
time_limit = time_limit * update_rate;
while (rospy.is_shutdown() == 0 and time_count <= time_limit ):
curr_pos = calculate_trajectory(time_count,start_pos,goal_pos,time_limit);
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left arm motor 1" %curr_pos[0] );
LA1.publish(curr_pos[0] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left arm motor 2" %curr_pos[1] );
LA2.publish(curr_pos[1] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left arm motor 3" %curr_pos[2] );
LA3.publish(curr_pos[2] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left arm motor 4" %curr_pos[3] );
LA4.publish(curr_pos[3] );
time_count = time_count + 1;
time.sleep(0.02);
###########################################################################################################################
def calculate_trajectory(time_count,start_pos,goal_pos,time_limit):
curr_position = list(start_pos);  # copy so repeated calls do not overwrite the caller's start_pos
curr_position[0] = start_pos[0] + ((goal_pos[0]-start_pos[0])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[1] = start_pos[1] + ((goal_pos[1]-start_pos[1])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[2] = start_pos[2] + ((goal_pos[2]-start_pos[2])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[3] = start_pos[3] + ((goal_pos[3]-start_pos[3])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
return(curr_position);
###########################################################################################################################
def motor_data_client(x):
rospy.wait_for_service('Fetch_Motor_data')
client = rospy.ServiceProxy('Fetch_Motor_data', Fetch_Motor_Data)
resp1 = client(x);
return (resp1);
###########################################################################################################################
if __name__ == '__main__':
try:
left_arm([0,1,1,1],1);
time.sleep(2);
left_arm([0,0,0,0],2);
time.sleep(2);
except rospy.ROSInterruptException:
pass
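# Illustrative sanity check for calculate_trajectory (plain Python, no ROS required):
# the profile start + (goal - start)/T * (t - (T/(2*pi)) * sin(2*pi*t/T)) starts exactly at
# start_pos and, up to the 3.14 approximation of pi used above, ends at goal_pos.
def _check_trajectory(T=100):
    start, goal = [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0]
    assert calculate_trajectory(0, list(start), goal, T) == start
    end = calculate_trajectory(T, list(start), goal, T)
    assert all(abs(e - g) < 1e-2 for e, g in zip(end, goal))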
| 44.823529
| 147
| 0.603675
|
3d9a249e4d77402626dfab0b47682f0f57576660
| 10,804
|
py
|
Python
|
accelbyte_py_sdk/api/platform/operations/subscription/cancel_subscription.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/platform/operations/subscription/cancel_subscription.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/platform/operations/subscription/cancel_subscription.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-platform-service (4.10.0)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import CancelRequest
from ...models import ErrorEntity
from ...models import SubscriptionInfo
class CancelSubscription(Operation):
"""Cancel a subscription (cancelSubscription)
Cancel a subscription. Only an ACTIVE subscription can be cancelled. To make sure the cancellation succeeds, cancel at least 1 day before the current period ends; otherwise the subscription may be charging or already charged.
If immediate is true, the subscription is terminated immediately; otherwise it runs until the end of the current billing cycle.
If force is true, errors are ignored when the subscription is in the middle of recurring charging.
Other detail info:
* Required permission : resource="ADMIN:NAMESPACE:{namespace}:USER:{userId}:SUBSCRIPTION", action=4 (UPDATE)
* Returns : cancelled subscription
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:USER:{userId}:SUBSCRIPTION [UPDATE]
Properties:
url: /platform/admin/namespaces/{namespace}/users/{userId}/subscriptions/{subscriptionId}/cancel
method: PUT
tags: ["Subscription"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH] or [BEARER_AUTH]
body: (body) OPTIONAL CancelRequest in body
namespace: (namespace) REQUIRED str in path
subscription_id: (subscriptionId) REQUIRED str in path
user_id: (userId) REQUIRED str in path
force: (force) OPTIONAL bool in query
Responses:
200: OK - SubscriptionInfo (successful operation)
404: Not Found - ErrorEntity (40141: Subscription [{subscriptionId}] does not exist)
409: Conflict - ErrorEntity (40171: Subscription [{subscriptionId}] is not active | 40172: Subscription [{subscriptionId}] is charging, waiting for payment notification)
"""
# region fields
_url: str = "/platform/admin/namespaces/{namespace}/users/{userId}/subscriptions/{subscriptionId}/cancel"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"], ["BEARER_AUTH"]]
_location_query: str = None
body: CancelRequest # OPTIONAL in [body]
namespace: str # REQUIRED in [path]
subscription_id: str # REQUIRED in [path]
user_id: str # REQUIRED in [path]
force: bool # OPTIONAL in [query]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
"query": self.get_query_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "subscription_id"):
result["subscriptionId"] = self.subscription_id
if hasattr(self, "user_id"):
result["userId"] = self.user_id
return result
def get_query_params(self) -> dict:
result = {}
if hasattr(self, "force"):
result["force"] = self.force
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: CancelRequest) -> CancelSubscription:
self.body = value
return self
def with_namespace(self, value: str) -> CancelSubscription:
self.namespace = value
return self
def with_subscription_id(self, value: str) -> CancelSubscription:
self.subscription_id = value
return self
def with_user_id(self, value: str) -> CancelSubscription:
self.user_id = value
return self
def with_force(self, value: bool) -> CancelSubscription:
self.force = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = CancelRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "subscription_id") and self.subscription_id:
result["subscriptionId"] = str(self.subscription_id)
elif include_empty:
result["subscriptionId"] = ""
if hasattr(self, "user_id") and self.user_id:
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
if hasattr(self, "force") and self.force:
result["force"] = bool(self.force)
elif include_empty:
result["force"] = False
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, SubscriptionInfo], Union[None, ErrorEntity, HttpResponse]]:
"""Parse the given response.
200: OK - SubscriptionInfo (successful operation)
404: Not Found - ErrorEntity (40141: Subscription [{subscriptionId}] does not exist)
409: Conflict - ErrorEntity (40171: Subscription [{subscriptionId}] is not active | 40172: Subscription [{subscriptionId}] is charging, waiting for payment notification)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return SubscriptionInfo.create_from_dict(content), None
if code == 404:
return None, ErrorEntity.create_from_dict(content)
if code == 409:
return None, ErrorEntity.create_from_dict(content)
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
subscription_id: str,
user_id: str,
body: Optional[CancelRequest] = None,
force: Optional[bool] = None,
) -> CancelSubscription:
instance = cls()
instance.namespace = namespace
instance.subscription_id = subscription_id
instance.user_id = user_id
if body is not None:
instance.body = body
if force is not None:
instance.force = force
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> CancelSubscription:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = CancelRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = CancelRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "subscriptionId" in dict_ and dict_["subscriptionId"] is not None:
instance.subscription_id = str(dict_["subscriptionId"])
elif include_empty:
instance.subscription_id = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
if "force" in dict_ and dict_["force"] is not None:
instance.force = bool(dict_["force"])
elif include_empty:
instance.force = False
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"subscriptionId": "subscription_id",
"userId": "user_id",
"force": "force",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": False,
"namespace": True,
"subscriptionId": True,
"userId": True,
"force": False,
}
# endregion static methods
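# Minimal usage sketch; the identifier values below are hypothetical samples, and actually
# sending the request is handled elsewhere in the SDK, not by this generated operation class.
def _example_cancel_subscription_usage():
    op = CancelSubscription.create(
        namespace="my-namespace",
        subscription_id="sub-123",
        user_id="user-456",
        force=True,
    )
    # The path and query parameters below are what get substituted into the PUT url above.
    params = op.get_all_params()
    assert params["path"]["subscriptionId"] == "sub-123"
    assert params["query"]["force"] is True
    return op.to_dict()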
| 33.7625
| 191
| 0.621066
|
158e35b9d74415b21f683b7015b7fa3c59a92827
| 2,299
|
py
|
Python
|
official/vision/detection/layers/det/pooler.py
|
aspnetcs/Models
|
640a0e0ed9c2b9f739f5070b8b41680678b5213a
|
[
"Apache-2.0"
] | 1
|
2021-01-22T05:52:38.000Z
|
2021-01-22T05:52:38.000Z
|
official/vision/detection/layers/det/pooler.py
|
aspnetcs/Models
|
640a0e0ed9c2b9f739f5070b8b41680678b5213a
|
[
"Apache-2.0"
] | null | null | null |
official/vision/detection/layers/det/pooler.py
|
aspnetcs/Models
|
640a0e0ed9c2b9f739f5070b8b41680678b5213a
|
[
"Apache-2.0"
] | 1
|
2021-09-19T13:12:18.000Z
|
2021-09-19T13:12:18.000Z
|
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import numpy as np
import megengine as mge
import megengine.functional as F
def roi_pool(
rpn_fms, rois, stride, pool_shape, roi_type="roi_align",
):
assert len(stride) == len(rpn_fms)
canonical_level = 4
canonical_box_size = 224
min_level = math.log2(stride[0])
max_level = math.log2(stride[-1])
num_fms = len(rpn_fms)
box_area = (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2])
level_assignments = F.floor(
canonical_level + F.log(box_area.sqrt() / canonical_box_size) / np.log(2)
)
level_assignments = F.minimum(level_assignments, max_level)
level_assignments = F.maximum(level_assignments, min_level)
level_assignments = level_assignments - min_level
# avoid empty assignment
level_assignments = F.concat(
[level_assignments, mge.tensor(np.arange(num_fms, dtype=np.int32))],
)
rois = F.concat([rois, mge.zeros((num_fms, rois.shapeof(-1)))])
pool_list, inds_list = [], []
for i in range(num_fms):
mask = level_assignments == i
_, inds = F.cond_take(mask == 1, mask)
level_rois = rois.ai[inds]
if roi_type == "roi_pool":
pool_fm = F.roi_pooling(
rpn_fms[i], level_rois, pool_shape, mode="max", scale=1.0 / stride[i]
)
elif roi_type == "roi_align":
pool_fm = F.roi_align(
rpn_fms[i],
level_rois,
pool_shape,
mode="average",
spatial_scale=1.0 / stride[i],
sample_points=2,
aligned=True,
)
pool_list.append(pool_fm)
inds_list.append(inds)
fm_order = F.concat(inds_list, axis=0)
fm_order = F.argsort(fm_order.reshape(1, -1))[1].reshape(-1)
pool_feature = F.concat(pool_list, axis=0)
pool_feature = pool_feature.ai[fm_order][:-num_fms]
return pool_feature
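# Worked example of the level assignment above (plain Python, illustrative values only):
# a box goes to floor(canonical_level + log2(sqrt(box_area) / canonical_box_size)),
# clamped to [log2(stride[0]), log2(stride[-1])] and shifted so it indexes into rpn_fms.
def _assign_level_example(box_area, strides=(4, 8, 16, 32),
                          canonical_level=4, canonical_box_size=224):
    import math
    min_level, max_level = math.log2(strides[0]), math.log2(strides[-1])
    level = math.floor(canonical_level + math.log2(math.sqrt(box_area) / canonical_box_size))
    return int(min(max(level, min_level), max_level) - min_level)
# _assign_level_example(224 * 224) -> 2 (the canonical box lands on the stride-16 level)
# _assign_level_example(56 * 56)   -> 0 (small boxes are pooled from the finest level)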
| 33.318841
| 88
| 0.626794
|
c368da6f15c2cff9e529291db66ba4b07d7f868c
| 943
|
py
|
Python
|
tests/test_yarn_backend.py
|
fhoering/mlflow-yarn
|
58fd7484dd885b94c081defef22ea05a3dfefeba
|
[
"Apache-2.0"
] | 7
|
2020-07-17T13:44:45.000Z
|
2020-09-18T08:44:25.000Z
|
tests/test_yarn_backend.py
|
fhoering/mlflow-yarn
|
58fd7484dd885b94c081defef22ea05a3dfefeba
|
[
"Apache-2.0"
] | 1
|
2020-08-06T16:57:38.000Z
|
2020-08-06T16:57:38.000Z
|
tests/test_yarn_backend.py
|
fhoering/mlflow-yarn
|
58fd7484dd885b94c081defef22ea05a3dfefeba
|
[
"Apache-2.0"
] | 3
|
2020-07-17T14:56:17.000Z
|
2022-02-21T11:33:30.000Z
|
import logging
import os
import pytest
import skein
from mlflow_yarn import yarn_backend, _upload_logs
_logger = logging.getLogger(__name__)
@pytest.mark.parametrize(
"cmd, expected_entry_point, expected_args",
[
("python -m my_package.my_module --param1 'Hello python' b",
"my_package.my_module",
["--param1", "Hello python", "b"]),
("python3 myscript.py arg",
"myscript.py",
["arg"]),
("python3.6 myscript.py arg1 arg2",
"myscript.py",
["arg1", "arg2"])
]
)
def test_try_split_cmd(cmd, expected_entry_point, expected_args):
entry_point, args = yarn_backend.try_split_cmd(cmd)
assert entry_point == expected_entry_point
assert args == expected_args
def test__extract_skein_container_name():
name = _upload_logs._extract_skein_container_name("skein_launcher_0")
assert name == "skein_launcher"
| 27.735294
| 74
| 0.654295
|
f85629658d58e030dca0b17f8a1ff12bb080fcb3
| 5,752
|
py
|
Python
|
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210714132537.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210714132537.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210714132537.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
"""Decorators
Recall the simple closure example we did which allowed us to maintain a count of how many times a function was called:
def counter(fn):
count = 0
def inner(*args, **kwargs): # using *args, **kwargs means we can call any function fn with any combination of positional and keyword arguments
nonlocal count
count += 1
print('Function {0} was called {1} times'.format(fn.__name__, count))
return fn(*args, **kwargs)
return inner
def add(a, b=0):
return a + b
add = counter(add)
result = add(1, 2) # Function add was called 1 times
# result = 3
print(result)
I essentially modified our add function by wrapping it inside another function that added some functionality to it
I can also say that we decorated our function add with the function counter
And I call counter a decorator function
In general a decorator function:
takes a function as an argument
returns a closure
the closure usually accepts any combination of parameters
runs some code in the inner function (closure)
the closure function calls the original function using the arguments passed to the closure
returns whatever is returned by that function call
Decorators and the @ symbol
In our previous example, we saw that the counter was a decorator and we could decorate our add function using: add = counter(add)
In general, if func is a decorator function, we decorate another function my_func using:
my_func = func(my_func)
This is so common that Python provides a convenient way of writing that:
@counter                           @func
def add(a, b):                     def my_func(...):
    return a + b                       ...

is the same as writing             is the same as writing

def add(a, b):                     def my_func(...):
    return a + b                       ...
add = counter(add)                 my_func = func(my_func)
Introspecting Decorated Functions
Let's use the same counter decorator:
def counter(fn):
    count = 0
    def inner(*args, **kwargs): # using *args, **kwargs means we can call any function fn with any combination of positional and keyword arguments
nonlocal count
count += 1
print('Function {0} was called {1} times'.format(fn.__name__, count))
return fn(*args, **kwargs)
return inner
"""
# @counter # if not commented out, python shows it is not defined
from itertools import count
def mult(a, b, c=1):
    # returns the product of three values. I could have written:
    return a * b * c  # mult = counter(mult)   (the same thing as @counter)
mult.__name__ # mult is now inner # The dunder 'name' property
help(mult) # Help on function inner in module __main__:
# inner(*args, **kwargs)
# we have lost our docstring, and even the original function signature
# even using the inspect module's signature does not yield better results
""" One approach to fixing this
We can try to fix this problem, at least for the docstring and function name as follows:
def counter(fn):
count = 0
def inner(*args, **kwargs):
nonlocal count
count += 1
print('Function {0} was called {1} times'.format(fn.__name__, count))
return fn(*args, **kwargs)
inner.__name__ = fn.__name__ # these two have been added in to change the function
inner.__doc__ = fn.__doc__ # these two have been added in to change the function
return inner
But this doesn't fix losing the function signature - doing so would be quite complicated
The functools.wraps function
The functools module has a wraps function that we can use to fix the metadata of our inner function in our decorator
from functools import wraps
in fact, the wraps function is itself a decorator
but, it needs to know what was our 'original' function - in this case fn
"""
from functools import wraps

def counter(fn):
    count = 0
    def inner(*args, **kwargs):
        nonlocal count
        count += 1
        print(count)
        return fn(*args, **kwargs)
    inner = wraps(fn)(inner)
    return inner

# which we would then apply as:
# @counter
# def mult(a: int, b: int, c: int = 1):
#     """returns the product of three values"""
#     return a * b * c
# The same thing, written with wraps as a decorator:
def counter(fn):
    count = 0
    @wraps(fn)
    def inner(*args, **kwargs):
        nonlocal count
        count += 1
        print(count)
        return fn(*args, **kwargs)
    return inner
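# Illustrative check of the claim above (standard-library behaviour only): after decorating
# with the wraps-based counter, the wrapped function keeps its own metadata instead of
# reporting the name and docstring of 'inner'. The function below is a made-up example.
@counter
def product(a, b, c=1):
    """Return the product of three values."""
    return a * b * c

assert product.__name__ == "product"
assert product.__doc__ == "Return the product of three values."
product(2, 3, 4)  # prints 1 (the call count) and returns 24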
| 37.594771
| 205
| 0.513387
|
8c8c100107d0e4989b6d0895afb2c0a954cbcace
| 1,991
|
py
|
Python
|
xgb_attributes.py
|
lihuinian/lncRNAIdentification
|
2b2b51dfbbe1608f50d6fc75c0bc08fcd5daace0
|
[
"MIT"
] | 1
|
2021-05-15T08:28:33.000Z
|
2021-05-15T08:28:33.000Z
|
xgb_attributes.py
|
lihuinian/lncRNAIdentification
|
2b2b51dfbbe1608f50d6fc75c0bc08fcd5daace0
|
[
"MIT"
] | 1
|
2021-06-30T16:22:48.000Z
|
2021-06-30T16:22:48.000Z
|
xgb_attributes.py
|
lihuinian/lncRNAIdentification
|
2b2b51dfbbe1608f50d6fc75c0bc08fcd5daace0
|
[
"MIT"
] | 1
|
2021-08-12T02:25:52.000Z
|
2021-08-12T02:25:52.000Z
|
import pandas as pd
import xgboost as xgb
import operator
import matplotlib.pyplot as plt
class XGBAttributes:
def __init__(self, data,base_name):
self.data = data
self.base_name = base_name
def create_feature_map(self,features,feature_map):
outfile = open(feature_map,'w')
i = 0
for feat in features:
outfile.write('{0}\t{1}\tq\n'.format(i,feat))
i = i+1
outfile.close()
def attributes(self):
params = {
'booster':'gbtree',
'objective': 'binary:logistic',
'eval_metric': 'auc',
'max_depth':4,
'lambda':10,
'subsample':0.75,
'colsample_bytree':0.75,
'min_child_weight':2,
'eta': 0.025,
'seed':0,
'nthread':8,
'silent':1
}
round = 5
y = self.data["class"]
X = self.data.drop(["class","id"],1)
feature_map = './material/%s_xgb.map' % self.base_name
xgtrain = xgb.DMatrix(X,label=y)
bst = xgb.train(params,xgtrain,num_boost_round=round)
features = [x for x in self.data.columns if x not in ["id","class"]]
self.create_feature_map(features,feature_map)
importance = bst.get_fscore(fmap=feature_map)
importance = sorted(importance.items(),key = operator.itemgetter(1))
df = pd.DataFrame(importance,columns=['feature','fscore'])
df['fscore'] = df['fscore'] / df['fscore'].sum()
df.to_csv("./material/%s_feature_importance.csv" % self.base_name, index=False)
# df.plot(kind='barh', x = 'feature',y = 'fscore', legend=False,figsize=(6,10))
# plt.title('%s Feature Importance' % self.base_name)
# plt.xlabel('relative importance')
# figure_name = './material/%s_feature_weight.eps' % self.base_name
# plt.savefig(figure_name, format='eps')
return df['feature'].tolist()
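# Hypothetical usage sketch with synthetic data (assumes xgboost is installed and a
# ./material/ directory exists for the feature map and CSV that attributes() writes):
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    demo = pd.DataFrame(rng.rand(200, 4), columns=["f1", "f2", "f3", "f4"])
    demo["id"] = range(len(demo))
    demo["class"] = (demo["f1"] > 0.5).astype(int)
    ranked = XGBAttributes(demo, "demo").attributes()
    print(ranked[-2:])  # the two most important features (list is sorted least -> most)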
| 32.112903
| 87
| 0.566047
|
636ffd67e525f497deb4fa744ef81b83b7c60e5c
| 542
|
py
|
Python
|
modules/gltf/config.py
|
you-win/godot
|
6658f5c4754e1e9957e0dcd53049b4be6610b1d4
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
modules/gltf/config.py
|
you-win/godot
|
6658f5c4754e1e9957e0dcd53049b4be6610b1d4
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
modules/gltf/config.py
|
you-win/godot
|
6658f5c4754e1e9957e0dcd53049b4be6610b1d4
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
def can_build(env, platform):
return not env["disable_3d"]
def configure(env):
pass
def get_doc_classes():
return [
"EditorSceneImporterGLTF",
"GLTFAccessor",
"GLTFAnimation",
"GLTFBufferView",
"GLTFCamera",
"GLTFDocument",
"GLTFLight",
"GLTFMesh",
"GLTFNode",
"GLTFSkeleton",
"GLTFSkin",
"GLTFSpecGloss",
"GLTFState",
"GLTFTexture",
"PackedSceneGLTF",
]
def get_doc_path():
return "doc_classes"
| 17.483871
| 34
| 0.54797
|
e21da40ffdaf3fb850e569772278ae438347fd59
| 24,395
|
py
|
Python
|
models/local_aggregation_operators.py
|
PointCloudYC/PointNet-modern.pytorch
|
1a0b373fcb21f24b667a0bb4831211da5b92f98d
|
[
"Apache-2.0"
] | 2
|
2021-05-20T14:36:23.000Z
|
2022-02-01T11:33:56.000Z
|
models/local_aggregation_operators.py
|
PointCloudYC/PointNet-modern.pytorch
|
1a0b373fcb21f24b667a0bb4831211da5b92f98d
|
[
"Apache-2.0"
] | null | null | null |
models/local_aggregation_operators.py
|
PointCloudYC/PointNet-modern.pytorch
|
1a0b373fcb21f24b667a0bb4831211da5b92f98d
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'ops', 'pt_custom_ops'))
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import create_kernel_points, radius_gaussian, weight_variable
from pt_utils import MaskedQueryAndGroup
class PosPool(nn.Module):
def __init__(self, in_channels, out_channels, radius, nsample, config):
"""A PosPool operator for local aggregation
Args:
in_channels: input channels.
out_channels: output channels.
radius: ball query radius
nsample: neighborhood limit.
config: config file
"""
super(PosPool, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.radius = radius
self.nsample = nsample
self.position_embedding = config.pospool.position_embedding
self.reduction = config.pospool.reduction
self.output_conv = config.pospool.output_conv or (self.in_channels != self.out_channels)
self.grouper = MaskedQueryAndGroup(radius, nsample, use_xyz=False, ret_grouped_xyz=True, normalize_xyz=True)
if self.output_conv:
self.out_conv = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True))
else:
self.out_transform = nn.Sequential(
nn.BatchNorm1d(out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True))
def forward(self, query_xyz, support_xyz, query_mask, support_mask, support_features):
"""
Args:
query_xyz: [B, N1, 3], query points.
support_xyz: [B, N2, 3], support points.
query_mask: [B, N1], mask for query points.
support_mask: [B, N2], mask for support points.
support_features: [B, C_in, N2], input features of support points.
Returns:
output features of query points: [B, C_out, N1]
"""
B = query_xyz.shape[0]
C = support_features.shape[1]
npoint = query_xyz.shape[1]
neighborhood_features, relative_position, neighborhood_mask = self.grouper(query_xyz, support_xyz, query_mask,
support_mask, support_features)
if self.position_embedding == 'xyz':
position_embedding = torch.unsqueeze(relative_position, 1)
aggregation_features = neighborhood_features.view(B, C // 3, 3, npoint, self.nsample)
aggregation_features = position_embedding * aggregation_features # (B, C//3, 3, npoint, nsample)
aggregation_features = aggregation_features.view(B, C, npoint, self.nsample) # (B, C, npoint, nsample)
elif self.position_embedding == 'sin_cos':
feat_dim = C // 6
wave_length = 1000
alpha = 100
feat_range = torch.arange(feat_dim, dtype=torch.float32).to(query_xyz.device) # (feat_dim, )
dim_mat = torch.pow(1.0 * wave_length, (1.0 / feat_dim) * feat_range) # (feat_dim, )
position_mat = torch.unsqueeze(alpha * relative_position, -1) # (B, 3, npoint, nsample, 1)
div_mat = torch.div(position_mat, dim_mat) # (B, 3, npoint, nsample, feat_dim)
sin_mat = torch.sin(div_mat) # (B, 3, npoint, nsample, feat_dim)
cos_mat = torch.cos(div_mat) # (B, 3, npoint, nsample, feat_dim)
position_embedding = torch.cat([sin_mat, cos_mat], -1) # (B, 3, npoint, nsample, 2*feat_dim)
position_embedding = position_embedding.permute(0, 1, 4, 2, 3).contiguous()
position_embedding = position_embedding.view(B, C, npoint, self.nsample) # (B, C, npoint, nsample)
aggregation_features = neighborhood_features * position_embedding # (B, C, npoint, nsample)
else:
raise NotImplementedError(f'Position Embedding {self.position_embedding} not implemented in PosPool')
if self.reduction == 'max':
out_features = F.max_pool2d(
aggregation_features, kernel_size=[1, self.nsample]
)
out_features = torch.squeeze(out_features, -1)
elif self.reduction == 'avg' or self.reduction == 'mean':
feature_mask = neighborhood_mask + (1 - query_mask[:, :, None])
feature_mask = feature_mask[:, None, :, :]
aggregation_features *= feature_mask
out_features = aggregation_features.sum(-1)
neighborhood_num = feature_mask.sum(-1)
out_features /= neighborhood_num
elif self.reduction == 'sum':
feature_mask = neighborhood_mask + (1 - query_mask[:, :, None])
feature_mask = feature_mask[:, None, :, :]
aggregation_features *= feature_mask
out_features = aggregation_features.sum(-1)
else:
raise NotImplementedError(f'Reduction {self.reduction} not implemented in PosPool ')
if self.output_conv:
out_features = self.out_conv(out_features)
else:
out_features = self.out_transform(out_features)
return out_features
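# Small, self-contained shape check for the 'sin_cos' branch above (illustrative sizes only):
def _sin_cos_embedding_shape_demo(B=2, C=12, npoint=5, nsample=16, alpha=100, wave_length=1000):
    # feat_dim = C // 6, so 3 coordinates x (sin + cos) x feat_dim recombine into C channels.
    relative_position = torch.rand(B, 3, npoint, nsample)
    feat_dim = C // 6
    feat_range = torch.arange(feat_dim, dtype=torch.float32)
    dim_mat = torch.pow(1.0 * wave_length, (1.0 / feat_dim) * feat_range)
    div_mat = torch.div(torch.unsqueeze(alpha * relative_position, -1), dim_mat)
    embedding = torch.cat([torch.sin(div_mat), torch.cos(div_mat)], -1)
    embedding = embedding.permute(0, 1, 4, 2, 3).contiguous().view(B, C, npoint, nsample)
    assert embedding.shape == (B, C, npoint, nsample)
    return embedding.shape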
class AdaptiveWeight(nn.Module):
def __init__(self, in_channels, out_channels, radius, nsample, config):
"""A AdaptiveWeight operator for local aggregation
Args:
in_channels: input channels.
out_channels: output channels.
radius: ball query radius
nsample: neighborhood limit.
config: config file
"""
super(AdaptiveWeight, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.nsample = nsample
self.weight_type = config.adaptive_weight.weight_type
self.weight_to_channels = {'dp': 3,
'df': in_channels,
'fj': in_channels,
'dp_df': 3 + in_channels,
'dp_fj': 3 + in_channels,
'fi_df': 2 * in_channels,
'dp_fi_df': 3 + 2 * in_channels,
'rscnn': 10}
self.weight_input_channels = self.weight_to_channels[self.weight_type]
self.num_mlps = config.adaptive_weight.num_mlps
self.shared_channels = config.adaptive_weight.shared_channels
self.weight_softmax = config.adaptive_weight.weight_softmax
self.reduction = config.adaptive_weight.reduction
self.output_conv = config.adaptive_weight.output_conv or (self.in_channels != self.out_channels)
self.grouper = MaskedQueryAndGroup(radius, nsample, use_xyz=False, ret_grouped_xyz=True, normalize_xyz=True)
self.mlps = nn.Sequential()
self.mlps.add_module('conv0',
nn.Conv2d(self.weight_input_channels,
self.in_channels // self.shared_channels,
kernel_size=1))
for i in range(self.num_mlps - 1):
self.mlps.add_module(f'relu{i}', nn.ReLU(inplace=True))
self.mlps.add_module(f'conv{i + 1}',
nn.Conv2d(self.in_channels // self.shared_channels,
self.in_channels // self.shared_channels,
kernel_size=1))
if self.output_conv:
self.out_conv = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True))
else:
self.out_transform = nn.Sequential(
nn.BatchNorm1d(out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True))
def forward(self, query_xyz, support_xyz, query_mask, support_mask, support_features):
"""
Args:
query_xyz: [B, N1, 3], query points.
support_xyz: [B, N2, 3], support points.
query_mask: [B, N1], mask for query points.
support_mask: [B, N2], mask for support points.
support_features: [B, C_in, N2], input features of support points.
Returns:
output features of query points: [B, C_out, N1]
"""
B = query_xyz.shape[0]
C = support_features.shape[1]
npoint = query_xyz.shape[1]
neighborhood_features, relative_position, neighborhood_mask = self.grouper(query_xyz, support_xyz, query_mask,
support_mask, support_features)
if self.weight_type == 'dp':
conv_weight = self.mlps(relative_position) # (B, C//S, npoint, nsample)
conv_weight = torch.unsqueeze(conv_weight, 2) # (B, C//S, 1, npoint, nsample)
else:
raise NotImplementedError(f'Weight Type {self.weight_type} not implemented in AdaptiveWeight')
aggregation_features = neighborhood_features.view(B, C // self.shared_channels, self.shared_channels,
npoint, self.nsample)
aggregation_features = aggregation_features * conv_weight
aggregation_features = aggregation_features.view(B, C, npoint, self.nsample)
if self.reduction == 'max':
out_features = F.max_pool2d(
aggregation_features, kernel_size=[1, self.nsample]
)
out_features = torch.squeeze(out_features, -1)
elif self.reduction == 'avg' or self.reduction == 'mean':
feature_mask = neighborhood_mask + (1 - query_mask[:, :, None])
feature_mask = feature_mask[:, None, :, :]
aggregation_features *= feature_mask
out_features = aggregation_features.sum(-1)
neighborhood_num = feature_mask.sum(-1)
out_features /= neighborhood_num
elif self.reduction == 'sum':
feature_mask = neighborhood_mask + (1 - query_mask[:, :, None])
feature_mask = feature_mask[:, None, :, :]
aggregation_features *= feature_mask
out_features = aggregation_features.sum(-1)
else:
raise NotImplementedError(f'Reduction {self.reduction} not implemented in PosPool ')
if self.output_conv:
out_features = self.out_conv(out_features)
else:
out_features = self.out_transform(out_features)
return out_features
class PointWiseMLP(nn.Module):
def __init__(self, in_channels, out_channels, radius, nsample, config):
"""A PointWiseMLP operator for local aggregation
Args:
in_channels: input channels.
out_channels: output channels.
radius: ball query radius
nsample: neighborhood limit.
config: config file
"""
super(PointWiseMLP, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.nsample = nsample
self.feature_type = config.pointwisemlp.feature_type
self.feature_input_channels = {'dp_fj': 3 + in_channels,
'dp': 3,
'dp_fi': 3+in_channels,
'fi_df': 2 * in_channels,
'dp_fi_df': 3 + 2 * in_channels}
self.feature_input_channels = self.feature_input_channels[self.feature_type]
self.num_mlps = config.pointwisemlp.num_mlps
self.reduction = config.pointwisemlp.reduction
self.grouper = MaskedQueryAndGroup(radius, nsample, use_xyz=False, ret_grouped_xyz=True, normalize_xyz=True)
self.mlps = nn.Sequential()
if self.num_mlps == 1:
self.mlps.add_module('conv0', nn.Sequential(
nn.Conv2d(self.feature_input_channels, self.out_channels, kernel_size=1, bias=False), # why 1x1 conv2d not conv1d? input is 4D tensor (center pt + nb points so is B C N M), M is the number of nb pts
nn.BatchNorm2d(self.out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True)))
else:
mfdim = max(self.in_channels // 2, 9)
self.mlps.add_module('conv0', nn.Sequential(
nn.Conv2d(self.feature_input_channels, mfdim, kernel_size=1, bias=False),
nn.BatchNorm2d(mfdim, momentum=config.bn_momentum),
nn.ReLU(inplace=True)))
for i in range(self.num_mlps - 2):
self.mlps.add_module(f'conv{i + 1}', nn.Sequential(
nn.Conv2d(mfdim, mfdim, kernel_size=1, bias=False),
nn.BatchNorm2d(mfdim, momentum=config.bn_momentum),
nn.ReLU(inplace=True)))
self.mlps.add_module(f'conv{self.num_mlps - 1}', nn.Sequential(
nn.Conv2d(mfdim, self.out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(self.out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True)))
def forward(self, query_xyz, support_xyz, query_mask, support_mask, support_features):
"""
Args:
query_xyz: [B, N1, 3], query points.
support_xyz: [B, N2, 3], support points.
query_mask: [B, N1], mask for query points.
support_mask: [B, N2], mask for support points.
support_features: [B, C_in, N2], input features of support points.
Returns:
            output features of query points: [B, C_out, N1]
"""
neighborhood_features, relative_position, neighborhood_mask = self.grouper(query_xyz, support_xyz, query_mask,
support_mask, support_features) # BxCx npoint x nsample
if self.feature_type == 'dp_fi_df':
# B C N M
            center_features = torch.unsqueeze(neighborhood_features[..., 0], -1).repeat([1, 1, 1, self.nsample])  # tile the first grouped (center) feature across the neighborhood
            relative_features = neighborhood_features - center_features
            local_input_features = torch.cat([relative_position, center_features, relative_features], 1)  # [B, 2*C+3, npoint, nsample]
            aggregation_features = self.mlps(local_input_features)  # Conv2d MLP over the 4D tensor [B, 2*C+3, npoint, nsample]
elif self.feature_type == 'dp':
            center_features = torch.unsqueeze(neighborhood_features[..., 0], -1).repeat([1, 1, 1, self.nsample])  # tiled center feature (not used by the 'dp' variant)
            local_input_features = relative_position  # [B, 3, npoint, nsample]
            aggregation_features = self.mlps(local_input_features)  # Conv2d MLP over the 4D tensor [B, 3, npoint, nsample]
        elif self.feature_type == 'dp_fi':
            center_features = torch.unsqueeze(neighborhood_features[..., 0], -1).repeat([1, 1, 1, self.nsample])  # tile the first grouped (center) feature across the neighborhood
            local_input_features = torch.cat([relative_position, center_features], 1)  # [B, C+3, npoint, nsample]
            aggregation_features = self.mlps(local_input_features)  # Conv2d MLP over the 4D tensor [B, C+3, npoint, nsample]
elif self.feature_type == 'rscnn':
# TODO
pass
# center_features = torch.unsqueeze(neighborhood_features[..., 0], -1).repeat([1, 1, 1, self.nsample]) # amazing code!!!
# local_input_features = torch.cat([relative_position, center_features], 1) # B (2*C+3) N M
# aggregation_features = self.mlps(local_input_features) # why conv2d? because shape is 4D (e.g. BxC x npoint x nsample)
else:
raise NotImplementedError(f'Feature Type {self.feature_type} not implemented in PointWiseMLP') # TODO: add other types
if self.reduction == 'max':
out_features = F.max_pool2d(
aggregation_features, kernel_size=[1, self.nsample]
)
out_features = torch.squeeze(out_features, -1)
# TODO:
elif self.reduction == 'avg' or self.reduction == 'mean':
feature_mask = neighborhood_mask + (1 - query_mask[:, :, None])
feature_mask = feature_mask[:, None, :, :]
aggregation_features *= feature_mask
out_features = aggregation_features.sum(-1)
neighborhood_num = feature_mask.sum(-1)
out_features /= neighborhood_num
elif self.reduction == 'sum':
feature_mask = neighborhood_mask + (1 - query_mask[:, :, None])
feature_mask = feature_mask[:, None, :, :]
aggregation_features *= feature_mask
out_features = aggregation_features.sum(-1)
else:
raise NotImplementedError(f'Reduction {self.reduction} not implemented in PointWiseMLP')
return out_features
class PseudoGrid(nn.Module):
def __init__(self, in_channels, out_channels, radius, nsample, config):
"""A PseudoGrid operator for local aggregation
Args:
in_channels: input channels.
out_channels: output channels.
radius: ball query radius
nsample: neighborhood limit.
config: config file
"""
super(PseudoGrid, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.radius = radius
self.nsample = nsample
self.KP_influence = config.pseudo_grid.KP_influence
self.num_kernel_points = config.pseudo_grid.num_kernel_points
self.convolution_mode = config.pseudo_grid.convolution_mode
self.output_conv = config.pseudo_grid.output_conv or (self.in_channels != self.out_channels)
# create kernel points
KP_extent = config.pseudo_grid.KP_extent
fixed_kernel_points = config.pseudo_grid.fixed_kernel_points
density_parameter = config.density_parameter
self.extent = 2 * KP_extent * radius / density_parameter
K_radius = 1.5 * self.extent
K_points_numpy = create_kernel_points(K_radius,
self.num_kernel_points,
num_kernels=1,
dimension=3,
fixed=fixed_kernel_points)
K_points_numpy = K_points_numpy.reshape((self.num_kernel_points, 3))
self.register_buffer('K_points', torch.from_numpy(K_points_numpy).type(torch.float32))
self.grouper = MaskedQueryAndGroup(radius, nsample, use_xyz=False, ret_grouped_xyz=True, normalize_xyz=False)
self.kernel_weights = weight_variable([self.num_kernel_points, in_channels])
if self.output_conv:
self.out_conv = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True))
else:
self.out_transform = nn.Sequential(
nn.BatchNorm1d(out_channels, momentum=config.bn_momentum),
nn.ReLU(inplace=True))
def forward(self, query_xyz, support_xyz, query_mask, support_mask, support_features):
"""
Args:
query_xyz: [B, N1, 3], query points.
support_xyz: [B, N2, 3], support points.
query_mask: [B, N1], mask for query points.
support_mask: [B, N2], mask for support points.
support_features: [B, C_in, N2], input features of support points.
Returns:
            output features of query points: [B, C_out, N1]
"""
B = query_xyz.shape[0]
C = support_features.shape[1]
npoint = query_xyz.shape[1]
neighborhood_features, relative_position, neighborhood_mask = self.grouper(query_xyz, support_xyz, query_mask,
support_mask, support_features)
relative_position = torch.unsqueeze(relative_position.permute(0, 2, 3, 1), 3)
relative_position = relative_position.repeat([1, 1, 1, self.num_kernel_points, 1])
# Get Kernel point influences [B, N, K, M]
differences = relative_position - self.K_points
sq_distances = torch.sum(differences ** 2, -1)
if self.KP_influence == 'constant':
# Every point get an influence of 1.
all_weights = torch.ones_like(sq_distances)
all_weights = all_weights.permute(0, 1, 3, 2)
elif self.KP_influence == 'linear':
# Influence decrease linearly with the distance, and get to zero when d = KP_extent.
all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.extent, min=0.0)
all_weights = all_weights.permute(0, 1, 3, 2)
elif self.KP_influence == 'gaussian':
# Influence in gaussian of the distance.
sigma = self.extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma)
all_weights = all_weights.permute(0, 1, 3, 2)
else:
raise ValueError('Unknown influence function type (config.KP_influence)')
# Mask padding points
feature_mask = neighborhood_mask + (1 - query_mask[:, :, None]) # B, N, M
all_weights *= feature_mask[:, :, None, :] # B, N, K, M
if self.convolution_mode != 'sum':
raise NotImplementedError(f"convolution_mode:{self.convolution_mode} not support in PseudoGrid")
# get features for each kernel point
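        # for every query point: weighted_features[k, c] = sum_m influence[k, m] * f[m, c];
        # each kernel point k then applies its own per-channel weight and the K
        # contributions are summed: out[c] = sum_k kernel_weights[k, c] * weighted_features[k, c]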
all_weights = all_weights.view(-1, self.num_kernel_points, self.nsample)
neighborhood_features = neighborhood_features.permute(0, 2, 3, 1).contiguous().view(-1, self.nsample, C)
        weighted_features = torch.bmm(all_weights, neighborhood_features)  # [B*N, K, M] x [B*N, M, C] -> [B*N, K, C]
kernel_outputs = weighted_features * self.kernel_weights # [B*N, K, C]
out_features = torch.sum(kernel_outputs, 1).view(B, npoint, C).transpose(1, 2)
if self.output_conv:
out_features = self.out_conv(out_features)
else:
out_features = self.out_transform(out_features)
return out_features
class LocalAggregation(nn.Module):
def __init__(self, in_channels, out_channels, radius, nsample, config):
"""LocalAggregation operators
Args:
in_channels: input channels.
out_channels: output channels.
radius: ball query radius
nsample: neighborhood limit.
config: config file
"""
super(LocalAggregation, self).__init__()
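        # the operators defined in this file read, among others: config.bn_momentum,
        # config.density_parameter (PseudoGrid), config.pointwisemlp.{feature_type, num_mlps, reduction}
        # and config.pseudo_grid.{KP_influence, num_kernel_points, convolution_mode,
        # output_conv, KP_extent, fixed_kernel_points}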
if config.local_aggregation_type == 'pospool':
self.local_aggregation_operator = PosPool(in_channels, out_channels, radius, nsample, config)
elif config.local_aggregation_type == 'adaptive_weight':
self.local_aggregation_operator = AdaptiveWeight(in_channels, out_channels, radius, nsample, config)
elif config.local_aggregation_type == 'pointwisemlp':
self.local_aggregation_operator = PointWiseMLP(in_channels, out_channels, radius, nsample, config)
elif config.local_aggregation_type == 'pseudo_grid':
self.local_aggregation_operator = PseudoGrid(in_channels, out_channels, radius, nsample, config)
else:
raise NotImplementedError(f'LocalAggregation {config.local_aggregation_type} not implemented')
def forward(self, query_xyz, support_xyz, query_mask, support_mask, support_features):
"""
Args:
query_xyz: [B, N1, 3], query points.
support_xyz: [B, N2, 3], support points.
query_mask: [B, N1], mask for query points.
support_mask: [B, N2], mask for support points.
support_features: [B, C_in, N2], input features of support points.
Returns:
            output features of query points: [B, C_out, N1]
"""
return self.local_aggregation_operator(query_xyz, support_xyz, query_mask, support_mask, support_features)
| 50.612033
| 214
| 0.609797
|
88d8d242e3edac4cb09f788fcb233cf9e1a0a78d
| 17,742
|
py
|
Python
|
samples/NGPF/SDN/NGPF-OFSwitch/ngpf_ofswitch.py
|
vlad-shulika/ixnetwork-api-py
|
2ed71d52dd210a2d08250256c3b198a9c7c67dca
|
[
"MIT"
] | 3
|
2018-12-04T20:06:13.000Z
|
2022-02-04T01:28:45.000Z
|
samples/NGPF/SDN/NGPF-OFSwitch/ngpf_ofswitch.py
|
vlad-shulika/ixnetwork-api-py
|
2ed71d52dd210a2d08250256c3b198a9c7c67dca
|
[
"MIT"
] | 3
|
2018-05-23T17:32:56.000Z
|
2020-08-26T13:00:42.000Z
|
samples/NGPF/SDN/NGPF-OFSwitch/ngpf_ofswitch.py
|
vlad-shulika/ixnetwork-api-py
|
2ed71d52dd210a2d08250256c3b198a9c7c67dca
|
[
"MIT"
] | 9
|
2018-12-04T20:09:24.000Z
|
2021-05-12T10:28:05.000Z
|
# -*- coding: cp1252 -*-
#!/usr/bin/env python
################################################################################
# #
# Copyright 1997 - 2019 by IXIA Keysight #
# All Rights Reserved. #
# #
################################################################################
################################################################################
# #
# LEGAL NOTICE: #
# ============== #
# The following code and documentation (hereinafter "the script") is an #
# example script for demonstration purposes only. #
# The script is not a standard commercial product offered by Ixia and have #
# been developed and is being provided for use only as indicated herein. The #
# script [and all modifications enhancements and updates thereto (whether #
# made by Ixia and/or by the user and/or by a third party)] shall at all times #
# remain the property of Ixia. #
# #
# Ixia does not warrant (i) that the functions contained in the script will #
# meet the users requirements or (ii) that the script will be without #
# omissions or error-free. #
# THE SCRIPT IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND AND IXIA #
# DISCLAIMS ALL WARRANTIES EXPRESS IMPLIED STATUTORY OR OTHERWISE #
# INCLUDING BUT NOT LIMITED TO ANY WARRANTY OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE OR OF NON-INFRINGEMENT. #
# THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE SCRIPT IS WITH THE #
# USER. #
# IN NO EVENT SHALL IXIA BE LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING #
# OUT OF THE USE OF OR THE INABILITY TO USE THE SCRIPT OR ANY PART THEREOF #
# INCLUDING BUT NOT LIMITED TO ANY LOST PROFITS LOST BUSINESS LOST OR #
# DAMAGED DATA OR SOFTWARE OR ANY INDIRECT INCIDENTAL PUNITIVE OR #
# CONSEQUENTIAL DAMAGES EVEN IF IXIA HAS BEEN ADVISED OF THE POSSIBILITY OF #
# SUCH DAMAGES IN ADVANCE. #
# Ixia will not be required to provide any software maintenance or support #
# services of any kind (e.g. any error corrections) in connection with the #
# script or any part thereof. The user acknowledges that although Ixia may #
# from time to time and in its sole discretion provide maintenance or support #
# services for the script any such services are subject to the warranty and #
# damages limitations set forth herein and will not obligate Ixia to provide #
# any additional maintenance or support services. #
# #
################################################################################
################################################################################
# #
# Description: #
#    This script intends to demonstrate how to use NGPF OpenFlow Switch API    #
#    It will create 1 topology of OpenFlow Switch, it will start the           #
# emulation and then it will retrieve and display few statistics
# It will also check detailed learned info and learned info after sending on#
# demand message #
################################################################################
import sys
import time
################################################################################
# Either feed the ixNetwork library path in the sys.path as below, or put the #
# IxNetwork.py file somewhere else where we python can autoload it #
# "IxNetwork.py" is available in <IxNetwork_installer_path>\API\Python #
################################################################################
sys.path.append(r'C:\Program Files (x86)\Ixia\IxNetwork\8.10-EA\API\Python')
import IxNetwork
print("loaded successfully")
#from lib import IxNetwork
#import time
class NgpfOpenFlowSwitch(object):
################################################################################
# Connecting to IxTcl server and creating new config                          #
################################################################################
def __init__(self, ix_tcl_server, ix_tcl_port, ix_version="8.10"):
ixNet = IxNetwork.IxNet()
print("connecting to IxNetwork client")
ixNet.connect(ix_tcl_server, '-port', ix_tcl_port, '-version', ix_version,
'-setAttribute', 'strict')
# cleaning up the old configfile, and creating an empty config
print("cleaning up the old configfile, and creating an empty config")
ixNet.execute('newConfig')
self.ixNet = ixNet
self.root = ixNet.getRoot()
def assignPorts(self, realPort1):
chassis1 = realPort1[0]
card1 = realPort1[1]
port1 = realPort1[2]
root = self.ixNet.getRoot()
vport1 = self.ixNet.add(root, 'vport')
self.ixNet.commit()
vport1 = self.ixNet.remapIds(vport1)[0]
chassisObj1 = self.ixNet.add(root + '/availableHardware', 'chassis')
self.ixNet.setAttribute(chassisObj1, '-hostname', chassis1)
self.ixNet.commit()
chassisObj1 = self.ixNet.remapIds(chassisObj1)[0]
cardPortRef1 = chassisObj1 + '/card:%s/port:%s' % (card1, port1)
self.ixNet.setMultiAttribute(vport1, '-connectedTo', cardPortRef1,
'-rxMode', 'captureAndMeasure', '-name', 'Ethernet - 001')
self.ixNet.commit()
################################################################################
# Start protocol and check statistics #
################################################################################
def start_protocol_check_stats(self):
print("Starting protocols and waiting for 45 seconds for protocols to come up")
self.ixNet.execute('startAllProtocols')
time.sleep(45)
print ("Fetching all Protocol Summary Stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"Protocols Summary"/page'
statcap = self.ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in self.ixNet.getAttribute(viewPage, '-rowValues'):
for statVal in statValList:
print("***************************************************")
index = 0
for satIndv in statVal:
print("%-30s:%s" % (statcap[index], satIndv))
index = index + 1
print("***************************************************")
print ("Verifying OpenFlow Switch Per Port stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"OF Switch Per Port"/page'
statcap = self.ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in self.ixNet.getAttribute(viewPage, '-rowValues'):
for statVal in statValList:
print("***************************************************")
index = 0
for satIndv in statVal:
print("%-30s:%s" % (statcap[index], satIndv))
index = index + 1
print("***************************************************")
def on_the_fly(self, switch_disable_enable):
        # disable the switch on the fly
        ofSwitchActive = self.ixNet.getAttribute(switch_disable_enable, '-active')
        swActive = self.ixNet.add(ofSwitchActive, 'overlay')
        self.ixNet.setMultiAttribute(swActive, '-value', 'false')
        self.ixNet.commit()
globalObj = self.ixNet.getRoot() + '/globals'
topology = globalObj + '/topology'
print ("Applying changes on the fly")
try:
self.ixNet.execute('applyOnTheFly', topology)
except:
print("error in applying on the fly change")
time.sleep(10)
        # re-enable the switch on the fly
        ofSwitchActive = self.ixNet.getAttribute(switch_disable_enable, '-active')
        swActive = self.ixNet.add(ofSwitchActive, 'overlay')
        self.ixNet.setMultiAttribute(swActive, '-value', 'true')
        self.ixNet.commit()
globalObj = self.ixNet.getRoot() + '/globals'
topology = globalObj + '/topology'
print ("Applying changes on the fly")
try:
self.ixNet.execute('applyOnTheFly', topology)
except:
print("error in applying on the fly change")
time.sleep(10)
def on_the_fly_port_number_ethernetaddress(self, sw_port):
EthernetDestVal = self.ixNet.getAttribute(sw_port, '-etherAddr')
print (EthernetDestVal)
val = self.ixNet.getAttribute(EthernetDestVal, '-values')[0]
print (val)
self.ixNet.setMultiAttribute(EthernetDestVal, '-clearOverlays', 'false')
self.ixNet.commit()
EthernetDestValues = self.ixNet.add(EthernetDestVal, 'singleValue')
self.ixNet.setMultiAttribute(EthernetDestValues, '-value', '56:00:00:00:00:1')
self.ixNet.commit()
time.sleep(20)
PortVal = self.ixNet.getAttribute(sw_port, '-portNumber')
self.ixNet.setMultiAttribute(PortVal, '-clearOverlays', 'false')
self.ixNet.commit()
PortSetValues = self.ixNet.add(PortVal, 'singleValue')
self.ixNet.setMultiAttribute(PortSetValues, '-value', '5677888')
self.ixNet.commit()
globalObj = self.ixNet.getRoot() + '/globals'
topology = globalObj + '/topology'
print ("Applying changes on the fly")
try:
self.ixNet.execute('applyOnTheFly', topology)
except:
print("error in applying on the fly change")
time.sleep(10)
###############################################################################
# print learned info #
###############################################################################
def print_learned_info(self, openFlowSwitch):
self.ixNet.execute('getOFChannelLearnedInfo', openFlowSwitch, '1')
time.sleep(5)
print("Print OFSwitch Learned Info")
linfo = self.ixNet.getList(openFlowSwitch, 'learnedInfo')[0]
linfoList = self.ixNet.getList(linfo, 'table')
print("***************************************************")
for table in linfoList:
tableType = self.ixNet.getAttribute(table, '-type')
print(tableType)
print("=================================================")
columns = self.ixNet.getAttribute(table, '-columns')
print(columns)
values = self.ixNet.getAttribute(table, '-values')
for value in values:
                for word in value:
print(word)
time.sleep(15)
self.ixNet.execute('getOFSwitchFlowStatLearnedInfo', openFlowSwitch, '1')
time.sleep(5)
print ("Print OFswitch Flow Learned info")
linfo = self.ixNet.getList(openFlowSwitch, 'learnedInfo')[0]
linfoList = self.ixNet.getList(linfo, 'table')
print("***************************************************")
for table in linfoList:
tableType = self.ixNet.getAttribute(table, '-type')
print(tableType)
print("=================================================")
columns = self.ixNet.getAttribute(table, '-columns')
print(columns)
values = self.ixNet.getAttribute(table, '-values')
for value in values:
                for word in value:
print(word)
time.sleep(15)
print ('Stopping protocols')
self.ixNet.execute('stopAllProtocols')
################################################################################
# protocol configuration section #
################################################################################
def main(self):
self.assignPorts(ports[0])
root = self.ixNet.getRoot()
vportTx = self.ixNet.getList(root, 'vport')[0]
print("adding topologies")
self.ixNet.add(root, 'topology', '-vports', vportTx)
self.ixNet.commit()
topologies = self.ixNet.getList(self.ixNet.getRoot(), 'topology')
topo1 = topologies[0]
print ("Adding 2 device groups")
deviceGroup1 = self.ixNet.add(topo1, 'deviceGroup')
self.ixNet.commit()
t1devices = self.ixNet.getList(topo1, 'deviceGroup')
t1dev1 = t1devices[0]
print("Configuring the multipliers (number of sessions)")
self.ixNet.setAttribute(t1dev1, '-multiplier', '1')
self.ixNet.commit()
print("Adding ethernet/mac endpoints")
self.ixNet.add(t1dev1, 'ethernet')
self.ixNet.commit()
mac1 = self.ixNet.getList(t1dev1, 'ethernet')[0]
print ('ixNet.help(\'::ixNet::OBJ-/topology/deviceGroup/ethernet\')')
print("Add ipv4")
self.ixNet.add(mac1, 'ipv4')
self.ixNet.commit()
ip1 = self.ixNet.getList(mac1, 'ipv4')[0]
mvAdd1 = self.ixNet.getAttribute(ip1, '-address')
mvGw1 = self.ixNet.getAttribute(ip1, '-gatewayIp')
print("configuring ipv4 addresses")
self.ixNet.setAttribute(mvAdd1 + '/singleValue', '-value', '1.1.1.2')
self.ixNet.commit()
self.ixNet.setAttribute(mvGw1 + '/singleValue', '-value', '1.1.1.1')
self.ixNet.commit()
self.ixNet.setAttribute(self.ixNet.getAttribute(ip1, '-prefix') + '/singleValue', '-value', '24')
self.ixNet.commit()
self.ixNet.setMultiAttribute(self.ixNet.getAttribute(ip1, '-resolveGateway') + '/singleValue', '-value', 'true')
self.ixNet.commit()
time.sleep(5)
print (self.ixNet.help('::ixNet::OBJ-/topology/deviceGroup/ethernet/ipv4'))
print("Adding Openflow Switch over IP4 stacks")
self.ixNet.add(ip1, 'openFlowSwitch')
self.ixNet.commit()
openFlowSwitch1 = self.ixNet.getList(ip1, 'openFlowSwitch')[0]
print (openFlowSwitch1)
time.sleep(5)
openflowSwitchchannels = self.ixNet.add(openFlowSwitch1, 'OFSwitchChannel')
self.ixNet.commit()
time.sleep(5)
openflowchannellist = self.ixNet.getList(openFlowSwitch1, 'OFSwitchChannel')[0]
self.ixNet.setMultiAttribute(openflowSwitchchannels, '-auxConnectionsPerChannel', '1')
self.ixNet.commit()
time.sleep(5)
#openflowTablelist = self.ixNet.getList(ip1, 'switchTablesList')[0]
self.ixNet.setMultiAttribute(openFlowSwitch1, '-numberOfTableRanges', '3')
self.ixNet.commit()
time.sleep(5)
switchTableList = self.ixNet.getList(openFlowSwitch1, 'switchTablesList')[0]
print (switchTableList)
networkTopologyObj = self.ixNet.add(deviceGroup1, 'networkTopology')
self.ixNet.commit()
networkTopologyObjRing = self.ixNet.add(networkTopologyObj, 'netTopologyRing')
self.ixNet.commit()
self.start_protocol_check_stats()
swtopology = self.ixNet.getList(self.ixNet.getRoot(), 'topology')[0]
print (swtopology)
deviceGroupSW = self.ixNet.getList(swtopology, 'deviceGroup')[0]
ethernetSw = self.ixNet.getList(deviceGroupSW, 'ethernet')[0]
ipv4Sw = self.ixNet.getList(ethernetSw, 'ipv4')[0]
ofSw = self.ixNet.getList(ipv4Sw, 'openFlowSwitch')[0]
print ("Now disable/Enable of switch on the fly")
self.on_the_fly(ofSw)
print ("Changing Ethernet Address, Port number on the fly!!!!!")
swPortActive = self.ixNet.getList(ofSw, 'ofSwitchPorts')[0]
print (swPortActive)
self.on_the_fly_port_number_ethernetaddress(swPortActive)
print ("Fetching Switch Learned info !!!!!")
self.print_learned_info(ofSw)
print ('!!! Test Script Ends !!!')
#################################################################################
# Give chassis/client/ixNetwork server port/ chassis port HW port information #
# below #
#################################################################################
if __name__ == "__main__":
ixTclServer = '10.214.101.141'
ixTclPort = '8558'
ports = [('12.0.1.253', '5', '10',)]
version = '8.10'
switch = NgpfOpenFlowSwitch(ixTclServer, ixTclPort, version)
switch.main()
| 50.982759
| 121
| 0.507553
|
695476e4cf6f37fb3d6fa7203e34fc7e306c0043
| 4,417
|
py
|
Python
|
train_cnn.py
|
PootieT/deep_learning_final_project
|
c1a16b232f5806d2b8b9668807e82aee3fd992ee
|
[
"MIT"
] | 1
|
2018-03-23T03:20:20.000Z
|
2018-03-23T03:20:20.000Z
|
train_cnn.py
|
PootieT/deep_learning_final_project
|
c1a16b232f5806d2b8b9668807e82aee3fd992ee
|
[
"MIT"
] | null | null | null |
train_cnn.py
|
PootieT/deep_learning_final_project
|
c1a16b232f5806d2b8b9668807e82aee3fd992ee
|
[
"MIT"
] | null | null | null |
"""
Train on images split into directories. This assumes we've split
our videos into frames and moved them to their respective folders.
Based on:
https://keras.io/preprocessing/image/
and
https://keras.io/applications/
"""
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from data import DataSet
import os.path
data = DataSet()
# Helper: Save the model.
checkpointer = ModelCheckpoint(
    filepath=os.path.join('data', 'checkpoints', 'inception.{epoch:03d}-{val_loss:.2f}.hdf5'),
    verbose=1,
    save_best_only=True)
# Helper: Stop when we stop learning.
early_stopper = EarlyStopping(patience=10)
# Helper: TensorBoard
tensorboard = TensorBoard(log_dir=os.path.join('data', 'logs'))
def get_generators():
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
horizontal_flip=True,
rotation_range=10.,
width_shift_range=0.2,
height_shift_range=0.2)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
os.path.join('data', 'train'),
target_size=(299, 299),
batch_size=32,
classes=data.classes,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
os.path.join('data', 'test'),
target_size=(299, 299),
batch_size=32,
classes=data.classes,
class_mode='categorical')
return train_generator, validation_generator
def get_model(weights='imagenet'):
# create the base pre-trained model
    base_model = VGG16(weights=weights, include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer
predictions = Dense(len(data.classes), activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
return model
def freeze_all_but_top(model):
"""Used to train just the top layers of the model."""
    # first: train only the top layers (which were randomly initialized),
    # i.e. freeze all layers of the convolutional base
for layer in model.layers[:-2]:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
return model
def freeze_all_but_mid_and_top(model):
"""After we fine-tune the dense layers, train deeper."""
    # fine-tune the deepest convolutional block plus the dense head:
    # freeze everything before VGG16's block5 (the first 15 layers) and
    # unfreeze the rest
    for layer in model.layers[:15]:
        layer.trainable = False
    for layer in model.layers[15:]:
        layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
model.compile(
optimizer=SGD(lr=0.0001, momentum=0.9),
loss='categorical_crossentropy',
metrics=['accuracy', 'top_k_categorical_accuracy'])
return model
def train_model(model, nb_epoch, generators, callbacks=[]):
train_generator, validation_generator = generators
model.fit_generator(
train_generator,
steps_per_epoch=100,
validation_data=validation_generator,
validation_steps=10,
epochs=nb_epoch,
callbacks=callbacks)
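    # each epoch draws steps_per_epoch * batch_size = 100 * 32 = 3200 images from the training generator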
return model
def main(weights_file):
model = get_model()
generators = get_generators()
if weights_file is None:
print("Loading network from ImageNet weights.")
# Get and train the top layers.
model = freeze_all_but_top(model)
model = train_model(model, 10, generators)
else:
print("Loading saved model: %s." % weights_file)
model.load_weights(weights_file)
# Get and train the mid layers.
model = freeze_all_but_mid_and_top(model)
model = train_model(model, 10, generators,
[checkpointer, early_stopper, tensorboard])
if __name__ == '__main__':
weights_file = None
main(weights_file)
| 32.477941
| 132
| 0.698438
|
ae2578ec8dfc677193a64e7e20e188fc16338fea
| 1,617
|
py
|
Python
|
clients/kratos/python/test/test_identity.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 77
|
2020-02-14T17:27:36.000Z
|
2022-03-25T08:44:52.000Z
|
clients/kratos/python/test/test_identity.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 125
|
2020-02-07T21:45:52.000Z
|
2022-03-31T12:54:24.000Z
|
clients/kratos/python/test/test_identity.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 44
|
2020-01-31T22:05:47.000Z
|
2022-03-09T14:41:22.000Z
|
"""
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.8.2-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.identity_credentials import IdentityCredentials
from ory_kratos_client.model.identity_state import IdentityState
from ory_kratos_client.model.recovery_address import RecoveryAddress
from ory_kratos_client.model.verifiable_identity_address import VerifiableIdentityAddress
globals()['IdentityCredentials'] = IdentityCredentials
globals()['IdentityState'] = IdentityState
globals()['RecoveryAddress'] = RecoveryAddress
globals()['VerifiableIdentityAddress'] = VerifiableIdentityAddress
from ory_kratos_client.model.identity import Identity
class TestIdentity(unittest.TestCase):
"""Identity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIdentity(self):
"""Test Identity"""
# FIXME: construct object with mandatory attributes with example values
# model = Identity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 35.933333
| 446
| 0.772418
|
06184fcca59df0a650a2ac13f685f6e263817651
| 1,025
|
py
|
Python
|
gltflib/models/__init__.py
|
Onboard-Team/gltflib
|
1aef8e5edcd6336b170226b0666bf78f92f02ee9
|
[
"MIT"
] | 56
|
2019-06-17T10:46:25.000Z
|
2022-03-30T14:48:36.000Z
|
gltflib/models/__init__.py
|
Onboard-Team/gltflib
|
1aef8e5edcd6336b170226b0666bf78f92f02ee9
|
[
"MIT"
] | 198
|
2019-11-15T03:33:25.000Z
|
2022-03-30T07:02:21.000Z
|
gltflib/models/__init__.py
|
Onboard-Team/gltflib
|
1aef8e5edcd6336b170226b0666bf78f92f02ee9
|
[
"MIT"
] | 11
|
2019-10-15T01:37:05.000Z
|
2022-03-24T12:11:12.000Z
|
from .accessor import Accessor
from .animation import Animation
from .animation_sampler import AnimationSampler
from .asset import Asset
from .attributes import Attributes
from .buffer import Buffer
from .buffer_view import BufferView
from .camera import Camera
from .channel import Channel
from .image import Image
from .material import Material
from .mesh import Mesh
from .node import Node
from .normal_texture_info import NormalTextureInfo
from .occlusion_texture_info import OcclusionTextureInfo
from .orthographic_camera_info import OrthographicCameraInfo
from .perspective_camera_info import PerspectiveCameraInfo
from .pbr_metallic_roughness import PBRMetallicRoughness
from .primitive import Primitive
from .sampler import Sampler
from .scene import Scene
from .skin import Skin
from .sparse import Sparse
from .sparse_indices import SparseIndices
from .sparse_values import SparseValues
from .gltf_model import GLTFModel
from .target import Target
from .texture import Texture
from .texture_info import TextureInfo
| 34.166667
| 60
| 0.858537
|
c28a8dc358a0c2dd3d6249546ac6f064da191a56
| 340
|
py
|
Python
|
matrix/ops/squeeze.py
|
wesselb/matrix
|
ba343d69945d7cbc67330d01a87eee363ce5d408
|
[
"MIT"
] | 3
|
2021-07-30T17:38:08.000Z
|
2021-11-21T04:41:40.000Z
|
matrix/ops/squeeze.py
|
wesselb/matrix
|
ba343d69945d7cbc67330d01a87eee363ce5d408
|
[
"MIT"
] | null | null | null |
matrix/ops/squeeze.py
|
wesselb/matrix
|
ba343d69945d7cbc67330d01a87eee363ce5d408
|
[
"MIT"
] | null | null | null |
import lab as B
from wbml.warning import warn_upmodule
from ..matrix import AbstractMatrix, structured
from ..util import ToDenseWarning
__all__ = []
@B.dispatch
def squeeze(a: AbstractMatrix):
if structured(a):
warn_upmodule(f"Squeezing {a}: converting to dense.", category=ToDenseWarning)
return B.squeeze(B.dense(a))
| 22.666667
| 86
| 0.741176
|
5104c2f8d322436379b4351260f4652ed1867414
| 6,628
|
py
|
Python
|
experiments/archived/20210119/bag_model/criterions/kld_loss.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | 5
|
2020-10-14T02:30:44.000Z
|
2021-05-06T12:48:28.000Z
|
experiments/archived/20210119/bag_model/criterions/kld_loss.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | 2
|
2020-12-19T05:59:31.000Z
|
2020-12-22T11:05:31.000Z
|
experiments/archived/20210119/bag_model/criterions/kld_loss.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
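    # Label-smoothed NLL: with smoothing epsilon and vocabulary size V,
    #   loss = (1 - epsilon) * nll + (epsilon / V) * smooth,
    # where nll = -log p(target) and smooth = -sum_v log p(v); positions equal
    # to ignore_index contribute zero.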
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / lprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
@register_criterion("kld_loss")
class KLDLabelSmoothedCrossEntropyCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size=0,
report_accuracy=False,
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.ignore_prefix_size = ignore_prefix_size
self.report_accuracy = report_accuracy
self.count = 0
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--report-accuracy', action='store_true',
help='report accuracy metric')
parser.add_argument('--ignore-prefix-size', default=0, type=int,
help='Ignore first N tokens')
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
with torch.autograd.set_detect_anomaly(True):
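            # anomaly detection adds checks to every backward op and noticeably
            # slows training; it is mainly useful while debugging NaN/inf losses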
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=True)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
#--------------------------------------------------------------------------------------------
#x, extra = net_output
#x_z = extra['x_z']
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=4
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=4
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 38.988235
| 105
| 0.601086
|
0f020d1c783e194f96af84de9326eba25595435c
| 1,763
|
py
|
Python
|
paddlespeech/server/engine/base_engine.py
|
jerryuhoo/PaddleSpeech
|
1eec7b5e042da294c7524af92f0fae4c32a71aa3
|
[
"Apache-2.0"
] | 1,379
|
2021-11-10T02:42:21.000Z
|
2022-03-31T13:34:25.000Z
|
paddlespeech/server/engine/base_engine.py
|
jerryuhoo/PaddleSpeech
|
1eec7b5e042da294c7524af92f0fae4c32a71aa3
|
[
"Apache-2.0"
] | 268
|
2021-11-10T14:07:34.000Z
|
2022-03-31T02:25:20.000Z
|
paddlespeech/server/engine/base_engine.py
|
jerryuhoo/PaddleSpeech
|
1eec7b5e042da294c7524af92f0fae4c32a71aa3
|
[
"Apache-2.0"
] | 296
|
2021-11-15T02:37:11.000Z
|
2022-03-31T12:14:46.000Z
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
from pattern_singleton import Singleton
__all__ = ['BaseEngine']
class BaseEngine(metaclass=Singleton):
"""
An base engine class
"""
def __init__(self):
self._inputs = dict()
self._outputs = dict()
def init(self, *args, **kwargs):
"""
init the engine
Returns:
bool: true or false
"""
pass
def postprocess(self, *args, **kwargs) -> Union[str, os.PathLike]:
"""
        Postprocess the output and return results.
        This method gets the model output from self._outputs and converts it into human-readable results.
Returns:
Union[str, os.PathLike]: Human-readable results such as texts and audio files.
"""
pass
def run(self, *args, **kwargs) -> Union[str, os.PathLike]:
"""
        Run the engine and return results.
        This method produces the model output and converts it into human-readable results.
Returns:
Union[str, os.PathLike]: Human-readable results such as texts and audio files.
"""
pass
| 29.881356
| 99
| 0.653432
|
cf5121152d0141826295552676482e2ccf425854
| 3,387
|
py
|
Python
|
sif/acquisitions/improvement_acquisition.py
|
JamesBrofos/Sif
|
a38ddb14f598f9f35d3ed9e872260b938e961433
|
[
"MIT"
] | 1
|
2022-03-24T18:59:27.000Z
|
2022-03-24T18:59:27.000Z
|
sif/acquisitions/improvement_acquisition.py
|
JamesBrofos/Sif
|
a38ddb14f598f9f35d3ed9e872260b938e961433
|
[
"MIT"
] | 4
|
2020-03-24T15:39:08.000Z
|
2021-02-02T21:44:05.000Z
|
sif/acquisitions/improvement_acquisition.py
|
JamesBrofos/Sif
|
a38ddb14f598f9f35d3ed9e872260b938e961433
|
[
"MIT"
] | 1
|
2019-11-05T19:28:06.000Z
|
2019-11-05T19:28:06.000Z
|
import numpy as np
from .abstract_acquisition import AbstractAcquisitionFunction
class ImprovementAcquisitionFunction(AbstractAcquisitionFunction):
"""Improvement-Based Acquisition Function Class
    Common acquisition functions can be interpreted as improvements over the best
    observation seen so far. In particular, Thor implements the probability of
    improvement and expected improvement acquisition functions, which measure
    the probability that an input will result in an improvement in the latent
    objective function and the extent to which an input can be expected to
    result in an improvement, respectively.
As it turns out, these improvement-based acquisition functions all rely on
retaining both the best seen value of the latent objective and on computing
a z-score quantity that normalizes the predicted mean of the surrogate
probabilistic model with respect to both the maximum observed value of the
metric and the extent of uncertainty about that prediction.
Parameters:
models (AbstractProcess): A list of Gaussian process models that
interpolates the observed data. Each element of the list should
correspond to a different configuration of kernel hyperparameters.
y_best (float): The best seen value of the metric observed so far. This
is an optional parameter, and if it is not specified by the user
then it will be computed directly from Thor's database (in
particular, taking the maximum of all values of the metric recorded
for an experiment).
"""
def __init__(self, models, y_best=None):
"""Initialize parameters of the improvement-based acquisition function.
"""
super().__init__(models)
if y_best is not None:
self.y_best = y_best
else:
self.y_best = self.models[0].y.max()
def score(self, X):
"""Compute a z-score quantity for the prediction at a given input. This
allows us to balance improvement over the current best while controlling
for uncertainty.
Parameters:
X (numpy array): A numpy array representing the points at which we
need to compute the value of the improvement-based acquisition
function. For each row in the input, we will compute the
associated mean and standard deviation of the mean. This latter
quantity, alongside the best value of the metric, are then used
to standardize the mean.
Returns:
A tuple containing the z-score, the mean of the metric at each
input, and the standard deviation of the mean at each input.
"""
m, n = self.n_models, X.shape[0]
means, sds, gammas = np.zeros((m, n)), np.zeros((m, n)), np.zeros((m, n))
for i, mod in enumerate(self.models):
# Compute the mean and standard deviation of the model's interpolant
# of the objective function.
means[i], var = mod.predict(X, diagonal=True)
sds[i] = np.sqrt(var)
# Compute z-score-like quantity capturing the excess of the mean
# over the current best, adjusted for uncertainty in the measurement.
gammas[i] = (means[i] - self.y_best) / sds[i]
return gammas, means, sds
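# A minimal sketch (not part of the original Sif API) of how the z-score
# returned by `score` is typically turned into improvement acquisitions:
# probability of improvement is Phi(gamma) and expected improvement is
# sd * (gamma * Phi(gamma) + phi(gamma)), with Phi/phi the standard normal
# CDF/PDF.
def _example_improvement_acquisitions(gammas, sds):
    from scipy.stats import norm
    prob_improvement = norm.cdf(gammas)
    expected_improvement = sds * (gammas * norm.cdf(gammas) + norm.pdf(gammas))
    return prob_improvement, expected_improvement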
| 49.086957
| 81
| 0.674638
|
b82f29f2a011b8d59edd3202fd9e05c2f6f2ea33
| 1,401
|
py
|
Python
|
blackmamba/lib/future/builtins/iterators.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
blackmamba/lib/future/builtins/iterators.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
blackmamba/lib/future/builtins/iterators.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | 698
|
2015-06-02T19:18:35.000Z
|
2022-03-29T16:57:15.000Z
|
"""
This module is designed to be used as follows::
from future.builtins.iterators import *
And then, for example::
for i in range(10**15):
pass
for (a, b) in zip(range(10**15), range(-10**15, 0)):
pass
Note that this is standard Python 3 code, plus some imports that do
nothing on Python 3.
The iterators this brings in are::
- ``range``
- ``filter``
- ``map``
- ``zip``
On Python 2, ``range`` is a pure-Python backport of Python 3's ``range``
iterator with slicing support. The other iterators (``filter``, ``map``,
``zip``) are from the ``itertools`` module on Python 2. On Python 3 these
are available in the module namespace but not exported for * imports via
__all__ (zero namespace pollution).
Note that these are also available in the standard library
``future_builtins`` module on Python 2 -- but not Python 3, so using
the standard library version is not portable, nor anywhere near complete.
"""
from __future__ import division, absolute_import, print_function
import itertools
from future import utils
if not utils.PY3:
filter = itertools.ifilter
map = itertools.imap
from future.types import newrange as range
zip = itertools.izip
__all__ = ['filter', 'map', 'range', 'zip']
else:
import builtins
filter = builtins.filter
map = builtins.map
range = builtins.range
zip = builtins.zip
__all__ = []
| 25.944444
| 73
| 0.690221
|
025699d6521ddb2e551c47a8698ef1dc12efb77f
| 6,879
|
py
|
Python
|
tbfy.data/SharedDataMethods.py
|
TBFY/anomaly-detection-tool
|
7d62192a90a58bdf8cf2641415572d922d6a9f70
|
[
"Apache-2.0"
] | null | null | null |
tbfy.data/SharedDataMethods.py
|
TBFY/anomaly-detection-tool
|
7d62192a90a58bdf8cf2641415572d922d6a9f70
|
[
"Apache-2.0"
] | null | null | null |
tbfy.data/SharedDataMethods.py
|
TBFY/anomaly-detection-tool
|
7d62192a90a58bdf8cf2641415572d922d6a9f70
|
[
"Apache-2.0"
] | null | null | null |
'''
Series of methods shared by data processing methods
'''
class SharedDataMethods:
def __init__(self, conf=None):
self.conf = conf
def storeMJUOrganizationNames2Db(self, dataList, idIndex, nameIndex):
'''
        function gets a list of lists, extracts company names and ids and stores them in DB
:param dataList: list of lists
:param idIndex: integer, position of company id in list
:param nameIndex: integer, position of company name in list
:return:
'''
if len(dataList) == 0:
return None
        # get names
companyNames = {}
for row in dataList:
companyNames[row[idIndex]] = row[nameIndex]
        # find names that are not in DB yet
sql = self.conf.cDB.sql
cur = self.conf.cDB.db.cursor()
db = self.conf.cDB.db
dictKeys = "','".join(str(v) for v in companyNames.keys())
queryBase = "select {} from {} where company_id IN ('" + dictKeys + "')"
queryString = sql.SQL(queryBase).format(
# set queried fields
sql.Identifier('company_id'),
# set table name
sql.Identifier('cst_companies_mju'))
#print(queryString.as_string(cur))
cur.execute(queryString)
companiesInDB = self.conf.sharedCommon.returnSqlDataInDictFormat(cur, 'company_id')
        # save names in DB
doCommit = False
for company_id, company_name in companyNames.items():
            # skip existing records
if company_id in companiesInDB:
continue
            # add non existing records
queryBase = "INSERT INTO cst_companies_mju (company_id, company_name) VALUES (%s, %s)"
queryString = sql.SQL(queryBase).format()
#print(queryString.as_string(cur))
row_values = (company_id, company_name)
cur.execute(queryString, row_values)
doCommit = True
        # commit changes
if doCommit:
db.commit()
return None
def storeMJURaw2SQLDB(self, dataDict, tableName, tableUniqueField):
'''
function takes in dataDict and inserts the data into sql DB table with name tableName
        :param dataDict: dictionary with 'head' (field names) and 'data' (rows)
        :param tableName: name of the table to insert the data into
        :param tableUniqueField: field used to detect existing records and avoid duplicates
:return: none
'''
        # set db vars
sql = self.conf.cDB.sql
cur = self.conf.cDB.db.cursor()
db = self.conf.cDB.db
        # get query variables
fieldNamesString = ','.join(dataDict['head']).lower()
valuesList = []
for val in dataDict['head']:
valuesList.append('%s')
valuesString = ','.join(valuesList)
        # set query statement
insertQueryStatement = 'INSERT INTO TABLE-NAME-REPLACE (FIELDS-REPLACE) VALUES (VALUES-REPLACE)'
insertQueryStatement = insertQueryStatement.replace('TABLE-NAME-REPLACE', tableName)
insertQueryStatement = insertQueryStatement.replace('FIELDS-REPLACE', fieldNamesString)
insertQueryStatement = insertQueryStatement.replace('VALUES-REPLACE', valuesString)
formatRules = {
'ocenjenavrednost': 'float',
'koncnavrednost': 'float',
'datumposiljanjaobvestila': 'timestamp',
'datumobjaveobvestila': 'timestamp',
'prejsnje_objave_pjn_datum': 'timestamp',
'prejsnje_objave_uleu_datum': 'timestamp',
'prejsnje_objave_rokzasprejemanjeponudnikovihvprasanj': 'timestamp',
'prejsnje_objave_rokzaprejemponudb': 'timestamp',
'prejsnje_objave_odpiranjeponudbdatum': 'timestamp',
'prejsnje_objave_sys_spremembaizracunanihdatum': 'timestamp',
'sys_spremembaizracunanihdatum': 'timestamp',
'datumoddajesklopa': 'date',
'stprejetihponudb': 'int',
'stprejetiheponudb': 'int',
'letoobdelave': 'int',
'idizpobrazca': 'int',
'kategorijanarocnika': 'int',
'idizppriloge': 'int',
'id_obrazecsubjekt': 'int',
'zaporedna': 'int'
}
keysList = [x.lower() for x in dataDict['head']]
tableUniqueField_n = keysList.index(tableUniqueField)
for row in dataDict['data']:
            # avoid duplication
queryBase = "select count(*) from " + tableName + " where " + tableUniqueField.lower() + " = '" + row[tableUniqueField_n] + "'"
queryString = sql.SQL(queryBase)
cur.execute(queryString)
result = cur.fetchone()
if int(result[0]) > 0:
continue
            # all's good, continue
tmp_row = row.copy()
tmp_row = self.formatRowBeforeSQLInjection(keysList, tmp_row, formatRules)
row_values = tuple(tmp_row)
queryString = sql.SQL(insertQueryStatement).format()
cur.execute(queryString, row_values)
db.commit()
return None
def formatRowBeforeSQLInjection(self, headList, valuesList, rulesDict):
'''
        function takes in valuesList and updates values according to rulesDict
        :param headList: field names, needed to identify position of fields in valuesList
        :param valuesList: list of values to be formatted
        :param rulesDict: rules for formatting
:return: formatted values list
'''
for fieldName,fieldFormat in rulesDict.items():
            # fieldName must be in headList
if fieldName not in headList:
continue
            # all good - format
tmp_n = headList.index(fieldName)
tmp_value = valuesList[tmp_n]
if fieldFormat == 'int':
if tmp_value == '':
tmp_value = 0
valuesList[tmp_n] = int(tmp_value)
if fieldFormat == 'float':
tmp_value = tmp_value.replace(',', '.')
if tmp_value == '':
tmp_value = 0
valuesList[tmp_n] = float(tmp_value)
if fieldFormat == 'timestamp':
if tmp_value == '':
tmp_value = None
valuesList[tmp_n] = tmp_value
if fieldFormat == 'date':
if tmp_value == '':
tmp_value = None
valuesList[tmp_n] = tmp_value
return valuesList
| 33.072115
| 139
| 0.576973
|
2bbc6bf6135f7daeb9e4389f1944d5cd6d4829a0
| 9,354
|
py
|
Python
|
mt/mvae/distributions/von_mises_fisher.py
|
macio232/mvae
|
df3d5158ce29744e54b378ad663361e8b785632a
|
[
"Apache-2.0"
] | 53
|
2019-11-20T05:39:54.000Z
|
2022-02-05T06:36:43.000Z
|
mt/mvae/distributions/von_mises_fisher.py
|
macio232/mvae
|
df3d5158ce29744e54b378ad663361e8b785632a
|
[
"Apache-2.0"
] | 8
|
2020-03-14T20:25:08.000Z
|
2021-06-10T08:06:15.000Z
|
mt/mvae/distributions/von_mises_fisher.py
|
macio232/mvae
|
df3d5158ce29744e54b378ad663361e8b785632a
|
[
"Apache-2.0"
] | 10
|
2020-03-14T20:17:47.000Z
|
2021-12-01T14:08:06.000Z
|
# Copyright 2019 Ondrej Skopek.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
from typing import Optional, Tuple
import torch
import torch.distributions
import torch.nn.functional as F
from torch import Tensor
from torch.distributions.kl import register_kl
from .wrapped_distributions import VaeDistribution, EuclideanUniform
from .hyperspherical_uniform import HypersphericalUniform
from ..ops.ive import ive
from ..ops import common as C
from ..ops import spherical_projected as SP
from ..ops import spherical as S
class VonMisesFisher(torch.distributions.Distribution, VaeDistribution):
arg_constraints = {"loc": torch.distributions.constraints.real, "scale": torch.distributions.constraints.positive}
support = torch.distributions.constraints.real
has_rsample = True
_mean_carrier_measure = 0
@property
def mean(self) -> Tensor:
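        # E[x] = (I_{p/2}(kappa) / I_{p/2-1}(kappa)) * mu; the exp(-kappa) scaling
        # applied by `ive` cancels in the ratio, so it is numerically safe here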
return self.loc * (ive(self.p / 2, self.scale) / ive(self.p / 2 - 1, self.scale))
@property
def stddev(self) -> Tensor:
return self.scale
def __init__(self, loc: Tensor, scale: Tensor, validate_args: Optional[bool] = None) -> None:
self.dtype = loc.dtype
self.loc = loc
assert loc.norm(p=2, dim=-1).allclose(torch.ones(loc.shape[:-1], device=loc.device))
self.scale = scale
assert (scale > 0).all()
self.device = loc.device
self.p = loc.shape[-1]
self.uniform = EuclideanUniform(0, 1)
self.hyperspherical_uniform_v = HypersphericalUniform(self.p - 2, device=self.device)
# Pre-compute Householder transformation
e1 = torch.tensor([1.] + [0.] * (loc.shape[-1] - 1), requires_grad=False, device=self.device)
self.u = F.normalize(e1 - self.loc)
super().__init__(self.loc.size(), validate_args=validate_args)
def rsample(self, sample_shape: torch.Size = torch.Size()) -> Tensor:
shape = sample_shape if isinstance(sample_shape, torch.Size) else torch.Size([sample_shape])
# Sample w ~ g(w | kappa, m). Should be in [-1, 1], but not always is (numerical reasons).
w = self._sample_w3(shape=shape) if self.p == 3 else self._sample_w_rej(shape=shape)
# Sample v ~ U(S^(m-2))
v = self.hyperspherical_uniform_v.sample(shape)
w_ = C.sqrt(1 - w**2)
x = torch.cat((w, w_ * v), dim=-1)
z = self._householder_rotation(x)
return z.to(dtype=self.dtype)
def _sample_w3(self, shape: torch.Size) -> Tensor:
shape = torch.Size(shape + torch.Size(self.scale.shape))
u = self.uniform.sample(shape).to(self.device)
log_u = torch.log(u)
inv_log_u = torch.log(1 - u) - 2 * self.scale
stack = torch.stack([log_u, inv_log_u], dim=0)
w = 1 + stack.logsumexp(dim=0) / self.scale
self.__w = torch.clamp(w, min=-1, max=1) # Assure w is in [-1, 1].
return self.__w
def _sample_w_rej(self, shape: torch.Size) -> Tensor:
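        # rejection sampler for the marginal distribution of w = <x, mu>
        # (cf. Ulrich 1984 / Wood 1994), as commonly used for vMF sampling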
c = torch.sqrt((4 * (self.scale**2)) + (self.p - 1)**2)
b_true = (-2 * self.scale + c) / (self.p - 1)
        # using a Taylor approximation with a smooth switch for 10 < scale < 11
        # to avoid numerical errors for large scale
b_app = (self.p - 1) / (4 * self.scale)
s = torch.min(torch.max(torch.tensor([0.], device=self.device), self.scale - 10),
torch.tensor([1.], device=self.device))
b = b_app * s + b_true * (1 - s)
a = (self.p - 1 + 2 * self.scale + c) / 4
d = (4 * a * b) / (1 + b) - (self.p - 1) * math.log(self.p - 1)
self.__b, (self.__e, self.__w) = b, self._while_loop(b, a, d, shape)
return self.__w
def _while_loop(self, b: Tensor, a: Tensor, d: Tensor, shape: torch.Size) -> Tuple[Tensor, Tensor]:
b, a, d = [e.repeat(*shape, *([1] * len(self.scale.shape))) for e in (b, a, d)]
w, e, bool_mask = torch.zeros_like(b).to(self.device), torch.zeros_like(b).to(
self.device), (torch.ones_like(b) == 1).to(self.device)
shape = torch.Size(shape + torch.Size(self.scale.shape))
while bool_mask.sum() != 0:
e_ = torch.distributions.Beta((self.p - 1) / 2,
(self.p - 1) / 2).sample(shape[:-1]).reshape(shape).to(self.device)
u = self.uniform.sample(shape).to(self.device)
w_ = (1 - (1 + b) * e_) / (1 - (1 - b) * e_)
t = (2 * a * b) / (1 - (1 - b) * e_)
accept = ((self.p - 1) * t.log() - t + d) > torch.log(u)
            reject = ~accept
w[bool_mask * accept] = w_[bool_mask * accept]
e[bool_mask * accept] = e_[bool_mask * accept]
bool_mask[bool_mask * accept] = reject[bool_mask * accept]
return e, w
def _householder_rotation(self, x: Tensor) -> Tensor:
# z = Ux = (I - 2uu^T)x = x - 2(u^T x)u
z = x - 2 * (x * self.u).sum(-1, keepdim=True) * self.u
return z
def entropy(self) -> Tensor:
ive_ = ive((self.p / 2) - 1, self.scale)
output = -self.scale * ive(self.p / 2, self.scale) / ive_
return output.view(*(output.shape[:-1])) - self._c_p_kappa(self.scale, p=self.p, ive_precomp=ive_)
def log_prob(self, x: Tensor) -> Tensor:
assert torch.norm(x, p=2, dim=-1).allclose(torch.ones(x.shape[:-1], dtype=x.dtype, device=x.device))
expprob = self._log_unnormalized_prob(x)
norm_const = self._c_p_kappa(self.scale, p=self.p)
output = expprob + norm_const
# Alternative equally good way to calculate it:
# expprob2 = (self.loc * x).sum(dim=-1, keepdim=False)
# ln_kappa = torch.log(self.scale)
# ln2pi = math.log(math.pi) + math.log(2)
# ln_ive_ = torch.log(ive(self.p / 2 - 1, self.scale))
# norm_const2 = self.p * (ln_kappa - ln2pi) / 2. - ln_kappa - ln_ive_
# output2 = self.scale * (expprob2 - 1) + norm_const2
return output
def _log_unnormalized_prob(self, x: Tensor) -> Tensor: # log(e^(k*u^Tx)) = k*u^Tx
output = self.scale * (self.loc * x).sum(dim=-1, keepdim=True)
return output.view(*(output.shape[:-1]))
@staticmethod
def _c_p_kappa(kappa: Tensor, p: int, ive_precomp: Optional[Tensor] = None) -> Tensor:
# https://en.wikipedia.org/wiki/Von_Mises–Fisher_distribution
ln_kappa = torch.log(kappa) # log(kappa)
if ive_precomp is not None:
ive_ = ive_precomp
else:
ive_ = ive(p / 2 - 1, kappa)
ln_ive_ = torch.log(ive_)
ln_iv_ = ln_ive_ + kappa
ln2pi = math.log(2 * math.pi)
output1 = p * (ln_kappa - ln2pi) / 2. - ln_kappa - ln_iv_ # Same as output3.
# output2 = torch.log(kappa**(p/2 + 1) / ((2*math.pi)**(p/2) * (ive_ * math.exp(kappa)))) # Too imprecise.
# output3 = (p / 2 - 1) * ln_kappa - (p / 2) * ln2pi - ln_iv_
return output1.view(*(output1.shape[:-1]))
class RadiusVonMisesFisher(VonMisesFisher):
def __init__(self, loc: Tensor, scale: Tensor, radius: Tensor, validate_args: Optional[bool] = None) -> None:
self.radius = radius
assert torch.norm(loc, p=2,
dim=-1).allclose(self.radius * torch.ones(loc.shape[:-1], dtype=loc.dtype, device=loc.device))
self.unnormalized_loc = loc
self.normalized_loc = F.normalize(loc, dim=-1, p=2)
self.unnormalized_scale = scale
super().__init__(self.normalized_loc, scale, validate_args)
def log_prob(self, x: Tensor) -> Tensor:
assert torch.norm(x, p=2,
dim=-1).allclose(self.radius * torch.ones(x.shape[:-1], dtype=x.dtype, device=x.device))
return super().log_prob(x / self.radius)
def rsample(self, sample_shape: torch.Size = torch.Size()) -> Tensor:
rsampled = super().rsample(sample_shape)
assert torch.norm(rsampled, p=2,
dim=-1).allclose(torch.ones(rsampled.shape[:-1], dtype=rsampled.dtype,
device=rsampled.device))
return self.radius * rsampled
class RadiusProjectedVonMisesFisher(RadiusVonMisesFisher):
def log_prob(self, x_proj: Tensor) -> Tensor:
x = SP.projected_to_spherical(x_proj, self.radius)
return super().log_prob(x)
def rsample(self, sample_shape: torch.Size = torch.Size()) -> Tensor:
rsampled = super().rsample(sample_shape)
return S.spherical_to_projected(rsampled, self.radius)
@register_kl(RadiusVonMisesFisher, HypersphericalUniform)
def _kl_vmf_uniform(vmf: RadiusVonMisesFisher, hyu: HypersphericalUniform) -> Tensor:
kl = -vmf.entropy() + hyu.entropy() # Doesn't depend on radius, only implicitly through scale.
return kl
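# A minimal usage sketch (illustrative, not part of the original API): it assumes the
# imports already made at the top of this module (torch, F, the `ive` helper, etc.) and
# shows a reparameterised draw from a vMF on the unit 2-sphere plus its log-density.
def _vmf_usage_example() -> None:
    loc = F.normalize(torch.tensor([[1.0, 1.0, 0.0]]), dim=-1)  # unit-norm mean direction, shape (batch, 3)
    scale = torch.tensor([[10.0]])  # concentration kappa > 0, shape (batch, 1)
    vmf = VonMisesFisher(loc, scale)
    z = vmf.rsample()  # one unit-norm sample per batch element, differentiable w.r.t. loc and scale
    log_p = vmf.log_prob(z)  # log-density of each sample, shape (batch,)
    print(z.shape, log_p.shape, vmf.entropy().shape)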
| 42.135135
| 120
| 0.607654
|
e3fa4dacd2b257aa922d13a87191e983b6419a48
| 2,091
|
py
|
Python
|
tobiko/tests/unit/test_testcase.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | 1
|
2022-01-11T20:50:06.000Z
|
2022-01-11T20:50:06.000Z
|
tobiko/tests/unit/test_testcase.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
tobiko/tests/unit/test_testcase.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import testtools
import tobiko
from tobiko.tests import unit
class TestCaseTest(unit.TobikoUnitTest):
def setUp(self):
super(TestCaseTest, self).setUp()
self.addCleanup(self._pop_inner_test_cases)
def _pop_inner_test_cases(self):
case = tobiko.get_test_case()
while case is not self:
tobiko.pop_test_case()
case = tobiko.get_test_case()
def test_get_test_case(self):
result = tobiko.get_test_case()
self.assertIs(self, result)
def test_get_test_case_out_of_context(self):
manager = tobiko.TestCasesManager()
result = tobiko.get_test_case(manager=manager)
self.assertIsInstance(result, testtools.TestCase)
self.assertEqual('tobiko.common._testcase.DummyTestCase.runTest',
result.id())
def test_push_test_case(self):
class InnerTest(testtools.TestCase):
def runTest(self):
pass
inner_case = InnerTest()
tobiko.push_test_case(inner_case)
self.assertIs(inner_case, tobiko.get_test_case())
def test_pop_test_case(self):
class InnerTest(testtools.TestCase):
def runTest(self):
pass
inner_case = InnerTest()
tobiko.push_test_case(inner_case)
result = tobiko.pop_test_case()
self.assertIs(inner_case, result)
self.assertIs(self, tobiko.get_test_case())
| 29.450704
| 78
| 0.673362
|
b9aaafcda022a53332c59f74f160de6f908af81e
| 1,207
|
py
|
Python
|
caffe2/python/onnx/tests/onnx_backend_test.py
|
shigengtian/caffe2
|
e19489d6acd17fea8ca98cd8e4b5b680e23a93c5
|
[
"Apache-2.0"
] | 1
|
2018-03-26T13:25:03.000Z
|
2018-03-26T13:25:03.000Z
|
caffe2/python/onnx/tests/onnx_backend_test.py
|
shigengtian/caffe2
|
e19489d6acd17fea8ca98cd8e4b5b680e23a93c5
|
[
"Apache-2.0"
] | null | null | null |
caffe2/python/onnx/tests/onnx_backend_test.py
|
shigengtian/caffe2
|
e19489d6acd17fea8ca98cd8e4b5b680e23a93c5
|
[
"Apache-2.0"
] | 1
|
2018-12-20T09:14:48.000Z
|
2018-12-20T09:14:48.000Z
|
## @package onnx
# Module caffe2.python.onnx.tests.onnx_backend_test
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
import onnx.backend.test
import caffe2.python.onnx.backend as c2
# This is a pytest magic variable to load extra plugins
pytest_plugins = 'onnx.backend.test.report',
backend_test = onnx.backend.test.BackendTest(c2, __name__)
backend_test.exclude(r'(test_hardsigmoid' # Does not support Hardsigmoid.
'|test_mean|test_hardmax' # Does not support Mean and Hardmax.
'|test_cast.*FLOAT16.*' # Does not support Cast on Float16.
'|test_depthtospace.*' # Does not support DepthToSpace.
'|test_.*pool_.*same.*)') # Does not support pool same.
# Skip vgg to speed up CI
if 'JENKINS_URL' in os.environ:
backend_test.exclude(r'(test_vgg19|test_vgg)')
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test
.enable_report()
.test_cases)
if __name__ == '__main__':
unittest.main()
| 31.763158
| 84
| 0.700083
|
c8b5ae597be7973ea4f02317d8dc6a6832eef016
| 3,176
|
py
|
Python
|
aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeSnapshotsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeSnapshotsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeSnapshotsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeSnapshotsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeSnapshots','sas')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_StatusList(self):
return self.get_query_params().get('StatusList')
def set_StatusList(self,StatusList):
self.add_query_param('StatusList',StatusList)
def get_Uuid(self):
return self.get_query_params().get('Uuid')
def set_Uuid(self,Uuid):
self.add_query_param('Uuid',Uuid)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_MachineRemark(self):
return self.get_query_params().get('MachineRemark')
def set_MachineRemark(self,MachineRemark):
self.add_query_param('MachineRemark',MachineRemark)
def get_NextToken(self):
return self.get_query_params().get('NextToken')
def set_NextToken(self,NextToken):
self.add_query_param('NextToken',NextToken)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_ApiVersion(self):
return self.get_query_params().get('ApiVersion')
def set_ApiVersion(self,ApiVersion):
self.add_query_param('ApiVersion',ApiVersion)
def get_MachineRegion(self):
return self.get_query_params().get('MachineRegion')
def set_MachineRegion(self,MachineRegion):
self.add_query_param('MachineRegion',MachineRegion)
def get_IsAliYunEcs(self):
return self.get_query_params().get('IsAliYunEcs')
def set_IsAliYunEcs(self,IsAliYunEcs):
self.add_query_param('IsAliYunEcs',IsAliYunEcs)
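# A minimal usage sketch (hypothetical credentials and region, not part of the generated
# SDK file): AcsClient and do_action_with_exception are the standard aliyunsdkcore entry
# points for sending an RPC request such as DescribeSnapshotsRequest.
if __name__ == "__main__":
	from aliyunsdkcore.client import AcsClient
	client = AcsClient('<your-access-key-id>', '<your-access-key-secret>', 'cn-hangzhou')
	request = DescribeSnapshotsRequest()
	request.set_accept_format('json')
	request.set_PageSize(20)
	request.set_CurrentPage(1)
	response = client.do_action_with_exception(request)  # raises on API errors, returns raw bytes
	print(response)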
| 32.408163
| 76
| 0.762594
|
6a66aa8f419e06b13a40dd2d3bbfb13fa09f1b8a
| 268
|
py
|
Python
|
nameservice/admin.py
|
BryantS11/SkyNS
|
829c212f0557941299f105b871ffbea58cb8421c
|
[
"MIT"
] | null | null | null |
nameservice/admin.py
|
BryantS11/SkyNS
|
829c212f0557941299f105b871ffbea58cb8421c
|
[
"MIT"
] | null | null | null |
nameservice/admin.py
|
BryantS11/SkyNS
|
829c212f0557941299f105b871ffbea58cb8421c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from nameservice.models import NameServiceModel, UserPortalModel, PortalModel
# Register your models here.
admin.site.register(NameServiceModel) # Add to Admin Page
admin.site.register(PortalModel)
admin.site.register(UserPortalModel)
| 38.285714
| 77
| 0.843284
|
c1d74f64a9b461193426a8f68753258a8cbd1327
| 5,119
|
py
|
Python
|
messages.py
|
Salz0/Derzhavets_bot
|
7a592aed68cdbf450a30fb569a1eb26f9a3359d1
|
[
"CC0-1.0"
] | null | null | null |
messages.py
|
Salz0/Derzhavets_bot
|
7a592aed68cdbf450a30fb569a1eb26f9a3359d1
|
[
"CC0-1.0"
] | null | null | null |
messages.py
|
Salz0/Derzhavets_bot
|
7a592aed68cdbf450a30fb569a1eb26f9a3359d1
|
[
"CC0-1.0"
] | null | null | null |
from aiogram.utils.emoji import emojize
from aiogram.utils.markdown import text
# QUESTIONNAIRE MESSAGES:
questionnaire_start = text(emojize("Привіт!👋\nРозкажи, будь ласка: де та з ким ти хочеш жити 😏"))
questionnaire_floor = text(emojize("Де саме ти хотів би оселитися?🧳"))
questionnaire_room = text(emojize("Який номер кімнати в тебе з щасливих?🎰"))
questionnaire_roommates = text(emojize("Останнє, з ким ти хотів/ла би жити? 🐸"))
save_button = text(emojize("Зберегти📝"))
questionnaire_goodbye = text(emojize("Інформація буде передана на обробку, дякую за інформацію 📃"))
# QUESTIONNAIRE BUTTONS:
place1 = text(emojize("На Майбороди, з коліверами"))
place2 = text(emojize("У горах, наодинці🌄"))
# FOR PLACE 1 (МАЙБОРОДА)
floor1 = text(emojize("На 1️⃣ поверсі"))
floor2 = text(emojize("На 2️⃣ поверсі"))
# FOR PLACE 2 (ГОРА)
mountain_bot = text(emojize("У підніжжя 🏞"))
mountain_msg = text(emojize("Обери собі компаньона!🙌"))
mountain_top = text(emojize("На вершині 🌄"))
# FOR FLOOR1 (МАЙБОРОДА)
room1 = text(emojize("1️⃣").encode("utf-8"))
room2 = text(emojize("2️⃣"))
room3 = text(emojize("3️⃣"))
# FOR FLOOR2 (МАЙБОРОДА)
room4 = text(emojize("4️⃣"))
room5 = text(emojize("5️⃣"))
room6 = text(emojize("6️⃣"))
# MAIBORODA FRIENDS
room1_friend1 = "Алекс"
room1_friend2 = "Фродо"
room2_friend1 = "Лазло"
room2_friend2 = "Каска"
room3_friend1 = "Іван"
room3_friend2 = "Василь"
room4_friend1 = "Олекса"
room4_friend2 = "Філіп"
room5_friend1 = "Фердинанд"
room5_friend2 = "Кіра"
room6_friend1 = "Леся"
room6_friend2 = "Валерій"
# FOR MOUNTAIN (bot)
river = text(emojize("Біля річки 🐸"))
tree = text(emojize("На дереві 🌳"))
# FOR MOUNTAIN (top)
igloo = text(emojize("В іглу ☃"))
cave = text(emojize("У печері 🗻"))
# FOR MOUNTAIN (RIVER) PET
pet_river1 = text(emojize("Золоту рибку!🐡"))
pet_river2 = text(emojize("Медведя!🐻"))
# FOR MOUNTAIN (TREE) PET
pet_tree1 = text(emojize("Білочку🐿"))
pet_tree2 = text(emojize("Сову🦉"))
# IGLOO PET
pet_igloo1 = text(emojize("Чукчу!⛄"))
pet_igloo2 = text(emojize("Привида!👻"))
# CAVE PET
pet_cave1 = text(emojize("Пані Самотність!🧘♂️"))
pet_cave2 = text(emojize("Сніговика!⛄"))
# SINGLE-PURPOSE MESSAGES:
start_message = text(emojize("Привіт!👋"))
registered_message = "Давно не бачилися!"
have_a_nice_lecture = text(emojize("Продуктивної лекції тобі! 😉"))
vote_thank_you = text(emojize("Дякую, твій голос враховано!⚖"))
FINISHED = "Сподіваюся, тобі сподобалася лекція"
LECTURE_START = "ЛЕКЦІЯ ПОЧАЛАСЯ, УАЛІВЕЦЬ ВСТАВАЙ!"
NEW_VALUE = "Прохання проголосувати за/проти затвердження нової цінності: Будь Програмістом!"
za = "За"
proty = "Проти"
# Q&A RESPONSES:
Q_and_A_welcoming_message = text(emojize("Привіт, тут ти можеш задати стільки питань, скільки забажаєш\n"
"Коли закінчиш, натисни клавішу 'Вийти'😉"))
Q_and_A_confirmation_message = text(emojize("Записав!📃"))
exit_Q = "Вийти"
Q_and_A_goodbye_message = text(emojize("Дякую.\nЗадані питання будуть доставленими 🧭"))
# TEMPLATE = text(emojize())
presence = "PRESENT09"
voter = "FOR02937"
against = "AGAINST02937"
MESSAGES = {
"start_message": start_message,
"registered_message": registered_message,
"have_a_nice_lecture": have_a_nice_lecture,
"vote_thank_you": vote_thank_you,
"FINISHED": FINISHED,
"LECTURE_START": LECTURE_START,
"NEW_VALUE": NEW_VALUE,
"za": za,
"proty": proty,
# callback queries:
"presence": presence,
"voter": voter,
"against": against,
# Q&A RESPONSES:
"Q_and_A_welcoming_message": Q_and_A_welcoming_message,
"Q_and_A_confirmation_message": Q_and_A_confirmation_message,
"exit_Q": exit_Q,
"Q_and_A_goodbye_message": Q_and_A_goodbye_message,
# QUESTIONNAIRE RESPONSES:
"questionnaire_start": questionnaire_start,
"questionnaire_floor": questionnaire_floor,
"questionnaire_room": questionnaire_room,
"questionnaire_roommates": questionnaire_roommates,
"save_button": save_button,
"questionnaire_goodbye": questionnaire_goodbye,
# QUESTIONNAIRE BUTTONS
"place1": place1,
"place2": place2,
"floor1": floor1,
"floor2": floor2,
"mountain_bot": mountain_bot,
"mountain_top": mountain_top,
"mountain_msg": mountain_msg,
}
ROOMS = {
"room1": room1,
"room2": room2,
"room3": room3,
"room4": room4,
"room5": room5,
"room6": room6,
"river": river,
"tree": tree,
"igloo": igloo,
"cave": cave,
}
PETS_AND_FRIENDS = {
"pet_river1": pet_river1,
"pet_river2": pet_river2,
"pet_tree1": pet_tree1,
"pet_tree2": pet_tree2,
"pet_igloo1": pet_igloo1,
"pet_igloo2": pet_igloo2,
"pet_cave1": pet_cave1,
"pet_cave2": pet_cave2,
"room1_friend1": room1_friend1,
"room1_friend2": room1_friend2,
"room2_friend1": room2_friend1,
"room2_friend2": room2_friend2,
"room3_friend1": room3_friend1,
"room3_friend2": room3_friend2,
"room4_friend1": room4_friend1,
"room4_friend2": room4_friend2,
"room5_friend1": room5_friend1,
"room5_friend2": room5_friend2,
"room6_friend1": room6_friend1,
"room6_friend2": room6_friend2,
}
| 31.024242
| 105
| 0.702481
|
d2b7dc577966a6258a7ba19b302661730fe6477d
| 2,038
|
py
|
Python
|
setup.py
|
andersy005/repo2apptainer
|
6ba9bda304ecb410e74d53d4124c98aaf0660a1e
|
[
"BSD-3-Clause"
] | 1
|
2022-03-16T20:12:08.000Z
|
2022-03-16T20:12:08.000Z
|
setup.py
|
andersy005/repo2apptainer
|
6ba9bda304ecb410e74d53d4124c98aaf0660a1e
|
[
"BSD-3-Clause"
] | 1
|
2022-03-16T20:13:51.000Z
|
2022-03-16T20:13:51.000Z
|
setup.py
|
andersy005/repo2apptainer
|
6ba9bda304ecb410e74d53d4124c98aaf0660a1e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""The setup script."""
from setuptools import find_packages, setup
with open('requirements.txt') as f:
INSTALL_REQUIRES = f.read().strip().split('\n')
with open('README.md', encoding='utf8') as f:
LONG_DESCRIPTION = f.read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering',
]
setup(
name='repo2apptainer',
    description='Repo2apptainer: Wrapper around repo2docker producing Jupyter-enabled Apptainer/Singularity images.',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
python_requires='>=3.8',
maintainer='Anderson Banihirwe',
classifiers=CLASSIFIERS,
url='https://repo2apptainer.readthedocs.io',
project_urls={
'Documentation': 'https://repo2apptainer.readthedocs.io',
'Source': 'https://github.com/ncar-xdev/repo2apptainer',
'Tracker': 'https://github.com/ncar-xdev/repo2apptainer/issues',
'Discussions/Support': 'https://github.com/ncar-xdev/repo2apptainer/discussions',
},
packages=find_packages(exclude=('tests',)),
include_package_data=True,
install_requires=INSTALL_REQUIRES,
license='BSD-3-Clause',
zip_safe=False,
entry_points={
'console_scripts': [
'repo2apptainer=repo2apptainer.cli:main',
'r2a=repo2apptainer.cli:main',
'repo2singularity = repo2apptainer.cli:main',
'r2s = repo2apptainer.cli:main',
]
},
keywords='reproducible science environments docker singularity apptainer jupyter',
use_scm_version={'version_scheme': 'post-release', 'local_scheme': 'dirty-tag'},
)
| 37.740741
| 127
| 0.678606
|
c5f95092a181e5f020105edb2d276a6f68c9970c
| 287
|
py
|
Python
|
security_management/config/desktop.py
|
swsiong97/FYP2
|
dd1cdacfad2c5ab8c3f413e5814f703287a77b86
|
[
"MIT"
] | null | null | null |
security_management/config/desktop.py
|
swsiong97/FYP2
|
dd1cdacfad2c5ab8c3f413e5814f703287a77b86
|
[
"MIT"
] | null | null | null |
security_management/config/desktop.py
|
swsiong97/FYP2
|
dd1cdacfad2c5ab8c3f413e5814f703287a77b86
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Security Management",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Security Management")
}
]
| 19.133333
| 44
| 0.637631
|
b12945ba640ad4a03105665c4e82e2d609d22997
| 3,171
|
py
|
Python
|
tests/test_vector.py
|
slode/triton
|
d440c510f4841348dfb9109f03858c75adf75564
|
[
"MIT"
] | null | null | null |
tests/test_vector.py
|
slode/triton
|
d440c510f4841348dfb9109f03858c75adf75564
|
[
"MIT"
] | null | null | null |
tests/test_vector.py
|
slode/triton
|
d440c510f4841348dfb9109f03858c75adf75564
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013 Stian Lode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import fixtures
from triton.vector import Vector
from triton.vector3d import Vector3d
from triton.vector2d import Vector2d
from pytest import approx
def test_addition():
a = Vector2d(3,0)
b = Vector2d(0,2)
c = a + b
assert c == [3,2]
a += b
    assert a == c
d = 1 + a
assert d == [a.x+1, a.y+1]
def test_subtraction():
a = Vector2d(3,0)
b = Vector2d(0,2)
c = a - b
assert c == [3,-2]
a -= b
    assert a == c
d = 1 - a
assert d == [1 - a.x, 1 - a.y]
def test_multiplication():
a = Vector2d(3,1)
b = Vector2d(1,2)
c = a * b
assert c == [3,2]
a *= b
    assert a == c
d = -1 * a
assert d == [-1 * a.x, -1 * a.y]
def test_division():
a = Vector2d(3.0,1.0)
b = Vector2d(1.0,2.0)
c = a / b
assert c == [3, 0.5]
a /= b
    assert a == c
d = 1 / a
assert d == [1 / a.x, 1 / a.y]
def test_length():
a = Vector2d(3,0)
b = Vector2d(0,4)
assert (a-b).length() == approx(5)
def test_perp():
a = Vector2d(1,9)
b = a.perp()
assert b == [-9, 1]
c = a.dot(b)
assert c == 0
def test_eq():
a = Vector2d(3,2)
b = Vector2d(3,2)
    assert a == b
    assert a == [3,2]
def test_normalize():
    a = Vector2d(5,2)
a.normalize()
assert a.length() == 1
def test_angle():
a = Vector2d(4,4)
b = Vector2d(4,-4)
c = b.angle_diff(a)
assert c == approx(math.pi/2)
d = b.angle_diff(b)
assert d == approx(0)
e = Vector2d(-4, -4)
f = e.angle_diff(a)
assert f == approx(math.pi)
g = a.angle_diff(e)
assert g == approx(-math.pi)
h = a.angle()
assert h == approx(math.pi/4)
def test_unit_vector():
a = Vector2d(8,9)
b = a.unit_vector()
assert b.length() == 1
a.normalize()
assert a.length() == 1
def test_cross():
a = Vector3d(-0, 10, 0)
b = Vector3d(10, 0, 0)
c = b.cross(a)
assert c == [0, 0, 100]
d = a.cross(b)
assert d == [0, 0, -100]
a = Vector2d(-0, 10)
b = Vector2d(10, 0)
c = b.cross(a)
assert c == 100
| 24.022727
| 79
| 0.602964
|
58af5c42bd86edc39f13aea8565ea27908be97a5
| 31,311
|
py
|
Python
|
tensorflow/python/kernel_tests/seq2seq_test.py
|
returncode13/tensorflow
|
c5f94b10bbb30e525fa3ca313e7ccb173040c90a
|
[
"Apache-2.0"
] | 2
|
2020-07-30T05:06:30.000Z
|
2020-08-28T05:10:49.000Z
|
tensorflow/python/kernel_tests/seq2seq_test.py
|
alainrk/tensorflow
|
314d9cd9b607460f8bfea80fc828b1521ca18443
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/kernel_tests/seq2seq_test.py
|
alainrk/tensorflow
|
314d9cd9b607460f8bfea80fc828b1521ca18443
|
[
"Apache-2.0"
] | 2
|
2018-03-14T03:10:40.000Z
|
2018-09-13T13:59:40.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
import tensorflow as tf
class Seq2SeqTest(tf.test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
_, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell1, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test when num_decoder_symbols is provided, the size of decoder output
# is num_decoder_symbols.
with tf.variable_scope("decoder_symbols_seq2seq"):
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3,
embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4,
num_heads=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.GRUCell(2)
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
dec_inp, enc_state, attn_states, cell, num_symbols=4,
embedding_size=2, output_size=3)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp_dict = {}
dec_inp_dict["0"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec_inp_dict["1"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
dec_symbols_dict = {"0": 5, "1": 6}
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
dec_inp_dict2["1"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
with tf.variable_scope("other"):
outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.test_session() as sess:
logits = [tf.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [tf.constant(i + 0.5, shape=[2, output_classes])
for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
def testModelWithBucketsScopeAndLoss(self):
"""Test that variable scope reuse is not reset after model_with_buckets."""
classes = 10
buckets = [(4, 4), (8, 8)]
with self.test_session():
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24)
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
per_example_loss=per_example_loss)
# Now we construct the copy model.
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
# Now check that we did not accidentally set reuse.
self.assertEqual(False, tf.get_variable_scope().reuse)
# Construct one more model with per-example loss.
tf.get_variable_scope().reuse_variables()
_, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
      # First loss is scalar; the second one is a 1-dimensional tensor.
self.assertEqual([], losses1[0].get_shape().as_list())
self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.test_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = tf.get_variable("proj_w", [24, classes])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
def SampledLoss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes)
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = tf.all_variables()
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = tf.gradients(losses[i], params)
grads, _ = tf.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([tf.initialize_all_variables()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length],
out[:length], o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
      an `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.test_session(graph=tf.Graph()) as sess:
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_enc_timesteps)]
dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)]
targets = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
weights = [tf.constant(1.0, shape=[batch_size])
for i in range(num_dec_timesteps)]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with tf.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
scope_name)
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
dec_op_fp_false, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(tf.initialize_all_variables())
# We only check consistencies between the variables existing in both
# the models with True and False feed_previous. Variables created by
# the loop_function in the model with True feed_previous are ignored.
v_false_name_dict = {v.name.split('/', 1)[-1]: v
for v in variables_fp_false}
matched_variables = [(v, v_false_name_dict[v.name.split('/', 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(tf.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false,
{holder: inp for holder, inp in zip(dec_inp_holder_fp_false,
dec_inp_fp_false)})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
if __name__ == "__main__":
tf.test.main()
| 46.181416
| 80
| 0.630833
|
1f7ea0638309be4c1eea3547e687c9a22e7fb544
| 229
|
py
|
Python
|
server.py
|
jia1/the-tagger
|
5daaa766f6c32a0b9ae8f824fa6eeff7c31dc6d2
|
[
"BSD-3-Clause"
] | null | null | null |
server.py
|
jia1/the-tagger
|
5daaa766f6c32a0b9ae8f824fa6eeff7c31dc6d2
|
[
"BSD-3-Clause"
] | null | null | null |
server.py
|
jia1/the-tagger
|
5daaa766f6c32a0b9ae8f824fa6eeff7c31dc6d2
|
[
"BSD-3-Clause"
] | null | null | null |
from os import environ
from gevent.pywsgi import WSGIServer
from app import app
if __name__ == '__main__':
port = int(environ.get('PORT', 5000))
http_server = WSGIServer(('', port), app)
http_server.serve_forever()
| 22.9
| 45
| 0.707424
|
b3547a06ba49d8157815eef1c9b9c5d5d9c2d63b
| 3,434
|
py
|
Python
|
torchrec/metrics/ctr.py
|
huaxz1986/torchrec
|
1975ee0a8f2b12c89be753a1477bf4a3d4b005bd
|
[
"BSD-3-Clause"
] | 814
|
2022-02-23T17:24:14.000Z
|
2022-03-31T16:52:23.000Z
|
torchrec/metrics/ctr.py
|
huaxz1986/torchrec
|
1975ee0a8f2b12c89be753a1477bf4a3d4b005bd
|
[
"BSD-3-Clause"
] | 89
|
2022-02-23T17:29:56.000Z
|
2022-03-31T23:44:13.000Z
|
torchrec/metrics/ctr.py
|
huaxz1986/torchrec
|
1975ee0a8f2b12c89be753a1477bf4a3d4b005bd
|
[
"BSD-3-Clause"
] | 68
|
2022-02-23T17:42:17.000Z
|
2022-03-28T06:39:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
CTR_NUM = "ctr_num"
CTR_DENOM = "ctr_denom"
def compute_ctr(ctr_num: torch.Tensor, ctr_denom: torch.Tensor) -> torch.Tensor:
return torch.where(ctr_denom == 0.0, 0.0, ctr_num / ctr_denom).double()
def get_ctr_states(
labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
return {
CTR_NUM: torch.sum(labels * weights, dim=-1),
CTR_DENOM: torch.sum(weights, dim=-1),
}
class CTRMetricComputation(RecMetricComputation):
r"""
    This class implements the RecMetricComputation for CTR, i.e. click-through rate,
    which is the weighted ratio of positive (clicked) examples to the total examples.
    The constructor arguments are defined in RecMetricComputation.
See the docstring of RecMetricComputation for more detail.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._add_state(
CTR_NUM,
torch.zeros(self._n_tasks, dtype=torch.double),
add_window_state=True,
dist_reduce_fx="sum",
persistent=True,
)
self._add_state(
CTR_DENOM,
torch.zeros(self._n_tasks, dtype=torch.double),
add_window_state=True,
dist_reduce_fx="sum",
persistent=True,
)
def update(
self,
*,
predictions: Optional[torch.Tensor],
labels: torch.Tensor,
weights: Optional[torch.Tensor]
) -> None:
if predictions is None or weights is None:
raise RecMetricException(
"Inputs 'predictions' and 'weights' should not be None for CTRMetricComputation update"
)
num_samples = predictions.shape[-1]
for state_name, state_value in get_ctr_states(
labels, predictions, weights
).items():
state = getattr(self, state_name)
state += state_value
self._aggregate_window_state(state_name, state_value, num_samples)
def _compute(self) -> List[MetricComputationReport]:
return [
MetricComputationReport(
name=MetricName.CTR,
metric_prefix=MetricPrefix.LIFETIME,
value=compute_ctr(
cast(torch.Tensor, self.ctr_num),
cast(torch.Tensor, self.ctr_denom),
),
),
MetricComputationReport(
name=MetricName.CTR,
metric_prefix=MetricPrefix.WINDOW,
value=compute_ctr(
self.get_window_state(CTR_NUM),
self.get_window_state(CTR_DENOM),
),
),
]
class CTRMetric(RecMetric):
_namespace: MetricNamespace = MetricNamespace.CTR
_computation_class: Type[RecMetricComputation] = CTRMetricComputation
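# A minimal worked example (illustrative tensors, not part of the original module): the
# lifetime CTR is sum(labels * weights) / sum(weights) per task, so the values below
# yield (1*1 + 1*2) / (1 + 1 + 2 + 1) = 0.6.
def _ctr_example() -> torch.Tensor:
    labels = torch.tensor([[1.0, 0.0, 1.0, 0.0]])  # clicks for a single task, 4 examples
    weights = torch.tensor([[1.0, 1.0, 2.0, 1.0]])  # per-example weights
    states = get_ctr_states(labels, labels, weights)  # predictions are not used by the CTR states
    return compute_ctr(states[CTR_NUM], states[CTR_DENOM])  # -> tensor([0.6000], dtype=torch.float64)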
| 32.704762
| 103
| 0.630169
|
57c0c528c2a690cdfa0d42522548995d33429e29
| 41
|
py
|
Python
|
miRNASNP3/miRNASNP3/wsgi.py
|
chunjie-sam-liu/miRNASNP-v3
|
41fab95b496b639674010863895547db0fc108bc
|
[
"MIT"
] | 1
|
2020-07-02T08:51:37.000Z
|
2020-07-02T08:51:37.000Z
|
miRNASNP3/miRNASNP3/wsgi.py
|
chunjie-sam-liu/miRNASNP-v3
|
41fab95b496b639674010863895547db0fc108bc
|
[
"MIT"
] | null | null | null |
miRNASNP3/miRNASNP3/wsgi.py
|
chunjie-sam-liu/miRNASNP-v3
|
41fab95b496b639674010863895547db0fc108bc
|
[
"MIT"
] | null | null | null |
from miRNASNP3 import app as application
| 20.5
| 40
| 0.853659
|
88c7dc3462e46c6ddc52039e75aa15ae28ef4e6d
| 12,143
|
py
|
Python
|
mushr_rhc_ros/src/utils/utils.py
|
rogeriobonatti/mushr_rhc
|
8316cad6544997c1742cc5f5b539f5886eb00e7f
|
[
"BSD-3-Clause"
] | null | null | null |
mushr_rhc_ros/src/utils/utils.py
|
rogeriobonatti/mushr_rhc
|
8316cad6544997c1742cc5f5b539f5886eb00e7f
|
[
"BSD-3-Clause"
] | null | null | null |
mushr_rhc_ros/src/utils/utils.py
|
rogeriobonatti/mushr_rhc
|
8316cad6544997c1742cc5f5b539f5886eb00e7f
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import numpy as np
import pickle
import os
from datetime import datetime
import glob
import re
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from collections import deque
from tqdm import tqdm
from torchvision import transforms
import numbers
import cv2
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
import math
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise ValueError('Boolean value expected, got %r' % v)
class ResizeFlowNP(object):
"""Resize the np array and scale the value
"""
def __init__(self, size, scale_flow=True):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.scale_flow = scale_flow
def __call__(self, sample):
th, tw = self.size
h, w = sample.shape[0], sample.shape[1]
sample = cv2.resize(sample, (tw,th), interpolation=cv2.INTER_LINEAR)
if self.scale_flow:
sample[:,:,0] = sample[:,:,0] * (float(tw)/float(w))
sample[:,:,1] = sample[:,:,1] * (float(th)/float(h))
return sample
#def save_checkpoint(state, is_best=False, model_path='models/', keep_all=False):
# if is_best:
# past_best = glob.glob(os.path.join(model_path, 'model_best_*.pth.tar'))
# if not keep_all:
# for i in past_best:
# try: os.remove(i)
# except: pass
# torch.save(state, os.path.join(model_path, 'model_best_epoch%s.pth.tar' % str(state['epoch'])))
# else:
# if not keep_all:
# past_last = glob.glob(os.path.join(model_path, 'model_last_*.pth.tar'))
# for i in past_last:
# try: os.remove(i)
# except: pass
# torch.save(state, os.path.join(model_path, 'model_last_epoch%s.pth.tar' % str(state['epoch'])))
def save_checkpoint(state, is_best=0, gap=1, filename='models/checkpoint.pth.tar', keep_all=False):
torch.save(state, filename)
last_epoch_path = os.path.join(os.path.dirname(filename),
'epoch%s.pth.tar' % str(state['epoch']-gap))
if not keep_all:
try: os.remove(last_epoch_path)
except: pass
if is_best:
past_best = glob.glob(os.path.join(os.path.dirname(filename), 'model_best_*.pth.tar'))
for i in past_best:
try: os.remove(i)
except: pass
torch.save(state, os.path.join(os.path.dirname(filename), 'model_best_epoch%s.pth.tar' % str(state['epoch'])))
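# Usage sketch (hypothetical variables): called once per epoch; the previous
# 'epoch<N-gap>' file is removed unless keep_all is set, and the best model is
# additionally written as 'model_best_epoch<N>.pth.tar'.
#
#   state = {'epoch': epoch, 'state_dict': model.state_dict(),
#            'optimizer': optimizer.state_dict()}
#   save_checkpoint(state, is_best=(val_acc > best_acc), gap=1,
#                   filename='models/epoch%d.pth.tar' % epoch)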
def write_log(content, epoch, filename):
if not os.path.exists(filename):
log_file = open(filename, 'w')
else:
log_file = open(filename, 'a')
log_file.write('## Epoch %d:\n' % epoch)
log_file.write('time: %s\n' % str(datetime.now()))
log_file.write(content + '\n\n')
log_file.close()
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
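# Note: create_window and _ssim are not defined in this file; they are assumed to
# be the standard pytorch-ssim helpers (a Gaussian window builder and the windowed
# SSIM computation) provided elsewhere in the project.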
class Logger(object):
    '''Append timestamped lines to a plain-text log file.'''
def __init__(self, path):
self.birth_time = datetime.now()
filepath = path + "/" + self.birth_time.strftime('%Y-%m-%d %H_%M_%S')+'.log'
self.filepath = filepath
with open(filepath, 'a') as f:
f.write(self.birth_time.strftime('%Y-%m-%d %H_%M_%S')+'\n')
def log(self, string):
with open(self.filepath, 'a') as f:
time_stamp = datetime.now() - self.birth_time
f.write(strfdelta(time_stamp,"{d}-{h:02d}:{m:02d}:{s:02d}")+'\t'+string+'\n')
def calc_topk_accuracy(output, target, topk=(1,)):
'''
Modified from: https://gist.github.com/agermanidis/275b23ad7a10ee89adccf021536bb97e
Given predicted and ground truth labels,
calculate top-k accuracies.
'''
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
#correct_k = correct[:k].view(-1).float().sum(0)
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(1 / batch_size))
return res
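# Example (illustrative): with logits of shape (B, num_classes) and integer targets
# of shape (B,), the function returns one accuracy tensor per requested k.
#
#   logits = torch.randn(8, 10)
#   targets = torch.randint(0, 10, (8,))
#   top1, top5 = calc_topk_accuracy(logits, targets, topk=(1, 5))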
def calc_accuracy(output, target):
'''output: (B, N); target: (B)'''
target = target.squeeze()
_, pred = torch.max(output, 1)
return torch.mean((pred == target).float())
def denorm(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
assert len(mean)==len(std)==3
inv_mean = [-mean[i]/std[i] for i in range(3)]
inv_std = [1/i for i in std]
return transforms.Normalize(mean=inv_mean, std=inv_std)
def neq_load_customized(model, pretrained_dict):
    ''' Load a pre-trained state dict into a model whose architecture has been
    partially modified, keeping only the weights whose names still match. '''
model_dict = model.state_dict()
tmp = {}
print('\n=======Check Weights Loading======')
print('Weights not used from pretrained file:')
for k, v in pretrained_dict.items():
if k in model_dict:
tmp[k] = v
else:
print(k)
print('---------------------------')
print('Weights not loaded into new model:')
for k, v in model_dict.items():
if k not in pretrained_dict:
print(k)
print('===================================\n')
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
del pretrained_dict
model_dict.update(tmp)
del tmp
model.load_state_dict(model_dict)
return model
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.local_history = deque([])
self.local_avg = 0
self.history = []
self.dict = {} # save all data values here
self.save_dict = {} # save mean and std here, for summary table
def update(self, val, n=1, history=0, step=5):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if history:
self.history.append(val)
if step > 0:
self.local_history.append(val)
if len(self.local_history) > step:
self.local_history.popleft()
self.local_avg = np.average(self.local_history)
def dict_update(self, val, key):
if key in self.dict.keys():
self.dict[key].append(val)
else:
self.dict[key] = [val]
def __len__(self):
return self.count
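# Usage sketch (illustrative): track a running loss; `val` is the latest value,
# `avg` the running mean, and `local_avg` the mean over the last `step` updates.
#
#   losses = AverageMeter()
#   for batch_loss in (0.9, 0.7, 0.5):
#       losses.update(batch_loss, n=1, history=1, step=5)
#   print(losses.avg, losses.local_avg)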
class AccuracyTable(object):
'''compute accuracy for each class'''
def __init__(self):
self.dict = {}
def update(self, pred, tar):
pred = torch.squeeze(pred)
tar = torch.squeeze(tar)
for i, j in zip(pred, tar):
i = int(i)
j = int(j)
if j not in self.dict.keys():
self.dict[j] = {'count':0,'correct':0}
self.dict[j]['count'] += 1
if i == j:
self.dict[j]['correct'] += 1
def print_table(self, label):
for key in self.dict.keys():
acc = self.dict[key]['correct'] / self.dict[key]['count']
print('%s: %2d, accuracy: %3d/%3d = %0.6f' \
% (label, key, self.dict[key]['correct'], self.dict[key]['count'], acc))
class ConfusionMeter(object):
'''compute and show confusion matrix'''
def __init__(self, num_class):
self.num_class = num_class
self.mat = np.zeros((num_class, num_class))
self.precision = []
self.recall = []
def update(self, pred, tar):
pred, tar = pred.cpu().numpy(), tar.cpu().numpy()
pred = np.squeeze(pred)
tar = np.squeeze(tar)
for p,t in zip(pred.flat, tar.flat):
self.mat[p][t] += 1
def print_mat(self):
print('Confusion Matrix: (target in columns)')
print(self.mat)
def plot_mat(self, path, dictionary=None, annotate=False):
plt.figure(dpi=600)
plt.imshow(self.mat,
cmap=plt.cm.jet,
interpolation=None,
extent=(0.5, np.shape(self.mat)[0]+0.5, np.shape(self.mat)[1]+0.5, 0.5))
width, height = self.mat.shape
if annotate:
for x in range(width):
for y in range(height):
plt.annotate(str(int(self.mat[x][y])), xy=(y+1, x+1),
horizontalalignment='center',
verticalalignment='center',
fontsize=8)
if dictionary is not None:
plt.xticks([i+1 for i in range(width)],
[dictionary[i] for i in range(width)],
rotation='vertical')
plt.yticks([i+1 for i in range(height)],
[dictionary[i] for i in range(height)])
plt.xlabel('Ground Truth')
plt.ylabel('Prediction')
plt.colorbar()
plt.tight_layout()
plt.savefig(path, format='svg')
plt.clf()
for i in range(width):
if np.sum(self.mat[i,:]) != 0:
self.precision.append(self.mat[i,i] / np.sum(self.mat[i,:]))
if np.sum(self.mat[:,i]) != 0:
self.recall.append(self.mat[i,i] / np.sum(self.mat[:,i]))
print('Average Precision: %0.4f' % np.mean(self.precision))
print('Average Recall: %0.4f' % np.mean(self.recall))
def MultiStepLR_Restart_Multiplier(epoch, gamma=0.1, step=[10,15,20], repeat=3):
'''return the multipier for LambdaLR,
0 <= ep < 10: gamma^0
10 <= ep < 15: gamma^1
15 <= ep < 20: gamma^2
20 <= ep < 30: gamma^0 ... repeat 3 cycles and then keep gamma^2'''
max_step = max(step)
#print('max_step', max_step, epoch)
effective_epoch = epoch % max_step
if epoch // max_step >= repeat:
exp = len(step) - 1
else:
exp = len([i for i in step if effective_epoch>=i])
return gamma ** exp
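# Usage sketch (hypothetical optimizer/model): the multiplier is intended for
# LambdaLR, giving a step schedule that restarts every max(step) epochs for
# `repeat` cycles and then stays at the final gamma power.
#
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   scheduler = LambdaLR(optimizer, lr_lambda=lambda ep: MultiStepLR_Restart_Multiplier(
#       ep, gamma=0.1, step=[10, 15, 20], repeat=3))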
def strfdelta(tdelta, fmt):
d = {"d": tdelta.days}
d["h"], rem = divmod(tdelta.seconds, 3600)
d["m"], d["s"] = divmod(rem, 60)
return fmt.format(**d)
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
if args.cos: # cosine lr schedule
lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
else: # stepwise lr schedule
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def warmup_schedule(optimizer, num_warmup_steps, last_epoch=-1):
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
else:
return 1.0
return LambdaLR(optimizer, lr_lambda, last_epoch)
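# Usage sketch (hypothetical training loop): linear warm-up from 0 to the base
# learning rate over num_warmup_steps, then a constant multiplier of 1.
#
#   scheduler = warmup_schedule(optimizer, num_warmup_steps=500)
#   for step in range(total_steps):
#       optimizer.step()
#       scheduler.step()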
| 33.918994
| 118
| 0.575146
|
04dac8a52ec4cb9223eff83bae00cb4a1da30c00
| 416
|
py
|
Python
|
nxos_config_import/migrations/0010_objectconfigurationstatus_post_url.py
|
j-sulliman/acici
|
25f20bf2cdc1ceb1b4752ef0608e9628035912d3
|
[
"BSD-3-Clause"
] | 4
|
2019-07-20T11:37:32.000Z
|
2020-02-03T07:09:12.000Z
|
nxos_config_import/migrations/0010_objectconfigurationstatus_post_url.py
|
j-sulliman/acici
|
25f20bf2cdc1ceb1b4752ef0608e9628035912d3
|
[
"BSD-3-Clause"
] | 8
|
2019-12-04T23:46:30.000Z
|
2021-06-10T18:30:30.000Z
|
nxos_config_import/migrations/0010_objectconfigurationstatus_post_url.py
|
j-sulliman/acici
|
25f20bf2cdc1ceb1b4752ef0608e9628035912d3
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-22 01:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nxos_config_import', '0009_auto_20190322_1408'),
]
operations = [
migrations.AddField(
model_name='objectconfigurationstatus',
name='post_url',
field=models.URLField(default='none'),
),
]
| 21.894737
| 58
| 0.622596
|
aea93112148420de46d0b46b760952c0a6b7f4b3
| 2,619
|
py
|
Python
|
tests/unit/test_vendor.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 6,263
|
2017-01-20T17:41:36.000Z
|
2022-02-15T20:48:57.000Z
|
tests/unit/test_vendor.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 1,100
|
2017-01-20T19:41:52.000Z
|
2017-12-06T09:15:13.000Z
|
tests/unit/test_vendor.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 366
|
2017-01-21T10:06:52.000Z
|
2021-11-25T17:09:19.000Z
|
# -*- coding: utf-8 -*-
# We need to import the patched packages directly from sys.path, so the
# identity checks can pass.
import pipenv # noqa
import datetime
import os
import pytest
import pytz
import tomlkit
from pipfile.api import PipfileParser
class TestPipfileParser:
def test_inject_environment_variables(self):
os.environ['PYTEST_PIPFILE_TEST'] = "XYZ"
p = PipfileParser()
parsed_dict = p.inject_environment_variables({
"a_string": "https://$PYTEST_PIPFILE_TEST@something.com",
"another_string": "https://${PYTEST_PIPFILE_TEST}@something.com",
"nested": {
"a_string": "https://$PYTEST_PIPFILE_TEST@something.com",
"another_string": "${PYTEST_PIPFILE_TEST}",
},
"list": [
{
"a_string": "https://$PYTEST_PIPFILE_TEST@something.com",
"another_string": "${PYTEST_PIPFILE_TEST}"
},
{},
],
"bool": True,
"none": None,
})
assert parsed_dict["a_string"] == "https://XYZ@something.com"
assert parsed_dict["another_string"] == "https://XYZ@something.com"
assert parsed_dict["nested"]["another_string"] == "XYZ"
assert parsed_dict["list"][0]["a_string"] == "https://XYZ@something.com"
assert parsed_dict["list"][1] == {}
assert parsed_dict["bool"] is True
assert parsed_dict["none"] is None
@pytest.mark.parametrize('dt, content', [
( # Date.
datetime.date(1992, 8, 19),
'1992-08-19',
),
( # Naive time.
datetime.time(15, 10),
'15:10:00',
),
( # Aware time in UTC.
datetime.time(15, 10, tzinfo=pytz.UTC),
'15:10:00+00:00',
),
( # Aware local time.
datetime.time(15, 10, tzinfo=pytz.FixedOffset(8 * 60)),
'15:10:00+08:00',
),
( # Naive datetime.
datetime.datetime(1992, 8, 19, 15, 10),
'1992-08-19T15:10:00',
),
( # Aware datetime in UTC.
datetime.datetime(1992, 8, 19, 15, 10, tzinfo=pytz.UTC),
'1992-08-19T15:10:00Z',
),
( # Aware local datetime.
datetime.datetime(1992, 8, 19, 15, 10, tzinfo=pytz.FixedOffset(8 * 60)),
'1992-08-19T15:10:00+08:00',
),
])
def test_token_date(dt, content):
item = tomlkit.item(dt)
assert item.as_string() == content
def test_dump_nonascii_string():
content = u'name = "Stažené"\n'
toml_content = tomlkit.dumps(tomlkit.loads(content))
assert toml_content == content
| 29.761364
| 80
| 0.568156
|
22e29be30bd1cd510331860b36fc76f97239c129
| 1,953
|
py
|
Python
|
pyro_models/arm/electric_inter.py
|
jpchen/pyro-models
|
b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b
|
[
"Apache-2.0"
] | 30
|
2019-02-22T03:03:18.000Z
|
2022-01-22T15:57:37.000Z
|
pyro_models/arm/electric_inter.py
|
jpchen/pyro-models
|
b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b
|
[
"Apache-2.0"
] | 7
|
2019-02-26T18:28:57.000Z
|
2021-06-11T17:21:06.000Z
|
pyro_models/arm/electric_inter.py
|
jpchen/pyro-models
|
b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b
|
[
"Apache-2.0"
] | 8
|
2019-02-25T22:06:14.000Z
|
2022-02-18T23:19:49.000Z
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.9/electric_inter.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'N' in data, 'variable not found in data: key=N'
assert 'post_test' in data, 'variable not found in data: key=post_test'
assert 'treatment' in data, 'variable not found in data: key=treatment'
assert 'pre_test' in data, 'variable not found in data: key=pre_test'
# initialize data
N = data["N"]
post_test = data["post_test"]
treatment = data["treatment"]
pre_test = data["pre_test"]
def transformed_data(data):
# initialize data
N = data["N"]
post_test = data["post_test"]
treatment = data["treatment"]
pre_test = data["pre_test"]
inter = treatment * pre_test
data["inter"] = inter
def init_params(data):
params = {}
# initialize data
N = data["N"]
post_test = data["post_test"]
treatment = data["treatment"]
pre_test = data["pre_test"]
# initialize transformed data
# assign init values for parameters
params["beta"] = init_vector("beta", dims=(4)) # vector
params["sigma"] = pyro.sample("sigma", dist.Uniform(0., 100.))
return params
def model(data, params):
# initialize data
N = data["N"]
post_test = data["post_test"]
treatment = data["treatment"]
pre_test = data["pre_test"]
# initialize transformed data
inter = data["inter"]
# init parameters
beta = params["beta"]
sigma = params["sigma"]
# initialize transformed parameters
# model block
with pyro.plate("data", N):
pyro.sample('post_test', dist.Normal(beta[0] + beta[1] * treatment + beta[2] * pre_test + beta[3] * inter, sigma), obs=post_test)
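# Usage sketch (toy data, illustrative only): the helpers are meant to be called
# in order -- validate, transform, init params, then model.
#
#   data = {"N": 3,
#           "post_test": torch.tensor([55., 60., 65.]),
#           "treatment": torch.tensor([0., 1., 1.]),
#           "pre_test": torch.tensor([50., 52., 58.])}
#   validate_data_def(data)
#   transformed_data(data)   # adds data["inter"] = treatment * pre_test
#   params = init_params(data)
#   model(data, params)      # scores post_test under the linear-interaction model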
| 29.590909
| 137
| 0.661546
|
ba5a7df1e415465a86688d012b7d9ff21908abca
| 5,126
|
py
|
Python
|
docs/conf.py
|
medlab/oum
|
29e0c90cca1a3c40f6dd6ced93edb06c06633c10
|
[
"BSD-3-Clause"
] | 2
|
2018-07-09T15:52:42.000Z
|
2018-08-17T19:34:02.000Z
|
docs/conf.py
|
medlab/oum
|
29e0c90cca1a3c40f6dd6ced93edb06c06633c10
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
medlab/oum
|
29e0c90cca1a3c40f6dd6ced93edb06c06633c10
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'oum'
copyright = '2018, Cong Zhang'
author = 'Cong Zhang'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'oumdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'oum.tex', 'oum Documentation',
'Cong Zhang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'oum', 'oum Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'oum', 'oum Documentation',
author, 'oum', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 30.152941
| 79
| 0.639875
|
fa96b13665ac46d76b641a24dff69d6663bf6c13
| 13,409
|
py
|
Python
|
sdk/python/pulumi_google_native/compute/v1/target_vpn_gateway.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/compute/v1/target_vpn_gateway.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/compute/v1/target_vpn_gateway.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['TargetVpnGatewayArgs', 'TargetVpnGateway']
@pulumi.input_type
class TargetVpnGatewayArgs:
def __init__(__self__, *,
region: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a TargetVpnGateway resource.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[str] network: URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.
"""
pulumi.set(__self__, "region", region)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if network is not None:
pulumi.set(__self__, "network", network)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
@property
@pulumi.getter
def region(self) -> pulumi.Input[str]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: pulumi.Input[str]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
class TargetVpnGateway(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a target VPN gateway in the specified project and region using the data included in the request.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[str] network: URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TargetVpnGatewayArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a target VPN gateway in the specified project and region using the data included in the request.
:param str resource_name: The name of the resource.
:param TargetVpnGatewayArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TargetVpnGatewayArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TargetVpnGatewayArgs.__new__(TargetVpnGatewayArgs)
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
__props__.__dict__["network"] = network
__props__.__dict__["project"] = project
if region is None and not opts.urn:
raise TypeError("Missing required property 'region'")
__props__.__dict__["region"] = region
__props__.__dict__["request_id"] = request_id
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["forwarding_rules"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["status"] = None
__props__.__dict__["tunnels"] = None
super(TargetVpnGateway, __self__).__init__(
'google-native:compute/v1:TargetVpnGateway',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TargetVpnGateway':
"""
Get an existing TargetVpnGateway resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TargetVpnGatewayArgs.__new__(TargetVpnGatewayArgs)
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["description"] = None
__props__.__dict__["forwarding_rules"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network"] = None
__props__.__dict__["region"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["status"] = None
__props__.__dict__["tunnels"] = None
return TargetVpnGateway(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="forwardingRules")
def forwarding_rules(self) -> pulumi.Output[Sequence[str]]:
"""
A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway.
"""
return pulumi.get(self, "forwarding_rules")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Type of resource. Always compute#targetVpnGateway for target VPN gateways.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def network(self) -> pulumi.Output[str]:
"""
URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.
"""
return pulumi.get(self, "network")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
URL of the region where the target VPN gateway resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the VPN gateway, which can be one of the following: CREATING, READY, FAILED, or DELETING.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tunnels(self) -> pulumi.Output[Sequence[str]]:
"""
A list of URLs to VpnTunnel resources. VpnTunnels are created using the compute.vpntunnels.insert method and associated with a VPN gateway.
"""
return pulumi.get(self, "tunnels")
| 45.454237
| 475
| 0.648147
|
06c617626e6bd7977fda0a88cba35b101388aec8
| 319
|
py
|
Python
|
setup.py
|
eplouf/sleeplib
|
d54e7e4a7f4196f5b7ac59e4230232fc7fe5978f
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
eplouf/sleeplib
|
d54e7e4a7f4196f5b7ac59e4230232fc7fe5978f
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
eplouf/sleeplib
|
d54e7e4a7f4196f5b7ac59e4230232fc7fe5978f
|
[
"BSD-3-Clause"
] | null | null | null |
from distutils.core import setup
import sleeplib
setup(name='sleeplib',
version='1.0',
description='Python library to access Philips SmartSleep api',
author='Kilian Hart',
author_email='dev@afturgurluk.net',
url='https://github.com/eplouf/sleeplib',
packages=['sleeplib'],
)
| 22.785714
| 68
| 0.664577
|
c56bb4bb0a4a6c87e07a59d62fada4257958049e
| 3,657
|
py
|
Python
|
intersight/models/feedback_feedback_data_ref.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/feedback_feedback_data_ref.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/feedback_feedback_data_ref.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FeedbackFeedbackDataRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType'
}
def __init__(self, moid=None, object_type=None):
"""
FeedbackFeedbackDataRef - a model defined in Swagger
"""
self._moid = None
self._object_type = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
@property
def moid(self):
"""
Gets the moid of this FeedbackFeedbackDataRef.
:return: The moid of this FeedbackFeedbackDataRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this FeedbackFeedbackDataRef.
:param moid: The moid of this FeedbackFeedbackDataRef.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this FeedbackFeedbackDataRef.
:return: The object_type of this FeedbackFeedbackDataRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this FeedbackFeedbackDataRef.
:param object_type: The object_type of this FeedbackFeedbackDataRef.
:type: str
"""
self._object_type = object_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FeedbackFeedbackDataRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
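# Usage sketch (hypothetical values): the generated model exposes simple
# getters/setters plus dict/str helpers.
#
#   ref = FeedbackFeedbackDataRef(moid="12345", object_type="feedback.FeedbackData")
#   ref.to_dict()   # {'moid': '12345', 'object_type': 'feedback.FeedbackData'}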
| 24.38
| 77
| 0.547443
|
9a6f7546d3dea4cfea41f287d157b3de21714406
| 34,154
|
py
|
Python
|
lib/honcore/client.py
|
keabard/KeaBot
|
b4c39c88a11b171738d3623a9cd234e2e4342b7d
|
[
"Unlicense"
] | 1
|
2018-09-20T02:16:50.000Z
|
2018-09-20T02:16:50.000Z
|
lib/honcore/client.py
|
keabard/KeaBot
|
b4c39c88a11b171738d3623a9cd234e2e4342b7d
|
[
"Unlicense"
] | null | null | null |
lib/honcore/client.py
|
keabard/KeaBot
|
b4c39c88a11b171738d3623a9cd234e2e4342b7d
|
[
"Unlicense"
] | null | null | null |
"""
HoNCore. Python library providing connectivity and functionality
with HoN's chat server.
"""
import sys, struct, socket, time
import deserialise, common
from common import Channel
from requester import Requester
from networking import ChatSocket, GameSocket
from utils import ping_server
from constants import *
from exceptions import *
__all__ = ['HoNClient']
_config_defaults = {
"chatport" : 11031,
"protocol" : 21,
"invis" : False,
}
class HoNClient(object):
def __init__(self):
self.config = _config_defaults
self.__events = {}
self.__game_events = {}
self.__create_events()
self.__setup_events()
self.__chat_socket = ChatSocket(self.__events)
self.__game_socket = GameSocket(self.__game_events)
self.__listener = None
self.__requester = Requester()
self.account = None
self.__channels = {}
self.__users = {}
def __create_events(self):
""" Create each event that can be triggered by the client.
As more packets are reverse engineered they should be added here so that
the client can handle them.
"""
# Chat events
self.__events[HON_SC_AUTH_ACCEPTED] = Event("Auth Accepted", HON_SC_AUTH_ACCEPTED)
self.__events[HON_SC_PING] = Event("Ping", HON_SC_PING)
self.__events[HON_SC_CHANNEL_MSG] = Event("Channel Message", HON_SC_CHANNEL_MSG)
self.__events[HON_SC_JOINED_CHANNEL] = Event("Join Channel", HON_SC_JOINED_CHANNEL)
self.__events[HON_SC_ENTERED_CHANNEL] = Event("Entered Channel", HON_SC_ENTERED_CHANNEL)
self.__events[HON_SC_LEFT_CHANNEL] = Event("Left Channel", HON_SC_LEFT_CHANNEL)
self.__events[HON_SC_WHISPER] = Event("Whisper", HON_SC_WHISPER)
self.__events[HON_SC_PM] = Event("Private Message", HON_SC_PM)
self.__events[HON_SC_MESSAGE_ALL] = Event("Server Message", HON_SC_MESSAGE_ALL)
self.__events[HON_SC_TOTAL_ONLINE] = Event("Total Online", HON_SC_TOTAL_ONLINE)
self.__events[HON_SC_GAME_INVITE] = Event("Game Invite", HON_SC_GAME_INVITE)
self.__events[HON_SC_PACKET_RECV] = Event("Packet Received", HON_SC_PACKET_RECV)
self.__events[HON_SC_REQUEST_NOTIFICATION] = Event("Buddy invite received", HON_SC_REQUEST_NOTIFICATION)
# Game events
self.__game_events[HON_GSC_AUTH_ACCEPTED] = Event("Game Auth Accepted", HON_GSC_AUTH_ACCEPTED)
self.__game_events[HON_GSC_PING] = Event("Game Ping", HON_GSC_PING)
self.__game_events[HON_GSC_PACKET_RECV] = Event("Game Packet Received", HON_GSC_PACKET_RECV)
self.__game_events[HON_GSC_TIMEOUT] = Event("Game Server Timeout", HON_GSC_TIMEOUT)
self.__game_events[HON_GSC_SERVER_STATE] = Event("Game Server State", HON_GSC_SERVER_STATE)
self.__game_events[HON_GSC_SERVER_INFO] = Event("Game Server INFO", HON_GSC_SERVER_INFO)
def __setup_events(self):
""" Transparent handling of some data is needed so that the client
can track things such as users and channels.
"""
# Chat events
self.connect_event(HON_SC_JOINED_CHANNEL, self.__on_joined_channel, priority=1)
self.connect_event(HON_SC_ENTERED_CHANNEL, self.__on_entered_channel, priority=1)
self.connect_event(HON_SC_LEFT_CHANNEL, self.__on_left_channel, priority=1)
# Game events
self.connect_game_event(HON_GSC_TIMEOUT, self.__on_game_timeout, priority=1)
def __on_initial_statuses(self, users):
""" Sets the status and flags for each user. """
for account_id in users:
if account_id in self.__users:
user = self.__users[account_id]
user.status = users[account_id]['status']
user.flags = users[account_id]['flags']
def __on_joined_channel(self, channel_name, channel_id, topic, operators, users):
""" Channel names, channel ids, user nicks and user account ids need to be
contained in a hash table/dict so they can be looked up later when needed.
"""
channel = Channel(channel_id, channel_name, topic, operators, users)
self.__channels[channel_id] = channel
for user in users:
if user.account_id not in self.__users:
self.__users[user.account_id] = user
def __on_entered_channel(self, channel_id, user):
""" Transparently add the id and nick of the user who entered the channel to
the users dictionary.
"""
if user.account_id not in self.__users:
self.__users[user.account_id] = user
channel = self.__channels[channel_id]
if user not in channel.users:
channel.users.append(user)
def __on_left_channel(self, channel_id, user_id):
""" Transparently remove the id and nick of the user who left the channel to
the users dictionary.
"""
channel = self.__channels[channel_id]
for user in channel.users:
if user.account_id == user_id:
channel.users.remove(user)
break
if user.account_id in self.__users:
self.__users.pop(user.account_id)
print 'User %s left channel %s'%(user.nickname, channel.name)
def __on_game_timeout(self):
""" Handle the game server timeout gently.
"""
print 'Game server timed out, closing connection...'
self.account.game_session_key = None
self.account.game_ip = None
self.account.game_port = None
self.account.game_host_id = None
self.account.acc_key = None
self.account.acc_key_hash = None
self._game_disconnect()
def _configure(self, *args, **kwargs):
""" Set up some configuration for the client and the requester.
        The requester configuration is not really needed, but just in case
it does change in the future.
"""
config_map = {
"chatport" : self.config,
"protocol" : self.config,
"invis" : self.config,
"masterserver" : self.__requester.config,
"basicserver" : self.__requester.config,
"honver" : self.__requester.config
}
for kwarg in kwargs:
if kwarg in config_map:
config_map[kwarg][kwarg] = kwargs[kwarg]
""" Master server related functions. """
def _login(self, username, password):
""" HTTP login request to the master server.
Catches the following:
* Failed to get login data after 3 attempts.
* Could not connect to the masterserver.
* Could not obtain login data
* Incorrect username/password
"""
attempts = 1
while True:
try:
response = self.__requester.login(username, password)
break
except MasterServerError:
if attempts == 3:
raise # Re-raise the last exception given
timeout = pow(2, attempts)
time.sleep(timeout)
attempts += 1
if response == None:
raise MasterServerError(100)
elif response == "":
raise MasterServerError(101)
# Pass the data to the deserialiser
try:
self.account, new_users = deserialise.parse(response)
self.account.logged_in = True
except MasterServerError:
raise MasterServerError(101)
for user in new_users:
self.__users[user.account_id] = user
return True
def _logout(self):
""" Send a logout request to the masterserver and log out the account.
Is forcing the logout okay? Breaking the connection to the chat server technically
logs the user out... What is the effect of sending the logout request to the masterserver?
TODO: Fail cases, handle them!
* Connection timed out
* Connection refused.
"""
if self.account == None:
return
if not self.account.cookie:
self.account.logged_in = False
else:
attempts = 0
while True:
try:
self.__requester.logout(self.account.cookie)
self.account.logged_in = False
break
except MasterServerError, e:
if attempts == 3:
# Force the logout and raise the error
self.account.logged_in = False
raise # Re-raise the last exception given
break
timeout = pow(2, attempts)
time.sleep(timeout)
attempts += 1
""" Chatserver related functions"""
def _chat_connect(self):
""" Sends the initial authentication request to the chatserver via the chat socket object.
Ensures the user information required for authentication is available, otherwise raises
a ChatServerError #205 (No cookie/auth hash provided)
If for some reason a ChatSocket does not exist then one is created.
Connects that chat socket to the correct address and port. Any exceptions are raised to the top method.
Finally sends a valid authentication packet. Any exceptions are raised to the top method.
"""
if self.account == None or self.account.cookie == None or self.account.auth_hash == None:
raise ChatServerError(205)
if self.__chat_socket is None:
self.__chat_socket = ChatSocket(self.events)
try:
self.__chat_socket.connect(self.account.chat_url, self.config['chatport']) # Basic connection to the socket
except HoNCoreError as e:
if e.code == 10: # Socket error.
raise ChatServerError(208) # Could not connect to the chat server.
elif e.code == 11: # Socket timed out.
raise ChatServerError(201)
# Send initial authentication request to the chat server.
# TODO: If the chat server did not respond to the auth request after a set number of attempts then increment the chat protocol version.
try:
self.__chat_socket.send_auth_info(self.account.account_id, self.account.cookie, self.account.ip, self.account.auth_hash, self.config['protocol'], self.config['invis'])
except ChatServerError:
raise # Re-raise the exception.
# The idea is to give 10 seconds for the chat server to respond to the authentication request.
# If it is accepted, then the `is_authenticated` flag will be set to true.
# NOTE: Lag will make this sort of iffy....
attempts = 1
        while attempts != 10:
if self.__chat_socket.is_authenticated:
return True
else:
time.sleep(1)
attempts += 1
raise ChatServerError(200) # Server did not respond to the authentication request
def _chat_disconnect(self):
""" Disconnect gracefully from the chat server and close & remove the socket."""
if self.__chat_socket is not None:
self.__chat_socket.disconnect()
""" Gameserver related functions"""
def _game_create(self, game_name):
""" Sends the create game request to a gameserver via the game socket object.
Ensures the user information required is available, otherwise raises
a GameServerError #205 (No session key/auth hash provided)
If for some reason a GameSocket does not exist then one is created.
Connects that game socket to the correct address and port. Any exceptions are raised to the top method.
"""
if not all([self.account, self.account.cookie, self.account.auth_hash, self.account.game_ip, self.account.game_port, self.account.acc_key, self.account.acc_key_hash]):
raise GameServerError(205)
if self.__game_socket is None:
self.__game_socket = GameSocket()
try:
self.__game_socket.connect(self.account.game_ip, self.account.game_port) # Basic connection to the socket
except HoNCoreError as e:
if e.code == 10: # Socket error.
raise GameServerError(208) # Could not connect to the game server.
elif e.code == 11: # Socket timed out.
raise GameServerError(201)
# Send initial authentication request to the game server.
try:
self.__game_socket.create_game(game_name = game_name,
player_name = self.account.nickname,
cookie = self.account.cookie,
ip = self.account.ip,
acc_key = self.account.acc_key,
account_id = self.account.account_id,
acc_key_hash = self.account.acc_key_hash,
auth_hash = self.account.auth_hash)
except GameServerError:
raise # Re-raise the exception.
# The idea is to give 10 seconds for the chat server to respond to the authentication request.
# If it is accepted, then the `is_authenticated` flag will be set to true.
# NOTE: Lag will make this sort of iffy....
attempts = 1
        while attempts != 10:
if self.__game_socket.is_authenticated:
return True
else:
time.sleep(1)
attempts += 1
raise GameServerError(200) # Server did not respond to the authentication request
def _game_connect(self):
""" Sends the join game request to a gameserver via the game socket object.
Ensures the user information required is available, otherwise raises
a GameServerError #205 (No session key/auth hash provided)
If for some reason a GameSocket does not exist then one is created.
Connects that game socket to the correct address and port. Any exceptions are raised to the top method.
"""
if not all([self.account, self.account.cookie, self.account.auth_hash, self.account.game_ip, self.account.game_port]):
raise GameServerError(205)
if self.__game_socket is None:
self.__game_socket = GameSocket()
try:
self.__game_socket.connect(self.account.game_ip, self.account.game_port) # Basic connection to the socket
except HoNCoreError as e:
if e.code == 10: # Socket error.
raise GameServerError(208) # Could not connect to the game server.
elif e.code == 11: # Socket timed out.
raise GameServerError(201)
# Send initial authentication request to the game server.
try:
self.__game_socket.join_game(player_name = self.account.nickname,
cookie = self.account.cookie,
ip = self.account.ip,
account_id = self.account.account_id,
auth_hash = self.account.auth_hash)
#self.__game_socket.send_magic_packet()
except GameServerError:
raise # Re-raise the exception.
# The idea is to give 10 seconds for the chat server to respond to the authentication request.
# If it is accepted, then the `is_authenticated` flag will be set to true.
# NOTE: Lag will make this sort of iffy....
attempts = 1
        while attempts != 10:
if self.__game_socket.is_authenticated:
return True
else:
time.sleep(1)
attempts += 1
raise GameServerError(200) # Server did not respond to the authentication request
def _game_disconnect(self):
""" Disconnect gracefully from the game server and close & remove the socket."""
if self.__game_socket is not None:
self.__game_socket.disconnect()
@property
def is_logged_in(self):
"""
Override this and provide a way to handle being logged in.
"""
pass
@property
def is_connected(self):
""" Test for chat server connection.
The line of thought here is, the client can not be connected to the chat server
until it is authenticated, the chat socket can be connected as long as the server
doesn't deny or drop the connection.
Once a user is logged in to a HoN client, they can be logged in but not connected.
This would happen if a chat server connection is dropped unexpectedly or is never initialised.
The main program would use this to check for that and then handle it itself.
"""
# Check the socket exists.
if self.__chat_socket is None:
return False
# Ensure the user is authenticated against the chat server
if self.__chat_socket.is_authenticated is False:
return False
# Check the status of the chat socket object.
if self.__chat_socket.is_connected is False:
return False
# Any other checks to be done..?
return True
@property
def is_connected_to_game(self):
""" Test for game server connection.
The line of thought here is, the client can not be connected to the game server
until it is authenticated, the game socket can be connected as long as the server
doesn't deny or drop the connection.
"""
# Check the socket exists.
if self.__game_socket is None:
return False
# Ensure the user is authenticated against the game server
if self.__game_socket.is_authenticated is False:
return False
# Check the status of the game socket object.
if self.__game_socket.is_connected is False:
return False
# Any other checks to be done..?
return True
""" Message of the day related functions"""
def motd_get(self):
""" Requests the message of the day entries from the server and then pushes them through motd_parse.
Returns a dict of motd entries.
"""
raw = self.__requester.motd()
try:
raw = deserialise.parse_raw(raw)
except ValueError:
raise MasterServerError(108)
return self.__motd_parse(raw)
def __motd_parse(self, raw):
""" Parses the message of the day entries into a dictionary of the format:
motd = {
motd_list = [
{
["title"] = "Item 1 title",
["author"] = "MsPudding",
["date"] = "6/30/2011"
["body"] = "This is the body of the message including line feeds"
},
{
["title"] = "Item 2 title",
["author"] = "Konrar",
["date"] = "6/29/2011",
["body"] = "This is the body text Sometimes there are ^rColours^*"
}
],
image = "http://icb.s2games.com/motd/4e67cffcc959e.jpg",
server_data = "We are aware of the server issues....",
honcast = 0
}
The first index will always be the newest....... Right?
"""
motd = {'motd_list': [], 'image': '', 'server_data': '', 'honcast': 0}
# Split the full string into a list of entries.
for entry in raw['motddata'].split("|"):
#try:
title, body, author, date = entry.split("`")
motd['motd_list'].append({"title" : title, "author" : author, "date" : date, "body" : body})
#except ValueError:
#raise MasterServerError(113) # Motd data error
motd['image'] = raw['motdimg']
motd['server_data'] = raw['serverdata']
motd['honcast'] = raw['honcast']
return motd
""" Server list related functions"""
def server_list_get(self):
""" Requests the server list from the server and then pushes them through __server_list_parse.
Returns a dict of server ips.
"""
print 'Getting servers list...'
raw = self.__requester.server_list(self.account.cookie, GAME_SERVER_TYPE)
try:
raw = deserialise.parse_raw(raw)
except ValueError:
raise MasterServerError(108)
return self.__server_list_parse(raw)
def __server_list_parse(self, raw):
""" Parses the server_list into a dictionary of the format:
servers_dict = {
server_list : {
server_id : {
["ip"] = Game Server 1 IP Address,
["server_id"] = Game Server 1 ID,
["session"] = Game Server 1 Session Key,
["port"] = Game Server 1 Connection Port,
["location"] = Game Server 1 Location
},
server_id : {
["ip"] = Game Server 2 IP Address,
["server_id"] = Game Server 2 ID,
["session"] = Game Server 2 Session Key,
["port"] = Game Server 2 Connection Port,
["location"] = Game Server 2 Location
},
....
},
acc_key = User Account Key,
acc_key_hash = User Account Key Hash
}
"""
servers_dict = {}
servers_dict['server_list'] = raw['server_list']
servers_dict['acc_key'] = raw['acc_key']
servers_dict['acc_key_hash'] = raw['acc_key_hash']
return servers_dict
""" The core client functions."""
def send_channel_message(self, message, channel_id):
""" Sends a message to a specified channel.
Takes 2 parameters.
            `message` The message to be sent.
`channel_id` The id of the channel to send it to.
"""
# TODO: Implement throttling for messages.
self.__chat_socket.send_channel_message(message, channel_id)
def join_channel(self, channel, password=None):
""" Sends a request to join a channel.
            Takes 2 parameters.
`channel` A string containing the channel name.
`password` The optional password required to join the channel.
"""
if password:
self.__chat_socket.send_join_channel_password(channel, password)
elif not password:
self.__chat_socket.send_join_channel(channel)
def send_whisper(self, player, message):
""" Sends the message to the player.
Takes 2 parameters.
`player` A string containing the player's name.
`message` A string containing the message.
"""
self.__chat_socket.send_whisper(player, message)
def send_private_message(self, player, message):
""" Sends the message to the player.
Takes 2 parameters.
`player` A string containing the player's name.
`message` A string containing the message.
"""
self.__chat_socket.send_private_message(player, message)
def send_buddy_add_notify(self, player):
""" Send a buddy add notify to the player.
Takes 1 parameter.
`player` A string containing the player's name.
"""
self.__chat_socket.send_buddy_add_notify(player)
def send_buddy_accept(self, player):
""" Sends a buddy accept.
Takes 1 parameter.
`player` A string containing the player's name.
"""
self.__chat_socket.send_buddy_accept(player)
def send_join_game(self, game_server_ip):
""" Sends a join game notification.
Takes 1 parameter.
`game_server_ip` A string containing the game server IP.
"""
self.__chat_socket.send_join_game(game_server_ip)
def send_game_invite(self, player):
""" Sends a game invite to the player.
Takes 1 parameter.
`player` A string containing the player's name.
"""
self.__game_socket.send_game_invite(player)
self.__chat_socket.send_game_invite(player)
def send_mass_invite(self, channel_name):
""" Sends a game invite to all the players of a channel.
Takes 1 parameter.
`channel_name` A string containing the channel name.
"""
channel = self.name_to_channel(channel_name)
for player in channel.users:
print 'Sending invite to player : %s'%player
self.__game_socket.send_game_invite(player.nickname)
self.__chat_socket.send_game_invite(player.nickname)
def send_game_server_ip(self, server_ip):
""" Sends a chosen game server ip to the chat server.
Takes 1 parameter.
`server_ip` A string containing the chosen server IP
"""
self.__chat_socket.send_game_server_ip(server_ip)
def create_game(self, game_name):
""" Create the game with the given name.
Takes 1 parameter.
'game_name' A string containing the game name.
"""
server_infos = self.pick_game_server(MAXIMUM_SERVER_PING)
# Save game server infos into account
self.account.game_session_key = server_infos['server_info']['session']
self.account.game_ip = server_infos['server_info']['ip']
self.account.game_port = int(server_infos['server_info']['port'])
self.account.game_host_id = server_infos['server_info']['server_id']
self.account.acc_key = server_infos['acc_key']
self.account.acc_key_hash = server_infos['acc_key_hash']
self.send_join_game(self.account.game_ip)
self._game_create(game_name)
def pick_game_server(self, maximum_ping=150):
""" Request masterserver for server list, and return the first game server infos with a ping under
the maximum ping given, along with acc_key and acc_key_hash
"""
pinged_servers = []
servers_dict = self.server_list_get()
for gameserver_id, gameserver_info in servers_dict['server_list'].items():
if 'ip' in gameserver_info and gameserver_info['ip'] not in pinged_servers:
pinged_servers.append(gameserver_info['ip'])
try:
server_ping = ping_server(gameserver_info['ip'])
if 0 < server_ping < maximum_ping:
return {'server_info' : gameserver_info,
'acc_key' : servers_dict['acc_key'],
'acc_key_hash' : servers_dict['acc_key_hash']
}
except:
continue
return -1
""" Utility functions """
def connect_event(self, event_id, method, priority=5):
""" Wrapper method for connecting events. """
try:
self.__events[event_id].connect(method, priority)
except KeyError:
try:
self.__game_events[event_id].connect(method, priority)
except:
raise HoNCoreError(13) # Unknown event ID
def disconnect_event(self, event_id, method):
""" Wrapper method for disconnecting events. """
try:
self.__events[event_id].disconnect(method)
except HoNCoreError, e:
if e.id == 14: # Method is not connected to this event.
raise
except KeyError:
try:
self.__game_events[event_id].disconnect(method)
except:
raise HoNCoreError(13) # Unknown event ID
def connect_game_event(self, event_id, method, priority=5):
""" Wrapper method for connecting events. """
try:
self.__game_events[event_id].connect(method, priority)
except :
raise HoNCoreError(13) # Unknown event ID
def disconnect_game_event(self, event_id, method):
""" Wrapper method for disconnecting events. """
try:
self.__game_events[event_id].disconnect(method)
except HoNCoreError, e:
if e.id == 14: # Method is not connected to this event.
raise
except:
raise HoNCoreError(13) # Unknown event ID
def id_to_channel(self, channel_id):
""" Wrapper function to return the channel name for the given ID.
If no channel was found then return None
"""
try:
return self.__channels[channel_id]
except KeyError:
return None
def id_to_nick(self, account_id):
""" Wrapper function to return the nickname for the user associated with that account ID.
If no nickname was found then return None
"""
try:
return self.__users[account_id].nickname
except KeyError:
return None
def id_to_user(self, account_id):
""" Wrapper function to return the user object for the user associated with that account ID.
If no user was found then return None
"""
try:
return self.__users[account_id]
except KeyError:
return None
def name_to_channel(self, channel_name):
""" Wrapper function to return the channel object for the channel associated with that channel_name.
If no channel was found then return None
"""
for channel_id, channel in self.__channels.items():
if channel.name == channel_name:
return channel
return None
def get_buddies(self):
buddies = []
for buddy_id in self.account.buddy_list:
buddies.append(self.__users[buddy_id])
return buddies
""" Debugging functions """
def list_users(self):
for aid in self.__users:
print self.__users[aid]
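
# --- Hedged usage sketch (not part of the original client) -----------------
# The method docstrings above describe the chat/game flow: join a channel,
# whisper a player, and let create_game() call pick_game_server() to find a
# server responding under MAXIMUM_SERVER_PING before joining it. The sketch
# assumes `client` is an already connected, logged-in instance of the
# surrounding client class; channel, player and game names are placeholders.
def _demo_chat_and_game_flow(client):
    """Minimal walkthrough of the public client API documented above."""
    client.join_channel("HoNCore Testing")       # optionally pass password=...
    client.send_whisper("SomePlayer", "Up for a game?")
    client.create_game("honcore test game")      # picks and joins a game server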
class Event:
""" Event objects represent network level events which can have functions connected to them, which
are then triggered when the event occurs.
A standard set of events are initialised by the library which should cover nearly everything.
The core client will store a list of the standard events in client.events.
The front end client should then connect these events to functions by calling the connect
method on the specific event object. e.g.
self.events.login.connect(self.on_login_event)
The functions are stored in a list called handlers; each function is run when the event is triggered.
The functions can be assigned a priority so that they are executed in an order. This is useful for
ensuring that lower level network/core client related functions are executed first.
On the networking side, the events are triggered after the packet data has been parsed and constructed into useful data.
The process would be as follows:
packet = sock.recv(512)
id = parse_id(packet)
useful_data = raw_parse(id, packet)
event.trigger(useful_data)
"""
class ConnectedMethod:
def __init__(self, method, priority):
self.method = method
self.priority = priority
def __repr__(self):
return "[%s %s]" % (self.method, self.priority)
def __init__(self, name, packet_id):
self.name = name # An english, human name for the event. Maybe it can be used for a lookup later. Not sure of a use for it right now.
self.packet_id = packet_id # A packet identifier, either a constant or a hex value of a packet. i.e HON_SC_TOTAL_ONLINE or 0x68.
self.handlers = [] # List of connected methods.
def __repr__(self):
return "<%s: %s>" % (self.packet_id, self.name)
def connect(self, function, priority=5):
""" Connects a function to a specific event.
The event is given as an english name, which corresponds
to a constant in the packet definition file.
"""
self.handlers.append(self.ConnectedMethod(function, priority))
def disconnect(self, method):
""" Hopefully it can be used to remove event handlers from this event
object so they are no longer triggered. Useful if say, an event only
needs to be triggered once, for a reminder or such.
"""
for cm in self.handlers:
    if cm.method == method:
        self.handlers.remove(cm)
        break
else:
    raise HoNCoreError(14) # Method is not connected to this event_id
def trigger(self, **data):
""" Sorts the connected handlers based on their priority and calls each one in turn,
passing the dictionary of keyword arguments, or alternatively with no arguments.
"""
for cm in sorted(self.handlers, key=lambda c: c.priority):
f = cm.method
num_args = f.func_code.co_argcount
f(**data) if num_args > 0 else f()
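
# --- Hedged usage sketch (not part of the original library) ----------------
# Illustrates the connect/trigger cycle described in the Event docstring:
# handlers are attached with a priority, trigger() sorts them and calls each
# one with the parsed packet data as keyword arguments. The packet id 0x68
# and the `count` argument are placeholders for this demonstration only.
def _demo_event_cycle():
    total_online = Event("Total Online", 0x68)

    def update_count(count):
        return count                       # a real handler would update UI/state

    total_online.connect(update_count, priority=1)
    total_online.trigger(count=1337)       # normally called by the packet parser
    total_online.disconnect(update_count)  # stop receiving this event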
| 43.015113
| 180
| 0.594074
|
cd09505b9c1b5dd39206095dad3834cb2cb7e951
| 9,027
|
py
|
Python
|
davis_decode.py
|
ventillo/esp8266_cc1101_davis_vantage_vue_wifi_decoder
|
d309db1c5cfe4066bd2138f350481dbd5b364239
|
[
"Unlicense"
] | 5
|
2020-06-30T11:08:34.000Z
|
2021-08-16T12:22:26.000Z
|
davis_decode.py
|
ventillo/esp8266_cc1101_davis_vantage_vue_wifi_decoder
|
d309db1c5cfe4066bd2138f350481dbd5b364239
|
[
"Unlicense"
] | null | null | null |
davis_decode.py
|
ventillo/esp8266_cc1101_davis_vantage_vue_wifi_decoder
|
d309db1c5cfe4066bd2138f350481dbd5b364239
|
[
"Unlicense"
] | 1
|
2020-12-13T18:49:32.000Z
|
2020-12-13T18:49:32.000Z
|
import urequests
import machine
try:
_DEBUG = DEBUG
except NameError:  # DEBUG is not defined in the global scope
_DEBUG = False
def send_to_influx(host, port, db, user, password, davis_unit_id, wind, measurement, name, value, tags):
post = "http://{}:{}/write?db={}".format(host, port, db)
if _DEBUG:
print(b"SENDING TO: {}".format(post))
if measurement is False:
return (False, b"ERROR measurement set False")
if measurement is None:
data = "wind,type=speed,davis_id={_davis_id} value={_speed}\n wind,type=direction,davis_id={_davis_id} value={_direction}".format(
_davis_id = davis_unit_id,
_speed=wind['speed'],
_direction=wind['direction'])
if _DEBUG:
print(b"SEND WIND only: {}")
else:
for tag in tags.keys():
measurement = "{},{}={}".format(measurement, tag, tags[tag])
data = "{_measure},davis_id={_davis_id} {_name}={_value}\n wind,type=speed,davis_id={_davis_id} value={_speed}\n wind,type=direction,davis_id={_davis_id} value={_direction}".format(
_measure=measurement,
_name=name,
_value=value,
_davis_id = davis_unit_id,
_speed=wind['speed'],
_direction=wind['direction'])
if _DEBUG:
print(b"POST_DATA: {}".format(data))
try:
return (True, urequests.post(post, data=data))
except Exception as e:
if e.args[0] == 103:
machine.reset()
else:
return (False, b"ERROR sending data to influx: {}".format(e))
def raw_send_to_influx(host, port, db, user, password, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, rssi, lqi):
post = "http://{}:{}/write?db={}".format(host, port, db)
if _DEBUG:
print(b"SENDING TO: {}".format(post))
data = "data b0={_b0},b1={_b1},b2={_b2},b3={_b3},b4={_b4},b5={_b5},b6={_b6},b7={_b7},b8={_b8},b9={_b9},rssi={_rssi},lqi={_lqi}".format(
_b0=b0, _b1=b1, _b2=b2, _b3=b3,
_b4=b4, _b5=b5, _b6=b6, _b7=b7,
_b8=b8, _b9=b9, _rssi=rssi, _lqi=lqi)
if _DEBUG:
print(b"POST_DATA: {}".format(data))
try:
return (True, urequests.post(post, data=data))
except Exception as e:
if e.args[0] == 103:
machine.reset()
else:
return (False, b"ERROR sending RAW data to influx: {}".format(e))
def reverseBits(data):
data = "{:08b}".format(data)
z = ""
for i in range(len(data),0,-1):
z = z + (data[i-1])
return int(z, 2)
class davisDecoder(object):
def __init__(self, weather_db, stat_db, raw_db):
__name__ = 'Davis value decoder class'
self.weather_influx_db = weather_db
self.stat_influx_db = stat_db
self.raw_influx_db = raw_db
def byte_split(self, data):
msb = data >> 4
lsb = data & 0b00001111
result = {"MSB": msb, "LSB": lsb}
return result
def davis_id(self, header):
self.davis_packet_id = 0
self.battery_low = 0
self.unit_id = 0
bin_header = self.byte_split(header)
self.unit_id = bin_header['LSB'] & 0b0111
self.battery_low = bin_header['LSB'] >> 3
self.davis_packet_id = bin_header['MSB']
result = {"davis_id": self.unit_id,
"packet_id": self.davis_packet_id,
"bat_low": self.battery_low}
return result
def decode_wind(self, databytes):
# wind speed is reported in mph; convert it to km/h
wind_speed = round(float(databytes['windspeed'] * 1.60934), 1)
wind_direction_factor = round(float(360)/float(255), 1)
wind_direction = databytes['winddir']
wind_direction = float(wind_direction) * wind_direction_factor
result = {"speed": wind_speed, "direction": wind_direction}
return result
def decode_temp(self, temp):
temp_f = (float(temp)) / float(160) # in Fahrenheit
temp_c = round((temp_f - 32) * float(5)/float(9), 1)
result = {"celsius": temp_c, "fahrenheit": temp_f}
return result
def decode_humidity(self, hum):
pass
def supercap_decode(self, byte2, byte3):
cap = (byte2 << 2) + (byte3 >> 6)
result = float(cap / 100.00)
return result
def solarvolt_decode(self, byte2, byte3):
solar = (byte2 << 1) + (byte3 >> 7)
result = float(solar)
return result
def rain_decode(self, rain):
result = float(rain & 0x7F)
return result
def rainrate_decode(self, byte2, byte3):
# if byte3(b2 here) is 0xFF, or 255, there is no rain
#print("b2:{} b3:{} = result:{}".format(byte2, byte3, byte2 + (byte3 >> 4 << 8)))
if byte2 == 255:
rainstate = 0
rainrate = 0
elif byte2 == 254:
rainstate = 1
rainrate = 0.2
else:
rainstate = 2
if byte3 > 4:
rainrate = 720 / ((byte3 >> 4 << 8) + byte2)
else:
rainrate = 0
result = {"state": float(rainstate), "rate": float(rainrate)}
#print(result)
return result
def DecodePacket(self, packet):
# By default and most of the time, write to weather
self.write_influx_db = self.weather_influx_db
# Set all to None
self.wind = False
self.measurement = False
self.name = False
self.value = False
self.tags = False
self.wind = self.decode_wind(
{"windspeed": packet[1], "winddir": packet[2]})
if self.davis_packet_id == 2:
# SuperCap charge 0x2
if _DEBUG:
print('SCAP:')
supercap = self.supercap_decode(
packet[3], packet[4]
)
if _DEBUG:
print("{}".format(supercap))
self.write_influx_db = self.stat_influx_db
self.measurement = 'iss'
self.name = 'voltage'
self.tags = {'type': 'capacitor'}
self.value = supercap
elif self.davis_packet_id == 3:
# Unknown packet type 0x3 (payload not decoded)
# {'hop':1,'h':48,'b0':6,'b1':237,'b2':255,'b3':195,'b4':135,'b5':50,'b6':110,'b7':255,'b8':255,'b9':179,'rssi':45,'lqi':0,'nxt':64,'cnt':163}
self.measurement = None
self.name = None
self.tags = None
self.value = None
elif self.davis_packet_id == 5:
# Rainrate 0x5
rainrate_dict = self.rainrate_decode(
packet[3],
packet[4])
if _DEBUG:
print("RAINRATE: {}".format(rainrate_dict))
self.measurement = 'rain'
self.name = 'value'
self.tags = {'type': 'rainrate'}
self.value = rainrate_dict['rate']
elif self.davis_packet_id == 6:
# Sun Irradiation 0x6 (NOT ON vantage Vue)
pass
elif self.davis_packet_id == 7:
# Super Cap voltage 0x7
solarvolt = self.solarvolt_decode(
packet[3], packet[4]
)
if _DEBUG:
print("SOLV {}".format(solarvolt))
self.write_influx_db = self.stat_influx_db
self.measurement = 'iss'
self.name = 'voltage'
self.tags = {'type': 'solar'}
self.value = solarvolt
elif self.davis_packet_id == 8:
# Temperature 0x8
raw_temp = (packet[3] << 8) + packet[4]
temp_dict = self.decode_temp(raw_temp)
temp = float(temp_dict['celsius'])
if _DEBUG:
print("TEMP: {}".format(temp))
self.measurement = 'temphumi'
self.name = 'temperature'
self.tags = {'type': 'external'}
self.value = temp
elif self.davis_packet_id == 9:
# Wind gusts 0x9
windgust = packet[3] * 1.60934
if _DEBUG:
print("WINDGUST: {}".format(windgust))
self.measurement = 'wind'
self.name = 'value'
self.tags = {'type': 'windgust'}
self.value = windgust
elif self.davis_packet_id == 10:
# Humidity 0xa
raw_humidity = (((packet[4] >> 4) & 0b0011) << 8) \
+ packet[3]
humidity = round(int(raw_humidity) / float(10), 1)
if _DEBUG:
print("HUMI: {}".format(humidity))
self.measurement = 'temphumi'
self.name = 'humidity'
self.tags = {'type': 'external'}
self.value = humidity
elif self.davis_packet_id == 14:
# Rain bucket tips 0xe
raw_rain = (packet[3]) + (packet[4] >> 7 << 8)
rain = self.rain_decode(raw_rain)
if _DEBUG:
print("RAINCOUNT: {}".format(rain))
self.measurement = 'rain'
self.name = 'value'
self.tags = {'type': 'rain_bucket_tips'}
self.value = rain
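
# --- Hedged usage sketch (not part of the original module) -----------------
# Shows how a received Davis frame is decoded: davis_id() splits the header
# byte into packet type / battery flag / unit id, then DecodePacket() turns
# the payload into a measurement. The packet bytes and the three database
# names below are made up for illustration.
def _demo_decode():
    decoder = davisDecoder("weather", "stat", "raw")
    info = decoder.davis_id(0x86)   # packet type 0x8 (temperature), battery OK, unit 6
    packet = [0x86, 5, 128, 0x2A, 0x80, 0, 0, 0, 0, 0]
    decoder.DecodePacket(packet)
    # decoder.wind  -> speed 8.0 km/h, direction ~179.2 deg (from 5 mph, byte 128)
    # decoder.value -> 20.0 degrees C (raw 0x2A80 / 160 = 68.0 F)
    return info, decoder.measurement, decoder.value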
| 36.695122
| 189
| 0.535837
|
6e3cd06b07e31e79273e40337fd0ac0edf7751ef
| 28,098
|
py
|
Python
|
slack_invite_flow/quip.py
|
omaraboumrad/slack-invite-flow
|
2cf63d95cac355ae0e7c3e57c7f927975ed3639c
|
[
"Apache-2.0"
] | 3
|
2016-11-07T16:16:54.000Z
|
2017-05-25T21:38:49.000Z
|
slack_invite_flow/quip.py
|
omaraboumrad/slack-invite-flow
|
2cf63d95cac355ae0e7c3e57c7f927975ed3639c
|
[
"Apache-2.0"
] | null | null | null |
slack_invite_flow/quip.py
|
omaraboumrad/slack-invite-flow
|
2cf63d95cac355ae0e7c3e57c7f927975ed3639c
|
[
"Apache-2.0"
] | 3
|
2016-11-07T16:55:26.000Z
|
2021-11-09T09:35:40.000Z
|
# Copyright 2014 Quip
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A Quip API client library.
For full API documentation, visit https://quip.com/api/.
Typical usage:
client = quip.QuipClient(access_token=...)
user = client.get_authenticated_user()
starred = client.get_folder(user["starred_folder_id"])
print "There are", len(starred["children"]), "items in your starred folder"
In addition to standard getters and setters, we provide a few convenience
methods for document editing. For example, you can use `add_to_first_list`
to append items (in Markdown) to the first bulleted or checklist in a
given document, which is useful for automating a task list.
"""
import datetime
import json
import logging
import sys
import time
import urllib
import urllib2
import xml.etree.cElementTree
reload(sys)
sys.setdefaultencoding('utf8')
class QuipClient(object):
"""A Quip API client"""
# Edit operations
APPEND, \
PREPEND, \
AFTER_SECTION, \
BEFORE_SECTION, \
REPLACE_SECTION, \
DELETE_SECTION = range(6)
# Folder colors
MANILA, \
RED, \
ORANGE, \
GREEN, \
BLUE = range(5)
def __init__(self, access_token=None, client_id=None, client_secret=None,
base_url=None, request_timeout=None, retry_rate_limit=False):
"""Constructs a Quip API client.
If `access_token` is given, all of the API methods in the client
will work to read and modify Quip documents.
Otherwise, only `get_authorization_url` and `get_access_token`
work, and we assume the client is for a server using the Quip API's
OAuth endpoint.
"""
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.base_url = base_url if base_url else "https://platform.quip.com"
self.request_timeout = request_timeout if request_timeout else 10
self.retry_rate_limit = retry_rate_limit
def get_authorization_url(self, redirect_uri, state=None):
"""Returns the URL the user should be redirected to to sign in."""
return self._url(
"oauth/login", redirect_uri=redirect_uri, state=state,
response_type="code", client_id=self.client_id)
def get_access_token(self, redirect_uri, code,
grant_type="authorization_code",
refresh_token=None):
"""Exchanges a verification code for an access_token.
Once the user is redirected back to your server from the URL
returned by `get_authorization_url`, you can exchange the `code`
argument with this method.
"""
return self._fetch_json(
"oauth/access_token", redirect_uri=redirect_uri, code=code,
grant_type=grant_type, refresh_token=refresh_token,
client_id=self.client_id, client_secret=self.client_secret)
def get_authenticated_user(self):
"""Returns the user corresponding to our access token."""
return self._fetch_json("users/current")
def get_user(self, id):
"""Returns the user with the given ID."""
return self._fetch_json("users/" + id)
def get_users(self, ids):
"""Returns a dictionary of users for the given IDs."""
return self._fetch_json("users/", post_data={"ids": ",".join(ids)})
def get_contacts(self):
"""Returns a list of the users in the authenticated user's contacts."""
return self._fetch_json("users/contacts")
def get_folder(self, id):
"""Returns the folder with the given ID."""
return self._fetch_json("folders/" + id)
def get_folders(self, ids):
"""Returns a dictionary of folders for the given IDs."""
return self._fetch_json("folders/", post_data={"ids": ",".join(ids)})
def new_folder(self, title, parent_id=None, color=None, member_ids=[]):
return self._fetch_json("folders/new", post_data={
"title": title,
"parent_id": parent_id,
"color": color,
"member_ids": ",".join(member_ids),
})
def update_folder(self, folder_id, color=None, title=None):
return self._fetch_json("folders/update", post_data={
"folder_id": folder_id,
"color": color,
"title": title,
})
def add_folder_members(self, folder_id, member_ids):
"""Adds the given users to the given folder."""
return self._fetch_json("folders/add-members", post_data={
"folder_id": folder_id,
"member_ids": ",".join(member_ids),
})
def remove_folder_members(self, folder_id, member_ids):
"""Removes the given users from the given folder."""
return self._fetch_json("folders/remove-members", post_data={
"folder_id": folder_id,
"member_ids": ",".join(member_ids),
})
def get_messages(self, thread_id, max_created_usec=None, count=None):
"""Returns the most recent messages for the given thread.
To page through the messages, use max_created_usec, which is the
sort order for the returned messages.
count should be an integer indicating the number of messages you
want returned. The maximum is 100.
"""
return self._fetch_json(
"messages/" + thread_id, max_created_usec=max_created_usec,
count=count)
def new_message(self, thread_id, content=None, **kwargs):
"""Sends a message on the given thread.
`content` is plain text, not HTML.
"""
args = {
"thread_id": thread_id,
"content": content,
}
args.update(kwargs)
return self._fetch_json("messages/new", post_data=args)
def get_thread(self, id):
"""Returns the thread with the given ID."""
return self._fetch_json("threads/" + id)
def get_threads(self, ids):
"""Returns a dictionary of threads for the given IDs."""
return self._fetch_json("threads/", post_data={"ids": ",".join(ids)})
def get_recent_threads(self, max_updated_usec=None, count=None):
"""Returns the recently updated threads for a given user."""
return self._fetch_json(
"threads/recent", max_updated_usec=max_updated_usec,
count=count)
def add_thread_members(self, thread_id, member_ids):
"""Adds the given folder or user IDs to the given thread."""
return self._fetch_json("threads/add-members", post_data={
"thread_id": thread_id,
"member_ids": ",".join(member_ids),
})
def remove_thread_members(self, thread_id, member_ids):
"""Removes the given folder or user IDs from the given thread."""
return self._fetch_json("threads/remove-members", post_data={
"thread_id": thread_id,
"member_ids": ",".join(member_ids),
})
def move_thread(self, thread_id, source_folder_id, destination_folder_id):
"""Moves the given thread from the source folder to the destination one.
"""
self.add_thread_members(thread_id, [destination_folder_id])
self.remove_thread_members(thread_id, [source_folder_id])
def new_document(self, content, format="html", title=None, member_ids=[]):
"""Creates a new document from the given content.
To create a document in a folder, include the folder ID in the list
of member_ids, e.g.,
client = quip.QuipClient(...)
user = client.get_authenticated_user()
client.new_document(..., member_ids=[user["private_folder_id"]])
"""
return self._fetch_json("threads/new-document", post_data={
"content": content,
"format": format,
"title": title,
"member_ids": ",".join(member_ids),
})
def copy_document(self, id, title=None, member_ids=[]):
"""Creates a new document from the given thread ID.
To create it in a folder, include the folder ID in member_ids.
"""
old_thread = self.get_thread(id)
return self.new_document(
old_thread["html"], title=title or old_thread["thread"]["title"],
member_ids=member_ids)
def merge_comments(self, original_id, children_ids):
"""Given an original document and a set of exact duplicates, copies
all comments and messages on the duplicates to the original.
"""
import re
threads = self.get_threads(children_ids + [original_id])
original_section_ids = re.findall(r" id='([a-zA-Z0-9]{11})'",
threads[original_id]["html"])
for thread_id in children_ids:
thread = threads[thread_id]
child_section_ids = re.findall(r" id='([a-zA-Z0-9]{11})'",
thread["html"])
parent_map = dict(zip(child_section_ids, original_section_ids))
messages = self.get_messages(thread_id)
for message in reversed(messages):
kwargs = {}
if "parts" in message:
kwargs["parts"] = json.dumps(message["parts"])
else:
kwargs["content"] = message["text"]
if "annotation" in message:
section_id = None
if "highlight_section_ids" in message["annotation"]:
section_id = message["annotation"][
"highlight_section_ids"][0]
else:
anno_loc = thread["html"].find(
'<annotation id="%s"' % message["annotation"]["id"])
loc = thread["html"].rfind("id=", 0, anno_loc)
if anno_loc >= 0 and loc >= 0:
section_id = thread["html"][loc+4:loc+15]
if section_id and section_id in parent_map:
kwargs["section_id"] = parent_map[section_id]
if "files" in message:
attachments = []
for blob_info in message["files"]:
blob = self.get_blob(thread_id, blob_info["hash"])
new_blob = self.put_blob(
original_id, blob, name=blob_info["name"])
attachments.append(new_blob["id"])
if attachments:
kwargs["attachments"] = ",".join(attachments)
self.new_message(original_id, **kwargs)
def edit_document(self, thread_id, content, operation=APPEND, format="html",
section_id=None, **kwargs):
"""Edits the given document, adding the given content.
`operation` should be one of the constants described above. If
`operation` is relative to another section of the document, you must
also specify the `section_id`.
"""
args = {
"thread_id": thread_id,
"content": content,
"location": operation,
"format": format,
"section_id": section_id,
}
args.update(kwargs)
return self._fetch_json("threads/edit-document", post_data=args)
def add_to_first_list(self, thread_id, *items, **kwargs):
"""Adds the given items to the first list in the given document.
client = quip.QuipClient(...)
client.add_to_first_list(thread_id, "Try the Quip API")
"""
items = [item.replace("\n", " ") for item in items]
args = {
"thread_id": thread_id,
"content": "\n\n".join(items),
"format": "markdown",
"operation": self.AFTER_SECTION
}
args.update(kwargs)
if "section_id" not in args:
first_list = self.get_first_list(thread_id)
if not first_list:
return None
args["section_id"] = self.get_last_list_item_id(first_list)
if not args["section_id"]:
# section_id = first_list.attrib["id"]
return None
return self.edit_document(**args)
def add_to_spreadsheet(self, thread_id, *rows, **kwargs):
"""Adds the given rows to the named (or first) spreadsheet in the
given document.
client = quip.QuipClient(...)
client.add_to_spreadsheet(thread_id, ["5/1/2014", 2.24])
"""
content = "".join(["<tr>%s</tr>" % "".join(
["<td>%s</td>" % cell for cell in row]) for row in rows])
if kwargs.get("name"):
spreadsheet = self.get_named_spreadsheet(kwargs["name"], thread_id)
else:
spreadsheet = self.get_first_spreadsheet(thread_id)
section_id = self.get_last_row_item_id(spreadsheet)
return self.edit_document(
thread_id=thread_id,
content=content,
section_id=section_id,
operation=self.AFTER_SECTION)
def update_spreadsheet_row(self, thread_id, header, value, updates, **args):
"""Finds the row where the given header column is the given value, and
applies the given updates. Updates is a dict from header to
new value. In both cases headers can either be a string that matches, or
"A", "B", "C", 1, 2, 3 etc. If no row is found, adds a new one.
client = quip.QuipClient(...)
client.update_spreadsheet_row(
thread_id, "customer", "Acme", {"Billed": "6/24/2015"})
"""
response = None
spreadsheet = self.get_first_spreadsheet(thread_id)
headers = self.get_spreadsheet_header_items(spreadsheet)
row = self.find_row_from_header(spreadsheet, header, value)
if row:
ids = self.get_row_ids(row)
for head, val in updates.iteritems():
index = self.get_index_of_header(headers, head)
if not index or index >= len(ids) or not ids[index]:
continue
response = self.edit_document(
thread_id=thread_id,
content=val,
format="markdown",
section_id=ids[index],
operation=self.REPLACE_SECTION,
**args)
else:
updates[header] = value
indexed_items = {}
extra_items = []
for head, val in updates.iteritems():
index = self.get_index_of_header(
headers, head, default=None)
if index is None or index in indexed_items:
extra_items.append(val)
else:
indexed_items[index] = val
cells = []
for i in range(max(indexed_items.keys()) + 1):
if i in indexed_items:
cells.append(indexed_items[i])
elif len(extra_items):
cells.append(extra_items.pop(0))
else:
cells.append("")
cells.extend(extra_items)
content = "<tr>%s</tr>" % "".join(
["<td>%s</td>" % cell for cell in cells])
section_id = self.get_last_row_item_id(spreadsheet)
response = self.edit_document(
thread_id=thread_id,
content=content,
section_id=section_id,
operation=self.AFTER_SECTION,
**args)
return response
def toggle_checkmark(self, thread_id, item, checked=True):
"""Sets the checked state of the given list item to the given state.
client = quip.QuipClient(...)
list = client.get_first_list(thread_id)
client.toggle_checkmark(thread_id, list[0])
"""
if checked:
item.attrib["class"] = "checked"
else:
item.attrib["class"] = ""
return self.edit_document(thread_id=thread_id,
content=xml.etree.cElementTree.tostring(item),
section_id=item.attrib["id"],
operation=self.REPLACE_SECTION)
def get_first_list(self, thread_id=None, document_html=None):
"""Returns the `ElementTree` of the first list in the document.
The list can be any type (bulleted, numbered, or checklist).
If `thread_id` is given, we download the document. If you have
already downloaded the document, you can specify `document_html`
directly.
"""
return self._get_container(thread_id, document_html, "ul", 0)
def get_last_list(self, thread_id=None, document_html=None):
"""Like `get_first_list`, but the last list in the document."""
return self._get_container(thread_id, document_html, "ul", -1)
def get_section(self, section_id, thread_id=None, document_html=None):
if not document_html:
document_html = self.get_thread(thread_id).get("html")
if not document_html:
return None
tree = self.parse_document_html(document_html)
element = list(tree.iterfind(".//*[@id='%s']" % section_id))
if not element:
return None
return element[0]
def get_named_spreadsheet(self, name, thread_id=None, document_html=None):
if not document_html:
document_html = self.get_thread(thread_id).get("html")
if not document_html:
return None
tree = self.parse_document_html(document_html)
element = list(tree.iterfind(".//*[@title='%s']" % name))
if not element:
return None
return element[0]
def _get_container(self, thread_id, document_html, container, index):
if not document_html:
document_html = self.get_thread(thread_id).get("html")
if not document_html:
return None
tree = self.parse_document_html(document_html)
lists = list(tree.iter(container))
if not lists:
return None
try:
return lists[index]
except IndexError:
return None
def get_last_list_item_id(self, list_tree):
"""Returns the last item in the given list `ElementTree`."""
items = list(list_tree.iter("li"))
return items[-1].attrib["id"] if items else None
def get_first_list_item_id(self, list_tree):
"""Like `get_last_list_item_id`, but the first item in the list."""
for item in list_tree.iter("li"):
return item.attrib["id"]
return None
def get_first_spreadsheet(self, thread_id=None, document_html=None):
"""Returns the `ElementTree` of the first spreadsheet in the document.
If `thread_id` is given, we download the document. If you have
already downloaded the document, you can specify `document_html`
directly.
"""
return self._get_container(thread_id, document_html, "table", 0)
def get_last_spreadsheet(self, thread_id=None, document_html=None):
"""Like `get_first_spreadsheet`, but the last spreadsheet."""
return self._get_container(thread_id, document_html, "table", -1)
def get_last_row_item_id(self, spreadsheet_tree):
"""Returns the last row in the given spreadsheet `ElementTree`."""
items = list(spreadsheet_tree.iter("tr"))
return items[-1].attrib["id"] if items else None
def get_row_items(self, row_tree):
"""Returns the text of items in the given row `ElementTree`."""
return [(list(x.itertext()) or [None])[0] for x in row_tree]
def get_row_ids(self, row_tree):
"""Returns the ids of items in the given row `ElementTree`."""
return [x.attrib["id"] for x in row_tree]
def get_spreadsheet_header_items(self, spreadsheet_tree):
"""Returns the header row in the given spreadsheet `ElementTree`."""
return self.get_row_items(list(spreadsheet_tree.iterfind(".//tr"))[0])
def get_index_of_header(self, header_items, header, default=0):
"""Find the index of the given header in the items"""
if header:
header = str(header)
lower_headers = [str(h).lower() for h in header_items]
if header in header_items:
return header_items.index(header)
elif header.lower() in lower_headers:
return lower_headers.index(header.lower())
elif header.isdigit():
return int(header)
elif len(header) == 1:
char = ord(header.upper())
if ord('A') < char < ord('Z'):
return char - ord('A') + 1
else:
logging.warning("Could not find header, using first column")
return default
def find_row_from_header(self, spreadsheet_tree, header, value):
"""Find the row in the given spreadsheet `ElementTree` where header is
value.
"""
headers = self.get_spreadsheet_header_items(spreadsheet_tree)
index = self.get_index_of_header(headers, header)
for row in spreadsheet_tree.iterfind(".//tr"):
if len(row) <= index:
continue
cell = row[index]
if cell.tag != "td":
continue
if list(cell.itertext())[0].lower() == value.lower():
return row
def parse_spreadsheet_contents(self, spreadsheet_tree):
"""Returns a python-friendly representation of the given spreadsheet
`ElementTree`
"""
import collections
spreadsheet = {
"id": spreadsheet_tree.attrib.get("id"),
"headers": self.get_spreadsheet_header_items(spreadsheet_tree),
"rows": [],
}
for row in spreadsheet_tree.iterfind(".//tr"):
value = {
"id": row.attrib.get("id"),
"cells": collections.OrderedDict(),
}
for i, cell in enumerate(row):
if cell.tag != "td":
continue
value["cells"][spreadsheet["headers"][i]] = {
"id": cell.attrib.get("id"),
"content": list(cell.itertext())[0],
}
if len(value["cells"]):
spreadsheet["rows"].append(value)
return spreadsheet
def parse_document_html(self, document_html):
"""Returns an `ElementTree` for the given Quip document HTML"""
document_xml = "<html>" + document_html + "</html>"
return xml.etree.cElementTree.fromstring(document_xml.encode("utf-8"))
def parse_micros(self, usec):
"""Returns a `datetime` for the given microsecond string"""
return datetime.datetime.utcfromtimestamp(usec / 1000000.0)
def get_blob(self, thread_id, blob_id):
"""Returns a file-like object with the contents of the given blob from
the given thread.
The object is described in detail here:
https://docs.python.org/2/library/urllib2.html#urllib2.urlopen
"""
request = urllib2.Request(
url=self._url("blob/%s/%s" % (thread_id, blob_id)))
if self.access_token:
request.add_header("Authorization", "Bearer " + self.access_token)
try:
return urllib2.urlopen(request, timeout=self.request_timeout)
except urllib2.HTTPError, error:
try:
# Extract the developer-friendly error message from the response
message = json.loads(error.read())["error_description"]
except Exception:
raise error
if (self.retry_rate_limit and error.code == 503 and
message == "Over Rate Limit"):
# Retry later.
reset_time = float(error.headers.get("X-RateLimit-Reset"))
delay = max(2, reset_time - time.time() + 1)
logging.warning("Rate Limit, delaying for %d seconds" % delay)
time.sleep(delay)
return self.get_blob(thread_id, blob_id)
else:
raise QuipError(error.code, message, error)
def put_blob(self, thread_id, blob, name=None):
"""Uploads an image or other blob to the given Quip thread. Returns an
ID that can be used to add the image to the document of the thread.
blob can be any file-like object. Requires the 'requests' module.
"""
import requests
url = "blob/" + thread_id
headers = None
if self.access_token:
headers = {"Authorization": "Bearer " + self.access_token}
if name:
blob = (name, blob)
try:
response = requests.request(
"post", self._url(url), timeout=self.request_timeout,
files={"blob": blob}, headers=headers)
response.raise_for_status()
return response.json()
except requests.RequestException, error:
try:
# Extract the developer-friendly error message from the response
message = error.response.json()["error_description"]
except Exception:
raise error
raise QuipError(error.response.status_code, message, error)
def _fetch_json(self, path, post_data=None, **args):
request = urllib2.Request(url=self._url(path, **args))
if post_data:
post_data = dict((k, v) for k, v in post_data.items()
if v or isinstance(v, int))
request.data = urllib.urlencode(self._clean(**post_data))
if self.access_token:
request.add_header("Authorization", "Bearer " + self.access_token)
try:
return json.loads(
urllib2.urlopen(request, timeout=self.request_timeout).read())
except urllib2.HTTPError, error:
try:
# Extract the developer-friendly error message from the response
message = json.loads(error.read())["error_description"]
except Exception:
raise error
if (self.retry_rate_limit and error.code == 503 and
message == "Over Rate Limit"):
# Retry later.
reset_time = float(error.headers.get("X-RateLimit-Reset"))
delay = max(2, reset_time - time.time() + 1)
logging.warning("Rate Limit, delaying for %d seconds" % delay)
time.sleep(delay)
return self._fetch_json(path, post_data, **args)
else:
raise QuipError(error.code, message, error)
def _clean(self, **args):
# We only expect ints or strings, but on Windows ints can become longs
return dict((k, str(v) if isinstance(
v, (int, float, long, complex)) else v.encode("utf-8"))
for k, v in args.items() if v or isinstance(
v, (int, float, long, complex)))
def _url(self, path, **args):
url = self.base_url + "/1/" + path
args = self._clean(**args)
if args:
url += "?" + urllib.urlencode(args)
return url
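
# --- Hedged usage sketch (not part of the original client) -----------------
# Ties together the editing helpers documented above (add_to_first_list,
# add_to_spreadsheet, update_spreadsheet_row). The access token and thread id
# are placeholders supplied by the caller; header names and values are made up.
def _demo_edit_helpers(access_token, thread_id):
    client = QuipClient(access_token=access_token)
    # Append a Markdown item to the first bulleted list or checklist.
    client.add_to_first_list(thread_id, "Try the Quip API")
    # Append a row to the first spreadsheet in the document.
    client.add_to_spreadsheet(thread_id, ["5/1/2014", 2.24])
    # Update (or insert) the row whose "customer" column equals "Acme".
    client.update_spreadsheet_row(
        thread_id, "customer", "Acme", {"Billed": "6/24/2015"})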
class QuipError(Exception):
def __init__(self, code, message, http_error):
Exception.__init__(self, "%d: %s" % (code, message))
self.code = code
self.http_error = http_error
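
# --- Hedged usage sketch (not part of the original client) -----------------
# Mirrors the OAuth flow described in get_authorization_url()/get_access_token():
# send the user to the login URL, then trade the callback `code` for a token.
# All four arguments are placeholders supplied by the integrating server.
def _demo_oauth_exchange(client_id, client_secret, redirect_uri, code):
    client = QuipClient(client_id=client_id, client_secret=client_secret)
    login_url = client.get_authorization_url(redirect_uri)    # step 1: redirect user
    token_info = client.get_access_token(redirect_uri, code)  # step 2: after callback
    return login_url, token_info.get("access_token")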
| 40.780842
| 80
| 0.588583
|
efc9ac3b6c6bd3a8a2ee90360d4a909f9bafe70b
| 23
|
py
|
Python
|
python/module/hl2d/__init__.py
|
DerThorsten/hl2d
|
d21b711f3427188b1de63cb5a77f791f4a7f0c8b
|
[
"MIT"
] | null | null | null |
python/module/hl2d/__init__.py
|
DerThorsten/hl2d
|
d21b711f3427188b1de63cb5a77f791f4a7f0c8b
|
[
"MIT"
] | null | null | null |
python/module/hl2d/__init__.py
|
DerThorsten/hl2d
|
d21b711f3427188b1de63cb5a77f791f4a7f0c8b
|
[
"MIT"
] | null | null | null |
from . _hl2d import *
| 7.666667
| 21
| 0.652174
|
3885739c3deabf6fcc7d9e96a67ba881c8c33451
| 963
|
py
|
Python
|
src/sentry/api/endpoints/team_stats.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/api/endpoints/team_stats.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/api/endpoints/team_stats.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from rest_framework.response import Response
from six.moves import range
from sentry.app import tsdb
from sentry.api.base import BaseStatsEndpoint
from sentry.api.permissions import assert_perm
from sentry.models import Team, Project
class TeamStatsEndpoint(BaseStatsEndpoint):
def get(self, request, team_id):
team = Team.objects.get(id=team_id)
assert_perm(team, request.user, request.auth)
projects = Project.objects.get_for_user(request.user, team=team)
if not projects:
return Response([])
data = tsdb.get_range(
model=tsdb.models.project,
keys=[p.id for p in projects],
**self._parse_args(request)
).values()
summarized = []
for n in range(len(data[0])):
total = sum(d[n][1] for d in data)
summarized.append((data[0][n][0], total))
return Response(summarized)
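
# --- Hedged illustration (not part of the endpoint) -------------------------
# Documents the shape of the reduction above: tsdb.get_range() yields
# {project_id: [(timestamp, count), ...]}, and the endpoint sums the counts of
# every project bucket by bucket. The numbers below are made up.
def _demo_summarize():
    data = list({
        1: [(1400000000, 3), (1400003600, 5)],
        2: [(1400000000, 2), (1400003600, 0)],
    }.values())
    summarized = []
    for n in range(len(data[0])):
        summarized.append((data[0][n][0], sum(d[n][1] for d in data)))
    return summarized  # [(1400000000, 5), (1400003600, 5)]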
| 27.514286
| 72
| 0.656282
|
62722c7eb5ddf4057a4cfd1d79d52b55c096de35
| 2,590
|
py
|
Python
|
playground/detection/widerface/fcos/fcos.res50.fpn.widerface.600size.0.5x_crop.plus.norm_sync/config.py
|
hanqiu-hq/cvpods
|
597fa669151fdad87c250fa118a9e3a555f4fb5e
|
[
"Apache-2.0"
] | 548
|
2020-12-03T11:30:43.000Z
|
2022-03-31T11:19:26.000Z
|
playground/detection/widerface/fcos/fcos.res50.fpn.widerface.600size.0.5x_crop.plus.norm_sync/config.py
|
wondervictor/cvpods
|
614a975e5425bbaeb66bbd1ffca552d633ba89ca
|
[
"Apache-2.0"
] | 58
|
2020-12-04T19:47:10.000Z
|
2022-03-30T06:52:13.000Z
|
playground/detection/widerface/fcos/fcos.res50.fpn.widerface.600size.0.5x_crop.plus.norm_sync/config.py
|
wondervictor/cvpods
|
614a975e5425bbaeb66bbd1ffca552d633ba89ca
|
[
"Apache-2.0"
] | 76
|
2020-12-03T11:58:36.000Z
|
2022-03-25T08:05:15.000Z
|
import os.path as osp
from cvpods.configs.fcos_config import FCOSConfig
_config_dict = dict(
MODEL=dict(
WEIGHTS="detectron2://ImageNetPretrained/MSRA/R-50.pkl",
RESNETS=dict(DEPTH=50),
FCOS=dict(
NUM_CLASSES=1,
CENTERNESS_ON_REG=True,
NORM_REG_TARGETS=True,
TOPK_CANDIDATES_TEST=5000,
NMS_THRESH_TEST=0.6,
BBOX_REG_WEIGHTS=(1.0, 1.0, 1.0, 1.0),
FOCAL_LOSS_GAMMA=2.0,
FOCAL_LOSS_ALPHA=0.25,
IOU_LOSS_TYPE="giou",
CENTER_SAMPLING_RADIUS=1.5,
OBJECT_SIZES_OF_INTEREST=[
[-1, 64],
[64, 128],
[128, 256],
[256, 512],
[512, float("inf")],
],
),
),
DATASETS=dict(
TRAIN=("widerface_2019_train",),
TEST=("widerface_2019_val",),
),
SOLVER=dict(
LR_SCHEDULER=dict(
MAX_ITER=45000,
STEPS=(30000, 40000),
),
OPTIMIZER=dict(
BASE_LR=0.01,
),
CHECKPOINT_PERIOD=2500,
IMS_PER_BATCH=16,
),
INPUT=dict(
AUG=dict(
TRAIN_PIPELINES=[
("RandomCropWithInstance", dict(
crop_type="relative_range", crop_size=(0.25, 0.25))),
("ResizeShortestEdge", dict(
short_edge_length=(600,), max_size=1500, sample_style="choice")),
("ShuffleList", dict(transforms=[
("RandomBrightness", dict(intensity_min=0.6, intensity_max=1.4)),
("RandomContrast", dict(intensity_min=0.6, intensity_max=1.4)),
("RandomSaturation", dict(intensity_min=0.6, intensity_max=1.4)),
("RandomLighting", dict(scale=0.1)),
])),
("RandomFlip", dict()),
],
TEST_PIPELINES=[
("ResizeShortestEdge", dict(
short_edge_length=1000, max_size=2500, sample_style="choice")),
],
),
# CROP=dict(ENABLED=True, TYPE="relative_range", SIZE=[0.25, 0.25],),
),
TEST=dict(
DETECTIONS_PER_IMAGE=1000,
),
OUTPUT_DIR=osp.join(
'/data/Outputs/model_logs/cvpods_playground',
osp.split(osp.realpath(__file__))[0].split("playground/")[-1]
),
)
class CustomFCOSConfig(FCOSConfig):
def __init__(self):
super(CustomFCOSConfig, self).__init__()
self._register_configuration(_config_dict)
config = CustomFCOSConfig()
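
# --- Hedged sketch (not part of the original config) ------------------------
# _config_dict is a plain nested dict, so experiment variants typically copy
# this file and edit the same keys before CustomFCOSConfig registers them. The
# helper below only illustrates which keys a shortened smoke-test schedule
# would touch; the values are illustrative, not recommended settings.
def _apply_debug_schedule(cfg_dict):
    cfg_dict["SOLVER"]["LR_SCHEDULER"]["MAX_ITER"] = 4500
    cfg_dict["SOLVER"]["LR_SCHEDULER"]["STEPS"] = (3000, 4000)
    cfg_dict["SOLVER"]["IMS_PER_BATCH"] = 8
    return cfg_dict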
| 31.204819
| 85
| 0.526641
|
122e4a2b5d25fbeb4ac53055f8aca8f63f4f9060
| 1,217
|
py
|
Python
|
setup.py
|
asdfkaba/markdown-urlize
|
27f1fa8c0b187df48647f3b7a72a86731e0608e4
|
[
"BSD-2-Clause"
] | 8
|
2015-12-17T11:27:13.000Z
|
2021-06-09T23:54:04.000Z
|
setup.py
|
asdfkaba/markdown-urlize
|
27f1fa8c0b187df48647f3b7a72a86731e0608e4
|
[
"BSD-2-Clause"
] | 3
|
2015-02-20T15:48:26.000Z
|
2019-11-25T16:24:48.000Z
|
setup.py
|
asdfkaba/markdown-urlize
|
27f1fa8c0b187df48647f3b7a72a86731e0608e4
|
[
"BSD-2-Clause"
] | 8
|
2015-02-20T16:08:33.000Z
|
2019-01-21T09:11:57.000Z
|
import os
from setuptools import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='markdown-urlize',
version='0.2.0',
description='A more liberal autolink extension for python Markdown',
long_description=(read('README.md')),
url='https://github.com/r0wb0t/markdown-urlize',
license='BSD',
author='Rowan Nairn',
author_email='rnairn@gmail.com',
py_modules=['mdx_urlize'],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[ "markdown", ]
)
| 32.891892
| 72
| 0.619556
|
e158c4972dbb4672dc18aba5f47986b791bf7aec
| 384
|
py
|
Python
|
invenio_theme_tugraz/version.py
|
freelion93/invenio-theme-tugraz
|
336613e0d8ae9286c88ea49c584d108a2739f2e2
|
[
"MIT"
] | null | null | null |
invenio_theme_tugraz/version.py
|
freelion93/invenio-theme-tugraz
|
336613e0d8ae9286c88ea49c584d108a2739f2e2
|
[
"MIT"
] | null | null | null |
invenio_theme_tugraz/version.py
|
freelion93/invenio-theme-tugraz
|
336613e0d8ae9286c88ea49c584d108a2739f2e2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 mojib wali.
#
# invenio-theme-tugraz is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Version information for invenio-theme-tugraz.
This file is imported by ``invenio_theme_tugraz.__init__``,
and parsed by ``setup.py``.
"""
__version__ = '1.0.3'
| 24
| 73
| 0.710938
|
01d8f1fa8a5419f352d0419a6b9a2ff4fdc8f790
| 418
|
py
|
Python
|
python-seleninm.py
|
ichengzi/learn-python
|
c1753ef624b2d0bfcfa7058818b4ea36fdf060de
|
[
"MIT"
] | null | null | null |
python-seleninm.py
|
ichengzi/learn-python
|
c1753ef624b2d0bfcfa7058818b4ea36fdf060de
|
[
"MIT"
] | null | null | null |
python-seleninm.py
|
ichengzi/learn-python
|
c1753ef624b2d0bfcfa7058818b4ea36fdf060de
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
el = driver.find_element_by_id('kw')
el.send_keys('chengzi'+Keys.RETURN)
time.sleep(1)
print(driver.title)
# Return the ICP filing (beian) info shown at the bottom of the Baidu page
text = driver.find_element_by_id("cp").text
print(text)
name = input("please input :")
driver.quit()
| 19.904762
| 48
| 0.717703
|
25969d9b24d7a57531fea41b00ca1e5a37bc0ee0
| 286
|
py
|
Python
|
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0013.py
|
moacirsouza/nadas
|
ad98d73b4281d1581fd2b2a9d29001acb426ee56
|
[
"MIT"
] | 1
|
2020-07-03T13:54:18.000Z
|
2020-07-03T13:54:18.000Z
|
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0013.py
|
moacirsouza/nadas
|
ad98d73b4281d1581fd2b2a9d29001acb426ee56
|
[
"MIT"
] | null | null | null |
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0013.py
|
moacirsouza/nadas
|
ad98d73b4281d1581fd2b2a9d29001acb426ee56
|
[
"MIT"
] | null | null | null |
print('[-- Write an algorithm that reads an employee salary and shows the new salary, with a 15% raise. --]\n')
salario = float(input('What is the employee salary? R$ '))
novosalario = salario * 1.15
print('The employee new salary is: R${:.2f}'.format(novosalario))
| 35.75
| 120
| 0.702797
|
b7250f5fc5fe7164b1c31ec9d1418728dcabdd3b
| 2,936
|
py
|
Python
|
lib/spack/spack/s3_handler.py
|
radical-cybertools/spack
|
c11ce3bd1fc87cbb3cc8519d13c27892a9f1d66e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
lib/spack/spack/s3_handler.py
|
radical-cybertools/spack
|
c11ce3bd1fc87cbb3cc8519d13c27892a9f1d66e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
lib/spack/spack/s3_handler.py
|
radical-cybertools/spack
|
c11ce3bd1fc87cbb3cc8519d13c27892a9f1d66e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from io import BufferedReader
import six.moves.urllib.error as urllib_error
import six.moves.urllib.request as urllib_request
import six.moves.urllib.response as urllib_response
import spack.util.s3 as s3_util
import spack.util.url as url_util
# NOTE(opadron): Workaround issue in boto where its StreamingBody
# implementation is missing several APIs expected from IOBase. These missing
# APIs prevent the streams returned by boto from being passed as-is along to
# urllib.
#
# https://github.com/boto/botocore/issues/879
# https://github.com/python/cpython/pull/3249
class WrapStream(BufferedReader):
def __init__(self, raw):
raw.readable = lambda: True
raw.writable = lambda: False
raw.seekable = lambda: False
raw.closed = False
raw.flush = lambda: None
super(WrapStream, self).__init__(raw)
def detach(self):
self.raw = None
def read(self, *args, **kwargs):
return self.raw.read(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.raw, key)
def _s3_open(url):
parsed = url_util.parse(url)
s3 = s3_util.create_s3_session(parsed,
connection=s3_util.get_mirror_connection(parsed)) # noqa: E501
bucket = parsed.netloc
key = parsed.path
if key.startswith('/'):
key = key[1:]
obj = s3.get_object(Bucket=bucket, Key=key)
# NOTE(opadron): Apply workaround here (see above)
stream = WrapStream(obj['Body'])
headers = obj['ResponseMetadata']['HTTPHeaders']
return url, headers, stream
class UrllibS3Handler(urllib_request.HTTPSHandler):
def s3_open(self, req):
orig_url = req.get_full_url()
from botocore.exceptions import ClientError
try:
url, headers, stream = _s3_open(orig_url)
return urllib_response.addinfourl(stream, headers, url)
except ClientError as err:
# if no such [KEY], but [KEY]/index.html exists,
# return that, instead.
if err.response['Error']['Code'] == 'NoSuchKey':
try:
_, headers, stream = _s3_open(
url_util.join(orig_url, 'index.html'))
return urllib_response.addinfourl(
stream, headers, orig_url)
except ClientError as err2:
if err.response['Error']['Code'] == 'NoSuchKey':
# raise original error
raise urllib_error.URLError(err)
raise urllib_error.URLError(err2)
raise urllib_error.URLError(err)
S3OpenerDirector = urllib_request.build_opener(UrllibS3Handler())
open = S3OpenerDirector.open
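
# --- Hedged usage sketch (not part of the original module) ------------------
# The opener built above reads s3:// URLs like any urllib opener; on a missing
# key, s3_open() transparently retries "<url>/index.html". The bucket and key
# below are placeholders and require a configured mirror/credentials.
def _demo_fetch(url="s3://my-spack-mirror/build_cache/index.json"):
    response = open(url)   # module-level `open` bound to S3OpenerDirector.open
    try:
        return response.read()
    finally:
        response.close()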
| 31.913043
| 98
| 0.64203
|
3f1bde024acb2d4261a827f93aa051152861937d
| 3,892
|
py
|
Python
|
genesis.py
|
wonabru/chainnet
|
f8ec1e2b580af837cba3322ffe69b95156b1b9a1
|
[
"MIT"
] | 5
|
2019-04-20T18:54:55.000Z
|
2019-08-23T09:17:20.000Z
|
genesis.py
|
wonabru/chainnet
|
f8ec1e2b580af837cba3322ffe69b95156b1b9a1
|
[
"MIT"
] | null | null | null |
genesis.py
|
wonabru/chainnet
|
f8ec1e2b580af837cba3322ffe69b95156b1b9a1
|
[
"MIT"
] | null | null | null |
from wallet import CWallet
class CGenesis:
def __init__(self):
self.e = 65537
#self.initAccountPubKey = CWallet().getPublicKey(self.getPrivKey())
self.initAccountPubKey = 'g9MsRcZGTHomRZD6pN4Nr1uq8L+scPZ/mOAsWBsU3LSBS99k4A5rA/XP5n7VfFpF4DNXhSJce4ZAo' \
'Qbm3v0qyfrs5J9F9blhYZT2N8eNgmj2lCC0IC1YmZ5Pn/iSkpNKWk/ttGJj/hy+ozjcT0py8NJ' \
'JzDLzeydCMrd1aG4Snbg='
self.first_accountPubKey = '2x0PpHUGF1P2q/wB3j+Fi1WHrl5zoPp/XvaEPhnL+lDwTNdjyMviD9T0AT' \
'RX6dKwtQkJraRBryLL/gdooa3VVRJ+thSH11suQNsJ4peI6vEAMwvyamF5M' \
'TJ3Hn+U0SJ0DgtTe7k5D7qAwu4k5MfbbpEwVAu0qsMcIjSqxvSd5+Q='
self.signature_init = 'MV1q7Ok7rDtrDVLms4IkBNxqpudRR0gmjfY5AcfrfyBGKjjwFM/aivoYq8+Za' \
'PA/J2oevSBW+outASBzHBqkFVFem8ZosBE260fNKfUvFGiDh+2Xc7QtJ3QVTPm' \
'U0pL+PQj4gUjMRcHp8+OvJsRJcTQr823tQSvaOqt7qD/bwJI='
self.signature_wonabru = 'o4OgtH371UgJlluvS247Dh09OKCAtJ2V5NvUoXXCUAF81WdUoQruxrDIpk' \
'2GLWmDJ5cjNGaGqsLqa56C1WJ0Od3SpxIr8dESjyg76ocLZln8VEq' \
'vaFM1EkIF+uYooLahRrax4dpbUT9C8ePX2wEIBtWoxuqvcG9MNY1AJAr0c2w='
self.signature_init = 'Rh/cLRPQu17zypC674QRCY1ExiUqGlHN4CQ9H3RN6aEyLi+0PjqViY6Bfggax24R3Rr6Vdk3dDsOD2niYBlqtUjEm76SZUrS87eYtug4ONiIaNg1IboyC8vKGQrnngNj8cNH9zi4cG+Vxhe3RF0IKrQrdpmKhEA8nJM6caHfsYk='
self.signature_wonabru = 'QuTyCR2zHZoAK6z8BPFcZMAJB/vZ87utIMoPbaj6M/wvGd0WoERueY2LWOP4C7yeru8TXySK9GPt8W3szIaj/gya8Jy1VPxADdaCnY8tBP1dYEJauhW6UzR7uu5SGMRkfDSesooUUjXj5JoAcNDKfmWNufKt6U0bZTYHQPCfMn4='
self.first_accountPubKey = 'fSmWlIfKCxdmKA9ESrOaCqj0GMyOtDcfEKcsKKcXUJCnC7TJBkcHOUCZNFhxJKZzzFCB3eqiTzJRVaoN48icOvId/hlHyIHJgTBxMz88wo9e/ULEvp5oeYRhYlFArihVzDQFa+FQjNadcpIYxH1SB/W5dBRwbjVbhx+wH7FuAcc='
def getPrivKey(self):
return CWallet().privfromJson({"d": "260360158136291685259871724233088127320409021650909687793"
"2879118742715555180033795778602066631893544447789492234164910"
"385949806819743078737167196436727012038203821157078420974670926"
"06301999711804406195907397851657077319070828630702591501084799914"
"32733151210696830300122267288326422618641019322363130479430113",
"q": "12594088244373820944436226815539318310684829844188840940167646926"
"25535036425497468032905730900044799851526676498396726898246440347"
"4063608333703134281537371",
"p": "1029372272012708261757893385925179370640841586366892692717034035205"
"9760819835453753397459089964949202673384397510362334982430"
"481530324517540053232321434873",
"u": "377596467689180211077614258124840469687532496364253032467368"
"5873812089354681429077815909347193266596757509199560433033881"
"070862812708412505555990015444924",
"n": "12964005230039620252907015036517168059715579738592666926689492"
"387558224383819622768115958337703571434620481686107769771460567"
"73207305137146313072123338232928678776923321210634716327013768448"
"473054123152212622679157860646693498709948178821887146362034334062"
"40728215174705916932152806228964670724820983092138883",
"e": "65537"})
| 82.808511
| 209
| 0.656732
|
4e31c0b8983263aad44761f77a9773f225432b82
| 2,336
|
py
|
Python
|
rdflib/exceptions.py
|
ROZBEH/rdflib
|
5b9da927714a92a8888407f42b46249002964e8e
|
[
"BSD-3-Clause"
] | 1,424
|
2015-01-04T13:10:22.000Z
|
2022-03-29T15:12:38.000Z
|
rdflib/exceptions.py
|
ROZBEH/rdflib
|
5b9da927714a92a8888407f42b46249002964e8e
|
[
"BSD-3-Clause"
] | 1,148
|
2015-01-01T18:26:18.000Z
|
2022-03-31T21:51:53.000Z
|
rdflib/exceptions.py
|
ROZBEH/rdflib
|
5b9da927714a92a8888407f42b46249002964e8e
|
[
"BSD-3-Clause"
] | 459
|
2015-01-03T14:41:34.000Z
|
2022-03-14T22:06:47.000Z
|
"""
TODO:
"""
__all__ = [
"Error",
"TypeCheckError",
"SubjectTypeError",
"PredicateTypeError",
"ObjectTypeError",
"ContextTypeError",
"ParserError",
]
class Error(Exception):
"""Base class for rdflib exceptions."""
def __init__(self, msg=None):
Exception.__init__(self, msg)
self.msg = msg
class TypeCheckError(Error):
"""Parts of assertions are subject to type checks."""
def __init__(self, node):
Error.__init__(self, node)
self.type = type(node)
self.node = node
class SubjectTypeError(TypeCheckError):
"""Subject of an assertion must be an instance of URIRef."""
def __init__(self, node):
TypeCheckError.__init__(self, node)
self.msg = "Subject must be instance of URIRef or BNode: %s(%s)" % (
self.node,
self.type,
)
class PredicateTypeError(TypeCheckError):
"""Predicate of an assertion must be an instance of URIRef."""
def __init__(self, node):
TypeCheckError.__init__(self, node)
self.msg = "Predicate must be a URIRef instance: %s(%s)" % (
self.node,
self.type,
)
class ObjectTypeError(TypeCheckError):
"""Object of an assertion must be an instance of URIRef, Literal,
or BNode."""
def __init__(self, node):
TypeCheckError.__init__(self, node)
self.msg = (
"\
Object must be instance of URIRef, Literal, or BNode: %s(%s)"
% (self.node, self.type)
)
class ContextTypeError(TypeCheckError):
"""Context of an assertion must be an instance of URIRef."""
def __init__(self, node):
TypeCheckError.__init__(self, node)
self.msg = "Context must be instance of URIRef or BNode: %s(%s)" % (
self.node,
self.type,
)
class ParserError(Error):
"""RDF Parser error."""
def __init__(self, msg):
Error.__init__(self, msg)
self.msg = msg
def __str__(self):
return self.msg
class UniquenessError(Error):
"""A uniqueness assumption was made in the context, and that is not true"""
def __init__(self, values):
Error.__init__(
self,
"\
Uniqueness assumption is not fulfilled. Multiple values are: %s"
% values,
)
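
# --- Hedged example (not part of the original module) -----------------------
# Sketch of how the hierarchy above is used: the subject/predicate/object/
# context errors all derive from TypeCheckError, so callers can catch the base
# class. The node value here is just a stand-in for whatever failed validation.
def _demo_catch(node="not-a-URIRef"):
    try:
        raise SubjectTypeError(node)
    except TypeCheckError as error:
        return error.msg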
| 23.36
| 79
| 0.598887
|
0bf2d04b802848fed914565caed1893b47d9d583
| 14,015
|
py
|
Python
|
openmdao.lib/src/openmdao/lib/drivers/test/test_broydensolver.py
|
mjfwest/OpenMDAO-Framework
|
a5521f47ad7686c25b203de74e1c7dff5fd7a52b
|
[
"Apache-2.0"
] | 69
|
2015-01-02T19:10:08.000Z
|
2021-11-14T04:42:28.000Z
|
openmdao.lib/src/openmdao/lib/drivers/test/test_broydensolver.py
|
jcchin/OpenMDAO-Framework
|
038e89b06da1c74f00918f4c6fbd8bd365e25657
|
[
"Apache-2.0"
] | 3
|
2015-01-15T23:08:18.000Z
|
2015-03-11T16:57:35.000Z
|
openmdao.lib/src/openmdao/lib/drivers/test/test_broydensolver.py
|
jcchin/OpenMDAO-Framework
|
038e89b06da1c74f00918f4c6fbd8bd365e25657
|
[
"Apache-2.0"
] | 31
|
2015-09-16T00:37:35.000Z
|
2022-01-10T06:27:55.000Z
|
"""
Test the broyden solver component.
"""
import unittest
import numpy
from openmdao.main.api import Assembly, Component, set_as_top, Driver
from openmdao.main.interfaces import IHasParameters, implements
from openmdao.main.hasparameters import HasParameters
from openmdao.util.decorators import add_delegate
from openmdao.lib.drivers.api import BroydenSolver
from openmdao.main.datatypes.api import Array, Float
from openmdao.util.testutil import assert_rel_error, assert_raises
# pylint: disable=E1101,E1103
# "Instance of <class> has no <attr> member"
class SellarDiscipline1(Component):
"""Component containing Discipline 1"""
# pylint: disable=E1101
z1 = Float(0.0, iotype='in', desc='Global Design Variable')
z2 = Float(0.0, iotype='in', desc='Global Design Variable')
x1 = Float(0.0, iotype='in', desc='Local Design Variable')
y2 = Float(0.0, iotype='in', desc='Disciplinary Coupling')
y1 = Float(iotype='out', desc='Output of this Discipline')
def execute(self):
"""Evaluates the equation
y1 = z1**2 + z2 + x1 - 0.2*y2"""
z1 = self.z1
z2 = self.z2
x1 = self.x1
y2 = self.y2
self.y1 = z1**2 + z2 + x1 - 0.2*y2
class SellarDiscipline2(Component):
"""Component containing Discipline 2"""
# pylint: disable=E1101
z1 = Float(0.0, iotype='in', desc='Global Design Variable')
z2 = Float(0.0, iotype='in', desc='Global Design Variable')
y1 = Float(0.0, iotype='in', desc='Disciplinary Coupling')
y2 = Float(iotype='out', desc='Output of this Discipline')
def execute(self):
"""Evaluates the equation
y2 = y1**(.5) + z1 + z2"""
z1 = self.z1
z2 = self.z2
# Note: this may cause some issues. However, y1 is constrained to be
# above 3.16, so lets just let it converge, and the optimizer will
# throw it out
y1 = abs(self.y1)
self.y2 = y1**(.5) + z1 + z2
class SellarBroyden(Assembly):
"""Solution of the sellar analytical problem using MDF.
Sellar, R. S., Batill, S. M., and Renaud, J. E., "Response Surface Based,
Concurrent Subspace Optimization for Multidisciplinary System Design,"
Proceedings of the 34th AIAA Aerospace Sciences Meeting and Exhibit,
Reno, NV, January 1996.
"""
def configure(self):
""" Creates a new Assembly with this problem
Optimal Design at (1.9776, 0, 0)
Optimal Objective = 3.18339"""
# pylint: disable=E1101
# create solver instance
self.add('driver', BroydenSolver())
self.add('dis1', SellarDiscipline1())
self.add('dis2', SellarDiscipline2())
self.driver.workflow.add(['dis1', 'dis2'])
self.connect('dis1.y1', 'dis2.y1')
# solver connections
self.driver.add_parameter('dis1.y2')
self.driver.add_constraint('dis2.y2 = dis1.y2')
self.driver.itmax = 10
self.driver.alpha = .4
self.driver.tol = .000000001
class MIMOEquation(Component):
"""Equation with 2 inputs and 2 outputs"""
# pylint: disable=E1101
x = Array([1., 1., 1., 1., 1.], iotype='in', desc='Global Design Variables')
f1 = Float(iotype='out', desc='Output of this Discipline')
f2 = Float(iotype='out', desc='Output of this Discipline')
f3 = Float(iotype='out', desc='Output of this Discipline')
f4 = Float(iotype='out', desc='Output of this Discipline')
f5 = Float(iotype='out', desc='Output of this Discipline')
ff = Array([0., 0., 0., 0., 0.], iotype='out')
def execute(self):
"""Should converge to x=[0,0,0,0,0]"""
d = numpy.array([3, 2, 1.5, 1, 0.5])
c = 0.01
self.ff = -d*self.x - c*self.x**3
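        # Note (added for clarity): every entry of d and the constant c are positive,
        # so -d*x - c*x**3 = -x*(d + c*x**2) vanishes only at x = 0. The solver should
        # therefore drive all five components to zero, as the docstring states.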
self.f1 = self.ff[0]
self.f2 = self.ff[1]
self.f3 = self.ff[2]
self.f4 = self.ff[3]
self.f5 = self.ff[4]
class DumbComp(Component):
"""A component whose output is independent of the input."""
# pylint: disable=E1101
x1 = Float(1.0, iotype='in', desc='Global Design Variable')
f1 = Float(3.14, iotype='out', desc='Output of this Discipline')
def execute(self):
"""Do nothing"""
pass
class DumbAssembly(Assembly):
"""Assembly with DumbComp.
"""
def configure(self):
# create solver instance
self.add('driver', BroydenSolver())
self.add('dis1', DumbComp())
self.driver.workflow.add(['dis1'])
# solver connections
self.driver.add_parameter('dis1.x1')
self.driver.add_constraint('dis1.f1 = 0.0')
class MIMOBroyden(Assembly):
"""Solution of the MIMO problem using MDF.
"""
def configure(self):
""" Creates a new Assembly with this problem
        root at x = [0., 0., 0., 0., 0.]
"""
# create solver instance
self.add('driver', BroydenSolver())
self.add('dis1', MIMOEquation())
self.driver.workflow.add(['dis1'])
# solver connections
self.driver.itmax = 40
self.driver.alpha = .8
self.driver.tol = .000001
class TestCase(unittest.TestCase):
""" Test the broyden solver. """
def setUp(self):
""" Called before each test. """
self.prob = None
def tearDown(self):
""" Called after each test. """
self.prob = None
def test_Broyden2(self):
self.prob = SellarBroyden()
set_as_top(self.prob)
self.prob.dis1.z1_in = 5.0
self.prob.dis1.z2_in = 2.0
self.prob.dis1.x1 = 1.0
self.prob.dis2.z1_in = 5.0
self.prob.dis2.z2_in = 2.0
self.prob.driver.algorithm = "broyden2"
self.prob.run()
assert_rel_error(self, self.prob.dis1.y1, 0.819002, 0.0001)
assert_rel_error(self, self.prob.dis2.y1, 0.819002, 0.0001)
assert_rel_error(self, self.prob.dis1.y2, 0.904988, 0.0001)
assert_rel_error(self, self.prob.dis2.y2, 0.904988, 0.0001)
def test_Broyden3(self):
self.prob = SellarBroyden()
set_as_top(self.prob)
self.prob.dis1.z1_in = 5.0
self.prob.dis1.z2_in = 2.0
self.prob.dis1.x1 = 1.0
self.prob.dis2.z1_in = 5.0
self.prob.dis2.z2_in = 2.0
self.prob.driver.algorithm = "broyden3"
self.prob.run()
assert_rel_error(self, self.prob.dis1.y1, 0.819002, 0.0001)
assert_rel_error(self, self.prob.dis2.y1, 0.819002, 0.0001)
assert_rel_error(self, self.prob.dis1.y2, 0.904988, 0.0001)
assert_rel_error(self, self.prob.dis2.y2, 0.904988, 0.0001)
def test_ExcitingMixing(self):
self.prob = SellarBroyden()
set_as_top(self.prob)
self.prob.dis1.z1_in = 5.0
self.prob.dis1.z2_in = 2.0
self.prob.dis1.x1 = 1.0
self.prob.dis2.z1_in = 5.0
self.prob.dis2.z2_in = 2.0
self.prob.driver.algorithm = "excitingmixing"
self.prob.run()
assert_rel_error(self, self.prob.dis1.y1, 0.819002, 0.0001)
assert_rel_error(self, self.prob.dis2.y1, 0.819002, 0.0001)
assert_rel_error(self, self.prob.dis1.y2, 0.904988, 0.0001)
assert_rel_error(self, self.prob.dis2.y2, 0.904988, 0.0001)
def test_MIMO_Broyden2(self):
        # Testing Broyden on a 5-input, 5-output case
self.prob = MIMOBroyden()
set_as_top(self.prob)
driver = self.prob.driver
driver.add_parameter('dis1.x[0]')
driver.add_parameter('dis1.x[1]')
driver.add_parameter('dis1.x[2]')
driver.add_parameter('dis1.x[3]')
driver.add_parameter('dis1.x[4]')
driver.add_constraint('dis1.f1 = 0.0')
driver.add_constraint('dis1.f2 = 0.0')
driver.add_constraint('dis1.f3 = 0.0')
driver.add_constraint('dis1.f4 = 0.0')
driver.add_constraint('dis1.f5 = 0.0')
self.prob.dis1.x = [1., 1., 1., 1., 1.]
driver.algorithm = "broyden2"
self.prob.run()
assert_rel_error(self, 1.0 - self.prob.dis1.x[0], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[1], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[2], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[3], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[4], 1.0, 0.0001)
def test_MIMO_Broyden2_array(self):
# Testing Broyden with an ArrayParameter.
self.prob = MIMOBroyden()
set_as_top(self.prob)
driver = self.prob.driver
driver.add_parameter('dis1.x')
driver.add_constraint('dis1.ff = 0.0')
self.prob.dis1.x = [1., 1., 1., 1., 1.]
self.prob.dis1.trace = True
driver.algorithm = "broyden2"
self.prob.run()
assert_rel_error(self, 1.0 - self.prob.dis1.x[0], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[1], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[2], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[3], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[4], 1.0, 0.0001)
def test_MIMO_Broyden3(self):
        # Testing Broyden on a 5-input, 5-output case
self.prob = MIMOBroyden()
set_as_top(self.prob)
driver = self.prob.driver
driver.add_parameter('dis1.x[0]')
driver.add_parameter('dis1.x[1]')
driver.add_parameter('dis1.x[2]')
driver.add_parameter('dis1.x[3]')
driver.add_parameter('dis1.x[4]')
driver.add_constraint('dis1.f1 = 0.0')
driver.add_constraint('dis1.f2 = 0.0')
driver.add_constraint('dis1.f3 = 0.0')
driver.add_constraint('dis1.f4 = 0.0')
driver.add_constraint('dis1.f5 = 0.0')
self.prob.dis1.x = [1., 1., 1., 1., 1.]
driver.algorithm = "broyden3"
self.prob.run()
assert_rel_error(self, 1.0 - self.prob.dis1.x[0], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[1], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[2], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[3], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[4], 1.0, 0.0001)
def test_MIMO_ExcitingMixing(self):
        # Testing Broyden on a 5-input, 5-output case
self.prob = MIMOBroyden()
set_as_top(self.prob)
driver = self.prob.driver
driver.add_parameter('dis1.x[0]')
driver.add_parameter('dis1.x[1]')
driver.add_parameter('dis1.x[2]')
driver.add_parameter('dis1.x[3]')
driver.add_parameter('dis1.x[4]')
driver.add_constraint('dis1.f1 = 0.0')
driver.add_constraint('dis1.f2 = 0.0')
driver.add_constraint('dis1.f3 = 0.0')
driver.add_constraint('dis1.f4 = 0.0')
driver.add_constraint('dis1.f5 = 0.0')
self.prob.dis1.x = [1., 1., 1., 1., 1.]
driver.algorithm = "excitingmixing"
driver.alpha = 0.1
self.prob.run()
assert_rel_error(self, 1.0 - self.prob.dis1.x[0], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[1], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[2], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[3], 1.0, 0.0001)
assert_rel_error(self, 1.0 - self.prob.dis1.x[4], 1.0, 0.0001)
def test_no_change_in_value(self):
self.prob = DumbAssembly()
set_as_top(self.prob)
self.prob.driver.algorithm = "broyden2"
msg = "Broyden iteration has stopped converging. Change in " \
"input has produced no change in output. This could " \
"indicate a problem with your component connections. " \
"It could also mean that this solver method is " \
"inadequate for your problem."
assert_raises(self, 'self.prob.run()', globals(), locals(),
RuntimeError, msg)
self.prob.driver.algorithm = "broyden3"
msg = "Broyden iteration has stopped converging. Change in " \
"input has produced no change in output. This could " \
"indicate a problem with your component connections. " \
"It could also mean that this solver method is " \
"inadequate for your problem."
assert_raises(self, 'self.prob.run()', globals(), locals(),
RuntimeError, msg)
def test_AAAinitial_run(self):
        # The reason for putting the AAA in the name is so it runs
        # first. We shouldn't have to do that; there is some kind
        # of testing bug that is forcing us to do it.
        # Test the fix that performs an initial run
        # at the top of the execute method.
class MyComp(Component):
x = Float(0.0, iotype='in', low=-100000, high=100000)
xx = Float(0.0, iotype='in', low=-100000, high=100000)
f_x = Float(iotype='out')
y = Float(iotype='out')
def execute(self):
if self.xx != 1.0:
self.raise_exception("Lazy", RuntimeError)
self.f_x = 2.0*self.x
self.y = self.x
@add_delegate(HasParameters)
class SpecialDriver(Driver):
implements(IHasParameters)
def execute(self):
self.set_parameters([1.0])
self.prob = set_as_top(Assembly())
self.prob.add('comp', MyComp())
self.prob.add('driver', BroydenSolver())
self.prob.add('subdriver', SpecialDriver())
self.prob.driver.workflow.add('subdriver')
self.prob.subdriver.workflow.add('comp')
self.prob.subdriver.add_parameter('comp.xx')
self.prob.driver.add_parameter('comp.x')
self.prob.driver.add_constraint('comp.y = comp.x')
print "initial run test"
self.prob.run()
if __name__ == '__main__':
import nose
import sys
sys.argv.append('--cover-package=openmdao')
sys.argv.append('--cover-erase')
nose.runmodule()
| 32.292627
| 84
| 0.600642
|
5b7a43fba0eda764f69300b1a36bf7b3ce0948b5
| 21,203
|
py
|
Python
|
test/functional/pruning.py
|
gautes/TrollCoinCore
|
21392fb66e6abf50d48598827dbbf15a129e51b1
|
[
"MIT"
] | null | null | null |
test/functional/pruning.py
|
gautes/TrollCoinCore
|
21392fb66e6abf50d48598827dbbf15a129e51b1
|
[
"MIT"
] | null | null | null |
test/functional/pruning.py
|
gautes/TrollCoinCore
|
21392fb66e6abf50d48598827dbbf15a129e51b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
from test_framework.test_framework import TrollcoinTestFramework
from test_framework.util import *
import time
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
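# Illustrative note (added; not part of the original test): with use_timestamp=True,
# the manual_test helper below converts a block index into a prune target as
#     node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
# so pruneblockchain() is driven by block time plus this two-hour margin rather
# than by raw block height.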
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
class PruneTest(TrollcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 6
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
self.nodes.append(start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
self.nodes.append(start_node(3, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
self.nodes.append(start_node(4, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
# Create nodes 5 to test wallet in prune mode, but do not connect
self.nodes.append(start_node(5, self.options.tmpdir, ["-prune=550"]))
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
self.log.info("Current block height: %d" % height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want.
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
self.log.info("New best height: %d" % self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
self.log.info("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
self.log.info("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
self.log.info("Will need to redownload block %d" % self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, timewait=900)
assert_equal(node.getblockcount(), 995)
assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
self.stop_node(node_number)
# now re-start in manual pruning mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=1"], timewait=900)
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
def prune(index, expected_ret=None):
ret = node.pruneblockchain(height(index))
# Check the return value. When use_timestamp is True, just check
# that the return value is less than or equal to the expected
# value, because when more than one block is generated per second,
# a timestamp will not be granular enough to uniquely identify an
# individual block.
if expected_ret is None:
expected_ret = index
if use_timestamp:
assert_greater_than(ret, 0)
assert_greater_than(expected_ret + 1, ret)
else:
assert_equal(ret, expected_ret)
def has_block(index):
return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# Does nothing
node.pruneblockchain(height(0))
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file
prune(500)
if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file
prune(650)
if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
if not has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs
self.stop_node(node_number)
self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=550"], timewait=900)
self.log.info("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2)
start_node(2, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
        # check that the wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan
start_node(5, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
def run_test(self):
self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
self.log.info("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Done")
if __name__ == '__main__':
PruneTest().main()
| 47.754505
| 158
| 0.605245
|
3d3cdb0a5a58ddfcd7cbc69348751eb940fa87de
| 5,574
|
py
|
Python
|
2021/util.py
|
blin00/advent-of-code
|
0a8cafb1eb6c2ec0b35af2af1fdbf6498ca0a83f
|
[
"Unlicense"
] | 7
|
2021-12-11T00:04:11.000Z
|
2021-12-30T11:13:36.000Z
|
2021/util.py
|
blin00/advent-of-code
|
0a8cafb1eb6c2ec0b35af2af1fdbf6498ca0a83f
|
[
"Unlicense"
] | null | null | null |
2021/util.py
|
blin00/advent-of-code
|
0a8cafb1eb6c2ec0b35af2af1fdbf6498ca0a83f
|
[
"Unlicense"
] | 2
|
2021-12-18T10:15:43.000Z
|
2021-12-22T05:11:32.000Z
|
import re
from operator import add
from collections import deque, defaultdict, Counter
import copy
import sys
sys.setrecursionlimit(int(1e7))
# convention that positive y is down
# increment to clockwise/turn right, decrement to counterclockwise/turn left
DIRS = {
0: (0, -1),
1: (1, 0),
2: (0, 1),
3: (-1, 0),
}
DIRS_M = {
'U': 0,
'R': 1,
'D': 2,
'L': 3,
'N': 0,
'E': 1,
'S': 2,
'W': 3,
}
INF = float('inf')
class UniqueQueue():
def __init__(self, contents=None):
self.deque = deque()
self.set = set()
if contents is not None:
for x in contents:
self.push(x)
def __len__(self):
return len(self.deque)
def push(self, x):
if x not in self.set:
self.deque.appendleft(x)
self.set.add(x)
def pop(self):
x = self.deque.pop()
self.set.remove(x)
return x
def read_input(fname, t=lambda x: x, strip_lines=True, force_multi=False):
with open(fname, 'r') as f:
contents = f.read()
if strip_lines:
lines = contents.strip().split('\n')
else:
lines = contents.split('\n')
if len(lines) == 1 and not force_multi:
return t(lines[0])
return list(map(t, lines))
def maybe_int(s):
try:
return int(s)
except ValueError:
return s
def keep_by_index(indices, arr):
result = []
for i in sorted(indices):
if i < len(arr):
result.append(arr[i])
return result
def remove_by_index(indices, arr):
result = []
to_remove = set(indices)
for i in range(len(arr)):
if i not in to_remove:
result.append(arr[i])
return result
def min_by(f, arr):
return min([(f(x), x) for x in arr])[1]
def max_by(f, arr):
return max([(f(x), x) for x in arr])[1]
def parse_coord(line):
return tuple(map(int, line.split(',')))
def metric_taxi(a, b):
return sum(abs(a[i] - b[i]) for i in range(len(a)))
def move_by(d, p):
if isinstance(d, int):
d = DIRS[d]
return tuple(map(add, d, p))
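# Worked example (illustrative): with the convention above, direction 1 is east and
# direction 2 is south ("down"), so
#   move_by(1, (0, 0)) -> (1, 0)   and   move_by(2, (0, 0)) -> (0, 1),
# while turning right is (d + 1) % 4 and turning left is (d - 1) % 4.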
def parse_list(s):
s = s.strip()
return [int(x.strip('()[]<>')) for x in s.split(',')]
def fatal(*args, **kwargs):
print(*args, **kwargs)
exit()
def automata(grid, rule, iterations):
R = len(grid)
C = len(grid[0])
def get_neighbors(i, j):
# for ii, jj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
for ii, jj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1), (i - 1, j - 1), (i - 1, j + 1), (i + 1, j - 1), (i + 1, j + 1)):
if 0 <= ii < R and 0 <= jj < C:
yield ii, jj
for _ in range(iterations):
new_grid = [[None] * C for _ in range(R)]
for i in range(R):
for j in range(C):
neighbors = map(lambda x: grid[x[0]][x[1]], get_neighbors(i, j))
new_grid[i][j] = rule(grid[i][j], Counter(neighbors))
grid = new_grid
return grid
def print_grid(grid, t=lambda x: x):
for row in grid:
print(''.join(map(t, row)))
def rule_gol(me, neighbors):
if me == '*':
return '*' if 2 <= neighbors['*'] <= 3 else '.'
else:
return '*' if neighbors['*'] == 3 else '.'
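# Added note: rule_gol encodes Conway's Game of Life with '*' for live and '.' for
# dead cells: a live cell survives with 2 or 3 live neighbours, a dead cell is born
# with exactly 3. It is meant to be passed as the rule to automata(grid, rule_gol, n).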
def prod(L):
result = 1
for x in L:
result *= x
return result
def reverse_dict(d):
result = defaultdict(list)
for k, v in d.items():
for x in v:
result[x].append(k)
return result
builtin_map = map
def map(*args, **kwargs):
return list(builtin_map(*args, **kwargs))
def do_ps(lst):
prefix = [0]
for x in lst:
prefix.append(prefix[-1] + x)
return prefix
def transpose(A):
N = len(A)
M = len(A[0])
res = []
for j in range(M):
row = [A[i][j] for i in range(N)]
res.append(row)
return res
def crt(n, a):
from functools import reduce
sum = 0
prod = reduce(lambda a, b: a * b, n)
for n_i, a_i in zip(n, a):
p = prod // n_i
sum += a_i * pow(p, -1, n_i) * p
return sum % prod
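# Worked example (illustrative): crt([3, 5, 7], [2, 3, 2]) returns 23, the unique
# value modulo 105 with x % 3 == 2, x % 5 == 3 and x % 7 == 2. Note that
# pow(p, -1, n_i) (modular inverse) requires Python 3.8+ and pairwise-coprime moduli.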
def dump_dict_grid(d, t=lambda x: x):
min_x = min(x for x, y in d.keys())
max_x = max(x for x, y in d.keys())
min_y = min(y for x, y in d.keys())
max_y = max(y for x, y in d.keys())
for y in range(min_y, max_y + 1):
for x in range(min_x, max_x + 1):
print(t(d[(x, y)]), end='')
print()
def ordch(ch: str) -> int:
assert len(ch) == 1
x = ord(ch)
if x >= ord('a') and x <= ord('z'): return x - ord('a')
if x >= ord('A') and x <= ord('Z'): return x - ord('A')
raise Exception(f"{ch} is not alphabetic")
def add_interval(ss, L, R):
# [L, R)
assert L <= R
if L == R:
return None
idx = ss.bisect_left((L, R))
while idx < len(ss):
ival = ss[idx]
if ival[0] > R:
break
R = max(R, ival[1])
ss.pop(idx)
if idx > 0:
idx -= 1
ival = ss[idx]
if ival[1] >= L:
L = min(L, ival[0])
R = max(R, ival[1])
ss.pop(idx)
res = (L, R)
ss.add(res)
return res
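# Usage note (assumption, not stated in the original): ss is expected to be a
# sortedcontainers.SortedList (or SortedSet) of half-open (L, R) tuples. For example,
# starting from an empty container, add_interval(ss, 0, 5) followed by
# add_interval(ss, 3, 8) leaves the single merged interval (0, 8).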
def remove_interval(ss, L, R):
# [L, R)
assert L <= R
if L == R:
return
added = add_interval(ss, L, R)
r2 = added[1]
ss.remove(added)
if added[0] != L:
ss.add((added[0], L))
if R != r2:
ss.add((R, r2))
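# Illustrative example: if ss holds the single interval (0, 8), then
# remove_interval(ss, 2, 4) leaves the two intervals (0, 2) and (4, 8).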
def pad_grid(grid, ch=' '):
C = max(len(row) for row in grid)
for i in range(len(grid)):
if len(grid[i]) < C:
grid[i] += ch * (C - len(grid[i]))
return grid
| 23.420168
| 135
| 0.507714
|
4946a6cbbe993ed6eb7117832b266839d20da7e8
| 14,235
|
py
|
Python
|
fanficfare/adapters/adapter_samandjacknet.py
|
davidferguson/FanFicUpload
|
dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2
|
[
"Apache-2.0"
] | 1
|
2019-06-13T11:20:33.000Z
|
2019-06-13T11:20:33.000Z
|
fanficfare/adapters/adapter_samandjacknet.py
|
davidferguson/FanFicUpload
|
dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2
|
[
"Apache-2.0"
] | null | null | null |
fanficfare/adapters/adapter_samandjacknet.py
|
davidferguson/FanFicUpload
|
dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
# By virtue of being recent and requiring both is_adult and user/pass,
# adapter_fanficcastletvnet.py is the best choice for learning to
# write adapters--especially for sites that use the eFiction system.
# Most sites that have ".../viewstory.php?sid=123" in the story URL
# are eFiction.
# For non-eFiction sites, it can be considerably more complex, but
# this is still a good starting point.
# In general an 'adapter' needs to do these five things:
# - 'Register' correctly with the downloader
# - Site Login (if needed)
# - 'Are you adult?' check (if needed--some do one, some the other, some both)
# - Grab the chapter list
# - Grab the story meta-data (some (non-eFiction) adapters have to get it from the author page)
# - Grab the chapter texts
# Search for XXX comments--that's where things are most likely to need changing.
# This function is called by the downloader in all adapter_*.py files
# in this dir to register the adapter class. So it needs to be
# updated to reflect the class below it. That, plus getSiteDomain()
# take care of 'Registering'.
def getClass():
return SamAndJackNetAdapter # XXX
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class SamAndJackNetAdapter(BaseSiteAdapter): # XXX
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.decode = ["Windows-1252",
"utf8"] # 1252 is a superset of iso-8859-1.
# Most sites that claim to be
# iso-8859-1 (and some that claim to be
# utf8) are really windows-1252.
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult=False
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
# XXX Most sites don't have the /fanfic part. Replace all to remove it usually.
self._setURL('http://' + self.getSiteDomain() + '/fanfics/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','sjn') # XXX
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%b %d, %Y" # XXX
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'samandjack.net' # XXX
@classmethod
def getSiteExampleURLs(self):
return "http://"+self.getSiteDomain()+"/fanfics/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return re.escape("http://"+self.getSiteDomain()+"/fanfics/viewstory.php?sid=")+r"\d+$"
## Login seems to be reasonably standard across eFiction sites.
def needToLoginCheck(self, data):
if 'Registered Users Only' in data \
or 'There is no such account on our website' in data \
or "That password doesn't match the one in our database" in data:
return True
else:
return False
def performLogin(self, url):
params = {}
if self.password:
params['penname'] = self.username
params['password'] = self.password
else:
params['penname'] = self.getConfig("username")
params['password'] = self.getConfig("password")
params['cookiecheck'] = '1'
params['submit'] = 'Submit'
loginUrl = 'http://' + self.getSiteDomain() + '/fanfics/user.php?action=login'
logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
params['penname']))
d = self._fetchUrl(loginUrl, params)
if "Member Account" not in d : #Member Account
logger.info("Failed to login to URL %s as %s" % (loginUrl,
params['penname']))
raise exceptions.FailedToLogin(url,params['penname'])
return False
else:
return True
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
if self.is_adult or self.getConfig("is_adult"):
# Weirdly, different sites use different warning numbers.
# If the title search below fails, there's a good chance
# you need a different number. print data at that point
# and see what the 'click here to continue' url says.
# Furthermore, there's a couple sites now with more than
# one warning level for different ratings. And they're
# fussy about it. midnightwhispers has three: 10, 3 & 5.
# we'll try 5 first.
addurl = "&ageconsent=ok&warning=5" # XXX
else:
addurl=""
# index=1 makes sure we see the story chapter index. Some
# sites skip that for one-chapter stories.
url = self.url+'&index=1'+addurl
logger.debug("URL: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
# The actual text that is used to announce you need to be an
# adult varies from site to site. Again, print data before
# the title search to troubleshoot.
# Since the warning text can change by warning level, let's
# look for the warning pass url. nfacommunity uses
# &warning= -- actually, so do other sites. Must be an
# eFiction book.
# viewstory.php?sid=1882&warning=4
# viewstory.php?sid=1654&ageconsent=ok&warning=5
#print data
#m = re.search(r"'viewstory.php\?sid=1882(&warning=4)'",data)
m = re.search(r"'viewstory.php\?sid=\d+((?:&ageconsent=ok)?&warning=\d+)'",data)
if m != None:
if self.is_adult or self.getConfig("is_adult"):
# We tried the default and still got a warning, so
# let's pull the warning number from the 'continue'
# link and reload data.
addurl = m.group(1)
# correct stupid & error in url.
addurl = addurl.replace("&","&")
url = self.url+'&index=1'+addurl
logger.debug("URL 2nd try: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
else:
raise exceptions.AdultCheckRequired(self.url)
if "Access denied. This story has not been validated by the adminstrators of this site." in data:
raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
# use BeautifulSoup HTML parser to make everything easier to find.
soup = self.make_soup(data)
# print data
# Now go hunting for all the meta data and the chapter list.
pagetitle = soup.find('div',{'id':'pagetitle'})
## Title
a = pagetitle.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
self.story.setMetadata('title',stripHTML(a))
# Find authorid and URL from... author url.
# (fetch multiple authors)
alist = soup.findAll('a', href=re.compile(r"viewuser.php\?uid=\d+"))
for a in alist:
self.story.addToList('authorId',a['href'].split('=')[1])
self.story.addToList('authorUrl','http://'+self.host+'/fanfics/'+a['href'])
self.story.addToList('author',a.string)
# Reviews
reviewdata = soup.find('div', {'id' : 'sort'})
a = reviewdata.findAll('a', href=re.compile(r'reviews.php\?type=ST&(amp;)?item='+self.story.getMetadata('storyId')+"$"))[1] # second one.
self.story.setMetadata('reviews',stripHTML(a))
# Find the chapters:
for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")):
# just in case there's tags, like <i> in chapter titles.
self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/fanfics/'+chapter['href']+addurl))
self.story.setMetadata('numChapters',len(self.chapterUrls))
# eFiction sites don't help us out a lot with their meta data
# formating, so it's a little ugly.
# utility method
def defaultGetattr(d,k):
try:
return d[k]
except:
return ""
# <span class="label">Rated:</span> NC-17<br /> etc
labels = soup.findAll('span',{'class':'label'})
for labelspan in labels:
value = labelspan.nextSibling
label = labelspan.string
if 'Summary' in label:
self.setDescription(url,value)
if 'Rated' in label:
self.story.setMetadata('rating', value)
if 'Word count' in label:
self.story.setMetadata('numWords', value)
if 'Categories' in label:
cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
catstext = [cat.string for cat in cats]
for cat in catstext:
self.story.addToList('category',cat.string)
if 'Characters' in label:
chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
charstext = [char.string for char in chars]
for char in charstext:
self.story.addToList('characters',char.string)
## Not all sites use Genre, but there's no harm to
## leaving it in. Check to make sure the type_id number
## is correct, though--it's site specific.
if 'Genre' in label:
genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1')) # XXX
genrestext = [genre.string for genre in genres]
self.genre = ', '.join(genrestext)
for genre in genrestext:
self.story.addToList('genre',genre.string)
## Not all sites use Warnings, but there's no harm to
## leaving it in. Check to make sure the type_id number
## is correct, though--it's site specific.
if 'Warnings' in label:
warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2')) # XXX
warningstext = [warning.string for warning in warnings]
self.warning = ', '.join(warningstext)
for warning in warningstext:
self.story.addToList('warnings',warning.string)
if 'Completed' in label:
if 'Yes' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
if 'Published' in label:
value=value.replace(' | ','')
self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
if 'Updated' in label:
# there's a stray [ at the end.
#value = value[0:-1]
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
try:
# Find Series name from series URL.
a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
series_name = a.string
series_url = 'http://'+self.host+'/fanfics/'+a['href']
# use BeautifulSoup HTML parser to make everything easier to find.
seriessoup = self.make_soup(self._fetchUrl(series_url))
storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
i=1
for a in storyas:
if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
self.setSeries(series_name, i)
self.story.setMetadata('seriesUrl',series_url)
break
i+=1
except:
# I find it hard to care if the series parsing fails
pass
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self._fetchUrl(url))
div = soup.find('div', {'id' : 'story'})
if None == div:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,div)
| 41.622807
| 157
| 0.593888
|
d89a7458776c93889d3ad9bd9f3e1fb5b9804a54
| 2,707
|
py
|
Python
|
nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_ReduceOnPlateau.py
|
nasyxx/nnUNet
|
92d5f2352349eed278e22f7a38cb86b0fccd7c75
|
[
"Apache-2.0"
] | 1,621
|
2019-02-14T02:56:51.000Z
|
2022-03-31T02:53:17.000Z
|
nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_ReduceOnPlateau.py
|
nasyxx/nnUNet
|
92d5f2352349eed278e22f7a38cb86b0fccd7c75
|
[
"Apache-2.0"
] | 72
|
2019-02-21T04:45:38.000Z
|
2022-02-09T23:49:43.000Z
|
nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_ReduceOnPlateau.py
|
nasyxx/nnUNet
|
92d5f2352349eed278e22f7a38cb86b0fccd7c75
|
[
"Apache-2.0"
] | 424
|
2019-02-15T04:05:55.000Z
|
2022-03-30T02:06:34.000Z
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch.optim import lr_scheduler
class nnUNetTrainerV2_SGD_ReduceOnPlateau(nnUNetTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
def initialize_optimizer_and_scheduler(self):
self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
momentum=0.99, nesterov=True)
self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
patience=self.lr_scheduler_patience,
verbose=True, threshold=self.lr_scheduler_eps,
threshold_mode="abs")
def maybe_update_lr(self, epoch=None):
# maybe update learning rate
if self.lr_scheduler is not None:
assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                # lr scheduler is updated with the moving-average training loss; should be more robust
if self.epoch > 0: # otherwise self.train_loss_MA is None
self.lr_scheduler.step(self.train_loss_MA)
else:
self.lr_scheduler.step(self.epoch + 1)
self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
def on_epoch_end(self):
return nnUNetTrainer.on_epoch_end(self)
| 53.078431
| 116
| 0.669006
|
11ad8e3acabea4ba3b75419ee9e4dad9037911b9
| 1,397
|
py
|
Python
|
blog/main/forms.py
|
leni1/microblog
|
d51f47618b3eb446e351f3d3ae493cb6a5a15d45
|
[
"MIT"
] | null | null | null |
blog/main/forms.py
|
leni1/microblog
|
d51f47618b3eb446e351f3d3ae493cb6a5a15d45
|
[
"MIT"
] | null | null | null |
blog/main/forms.py
|
leni1/microblog
|
d51f47618b3eb446e351f3d3ae493cb6a5a15d45
|
[
"MIT"
] | null | null | null |
from flask import request
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Length, ValidationError
from blog.models import User
class EditProfileForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
about_me = TextAreaField('About Me', validators=[Length(min=0, max=140)])
submit = SubmitField('Submit')
def __init__(self, orig_username, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
self.orig_username = orig_username
def validate_username(self, username):
if username.data != self.orig_username:
user = User.query.filter_by(username=self.username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
class PostForm(FlaskForm):
post = TextAreaField('Say something', validators=[
DataRequired(), Length(min=1, max=140)])
submit = SubmitField('Submit')
class SearchForm(FlaskForm):
q = StringField('Search', validators=[DataRequired()])
def __init__(self, *args, **kwargs):
if 'formdata' not in kwargs:
kwargs['formdata'] = request.args
if 'csrf_enabled' not in kwargs:
kwargs['csrf_enabled'] = False
super(SearchForm, self).__init__(*args, **kwargs)
| 34.925
| 77
| 0.686471
|
f5caa832736b359d3224b8637472c7b99155288c
| 1,170
|
py
|
Python
|
isi_sdk_8_0_1/test/test_filepool_policy_file_matching_pattern_or_criteria_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0_1/test/test_filepool_policy_file_matching_pattern_or_criteria_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0_1/test/test_filepool_policy_file_matching_pattern_or_criteria_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.filepool_policy_file_matching_pattern_or_criteria_item import FilepoolPolicyFileMatchingPatternOrCriteriaItem # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestFilepoolPolicyFileMatchingPatternOrCriteriaItem(unittest.TestCase):
"""FilepoolPolicyFileMatchingPatternOrCriteriaItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFilepoolPolicyFileMatchingPatternOrCriteriaItem(self):
"""Test FilepoolPolicyFileMatchingPatternOrCriteriaItem"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.filepool_policy_file_matching_pattern_or_criteria_item.FilepoolPolicyFileMatchingPatternOrCriteriaItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 28.536585
| 157
| 0.77265
|
e79dfa02b3d24e30a1b89946d9c283565690bbb9
| 3,559
|
py
|
Python
|
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/concurrency_test.py
|
AdeshChoudhar/tensorflow
|
1065fdc54e336a6278a4795ffa69c17c4336dfec
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/concurrency_test.py
|
AdeshChoudhar/tensorflow
|
1065fdc54e336a6278a4795ffa69c17c4336dfec
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/concurrency_test.py
|
AdeshChoudhar/tensorflow
|
1065fdc54e336a6278a4795ffa69c17c4336dfec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Concurrency tests for quantize_model."""
from concurrent import futures
import numpy as np
import tensorflow # pylint: disable=unused-import
from tensorflow.compiler.mlir.quantization.tensorflow import quantization_options_pb2 as quant_opts_pb2
from tensorflow.compiler.mlir.quantization.tensorflow.python import quantize_model
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save as saved_model_save
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.trackable import autotrackable
class MultiThreadedTest(test.TestCase):
"""Tests involving multiple threads."""
def setUp(self):
super(MultiThreadedTest, self).setUp()
self.pool = futures.ThreadPoolExecutor(max_workers=4)
def _convert_with_calibration(self):
class ModelWithAdd(autotrackable.AutoTrackable):
"""Basic model with addition."""
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[10], dtype=dtypes.float32, name='x'),
tensor_spec.TensorSpec(shape=[10], dtype=dtypes.float32, name='y')
])
def add(self, x, y):
res = math_ops.add(x, y)
return {'output': res}
def data_gen():
for _ in range(255):
yield {
'x':
ops.convert_to_tensor(
np.random.uniform(size=(10)).astype('f4')),
'y':
ops.convert_to_tensor(
np.random.uniform(size=(10)).astype('f4'))
}
root = ModelWithAdd()
temp_path = self.create_tempdir().full_path
saved_model_save.save(
root, temp_path, signatures=root.add.get_concrete_function())
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
experimental_method=quant_opts_pb2.QuantizationMethod
.ExperimentalMethod.STATIC_RANGE))
model = quantize_model.quantize(
temp_path, ['serving_default'], [tag_constants.SERVING],
quantization_options=quantization_options,
representative_dataset=data_gen)
return model
@test_util.run_in_graph_and_eager_modes
def test_multiple_conversion_jobs_with_calibration(self):
# Ensure that multiple conversion jobs with calibration won't encounter any
# concurrency issue.
with self.pool:
jobs = []
for _ in range(10):
jobs.append(self.pool.submit(self._convert_with_calibration))
for job in jobs:
self.assertIsNotNone(job.result())
if __name__ == '__main__':
test.main()
| 35.949495
| 103
| 0.710593
|
8e9952fd707a97411332fd3c2673a4694d4e8d34
| 21,147
|
py
|
Python
|
official/vision/beta/projects/yolo/ops/preprocess_ops.py
|
kia-ctw/models
|
007070820109ec57cfb048ff505be090d4e88d10
|
[
"Apache-2.0"
] | 3
|
2022-03-05T10:46:52.000Z
|
2022-03-22T06:00:05.000Z
|
official/vision/beta/projects/yolo/ops/preprocess_ops.py
|
kia-ctw/models
|
007070820109ec57cfb048ff505be090d4e88d10
|
[
"Apache-2.0"
] | 1
|
2021-09-02T12:43:42.000Z
|
2021-09-02T12:43:42.000Z
|
official/vision/beta/projects/yolo/ops/preprocess_ops.py
|
kia-ctw/models
|
007070820109ec57cfb048ff505be090d4e88d10
|
[
"Apache-2.0"
] | 2
|
2021-08-17T22:07:17.000Z
|
2021-12-25T12:25:47.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo preprocess ops."""
import tensorflow as tf
import tensorflow_addons as tfa
from official.vision.beta.projects.yolo.ops import box_ops
def resize_crop_filter(image, boxes, default_width, default_height,
target_width, target_height):
"""Apply zooming to the image and boxes.
Args:
image: a `Tensor` representing the image.
    boxes: a `Tensor` representing the boxes.
default_width: a `Tensor` representing the width of the image.
default_height: a `Tensor` representing the height of the image.
target_width: a `Tensor` representing the desired width of the image.
target_height: a `Tensor` representing the desired height of the image.
Returns:
images: a `Tensor` representing the augmented image.
boxes: a `Tensor` representing the augmented boxes.
"""
with tf.name_scope('resize_crop_filter'):
image = tf.image.resize(image, (target_width, target_height))
image = tf.image.resize_with_crop_or_pad(image,
target_height=default_height,
target_width=default_width)
default_width = tf.cast(default_width, boxes.dtype)
default_height = tf.cast(default_height, boxes.dtype)
target_width = tf.cast(target_width, boxes.dtype)
target_height = tf.cast(target_height, boxes.dtype)
aspect_change_width = target_width / default_width
aspect_change_height = target_height / default_height
x, y, width, height = tf.split(boxes, 4, axis=-1)
x = (x - 0.5) * target_width / default_width + 0.5
y = (y - 0.5) * target_height / default_height + 0.5
width = width * aspect_change_width
height = height * aspect_change_height
boxes = tf.concat([x, y, width, height], axis=-1)
return image, boxes
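# Worked example (illustrative, not from the original source): with
# default_width = default_height = 416 and target_width = target_height = 832,
# aspect_change_* is 2.0, so a box center x maps to (x - 0.5) * 2 + 0.5 and the box
# width/height double, matching the 2x zoom applied to the image before cropping.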
def random_translate(image, box, t, seed=None):
"""Randomly translate the image and boxes.
Args:
image: a `Tensor` representing the image.
    box: a `Tensor` representing the boxes.
t: an `int` representing the translation factor
seed: an optional seed for tf.random operations
Returns:
image: a `Tensor` representing the augmented image.
box: a `Tensor` representing the augmented boxes.
"""
t_x = tf.random.uniform(minval=-t,
maxval=t,
shape=(),
dtype=tf.float32,
seed=seed)
t_y = tf.random.uniform(minval=-t,
maxval=t,
shape=(),
dtype=tf.float32,
seed=seed)
box = translate_boxes(box, t_x, t_y)
image = translate_image(image, t_x, t_y)
return image, box
def translate_boxes(box, translate_x, translate_y):
"""Randomly translate the boxes.
Args:
    box: a `Tensor` representing the boxes.
    translate_x: a `Tensor` representing the translation on the x-axis.
    translate_y: a `Tensor` representing the translation on the y-axis.
Returns:
box: a `Tensor` representing the augmented boxes.
"""
with tf.name_scope('translate_boxs'):
x = box[..., 0] + translate_x
y = box[..., 1] + translate_y
box = tf.stack([x, y, box[..., 2], box[..., 3]], axis=-1)
box.set_shape([None, 4])
return box
def translate_image(image, translate_x, translate_y):
"""Randomly translate the image.
Args:
image: a `Tensor` representing the image.
    translate_x: a `Tensor` representing the translation on the x-axis.
    translate_y: a `Tensor` representing the translation on the y-axis.
Returns:
box: a `Tensor` representing the augmented boxes.
"""
with tf.name_scope('translate_image'):
if (translate_x != 0 and translate_y != 0):
image_jitter = tf.convert_to_tensor([translate_x, translate_y])
image_jitter.set_shape([2])
image = tfa.image.translate(
image, image_jitter * tf.cast(tf.shape(image)[1], tf.float32))
return image
def pad_max_instances(value, instances, pad_value=0, pad_axis=0):
"""Pads tensors to max number of instances."""
shape = tf.shape(value)
dim1 = shape[pad_axis]
take = tf.math.reduce_min([instances, dim1])
value, _ = tf.split(value, [take, -1],
axis=pad_axis) # value[:instances, ...]
pad = tf.convert_to_tensor([tf.math.reduce_max([instances - dim1, 0])])
nshape = tf.concat([shape[:pad_axis], pad, shape[(pad_axis + 1):]], axis=0)
pad_tensor = tf.fill(nshape, tf.cast(pad_value, dtype=value.dtype))
value = tf.concat([value, pad_tensor], axis=pad_axis)
return value
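# Illustrative usage sketch (assumed shapes, for demonstration only):
#
#   boxes = tf.random.uniform([7, 4])
#   padded = pad_max_instances(boxes, instances=20, pad_value=0, pad_axis=0)
#   # padded has shape [20, 4]; the 13 extra rows are filled with pad_value.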
def fit_preserve_aspect_ratio(image,
boxes,
width=None,
height=None,
target_dim=None):
"""Resizes the image while peserving the image aspect ratio.
Args:
image: a `Tensor` representing the image.
boxes: a `Tensor` representing the boxes.
width: int for the image width.
height: int for the image height.
target_dim: list or a Tensor of height and width.
Returns:
image: a `Tensor` representing the image.
box: a `Tensor` representing the boxes.
"""
if width is None or height is None:
shape = tf.shape(image)
if tf.shape(shape)[0] == 4:
width = shape[1]
height = shape[2]
else:
width = shape[0]
height = shape[1]
clipper = tf.math.maximum(width, height)
if target_dim is None:
target_dim = clipper
pad_width = clipper - width
pad_height = clipper - height
image = tf.image.pad_to_bounding_box(image, pad_width // 2, pad_height // 2,
clipper, clipper)
boxes = box_ops.yxyx_to_xcycwh(boxes)
x, y, w, h = tf.split(boxes, 4, axis=-1)
y *= tf.cast(width / clipper, tf.float32)
x *= tf.cast(height / clipper, tf.float32)
y += tf.cast((pad_width / clipper) / 2, tf.float32)
x += tf.cast((pad_height / clipper) / 2, tf.float32)
h *= tf.cast(width / clipper, tf.float32)
w *= tf.cast(height / clipper, tf.float32)
boxes = tf.concat([x, y, w, h], axis=-1)
boxes = box_ops.xcycwh_to_yxyx(boxes)
image = tf.image.resize(image, (target_dim, target_dim))
return image, boxes
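# Illustrative usage sketch (assumed shapes and values, not part of the
# original module); boxes are assumed normalized yxyx as consumed above:
#
#   image = tf.random.uniform([300, 400, 3])
#   boxes = tf.constant([[0.1, 0.1, 0.4, 0.5]])
#   square_image, adjusted_boxes = fit_preserve_aspect_ratio(
#       image, boxes, target_dim=416)
#   # square_image has shape [416, 416, 3].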
def get_best_anchor(y_true, anchors, width=1, height=1):
"""Gets the correct anchor that is assoiciated with each box using IOU.
Args:
y_true: `tf.Tensor[]` for the list of bounding boxes in the yolo format.
anchors: list or tensor for the anchor boxes to be used in prediction
found via Kmeans.
width: int for the image width.
height: int for the image height.
Returns:
tf.Tensor: y_true with the anchor associated with each ground truth
box known.
"""
with tf.name_scope('get_anchor'):
width = tf.cast(width, dtype=tf.float32)
height = tf.cast(height, dtype=tf.float32)
# split the boxes into center and width height
anchor_xy = y_true[..., 0:2]
    # scale the boxes
anchors = tf.convert_to_tensor(anchors, dtype=tf.float32)
anchors_x = anchors[..., 0] / width
anchors_y = anchors[..., 1] / height
anchors = tf.stack([anchors_x, anchors_y], axis=-1)
k = tf.shape(anchors)[0]
# build a matrix of anchor boxes of shape [num_anchors, num_boxes, 4]
anchors = tf.transpose(anchors, perm=[1, 0])
anchor_xy = tf.tile(tf.expand_dims(anchor_xy, axis=-1),
[1, 1, tf.shape(anchors)[-1]])
anchors = tf.tile(tf.expand_dims(anchors, axis=0),
[tf.shape(anchor_xy)[0], 1, 1])
    # stack the xy so each anchor is associated once with each center from
# the ground truth input
anchors = tf.concat([anchor_xy, anchors], axis=1)
anchors = tf.transpose(anchors, perm=[2, 0, 1])
# copy the gt n times so that each anchor from above can be compared to
# input ground truth to shape: [num_anchors, num_boxes, 4]
truth_comp = tf.tile(tf.expand_dims(y_true[..., 0:4], axis=-1),
[1, 1, tf.shape(anchors)[0]])
truth_comp = tf.transpose(truth_comp, perm=[2, 0, 1])
# compute intersection over union of the boxes, and take the argmax of
    # computed iou for each box. thus each box is associated with the
    # largest intersection over union
iou_raw = box_ops.compute_iou(truth_comp, anchors)
values, indexes = tf.math.top_k(tf.transpose(iou_raw, perm=[1, 0]),
k=tf.cast(k, dtype=tf.int32),
sorted=True)
ind_mask = tf.cast(values > 0.213, dtype=indexes.dtype)
    # pad the indexes such that all values less than the threshold are -1:
    # add one, multiply by the mask to zero out all the bad locations,
    # then subtract 1, making all the bad locations -1.
iou_index = tf.concat([
tf.keras.backend.expand_dims(indexes[..., 0], axis=-1),
((indexes[..., 1:] + 1) * ind_mask[..., 1:]) - 1
],
axis=-1)
iou_index = iou_index[..., :6]
return tf.cast(iou_index, dtype=tf.float32)
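# Illustrative usage sketch (assumed values, for demonstration only); y_true
# is assumed to hold normalized xcycwh boxes and anchors are in pixels:
#
#   y_true = tf.constant([[0.5, 0.5, 0.2, 0.2]])
#   anchors = [[10., 13.], [16., 30.], [33., 23.]]
#   best = get_best_anchor(y_true, anchors, width=416, height=416)
#   # best has shape [num_boxes, k] with anchor indices sorted by IOU, and
#   # -1 for anchors whose IOU falls below the threshold.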
def build_grided_gt(y_true, mask, size, dtype, use_tie_breaker):
"""Converts ground truth for use in loss functions.
Args:
y_true: tf.Tensor[] ground truth
[box coords[0:4], classes_onehot[0:-1], best_fit_anchor_box].
    mask: list of the anchor boxes corresponding to the output,
ex. [1, 2, 3] tells this layer to predict only the first 3
anchors in the total.
size: The dimensions of this output, for regular, it progresses
from 13, to 26, to 52.
dtype: The expected output dtype.
    use_tie_breaker: boolean value for whether or not to use the tie_breaker.
Returns:
tf.Tensor[] of shape [size, size, #of_anchors, 4, 1, num_classes].
"""
# unpack required components from the input ground truth
boxes = tf.cast(y_true['bbox'], dtype)
classes = tf.expand_dims(tf.cast(y_true['classes'], dtype=dtype), axis=-1)
anchors = tf.cast(y_true['best_anchors'], dtype)
# get the number of boxes in the ground truth boxs
num_boxes = tf.shape(boxes)[0]
# get the number of anchor boxes used for this anchor scale
len_masks = tf.shape(mask)[0]
  # init a fixed memory size grid for this prediction scale
# [size, size, # of anchors, 1 + 1 + number of anchors per scale]
full = tf.zeros([size, size, len_masks, 6], dtype=dtype)
# init a grid to use to track which locations have already
# been used before (for the tie breaker)
depth_track = tf.zeros((size, size, len_masks), dtype=tf.int32)
# rescale the x and y centers to the size of the grid [size, size]
x = tf.cast(boxes[..., 0] * tf.cast(size, dtype=dtype), dtype=tf.int32)
y = tf.cast(boxes[..., 1] * tf.cast(size, dtype=dtype), dtype=tf.int32)
  # init all the tensorArrays to be used in storing the index
# and the values to be used to update both depth_track and full
update_index = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
update = tf.TensorArray(dtype, size=0, dynamic_size=True)
# init constants and match data types before entering loop
i = 0
anchor_id = 0
const = tf.cast(tf.convert_to_tensor([1.]), dtype=dtype)
mask = tf.cast(mask, dtype=dtype)
rand_update = 0.0
for box_id in range(num_boxes):
# If the width or height of the box is zero, skip it.
    # After preprocessing, if the box is not in the image bounds anymore,
# skip it.
if tf.keras.backend.all(tf.math.equal(
boxes[box_id, 2:4], 0)) or tf.keras.backend.any(
tf.math.less(boxes[box_id, 0:2], 0.0)) or tf.keras.backend.any(
tf.math.greater_equal(boxes[box_id, 0:2], 1.0)):
continue
if use_tie_breaker:
for anchor_id in range(tf.shape(anchors)[-1]):
index = tf.math.equal(anchors[box_id, anchor_id], mask)
if tf.keras.backend.any(index):
# using the boolean index mask to determine exactly which
# anchor box was used
p = tf.cast(
tf.keras.backend.argmax(tf.cast(index, dtype=tf.int32)),
dtype=tf.int32)
# determine if the index was used or not
used = depth_track[y[box_id], x[box_id], p]
          # default used update value
uid = 1
# if anchor_id is 0, this is the best matched anchor for this box
# with the highest IOU
if anchor_id == 0:
# write the box to the update list
            # create a random number to trigger a replacement if the cell
# is used already
if tf.math.equal(used, 1):
rand_update = tf.random.uniform([], maxval=1)
else:
rand_update = 1.0
if rand_update > 0.5:
# write the box to the update list
update_index = update_index.write(i, [y[box_id], x[box_id], p])
value = tf.concat([boxes[box_id], const, classes[box_id]],
axis=-1)
update = update.write(i, value)
# if used is 2, this cell is filled with a non-optimal box
# if used is 0, the cell in the ground truth is not yet consumed
# in either case you can replace that cell with a new box, as long
# as it is not consumed by an optimal box with anchor_id = 0
elif tf.math.equal(used, 2) or tf.math.equal(used, 0):
uid = 2
# write the box to the update list
update_index = update_index.write(i, [y[box_id], x[box_id], p])
value = tf.concat([boxes[box_id], const, classes[box_id]], axis=-1)
update = update.write(i, value)
depth_track = tf.tensor_scatter_nd_update(
depth_track, [(y[box_id], x[box_id], p)], [uid])
i += 1
else:
index = tf.math.equal(anchors[box_id, 0], mask)
      # if there is any index match
if tf.keras.backend.any(index):
# find the index
p = tf.cast(
tf.keras.backend.argmax(tf.cast(index, dtype=tf.int32)),
dtype=tf.int32)
# update the list of used boxes
update_index = update_index.write(i, [y[box_id], x[box_id], p])
value = tf.concat([boxes[box_id], const, classes[box_id]], axis=-1)
update = update.write(i, value)
i += 1
  # if the size of the update list is not 0, do an update, otherwise,
# no boxes and pass an empty grid
if tf.math.greater(update_index.size(), 0):
update_index = update_index.stack()
update = update.stack()
full = tf.tensor_scatter_nd_update(full, update_index, update)
return full
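# Illustrative usage sketch (assumed inputs, for demonstration only); the
# ground truth dict layout mirrors what the function unpacks above:
#
#   y_true = {
#       'bbox': tf.constant([[0.5, 0.5, 0.2, 0.2]]),
#       'classes': tf.constant([1.0]),
#       'best_anchors': tf.constant([[6.0, 7.0, 8.0]]),
#   }
#   grid = build_grided_gt(y_true, mask=[6, 7, 8], size=13,
#                          dtype=tf.float32, use_tie_breaker=True)
#   # grid has shape [13, 13, 3, 6].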
def build_batch_grided_gt(y_true, mask, size, dtype, use_tie_breaker):
"""Converts ground truth for use in loss functions.
Args:
y_true: tf.Tensor[] ground truth
[batch, box coords[0:4], classes_onehot[0:-1], best_fit_anchor_box].
    mask: list of the anchor boxes corresponding to the output,
ex. [1, 2, 3] tells this layer to predict only the first 3 anchors
in the total.
size: the dimensions of this output, for regular, it progresses from
13, to 26, to 52.
dtype: expected output datatype.
use_tie_breaker: boolean value for whether or not to use the tie
breaker.
Returns:
tf.Tensor[] of shape [batch, size, size, #of_anchors, 4, 1, num_classes].
"""
# unpack required components from the input ground truth
boxes = tf.cast(y_true['bbox'], dtype)
classes = tf.expand_dims(tf.cast(y_true['classes'], dtype=dtype), axis=-1)
anchors = tf.cast(y_true['best_anchors'], dtype)
# get the batch size
batches = tf.shape(boxes)[0]
# get the number of boxes in the ground truth boxs
num_boxes = tf.shape(boxes)[1]
# get the number of anchor boxes used for this anchor scale
len_masks = tf.shape(mask)[0]
  # init a fixed memory size grid for this prediction scale
# [batch, size, size, # of anchors, 1 + 1 + number of anchors per scale]
full = tf.zeros([batches, size, size, len_masks, 1 + 4 + 1], dtype=dtype)
# init a grid to use to track which locations have already
# been used before (for the tie breaker)
depth_track = tf.zeros((batches, size, size, len_masks), dtype=tf.int32)
# rescale the x and y centers to the size of the grid [size, size]
x = tf.cast(boxes[..., 0] * tf.cast(size, dtype=dtype), dtype=tf.int32)
y = tf.cast(boxes[..., 1] * tf.cast(size, dtype=dtype), dtype=tf.int32)
  # init all the tensorArrays to be used in storing the index and the values
# to be used to update both depth_track and full
update_index = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
update = tf.TensorArray(dtype, size=0, dynamic_size=True)
# init constants and match data types before entering loop
i = 0
anchor_id = 0
const = tf.cast(tf.convert_to_tensor([1.]), dtype=dtype)
mask = tf.cast(mask, dtype=dtype)
rand_update = 0.0
for batch in range(batches):
for box_id in range(num_boxes):
# if the width or height of the box is zero, skip it
if tf.keras.backend.all(tf.math.equal(boxes[batch, box_id, 2:4], 0)):
continue
      # after preprocessing, if the box is not in the image bounds anymore
# skip the box
if tf.keras.backend.any(tf.math.less(
boxes[batch, box_id, 0:2], 0.0)) or tf.keras.backend.any(
tf.math.greater_equal(boxes[batch, box_id, 0:2], 1.0)):
continue
if use_tie_breaker:
for anchor_id in range(tf.shape(anchors)[-1]):
index = tf.math.equal(anchors[batch, box_id, anchor_id], mask)
if tf.keras.backend.any(index):
# using the boolean index mask to determine exactly which anchor
# box was used
p = tf.cast(tf.keras.backend.argmax(tf.cast(index, dtype=tf.int32)),
dtype=tf.int32)
# determine if the index was used or not
used = depth_track[batch, y[batch, box_id], x[batch, box_id], p]
            # default used update value
uid = 1
# if anchor_id is 0, this is the best matched anchor for this box
# with the highest IOU
if anchor_id == 0:
              # create a random number to trigger a replacement if the cell
# is used already
if tf.math.equal(used, 1):
rand_update = tf.random.uniform([], maxval=1)
else:
rand_update = 1.0
if rand_update > 0.5:
# write the box to the update list
update_index = update_index.write(
i, [batch, y[batch, box_id], x[batch, box_id], p])
value = tf.concat(
[boxes[batch, box_id], const, classes[batch, box_id]],
axis=-1)
update = update.write(i, value)
# if used is 2, this cell is filled with a non-optimal box
# if used is 0, the cell in the ground truth is not yet consumed
# in either case you can replace that cell with a new box, as long
# as it is not consumed by an optimal box with anchor_id = 0
elif tf.math.equal(used, 2) or tf.math.equal(used, 0):
uid = 2
# write the box to the update list
update_index = update_index.write(
i, [batch, y[batch, box_id], x[batch, box_id], p])
              value = tf.concat(
                  [boxes[batch, box_id], const, classes[batch, box_id]],
                  axis=-1)
update = update.write(i, value)
# update the used index for where and how the box was placed
depth_track = tf.tensor_scatter_nd_update(
depth_track, [(batch, y[batch, box_id], x[batch, box_id], p)],
[uid])
i += 1
else:
index = tf.math.equal(anchors[batch, box_id, 0], mask)
if tf.keras.backend.any(index):
          # if there is any index match
p = tf.cast(
tf.keras.backend.argmax(tf.cast(index, dtype=tf.int32)),
dtype=tf.int32)
# write the box to the update list
update_index = update_index.write(
i, [batch, y[batch, box_id], x[batch, box_id], p])
value = tf.concat(
[boxes[batch, box_id], const, classes[batch, box_id]], axis=-1)
update = update.write(i, value)
i += 1
  # if the size of the update list is not 0, do an update, otherwise,
# no boxes and pass an empty grid
if tf.math.greater(update_index.size(), 0):
update_index = update_index.stack()
update = update.stack()
full = tf.tensor_scatter_nd_update(full, update_index, update)
return full
| 40.35687
| 80
| 0.627181
|
29c39053b6c531b448d818701277b8183cb177b2
| 17,367
|
py
|
Python
|
test/python/quantum_info/operators/channel/test_choi.py
|
diego-plan9/qiskit-terra
|
a4120d70bd631ad2add228fdb1f86706bc5f2339
|
[
"Apache-2.0"
] | 1
|
2018-05-29T03:58:03.000Z
|
2018-05-29T03:58:03.000Z
|
test/python/quantum_info/operators/channel/test_choi.py
|
diego-plan9/qiskit-terra
|
a4120d70bd631ad2add228fdb1f86706bc5f2339
|
[
"Apache-2.0"
] | 3
|
2018-11-13T17:33:37.000Z
|
2018-12-03T09:35:00.000Z
|
test/python/quantum_info/operators/channel/test_choi.py
|
diego-plan9/qiskit-terra
|
a4120d70bd631ad2add228fdb1f86706bc5f2339
|
[
"Apache-2.0"
] | 2
|
2017-12-03T15:48:14.000Z
|
2018-03-11T13:08:03.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for Choi quantum channel representation class."""
import copy
import unittest
import numpy as np
from numpy.testing import assert_allclose
from qiskit import QiskitError
from qiskit.quantum_info.states import DensityMatrix
from qiskit.quantum_info.operators.channel import Choi
from .channel_test_case import ChannelTestCase
class TestChoi(ChannelTestCase):
"""Tests for Choi channel representation."""
def test_init(self):
"""Test initialization"""
mat4 = np.eye(4) / 2.0
chan = Choi(mat4)
assert_allclose(chan.data, mat4)
self.assertEqual(chan.dim, (2, 2))
mat8 = np.eye(8) / 2.0
chan = Choi(mat8, input_dims=4)
assert_allclose(chan.data, mat8)
self.assertEqual(chan.dim, (4, 2))
chan = Choi(mat8, input_dims=2)
assert_allclose(chan.data, mat8)
self.assertEqual(chan.dim, (2, 4))
mat16 = np.eye(16) / 4
chan = Choi(mat16)
assert_allclose(chan.data, mat16)
self.assertEqual(chan.dim, (4, 4))
# Wrong input or output dims should raise exception
self.assertRaises(
QiskitError, Choi, mat8, input_dims=[4], output_dims=[4])
def test_circuit_init(self):
"""Test initialization from a circuit."""
circuit, target = self.simple_circuit_no_measure()
op = Choi(circuit)
target = Choi(target)
self.assertEqual(op, target)
def test_circuit_init_except(self):
"""Test initialization from circuit with measure raises exception."""
circuit = self.simple_circuit_with_measure()
self.assertRaises(QiskitError, Choi, circuit)
def test_equal(self):
"""Test __eq__ method"""
mat = self.rand_matrix(4, 4)
self.assertEqual(Choi(mat), Choi(mat))
def test_copy(self):
"""Test copy method"""
mat = np.eye(2)
with self.subTest("Deep copy"):
orig = Choi(mat)
cpy = orig.copy()
cpy._data[0, 0] = 0.0
self.assertFalse(cpy == orig)
with self.subTest("Shallow copy"):
orig = Choi(mat)
clone = copy.copy(orig)
clone._data[0, 0] = 0.0
self.assertTrue(clone == orig)
def test_clone(self):
"""Test clone method"""
mat = np.eye(4)
orig = Choi(mat)
clone = copy.copy(orig)
clone._data[0, 0] = 0.0
self.assertTrue(clone == orig)
def test_is_cptp(self):
"""Test is_cptp method."""
self.assertTrue(Choi(self.depol_choi(0.25)).is_cptp())
# Non-CPTP should return false
self.assertFalse(
Choi(1.25 * self.choiI - 0.25 * self.depol_choi(1)).is_cptp())
def test_conjugate(self):
"""Test conjugate method."""
# Test channel measures in Z basis and prepares in Y basis
# Zp -> Yp, Zm -> Ym
Zp, Zm = np.diag([1, 0]), np.diag([0, 1])
Yp, Ym = np.array([[1, -1j], [1j, 1]]) / 2, np.array([[1, 1j],
[-1j, 1]]) / 2
chan = Choi(np.kron(Zp, Yp) + np.kron(Zm, Ym))
# Conjugate channel swaps Y-basis states
targ = Choi(np.kron(Zp, Ym) + np.kron(Zm, Yp))
chan_conj = chan.conjugate()
self.assertEqual(chan_conj, targ)
def test_transpose(self):
"""Test transpose method."""
# Test channel measures in Z basis and prepares in Y basis
# Zp -> Yp, Zm -> Ym
Zp, Zm = np.diag([1, 0]), np.diag([0, 1])
Yp, Ym = np.array([[1, -1j], [1j, 1]]) / 2, np.array([[1, 1j],
[-1j, 1]]) / 2
chan = Choi(np.kron(Zp, Yp) + np.kron(Zm, Ym))
# Transpose channel swaps basis
targ = Choi(np.kron(Yp, Zp) + np.kron(Ym, Zm))
chan_t = chan.transpose()
self.assertEqual(chan_t, targ)
def test_adjoint(self):
"""Test adjoint method."""
# Test channel measures in Z basis and prepares in Y basis
# Zp -> Yp, Zm -> Ym
Zp, Zm = np.diag([1, 0]), np.diag([0, 1])
Yp, Ym = np.array([[1, -1j], [1j, 1]]) / 2, np.array([[1, 1j],
[-1j, 1]]) / 2
chan = Choi(np.kron(Zp, Yp) + np.kron(Zm, Ym))
        # Adjoint channel swaps Y-basis elements and Z<->Y bases
targ = Choi(np.kron(Ym, Zp) + np.kron(Yp, Zm))
chan_adj = chan.adjoint()
self.assertEqual(chan_adj, targ)
def test_compose_except(self):
"""Test compose different dimension exception"""
self.assertRaises(QiskitError,
Choi(np.eye(4)).compose, Choi(np.eye(8)))
self.assertRaises(QiskitError, Choi(np.eye(4)).compose, 2)
def test_compose(self):
"""Test compose method."""
# UnitaryChannel evolution
chan1 = Choi(self.choiX)
chan2 = Choi(self.choiY)
chan = chan1.compose(chan2)
targ = Choi(self.choiZ)
self.assertEqual(chan, targ)
# 50% depolarizing channel
chan1 = Choi(self.depol_choi(0.5))
chan = chan1.compose(chan1)
targ = Choi(self.depol_choi(0.75))
self.assertEqual(chan, targ)
# Measure and rotation
Zp, Zm = np.diag([1, 0]), np.diag([0, 1])
Xp, Xm = np.array([[1, 1], [1, 1]]) / 2, np.array([[1, -1], [-1, 1]
]) / 2
chan1 = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))
chan2 = Choi(self.choiX)
# X-gate second does nothing
targ = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))
self.assertEqual(chan1.compose(chan2), targ)
self.assertEqual(chan1 @ chan2, targ)
# X-gate first swaps Z states
targ = Choi(np.kron(Zm, Xp) + np.kron(Zp, Xm))
self.assertEqual(chan2.compose(chan1), targ)
self.assertEqual(chan2 @ chan1, targ)
# Compose different dimensions
chan1 = Choi(np.eye(8) / 4, input_dims=2, output_dims=4)
chan2 = Choi(np.eye(8) / 2, input_dims=4, output_dims=2)
chan = chan1.compose(chan2)
self.assertEqual(chan.dim, (2, 2))
chan = chan2.compose(chan1)
self.assertEqual(chan.dim, (4, 4))
def test_dot(self):
"""Test dot method."""
# UnitaryChannel evolution
chan1 = Choi(self.choiX)
chan2 = Choi(self.choiY)
targ = Choi(self.choiZ)
self.assertEqual(chan1.dot(chan2), targ)
self.assertEqual(chan1 * chan2, targ)
# 50% depolarizing channel
chan1 = Choi(self.depol_choi(0.5))
targ = Choi(self.depol_choi(0.75))
self.assertEqual(chan1.dot(chan1), targ)
self.assertEqual(chan1 * chan1, targ)
# Measure and rotation
Zp, Zm = np.diag([1, 0]), np.diag([0, 1])
Xp, Xm = np.array([[1, 1], [1, 1]]) / 2, np.array([[1, -1], [-1, 1]
]) / 2
chan1 = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))
chan2 = Choi(self.choiX)
# X-gate second does nothing
targ = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))
self.assertEqual(chan2.dot(chan1), targ)
self.assertEqual(chan2 * chan1, targ)
# X-gate first swaps Z states
targ = Choi(np.kron(Zm, Xp) + np.kron(Zp, Xm))
self.assertEqual(chan1.dot(chan2), targ)
self.assertEqual(chan1 * chan2, targ)
# Compose different dimensions
chan1 = Choi(np.eye(8) / 4, input_dims=2, output_dims=4)
chan2 = Choi(np.eye(8) / 2, input_dims=4, output_dims=2)
chan = chan1.dot(chan2)
self.assertEqual(chan.dim, (4, 4))
chan = chan2.dot(chan1)
self.assertEqual(chan.dim, (2, 2))
def test_compose_front(self):
"""Test front compose method."""
# UnitaryChannel evolution
chan1 = Choi(self.choiX)
chan2 = Choi(self.choiY)
chan = chan1.compose(chan2, front=True)
targ = Choi(self.choiZ)
self.assertEqual(chan, targ)
# 50% depolarizing channel
chan1 = Choi(self.depol_choi(0.5))
chan = chan1.compose(chan1, front=True)
targ = Choi(self.depol_choi(0.75))
self.assertEqual(chan, targ)
# Measure and rotation
Zp, Zm = np.diag([1, 0]), np.diag([0, 1])
Xp, Xm = np.array([[1, 1], [1, 1]]) / 2, np.array([[1, -1], [-1, 1]
]) / 2
chan1 = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))
chan2 = Choi(self.choiX)
# X-gate second does nothing
chan = chan2.compose(chan1, front=True)
targ = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))
self.assertEqual(chan, targ)
# X-gate first swaps Z states
chan = chan1.compose(chan2, front=True)
targ = Choi(np.kron(Zm, Xp) + np.kron(Zp, Xm))
self.assertEqual(chan, targ)
# Compose different dimensions
chan1 = Choi(np.eye(8) / 4, input_dims=2, output_dims=4)
chan2 = Choi(np.eye(8) / 2, input_dims=4, output_dims=2)
chan = chan1.compose(chan2, front=True)
self.assertEqual(chan.dim, (4, 4))
chan = chan2.compose(chan1, front=True)
self.assertEqual(chan.dim, (2, 2))
def test_expand(self):
"""Test expand method."""
rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])
rho_init = DensityMatrix(np.kron(rho0, rho0))
chan1 = Choi(self.choiI)
chan2 = Choi(self.choiX)
# X \otimes I
chan = chan1.expand(chan2)
rho_targ = DensityMatrix(np.kron(rho1, rho0))
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
# I \otimes X
chan = chan2.expand(chan1)
rho_targ = DensityMatrix(np.kron(rho0, rho1))
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
# Completely depolarizing
chan_dep = Choi(self.depol_choi(1))
chan = chan_dep.expand(chan_dep)
rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
def test_tensor(self):
"""Test tensor method."""
rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])
rho_init = DensityMatrix(np.kron(rho0, rho0))
chan1 = Choi(self.choiI)
chan2 = Choi(self.choiX)
# X \otimes I
rho_targ = DensityMatrix(np.kron(rho1, rho0))
chan = chan2.tensor(chan1)
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
chan = chan2 ^ chan1
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
# I \otimes X
rho_targ = DensityMatrix(np.kron(rho0, rho1))
chan = chan1.tensor(chan2)
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
chan = chan1 ^ chan2
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
# Completely depolarizing
rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)
chan_dep = Choi(self.depol_choi(1))
chan = chan_dep.tensor(chan_dep)
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
chan = chan_dep ^ chan_dep
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init.evolve(chan), rho_targ)
def test_power(self):
"""Test power method."""
# 10% depolarizing channel
p_id = 0.9
depol = Choi(self.depol_choi(1 - p_id))
# Compose 3 times
p_id3 = p_id**3
chan3 = depol.power(3)
targ3 = Choi(self.depol_choi(1 - p_id3))
self.assertEqual(chan3, targ3)
def test_power_except(self):
"""Test power method raises exceptions."""
chan = Choi(self.depol_choi(1))
# Non-integer power raises error
self.assertRaises(QiskitError, chan.power, 0.5)
def test_add(self):
"""Test add method."""
mat1 = 0.5 * self.choiI
mat2 = 0.5 * self.depol_choi(1)
chan1 = Choi(mat1)
chan2 = Choi(mat2)
targ = Choi(mat1 + mat2)
self.assertEqual(chan1._add(chan2), targ)
self.assertEqual(chan1 + chan2, targ)
targ = Choi(mat1 - mat2)
self.assertEqual(chan1 - chan2, targ)
def test_add_qargs(self):
"""Test add method with qargs."""
mat = self.rand_matrix(8 ** 2, 8 ** 2)
mat0 = self.rand_matrix(4, 4)
mat1 = self.rand_matrix(4, 4)
op = Choi(mat)
op0 = Choi(mat0)
op1 = Choi(mat1)
op01 = op1.tensor(op0)
eye = Choi(self.choiI)
with self.subTest(msg='qargs=[0]'):
value = op + op0([0])
target = op + eye.tensor(eye).tensor(op0)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[1]'):
value = op + op0([1])
target = op + eye.tensor(op0).tensor(eye)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[2]'):
value = op + op0([2])
target = op + op0.tensor(eye).tensor(eye)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[0, 1]'):
value = op + op01([0, 1])
target = op + eye.tensor(op1).tensor(op0)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[1, 0]'):
value = op + op01([1, 0])
target = op + eye.tensor(op0).tensor(op1)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[0, 2]'):
value = op + op01([0, 2])
target = op + op1.tensor(eye).tensor(op0)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[2, 0]'):
value = op + op01([2, 0])
target = op + op0.tensor(eye).tensor(op1)
self.assertEqual(value, target)
def test_sub_qargs(self):
"""Test subtract method with qargs."""
mat = self.rand_matrix(8 ** 2, 8 ** 2)
mat0 = self.rand_matrix(4, 4)
mat1 = self.rand_matrix(4, 4)
op = Choi(mat)
op0 = Choi(mat0)
op1 = Choi(mat1)
op01 = op1.tensor(op0)
eye = Choi(self.choiI)
with self.subTest(msg='qargs=[0]'):
value = op - op0([0])
target = op - eye.tensor(eye).tensor(op0)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[1]'):
value = op - op0([1])
target = op - eye.tensor(op0).tensor(eye)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[2]'):
value = op - op0([2])
target = op - op0.tensor(eye).tensor(eye)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[0, 1]'):
value = op - op01([0, 1])
target = op - eye.tensor(op1).tensor(op0)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[1, 0]'):
value = op - op01([1, 0])
target = op - eye.tensor(op0).tensor(op1)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[0, 2]'):
value = op - op01([0, 2])
target = op - op1.tensor(eye).tensor(op0)
self.assertEqual(value, target)
with self.subTest(msg='qargs=[2, 0]'):
value = op - op01([2, 0])
target = op - op0.tensor(eye).tensor(op1)
self.assertEqual(value, target)
def test_add_except(self):
"""Test add method raises exceptions."""
chan1 = Choi(self.choiI)
chan2 = Choi(np.eye(8))
self.assertRaises(QiskitError, chan1._add, chan2)
self.assertRaises(QiskitError, chan1._add, 5)
def test_multiply(self):
"""Test multiply method."""
chan = Choi(self.choiI)
val = 0.5
targ = Choi(val * self.choiI)
self.assertEqual(chan._multiply(val), targ)
self.assertEqual(val * chan, targ)
def test_multiply_except(self):
"""Test multiply method raises exceptions."""
chan = Choi(self.choiI)
self.assertRaises(QiskitError, chan._multiply, 's')
self.assertRaises(QiskitError, chan.__rmul__, 's')
self.assertRaises(QiskitError, chan._multiply, chan)
self.assertRaises(QiskitError, chan.__rmul__, chan)
def test_negate(self):
"""Test negate method"""
chan = Choi(self.choiI)
targ = Choi(-1 * self.choiI)
self.assertEqual(-chan, targ)
if __name__ == '__main__':
unittest.main()
| 36.18125
| 77
| 0.559279
|
0f8baee0e403f41a1bed9b0c40acc83d1ebb9ef5
| 4,510
|
py
|
Python
|
tests/unit/deployers/gcp/test_gcpserving.py
|
suomitekai/fairing
|
9ca6a1138529b3f0b21979d62c7cb1f303bc52e0
|
[
"Apache-2.0"
] | 334
|
2018-09-03T23:10:02.000Z
|
2022-03-07T23:12:24.000Z
|
tests/unit/deployers/gcp/test_gcpserving.py
|
suomitekai/fairing
|
9ca6a1138529b3f0b21979d62c7cb1f303bc52e0
|
[
"Apache-2.0"
] | 562
|
2018-09-03T21:33:42.000Z
|
2022-03-29T12:47:43.000Z
|
tests/unit/deployers/gcp/test_gcpserving.py
|
suomitekai/fairing
|
9ca6a1138529b3f0b21979d62c7cb1f303bc52e0
|
[
"Apache-2.0"
] | 160
|
2018-11-06T17:55:32.000Z
|
2022-02-15T09:59:10.000Z
|
"""Tests for GCPServing Deployer."""
import json
import httplib2
from unittest.mock import patch
from kubeflow.fairing.deployers.gcp.gcpserving import GCPServingDeployer
from googleapiclient.errors import HttpError
def create_http_error(error_code, message):
error_content = json.dumps({
'error': {
'code': error_code,
'message': message,
'details': message
}
}).encode()
headers = {'status': str(error_code), 'content-type': 'application/json'}
response = httplib2.Response(headers)
response.reason = message
return HttpError(response, error_content)
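# Illustrative sketch of what the helper above produces (values are assumed,
# for demonstration only):
#
#   err = create_http_error(error_code=404, message='model not found')
#   # err is an HttpError whose response reports status 404; the tests below
#   # install it as a mock side_effect to simulate API failures.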
# Test that deployment fails if an invalid model request is provided.
def test_invalid_model_request(capsys):
with patch('kubeflow.fairing.deployers.gcp.gcpserving.discovery.build') as mock_ml:
deployer = GCPServingDeployer(
project_id='test_project', model_dir='test_model_dir',
model_name='test_model', version_name='test_version')
(mock_ml.return_value.projects.return_value.models.return_value
.get.side_effect) = create_http_error(
error_code=400, message='invalid request')
deployer.deploy(None)
captured = capsys.readouterr()
assert 'Error retrieving the model' in captured.out
# Test that deployment fails if an invalid model creation request is provided.
def test_invalid_model_creation(capsys):
with patch('kubeflow.fairing.deployers.gcp.gcpserving.discovery.build') as mock_ml:
deployer = GCPServingDeployer(
project_id='test_project', model_dir='test_model_dir',
model_name='test_model', version_name='test_version')
(mock_ml.return_value.projects.return_value.models.return_value
.get.return_value.execute.return_value) = None
(mock_ml.return_value.projects.return_value.models.return_value
.create.side_effect) = create_http_error(
error_code=400, message='invalid request')
deployer.deploy(None)
captured = capsys.readouterr()
assert 'Error creating the model' in captured.out
# Test that a new model is created if not found.
def test_model_creation_with_404():
with patch('kubeflow.fairing.deployers.gcp.gcpserving.discovery.build') as mock_ml:
deployer = GCPServingDeployer(
project_id='test_project', model_dir='test_model_dir',
model_name='test_model', version_name='test_version')
(mock_ml.return_value.projects.return_value.models.return_value
.get.side_effect) = create_http_error(
error_code=404, message='model not found')
deployer.deploy(None)
args, kwargs = (mock_ml.return_value.projects.return_value #pylint:disable=unused-variable
.models.return_value.create.call_args)
assert kwargs['parent'] == 'projects/test_project'
assert kwargs['body'] == {'name': 'test_model'}
# Test that deployment fails if an invalid version creation request is provided.
def test_invalid_version_creation(capsys):
with patch('kubeflow.fairing.deployers.gcp.gcpserving.discovery.build') as mock_ml:
deployer = GCPServingDeployer(
project_id='test_project', model_dir='test_model_dir',
model_name='test_model', version_name='test_version')
(mock_ml.return_value.projects.return_value.models.return_value
.versions.return_value.create.return_value
.execute.side_effect) = create_http_error(
error_code=400, message='invalid request')
deployer.deploy(None)
captured = capsys.readouterr()
assert 'Error creating the version' in captured.out
# Test that a new version is created with the correct arguments.
def test_valid_creation(capsys):
with patch('kubeflow.fairing.deployers.gcp.gcpserving.discovery.build') as mock_ml:
deployer = GCPServingDeployer(
project_id='test_project', model_dir='test_model_dir',
model_name='test_model', version_name='test_version')
deployer.deploy(None)
args, kwargs = (mock_ml.return_value.projects.return_value.models #pylint:disable=unused-variable
.return_value.versions.return_value.create.call_args)
assert kwargs['parent'] == 'projects/test_project/models/test_model'
assert kwargs['body'] == {
'name': 'test_version',
'deploymentUri': 'test_model_dir',
'python_version': '3.5',
'runtime_version': '1.13'
}
captured = capsys.readouterr()
assert 'Version submitted successfully' in captured.out
| 37.89916
| 101
| 0.71663
|
a4c692e12071e94e8e15d3ac7e5627369e577939
| 1,076
|
py
|
Python
|
isi_sdk_8_0_1/test/test_cluster_node_state_servicelight_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0_1/test/test_cluster_node_state_servicelight_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0_1/test/test_cluster_node_state_servicelight_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.cluster_node_state_servicelight_extended import ClusterNodeStateServicelightExtended # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestClusterNodeStateServicelightExtended(unittest.TestCase):
"""ClusterNodeStateServicelightExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testClusterNodeStateServicelightExtended(self):
"""Test ClusterNodeStateServicelightExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.cluster_node_state_servicelight_extended.ClusterNodeStateServicelightExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.243902
| 132
| 0.752788
|
63e390f785bfd16e747eb3b80d0391695d2cc4d8
| 5,960
|
py
|
Python
|
nevergrad/instrumentation/core.py
|
ClementWalter/nevergrad
|
b9c3c3840abc5948d0f0a2560d811aea6cbc5ce9
|
[
"MIT"
] | null | null | null |
nevergrad/instrumentation/core.py
|
ClementWalter/nevergrad
|
b9c3c3840abc5948d0f0a2560d811aea6cbc5ce9
|
[
"MIT"
] | null | null | null |
nevergrad/instrumentation/core.py
|
ClementWalter/nevergrad
|
b9c3c3840abc5948d0f0a2560d811aea6cbc5ce9
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
from typing import Any, Tuple, Optional, Dict, Set, TypeVar, Callable
import numpy as np
from ..common.typetools import ArrayLike
ArgsKwargs = Tuple[Tuple[Any, ...], Dict[str, Any]]
T = TypeVar('T', bound="Variable")
class VarSpecs:
# pylint: disable=too-many-arguments, unused-argument
def __init__(self) -> None:
self.dimension = -1
self.nargs = 1
self.kwargs_keys: Set[str] = set()
self.continuous = True
self.noisy = False
self.name: Optional[str] = None
def update(self,
dimension: Optional[int] = None,
nargs: Optional[int] = None,
kwargs_keys: Optional[Set[str]] = None,
continuous: Optional[bool] = None,
noisy: Optional[bool] = None,
name: Optional[str] = None
) -> None:
for key, value in locals().items():
if key != "self" and value is not None:
setattr(self, key, value)
def _default_checker(*args: Any, **kwargs: Any) -> bool: # pylint: disable=unused-argument
return True
class Variable:
def __init__(self) -> None:
self._random_state: Optional[np.random.RandomState] = None # lazy initialization
self._specs = VarSpecs()
self._constraint_checker = _default_checker
def set_cheap_constraint_checker(self, func: Callable[..., bool]) -> None:
self._constraint_checker = func
def cheap_constraint_check(self, *args: Any, **kwargs: Any) -> bool:
return self._constraint_checker(*args, **kwargs)
@property
def random_state(self) -> np.random.RandomState:
"""Random state the instrumentation and the optimizers pull from.
It can be seeded/replaced.
"""
if self._random_state is None:
# use the setter, to make sure the random state is propagated to the variables
seed = np.random.randint(2 ** 32, dtype=np.uint32)
self._set_random_state(np.random.RandomState(seed))
assert self._random_state is not None
return self._random_state
@random_state.setter
def random_state(self, random_state: np.random.RandomState) -> None:
self._set_random_state(random_state)
def _set_random_state(self, random_state: np.random.RandomState) -> None:
self._random_state = random_state
def with_name(self: T, name: str) -> T:
"""Sets a name and return the current instrumentation (for chaining)
"""
self._specs.update(name=name)
return self
@property
def name(self) -> str:
"""Short identifier for the variables
"""
if self._specs.name is not None:
return self._specs.name
return repr(self)
def copy(self: T) -> T: # TODO, use deepcopy directly in the code if it works?
"""Return a new instrumentation with the same variable and same name
(but a different random state)
"""
instru = copy.deepcopy(self)
instru._random_state = None
return instru
@property
def dimension(self) -> int:
return self._specs.dimension
@property
def nargs(self) -> int:
return self._specs.nargs
@property
def kwargs_keys(self) -> Set[str]:
return self._specs.kwargs_keys
@property
def continuous(self) -> bool:
return self._specs.continuous
@property
def noisy(self) -> bool:
return self._specs.noisy
def arguments_to_data(self, *args: Any, **kwargs: Any) -> np.ndarray:
"""Converts args and kwargs into data in np.ndarray format
"""
if len(args) != self.nargs:
raise TypeError(f"Expected {self.nargs} arguments ({len(args)} given: {args})")
if self.kwargs_keys != set(kwargs.keys()):
raise TypeError(f"Expected arguments {self.kwargs_keys} ({set(kwargs.keys())} given: {kwargs})")
return self._arguments_to_data(*args, **kwargs)
def _arguments_to_data(self, *args: Any, **kwargs: Any) -> np.ndarray:
raise RuntimeError(f"arguments_to_data is not defined for {self.__class__.__name__}")
def data_to_arguments(self, data: ArrayLike, deterministic: bool = False) -> ArgsKwargs:
"""Converts data to arguments
Parameters
----------
data: ArrayLike (list/tuple of floats, np.ndarray)
the data in the optimization space
deterministic: bool
whether the conversion should be deterministic (some variables can be stochastic, if deterministic=True
the most likely output will be used)
Returns
-------
args: Tuple[Any]
the positional arguments corresponding to the instance initialization positional arguments
kwargs: Dict[str, Any]
the keyword arguments corresponding to the instance initialization keyword arguments
"""
        # trigger random_state creation (may require to be propagated to sub-variables)
assert self.random_state is not None
array = np.array(data, copy=False)
if array.shape != (self.dimension,):
raise ValueError(f"Unexpected shape {array.shape} of {array} for {self} with dimension {self.dimension}")
return self._data_to_arguments(array, deterministic)
def _data_to_arguments(self, data: np.ndarray, deterministic: bool) -> ArgsKwargs:
raise NotImplementedError
def get_summary(self, data: ArrayLike) -> str: # pylint: disable=unused-argument
output = self.data_to_arguments(np.array(data, copy=False), deterministic=True)
return f"Value {output[0][0]}, from data: {data}"
def freeze(self) -> None:
pass # forward compatibility
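# Minimal subclass sketch (illustrative only; the _Scalar name and its
# behavior below are assumptions, not part of nevergrad's public API):
#
#   class _Scalar(Variable):
#       def __init__(self) -> None:
#           super().__init__()
#           self._specs.update(dimension=1)
#
#       def _data_to_arguments(self, data: np.ndarray, deterministic: bool) -> ArgsKwargs:
#           return (float(data[0]),), {}
#
#       def _arguments_to_data(self, *args: Any, **kwargs: Any) -> np.ndarray:
#           return np.array([args[0]])
#
#   var = _Scalar()
#   args, kwargs = var.data_to_arguments([0.5])  # -> (0.5,), {}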
| 36.564417
| 117
| 0.63943
|
2ff41257cb55bff03ae6cc32f2c1a8f1e98b5ed9
| 43
|
py
|
Python
|
pythonCore/ch11/mymodule/test.py
|
Furzoom/learnpython
|
a3034584e481d4e7c55912d9da06439688aa67ea
|
[
"MIT"
] | null | null | null |
pythonCore/ch11/mymodule/test.py
|
Furzoom/learnpython
|
a3034584e481d4e7c55912d9da06439688aa67ea
|
[
"MIT"
] | null | null | null |
pythonCore/ch11/mymodule/test.py
|
Furzoom/learnpython
|
a3034584e481d4e7c55912d9da06439688aa67ea
|
[
"MIT"
] | null | null | null |
__author__ = 'MN'
print('in module test')
| 10.75
| 23
| 0.674419
|
74d21cccf5b485dd1c1ff6cfb087bea9b5dc6a73
| 14,152
|
py
|
Python
|
tests/_core/test_step.py
|
vishalbelsare/baikal
|
332623c1d6121d3321f9cd9972fc36b6d16462d4
|
[
"BSD-3-Clause"
] | 622
|
2019-11-17T02:58:25.000Z
|
2022-01-23T11:14:31.000Z
|
tests/_core/test_step.py
|
vishalbelsare/baikal
|
332623c1d6121d3321f9cd9972fc36b6d16462d4
|
[
"BSD-3-Clause"
] | 41
|
2019-10-30T14:08:53.000Z
|
2021-10-21T18:41:15.000Z
|
tests/_core/test_step.py
|
vishalbelsare/baikal
|
332623c1d6121d3321f9cd9972fc36b6d16462d4
|
[
"BSD-3-Clause"
] | 31
|
2019-11-18T00:25:03.000Z
|
2021-07-28T04:05:26.000Z
|
from contextlib import contextmanager
from functools import partial
import pytest
from sklearn.base import TransformerMixin, BaseEstimator
from baikal import Input, Step, set_config
from baikal._core.data_placeholder import DataPlaceholder
from baikal._core.step import InputStep
from tests.helpers.fixtures import teardown
from tests.helpers.dummy_steps import DummyMIMO, DummySISO, DummyEstimator
from tests.helpers.sklearn_steps import LogisticRegression, PCA
@contextmanager
def does_not_raise():
yield
class TestInput:
def test_instantiation(self, teardown):
x0 = Input()
assert isinstance(x0, DataPlaceholder)
assert x0.name == "InputStep_0"
def test_instantiate_two_without_name(self, teardown):
x0 = Input()
x1 = Input()
assert x0.name == "InputStep_0"
assert x1.name == "InputStep_1"
class TestInputStep:
def test_repr(self):
step = InputStep(name="x1")
assert repr(step) == "InputStep(name='x1')"
class TestStep:
def test_instantiate_two_without_name(self, teardown):
lr0 = LogisticRegression()
lr1 = LogisticRegression()
assert lr0.name == "LogisticRegression_0"
assert lr1.name == "LogisticRegression_1"
def test_instantiate_with_invalid_compute_func(self):
class DummyStep(Step):
def somefunc(self, X):
pass
class DummyStepWithPredict(Step):
def predict(self, X):
pass
class DummyStepWithTransform(Step):
def transform(self, X):
pass
x = Input()
with pytest.raises(ValueError):
step = DummyStep()
step(x, compute_func="auto")
with pytest.raises(ValueError):
step = DummyStep()
step(x, compute_func=123)
step = DummyStep()
step(x, compute_func="somefunc")
assert step.compute_func == step.somefunc
def anotherfunc():
pass
step = DummyStep()
step(x, compute_func=anotherfunc)
assert step.compute_func == anotherfunc
step = DummyStepWithPredict()
step(x)
assert step.compute_func == step.predict
step = DummyStepWithTransform()
step(x)
assert step.compute_func == step.transform
def test_instantiate_with_invalid_fit_compute_func(self):
class DummyStepWithoutFit(Step):
def predict(self, X):
pass
def somefunc(self, X):
pass
class DummyStepWithFitPredict(Step):
def predict(self, X):
pass
def fit_predict(self, X, y):
pass
class DummyStepWithFitTransform(Step):
def transform(self, X):
pass
def fit_transform(self, X, y):
pass
x = Input()
step = DummyStepWithoutFit()
step(x, fit_compute_func="auto")
assert step.fit_compute_func is None
with pytest.raises(ValueError):
step = DummyStepWithoutFit()
step(x, fit_compute_func=123)
step = DummyStepWithoutFit()
step(x, fit_compute_func="somefunc")
assert step.fit_compute_func == step.somefunc
def anotherfunc():
pass
step = DummyStepWithoutFit()
step(x, fit_compute_func=anotherfunc)
assert step.fit_compute_func == anotherfunc
step = DummyStepWithFitPredict()
step(x)
assert step.fit_compute_func == step.fit_predict
step = DummyStepWithFitTransform()
step(x)
assert step.fit_compute_func == step.fit_transform
step = DummyStepWithFitTransform()
step(x, fit_compute_func=None)
assert step.fit_compute_func is None
def test_call_with_invalid_input_type(self, teardown):
with pytest.raises(ValueError):
LogisticRegression()([[1, 2], [3, 4]])
def test_call_with_invalid_target_type(self, teardown):
x = Input()
with pytest.raises(ValueError):
LogisticRegression()(x, [0, 1])
# Below tests are parametrized to take two kind of fittable steps:
# - step that requires y (e.g. Logistic Regression)
# - step that does not require y (e.g. PCA)
@pytest.mark.parametrize("step_class", [LogisticRegression, PCA])
@pytest.mark.parametrize("trainable", [True, False])
def test_call_without_targets(self, step_class, trainable, teardown):
x = Input()
step_class()(x, trainable=trainable)
@pytest.mark.parametrize("step_class", [LogisticRegression, PCA])
@pytest.mark.parametrize(
"trainable,expectation",
[(True, does_not_raise), (False, partial(pytest.warns, UserWarning))],
)
def test_call_with_targets(self, step_class, trainable, expectation, teardown):
x = Input()
y_t = Input()
with expectation():
step_class()(x, y_t, trainable=trainable)
def test_call_without_targets_without_fit_method(self, teardown):
x = Input()
DummySISO()(x)
def test_call_with_targets_without_fit_method(self, teardown):
x = Input()
y_t = Input()
with pytest.raises(RuntimeError):
DummySISO()(x, y_t)
def test_call_with_two_inputs(self, teardown):
x0 = Input()
x1 = Input()
y0, y1 = DummyMIMO()([x0, x1])
assert isinstance(y0, DataPlaceholder)
assert isinstance(y1, DataPlaceholder)
assert y0.name == "DummyMIMO_0:0/0"
assert y1.name == "DummyMIMO_0:0/1"
def test_call_twice(self, teardown):
x0 = Input()
x1 = Input()
step = DummySISO()
y0 = step(x0)
y1 = step(x1)
assert isinstance(y0, DataPlaceholder)
assert isinstance(y1, DataPlaceholder)
assert y0.name == "DummySISO_0:0/0"
assert y1.name == "DummySISO_0:1/0"
@pytest.mark.parametrize(
"print_changed_only,expected",
[
(None, "DummyEstimator(x=456, name='DE', n_outputs=1)"),
(True, "DummyEstimator(x=456, name='DE', n_outputs=1)"),
(False, "DummyEstimator(x=456, y='abc', name='DE', n_outputs=1)"),
],
)
def test_repr(self, print_changed_only, expected, teardown):
set_config(print_changed_only=print_changed_only)
step = DummyEstimator(x=456, name="DE")
assert repr(step) == expected
def test_get_params(self, teardown):
step = DummyEstimator()
params = step.get_params()
expected = {"x": 123, "y": "abc"}
assert params == expected
def test_get_params_without_init(self, teardown):
"""Test edge case where the base class does not define
an __init__ method. get_params should resolve to object.__init__
which results in an empty dict.
"""
class TransformerWithoutInit(TransformerMixin, BaseEstimator):
pass
class TransformerWithoutInitStep(Step, TransformerWithoutInit):
pass
step = TransformerWithoutInitStep()
assert step.get_params() == {}
def test_set_params(self, teardown):
step = DummyEstimator()
new_params_wrong = {"non_existent_param": 42}
with pytest.raises(ValueError):
step.set_params(**new_params_wrong)
new_params = {"x": 456}
step.set_params(**new_params)
params = step.get_params()
expected = {"x": 456, "y": "abc"}
assert params == expected
# Below are tests for properties
@pytest.fixture
def simple_step(self):
return DummyEstimator()
@pytest.fixture
def shared_step(self):
return DummyEstimator()
@pytest.fixture
def dataplaceholders(self, simple_step, shared_step):
x1 = Input(name="x1")
x2 = Input(name="x2")
y_t = Input(name="y_t")
y_simple = simple_step(x1, y_t)
y_shared_1 = shared_step(x1, y_t)
y_shared_2 = shared_step(
x2,
compute_func="predict_proba",
fit_compute_func="fit_predict_proba",
trainable=False,
)
return x1, x2, y_t, y_simple, y_shared_1, y_shared_2
def test_inputs(self, simple_step, shared_step, dataplaceholders, teardown):
x1 = dataplaceholders[0]
assert simple_step.inputs == [x1]
with pytest.raises(AttributeError):
shared_step.inputs
with pytest.raises(AttributeError):
# because the step hasn't been called
LogisticRegression().inputs
def test_outputs(self, simple_step, shared_step, dataplaceholders, teardown):
*_, y_simple, y_shared_1, y_shared_2 = dataplaceholders
assert simple_step.outputs == [y_simple]
with pytest.raises(AttributeError):
shared_step.outputs
with pytest.raises(AttributeError):
# because the step hasn't been called
LogisticRegression().outputs
def test_targets(self, simple_step, shared_step, dataplaceholders, teardown):
y_t = dataplaceholders[2]
assert simple_step.targets == [y_t]
with pytest.raises(AttributeError):
shared_step.targets
with pytest.raises(AttributeError):
# because the step hasn't been called
LogisticRegression().targets
def test_compute_func(self, simple_step, shared_step, dataplaceholders, teardown):
assert simple_step.compute_func == simple_step.predict
simple_step.compute_func = simple_step.predict_proba
assert simple_step.compute_func == simple_step.predict_proba
with pytest.raises(AttributeError):
shared_step.compute_func
with pytest.raises(AttributeError):
shared_step.compute_func = shared_step.predict_proba
with pytest.raises(AttributeError):
# because the step hasn't been called
LogisticRegression().compute_func
with pytest.raises(AttributeError):
# because the step hasn't been called
LogisticRegression().compute_func = lambda x: x
def test_fit_compute_func(
self, simple_step, shared_step, dataplaceholders, teardown
):
assert simple_step.fit_compute_func == simple_step.fit_predict
simple_step.fit_compute_func = simple_step.fit_predict_proba
assert simple_step.fit_compute_func == simple_step.fit_predict_proba
with pytest.raises(AttributeError):
shared_step.fit_compute_func
with pytest.raises(AttributeError):
shared_step.fit_compute_func = shared_step.fit_predict_proba
with pytest.raises(AttributeError):
# because the step hasn't been called
DummyEstimator().fit_compute_func
with pytest.raises(AttributeError):
# because the step hasn't been called
DummyEstimator().fit_compute_func = lambda x: x
def test_trainable(self, simple_step, shared_step, dataplaceholders, teardown):
assert simple_step.trainable
simple_step.trainable = False
assert not simple_step.trainable
with pytest.raises(AttributeError):
shared_step.trainable
with pytest.raises(AttributeError):
shared_step.trainable = True
with pytest.raises(AttributeError):
# because the step hasn't been called
LogisticRegression().trainable
with pytest.raises(AttributeError):
# because the step hasn't been called
LogisticRegression().trainable = False
def test_get_inputs_at(self, simple_step, shared_step, dataplaceholders, teardown):
x1, x2, *_ = dataplaceholders
assert simple_step.get_inputs_at(0) == [x1]
assert shared_step.get_inputs_at(0) == [x1]
assert shared_step.get_inputs_at(1) == [x2]
def test_get_outputs_at(self, simple_step, shared_step, dataplaceholders, teardown):
*_, y_simple, y_shared_1, y_shared_2 = dataplaceholders
assert simple_step.get_outputs_at(0) == [y_simple]
assert shared_step.get_outputs_at(0) == [y_shared_1]
assert shared_step.get_outputs_at(1) == [y_shared_2]
def test_get_targets_at(self, simple_step, shared_step, dataplaceholders, teardown):
y_t = dataplaceholders[2]
assert simple_step.get_targets_at(0) == [y_t]
assert shared_step.get_targets_at(0) == [y_t]
assert shared_step.get_targets_at(1) == []
def test_get_compute_func_at(
self, simple_step, shared_step, dataplaceholders, teardown
):
assert simple_step.get_compute_func_at(0) == simple_step.predict
assert shared_step.get_compute_func_at(0) == shared_step.predict
assert shared_step.get_compute_func_at(1) == shared_step.predict_proba
def test_set_compute_func_at(self, shared_step, dataplaceholders, teardown):
shared_step.set_compute_func_at(1, shared_step.predict)
assert shared_step.get_compute_func_at(1) == shared_step.predict
def test_get_fit_compute_func_at(
self, simple_step, shared_step, dataplaceholders, teardown
):
assert simple_step.get_fit_compute_func_at(0) == simple_step.fit_predict
assert shared_step.get_fit_compute_func_at(0) == shared_step.fit_predict
assert shared_step.get_fit_compute_func_at(1) == shared_step.fit_predict_proba
def test_set_fit_compute_func_at(self, shared_step, dataplaceholders, teardown):
shared_step.set_fit_compute_func_at(1, shared_step.fit_predict)
assert shared_step.get_fit_compute_func_at(1) == shared_step.fit_predict
def test_get_trainable_at(
self, simple_step, shared_step, dataplaceholders, teardown
):
assert simple_step.get_trainable_at(0)
assert shared_step.get_trainable_at(0)
assert not shared_step.get_trainable_at(1)
def test_set_trainable_at(self, shared_step, dataplaceholders, teardown):
shared_step.set_trainable_at(1, True)
assert shared_step.get_trainable_at(1)
| 33.535545
| 88
| 0.651851
|
a202e0bbd8f87396163757c11019034dac56efd3
| 8,154
|
py
|
Python
|
qqbot/qrcodemanager.py
|
hatsuyuki280/qqbot
|
afc6f276f31fd6ba93a43785207735c0392ab3fe
|
[
"MIT"
] | 7
|
2019-08-06T09:42:34.000Z
|
2021-06-18T00:34:02.000Z
|
qqbot/qrcodemanager.py
|
hatsuyuki280/qqbot
|
afc6f276f31fd6ba93a43785207735c0392ab3fe
|
[
"MIT"
] | 4
|
2021-06-08T19:22:30.000Z
|
2022-03-11T23:32:13.000Z
|
qqbot/qrcodemanager.py
|
hatsuyuki280/qqbot
|
afc6f276f31fd6ba93a43785207735c0392ab3fe
|
[
"MIT"
] | 5
|
2018-12-04T14:41:31.000Z
|
2020-08-27T10:54:30.000Z
|
# -*- coding: utf-8 -*-
import sys, os
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if p not in sys.path:
sys.path.insert(0, p)
import os, platform, uuid, subprocess, time
from qqbot.utf8logger import WARN, INFO, DEBUG, ERROR
from qqbot.common import StartDaemonThread, LockedValue, HasCommand, SYSTEMSTR2STR
from qqbot.qrcodeserver import QrcodeServer
from qqbot.mailagent import MailAgent
Image = None
class QrcodeManager(object):
def __init__(self, conf):
qrcodeId = uuid.uuid4().hex
self.qrcodePath = conf.QrcodePath(qrcodeId)
if conf.httpServerIP:
self.qrcodeServer = QrcodeServer(
conf.httpServerIP,
conf.httpServerPort,
self.qrcodePath,
qrcodeId
)
StartDaemonThread(self.qrcodeServer.Run)
else:
self.qrcodeServer = None
if conf.mailAccount:
self.mailAgent = MailAgent(
conf.mailAccount, conf.mailAuthCode, name='QQBot管理员'
)
if self.qrcodeServer:
html = ('<p>您的 QQBot 正在登录,请尽快用手机 QQ 扫描下面的二维码。'
'若二维码已过期,请重新打开本邮件。若您看不到二维码图片,请确保'
'图片地址 <a href="{0}">{0}</a> 可以通过公网访问。</p>'
'<p><img src="{0}"></p>').format(self.qrcodeServer.qrcodeURL)
else:
html = ('<p>您的 QQBot 正在登录,请尽快用手机 QQ 扫描下面的二维码。'
'若二维码已过期,请将本邮件设为已读邮件,之后 QQBot 会在'
'1~2分钟内将最新的二维码发送到本邮箱。</p>'
'<p>{{png}}</p>')
html += '<p>conf.user=%r, conf.qq=%r</p>' % (conf.user, conf.qq)
self.qrcodeMail = {
'to_addr': conf.mailAccount,
'html': html,
'subject': ('%s[%s]' % ('QQBot二维码', qrcodeId)),
'to_name': '我'
}
self.qrcode = LockedValue(None)
else:
self.mailAgent = None
self.cmdQrcode = conf.cmdQrcode
if self.cmdQrcode:
global Image
try:
from PIL import Image as i
import wcwidth
Image = i
except ImportError:
ERROR('需要安装 pillow,wcwidth 才能使用文本模式显示二维码')
sys.exit(1)
def Show(self, qrcode):
with open(self.qrcodePath, 'wb') as f:
f.write(qrcode)
from qqbot import _bot
if hasattr(_bot, 'onQrcode'):
_bot.onQrcode(self.qrcodePath, qrcode)
if self.cmdQrcode:
try:
showCmdQRCode(self.qrcodePath)
except Exception as e:
WARN('无法以文本模式显示二维码图片 file://%s 。%s',
SYSTEMSTR2STR(self.qrcodePath), e)
if not (self.qrcodeServer or self.mailAgent or self.cmdQrcode):
try:
showImage(self.qrcodePath)
except Exception as e:
WARN('无法弹出二维码图片 file://%s 。%s', SYSTEMSTR2STR(self.qrcodePath), e)
if self.qrcodeServer:
INFO('请使用浏览器访问二维码,图片地址:%s', self.qrcodeServer.qrcodeURL)
if self.mailAgent:
if self.qrcode.getVal() is None:
self.qrcode.setVal(qrcode)
# first show, start a thread to send emails
StartDaemonThread(self.sendEmail)
else:
self.qrcode.setVal(qrcode)
def sendEmail(self):
lastSubject = ''
while True:
if lastSubject != self.qrcodeMail['subject']:
qrcode = self.qrcode.getVal()
if qrcode is None:
break
qrcode = '' if self.qrcodeServer else qrcode
try:
with self.mailAgent.SMTP() as smtp:
smtp.send(png_content=qrcode, **self.qrcodeMail)
except Exception as e:
WARN('无法将二维码发送至邮箱%s %s', self.mailAgent.account, e, exc_info=True)
else:
INFO('已将二维码发送至邮箱%s', self.mailAgent.account)
if self.qrcodeServer:
break
else:
lastSubject = self.qrcodeMail['subject']
else:
time.sleep(65)
qrcode = self.qrcode.getVal()
if qrcode is None:
break
try:
DEBUG('开始查询邮箱 %s 中的最近的邮件', self.mailAgent.account)
with self.mailAgent.IMAP() as imap:
lastSubject = imap.getSubject(-1)
except Exception as e:
WARN('查询邮箱 %s 中的邮件失败 %s', self.mailAgent.account, e)
else:
DEBUG('最近的邮件: %s', lastSubject)
def Destroy(self):
if self.mailAgent:
self.qrcode.setVal(None)
if self.qrcodeServer:
self.qrcodeServer.Stop()
try:
os.remove(self.qrcodePath)
except OSError:
pass
def showImage(filename):
osName = platform.system()
if osName == 'Windows':
subprocess.Popen([filename], shell=True)
elif osName == 'Linux':
if HasCommand('gvfs-open'):
subprocess.Popen(['gvfs-open', filename])
elif HasCommand('shotwell'):
subprocess.Popen(['shotwell', filename])
else:
            raise Exception('no image viewer found')
elif osName == 'Darwin': # by @Naville
subprocess.Popen(['open', filename])
else:
raise Exception('other system')
def showCmdQRCode(filename):
global Image
import wcwidth
# 165x165 -> 33x33
size=33
padding=1
rgb=Image.open(filename).resize((size,size)).convert('RGB')
qrtext = '0' * (size + padding * 2) + '\n'
for rr in range(size):
qrtext += '0'*padding
for cc in range(size):
r,g,b = rgb.getpixel((cc,rr))
if (r > 127 or g > 127 or b > 127):
qrtext += '0'
else:
qrtext += '1'
qrtext += '0'*padding
qrtext += '\n'
qrtext = qrtext + '0' * (size + padding * 2) + '\n'
try:
b = u'\u2588'
sys.stdout.write(b + '\r')
sys.stdout.flush()
except UnicodeEncodeError:
white = 'MM'
else:
white = b
black=' '
    # Currently '\u2588' does not render correctly on Windows, so plain ASCII is used there.
osName = platform.system()
if osName == 'Windows':
white = '@@'
blockCount = int(2/wcwidth.wcswidth(white))
white *= abs(blockCount)
sys.stdout.write(' '*50 + '\r')
sys.stdout.flush()
qr = qrtext.replace('0', white).replace('1', black)
qr = '\033[37m\033[40m\n' + qr + '\033[0m\n' # force to use white/black.
sys.stdout.write(qr)
sys.stdout.flush()
# A space-saving text QRCode
if osName != 'Windows':
charlist = [u' ', u'\u2598', u'\u259D', u'\u2580', u'\u2596', u'\u258C', u'\u259E', u'\u259B',
u'\u2597', u'\u259A', u'\u2590', u'\u259C', u'\u2584', u'\u2599', u'\u259F', u'\u2588']
        qrarray = [list(x) for x in qrtext.split('\n')]
qrtext = ''
for rr in range(0, size + padding * 2, 2):
for cc in range(0, size + padding * 2, 2):
index = int(''.join([x for row in qrarray[rr:rr+2] for x in (row + ['0'])[cc:cc+2]][::-1]), 2)
qrtext += hex(15 - index)[-1]
qrtext += '\n'
qr = ''.join(map(lambda x: charlist[int(x, 16)] if x != '\n' else x, qrtext))
qr = '\033[37m\033[40m\n' + qr + '\033[0m\n' # force to use white/black.
sys.stdout.write(qr)
sys.stdout.flush()
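        # Worked example of the 2x2 packing above (editor's illustration, not
        # part of the original file): for a block with pixels TL=1, TR=0, BL=1,
        # BR=0 (where '1' marks a dark module), the reversed bit string is
        # '0101' -> index 5, hex(15 - 5)[-1] = 'a', and charlist[0xA] is
        # u'\u2590' (right half block), which draws the two light quadrants
        # (TR and BR) -- consistent with the full-size rendering above.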
if __name__ == '__main__':
from qconf import QConf
    # The mail account and authorization code must first be set in ~/.qqbot-tmp/v2.x.conf
conf = QConf()
conf.Display()
qrm = QrcodeManager(conf)
with open('tmp.png', 'rb') as f:
qrcode = f.read()
qrm.Show(qrcode)
time.sleep(5)
qrm.Show(qrcode)
qrm.Destroy()
| 33.418033 | 110 | 0.50466 |
541ce25e98c8e49118dacc2e87b5ff09a7cdae2c | 17,182 | py | Python | ironic/tests/unit/db/test_conductor.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | ["Apache-2.0"] | null | null | null | ironic/tests/unit/db/test_conductor.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | ["Apache-2.0"] | null | null | null | ironic/tests/unit/db/test_conductor.py | markbeierl/ironic | bcf5b37c736bc36abe94489c366fe26f198a7e7a | ["Apache-2.0"] | null | null | null |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating Conductors via the DB API"""
import datetime
import mock
import oslo_db
from oslo_db import exception as db_exc
from oslo_db import sqlalchemy
from oslo_utils import timeutils
from ironic.common import exception
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
class DbConductorTestCase(base.DbTestCase):
def test_register_conductor_existing_fails(self):
c = utils.get_test_conductor()
self.dbapi.register_conductor(c)
self.assertRaises(
exception.ConductorAlreadyRegistered,
self.dbapi.register_conductor,
c)
def test_register_conductor_override(self):
c = utils.get_test_conductor()
self.dbapi.register_conductor(c)
self.dbapi.register_conductor(c, update_existing=True)
def _create_test_cdr(self, hardware_types=None, **kwargs):
hardware_types = hardware_types or []
c = utils.get_test_conductor(**kwargs)
cdr = self.dbapi.register_conductor(c)
for ht in hardware_types:
self.dbapi.register_conductor_hardware_interfaces(cdr.id, ht,
'power',
['ipmi', 'fake'],
'ipmi')
return cdr
def test_register_conductor_hardware_interfaces(self):
c = self._create_test_cdr()
interfaces = ['direct', 'iscsi']
self.dbapi.register_conductor_hardware_interfaces(c.id, 'generic',
'deploy', interfaces,
'iscsi')
ifaces = self.dbapi.list_conductor_hardware_interfaces(c.id)
ci1, ci2 = ifaces
self.assertEqual(2, len(ifaces))
self.assertEqual('generic', ci1.hardware_type)
self.assertEqual('generic', ci2.hardware_type)
self.assertEqual('deploy', ci1.interface_type)
self.assertEqual('deploy', ci2.interface_type)
self.assertEqual('direct', ci1.interface_name)
self.assertEqual('iscsi', ci2.interface_name)
self.assertFalse(ci1.default)
self.assertTrue(ci2.default)
def test_register_conductor_hardware_interfaces_duplicate(self):
c = self._create_test_cdr()
interfaces = ['direct', 'iscsi']
self.dbapi.register_conductor_hardware_interfaces(c.id, 'generic',
'deploy', interfaces,
'iscsi')
ifaces = self.dbapi.list_conductor_hardware_interfaces(c.id)
ci1, ci2 = ifaces
self.assertEqual(2, len(ifaces))
# do it again for the duplicates
self.assertRaises(
exception.ConductorHardwareInterfacesAlreadyRegistered,
self.dbapi.register_conductor_hardware_interfaces,
c.id, 'generic', 'deploy', interfaces, 'iscsi')
def test_unregister_conductor_hardware_interfaces(self):
c = self._create_test_cdr()
interfaces = ['direct', 'iscsi']
self.dbapi.register_conductor_hardware_interfaces(c.id, 'generic',
'deploy', interfaces,
'iscsi')
self.dbapi.unregister_conductor_hardware_interfaces(c.id)
ifaces = self.dbapi.list_conductor_hardware_interfaces(c.id)
self.assertEqual([], ifaces)
def test_get_conductor(self):
c1 = self._create_test_cdr()
c2 = self.dbapi.get_conductor(c1.hostname)
self.assertEqual(c1.id, c2.id)
def test_get_inactive_conductor_ignore_online(self):
c1 = self._create_test_cdr()
self.dbapi.unregister_conductor(c1.hostname)
c2 = self.dbapi.get_conductor(c1.hostname, online=None)
self.assertEqual(c1.id, c2.id)
def test_get_inactive_conductor_with_online_true(self):
c1 = self._create_test_cdr()
self.dbapi.unregister_conductor(c1.hostname)
self.assertRaises(exception.ConductorNotFound,
self.dbapi.get_conductor, c1.hostname)
def test_get_conductor_not_found(self):
self._create_test_cdr()
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.get_conductor,
'bad-hostname')
def test_unregister_conductor(self):
c = self._create_test_cdr()
self.dbapi.unregister_conductor(c.hostname)
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.unregister_conductor,
c.hostname)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_touch_conductor(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
c = self._create_test_cdr()
self.assertEqual(test_time, timeutils.normalize_time(c.updated_at))
test_time = datetime.datetime(2000, 1, 1, 0, 1)
mock_utcnow.return_value = test_time
self.dbapi.touch_conductor(c.hostname)
c = self.dbapi.get_conductor(c.hostname)
self.assertEqual(test_time, timeutils.normalize_time(c.updated_at))
@mock.patch.object(oslo_db.api.time, 'sleep', autospec=True)
@mock.patch.object(sqlalchemy.orm.Query, 'update', autospec=True)
def test_touch_conductor_deadlock(self, mock_update, mock_sleep):
mock_sleep.return_value = None
mock_update.side_effect = [db_exc.DBDeadlock(), None]
c = self._create_test_cdr()
self.dbapi.touch_conductor(c.hostname)
self.assertEqual(2, mock_update.call_count)
self.assertEqual(2, mock_sleep.call_count)
def test_touch_conductor_not_found(self):
# A conductor's heartbeat will not create a new record,
# it will only update existing ones
self._create_test_cdr()
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.touch_conductor,
'bad-hostname')
def test_touch_offline_conductor(self):
# Ensure that a conductor's periodic heartbeat task can make the
# conductor visible again, even if it was spuriously marked offline
c = self._create_test_cdr()
self.dbapi.unregister_conductor(c.hostname)
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.get_conductor,
c.hostname)
self.dbapi.touch_conductor(c.hostname)
self.dbapi.get_conductor(c.hostname)
def test_clear_node_reservations_for_conductor(self):
node1 = self.dbapi.create_node({'reservation': 'hostname1'})
node2 = self.dbapi.create_node({'reservation': 'hostname2'})
node3 = self.dbapi.create_node({'reservation': None})
self.dbapi.clear_node_reservations_for_conductor('hostname1')
node1 = self.dbapi.get_node_by_id(node1.id)
node2 = self.dbapi.get_node_by_id(node2.id)
node3 = self.dbapi.get_node_by_id(node3.id)
self.assertIsNone(node1.reservation)
self.assertEqual('hostname2', node2.reservation)
self.assertIsNone(node3.reservation)
def test_clear_node_target_power_state(self):
node1 = self.dbapi.create_node({'reservation': 'hostname1',
'target_power_state': 'power on'})
node2 = self.dbapi.create_node({'reservation': 'hostname2',
'target_power_state': 'power on'})
node3 = self.dbapi.create_node({'reservation': None,
'target_power_state': 'power on'})
self.dbapi.clear_node_target_power_state('hostname1')
node1 = self.dbapi.get_node_by_id(node1.id)
node2 = self.dbapi.get_node_by_id(node2.id)
node3 = self.dbapi.get_node_by_id(node3.id)
self.assertIsNone(node1.target_power_state)
self.assertIn('power operation was aborted', node1.last_error)
self.assertEqual('power on', node2.target_power_state)
self.assertIsNone(node2.last_error)
self.assertEqual('power on', node3.target_power_state)
self.assertIsNone(node3.last_error)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_one_host_no_ht(self, mock_utcnow):
h = 'fake-host'
expected = {}
mock_utcnow.return_value = datetime.datetime.utcnow()
self._create_test_cdr(hostname=h, drivers=[], hardware_types=[])
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_one_host_one_ht(self, mock_utcnow):
h = 'fake-host'
ht = 'hardware-type'
expected = {ht: {h}}
mock_utcnow.return_value = datetime.datetime.utcnow()
self._create_test_cdr(hostname=h, drivers=[], hardware_types=[ht])
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_one_host_one_ht_groups(
self, mock_utcnow):
h = 'fake-host'
ht = 'hardware-type'
group = 'foogroup'
key = '%s:%s' % (group, ht)
expected = {key: {h}}
mock_utcnow.return_value = datetime.datetime.utcnow()
self._create_test_cdr(hostname=h, drivers=[], hardware_types=[ht],
conductor_group=group)
result = self.dbapi.get_active_hardware_type_dict(use_groups=True)
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_one_host_many_ht(self, mock_utcnow):
h = 'fake-host'
ht1 = 'hardware-type'
ht2 = 'another-hardware-type'
expected = {ht1: {h}, ht2: {h}}
mock_utcnow.return_value = datetime.datetime.utcnow()
self._create_test_cdr(hostname=h, drivers=[],
hardware_types=[ht1, ht2])
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_many_host_one_ht(self, mock_utcnow):
h1 = 'host-one'
h2 = 'host-two'
ht = 'hardware-type'
expected = {ht: {h1, h2}}
mock_utcnow.return_value = datetime.datetime.utcnow()
self._create_test_cdr(id=1, hostname=h1, drivers=[],
hardware_types=[ht])
self._create_test_cdr(id=2, hostname=h2, drivers=[],
hardware_types=[ht])
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_many_host_many_ht(self,
mock_utcnow):
h1 = 'host-one'
h2 = 'host-two'
ht1 = 'hardware-type'
ht2 = 'another-hardware-type'
expected = {ht1: {h1, h2}, ht2: {h1, h2}}
mock_utcnow.return_value = datetime.datetime.utcnow()
self._create_test_cdr(id=1, hostname=h1, drivers=[],
hardware_types=[ht1, ht2])
self._create_test_cdr(id=2, hostname=h2, drivers=[],
hardware_types=[ht1, ht2])
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_hardware_type_dict_with_old_conductor(self,
mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
present = past + datetime.timedelta(minutes=2)
ht = 'hardware-type'
h1 = 'old-host'
ht1 = 'old-hardware-type'
mock_utcnow.return_value = past
self._create_test_cdr(id=1, hostname=h1, drivers=[],
hardware_types=[ht, ht1])
h2 = 'new-host'
ht2 = 'new-hardware-type'
mock_utcnow.return_value = present
self._create_test_cdr(id=2, hostname=h2, drivers=[],
hardware_types=[ht, ht2])
# verify that old-host does not show up in current list
self.config(heartbeat_timeout=60, group='conductor')
expected = {ht: {h2}, ht2: {h2}}
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
# change the heartbeat timeout, and verify that old-host appears
self.config(heartbeat_timeout=120, group='conductor')
expected = {ht: {h1, h2}, ht1: {h1}, ht2: {h2}}
result = self.dbapi.get_active_hardware_type_dict()
self.assertEqual(expected, result)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_offline_conductors(self, mock_utcnow):
self.config(heartbeat_timeout=60, group='conductor')
time_ = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = time_
c = self._create_test_cdr()
# Only 30 seconds passed since last heartbeat, it's still
# considered alive
mock_utcnow.return_value = time_ + datetime.timedelta(seconds=30)
self.assertEqual([], self.dbapi.get_offline_conductors())
# 61 seconds passed since last heartbeat, it's dead
mock_utcnow.return_value = time_ + datetime.timedelta(seconds=61)
self.assertEqual([c.hostname], self.dbapi.get_offline_conductors())
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_online_conductors(self, mock_utcnow):
self.config(heartbeat_timeout=60, group='conductor')
time_ = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = time_
c = self._create_test_cdr()
# Only 30 seconds passed since last heartbeat, it's still
# considered alive
mock_utcnow.return_value = time_ + datetime.timedelta(seconds=30)
self.assertEqual([c.hostname], self.dbapi.get_online_conductors())
# 61 seconds passed since last heartbeat, it's dead
mock_utcnow.return_value = time_ + datetime.timedelta(seconds=61)
self.assertEqual([], self.dbapi.get_online_conductors())
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_list_hardware_type_interfaces(self, mock_utcnow):
self.config(heartbeat_timeout=60, group='conductor')
time_ = datetime.datetime(2000, 1, 1, 0, 0)
h = 'fake-host'
ht1 = 'hw-type-1'
ht2 = 'hw-type-2'
mock_utcnow.return_value = time_
self._create_test_cdr(hostname=h, hardware_types=[ht1, ht2])
expected = [
{
'hardware_type': ht1,
'interface_type': 'power',
'interface_name': 'ipmi',
'default': True,
},
{
'hardware_type': ht1,
'interface_type': 'power',
'interface_name': 'fake',
'default': False,
},
{
'hardware_type': ht2,
'interface_type': 'power',
'interface_name': 'ipmi',
'default': True,
},
{
'hardware_type': ht2,
'interface_type': 'power',
'interface_name': 'fake',
'default': False,
},
]
def _verify(expected, result):
for expected_row, row in zip(expected, result):
for k, v in expected_row.items():
self.assertEqual(v, getattr(row, k))
# with both hw types
result = self.dbapi.list_hardware_type_interfaces([ht1, ht2])
_verify(expected, result)
# with one hw type
result = self.dbapi.list_hardware_type_interfaces([ht1])
_verify(expected[:2], result)
# 61 seconds passed since last heartbeat, it's dead
mock_utcnow.return_value = time_ + datetime.timedelta(seconds=61)
result = self.dbapi.list_hardware_type_interfaces([ht1, ht2])
self.assertEqual([], result)
| 41.907317 | 79 | 0.623501 |
420afb42ef6ccb43a7bdc75f6128dd7f20b063ce | 4,113 | py | Python | python/torch_mlir/eager_mode/torch_mlir_tensor.py | rdadolf/torch-mlir | 86eb493a443ffceada475e33dcd648628b16a808 | ["Apache-2.0"] | 1 | 2021-11-30T06:55:32.000Z | 2021-11-30T06:55:32.000Z | python/torch_mlir/eager_mode/torch_mlir_tensor.py | rdadolf/torch-mlir | 86eb493a443ffceada475e33dcd648628b16a808 | ["Apache-2.0"] | null | null | null | python/torch_mlir/eager_mode/torch_mlir_tensor.py | rdadolf/torch-mlir | 86eb493a443ffceada475e33dcd648628b16a808 | ["Apache-2.0"] | 1 | 2022-03-07T12:29:58.000Z | 2022-03-07T12:29:58.000Z |
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
import warnings
import torch
from torch.utils._pytree import tree_map
from torch_mlir.eager_mode.torch_mlir_dispatch import (
try_torch_mlir_eager,
UnsupportedByTorchMlirEagerMode,
)
from torch_mlir_e2e_test.linalg_on_tensors_backends import refbackend
class TorchMLIRTensor(torch.Tensor):
"""Wrap torch.Tensor in orer to dispatch through torch-mlir instead of aten.
This class uses the _make_wrapper_subclass pattern to override __torch_dispatch__
in order to dispatch through torch-mlir instead of aten. Here we basically only unwrap and wrap
torch.Tensors. Most of the heavy lifting is done in the adjacent torch_mlir_dispatch module.
More documentation on how this pattern works can be found in this forum post
https://dev-discuss.pytorch.org/t/what-and-why-is-torch-dispatch/557
and this RFC
https://github.com/pytorch/rfcs/blob/master/RFC-0001-torch-function-for-methods.md#process-followed-during-a-functionmethod-call
and this repo with many examples
https://github.com/albanD/subclass_zoo
"""
elem: torch.Tensor
__slots__ = ["elem"]
@staticmethod
def __new__(cls, elem, *args, **kwargs):
r = torch.Tensor._make_wrapper_subclass(
cls,
elem.size(),
strides=elem.stride(),
storage_offset=elem.storage_offset(),
dtype=elem.dtype,
layout=elem.layout,
device=elem.device,
# Only float tensors can have gradients.
requires_grad=elem.dtype in {torch.float, torch.float32, torch.float64}
and (kwargs.get("requires_grad", False) or elem.requires_grad),
)
r.elem = elem.detach() if r.requires_grad else elem
return r
def __repr__(self):
if self.grad_fn:
return f"TorchMLIRTensor({self.elem}, grad_fn={self.grad_fn})"
else:
return f"TorchMLIRTensor({self.elem})"
@classmethod
def __torch_dispatch__(cls, func, _types, args=(), kwargs=None):
requires_grad = False
def check_grad(e):
nonlocal requires_grad
if isinstance(e, TorchMLIRTensor):
requires_grad |= e.requires_grad
tree_map(check_grad, args)
tree_map(check_grad, kwargs)
def unwrap(e):
if isinstance(e, TorchMLIRTensor):
return e.elem
if isinstance(e, torch.nn.Parameter):
return e.detach()
return e
def wrap(e):
nonlocal requires_grad
return (
TorchMLIRTensor(e, requires_grad=requires_grad)
if isinstance(e, torch.Tensor)
else e
)
unwrapped_args = tree_map(unwrap, args)
unwrapped_kwargs = tree_map(unwrap, kwargs)
try:
out = try_torch_mlir_eager(
func,
unwrapped_args,
unwrapped_kwargs,
backend=refbackend.RefBackendLinalgOnTensorsBackend(),
)
if isinstance(out, tuple):
out = [torch.from_numpy(o) for o in out]
else:
out = torch.from_numpy(out)
return tree_map(wrap, out)
except Exception as e:
if isinstance(e, UnsupportedByTorchMlirEagerMode):
warnings.warn(
f"Couldn't use TorchMLIR eager because current incompatibility: *{str(e)}*; running through PyTorch eager."
)
else:
warnings.warn(
f"Couldn't use TorchMLIR eager because of error: *{str(e)}*; "
f"running through PyTorch eager. Please file an issue at https://github.com/llvm/torch-mlir/issues"
)
return tree_map(wrap, func(*unwrapped_args, **unwrapped_kwargs))
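# Illustrative usage sketch added by the editor (an assumption, not part of the
# original torch-mlir file): wrapping plain torch.Tensors in TorchMLIRTensor
# routes subsequent ops through __torch_dispatch__ above, which tries torch-mlir
# and falls back to PyTorch eager on failure. The helper name is hypothetical.
def _torch_mlir_tensor_example():
    # Wrap ordinary tensors; gradients are not tracked in this demo.
    a = TorchMLIRTensor(torch.ones(2, 2))
    b = TorchMLIRTensor(torch.full((2, 2), 3.0))
    # The addition is intercepted by __torch_dispatch__ and the result re-wrapped.
    c = a + b
    # .elem recovers the underlying torch.Tensor result.
    return c.elem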
| 36.723214 | 132 | 0.625577 |
1adce2048b27813cb20c0d4f95ce3b08350bc956 | 3,614 | py | Python | tensorflow/contrib/eager/python/tfe_test.py | priyanshu-max/tensorflow | 5e97d96c809232a716d6b04ec9b128ebaf1b73f7 | ["Apache-2.0"] | 3 | 2017-10-27T05:37:59.000Z | 2018-05-25T02:46:40.000Z | tensorflow/contrib/eager/python/tfe_test.py | priyanshu-max/tensorflow | 5e97d96c809232a716d6b04ec9b128ebaf1b73f7 | ["Apache-2.0"] | null | null | null | tensorflow/contrib/eager/python/tfe_test.py | priyanshu-max/tensorflow | 5e97d96c809232a716d6b04ec9b128ebaf1b73f7 | ["Apache-2.0"] | 2 | 2018-09-24T10:57:31.000Z | 2018-10-26T03:03:53.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Eager Execution: Sanity tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.eager.python import tfe
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TFETest(test_util.TensorFlowTestCase):
def testMatmul(self):
x = [[2.]]
y = math_ops.matmul(x, x) # tf.matmul
self.assertAllEqual([[4.]], y.numpy())
def testInstantError(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'indices = 7 is not in \[0, 3\)'):
array_ops.gather([0, 1, 2], 7)
def testGradients(self):
def square(x):
return math_ops.multiply(x, x)
grad = tfe.gradients_function(square)
self.assertEquals([6], [x.numpy() for x in grad(3)])
def testGradOfGrad(self):
def square(x):
return math_ops.multiply(x, x)
grad = tfe.gradients_function(square)
gradgrad = tfe.gradients_function(lambda x: grad(x)[0])
self.assertEquals([2], [x.numpy() for x in gradgrad(3)])
def testCustomGrad(self):
@tfe.custom_gradient
def f(x):
y = math_ops.multiply(x, x)
def grad_fn(_):
return [x + y]
return y, grad_fn
# TODO(ashankar): This [0] should ideally not be needed.
grad = tfe.gradients_function(f, [0])
self.assertEquals([12], [x.numpy() for x in grad(3)])
def testGPU(self):
if tfe.num_gpus() <= 0:
self.skipTest('No GPUs available')
    # tf.Tensor.as_gpu_tensor() moves a tensor to GPU.
x = constant_op.constant([[1., 2.], [3., 4.]]).as_gpu_tensor()
# Alternatively, tfe.device() as a context manager places tensors and
# operations.
with tfe.device('gpu:0'):
x += 1.
# Without a device context, heuristics are used to place ops.
# In this case, ops.reduce_mean runs on the GPU.
reduction_indices = range(x.shape.ndims)
m = math_ops.reduce_mean(x, reduction_indices)
# m is on GPU, bring it back to CPU and compare.
self.assertEqual(3.5, m.as_cpu_tensor().numpy())
def testListDevices(self):
# Expect at least one device.
self.assertTrue(tfe.list_devices())
def testNumGPUs(self):
devices = tfe.list_devices()
self.assertEqual(len(devices) - 1, tfe.num_gpus())
def testCallingEnableEagerExecutionMoreThanOnce(self):
    # Note that eager.test.main() has already invoked enable_eager_execution().
with self.assertRaisesRegexp(
ValueError, r'Do not call tfe\.%s more than once in the same process' %
tfe.enable_eager_execution.__name__):
tfe.enable_eager_execution()
if __name__ == '__main__':
tfe.enable_eager_execution()
test.main()
| 33.155963 | 80 | 0.684283 |
9346377d0bdae552639bae8cb3b52ca0b2effa50 | 66,526 | py | Python | tensorflow/python/keras/callbacks_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | ["Apache-2.0"] | 7 | 2018-04-12T07:48:57.000Z | 2021-12-03T12:35:02.000Z | tensorflow/python/keras/callbacks_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | ["Apache-2.0"] | 6 | 2022-01-15T07:17:47.000Z | 2022-02-14T15:28:22.000Z | tensorflow/python/keras/callbacks_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | ["Apache-2.0"] | 2 | 2018-04-06T14:28:15.000Z | 2018-11-30T03:53:55.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the counts of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
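# Editor's illustrative sketch (an assumption, not part of the original test
# file): each wrapped hook simply bumps `method_counts` before delegating to the
# base implementation, so invoking a hook by hand is enough to see the count.
def _counter_usage_sketch():
  counter = Counter()
  counter.on_epoch_begin(0)
  counter.on_epoch_begin(1)
  assert counter.method_counts['on_epoch_begin'] == 2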
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2 if not is_sequence else None,
steps_per_epoch=5 if is_sequence else None,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
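    # Editor's note for the numpy parameterization above: 10 training samples
    # with batch_size=2 give 5 train batches per epoch, times 5 epochs = 25
    # train-batch hooks; the 4 validation samples give 2 test batches per
    # epoch, hence 10 test-batch hooks.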
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.evaluate(
x,
y,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.predict(
x,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=30,
              period=100) # The period should be ignored (this test verifies that).
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1]] * 16
train_label = [[0]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
class Bias(base_layer.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period ensuring the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
      # after that are close, if a ModelCheckpoint with
# load_weights_on_restart=True is given (so the model is restored at the
# beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegexp(IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
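      # Editor's note on the arithmetic: the schedule is evaluated last at
      # epoch 1 with decay_steps=2, so cos(pi * 1/2) = 0, cosine_decay_np = 0.5,
      # and the expected learning rate is 0.01 * 0.5 = 0.005.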
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
      # This should reduce the LR after the first epoch (due to high min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, due to \r\n line endings we may end up reading empty lines
# after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary background thread that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
    ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, dirnames, filenames) in os.walk(logdir):
del dirnames # unused
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in summary_iterator.summary_iterator(path):
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
elif kind == 'tensor' and tag != 'keras':
# Check for V2 scalar summaries, which have a different PB
# structure.
if event.summary.value[
0].metadata.plugin_data.plugin_name == 'scalars':
container = result.scalars
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
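# Editor's addition (illustrative sketch, not part of the original test suite):
# a minimal way to exercise `list_summaries` on a logdir written by a
# `TensorBoard` callback. The logdir argument and the expected tag are
# assumptions made purely for demonstration.
def _example_list_summaries_usage(logdir):
  summary_file = list_summaries(logdir)
  scalar_tags = {summary.tag for summary in summary_file.scalars}
  return 'epoch_loss' in scalar_tags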
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, dirnames, filenames) in os.walk(self.logdir):
del dirnames # unused
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def test_custom_summary(self):
if not testing_utils.should_run_tf_function():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = summary_pb2.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with summary_ops_v2.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return summary_ops_v2.write(
tag=tag,
tensor=math_ops.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', math_ops.reduce_sum(x))
return x
model = testing_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
  def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
    # Tracing was enabled only for the 10000th batch, which is never reached,
    # so no trace summaries should have been written.
self.assertEmpty(summary_file.tensors)
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
def test_get_most_recently_modified_file_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
    # Ensure the files have actually been written.
self.assertEqual(
set([
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
]), set(file_paths))
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
def test_some_file_not_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-2])
def test_get_same_file_if_file_name_equals_pattern(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
with open(file_path, 'w') as f:
f.write('foo bar')
self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
file_path)
def test_get_none_if_file_does_not_exist(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
self.assertLen(os.listdir(test_dir), 0)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
None)
def test_using_checkpoint_management_latest_checkpoint(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
ckpt_file_name = 'f.batchXepochY'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
with open(ckpt_file_path, 'w') as f:
f.write('dummy ckpt')
checkpoint_management.update_checkpoint_state_internal(
test_dir, ckpt_file_path)
file_paths = [
os.path.join(test_dir, file_name)
for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
f.write('foo bar')
# The result returned from checkpoint_management.latest_checkpoint takes
# priority, so even if it was written earlier, we should still return that.
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
ckpt_file_path)
if __name__ == '__main__':
test.main()
| 34.309438
| 80
| 0.646529
|
cd7645219b1b846450894e90a50a6d38a267615f
| 57
|
py
|
Python
|
mikaponics/task/filters/__init__.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 2
|
2019-04-30T23:51:41.000Z
|
2019-05-04T00:35:52.000Z
|
mikaponics/task/filters/__init__.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 27
|
2019-04-30T20:22:28.000Z
|
2022-02-10T08:10:32.000Z
|
mikaponics/task/filters/__init__.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | null | null | null |
from task.filters.task_item_filter import TaskItemFilter
| 28.5
| 56
| 0.894737
|
9fe6d221551805b6aba8278921e2231ca331d825
| 9,624
|
py
|
Python
|
bop_toolkit/scripts/eval_bop19.py
|
shbe-aau/multi-view-pose-estimation
|
22cea6cd09684fe655fb2214bc14856f589048e1
|
[
"MIT"
] | 2
|
2020-04-27T11:28:06.000Z
|
2020-05-16T18:37:40.000Z
|
bop_toolkit/scripts/eval_bop19.py
|
shbe-aau/multi-view-pose-estimation
|
22cea6cd09684fe655fb2214bc14856f589048e1
|
[
"MIT"
] | null | null | null |
bop_toolkit/scripts/eval_bop19.py
|
shbe-aau/multi-view-pose-estimation
|
22cea6cd09684fe655fb2214bc14856f589048e1
|
[
"MIT"
] | null | null | null |
# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)
# Center for Machine Perception, Czech Technical University in Prague
"""Evaluation script for the BOP Challenge 2019/2020."""
import os
import time
import argparse
import subprocess
import numpy as np
from bop_toolkit_lib import config
from bop_toolkit_lib import inout
from bop_toolkit_lib import misc
# PARAMETERS (some can be overwritten by the command line arguments below).
################################################################################
p = {
# Errors to calculate.
'errors': [
{
'n_top': -1,
'type': 'vsd',
'vsd_deltas': {
'hb': 15,
'icbin': 15,
'icmi': 15,
'itodd': 5,
'lm': 15,
'lmo': 15,
'ruapc': 15,
'tless': 15,
'tudl': 15,
'tyol': 15,
'ycbv': 15,
},
'vsd_taus': list(np.arange(0.05, 0.51, 0.05)),
'vsd_normalized_by_diameter': True,
'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
},
{
'n_top': -1,
'type': 'mssd',
'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
},
{
'n_top': -1,
'type': 'mspd',
'correct_th': [[th] for th in np.arange(5, 51, 5)]
},
],
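  # Illustrative (editor note): np.arange(0.05, 0.51, 0.05) yields the ten
  # thresholds 0.05, 0.10, ..., 0.50, so each 'correct_th' above is a list of
  # single-element lists such as [[0.05], [0.10], ..., [0.50]]; the MSPD entry
  # builds its thresholds the same way from np.arange(5, 51, 5), i.e. 5, 10, ..., 50.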
# Minimum visible surface fraction of a valid GT pose.
# -1 == k most visible GT poses will be considered, where k is given by
# the "inst_count" item loaded from "targets_filename".
#'visib_gt_min': -1,
'visib_gt_min': 0.1,
# See misc.get_symmetry_transformations().
'max_sym_disc_step': 0.01,
# Type of the renderer (used for the VSD pose error function).
'renderer_type': 'python', # Options: 'cpp', 'python'.
# Names of files with results for which to calculate the errors (assumed to be
# stored in folder p['results_path']). See docs/bop_challenge_2019.md for a
# description of the format. Example results can be found at:
# http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/
'result_filenames': [
'/relative/path/to/csv/with/results',
],
# Folder with results to be evaluated.
'results_path': config.results_path,
# Folder for the calculated pose errors and performance scores.
'eval_path': config.eval_path,
# File with a list of estimation targets to consider. The file is assumed to
# be stored in the dataset folder.
'targets_filename': 'test_targets_bop19.json',
# Folder containing the BOP datasets.
'datasets_path': config.datasets_path,
}
################################################################################
# Command line arguments.
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--renderer_type', default=p['renderer_type'])
parser.add_argument('--result_filenames',
default=','.join(p['result_filenames']),
help='Comma-separated names of files with results.')
parser.add_argument('--results_path', default=p['results_path'])
parser.add_argument('--eval_path', default=p['eval_path'])
parser.add_argument('--targets_filename', default=p['targets_filename'])
parser.add_argument('--datasets_path', default=p['datasets_path'])
args = parser.parse_args()
p['renderer_type'] = str(args.renderer_type)
p['result_filenames'] = args.result_filenames.split(',')
p['results_path'] = str(args.results_path)
p['eval_path'] = str(args.eval_path)
p['targets_filename'] = str(args.targets_filename)
p['datasets_path'] = str(args.datasets_path)
# Evaluation.
# ------------------------------------------------------------------------------
for result_filename in p['result_filenames']:
misc.log('===========')
misc.log('EVALUATING: {}'.format(result_filename))
misc.log('===========')
time_start = time.time()
# Volume under recall surface (VSD) / area under recall curve (MSSD, MSPD).
average_recalls = {}
# Name of the result and the dataset.
result_name = os.path.splitext(os.path.basename(result_filename))[0]
dataset = str(result_name.split('_')[1].split('-')[0])
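  # Illustrative (editor note): for a hypothetical file named
  # 'my-method_tless-test.csv', result_name is 'my-method_tless-test' and
  # dataset resolves to 'tless'.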
# Calculate the average estimation time per image.
ests = inout.load_bop_results(
os.path.join(p['results_path'], result_filename), version='bop19')
times = {}
times_available = True
for est in ests:
result_key = '{:06d}_{:06d}'.format(est['scene_id'], est['im_id'])
if est['time'] < 0:
# All estimation times must be provided.
times_available = False
break
elif result_key in times:
if abs(times[result_key] - est['time']) > 0.001:
raise ValueError(
'The running time for scene {} and image {} is not the same for '
'all estimates.'.format(est['scene_id'], est['im_id']))
else:
times[result_key] = est['time']
if times_available:
average_time_per_image = np.mean(list(times.values()))
else:
average_time_per_image = -1.0
# Evaluate the pose estimates.
for error in p['errors']:
# Calculate error of the pose estimates.
calc_errors_cmd = [
'python',
os.path.join(os.path.dirname(os.path.abspath(__file__)),'eval_calc_errors.py'),
'--n_top={}'.format(error['n_top']),
'--error_type={}'.format(error['type']),
'--result_filenames={}'.format(result_filename),
'--renderer_type={}'.format(p['renderer_type']),
'--results_path={}'.format(p['results_path']),
'--eval_path={}'.format(p['eval_path']),
'--targets_filename={}'.format(p['targets_filename']),
'--max_sym_disc_step={}'.format(p['max_sym_disc_step']),
'--skip_missing=1',
'--datasets_path={}'.format(p['datasets_path']),
]
if error['type'] == 'vsd':
vsd_deltas_str = \
','.join(['{}:{}'.format(k, v) for k, v in error['vsd_deltas'].items()])
calc_errors_cmd += [
'--vsd_deltas={}'.format(vsd_deltas_str),
'--vsd_taus={}'.format(','.join(map(str, error['vsd_taus']))),
'--vsd_normalized_by_diameter={}'.format(
error['vsd_normalized_by_diameter'])
]
misc.log('Running: ' + ' '.join(calc_errors_cmd))
if subprocess.call(calc_errors_cmd) != 0:
raise RuntimeError('Calculation of pose errors failed.')
# Paths (rel. to p['eval_path']) to folders with calculated pose errors.
# For VSD, there is one path for each setting of tau. For the other pose
# error functions, there is only one path.
error_dir_paths = {}
if error['type'] == 'vsd':
for vsd_tau in error['vsd_taus']:
error_sign = misc.get_error_signature(
error['type'], error['n_top'], vsd_delta=error['vsd_deltas'][dataset],
vsd_tau=vsd_tau)
error_dir_paths[error_sign] = os.path.join(result_name, error_sign)
else:
error_sign = misc.get_error_signature(error['type'], error['n_top'])
error_dir_paths[error_sign] = os.path.join(result_name, error_sign)
# Recall scores for all settings of the threshold of correctness (and also
# of the misalignment tolerance tau in the case of VSD).
recalls = []
# Calculate performance scores.
for error_sign, error_dir_path in error_dir_paths.items():
for correct_th in error['correct_th']:
calc_scores_cmd = [
'python',
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'eval_calc_scores.py'),
'--error_dir_paths={}'.format(error_dir_path),
'--eval_path={}'.format(p['eval_path']),
'--targets_filename={}'.format(p['targets_filename']),
'--visib_gt_min={}'.format(p['visib_gt_min']),
'--datasets_path={}'.format(p['datasets_path']),
]
calc_scores_cmd += ['--correct_th_{}={}'.format(
error['type'], ','.join(map(str, correct_th)))]
misc.log('Running: ' + ' '.join(calc_scores_cmd))
if subprocess.call(calc_scores_cmd) != 0:
raise RuntimeError('Calculation of scores failed.')
# Path to file with calculated scores.
score_sign = misc.get_score_signature(correct_th, p['visib_gt_min'])
scores_filename = 'scores_{}.json'.format(score_sign)
scores_path = os.path.join(
p['eval_path'], result_name, error_sign, scores_filename)
# Load the scores.
misc.log('Loading calculated scores from: {}'.format(scores_path))
scores = inout.load_json(scores_path)
recalls.append(scores['recall'])
average_recalls[error['type']] = np.mean(recalls)
misc.log('Recall scores: {}'.format(' '.join(map(str, recalls))))
misc.log('Average recall: {}'.format(average_recalls[error['type']]))
time_total = time.time() - time_start
misc.log('Evaluation of {} took {}s.'.format(result_filename, time_total))
# Calculate the final scores.
final_scores = {}
for error in p['errors']:
final_scores['bop19_average_recall_{}'.format(error['type'])] =\
average_recalls[error['type']]
# Final score for the given dataset.
final_scores['bop19_average_recall'] = np.mean([average_recalls['vsd'], average_recalls['mssd'], average_recalls['mspd']])
#final_scores['bop19_average_recall'] = np.mean([average_recalls['vsd']])
# Average estimation time per image.
final_scores['bop19_average_time_per_image'] = average_time_per_image
# Save the final scores.
final_scores_path = os.path.join(
p['eval_path'], result_name, 'scores_bop19.json')
inout.save_json(final_scores_path, final_scores)
# Print the final scores.
misc.log('FINAL SCORES:')
for score_name, score_value in final_scores.items():
misc.log('- {}: {}'.format(score_name, score_value))
misc.log('Done.')
| 36.180451
| 124
| 0.628221
|
c183023f61b1f05fd90932fc2a38d1f9cee4097a
| 584
|
py
|
Python
|
tumblr_scraper/__init__.py
|
giosali/tumblr-scraper
|
bfa91ba4576e3bc742f21b7be6807f712b4c2fd1
|
[
"MIT"
] | 1
|
2022-01-15T10:50:53.000Z
|
2022-01-15T10:50:53.000Z
|
tumblr_scraper/__init__.py
|
giosali/tumblr-scraper
|
bfa91ba4576e3bc742f21b7be6807f712b4c2fd1
|
[
"MIT"
] | null | null | null |
tumblr_scraper/__init__.py
|
giosali/tumblr-scraper
|
bfa91ba4576e3bc742f21b7be6807f712b4c2fd1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# _
# _/_ / //
# / , , _ _ _ / // _ ( _, _ __, ,_ _ _
# (__(_/_/ / / /_/_)(/_/ (_---/_)_(__/ (_(_/(__/|_)_(/_/ (_
# /|
# (/
"""
tumblr-scraper
~~~~~~~~~~~~~~
tumblr-scraper is a command-line scraper written in Python for tumblr.
"""
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __author__, __author_email__, __license__
from .__version__ import __copyright__
| 29.2
| 73
| 0.462329
|
6567e697eda68ad77e2dad94cb7cb361057064de
| 4,701
|
py
|
Python
|
mp/eval/evaluate.py
|
MECLabTUDA/OOD-Gen
|
f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e
|
[
"MIT"
] | null | null | null |
mp/eval/evaluate.py
|
MECLabTUDA/OOD-Gen
|
f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e
|
[
"MIT"
] | null | null | null |
mp/eval/evaluate.py
|
MECLabTUDA/OOD-Gen
|
f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Functions to calculate metrics and losses for subject dataloaders and datasets.
# The difference is that dataloaders may transform (e.g. resize) the
# targets in a way that affects the result.
# ------------------------------------------------------------------------------
from mp.data.pytorch.pytorch_dataset import PytorchDataset
from mp.eval.accumulator import Accumulator
from mp.eval.metrics.mean_scores import get_mean_scores
def dl_losses(dl, agent, loss_f):
r"""Calculate components of the given loss for a Dataloader"""
acc = Accumulator()
for data in dl:
inputs, targets = agent.get_inputs_targets(data)
outputs = agent.get_outputs(inputs)
# Calculate losses
loss_dict = loss_f.get_evaluation_dict(outputs, targets)
# Add to the accumulator
for key, value in loss_dict.items():
acc.add(key, value, count=len(inputs))
return acc
def dl_metrics(dl, agent, metrics):
r"""Calculate metrics for a Dataloader"""
acc = Accumulator()
for data in dl:
inputs, targets = agent.get_inputs_targets(data)
one_channeled_target = agent.predict_from_outputs(targets)
outputs = agent.get_outputs(inputs)
pred = agent.predict_from_outputs(outputs)
# Calculate metrics
scores_dict = get_mean_scores(one_channeled_target, pred, metrics=metrics,
label_names=agent.label_names,
label_weights=agent.scores_label_weights)
# Add to the accumulator
for key, value in scores_dict.items():
acc.add(key, value, count=len(inputs))
return acc
def ds_losses(ds, agent, loss_f):
r"""Calculate components of the loss function for a Dataset.
Args:
ds(PytorchDataset): a PytorchDataset
        agent(Agent): an agent
loss_f(LossAbstract): a loss function descending from LossAbstract
    Returns (dict[str -> dict]): {loss -> {subject_name -> value}}, with 2
additional entries per loss for 'mean' and 'std'. Note that the metric
is calculated per dataloader per dataset. So, for instance, the scores
for slices in a 2D dataloader are averaged.
"""
eval_dict = dict()
acc = Accumulator()
for instance_ix, instance in enumerate(ds.instances):
subject_name = instance.name
dl = ds.get_subject_dataloader(instance_ix)
subject_acc = dl_losses(dl, agent, loss_f)
# Add to the accumulator and eval_dict
for loss_key in subject_acc.get_keys():
value = subject_acc.mean(loss_key)
acc.add(loss_key, value, count=1)
if loss_key not in eval_dict:
eval_dict[loss_key] = dict()
eval_dict[loss_key][subject_name] = value
# Add mean and std values to the eval_dict
for loss_key in acc.get_keys():
eval_dict[loss_key]['mean'] = acc.mean(loss_key)
eval_dict[loss_key]['std'] = acc.std(loss_key)
return eval_dict
def ds_metrics(ds, agent, metrics):
r"""Calculate metrics for a Dataset.
Args:
ds(PytorchDataset): a PytorchDataset
        agent(Agent): an agent
metrics(list[str]): a list of metric names
    Returns (dict[str -> dict]): {metric -> {subject_name -> value}}, with 2
additional entries per metric for 'mean' and 'std'.
"""
eval_dict = dict()
acc = Accumulator()
for instance_ix, instance in enumerate(ds.instances):
subject_name = instance.name
target = instance.y.tensor.to(agent.device)
pred = ds.predictor.get_subject_prediction(agent, instance_ix)
# Calculate metrics
scores_dict = get_mean_scores(target, pred, metrics=metrics,
label_names=agent.label_names,
label_weights=agent.scores_label_weights)
# Add to the accumulator and eval_dict
for metric_key, value in scores_dict.items():
acc.add(metric_key, value, count=1)
if metric_key not in eval_dict:
eval_dict[metric_key] = dict()
eval_dict[metric_key][subject_name] = value
# Add mean and std values to the eval_dict
for metric_key in acc.get_keys():
eval_dict[metric_key]['mean'] = acc.mean(metric_key)
eval_dict[metric_key]['std'] = acc.std(metric_key)
return eval_dict
def ds_losses_metrics(ds, agent, loss_f, metrics):
r"""Combination of metrics and losses into one dictionary."""
eval_dict = ds_losses(ds, agent, loss_f)
if metrics:
eval_dict.update(ds_metrics(ds, agent, metrics))
return eval_dict
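# Editor's addition (illustrative sketch, not part of the original module):
# one way the dictionaries returned above may be consumed. The metric and
# subject names mentioned in the docstring are hypothetical.
def _example_report(eval_dict):
    r"""Format a result dict such as
    {'Dice': {'subj_01': 0.91, 'mean': 0.90, 'std': 0.01}}
    as one summary line per loss/metric."""
    lines = []
    for key, per_subject in eval_dict.items():
        lines.append('{}: mean={:.4f}, std={:.4f}'.format(
            key, per_subject['mean'], per_subject['std']))
    return '\n'.join(lines)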
| 42.351351
| 82
| 0.639226
|
95f0db267734f743e7ac4f286cb84b72a4cf0c47
| 906
|
py
|
Python
|
scripts/fundamentals/list_comprehension.py
|
duttashi/learnpy
|
c08b76b173b06d66187e51a6939d55d5dd12cb5a
|
[
"MIT"
] | null | null | null |
scripts/fundamentals/list_comprehension.py
|
duttashi/learnpy
|
c08b76b173b06d66187e51a6939d55d5dd12cb5a
|
[
"MIT"
] | 77
|
2019-04-20T06:54:19.000Z
|
2022-01-16T08:15:20.000Z
|
scripts/fundamentals/list_comprehension.py
|
duttashi/learnpy
|
c08b76b173b06d66187e51a6939d55d5dd12cb5a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 17:12:03 2020
List comprehension
@author: Ashish
"""
numbers = [1, 3, 5]
squares = [x ** 2 for x in numbers]
# -- Dealing with strings --
friends = ["Rolf", "Sam", "Samantha", "Saurabh", "Jen"]
starts_s = []
for friend in friends:
if friend.startswith("S"):
starts_s.append(friend)
print(starts_s)
# -- Make a new list of friends whose names start with S --
friends = ["Rolf", "Sam", "Samantha", "Saurabh", "Jen"]
starts_s = [friend for friend in friends if friend.startswith("S")]
print(starts_s)
# -- List comprehension creates a _new_ list --
friends = ["Sam", "Samantha", "Saurabh"]
starts_s = [friend for friend in friends if friend.startswith("S")] # same as above
print(friends)
print(starts_s)
print(friends is starts_s)
print("friends: ", id(friends), " starts_s: ", id(starts_s))
| 23.230769
| 85
| 0.63245
|
7f5f32c154fe6d976c43b7dd0ead68234a49816f
| 17,112
|
py
|
Python
|
uuv_simulator/uuv_world_plugins/uuv_nc_parser/scripts/connect_ocean_data.py
|
laughlinbarker/underice_ekf
|
d74a83b2d02cef986fc904cf588a408382d728a6
|
[
"BSD-2-Clause"
] | null | null | null |
uuv_simulator/uuv_world_plugins/uuv_nc_parser/scripts/connect_ocean_data.py
|
laughlinbarker/underice_ekf
|
d74a83b2d02cef986fc904cf588a408382d728a6
|
[
"BSD-2-Clause"
] | null | null | null |
uuv_simulator/uuv_world_plugins/uuv_nc_parser/scripts/connect_ocean_data.py
|
laughlinbarker/underice_ekf
|
d74a83b2d02cef986fc904cf588a408382d728a6
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import rostopic
import rosgraph
import message_filters
from uuv_nc_parser import NCParser
from os.path import isfile
import sys
import numpy as np
from threading import Lock
from copy import deepcopy
from tf.transformations import quaternion_matrix, quaternion_about_axis
from uuv_nc_parser.srv import *
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
from geometry_msgs.msg import Vector3, Point32
from sensor_msgs.msg import Temperature, PointCloud, ChannelFloat32
from gazebo_msgs.msg import ModelStates
from gazebo_msgs.srv import GetModelProperties
class OceanDataPlayBack:
def __init__(self):
q = quaternion_about_axis(np.pi, (1, 0, 0))
self._toNEDrot = quaternion_matrix(q)[0:3, 0:3]
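        # Editor's note (illustrative): a rotation of pi about the x-axis has
        # the matrix diag(1, -1, -1), so np.dot(self._toNEDrot, [x, y, z])
        # gives (x, -y, -z); this frame flip is used throughout this class.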
self._lock = Lock()
self._filename = ''
if rospy.has_param('~filename'):
self._filename = rospy.get_param('~filename')
if rospy.has_param('~labels'):
labels = rospy.get_param('~labels')
else:
labels = None
self._ocean_data = NCParser(self._filename, labels)
print self._ocean_data
else:
raise rospy.ROSException('NC data filename not available')
if rospy.has_param('~fixed_time'):
self._fixed_time = rospy.get_param('~fixed_time')
print 'Using fixed time=', self._fixed_time
else:
self._fixed_time = None
if rospy.has_param('~loop_time'):
self._loop_time = rospy.get_param('~loop_time')
else:
self._loop_time = False
if rospy.has_param('~x_offset'):
self._x_offset = rospy.get_param('~x_offset')
else:
self._x_offset = 0.0
if rospy.has_param('~center_x'):
if rospy.get_param('~center_x'):
self._x_offset = self._ocean_data.min_x + (self._ocean_data.max_x - self._ocean_data.min_x) / 2.0
if rospy.has_param('~y_offset'):
self._y_offset = rospy.get_param('~y_offset')
else:
self._y_offset = 0.0
if rospy.has_param('~center_y'):
if rospy.get_param('~center_y'):
self._y_offset = self._ocean_data.min_y + (self._ocean_data.max_y - self._ocean_data.min_y) / 2.0
if rospy.has_param('~variables'):
self._variables = rospy.get_param('~variables')
else:
self._variables = None
self._time_offset = 0
if rospy.has_param('~time_offset'):
offset = rospy.get_param('~time_offset')
if 'day' in offset:
self._time_offset += int(offset['day']) * self._ocean_data.DAY2HOUR * self._ocean_data.HOUR2MIN * self._ocean_data.MIN2SEC
if 'hour' in offset:
self._time_offset += int(offset['hour']) * self._ocean_data.HOUR2MIN * self._ocean_data.MIN2SEC
if 'min' in offset:
self._time_offset += int(offset['min']) * self._ocean_data.MIN2SEC
print 'Starting time: %d days, %d hours, %d minutes' % (int(offset['day']),
int(offset['hour']),
int(offset['min']))
print 'Starting time in seconds:', self._time_offset
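        # Illustrative arithmetic (editor note): assuming the usual conversion
        # constants DAY2HOUR=24, HOUR2MIN=60 and MIN2SEC=60, an offset of
        # {day: 1, hour: 2, min: 30} maps to 86400 + 7200 + 1800 = 95400 s.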
self._services = dict()
self._services['interpolate_nc_data'] = rospy.Service(
'interpolate_nc_data', GetNCData, self.get_nc_data)
self._services['get_nc_variables'] = rospy.Service(
'get_nc_variables', GetNCVariables, self.get_nc_variables)
self._services['get_nc_wind_velocity'] = rospy.Service(
'get_nc_wind_velocity', GetNCVelocity, self.get_nc_wind_velocity)
self._services['get_nc_current_velocity'] = rospy.Service(
'get_nc_current_velocity', GetNCVelocity,
self.get_nc_current_velocity)
###############################################################
# Initializing topics for each vehicle
###############################################################
# Table of vehicle positions
self._vehicle_pos = dict()
# Individual topics for each interpolated data
self._topics = dict()
# List of static objects to be ignored
self._static_objs = list()
# Generate point clouds with all the sampled point variables
self._pc_variables = dict()
for var in self._variables:
self._pc_variables[var] = rospy.Publisher('/ocean_data/%s' % var,
PointCloud,
queue_size=1)
try:
rospy.wait_for_service('/gazebo/get_model_properties', timeout=2)
except rospy.ROSException:
print 'Service not available! Closing node...'
sys.exit(-1)
try:
self._model_prop_srv = rospy.ServiceProxy(
'/gazebo/get_model_properties', GetModelProperties)
except rospy.ServiceException, e:
print 'Service call failed, error=', e
sys.exit(-1)
self._sub_gazebo_models = rospy.Subscriber(
'/gazebo/model_states', ModelStates, self.update_vehicle_pos)
self._update_rate = 50.0
if rospy.has_param('~update_rate'):
rate = rospy.get_param('~update_rate')
if rate > 0:
self._update_rate = float(rate)
else:
print 'Invalid update rate, keeping default of 50 Hz'
if rospy.has_param('~current_velocity'):
self._current_vel_config = rospy.get_param('~current_velocity')
self._timer_current = rospy.Timer(rospy.Duration(1 / self._update_rate),
self.publish_current_velocity)
print 'Publishing local current velocity'
else:
self._current_vel_config = None
if rospy.has_param('~wind_velocity'):
self._wind_vel_config = rospy.get_param('~wind_velocity')
self._timer_wind = rospy.Timer(rospy.Duration(1 / self._update_rate),
self.publish_wind_velocity)
print 'Publishing local wind velocity'
else:
self._wind_vel_config = None
self._timer_var = rospy.Timer(rospy.Duration(1 / self._update_rate),
self.publish_variables)
def update_vehicle_pos(self, msg):
with self._lock:
            # Remove vehicles that are no longer present in the model states
remove_names = list()
for name in self._vehicle_pos:
if name not in msg.name:
remove_names.append(name)
if len(remove_names):
for name in remove_names:
del self._vehicle_pos[name]
del self._topics[name]
            # Check whether any new objects are available
new_vehicles = list()
for name in msg.name:
if name not in self._vehicle_pos and name not in self._static_objs:
new_vehicles.append(name)
if len(new_vehicles) > 0:
for name in new_vehicles:
resp = self._model_prop_srv(name)
if not resp.is_static and rospy.has_param('/%s/robot_description' % name):
print 'NEW VEHICLE DETECTED:', name
self._vehicle_pos[name] = np.zeros(3)
# Create the topics for the new vehicle
self._topics[name] = dict()
self._topics[name]['current_velocity'] = rospy.Publisher('/%s/current_velocity' % name, Vector3, queue_size=1)
self._topics[name]['wind_velocity'] = rospy.Publisher('/%s/wind_velocity' % name, Vector3, queue_size=1)
for var in self._variables:
if 'temperature' in var.lower():
self._topics[name][var] = rospy.Publisher('/%s/%s' % (name, var), Temperature, queue_size=1)
else:
self._topics[name][var] = rospy.Publisher('/%s/%s' % (name, var), FloatStamped, queue_size=1)
else:
print 'Static object found:', name
self._static_objs.append(name)
# Updating the position of the non-static objects in the simulation
for name in self._vehicle_pos:
for i in range(len(msg.name)):
if name == msg.name[i]:
self._vehicle_pos[name] = np.array([msg.pose[i].position.x,
msg.pose[i].position.y,
msg.pose[i].position.z])
break
def publish_wind_velocity(self, event):
if self._wind_vel_config is None:
return True
t = rospy.get_time()
with self._lock:
for name in self._vehicle_pos:
w_east = self._interpolate(
self._wind_vel_config['w_east'],
self._vehicle_pos[name][0],
self._vehicle_pos[name][1],
self._vehicle_pos[name][2],
t)
w_north = self._interpolate(
self._wind_vel_config['w_north'],
self._vehicle_pos[name][0],
self._vehicle_pos[name][1],
self._vehicle_pos[name][2],
t)
nedVel = np.array([w_north, w_east, 0])
enuVel = np.dot(self._toNEDrot.T, nedVel)
output = Vector3(*enuVel)
self._topics[name]['wind_velocity'].publish(output)
return True
def publish_current_velocity(self, event):
if self._current_vel_config is None:
return True
t = rospy.get_time()
with self._lock:
for name in self._vehicle_pos:
u_east = self._interpolate(
self._current_vel_config['u_east'],
self._vehicle_pos[name][0],
self._vehicle_pos[name][1],
self._vehicle_pos[name][2],
t)
v_north = self._interpolate(
self._current_vel_config['v_north'],
self._vehicle_pos[name][0],
self._vehicle_pos[name][1],
self._vehicle_pos[name][2],
t)
nedVel = np.array([v_north, u_east, 0])
enuVel = np.dot(self._toNEDrot.T, nedVel)
output = Vector3(*enuVel)
self._topics[name]['current_velocity'].publish(output)
return True
def publish_variables(self, event):
with self._lock:
t = rospy.get_time()
for var in self._variables:
pc_msg = PointCloud()
pc_msg.header.stamp = rospy.Time.now()
pc_msg.header.frame_id = 'world'
for name in self._vehicle_pos:
value = self._interpolate(
var,
self._vehicle_pos[name][0],
self._vehicle_pos[name][1],
self._vehicle_pos[name][2],
t)
# Updating the point cloud for this variable
pc_msg.points.append(Point32(self._vehicle_pos[name][0],
self._vehicle_pos[name][1],
self._vehicle_pos[name][2]))
pc_msg.channels.append(ChannelFloat32())
pc_msg.channels[-1].name = 'intensity'
pc_msg.channels[-1].values.append(value)
# Create the message objects
if 'temperature' in var.lower():
msg = Temperature()
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = '%s/base_link' % name
                        # TODO Read the unit of temperature from the NC file
# Converting to Celsius
msg.temperature = value - 273.15
else:
msg = FloatStamped()
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = '%s/base_link' % name
msg.data = value
self._topics[name][var].publish(msg)
self._pc_variables[var].publish(pc_msg)
return True
def _get_all_variables(self):
var = list()
options = deepcopy(self._variables)
if self._current_vel_config is not None:
options += self._current_vel_config.values()
if self._wind_vel_config is not None:
options += self._wind_vel_config.values()
for v in options:
if v in self._ocean_data.variable_names:
var.append(v)
else:
print '%s not a valid variable' % v
return var
def get_nc_wind_velocity(self, request):
output = Vector3(0, 0, 0)
if self._wind_vel_config is not None:
w_east = self._interpolate(
self._wind_vel_config['w_east'],
request.x,
request.y,
request.z,
request.time)
w_north = self._interpolate(
self._wind_vel_config['w_north'],
request.x,
request.y,
request.z,
request.time)
nedVel = np.array([w_north, w_east, 0])
enuVel = np.dot(self._toNEDrot.T, nedVel)
output = Vector3(*enuVel)
return GetNCVelocityResponse(output)
def get_nc_current_velocity(self, request):
output = Vector3(0, 0, 0)
if self._current_vel_config is not None:
u_east = self._interpolate(
self._current_vel_config['u_east'],
request.x,
request.y,
request.z,
request.time)
v_north = self._interpolate(
self._current_vel_config['v_north'],
request.x,
request.y,
request.z,
request.time)
nedVel = np.array([v_north, u_east, 0])
enuVel = np.dot(self._toNEDrot.T, nedVel)
output = Vector3(*enuVel)
return GetNCVelocityResponse(output)
def get_nc_variables(self, request):
return GetNCVariablesResponse(self._variables)
def get_nc_data(self, request):
if request.variable not in self._get_all_variables():
print 'Invalid variable, var_name=', request.variable
return GetNCDataResponse(0.0)
return GetNCDataResponse(
self._interpolate(request.variable,
request.x,
request.y,
request.z,
request.time))
def _interpolate(self, variable, x, y, z, time):
ENUpos = np.array([x, y, z])
# Since the odometry given by Gazebo is in ENU standard, transform into
# NED to interpolate on the ocean data
pos = np.dot(self._toNEDrot, ENUpos)
# Add x and y offsets
x = pos[0] + self._x_offset
y = pos[1] + self._y_offset
z = pos[2]
if not self._loop_time:
# Use fixed or simulation time
t = (time if self._fixed_time is None else self._fixed_time)
else:
# Loop the time vector, if needed
t = time % self._ocean_data.end_time
t += float(self._time_offset)
# Interpolate the given variables on the current position and time
output = self._ocean_data.interpolate(
variable, x, y, z, t)
return output
if __name__ == '__main__':
print 'Ocean data playback'
rospy.init_node('connect_to_ocean_data')
try:
pb = OceanDataPlayBack()
rospy.spin()
except rospy.ROSInterruptException:
print('caught exception')
| 40.453901
| 138
| 0.541316
|
3d1cd72766d275cab484315ae40f22625bd864e3
| 3,048
|
py
|
Python
|
SCRAPERS/YFSpider/YFSpider/settings.py
|
tgtads/yahooFinanceEventStudy
|
46d8f64a302178328622c14a538686342d4012bc
|
[
"MIT"
] | null | null | null |
SCRAPERS/YFSpider/YFSpider/settings.py
|
tgtads/yahooFinanceEventStudy
|
46d8f64a302178328622c14a538686342d4012bc
|
[
"MIT"
] | null | null | null |
SCRAPERS/YFSpider/YFSpider/settings.py
|
tgtads/yahooFinanceEventStudy
|
46d8f64a302178328622c14a538686342d4012bc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for YFSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'YFSpider'
SPIDER_MODULES = ['YFSpider.spiders']
NEWSPIDER_MODULE = 'YFSpider.spiders'
LOG_LEVEL = 'WARNING'
# LOG_LEVEL = 'INFO'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'YFSpider (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'YFSpider.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'YFSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'YFSpider.pipelines.MainPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
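# The MainPipeline referenced in ITEM_PIPELINES above lives in YFSpider/pipelines.py,
# which is not shown here. A minimal sketch of what such a Scrapy pipeline can look
# like (the body below is a hypothetical illustration, not this project's code):
#
# class MainPipeline(object):
#     def process_item(self, item, spider):
#         # Pass scraped items through unchanged; a real pipeline would clean or store them
#         return item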
| 34.247191
| 109
| 0.783136
|
1b751c2214d7cb3ceda7d11fb5a9b61abafade8f
| 5,886
|
py
|
Python
|
examples/pipeline/hetero_sbt/pipeline-hetero-sbt-regression-multi-host.py
|
hubert-he/FATE
|
6758e150bd7ca7d6f788f9a7a8c8aea7e6500363
|
[
"Apache-2.0"
] | 3,787
|
2019-08-30T04:55:10.000Z
|
2022-03-31T23:30:07.000Z
|
examples/pipeline/hetero_sbt/pipeline-hetero-sbt-regression-multi-host.py
|
hubert-he/FATE
|
6758e150bd7ca7d6f788f9a7a8c8aea7e6500363
|
[
"Apache-2.0"
] | 1,439
|
2019-08-29T16:35:52.000Z
|
2022-03-31T11:55:31.000Z
|
examples/pipeline/hetero_sbt/pipeline-hetero-sbt-regression-multi-host.py
|
hubert-he/FATE
|
6758e150bd7ca7d6f788f9a7a8c8aea7e6500363
|
[
"Apache-2.0"
] | 1,179
|
2019-08-29T16:18:32.000Z
|
2022-03-31T12:55:38.000Z
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component.dataio import DataIO
from pipeline.component.hetero_secureboost import HeteroSecureBoost
from pipeline.component.intersection import Intersection
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from pipeline.component.evaluation import Evaluation
from pipeline.interface.model import Model
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
backend = config.backend
work_mode = config.work_mode
# data sets
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data_0 = {"name": "motor_hetero_host_1", "namespace": f"experiment{namespace}"}
host_train_data_1 = {"name": "motor_hetero_host_2", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data_0 = {"name": "motor_hetero_host_1", "namespace": f"experiment{namespace}"}
host_validate_data_1 = {"name": "motor_hetero_host_2", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=hosts,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=hosts[0]).component_param(table=host_train_data_0)
reader_0.get_party_instance(role="host", party_id=hosts[1]).component_param(table=host_train_data_1)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=hosts[0]).component_param(table=host_validate_data_0)
reader_1.get_party_instance(role="host", party_id=hosts[1]).component_param(table=host_validate_data_1)
dataio_0, dataio_1 = DataIO(name="dataio_0"), DataIO(name="dataio_1")
dataio_0.get_party_instance(role="guest", party_id=guest).component_param(with_label=True, output_format="dense",
label_name='motor_speed', label_type="float")
dataio_0.get_party_instance(role="host", party_id=hosts[0]).component_param(with_label=False)
dataio_0.get_party_instance(role="host", party_id=hosts[1]).component_param(with_label=False)
dataio_1.get_party_instance(role="guest", party_id=guest).component_param(with_label=True, output_format="dense",
label_name="motor_speed", label_type="float")
dataio_1.get_party_instance(role="host", party_id=hosts[0]).component_param(with_label=False)
dataio_1.get_party_instance(role="host", party_id=hosts[1]).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="regression",
objective_param={"objective": "lse"},
encrypt_param={"method": "iterativeAffine"},
tree_param={"max_depth": 3},
validation_freqs=1)
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
pipeline.add_component(dataio_1, data=Data(data=reader_1.output.data), model=Model(dataio_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=dataio_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=dataio_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
job_parameters = JobParameters(backend=backend, work_mode=work_mode)
pipeline.fit(job_parameters)
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
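# Typical invocation, assuming a deployed FATE cluster, uploaded data sets matching the
# names above, and a config file in the layout of examples/config.yaml:
#
#   python pipeline-hetero-sbt-regression-multi-host.py -config ../../config.yaml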
| 48.644628
| 123
| 0.698097
|
11d964943942a907dfddfc40812d1f47c1cd057e
| 5,428
|
py
|
Python
|
tests/test_trainer/test_pipeline/model/resnet.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 1,630
|
2021-10-30T01:00:27.000Z
|
2022-03-31T23:02:41.000Z
|
tests/test_trainer/test_pipeline/model/resnet.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 166
|
2021-10-30T01:03:01.000Z
|
2022-03-31T14:19:07.000Z
|
tests/test_trainer/test_pipeline/model/resnet.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 253
|
2021-10-30T06:10:29.000Z
|
2022-03-31T13:30:06.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from colossalai.registry import LAYERS
from colossalai.registry import MODELS
from colossalai.nn.model import ModelFromConfig
@MODELS.register_module
class VanillaResNet(ModelFromConfig):
"""ResNet from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
"""
def __init__(
self,
num_cls: int,
block_type: str,
layers: List[int],
norm_layer_type: str = 'BatchNorm2d',
in_channels: int = 3,
groups: int = 1,
width_per_group: int = 64,
zero_init_residual: bool = False,
replace_stride_with_dilation: Optional[List[bool]] = None,
dilations=(1, 1, 1, 1)
) -> None:
super().__init__()
self.inplanes = 64
self.zero_init_residual = zero_init_residual
self.blocks = layers
self.block_expansion = LAYERS.get_module(block_type).expansion
self.dilations = dilations
self.reslayer_common_cfg = dict(
type='ResLayer',
block_type=block_type,
norm_layer_type=norm_layer_type,
groups=groups,
base_width=width_per_group
)
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.layers_cfg = [
# conv1
dict(type='Conv2d',
in_channels=in_channels,
out_channels=self.inplanes,
kernel_size=7,
stride=2,
padding=3,
bias=False),
# bn1
dict(
type=norm_layer_type,
num_features=self.inplanes
),
# relu
dict(
type='ReLU',
inplace=True
),
# maxpool
dict(
type='MaxPool2d',
kernel_size=3,
stride=2,
padding=1
),
# layer 1
dict(
inplanes=self.inplanes,
planes=64,
blocks=self.blocks[0],
dilation=self.dilations[0],
**self.reslayer_common_cfg
),
# layer 2
dict(
inplanes=64 * self.block_expansion,
planes=128,
blocks=self.blocks[1],
stride=2,
dilate=replace_stride_with_dilation[0],
dilation=self.dilations[1],
**self.reslayer_common_cfg
),
# layer 3
dict(
inplanes=128 * self.block_expansion,
planes=256,
blocks=layers[2],
stride=2,
dilate=replace_stride_with_dilation[1],
dilation=self.dilations[2],
**self.reslayer_common_cfg
),
# layer 4
dict(
inplanes=256 * self.block_expansion,
planes=512,
blocks=layers[3], stride=2,
dilate=replace_stride_with_dilation[2],
dilation=self.dilations[3],
**self.reslayer_common_cfg
),
# avg pool
dict(
type='AdaptiveAvgPool2d',
output_size=(1, 1)
),
# flatten
dict(
type='LambdaWrapper',
func=lambda mod, x: torch.flatten(x, 1)
),
# linear
dict(
type='Linear',
in_features=512 * self.block_expansion,
out_features=num_cls
)
]
def forward(self, x: Tensor):
for layer in self.layers:
x = layer(x)
return x
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, LAYERS.get_module('ResNetBottleneck')):
# type: ignore[arg-type]
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, LAYERS.get_module('ResNetBasicBlock')):
# type: ignore[arg-type]
nn.init.constant_(m.bn2.weight, 0)
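# Construction sketch with hypothetical values; it assumes the 'ResNetBottleneck' block
# and 'BatchNorm2d' norm layer are registered in colossalai's LAYERS registry and that
# ModelFromConfig materializes self.layers from self.layers_cfg:
#
#   model = VanillaResNet(num_cls=10, block_type='ResNetBottleneck', layers=[3, 4, 6, 3])
#   model.init_weights()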
| 33.097561
| 106
| 0.511606
|
3aa5764d9d841dc42d19b58f35d730dbfc9460ee
| 1,455
|
py
|
Python
|
functional_test/live_tests/orders_test.py
|
ikr0m/scala-pet-store
|
82e90c788e8daf14348b4cf56d6b3840ee8bda80
|
[
"Apache-2.0"
] | 997
|
2017-05-22T06:21:17.000Z
|
2022-03-29T20:36:23.000Z
|
functional_test/live_tests/orders_test.py
|
ikr0m/scala-pet-store
|
82e90c788e8daf14348b4cf56d6b3840ee8bda80
|
[
"Apache-2.0"
] | 222
|
2017-07-12T17:05:28.000Z
|
2022-03-28T14:36:48.000Z
|
functional_test/live_tests/orders_test.py
|
ikr0m/scala-pet-store
|
82e90c788e8daf14348b4cf56d6b3840ee8bda80
|
[
"Apache-2.0"
] | 227
|
2017-08-04T03:31:07.000Z
|
2022-03-29T20:37:22.000Z
|
import pytest
from hamcrest import assert_that, is_, none, not_none
from pet_store_client import PetStoreClient
def test_place_order(pet_context, customer_context, pet_store_client):
order = {
"petId": pet_context['id'],
"status": "Placed",
"complete": False
}
response = pet_store_client.place_order(order)
order = response.json()
assert_that(order['status'], is_('Placed'))
assert_that(order['complete'], is_(False))
assert_that(order['id'], is_(not_none()))
assert_that(order['shipDate'], is_(none()))
def test_get_order(pet_context, customer_context, pet_store_client):
order = {
"petId": pet_context['id'],
"status": "Placed",
"complete": False
}
response = pet_store_client.place_order(order)
placed_order = response.json()
order_id = placed_order['id']
response = pet_store_client.get_order(order_id)
order = response.json()
assert_that(order, is_(placed_order))
def test_delete_order(pet_context, customer_context, admin_context, pet_store_client):
order = {
"petId": pet_context['id'],
"status": "Placed",
"complete": False
}
customer_context()
response = pet_store_client.place_order(order)
placed_order = response.json()
order_id = placed_order['id']
admin_context()
response = pet_store_client.delete_order(order_id)
assert_that(response.status_code, is_(200))
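# These tests depend on pytest fixtures (pet_context, customer_context, admin_context,
# pet_store_client) defined elsewhere, presumably in a conftest.py. A hypothetical
# minimal fixture sketch (the real constructor arguments may differ):
#
#   @pytest.fixture
#   def pet_store_client():
#       return PetStoreClient()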
| 28.529412
| 86
| 0.679038
|
e4024d9760dd5ad661b916bccd11ca4eb9eb5ce6
| 12,904
|
py
|
Python
|
sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mariadb/operations/_server_security_alert_policies_operations.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 2
|
2019-08-23T21:14:00.000Z
|
2021-09-07T18:32:34.000Z
|
sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mariadb/operations/_server_security_alert_policies_operations.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mariadb/operations/_server_security_alert_policies_operations.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServerSecurityAlertPoliciesOperations(object):
"""ServerSecurityAlertPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mariadb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
server_name, # type: str
security_alert_policy_name, # type: Union[str, "_models.SecurityAlertPolicyName"]
**kwargs # type: Any
):
# type: (...) -> "_models.ServerSecurityAlertPolicy"
"""Get a server's security alert policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param security_alert_policy_name: The name of the security alert policy.
:type security_alert_policy_name: str or ~azure.mgmt.rdbms.mariadb.models.SecurityAlertPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerSecurityAlertPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mariadb.models.ServerSecurityAlertPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerSecurityAlertPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'securityAlertPolicyName': self._serialize.url("security_alert_policy_name", security_alert_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
server_name, # type: str
security_alert_policy_name, # type: Union[str, "_models.SecurityAlertPolicyName"]
parameters, # type: "_models.ServerSecurityAlertPolicy"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ServerSecurityAlertPolicy"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServerSecurityAlertPolicy"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'securityAlertPolicyName': self._serialize.url("security_alert_policy_name", security_alert_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServerSecurityAlertPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
server_name, # type: str
security_alert_policy_name, # type: Union[str, "_models.SecurityAlertPolicyName"]
parameters, # type: "_models.ServerSecurityAlertPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServerSecurityAlertPolicy"]
"""Creates or updates a threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param security_alert_policy_name: The name of the threat detection policy.
:type security_alert_policy_name: str or ~azure.mgmt.rdbms.mariadb.models.SecurityAlertPolicyName
:param parameters: The server security alert policy.
:type parameters: ~azure.mgmt.rdbms.mariadb.models.ServerSecurityAlertPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServerSecurityAlertPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mariadb.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerSecurityAlertPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
security_alert_policy_name=security_alert_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServerSecurityAlertPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'securityAlertPolicyName': self._serialize.url("security_alert_policy_name", security_alert_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}'} # type: ignore
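# Usage sketch with placeholder resource names; it assumes azure-identity is available
# for credentials and that the ServerSecurityAlertPolicy model accepts a 'state' field,
# as in the other RDBMS flavors of this SDK:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
#   from azure.mgmt.rdbms.mariadb.models import ServerSecurityAlertPolicy
#   client = MariaDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.server_security_alert_policies.begin_create_or_update(
#       resource_group_name="my-rg",
#       server_name="my-mariadb-server",
#       security_alert_policy_name="Default",
#       parameters=ServerSecurityAlertPolicy(state="Enabled"),
#   )
#   policy = poller.result()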
| 52.455285
| 236
| 0.686376
|
f323c9e7bccb6a07d971e30e8bb403b80c1564b5
| 809
|
py
|
Python
|
h1watch.py
|
zadewg/VARIOUS
|
3ab6df37b95c6e98ccbee1912d7497cd4aca7e98
|
[
"MIT"
] | 3
|
2019-04-18T10:31:34.000Z
|
2022-03-26T03:09:29.000Z
|
h1watch.py
|
zadewg/VARIOUS
|
3ab6df37b95c6e98ccbee1912d7497cd4aca7e98
|
[
"MIT"
] | null | null | null |
h1watch.py
|
zadewg/VARIOUS
|
3ab6df37b95c6e98ccbee1912d7497cd4aca7e98
|
[
"MIT"
] | 1
|
2018-09-15T09:59:10.000Z
|
2018-09-15T09:59:10.000Z
|
import io
import json
import urllib.request
from urllib.error import HTTPError
#baseurl = 'https://hackerone.com/programs/search?query=type:hackerone&sort=published_at:descending&page={}'
baseurl = "https://hackerone.com/directory?query=type%3Ahackerone&sort=published_at%3Adescending&page={}"
scope = []
i = 1
while True:
    # Collect directory page URLs until the server stops answering successfully
    try:
        urllib.request.urlopen(baseurl.format(i))
    except HTTPError:
        break
    scope.append(baseurl.format(i))
    i += 1
print(scope)
# Open the output file once so results from every page are kept rather than overwritten
with io.open("testfile.json", "w", encoding="utf-8") as FILE:
    for url in scope:
        response = urllib.request.urlopen(url)
        a = json.loads(response.read().decode("utf-8"))
        for result in a["results"]:
            FILE.write("{} : {}\n".format(result["name"], "https://hackerone.com" + result["url"]))
| 26.966667
| 109
| 0.669963
|
0a84f5d557e36b93d97e364a599cfdeb6be77e7e
| 483
|
py
|
Python
|
blog_api/migrations/0002_alter_posts_tags.py
|
islam-kamel/cookie_blog
|
c285e51f32d3f29dae74d59720c620f7f4007de3
|
[
"MIT"
] | null | null | null |
blog_api/migrations/0002_alter_posts_tags.py
|
islam-kamel/cookie_blog
|
c285e51f32d3f29dae74d59720c620f7f4007de3
|
[
"MIT"
] | null | null | null |
blog_api/migrations/0002_alter_posts_tags.py
|
islam-kamel/cookie_blog
|
c285e51f32d3f29dae74d59720c620f7f4007de3
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-11 06:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog_api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='posts',
name='tags',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='post_tags', to='blog_api.tags'),
),
]
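# This migration is applied together with the rest of the app's migrations, e.g.
# (assuming a standard manage.py at the project root):
#
#   python manage.py migrate blog_api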
| 24.15
| 127
| 0.637681
|
77fc993e8202f9afcd76db7fd3eb166c34402dc7
| 1,937
|
py
|
Python
|
tests/unit/test_tensor_tf2.py
|
triton-inference-server/model_navigator
|
ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 49
|
2021-04-09T18:32:07.000Z
|
2022-03-29T07:32:24.000Z
|
tests/unit/test_tensor_tf2.py
|
triton-inference-server/model_navigator
|
ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2021-07-13T09:00:12.000Z
|
2021-11-15T17:16:35.000Z
|
tests/unit/test_tensor_tf2.py
|
triton-inference-server/model_navigator
|
ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2021-04-09T18:31:56.000Z
|
2022-03-01T08:08:04.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# pytype: disable=import-error
import tensorflow as tf
from model_navigator.tensor import NPTensorUtils, TensorUtils
# pytype: enable=import-error
def test_tf2_eq():
with tf.device("CPU:0"):
a = tf.constant([1, 1, 1], dtype=tf.float32)
utils = TensorUtils.for_data(a)
assert utils.eq(a, a)
b = tf.Variable(a)
assert utils.eq(a, b)
with tf.device("GPU:0"):
# not equal due to different devices
c = tf.Variable(a)
assert not utils.eq(a, c)
# simple change
d = tf.constant([0, 1, 1], dtype=tf.float32)
assert not utils.eq(a, d)
# different dtypes should return False
e = tf.constant([1, 1, 1], dtype=tf.float64)
assert not utils.eq(a, e)
# different shapes also should return False
f = tf.constant([1, 1, 1, 1], dtype=tf.float32)
assert not utils.eq(a, f)
def test_tf2_to_numpy():
with tf.device("GPU:0"):
a = tf.ones(4, dtype=tf.float32)
utils = TensorUtils.for_data(a)
b = utils.to_numpy(a)
assert NPTensorUtils.eq(b, np.ones(4, dtype=np.float32))
a = tf.Variable([np.nan, 1, 1, 1], dtype=tf.float32)
c = utils.to_numpy(a)
c_expected = np.array([np.nan, 1, 1, 1], dtype=np.float32)
assert NPTensorUtils.eq(c, c_expected)
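# Both tests pin tensors to "GPU:0", so they assume a machine with at least one GPU
# visible to TensorFlow. A small usage sketch of the same TensorUtils API outside pytest:
#
#   t = tf.constant([1.0, 2.0])
#   utils = TensorUtils.for_data(t)
#   print(utils.eq(t, t), utils.to_numpy(t))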
| 31.241935
| 74
| 0.660816
|
381dc442d0c8cbcd7fa7de8fcf3d4a59de313acb
| 138,918
|
py
|
Python
|
sdk/python/pulumi_azure_native/datamigration/v20180419/_inputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datamigration/v20180419/_inputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datamigration/v20180419/_inputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AzureActiveDirectoryAppArgs',
'BlobShareArgs',
'ConnectToSourcePostgreSqlSyncTaskInputArgs',
'ConnectToSourcePostgreSqlSyncTaskPropertiesArgs',
'ConnectToSourceSqlServerSyncTaskPropertiesArgs',
'ConnectToSourceSqlServerTaskInputArgs',
'ConnectToSourceSqlServerTaskPropertiesArgs',
'ConnectToTargetAzureDbForMySqlTaskInputArgs',
'ConnectToTargetAzureDbForMySqlTaskPropertiesArgs',
'ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs',
'ConnectToTargetAzureDbForPostgreSqlSyncTaskPropertiesArgs',
'ConnectToTargetSqlDbTaskInputArgs',
'ConnectToTargetSqlDbTaskPropertiesArgs',
'ConnectToTargetSqlMISyncTaskInputArgs',
'ConnectToTargetSqlMISyncTaskPropertiesArgs',
'ConnectToTargetSqlMITaskInputArgs',
'ConnectToTargetSqlMITaskPropertiesArgs',
'ConnectToTargetSqlSqlDbSyncTaskInputArgs',
'ConnectToTargetSqlSqlDbSyncTaskPropertiesArgs',
'DatabaseInfoArgs',
'FileShareArgs',
'GetTdeCertificatesSqlTaskInputArgs',
'GetTdeCertificatesSqlTaskPropertiesArgs',
'GetUserTablesSqlSyncTaskInputArgs',
'GetUserTablesSqlSyncTaskPropertiesArgs',
'GetUserTablesSqlTaskInputArgs',
'GetUserTablesSqlTaskPropertiesArgs',
'MiSqlConnectionInfoArgs',
'MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs',
'MigrateMySqlAzureDbForMySqlSyncTaskInputArgs',
'MigrateMySqlAzureDbForMySqlSyncTaskPropertiesArgs',
'MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs',
'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs',
'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskPropertiesArgs',
'MigrateSqlServerSqlDbDatabaseInputArgs',
'MigrateSqlServerSqlDbSyncDatabaseInputArgs',
'MigrateSqlServerSqlDbSyncTaskInputArgs',
'MigrateSqlServerSqlDbSyncTaskPropertiesArgs',
'MigrateSqlServerSqlDbTaskInputArgs',
'MigrateSqlServerSqlDbTaskPropertiesArgs',
'MigrateSqlServerSqlMIDatabaseInputArgs',
'MigrateSqlServerSqlMISyncTaskInputArgs',
'MigrateSqlServerSqlMISyncTaskPropertiesArgs',
'MigrateSqlServerSqlMITaskInputArgs',
'MigrateSqlServerSqlMITaskPropertiesArgs',
'MigrationValidationOptionsArgs',
'MySqlConnectionInfoArgs',
'PostgreSqlConnectionInfoArgs',
'SelectedCertificateInputArgs',
'ServiceSkuArgs',
'SqlConnectionInfoArgs',
'ValidateMigrationInputSqlServerSqlDbSyncTaskPropertiesArgs',
'ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs',
'ValidateMigrationInputSqlServerSqlMISyncTaskPropertiesArgs',
'ValidateMigrationInputSqlServerSqlMITaskInputArgs',
'ValidateMigrationInputSqlServerSqlMITaskPropertiesArgs',
'ValidateSyncMigrationInputSqlServerTaskInputArgs',
]
@pulumi.input_type
class AzureActiveDirectoryAppArgs:
def __init__(__self__, *,
app_key: pulumi.Input[str],
application_id: pulumi.Input[str],
tenant_id: pulumi.Input[str]):
"""
Azure Active Directory Application
:param pulumi.Input[str] app_key: Key used to authenticate to the Azure Active Directory Application
:param pulumi.Input[str] application_id: Application ID of the Azure Active Directory Application
:param pulumi.Input[str] tenant_id: Tenant id of the customer
"""
pulumi.set(__self__, "app_key", app_key)
pulumi.set(__self__, "application_id", application_id)
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="appKey")
def app_key(self) -> pulumi.Input[str]:
"""
Key used to authenticate to the Azure Active Directory Application
"""
return pulumi.get(self, "app_key")
@app_key.setter
def app_key(self, value: pulumi.Input[str]):
pulumi.set(self, "app_key", value)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> pulumi.Input[str]:
"""
Application ID of the Azure Active Directory Application
"""
return pulumi.get(self, "application_id")
@application_id.setter
def application_id(self, value: pulumi.Input[str]):
pulumi.set(self, "application_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Input[str]:
"""
Tenant id of the customer
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: pulumi.Input[str]):
pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class BlobShareArgs:
def __init__(__self__, *,
sas_uri: pulumi.Input[str]):
"""
Blob container storage information.
:param pulumi.Input[str] sas_uri: SAS URI of Azure Storage Account Container.
"""
pulumi.set(__self__, "sas_uri", sas_uri)
@property
@pulumi.getter(name="sasUri")
def sas_uri(self) -> pulumi.Input[str]:
"""
SAS URI of Azure Storage Account Container.
"""
return pulumi.get(self, "sas_uri")
@sas_uri.setter
def sas_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "sas_uri", value)
@pulumi.input_type
class ConnectToSourcePostgreSqlSyncTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs']):
"""
Input for the task that validates connection to PostgreSQL and source server requirements
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] source_connection_info: Connection information for source PostgreSQL server
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for source PostgreSQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@pulumi.input_type
class ConnectToSourcePostgreSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs']] = None):
"""
Properties for the task that validates connection to PostgreSQL server and source server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToSource.PostgreSql.Sync'.
:param pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToSource.PostgreSql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToSource.PostgreSql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToSourceSqlServerSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL Server and source server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToSource.SqlServer.Sync'.
:param pulumi.Input['ConnectToSourceSqlServerTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToSource.SqlServer.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToSource.SqlServer.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToSourceSqlServerTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
check_permissions_group: Optional[pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']]] = None,
collect_agent_jobs: Optional[pulumi.Input[bool]] = None,
collect_logins: Optional[pulumi.Input[bool]] = None):
"""
Input for the task that validates connection to SQL Server and also validates source server requirements
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for Source SQL Server
:param pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']] check_permissions_group: Permission group for validations
:param pulumi.Input[bool] collect_agent_jobs: Flag for whether to collect agent jobs from source server.
:param pulumi.Input[bool] collect_logins: Flag for whether to collect logins from source server.
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
if check_permissions_group is not None:
pulumi.set(__self__, "check_permissions_group", check_permissions_group)
if collect_agent_jobs is None:
collect_agent_jobs = False
if collect_agent_jobs is not None:
pulumi.set(__self__, "collect_agent_jobs", collect_agent_jobs)
if collect_logins is None:
collect_logins = False
if collect_logins is not None:
pulumi.set(__self__, "collect_logins", collect_logins)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for Source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="checkPermissionsGroup")
def check_permissions_group(self) -> Optional[pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']]]:
"""
Permission group for validations
"""
return pulumi.get(self, "check_permissions_group")
@check_permissions_group.setter
def check_permissions_group(self, value: Optional[pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']]]):
pulumi.set(self, "check_permissions_group", value)
@property
@pulumi.getter(name="collectAgentJobs")
def collect_agent_jobs(self) -> Optional[pulumi.Input[bool]]:
"""
Flag for whether to collect agent jobs from source server.
"""
return pulumi.get(self, "collect_agent_jobs")
@collect_agent_jobs.setter
def collect_agent_jobs(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "collect_agent_jobs", value)
@property
@pulumi.getter(name="collectLogins")
def collect_logins(self) -> Optional[pulumi.Input[bool]]:
"""
Flag for whether to collect logins from source server.
"""
return pulumi.get(self, "collect_logins")
@collect_logins.setter
def collect_logins(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "collect_logins", value)
@pulumi.input_type
class ConnectToSourceSqlServerTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL Server and also validates source server requirements
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToSource.SqlServer'.
:param pulumi.Input['ConnectToSourceSqlServerTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToSource.SqlServer')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToSource.SqlServer'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetAzureDbForMySqlTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['MySqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['MySqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure Database for MySQL and target server requirements
:param pulumi.Input['MySqlConnectionInfoArgs'] source_connection_info: Connection information for source MySQL server
:param pulumi.Input['MySqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for MySQL server
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for source MySQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for MySQL server
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetAzureDbForMySqlTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs']] = None):
"""
Properties for the task that validates connection to Azure Database for MySQL and target server requirements
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureDbForMySql'.
:param pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureDbForMySql')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureDbForMySql'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure Database for PostgreSQL and target server requirements
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] source_connection_info: Connection information for source PostgreSQL server
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for PostgreSQL server
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for source PostgreSQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for PostgreSQL server
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetAzureDbForPostgreSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs']] = None):
"""
Properties for the task that validates connection to Azure Database For PostgreSQL server and target server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureDbForPostgreSql.Sync'.
:param pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureDbForPostgreSql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureDbForPostgreSql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlDbTaskInputArgs:
def __init__(__self__, *,
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that validates connection to SQL DB and target server requirements
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for target SQL DB
"""
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for target SQL DB
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlDbTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlDbTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL DB and target server requirements
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.SqlDb'.
:param pulumi.Input['ConnectToTargetSqlDbTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.SqlDb')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.SqlDb'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlDbTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlDbTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlMISyncTaskInputArgs:
def __init__(__self__, *,
azure_app: pulumi.Input['AzureActiveDirectoryAppArgs'],
target_connection_info: pulumi.Input['MiSqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure SQL Database Managed Instance online scenario.
:param pulumi.Input['AzureActiveDirectoryAppArgs'] azure_app: Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
:param pulumi.Input['MiSqlConnectionInfoArgs'] target_connection_info: Connection information for Azure SQL Database Managed Instance
"""
pulumi.set(__self__, "azure_app", azure_app)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="azureApp")
def azure_app(self) -> pulumi.Input['AzureActiveDirectoryAppArgs']:
"""
Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
"""
return pulumi.get(self, "azure_app")
@azure_app.setter
def azure_app(self, value: pulumi.Input['AzureActiveDirectoryAppArgs']):
pulumi.set(self, "azure_app", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MiSqlConnectionInfoArgs']:
"""
Connection information for Azure SQL Database Managed Instance
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MiSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlMISyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs']] = None):
"""
Properties for the task that validates connection to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI.Sync.LRS'.
:param pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureSqlDbMI.Sync.LRS')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI.Sync.LRS'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlMITaskInputArgs:
def __init__(__self__, *,
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure SQL Database Managed Instance.
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for target SQL Server
"""
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for target SQL Server
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlMITaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlMITaskInputArgs']] = None):
"""
Properties for the task that validates connection to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI'.
:param pulumi.Input['ConnectToTargetSqlMITaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureSqlDbMI')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlMITaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlMITaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlSqlDbSyncTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure SQL DB and target server requirements
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for source SQL Server
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for target SQL DB
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for target SQL DB
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlSqlDbSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL DB and target server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.SqlDb.Sync'.
:param pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.SqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.SqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class DatabaseInfoArgs:
def __init__(__self__, *,
source_database_name: pulumi.Input[str]):
"""
Project Database Details
:param pulumi.Input[str] source_database_name: Name of the database
"""
pulumi.set(__self__, "source_database_name", source_database_name)
@property
@pulumi.getter(name="sourceDatabaseName")
def source_database_name(self) -> pulumi.Input[str]:
"""
Name of the database
"""
return pulumi.get(self, "source_database_name")
@source_database_name.setter
def source_database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "source_database_name", value)
@pulumi.input_type
class FileShareArgs:
def __init__(__self__, *,
path: pulumi.Input[str],
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
File share information with Path, Username, and Password.
:param pulumi.Input[str] path: The folder path for this share.
:param pulumi.Input[str] password: Password credential used to connect to the share location.
:param pulumi.Input[str] user_name: User name credential to connect to the share location
"""
pulumi.set(__self__, "path", path)
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
The folder path for this share.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential used to connect to the share location.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name credential to connect to the share location
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class GetTdeCertificatesSqlTaskInputArgs:
def __init__(__self__, *,
backup_file_share: pulumi.Input['FileShareArgs'],
connection_info: pulumi.Input['SqlConnectionInfoArgs'],
selected_certificates: pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]]):
"""
Input for the task that gets TDE certificates in Base64 encoded format.
        :param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for the file share used to temporarily store files.
:param pulumi.Input['SqlConnectionInfoArgs'] connection_info: Connection information for SQL Server
:param pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]] selected_certificates: List containing certificate names and corresponding password to use for encrypting the exported certificate.
"""
pulumi.set(__self__, "backup_file_share", backup_file_share)
pulumi.set(__self__, "connection_info", connection_info)
pulumi.set(__self__, "selected_certificates", selected_certificates)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> pulumi.Input['FileShareArgs']:
"""
        Backup file share information for the file share used to temporarily store files.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: pulumi.Input['FileShareArgs']):
pulumi.set(self, "backup_file_share", value)
@property
@pulumi.getter(name="connectionInfo")
def connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL Server
"""
return pulumi.get(self, "connection_info")
@connection_info.setter
def connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "connection_info", value)
@property
@pulumi.getter(name="selectedCertificates")
def selected_certificates(self) -> pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]]:
"""
List containing certificate names and corresponding password to use for encrypting the exported certificate.
"""
return pulumi.get(self, "selected_certificates")
@selected_certificates.setter
def selected_certificates(self, value: pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]]):
pulumi.set(self, "selected_certificates", value)
@pulumi.input_type
class GetTdeCertificatesSqlTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['GetTdeCertificatesSqlTaskInputArgs']] = None):
"""
Properties for the task that gets TDE certificates in Base64 encoded format.
:param pulumi.Input[str] task_type: Task type.
Expected value is 'GetTDECertificates.Sql'.
:param pulumi.Input['GetTdeCertificatesSqlTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'GetTDECertificates.Sql')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'GetTDECertificates.Sql'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['GetTdeCertificatesSqlTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['GetTdeCertificatesSqlTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class GetUserTablesSqlSyncTaskInputArgs:
def __init__(__self__, *,
selected_source_databases: pulumi.Input[Sequence[pulumi.Input[str]]],
selected_target_databases: pulumi.Input[Sequence[pulumi.Input[str]]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that collects user tables for the given list of databases
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_source_databases: List of source database names to collect tables for
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_target_databases: List of target database names to collect tables for
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for SQL Server
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for SQL DB
"""
pulumi.set(__self__, "selected_source_databases", selected_source_databases)
pulumi.set(__self__, "selected_target_databases", selected_target_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedSourceDatabases")
def selected_source_databases(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of source database names to collect tables for
"""
return pulumi.get(self, "selected_source_databases")
@selected_source_databases.setter
def selected_source_databases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "selected_source_databases", value)
@property
@pulumi.getter(name="selectedTargetDatabases")
def selected_target_databases(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of target database names to collect tables for
"""
return pulumi.get(self, "selected_target_databases")
@selected_target_databases.setter
def selected_target_databases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "selected_target_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL DB
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class GetUserTablesSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['GetUserTablesSqlSyncTaskInputArgs']] = None):
"""
Properties for the task that collects user tables for the given list of databases
:param pulumi.Input[str] task_type: Task type.
Expected value is 'GetUserTables.AzureSqlDb.Sync'.
:param pulumi.Input['GetUserTablesSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'GetUserTables.AzureSqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'GetUserTables.AzureSqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['GetUserTablesSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['GetUserTablesSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class GetUserTablesSqlTaskInputArgs:
def __init__(__self__, *,
connection_info: pulumi.Input['SqlConnectionInfoArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
Input for the task that collects user tables for the given list of databases
:param pulumi.Input['SqlConnectionInfoArgs'] connection_info: Connection information for SQL Server
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_databases: List of database names to collect tables for
"""
pulumi.set(__self__, "connection_info", connection_info)
pulumi.set(__self__, "selected_databases", selected_databases)
@property
@pulumi.getter(name="connectionInfo")
def connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL Server
"""
return pulumi.get(self, "connection_info")
@connection_info.setter
def connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "connection_info", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of database names to collect tables for
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "selected_databases", value)
@pulumi.input_type
class GetUserTablesSqlTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['GetUserTablesSqlTaskInputArgs']] = None):
"""
Properties for the task that collects user tables for the given list of databases
:param pulumi.Input[str] task_type: Task type.
Expected value is 'GetUserTables.Sql'.
:param pulumi.Input['GetUserTablesSqlTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'GetUserTables.Sql')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'GetUserTables.Sql'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['GetUserTablesSqlTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['GetUserTablesSqlTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MiSqlConnectionInfoArgs:
def __init__(__self__, *,
managed_instance_resource_id: pulumi.Input[str],
type: pulumi.Input[str],
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
        Properties required to create a connection to Azure SQL Database Managed Instance
        :param pulumi.Input[str] managed_instance_resource_id: Resource ID of the Azure SQL Database Managed Instance
:param pulumi.Input[str] type: Type of connection info
Expected value is 'MiSqlConnectionInfo'.
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "managed_instance_resource_id", managed_instance_resource_id)
pulumi.set(__self__, "type", 'MiSqlConnectionInfo')
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter(name="managedInstanceResourceId")
def managed_instance_resource_id(self) -> pulumi.Input[str]:
"""
        Resource ID of the Azure SQL Database Managed Instance
"""
return pulumi.get(self, "managed_instance_resource_id")
@managed_instance_resource_id.setter
def managed_instance_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "managed_instance_resource_id", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'MiSqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs:
def __init__(__self__, *,
migration_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
source_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None,
target_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Database specific information for MySQL to Azure Database for MySQL migration task inputs
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] migration_setting: Migration settings which tune the migration behavior
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_setting: Source settings to tune source endpoint migration behavior
:param pulumi.Input[str] target_database_name: Name of target database. Note: Target database will be truncated before starting migration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] target_setting: Target settings to tune target endpoint migration behavior
"""
if migration_setting is not None:
pulumi.set(__self__, "migration_setting", migration_setting)
if name is not None:
pulumi.set(__self__, "name", name)
if source_setting is not None:
pulumi.set(__self__, "source_setting", source_setting)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
if target_setting is not None:
pulumi.set(__self__, "target_setting", target_setting)
@property
@pulumi.getter(name="migrationSetting")
def migration_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Migration settings which tune the migration behavior
"""
return pulumi.get(self, "migration_setting")
@migration_setting.setter
def migration_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "migration_setting", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sourceSetting")
def source_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Source settings to tune source endpoint migration behavior
"""
return pulumi.get(self, "source_setting")
@source_setting.setter
def source_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "source_setting", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of target database. Note: Target database will be truncated before starting migration.
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@property
@pulumi.getter(name="targetSetting")
def target_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Target settings to tune target endpoint migration behavior
"""
return pulumi.get(self, "target_setting")
@target_setting.setter
def target_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "target_setting", value)
@pulumi.input_type
class MigrateMySqlAzureDbForMySqlSyncTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['MySqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['MySqlConnectionInfoArgs']):
"""
Input for the task that migrates MySQL databases to Azure Database for MySQL for online migrations
:param pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['MySqlConnectionInfoArgs'] source_connection_info: Connection information for source MySQL
:param pulumi.Input['MySqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for MySQL
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for source MySQL
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for MySQL
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class MigrateMySqlAzureDbForMySqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs']] = None):
"""
Properties for the task that migrates MySQL databases to Azure Database for MySQL for online migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.MySql.AzureDbForMySql.Sync'.
:param pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.MySql.AzureDbForMySql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.MySql.AzureDbForMySql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs:
def __init__(__self__, *,
migration_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
source_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None,
target_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Database specific information for PostgreSQL to Azure Database for PostgreSQL migration task inputs
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] migration_setting: Migration settings which tune the migration behavior
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_setting: Source settings to tune source endpoint migration behavior
:param pulumi.Input[str] target_database_name: Name of target database. Note: Target database will be truncated before starting migration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] target_setting: Target settings to tune target endpoint migration behavior
"""
if migration_setting is not None:
pulumi.set(__self__, "migration_setting", migration_setting)
if name is not None:
pulumi.set(__self__, "name", name)
if source_setting is not None:
pulumi.set(__self__, "source_setting", source_setting)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
if target_setting is not None:
pulumi.set(__self__, "target_setting", target_setting)
@property
@pulumi.getter(name="migrationSetting")
def migration_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Migration settings which tune the migration behavior
"""
return pulumi.get(self, "migration_setting")
@migration_setting.setter
def migration_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "migration_setting", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sourceSetting")
def source_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Source settings to tune source endpoint migration behavior
"""
return pulumi.get(self, "source_setting")
@source_setting.setter
def source_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "source_setting", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of target database. Note: Target database will be truncated before starting migration.
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@property
@pulumi.getter(name="targetSetting")
def target_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Target settings to tune target endpoint migration behavior
"""
return pulumi.get(self, "target_setting")
@target_setting.setter
def target_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "target_setting", value)
@pulumi.input_type
class MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs']):
"""
Input for the task that migrates PostgreSQL databases to Azure Database for PostgreSQL for online migrations
:param pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] source_connection_info: Connection information for source PostgreSQL
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for PostgreSQL
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for source PostgreSQL
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for PostgreSQL
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class MigratePostgreSqlAzureDbForPostgreSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs']] = None):
"""
Properties for the task that migrates PostgreSQL databases to Azure Database for PostgreSQL for online migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.PostgreSql.AzureDbForPostgreSql.Sync'.
:param pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.PostgreSql.AzureDbForPostgreSql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.PostgreSql.AzureDbForPostgreSql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlDbDatabaseInputArgs:
def __init__(__self__, *,
make_source_db_read_only: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
table_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None):
"""
Database specific information for SQL to Azure SQL DB migration task inputs
:param pulumi.Input[bool] make_source_db_read_only: Whether to set database read only before migration
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] table_map: Mapping of source to target tables
:param pulumi.Input[str] target_database_name: Name of target database. Note: Target database will be truncated before starting migration.
"""
if make_source_db_read_only is not None:
pulumi.set(__self__, "make_source_db_read_only", make_source_db_read_only)
if name is not None:
pulumi.set(__self__, "name", name)
if table_map is not None:
pulumi.set(__self__, "table_map", table_map)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
@property
@pulumi.getter(name="makeSourceDbReadOnly")
def make_source_db_read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to set database read only before migration
"""
return pulumi.get(self, "make_source_db_read_only")
@make_source_db_read_only.setter
def make_source_db_read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "make_source_db_read_only", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="tableMap")
def table_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Mapping of source to target tables
"""
return pulumi.get(self, "table_map")
@table_map.setter
def table_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "table_map", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of target database. Note: Target database will be truncated before starting migration.
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@pulumi.input_type
class MigrateSqlServerSqlDbSyncDatabaseInputArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None,
migration_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
schema_name: Optional[pulumi.Input[str]] = None,
source_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
table_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None,
target_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Database specific information for SQL to Azure SQL DB sync migration task inputs
:param pulumi.Input[str] id: Unique identifier for database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] migration_setting: Migration settings which tune the migration behavior
:param pulumi.Input[str] name: Name of database
:param pulumi.Input[str] schema_name: Schema name to be migrated
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_setting: Source settings to tune source endpoint migration behavior
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] table_map: Mapping of source to target tables
:param pulumi.Input[str] target_database_name: Target database name
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] target_setting: Target settings to tune target endpoint migration behavior
"""
if id is not None:
pulumi.set(__self__, "id", id)
if migration_setting is not None:
pulumi.set(__self__, "migration_setting", migration_setting)
if name is not None:
pulumi.set(__self__, "name", name)
if schema_name is not None:
pulumi.set(__self__, "schema_name", schema_name)
if source_setting is not None:
pulumi.set(__self__, "source_setting", source_setting)
if table_map is not None:
pulumi.set(__self__, "table_map", table_map)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
if target_setting is not None:
pulumi.set(__self__, "target_setting", target_setting)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Unique identifier for database
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="migrationSetting")
def migration_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Migration settings which tune the migration behavior
"""
return pulumi.get(self, "migration_setting")
@migration_setting.setter
def migration_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "migration_setting", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="schemaName")
def schema_name(self) -> Optional[pulumi.Input[str]]:
"""
Schema name to be migrated
"""
return pulumi.get(self, "schema_name")
@schema_name.setter
def schema_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema_name", value)
@property
@pulumi.getter(name="sourceSetting")
def source_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Source settings to tune source endpoint migration behavior
"""
return pulumi.get(self, "source_setting")
@source_setting.setter
def source_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "source_setting", value)
@property
@pulumi.getter(name="tableMap")
def table_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Mapping of source to target tables
"""
return pulumi.get(self, "table_map")
@table_map.setter
def table_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "table_map", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Target database name
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@property
@pulumi.getter(name="targetSetting")
def target_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Target settings to tune target endpoint migration behavior
"""
return pulumi.get(self, "target_setting")
@target_setting.setter
def target_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "target_setting", value)
@pulumi.input_type
class MigrateSqlServerSqlDbSyncTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
validation_options: Optional[pulumi.Input['MigrationValidationOptionsArgs']] = None):
"""
Input for the task that migrates on-prem SQL Server databases to Azure SQL Database for online migrations
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
:param pulumi.Input['MigrationValidationOptionsArgs'] validation_options: Validation options
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if validation_options is not None:
pulumi.set(__self__, "validation_options", validation_options)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="validationOptions")
def validation_options(self) -> Optional[pulumi.Input['MigrationValidationOptionsArgs']]:
"""
Validation options
"""
return pulumi.get(self, "validation_options")
@validation_options.setter
def validation_options(self, value: Optional[pulumi.Input['MigrationValidationOptionsArgs']]):
pulumi.set(self, "validation_options", value)
@pulumi.input_type
class MigrateSqlServerSqlDbSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs']] = None):
"""
Properties for the task that migrates on-prem SQL Server databases to Azure SQL Database for online migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDb.Sync'.
:param pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.AzureSqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlDbTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
validation_options: Optional[pulumi.Input['MigrationValidationOptionsArgs']] = None):
"""
Input for the task that migrates on-prem SQL Server databases to Azure SQL Database
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
        :param pulumi.Input['MigrationValidationOptionsArgs'] validation_options: Options for enabling various post-migration validations. Available options:
               1.) Data Integrity Check: performs a checksum-based comparison of source and target tables after the migration to ensure the correctness of the data.
               2.) Schema Validation: performs a thorough schema comparison between the source and target tables and provides a list of differences between the source and target database.
               3.) Query Analysis: executes a set of queries, picked up automatically from either the Query Plan Cache or the Query Store, against both databases and compares the execution time between the source and target database.
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if validation_options is not None:
pulumi.set(__self__, "validation_options", validation_options)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="validationOptions")
def validation_options(self) -> Optional[pulumi.Input['MigrationValidationOptionsArgs']]:
"""
        Options for enabling various post-migration validations. Available options:
        1.) Data Integrity Check: performs a checksum-based comparison of source and target tables after the migration to ensure the correctness of the data.
        2.) Schema Validation: performs a thorough schema comparison between the source and target tables and provides a list of differences between the source and target database.
        3.) Query Analysis: executes a set of queries, picked up automatically from either the Query Plan Cache or the Query Store, against both databases and compares the execution time between the source and target database.
"""
return pulumi.get(self, "validation_options")
@validation_options.setter
def validation_options(self, value: Optional[pulumi.Input['MigrationValidationOptionsArgs']]):
pulumi.set(self, "validation_options", value)
@pulumi.input_type
class MigrateSqlServerSqlDbTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs']] = None):
"""
Properties for the task that migrates on-prem SQL Server databases to Azure SQL Database
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.SqlDb'.
:param pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.SqlDb')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.SqlDb'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlMIDatabaseInputArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
restore_database_name: pulumi.Input[str],
backup_file_paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None):
"""
Database specific information for SQL to Azure SQL DB Managed Instance migration task inputs
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[str] restore_database_name: Name of the database at destination
:param pulumi.Input[Sequence[pulumi.Input[str]]] backup_file_paths: The list of backup files to be used in case of existing backups.
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for backing up this database.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "restore_database_name", restore_database_name)
if backup_file_paths is not None:
pulumi.set(__self__, "backup_file_paths", backup_file_paths)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="restoreDatabaseName")
def restore_database_name(self) -> pulumi.Input[str]:
"""
Name of the database at destination
"""
return pulumi.get(self, "restore_database_name")
@restore_database_name.setter
def restore_database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "restore_database_name", value)
@property
@pulumi.getter(name="backupFilePaths")
def backup_file_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of backup files to be used in case of existing backups.
"""
return pulumi.get(self, "backup_file_paths")
@backup_file_paths.setter
def backup_file_paths(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "backup_file_paths", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for backing up this database.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@pulumi.input_type
class MigrateSqlServerSqlMISyncTaskInputArgs:
def __init__(__self__, *,
azure_app: pulumi.Input['AzureActiveDirectoryAppArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
storage_resource_id: pulumi.Input[str],
target_connection_info: pulumi.Input['MiSqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None):
"""
        Input for the task that migrates SQL Server databases to Azure SQL Database Managed Instance (online scenario).
:param pulumi.Input['AzureActiveDirectoryAppArgs'] azure_app: Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for source SQL Server
:param pulumi.Input[str] storage_resource_id: Fully qualified resourceId of storage
:param pulumi.Input['MiSqlConnectionInfoArgs'] target_connection_info: Connection information for Azure SQL Database Managed Instance
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
"""
pulumi.set(__self__, "azure_app", azure_app)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "storage_resource_id", storage_resource_id)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
@property
@pulumi.getter(name="azureApp")
def azure_app(self) -> pulumi.Input['AzureActiveDirectoryAppArgs']:
"""
Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
"""
return pulumi.get(self, "azure_app")
@azure_app.setter
def azure_app(self, value: pulumi.Input['AzureActiveDirectoryAppArgs']):
pulumi.set(self, "azure_app", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="storageResourceId")
def storage_resource_id(self) -> pulumi.Input[str]:
"""
Fully qualified resourceId of storage
"""
return pulumi.get(self, "storage_resource_id")
@storage_resource_id.setter
def storage_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_resource_id", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MiSqlConnectionInfoArgs']:
"""
Connection information for Azure SQL Database Managed Instance
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MiSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@pulumi.input_type
class MigrateSqlServerSqlMISyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs']] = None):
"""
        Properties for the task that migrates SQL Server databases to Azure SQL Database Managed Instance (sync scenario)
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI.Sync.LRS'.
:param pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.AzureSqlDbMI.Sync.LRS')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI.Sync.LRS'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlMITaskInputArgs:
def __init__(__self__, *,
backup_blob_share: pulumi.Input['BlobShareArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None,
backup_mode: Optional[pulumi.Input[Union[str, 'BackupMode']]] = None,
selected_agent_jobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
selected_logins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
        Input for the task that migrates SQL Server databases to Azure SQL Database Managed Instance.
:param pulumi.Input['BlobShareArgs'] backup_blob_share: SAS URI of Azure Storage Account Container to be used for storing backup files.
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
        :param pulumi.Input[Union[str, 'BackupMode']] backup_mode: Backup mode specifying whether to use existing backups or create new ones. If existing backups are used, backup file paths must be provided in selectedDatabases.
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_agent_jobs: Agent Jobs to migrate.
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_logins: Logins to migrate.
"""
pulumi.set(__self__, "backup_blob_share", backup_blob_share)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
if backup_mode is not None:
pulumi.set(__self__, "backup_mode", backup_mode)
if selected_agent_jobs is not None:
pulumi.set(__self__, "selected_agent_jobs", selected_agent_jobs)
if selected_logins is not None:
pulumi.set(__self__, "selected_logins", selected_logins)
@property
@pulumi.getter(name="backupBlobShare")
def backup_blob_share(self) -> pulumi.Input['BlobShareArgs']:
"""
SAS URI of Azure Storage Account Container to be used for storing backup files.
"""
return pulumi.get(self, "backup_blob_share")
@backup_blob_share.setter
def backup_blob_share(self, value: pulumi.Input['BlobShareArgs']):
pulumi.set(self, "backup_blob_share", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@property
@pulumi.getter(name="backupMode")
def backup_mode(self) -> Optional[pulumi.Input[Union[str, 'BackupMode']]]:
"""
Backup Mode to specify whether to use existing backup or create new backup. If using existing backups, backup file paths are required to be provided in selectedDatabases.
"""
return pulumi.get(self, "backup_mode")
@backup_mode.setter
def backup_mode(self, value: Optional[pulumi.Input[Union[str, 'BackupMode']]]):
pulumi.set(self, "backup_mode", value)
@property
@pulumi.getter(name="selectedAgentJobs")
def selected_agent_jobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Agent Jobs to migrate.
"""
return pulumi.get(self, "selected_agent_jobs")
@selected_agent_jobs.setter
def selected_agent_jobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "selected_agent_jobs", value)
@property
@pulumi.getter(name="selectedLogins")
def selected_logins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Logins to migrate.
"""
return pulumi.get(self, "selected_logins")
@selected_logins.setter
def selected_logins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "selected_logins", value)
@pulumi.input_type
class MigrateSqlServerSqlMITaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlMITaskInputArgs']] = None):
"""
Properties for task that migrates SQL Server databases to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI'.
:param pulumi.Input['MigrateSqlServerSqlMITaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.AzureSqlDbMI')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlMITaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlMITaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrationValidationOptionsArgs:
def __init__(__self__, *,
enable_data_integrity_validation: Optional[pulumi.Input[bool]] = None,
enable_query_analysis_validation: Optional[pulumi.Input[bool]] = None,
enable_schema_validation: Optional[pulumi.Input[bool]] = None):
"""
Types of validations to run after the migration
        :param pulumi.Input[bool] enable_data_integrity_validation: Allows you to perform a checksum-based data integrity validation between source and target for the selected database/tables.
        :param pulumi.Input[bool] enable_query_analysis_validation: Allows you to perform a quick and intelligent query analysis by retrieving queries from the source database and executing them in the target. The result will have execution statistics for executions in source and target databases for the extracted queries.
        :param pulumi.Input[bool] enable_schema_validation: Allows you to compare the schema information between source and target.
"""
if enable_data_integrity_validation is not None:
pulumi.set(__self__, "enable_data_integrity_validation", enable_data_integrity_validation)
if enable_query_analysis_validation is not None:
pulumi.set(__self__, "enable_query_analysis_validation", enable_query_analysis_validation)
if enable_schema_validation is not None:
pulumi.set(__self__, "enable_schema_validation", enable_schema_validation)
@property
@pulumi.getter(name="enableDataIntegrityValidation")
def enable_data_integrity_validation(self) -> Optional[pulumi.Input[bool]]:
"""
        Allows you to perform a checksum-based data integrity validation between source and target for the selected database/tables.
"""
return pulumi.get(self, "enable_data_integrity_validation")
@enable_data_integrity_validation.setter
def enable_data_integrity_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_data_integrity_validation", value)
@property
@pulumi.getter(name="enableQueryAnalysisValidation")
def enable_query_analysis_validation(self) -> Optional[pulumi.Input[bool]]:
"""
        Allows you to perform a quick and intelligent query analysis by retrieving queries from the source database and executing them in the target. The result will have execution statistics for executions in source and target databases for the extracted queries.
"""
return pulumi.get(self, "enable_query_analysis_validation")
@enable_query_analysis_validation.setter
def enable_query_analysis_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_query_analysis_validation", value)
@property
@pulumi.getter(name="enableSchemaValidation")
def enable_schema_validation(self) -> Optional[pulumi.Input[bool]]:
"""
        Allows you to compare the schema information between source and target.
"""
return pulumi.get(self, "enable_schema_validation")
@enable_schema_validation.setter
def enable_schema_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_schema_validation", value)
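# Illustrative usage sketch, not part of the generated SDK: turning on all
# three post-migration validations. All flags are optional and default to
# unset, so only the checks enabled here are requested.
def _example_validation_options() -> 'MigrationValidationOptionsArgs':
    return MigrationValidationOptionsArgs(
        enable_schema_validation=True,
        enable_data_integrity_validation=True,
        enable_query_analysis_validation=True)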
@pulumi.input_type
class MySqlConnectionInfoArgs:
def __init__(__self__, *,
port: pulumi.Input[int],
server_name: pulumi.Input[str],
type: pulumi.Input[str],
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
Information for connecting to MySQL server
:param pulumi.Input[int] port: Port for Server
:param pulumi.Input[str] server_name: Name of the server
:param pulumi.Input[str] type: Type of connection info
Expected value is 'MySqlConnectionInfo'.
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "type", 'MySqlConnectionInfo')
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
Port for Server
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
Name of the server
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'MySqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class PostgreSqlConnectionInfoArgs:
def __init__(__self__, *,
port: pulumi.Input[int],
server_name: pulumi.Input[str],
type: pulumi.Input[str],
database_name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
Information for connecting to PostgreSQL server
:param pulumi.Input[int] port: Port for Server
:param pulumi.Input[str] server_name: Name of the server
:param pulumi.Input[str] type: Type of connection info
Expected value is 'PostgreSqlConnectionInfo'.
:param pulumi.Input[str] database_name: Name of the database
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "type", 'PostgreSqlConnectionInfo')
if database_name is not None:
pulumi.set(__self__, "database_name", database_name)
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
Port for Server
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
Name of the server
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'PostgreSqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "database_name")
@database_name.setter
def database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database_name", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class SelectedCertificateInputArgs:
def __init__(__self__, *,
certificate_name: pulumi.Input[str],
password: pulumi.Input[str]):
"""
Info for certificate to be exported for TDE enabled databases.
:param pulumi.Input[str] certificate_name: Name of certificate to be exported.
:param pulumi.Input[str] password: Password to use for encrypting the exported certificate.
"""
pulumi.set(__self__, "certificate_name", certificate_name)
pulumi.set(__self__, "password", password)
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> pulumi.Input[str]:
"""
Name of certificate to be exported.
"""
return pulumi.get(self, "certificate_name")
@certificate_name.setter
def certificate_name(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_name", value)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
"""
Password to use for encrypting the exported certificate.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@pulumi.input_type
class ServiceSkuArgs:
def __init__(__self__, *,
capacity: Optional[pulumi.Input[int]] = None,
family: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
"""
An Azure SKU instance
:param pulumi.Input[int] capacity: The capacity of the SKU, if it supports scaling
:param pulumi.Input[str] family: The SKU family, used when the service has multiple performance classes within a tier, such as 'A', 'D', etc. for virtual machines
:param pulumi.Input[str] name: The unique name of the SKU, such as 'P3'
:param pulumi.Input[str] size: The size of the SKU, used when the name alone does not denote a service size or when a SKU has multiple performance classes within a family, e.g. 'A1' for virtual machines
:param pulumi.Input[str] tier: The tier of the SKU, such as 'Free', 'Basic', 'Standard', or 'Premium'
"""
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if family is not None:
pulumi.set(__self__, "family", family)
if name is not None:
pulumi.set(__self__, "name", name)
if size is not None:
pulumi.set(__self__, "size", size)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
The capacity of the SKU, if it supports scaling
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def family(self) -> Optional[pulumi.Input[str]]:
"""
The SKU family, used when the service has multiple performance classes within a tier, such as 'A', 'D', etc. for virtual machines
"""
return pulumi.get(self, "family")
@family.setter
def family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "family", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The unique name of the SKU, such as 'P3'
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
"""
The size of the SKU, used when the name alone does not denote a service size or when a SKU has multiple performance classes within a family, e.g. 'A1' for virtual machines
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
"""
The tier of the SKU, such as 'Free', 'Basic', 'Standard', or 'Premium'
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
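# Illustrative usage sketch, not part of the generated SDK: a service SKU for
# the DMS instance. The concrete name/tier/capacity values below are
# placeholder assumptions; use whatever SKUs the service actually offers.
def _example_service_sku() -> 'ServiceSkuArgs':
    return ServiceSkuArgs(name='Premium_4vCores', tier='Premium', capacity=4)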
@pulumi.input_type
class SqlConnectionInfoArgs:
def __init__(__self__, *,
data_source: pulumi.Input[str],
type: pulumi.Input[str],
additional_settings: Optional[pulumi.Input[str]] = None,
authentication: Optional[pulumi.Input[Union[str, 'AuthenticationType']]] = None,
encrypt_connection: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
platform: Optional[pulumi.Input[Union[str, 'SqlSourcePlatform']]] = None,
trust_server_certificate: Optional[pulumi.Input[bool]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
Information for connecting to SQL database server
:param pulumi.Input[str] data_source: Data source in the format Protocol:MachineName\SQLServerInstanceName,PortNumber
:param pulumi.Input[str] type: Type of connection info
Expected value is 'SqlConnectionInfo'.
:param pulumi.Input[str] additional_settings: Additional connection settings
:param pulumi.Input[Union[str, 'AuthenticationType']] authentication: Authentication type to use for connection
:param pulumi.Input[bool] encrypt_connection: Whether to encrypt the connection
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[Union[str, 'SqlSourcePlatform']] platform: Server platform type for connection
:param pulumi.Input[bool] trust_server_certificate: Whether to trust the server certificate
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "data_source", data_source)
pulumi.set(__self__, "type", 'SqlConnectionInfo')
if additional_settings is not None:
pulumi.set(__self__, "additional_settings", additional_settings)
if authentication is not None:
pulumi.set(__self__, "authentication", authentication)
if encrypt_connection is None:
encrypt_connection = True
if encrypt_connection is not None:
pulumi.set(__self__, "encrypt_connection", encrypt_connection)
if password is not None:
pulumi.set(__self__, "password", password)
if platform is not None:
pulumi.set(__self__, "platform", platform)
if trust_server_certificate is None:
trust_server_certificate = False
if trust_server_certificate is not None:
pulumi.set(__self__, "trust_server_certificate", trust_server_certificate)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> pulumi.Input[str]:
"""
Data source in the format Protocol:MachineName\SQLServerInstanceName,PortNumber
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: pulumi.Input[str]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'SqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="additionalSettings")
def additional_settings(self) -> Optional[pulumi.Input[str]]:
"""
Additional connection settings
"""
return pulumi.get(self, "additional_settings")
@additional_settings.setter
def additional_settings(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "additional_settings", value)
@property
@pulumi.getter
def authentication(self) -> Optional[pulumi.Input[Union[str, 'AuthenticationType']]]:
"""
Authentication type to use for connection
"""
return pulumi.get(self, "authentication")
@authentication.setter
def authentication(self, value: Optional[pulumi.Input[Union[str, 'AuthenticationType']]]):
pulumi.set(self, "authentication", value)
@property
@pulumi.getter(name="encryptConnection")
def encrypt_connection(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to encrypt the connection
"""
return pulumi.get(self, "encrypt_connection")
@encrypt_connection.setter
def encrypt_connection(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypt_connection", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def platform(self) -> Optional[pulumi.Input[Union[str, 'SqlSourcePlatform']]]:
"""
Server platform type for connection
"""
return pulumi.get(self, "platform")
@platform.setter
def platform(self, value: Optional[pulumi.Input[Union[str, 'SqlSourcePlatform']]]):
pulumi.set(self, "platform", value)
@property
@pulumi.getter(name="trustServerCertificate")
def trust_server_certificate(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to trust the server certificate
"""
return pulumi.get(self, "trust_server_certificate")
@trust_server_certificate.setter
def trust_server_certificate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "trust_server_certificate", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
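# Illustrative usage sketch, not part of the generated SDK: a connection info
# object for an on-premises source server. The data source, authentication
# mode and credentials are placeholder assumptions; note that __init__ always
# forces 'type' to 'SqlConnectionInfo'.
def _example_source_connection() -> 'SqlConnectionInfoArgs':
    return SqlConnectionInfoArgs(
        data_source='onprem-sql.example.local,1433',
        type='SqlConnectionInfo',
        authentication='SqlAuthentication',
        user_name='migration_user',
        password='example-password',
        encrypt_connection=True,
        trust_server_certificate=True)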
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlDbSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs']] = None):
"""
Properties for task that validates migration input for SQL to Azure SQL DB sync migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ValidateMigrationInput.SqlServer.SqlDb.Sync'.
:param pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ValidateMigrationInput.SqlServer.SqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ValidateMigrationInput.SqlServer.SqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs:
def __init__(__self__, *,
azure_app: pulumi.Input['AzureActiveDirectoryAppArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
storage_resource_id: pulumi.Input[str],
target_connection_info: pulumi.Input['MiSqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None):
"""
Input for task that migrates SQL Server databases to Azure SQL Database Managed Instance online scenario.
:param pulumi.Input['AzureActiveDirectoryAppArgs'] azure_app: Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for source SQL Server
:param pulumi.Input[str] storage_resource_id: Fully qualified resourceId of storage
:param pulumi.Input['MiSqlConnectionInfoArgs'] target_connection_info: Connection information for Azure SQL Database Managed Instance
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
"""
pulumi.set(__self__, "azure_app", azure_app)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "storage_resource_id", storage_resource_id)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
@property
@pulumi.getter(name="azureApp")
def azure_app(self) -> pulumi.Input['AzureActiveDirectoryAppArgs']:
"""
Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
"""
return pulumi.get(self, "azure_app")
@azure_app.setter
def azure_app(self, value: pulumi.Input['AzureActiveDirectoryAppArgs']):
pulumi.set(self, "azure_app", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="storageResourceId")
def storage_resource_id(self) -> pulumi.Input[str]:
"""
Fully qualified resourceId of storage
"""
return pulumi.get(self, "storage_resource_id")
@storage_resource_id.setter
def storage_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_resource_id", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MiSqlConnectionInfoArgs']:
"""
Connection information for Azure SQL Database Managed Instance
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MiSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMISyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs']] = None):
"""
Properties for task that validates migration input for SQL to Azure SQL Database Managed Instance sync scenario
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS'.
:param pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMITaskInputArgs:
def __init__(__self__, *,
backup_blob_share: pulumi.Input['BlobShareArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None,
backup_mode: Optional[pulumi.Input[Union[str, 'BackupMode']]] = None,
selected_logins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input for task that validates migration input for SQL to Azure SQL Managed Instance
:param pulumi.Input['BlobShareArgs'] backup_blob_share: SAS URI of Azure Storage Account Container to be used for storing backup files.
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
:param pulumi.Input[Union[str, 'BackupMode']] backup_mode: Backup Mode to specify whether to use existing backup or create new backup.
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_logins: Logins to migrate
"""
pulumi.set(__self__, "backup_blob_share", backup_blob_share)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
if backup_mode is not None:
pulumi.set(__self__, "backup_mode", backup_mode)
if selected_logins is not None:
pulumi.set(__self__, "selected_logins", selected_logins)
@property
@pulumi.getter(name="backupBlobShare")
def backup_blob_share(self) -> pulumi.Input['BlobShareArgs']:
"""
SAS URI of Azure Storage Account Container to be used for storing backup files.
"""
return pulumi.get(self, "backup_blob_share")
@backup_blob_share.setter
def backup_blob_share(self, value: pulumi.Input['BlobShareArgs']):
pulumi.set(self, "backup_blob_share", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@property
@pulumi.getter(name="backupMode")
def backup_mode(self) -> Optional[pulumi.Input[Union[str, 'BackupMode']]]:
"""
Backup Mode to specify whether to use existing backup or create new backup.
"""
return pulumi.get(self, "backup_mode")
@backup_mode.setter
def backup_mode(self, value: Optional[pulumi.Input[Union[str, 'BackupMode']]]):
pulumi.set(self, "backup_mode", value)
@property
@pulumi.getter(name="selectedLogins")
def selected_logins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Logins to migrate
"""
return pulumi.get(self, "selected_logins")
@selected_logins.setter
def selected_logins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "selected_logins", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMITaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs']] = None):
"""
Properties for task that validates migration input for SQL to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI'.
:param pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ValidateMigrationInput.SqlServer.AzureSqlDbMI')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ValidateSyncMigrationInputSqlServerTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for task that validates migration input for SQL sync migrations
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source SQL server
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source SQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
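# Illustrative usage sketch, not part of the generated SDK: wiring the input
# types above into a validation task for a SQL-to-Azure-SQL-DB sync migration.
# Connection values are placeholders, and selected_databases is left empty
# here only because MigrateSqlServerSqlDbSyncDatabaseInputArgs is defined
# elsewhere in this module.
def _example_validate_sync_task() -> 'ValidateMigrationInputSqlServerSqlDbSyncTaskPropertiesArgs':
    source = SqlConnectionInfoArgs(
        data_source='onprem-sql.example.local,1433',
        type='SqlConnectionInfo',
        user_name='migration_user',
        password='example-password')
    target = SqlConnectionInfoArgs(
        data_source='example-server.database.windows.net',
        type='SqlConnectionInfo',
        user_name='azure_admin',
        password='example-password')
    task_input = ValidateSyncMigrationInputSqlServerTaskInputArgs(
        selected_databases=[],
        source_connection_info=source,
        target_connection_info=target)
    return ValidateMigrationInputSqlServerSqlDbSyncTaskPropertiesArgs(
        task_type='ValidateMigrationInput.SqlServer.SqlDb.Sync',
        input=task_input)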
| 41.554891
| 395
| 0.681172
|
9d86431ab563cff83673d49c61b10f18b9125389
| 250
|
py
|
Python
|
clients/keto/python/ory_keto_client/api/__init__.py
|
mojotalantikite/sdk
|
00fc86e98570e88911cfc66ce76637f8f1ac9dbe
|
[
"Apache-2.0"
] | null | null | null |
clients/keto/python/ory_keto_client/api/__init__.py
|
mojotalantikite/sdk
|
00fc86e98570e88911cfc66ce76637f8f1ac9dbe
|
[
"Apache-2.0"
] | null | null | null |
clients/keto/python/ory_keto_client/api/__init__.py
|
mojotalantikite/sdk
|
00fc86e98570e88911cfc66ce76637f8f1ac9dbe
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from ory_keto_client.api.engines_api import EnginesApi
from ory_keto_client.api.health_api import HealthApi
from ory_keto_client.api.version_api import VersionApi
| 27.777778
| 54
| 0.856
|
4ac9fa743eb5135d43262ae894f9753cf0960711
| 787
|
py
|
Python
|
tests/plantcv/test_image_fusion.py
|
ygarrot/plantcv
|
e934a891e0d1bf8987ca6a9f982a4ac1f420bfe7
|
[
"MIT"
] | 1
|
2022-02-03T12:08:59.000Z
|
2022-02-03T12:08:59.000Z
|
tests/plantcv/test_image_fusion.py
|
HUISTENCOFFEE/plantcv
|
f38f7de53663522eb770870b70823d5fc46d0c0f
|
[
"MIT"
] | null | null | null |
tests/plantcv/test_image_fusion.py
|
HUISTENCOFFEE/plantcv
|
f38f7de53663522eb770870b70823d5fc46d0c0f
|
[
"MIT"
] | null | null | null |
import pytest
import cv2
import numpy as np
from skimage import img_as_ubyte
from plantcv.plantcv import image_fusion, Spectral_data
def test_image_fusion(test_data):
"""Test for PlantCV."""
# Read in test data
# 16-bit image
img1 = cv2.imread(test_data.fmax, -1)
img2 = cv2.imread(test_data.fmin)
# 8-bit image
img2 = img_as_ubyte(img2)
fused_img = image_fusion(img1, img2, [480.0], [550.0, 640.0, 800.0])
assert isinstance(fused_img, Spectral_data)
def test_image_fusion_size_diff(test_data):
"""Test for PlantCV."""
img1 = cv2.imread(test_data.small_bin_img, 0)
img2 = np.copy(img1)
img2 = img2[0:10, 0:10]
with pytest.raises(RuntimeError):
_ = image_fusion(img1, img2, [480.0, 550.0, 670.0], [480.0, 550.0, 670.0])
| 29.148148
| 82
| 0.681067
|
494f50174b6ec4e2ba418a6b603b0ffe37032e89
| 5,698
|
py
|
Python
|
bin/select-lines.py
|
nrc-cnrc/PortageTextProcessing
|
08db31a0c02a99024cc9019be959ae25945eb4ae
|
[
"MIT"
] | null | null | null |
bin/select-lines.py
|
nrc-cnrc/PortageTextProcessing
|
08db31a0c02a99024cc9019be959ae25945eb4ae
|
[
"MIT"
] | 1
|
2022-01-13T21:46:45.000Z
|
2022-01-13T22:01:49.000Z
|
bin/select-lines.py
|
nrc-cnrc/PortageTextProcessing
|
08db31a0c02a99024cc9019be959ae25945eb4ae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# coding=utf-8
# @file select-lines.py
# @brief Select a set of lines by index from a file.
#
# @author Darlene Stewart
#
# Traitement multilingue de textes / Multilingual Text Processing
# Centre de recherche en technologies numériques / Digital Technologies Research Centre
# Conseil national de recherches Canada / National Research Council Canada
# Copyright 2018, Sa Majeste la Reine du Chef du Canada /
# Copyright 2018, Her Majesty in Right of Canada
from __future__ import print_function, unicode_literals, division, absolute_import
import time
start_time = time.time()
import sys
import codecs
import re
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import os
import os.path
# If this script is run from within src/ rather than from the installed bin
# directory, we add src/utils to the Python module include path (sys.path).
if sys.argv[0] not in ('', '-c'):
bin_path = os.path.dirname(sys.argv[0])
if os.path.basename(bin_path) != "bin":
sys.path.insert(1, os.path.normpath(os.path.join(bin_path, "..", "utils")))
from portage_utils import *
def get_args():
"""Command line argument processing."""
usage = "select-lines.py [options] indexfile [infile [outfile]]"
help = """
Select a set of lines by index from a file.
indexfile contains 1-based integer indicies of lines to be extracted.
indexfile is assumed to be sorted.
"""
parser = ArgumentParser(usage=usage, description=help,
formatter_class=RawDescriptionHelpFormatter, add_help=False)
parser.add_argument("-h", "-help", "--help", action=HelpAction)
parser.add_argument("-v", "--verbose", action=VerboseAction)
parser.add_argument("-d", "--debug", action=DebugAction)
parser.add_argument("-a", "--alignment-column", dest="alignment_column", default=0, type=int,
help="indexfile is an alignment info file from ssal -a; process given column: 1 or 2")
parser.add_argument("--joiner", dest="joiner", default=" ", type=str,
help="with -a, join lines in a range with given joiner [one space]")
parser.add_argument("--separator", dest="separator", default="\n", type=str,
help="with -a, separate ranges with given separator [one newline]")
parser.add_argument("indexfile", type=open, help="sorted index file")
parser.add_argument("infile", nargs='?', type=open, default=sys.stdin,
help="input file [sys.stdin]")
parser.add_argument("outfile", nargs='?', type=lambda f: open(f,'w'), default=sys.stdout,
help="output file [sys.stdout]")
cmd_args = parser.parse_args()
return cmd_args
def parse_alignment_line(line, column):
tokens = line.split()
try:
(start,end) = tokens[column-1].split('-', 1)
start=int(start)
end=int(end)
except:
fatal_error("Invalid alignment info line:", line.strip())
if end < start:
fatal_error("Invalid alignment has end<start at:", line.strip())
return (start,end)
def main():
printCopyright("select-lines.py", 2018);
os.environ['PORTAGE_INTERNAL_CALL'] = '1';
cmd_args = get_args()
encoding = "utf-8"
try:
codecs.lookup(encoding)
except LookupError:
fatal_error("utf-8 codec not found.")
indexfile = codecs.getreader(encoding)(cmd_args.indexfile)
infile = codecs.getreader(encoding)(cmd_args.infile)
outfile = codecs.getwriter(encoding)(cmd_args.outfile)
# The following allows stderr to handle non-ascii characters:
sys.stderr = codecs.getwriter(encoding)(sys.stderr)
line_number = 0
if cmd_args.alignment_column == 0:
index_line = indexfile.readline()
if index_line:
index = int(index_line)
for in_line in infile:
if not index_line:
break
line_number += 1
if line_number == index:
print(in_line, file=outfile, end='')
index_line = indexfile.readline()
if index_line:
index = int(index_line)
elif line_number > index:
fatal_error("Index file out of sort order at index:", index, "input line:", line_number)
if index_line:
fatal_error("Out of input before end of index file at index:", index)
elif cmd_args.alignment_column == 1 or cmd_args.alignment_column == 2:
col = cmd_args.alignment_column
index_line = indexfile.readline()
if index_line:
(start, end) = parse_alignment_line(index_line, col)
if start < 0:
fatal_error("Alignment file specifies negative line number at:", index_line.strip())
for in_line in infile:
if not index_line:
break
if line_number >= start and line_number < end:
print(in_line.strip('\n'), file=outfile, end='')
if line_number+1 < end:
print(cmd_args.joiner, file=outfile, end='')
line_number += 1
while line_number == end:
print(cmd_args.separator, file=outfile, end='')
index_line = indexfile.readline()
if index_line:
(start, end) = parse_alignment_line(index_line, col)
if start < line_number:
fatal_error("Alignment file out of order at:", index_line.strip())
else:
break
if index_line:
fatal_error("Out of input before end of alignment index file at:", index_line.strip())
else:
fatal_error("invalid -a/--alignment-column value: use 1 or 2 (or 0 for none).")
indexfile.close()
infile.close()
outfile.close()
if __name__ == '__main__':
main()
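# Usage sketch (file names are made up for illustration):
#   select-lines.py line-numbers.txt corpus.txt selected.txt
# copies the 1-based line numbers listed in line-numbers.txt from corpus.txt,
# while
#   select-lines.py -a 2 alignments.al corpus_l2.txt joined_l2.txt
# treats alignments.al as a ssal -a alignment file and joins each range taken
# from its second column into a single output segment.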
| 34.325301
| 109
| 0.649702
|
73db102f283c0a1bebd140f5658651290ffd47bc
| 1,223
|
py
|
Python
|
setup.py
|
chaos-dremel/autobuild
|
90499aeeb2d322b0901d617c3784a10f7f1b47e9
|
[
"MIT"
] | null | null | null |
setup.py
|
chaos-dremel/autobuild
|
90499aeeb2d322b0901d617c3784a10f7f1b47e9
|
[
"MIT"
] | null | null | null |
setup.py
|
chaos-dremel/autobuild
|
90499aeeb2d322b0901d617c3784a10f7f1b47e9
|
[
"MIT"
] | 1
|
2020-01-13T21:40:44.000Z
|
2020-01-13T21:40:44.000Z
|
import os
from setuptools import setup, Extension
from agibuild.settings import VERSION
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
os.environ['CC'] = 'g++'
module_mpkg = Extension('_support',
sources = ['mpkg/support.i'],
swig_opts=['-c++'],
libraries=['mpkgsupport'])
setup(
name = "agibuild",
version = VERSION,
author = "Sir Anthony",
author_email = "anthony@adsorbtion.org",
description = ("AgiliaLinux package builder"),
license = "MIT",
keywords = "agilia package",
url = "",
packages=['agibuild',],
ext_package = 'mpkg',
ext_modules = [module_mpkg],
py_modules = ['mpkg.support'],
long_description = read('README.md'),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
],
)
| 28.44186
| 79
| 0.615699
|
0c08e084992d15af059e133030abd016d6cafe2c
| 1,398
|
py
|
Python
|
opportune/models/accounts.py
|
Mildly-Sketchy/Mildly-Sketchy
|
d215d5c478bfbdac0056ed0144edfe8c9efa557f
|
[
"MIT"
] | null | null | null |
opportune/models/accounts.py
|
Mildly-Sketchy/Mildly-Sketchy
|
d215d5c478bfbdac0056ed0144edfe8c9efa557f
|
[
"MIT"
] | 2
|
2019-12-26T16:41:26.000Z
|
2020-01-06T18:52:13.000Z
|
opportune/models/accounts.py
|
Mildly-Sketchy/Mildly-Sketchy
|
d215d5c478bfbdac0056ed0144edfe8c9efa557f
|
[
"MIT"
] | 1
|
2018-06-13T18:16:45.000Z
|
2018-06-13T18:16:45.000Z
|
from .meta import Base
from sqlalchemy.exc import DBAPIError
from cryptacular import bcrypt
from sqlalchemy.orm import relationship
from sqlalchemy import (
Column,
Integer,
String,
Boolean,
)
manager = bcrypt.BCRYPTPasswordManager()
class Account(Base):
__tablename__ = 'accounts'
id = Column(Integer, primary_key=True)
username = Column(String, unique=True, nullable=False)
password = Column(String, nullable=False)
email = Column(String, nullable=False)
admin = Column(Boolean, nullable=False, default=False)
keywords = relationship(
'Keyword',
secondary='user_keywords')
def __init__(self, username, email, password, admin=False):
"""Initialize a new user with encoded password."""
self.username = username
self.email = email
self.password = manager.encode(password, 10)
self.admin = admin
@classmethod
def check_credentials(cls, request=None, username=None, password=None):
"""Authenticate a user."""
if request.dbsession is None:
raise DBAPIError
is_authenticated = False
query = request.dbsession.query(cls).filter(cls.username == username).one_or_none()
if query is not None:
if manager.check(query.password, password):
is_authenticated = True
return (is_authenticated, username)
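# Minimal usage sketch, not part of the model module: registering an account
# and authenticating it inside a request handler. The dbsession handling and
# the demo credentials are assumptions about the surrounding Pyramid app.
def _example_login(request):
    account = Account(username='demo', email='demo@example.com', password='hunter2')
    request.dbsession.add(account)
    is_authenticated, username = Account.check_credentials(
        request=request, username='demo', password='hunter2')
    return is_authenticated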
| 28.530612
| 91
| 0.669528
|
52cb466d000ba2ada5b9df18700fed472b601c13
| 1,470
|
py
|
Python
|
contrib/devtools/fix-copyright-headers.py
|
CoinWarden/Mrcoin
|
1c9f336f34342bca0d4530d214eb71c7dbdcf682
|
[
"MIT"
] | null | null | null |
contrib/devtools/fix-copyright-headers.py
|
CoinWarden/Mrcoin
|
1c9f336f34342bca0d4530d214eb71c7dbdcf682
|
[
"MIT"
] | null | null | null |
contrib/devtools/fix-copyright-headers.py
|
CoinWarden/Mrcoin
|
1c9f336f34342bca0d4530d214eb71c7dbdcf682
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Run this script inside of src/ and it will look for all the files
that were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The Mrcoin developers
it will change it to
// Copyright (c) 2009-2014 The Mrcoin developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
year = time.gmtime()[0]
last_year = year - 1
command = "perl -pi -e 's/%s The Mrcoin/%s The Mrcoin/' %s"
listFilesCommand = "find . | grep %s"
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
gitGetLastCommitDateCommand = "git log " + filePath +" | grep Date | head -n 1"
p = os.popen(gitGetLastCommitDateCommand)
result = ""
for l in p:
result = l
break
result = result.replace("\n","")
return result
n=1
for extension in extensions:
foundFiles = os.popen(listFilesCommand % extension)
for filePath in foundFiles:
filePath = filePath[1:-1]
if filePath.endswith(extension):
filePath = os.getcwd() + filePath
modifiedTime = getLastGitModifiedDate(filePath)
if len(modifiedTime) > 0 and str(year) in modifiedTime:
print n,"Last Git Modified: ", modifiedTime, " - ", filePath
os.popen(command % (last_year,year,filePath))
n = n + 1
| 27.222222
| 81
| 0.692517
|
b4770408ad5bcb425356c1235bead29f39584e12
| 183
|
py
|
Python
|
07-17/character-count.py
|
bronsonavila/automate-boring-stuff-python-notes
|
4de654f4da0db8a12a4abb3c7856b57b454b7b58
|
[
"Unlicense"
] | 37
|
2019-01-07T12:07:38.000Z
|
2022-03-18T08:59:56.000Z
|
07-17/character-count.py
|
bronsonavila/automate-boring-stuff-python-notes
|
4de654f4da0db8a12a4abb3c7856b57b454b7b58
|
[
"Unlicense"
] | null | null | null |
07-17/character-count.py
|
bronsonavila/automate-boring-stuff-python-notes
|
4de654f4da0db8a12a4abb3c7856b57b454b7b58
|
[
"Unlicense"
] | 33
|
2019-06-10T07:00:02.000Z
|
2022-02-14T13:14:31.000Z
|
import pprint
message = 'All cows eat grass'
count = {}
for character in message:
count.setdefault(character.lower(), 0)
count[character.lower()] += 1
pprint.pprint(count)
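# For reference, the counts produced for 'All cows eat grass' are:
# {' ': 3, 'a': 3, 'c': 1, 'e': 1, 'g': 1, 'l': 2, 'o': 1, 'r': 1, 's': 3, 't': 1, 'w': 1}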
| 16.636364
| 42
| 0.693989
|
cbc723d6e21dd8292a33f71a7899ded245fb5be8
| 1,927
|
py
|
Python
|
csm2txt.py
|
ssb22/wm6-utils
|
f50a7ab9d8af0ec66d8c0ef0a1940a0f0aa47110
|
[
"Unlicense"
] | null | null | null |
csm2txt.py
|
ssb22/wm6-utils
|
f50a7ab9d8af0ec66d8c0ef0a1940a0f0aa47110
|
[
"Unlicense"
] | null | null | null |
csm2txt.py
|
ssb22/wm6-utils
|
f50a7ab9d8af0ec66d8c0ef0a1940a0f0aa47110
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python2
# .csm (PIMBackup Messages dump) to .txt
# v1.1 Silas S. Brown 2018-2019
# public domain - no warranty
# Where to find history:
# on GitHub at https://github.com/ssb22/wm6-utils
# and on GitLab at https://gitlab.com/ssb22/wm6-utils
# and on BitBucket https://bitbucket.org/ssb22/wm6-utils
# and at https://gitlab.developers.cam.ac.uk/ssb22/wm6-utils
# and in China: https://gitee.com/ssb22/wm6-utils
indent = 16
ignore = [
# Customise this - headers to ignore
"Msg Id",
"BodyType",
"Folder",
"Account",
"Msg Class",
"Msg Size",
"Msg Flags",
"Msg Status",
"Recipient Nbr",
"Attachment Nbr",
"Content Length",
]
import csv
from cStringIO import StringIO
class csm(csv.excel):
delimiter = ';'
escapechar = "\\"
quoting = csv.QUOTE_NONE
doublequote = False
def escape_newlines(s):
inBody = False ; o = []
for l in s.split("\n"):
numQuotes=max(0,len(l.replace('\\"','').split('"'))-1)
if numQuotes % 2: inBody = not inBody
if inBody: o.append(l+r"\\n"+" "*indent)
else: o.append(l+"\n")
return "".join(o)
csv.register_dialect("csm",csm)
headers = [] ; out = []
for r in csv.reader(StringIO(escape_newlines(open("msgs.csm").read().decode('utf-16').encode('utf-8').replace("\r\n","\n").replace("\r","\n"))),"csm"):
if headers:
row = [] ; tt = None
for k,v in zip(headers,r):
if v.startswith('"') and v.endswith('"'):
v=v[1:-1]
if k in ignore: continue
if k.endswith("Time"):
try: y,m,d,h,mm,s = v.split(",")
except: continue # no time ?
tt=v="%s-%s-%s %s:%s:%s" % (y,m,d,h,mm,s)
if v: row.append(k+":"+" "*max(1,indent-1-len(k))+v.replace(r"\n","\n"))
if row: out.append((tt,"\n".join(row)))
else: headers = r
print "\n\n".join(o for _,o in sorted(out))
| 30.587302
| 151
| 0.565127
|