repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
XFL | XFL-master/test/algorithm/framework/vertical/test_xgboost.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import random
import shutil
import string
from multiprocess.pool import ApplyResult
import numpy as np
import pandas as pd
import pytest
import service.fed_config
from algorithm.core.paillier_acceleration import embed, umbed
from algorithm.core.tree.tree_structure import Node, Tree
from algorithm.framework.vertical.xgboost import (decision_tree_label_trainer,
decision_tree_trainer)
from algorithm.framework.vertical.xgboost.label_trainer import \
VerticalXgboostLabelTrainer
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
from common.communication.gRPC.python.channel import (BroadcastChannel,
DualChannel)
from common.communication.gRPC.python.commu import Commu
from common.crypto.paillier.paillier import Paillier
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.tree.tree_structure import Node, SplitInfo
# Fix the RNG so generated node ids / samples are reproducible across runs.
random.seed(1)
# Module-level Paillier key pair shared by every test in this file:
# a 2048-bit private context (second arg presumably enables the DJN
# optimization — matches djn_on=True in the configs below; confirm)
# and its derived public context.
private_context = Paillier.context(2048, True)
public_context = private_context.to_public()
def prepare_data():
    """Write the guest/host train and test CSV splits used by the tests.

    Builds a 100-row synthetic frame, derives a binary label from the sum
    x0 + x2 + x3 > 40, and persists the label-trainer (guest) columns and
    the trainer (host) columns as an 80/20 head/tail split under
    /opt/dataset/unit_test. Also pins the local FedNode identity to node-1.
    """
    idx = np.arange(100)
    frame = pd.DataFrame({
        'x0': idx,
        'x1': idx,
        'x2': 2 * idx - 40.0,
        'x3': 3 * idx + 1.0,
        'x4': idx[::-1],
    })
    frame['y'] = np.where(frame['x0'] + frame['x2'] + frame['x3'] > 40, 1, 0)

    guest_cols = ['y', 'x0', 'x1', 'x2']
    host_cols = ['x3', 'x4']
    frame[guest_cols].head(80).to_csv(
        "/opt/dataset/unit_test/train_guest.csv", index=True
    )
    frame[guest_cols].tail(20).to_csv(
        "/opt/dataset/unit_test/test_guest.csv", index=True
    )
    frame[host_cols].head(80).to_csv(
        "/opt/dataset/unit_test/train_host.csv", index=True
    )
    frame[host_cols].tail(20).to_csv(
        "/opt/dataset/unit_test/test_host.csv", index=True
    )

    FedNode.node_id = "node-1"
    FedNode.node_name = "node-1"
def prepare_test_data():
    """Write 99-row inference CSVs plus pretrained .pmodel files for both nodes.

    infer_guest.csv keeps the label and guest features; infer_host.csv keeps
    the host features. Two JSON model files are written: a full single-tree
    model for node-1 (guest) and a single split-node record for node-2 (host).
    """
    case_df = pd.DataFrame({
        'x0': np.arange(99),
        'x1': np.arange(99),
        'x2': 2 * np.arange(99) - 40.0,
        'x3': 3 * np.arange(99) + 1.0,
        'x4': np.arange(99)[::-1]
    })
    # Same label rule as the training split: positive iff x0 + x2 + x3 > 40.
    case_df['y'] = np.where(
        case_df['x0'] + case_df['x2'] + case_df['x3'] > 40, 1, 0)
    case_df[['y', 'x0', 'x1', 'x2']].to_csv(
        "/opt/dataset/unit_test/infer_guest.csv", index=True
    )
    case_df[['x3', 'x4']].to_csv(
        "/opt/dataset/unit_test/infer_host.csv", index=True
    )
    # Pretrained guest-side model: one depth-2 tree rooted at node-1 with a
    # categorical split at the root and two leaf children.
    xgb_output = {
        "suggest_threshold": 0.6161117553710938,
        "lr": [0.3],
        "max_depth": [2],
        "trees": [
            {
                "party_id": "node-1",
                "tree_index": 0,
                "root_node_id": "0_4lN0P7QTwWq25Eei",
                "nodes": {
                    "0_4lN0P7QTwWq25Eei": {
                        "id": "0_4lN0P7QTwWq25Eei", "depth": 0, "left_node_id": "0_gw94EBW5tiD8kCqG",
                        "right_node_id": "0_vpKZWumTxYcojXLq",
                        "split_info": {
                            "owner_id": "node-1", "feature_idx": 0, "is_category": True,
                            "split_point": None, "left_cat": [4, 2, 6, 1]
                        },
                        "is_leaf": False,
                        "weight": None, "linkage": None
                    }, "0_gw94EBW5tiD8kCqG": {
                        "id": "0_gw94EBW5tiD8kCqG", "depth": 1, "left_node_id": None, "right_node_id": None,
                        "split_info": None,
                        "is_leaf": True, "weight": 1.5769230769230769, "linkage": "left"
                    },
                    "0_vpKZWumTxYcojXLq": {
                        "id": "0_vpKZWumTxYcojXLq", "depth": 1, "left_node_id": None,
                        "right_node_id": None,
                        "split_info": None, "is_leaf": True, "weight": -1.5, "linkage": "right"
                    }
                }
            }
        ],
        "version": "1.0", "loss_method": "BCEWithLogitsLoss", "num_trees": 1,
        "node_id_group": {
            "0_4lN0P7QTwWq25Eei": ["0_4lN0P7QTwWq25Eei"]
        }
    }
    with open("/opt/checkpoints/unit_test/node-1/vertical_xgboost_guest.pmodel", 'w') as f:
        json.dump(xgb_output, f)
    # Pretrained host-side model: a single categorical split node owned by node-2.
    xgb_output = {"4_WTqDQjPt39iMc7Ug": {"id": "4_WTqDQjPt39iMc7Ug",
                                         "split_info": {"owner_id": "node-2", "feature_idx": 0, "is_category": True,
                                                        "split_point": None, "left_cat": [1, 0, 2, 5]}}}
    with open("/opt/checkpoints/unit_test/node-2/vertical_xgboost_host.pmodel", 'w') as f:
        json.dump(xgb_output, f)
def enc_grad_hess(grad, hess):
    """Paillier-encrypt gradients/hessians the way the label trainer would.

    When only one of *grad* / *hess* is provided, that single vector is
    encrypted with precision 7. When both are provided, they are first packed
    into one plaintext via embed() and encrypted with precision 0 (the
    embedding requires it).
    """
    if grad is not None and hess is not None:
        # Pack both vectors into a single big-integer plaintext.
        packed = embed([grad, hess], interval=(
            1 << 128), precision=64)
        return Paillier.encrypt(context=private_context,
                                data=packed,
                                precision=0,  # must be 0
                                obfuscation=True,
                                num_cores=1)
    # Exactly one vector supplied: encrypt whichever is not None.
    single = grad if hess is None else hess
    return Paillier.encrypt(context=private_context,
                            data=single,
                            precision=7,
                            obfuscation=True,
                            num_cores=1)
@pytest.fixture()
def get_label_trainer_infer_conf():
    """Yield an inference-mode config for the label trainer (guest) role.

    Points the test set at infer_guest.csv and the pretrained model at
    node-1's vertical_xgboost_guest.pmodel produced by prepare_test_data().
    """
    conf = {
        "identity": "label_trainer",
        "model_info": {
            "name": "vertical_xgboost",
            "config": {}
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": "/opt/dataset/unit_test",
                    "name": "infer_guest.csv",
                    "has_label": True,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": "/opt/checkpoints/unit_test/node-1",
                "name": "vertical_xgboost_guest.pmodel"
            }
        },
        "output": {
            "path": "/opt/checkpoints/unit_test/node-1",
            "testset": {
                "name": "predicted_probabilities_train.csv"
            }
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                # Batch covers the whole 99-row inference set in one pass.
                "batch_size_val": 99
            }
        }
    }
    yield conf
@pytest.fixture()
def get_trainer_infer_conf():
    """Yield an inference-mode config for the trainer (host) role.

    Points the unlabeled test set at infer_host.csv and the pretrained model
    at node-2's vertical_xgboost_host.pmodel produced by prepare_test_data().
    """
    conf = {
        "identity": "trainer",
        "model_info": {
            "name": "vertical_xgboost",
            "config": {}
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": "/opt/dataset/unit_test",
                    "name": "infer_host.csv",
                    "has_label": False,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": "/opt/checkpoints/unit_test/node-2",
                "name": "vertical_xgboost_host.pmodel"
            }
        },
        "output": {
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                "batch_size_val": 99
            }
        }
    }
    yield conf
@pytest.fixture()
def get_label_trainer_conf():
    """Yield the default label-trainer training config with unit-test overrides.

    Loads the shipped label_trainer.json, then redirects all input/output
    paths under /opt/{dataset,checkpoints}/unit_test and shrinks the training
    hyperparameters (1 tree, depth 2, 10 bins) so the test runs quickly.
    """
    with open("algorithm/config/vertical_xgboost/label_trainer.json") as f:
        conf = json.load(f)
    conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
    conf["input"]["trainset"][0]["name"] = "train_guest.csv"
    conf["input"]["valset"][0]["path"] = "/opt/dataset/unit_test"
    conf["input"]["valset"][0]["name"] = "test_guest.csv"
    conf["input"]["testset"] = []
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    conf["output"]["prediction_train"]["name"] = "/opt/checkpoints/unit_test/predicted_probabilities_train.csv"
    conf["output"]["prediction_val"]["name"] = "/opt/checkpoints/unit_test/predicted_probabilities_val.csv"
    conf["output"]["model"]["name"] = "vertical_xgboost_guest.model"
    conf["output"]["proto_model"]["name"] = "vertical_xgboost_guest.pmodel"
    conf["output"]["feature_importance"]["name"] = "/opt/checkpoints/unit_test/feature_importances.csv"
    conf["train_info"]["train_params"]["num_bins"] = 10
    conf["train_info"]["train_params"]["max_depth"] = 2
    conf["train_info"]["train_params"]["min_sample_split"] = 1
    # GOSS row downsampling: keep top 50% by gradient plus 50% of the rest.
    conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
    conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
    conf["train_info"]["train_params"]["num_trees"] = 1
    conf["train_info"]["train_params"]["max_num_cores"] = 2
    conf["train_info"]["train_params"]["metric"] = {
        "acc": {},
        "precision": {},
        "recall": {},
        "f1_score": {},
        "auc": {},
    }
    conf["train_info"]["train_params"]["early_stopping"]["key"] = "acc"
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the default trainer (host) training config with unit-test overrides.

    Mirrors get_label_trainer_conf: redirects paths to the unit-test
    directories and shrinks hyperparameters for a fast single-tree run.
    """
    with open("algorithm/config/vertical_xgboost/trainer.json") as f:
        conf = json.load(f)
    conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
    conf["input"]["trainset"][0]["name"] = "train_host.csv"
    conf["input"]["valset"][0]["path"] = "/opt/dataset/unit_test"
    conf["input"]["valset"][0]["name"] = "test_host.csv"
    conf["input"]["testset"] = []
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    conf["output"]["model"]["name"] = "vertical_xgboost_host.model"
    conf["output"]["proto_model"]["name"] = "vertical_xgboost_host.pmodel"
    conf["train_info"]["train_params"]["num_bins"] = 10
    conf["train_info"]["train_params"]["max_depth"] = 2
    conf["train_info"]["train_params"]["min_sample_split"] = 1
    conf["train_info"]["train_params"]["num_trees"] = 1
    conf["train_info"]["train_params"]["max_num_cores"] = 2
    # Small row batch to exercise the batched-histogram code path.
    conf["train_info"]["train_params"]["advanced"]["row_batch"] = 20
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide test environment.

    Sets the communication identities, switches into the `python` working
    directory, creates the /opt dataset/checkpoint/config directories and
    writes the fixture data; tears everything down after the module's tests.
    """
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    os.chdir("python")
    # exist_ok=True replaces the racy exists()-then-makedirs pattern.
    os.makedirs("/opt/dataset/unit_test", exist_ok=True)
    os.makedirs("/opt/checkpoints/unit_test/node-1", exist_ok=True)
    os.makedirs("/opt/checkpoints/unit_test/node-2", exist_ok=True)
    os.makedirs("/opt/config/unit_test", exist_ok=True)
    prepare_data()
    prepare_test_data()
    yield
    # Best-effort cleanup; ignore_errors keeps teardown from masking the
    # real test outcome if a file is already gone or locked.
    for path in ("/opt/dataset/unit_test",
                 "/opt/config/unit_test",
                 "/opt/checkpoints/unit_test"):
        shutil.rmtree(path, ignore_errors=True)
    os.chdir("..")
class TestVerticalXgboost:
    """End-to-end tests for the vertical XGBoost label-trainer and trainer.

    All cross-party communication (BroadcastChannel / DualChannel) is mocked
    with scripted responses keyed on each mock's call_count, so each role can
    run its fit() loop in isolation.
    """

    @pytest.mark.filterwarnings('ignore::DeprecationWarning')
    # NOTE(review): the parametrize name 'embed' shadows the module-level
    # `embed` imported from algorithm.core.paillier_acceleration; inside this
    # test it is only a bool selecting the packed-ciphertext code path.
    @pytest.mark.parametrize('embed', [(True), (False)])
    def test_label_trainer(self, get_label_trainer_conf, embed, mocker):
        """Run VerticalXgboostLabelTrainer.fit() with all peer traffic mocked.

        DualChannel.recv is replaced by a script keyed on call_count that
        feeds back histogram lists and sample-split masks, so the whole
        training loop runs locally without a real trainer party.
        """
        def mock_generate_id(*args, **kwargs):
            # Deterministic tree-node ids: "1", "2", ... per call.
            return str(mock_tree_generate_id.call_count)

        def mock_dualchannel_recv(*args, **kwargs):
            # Scripted replies for DualChannel.recv, selected by call_count.
            if embed:
                # recv summed_grad_hess
                if mock_channel_recv.call_count in [1, 2, 4]:
                    hist_list = [(np.zeros(8), np.array([8] * 10))
                                 for _ in range(2)]
                    return [False, hist_list, [2]]
                elif mock_channel_recv.call_count in [6, 7]:
                    # Bit-packed sample-split masks per tree-node id.
                    return {'1': np.packbits(np.array([True, True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True, True])),
                            '2': np.packbits(np.array([True, True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True, True])),
                            '3': np.packbits(np.array([True, True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True, True])),
                            }
                elif mock_channel_recv.call_count <= 5 or (
                        mock_channel_recv.call_count >= 8 and mock_channel_recv.call_count <= 12):
                    # features = pd.DataFrame({
                    #     'x3': np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9]),
                    #     'x4': np.array([9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
                    # }
                    # )
                    # sample_index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 48, 52, 53, 55, 59, 60, 61, 63, 64, 65, 66, 68, 70, 73, 74, 75, 76, 77, 78, 79]
                    # grad = [0.8333333, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.5, -0.5]
                    # hess = [0.41666666, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.25, 0.25]
                    # grad_hess = embed([grad, hess], interval=(
                    #     1 << 128), precision=64)
                    # enc_grad_hess = Paillier.encrypt(context=private_context,
                    #                                  data=grad_hess,
                    #                                  precision=0,  # must be 0
                    #                                  obfuscation=True,
                    #                                  num_cores=1)
                    # enc_grad_hess = Paillier.serialize(enc_grad_hess, compression=False)
                    # grad_hess = Paillier.ciphertext_from(public_context, enc_grad_hess, compression=False)
                    # big_feature = Feature.create(values=features.iloc[sample_index,:],sample_index=sample_index, grad_hess=grad_hess)
                    # res = []
                    # for col_name in big_feature.feature_columns:
                    #     res.append(big_feature.data.groupby([col_name])['xfl_grad_hess'].agg({'count', 'sum'}))
                    # hist_list = [(res_hist['sum'].to_numpy(), res_hist['count'].to_numpy()) for res_hist in res]
                    hist_list = [(np.zeros(8), np.array([8] * 10))
                                 for _ in range(2)]
                    return [False, hist_list]
                elif mock_channel_recv.call_count <= 7:
                    # NOTE(review): counts 1-7 are all matched by the branches
                    # above, so this branch looks unreachable — confirm.
                    return {'1': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True])
                            }
                elif mock_channel_recv.call_count <= 14 and mock_channel_recv.call_count >= 13:
                    # Unpacked boolean sample-split masks per tree-node id.
                    return {'1': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True])
                            }
            elif not embed:
                # Same script, but histograms carry separate grad/hess arrays.
                if mock_channel_recv.call_count in [1, 2, 4]:
                    hist_list = [(np.zeros(8), np.zeros(
                        8), np.array([8] * 10)) for _ in range(2)]
                    return [False, hist_list, [2]]
                elif mock_channel_recv.call_count in [6, 7]:
                    return {'1': np.packbits(np.array([True, True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True, True])),
                            '2': np.packbits(np.array([True, True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True, True])),
                            '3': np.packbits(np.array([True, True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True, True])),
                            }
                elif mock_channel_recv.call_count <= 5 or (
                        mock_channel_recv.call_count >= 8 and mock_channel_recv.call_count <= 12):
                    hist_list = [(np.zeros(8), np.zeros(
                        8), np.array([8] * 10)) for _ in range(2)]
                    return [False, hist_list]
                elif mock_channel_recv.call_count <= 7:
                    # NOTE(review): as above, this branch appears unreachable.
                    return {'1': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True])
                            }
                elif mock_channel_recv.call_count <= 14 and mock_channel_recv.call_count >= 13:
                    return {'1': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True, True])
                            }

        def mock_broadcasetchannel_recv():
            # Unused placeholder (note the typo in the name); kept as-is.
            pass

        if not embed:
            mocker.patch.object(decision_tree_label_trainer, "EMBEDING", False)
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(FedConfig, "get_trainer", return_value=["node-2"])
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(Commu, "node_id", "node-1")
        mocker.patch.object(Commu, "trainer_ids", ["node-1", "node-2"])
        mocker.patch.object(Commu, "scheduler_id", "scheduler")
        # Peer reports its dataset dimensions during the handshake.
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[{"train": (80, 3), "valid": (20, 3)}]
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast"
        )
        mocker.patch.object(
            DualChannel, "send"
        )
        mock_channel_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_dualchannel_recv
        )
        mock_tree_generate_id = mocker.patch.object(
            Tree, "_generate_id", side_effect=mock_generate_id
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        mocker.patch.object(
            FedNode, "config", return_value={"trainer": {"node-2": []}}
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(
            get_label_trainer_conf)
        mocker.patch.object(
            xgb_label_trainer.channels["sync"], "collect", return_value=[{"node-2": [1, 2]}, {"node-3": [1, 2]}]
        )
        xgb_label_trainer.fit()
        self.check_label_trainer_output()

    @pytest.mark.parametrize('embed', [(True), (False)])
    def test_trainer(self, get_trainer_conf, embed, mocker):
        """Run VerticalXgboostTrainer.fit() with the label trainer mocked.

        BroadcastChannel.recv is scripted to hand out the training config,
        the Paillier public context, encrypted grad/hess and a root node;
        DualChannel.recv supplies the min-split info and the early-stop flag.
        """
        def mock_broadcastchannel_recv(*args, **kwargs):
            # Training config normally broadcast by the label trainer.
            config = {
                "train_info": {
                    "interaction_params": {
                        "save_frequency": -1,
                        "echo_training_metrics": True,
                        "write_training_prediction": True,
                        "write_validation_prediction": True
                    },
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "num_trees": 1,
                        "num_bins": 10,
                        "downsampling": {
                            "row": {
                                "run_goss": True
                            }
                        },
                        "encryption": {
                            "paillier": {
                                "key_bit_size": 2048,
                                "precision": 7,
                                "djn_on": True,
                                "parallelize_on": True
                            }
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            if broadchannel_recv_mocker.call_count == 1:
                return config
            # elif broadchannel_recv_mocker.call_count == 2:
            #     encryption = config["train_info"]["train_params"]["encryption"]
            #     if "paillier" in encryption:
            #         encryption = encryption["paillier"]
            #         private_context = Paillier.context(
            #             encryption["key_bit_size"], encryption["djn_on"])
            #         return private_context.to_public().serialize()
            #     else:
            #         return None
            if embed:
                # recv embed grad hess
                if broadchannel_recv_mocker.call_count in [3, 6]:
                    grad = np.array(
                        [0.8333333, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
                         0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.5, -0.5])
                    hess = np.array(
                        [0.41666666, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.25, 0.25])
                    return Paillier.serialize(enc_grad_hess(grad, hess), compression=False)
                # recv public context for Paillier
                elif broadchannel_recv_mocker.call_count == 2:
                    return public_context.serialize()
                # recv tree node
                elif broadchannel_recv_mocker.call_count == 4:
                    def _generate_id():
                        id = ''.join(random.sample(
                            string.ascii_letters + string.digits, 16))
                        return id
                    return Node(id=_generate_id(), depth=0)
            elif not embed:
                # recv grad and hess
                if broadchannel_recv_mocker.call_count in [3, 6]:
                    grad = np.array(
                        [0.8333333, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
                         0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.5, -0.5])
                    hess = np.array(
                        [0.41666666, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.25, 0.25])
                    return Paillier.serialize(enc_grad_hess(grad, None), compression=False), Paillier.serialize(
                        enc_grad_hess(None, hess), compression=False)
                # recv public context for Paillier
                elif broadchannel_recv_mocker.call_count == 2:
                    return public_context.serialize()
                # recv tree node
                elif broadchannel_recv_mocker.call_count == 4:
                    def _generate_id():
                        id = ''.join(random.sample(
                            string.ascii_letters + string.digits, 16))
                        return id
                    return Node(id=_generate_id(), depth=0)

        def mock_dualchannel_recv(*args, **kwargs):
            # recv min split info
            if dualchannel_recv_mocker.call_count == 1:
                return -1, 1, 1
            # recv early stop
            elif dualchannel_recv_mocker.call_count == 2:
                return True

        if not embed:
            mocker.patch.object(decision_tree_trainer, "EMBEDING", False)
        mocker.patch.object(FedConfig, "get_label_trainer",
                            return_value=["node-1"])
        mocker.patch.object(FedNode, "node_id", "node-2")
        mocker.patch.object(FedNode, "create_channel")
        mocker.patch.object(Commu, "node_id", "node-2")
        mocker.patch.object(Commu, "trainer_ids", ["node-1", "node-2"])
        mocker.patch.object(Commu, "scheduler_id", "scheduler")
        broadchannel_recv_mocker = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_broadcastchannel_recv
        )
        dualchannel_recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_dualchannel_recv
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_trainer = VerticalXgboostTrainer(get_trainer_conf)
        xgb_trainer.fit()
        self.check_trainer_output()

    @staticmethod
    def check_label_trainer_output():
        """Assert that fit() on the label trainer produced every expected artifact."""
        # Check that the prediction files were written correctly
        assert os.path.exists(
            "/opt/checkpoints/unit_test/predicted_probabilities_train.csv")
        assert os.path.exists(
            "/opt/checkpoints/unit_test/predicted_probabilities_val.csv")
        # Check that the model files were written correctly
        assert os.path.exists(
            "/opt/checkpoints/unit_test/node-2/vertical_xgboost_host.pmodel")
        assert os.path.exists(
            "/opt/checkpoints/unit_test/node-1/vertical_xgboost_guest.pmodel")
        # Check that the model config was written correctly
        assert os.path.exists("/opt/checkpoints/unit_test/model_config.json")
        with open("/opt/checkpoints/unit_test/model_config.json") as f:
            model_config = json.load(f)
        assert model_config[0]["class_name"] == "VerticalXGBooster"
        assert model_config[0]["filename"] == "vertical_xgboost_guest.pmodel"
        # Check that the feature importance file was written correctly
        assert os.path.exists(
            "/opt/checkpoints/unit_test/feature_importances.csv")

    @staticmethod
    def check_trainer_output():
        """Assert that fit() on the trainer produced every expected artifact."""
        # Check that the model file was written correctly
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_xgboost_host.pmodel")
        # Check that the model config was written correctly
        assert os.path.exists("/opt/checkpoints/unit_test/model_config.json")
        with open("/opt/checkpoints/unit_test/model_config.json") as f:
            model_config = json.load(f)
        assert model_config[2]["class_name"] == "VerticalXGBooster"
        assert model_config[2]["filename"] == "vertical_xgboost_host.pmodel"
| 33,748 | 48.053779 | 557 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_xgb.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import pytest
import json
from multiprocess.pool import ApplyResult
import pandas as pd
import numpy as np
from google.protobuf import json_format
import service.fed_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.paillier_acceleration import embed
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.paillier.paillier import Paillier
from algorithm.core.tree.tree_structure import Node
from algorithm.framework.vertical.xgboost.label_trainer import VerticalXgboostLabelTrainer
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
from algorithm.framework.vertical.xgboost.decision_tree_label_trainer import VerticalDecisionTreeLabelTrainer
from algorithm.framework.vertical.xgboost.decision_tree_trainer import VerticalDecisionTreeTrainer
from common.model.python.tree_model_pb2 import XGBoostModel, NodeModel
@pytest.fixture(scope='module', autouse=True)
def prepare_data(tmp_factory):
    """Write guest/host train/test CSV splits into tmp_factory and pin node ids.

    Fix: `np.random.uniform(200)` passes 200 as `low` (with default
    `high=1.0`) and returns a single scalar, which pandas broadcasts into a
    constant x2 column; `size=200` yields the intended per-row random values.
    """
    df = pd.DataFrame({
        "x0": np.random.random(200),
        "x1": np.round(np.random.random(200) * 10.0),
        "x2": np.random.uniform(size=200) * 2.0,
        "x3": np.random.random(200) * 3.0,
        "x4": np.arange(0, 200, 1),
        'y': np.round(np.random.random(200))
    })
    # Guest (label trainer) keeps the label; host only sees x3/x4.
    df[['y', 'x0', 'x1', 'x2']].head(120).to_csv(
        tmp_factory.join("train_guest.csv"), index=True, index_label='id'
    )
    df[['y', 'x0', 'x1', 'x2']].tail(80).to_csv(
        tmp_factory.join("test_guest.csv"), index=True, index_label='id'
    )
    df[['x3', 'x4']].head(120).to_csv(
        tmp_factory.join("train_host.csv"), index=True, index_label='id'
    )
    df[['x3', 'x4']].tail(80).to_csv(
        tmp_factory.join("test_host.csv"), index=True, index_label='id'
    )
    # Pin this process's federation identity to node-1.
    Commu.node_id = "node-1"
    FedNode.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    service.fed_node.FedNode.node_name = 'node-1'
@pytest.fixture(scope='module', autouse=True)
def prepare_model(tmp_factory):
    """Serialize pretrained protobuf models for both parties into tmp_factory.

    Builds an XGBoostModel (guest, node-1: one depth-2 tree) and a NodeModel
    (host, node-2: a single categorical split node) via
    google.protobuf.json_format and writes them as binary .pmodel files.
    """
    d = {
        "suggest_threshold": 0.6161117553710938,
        "lr": [0.3],
        "max_depth": [2],
        "trees": [
            {
                "party_id": "node-1",
                "tree_index": 0,
                "root_node_id": "0_4lN0P7QTwWq25Eei",
                "nodes": {
                    "0_4lN0P7QTwWq25Eei": {
                        "id": "0_4lN0P7QTwWq25Eei", "depth": 0, "left_node_id": "0_gw94EBW5tiD8kCqG",
                        "right_node_id": "0_vpKZWumTxYcojXLq",
                        "split_info": {
                            "owner_id": "node-1", "feature_idx": 0, "is_category": True,
                            "split_point": None, "left_cat": [4, 2, 6, 1]
                        },
                        "is_leaf": False,
                        "weight": None, "linkage": None
                    }, "0_gw94EBW5tiD8kCqG": {
                        "id": "0_gw94EBW5tiD8kCqG", "depth": 1, "left_node_id": None, "right_node_id": None,
                        "split_info": None,
                        "is_leaf": True, "weight": 1.5769230769230769, "linkage": "left"
                    },
                    "0_vpKZWumTxYcojXLq": {
                        "id": "0_vpKZWumTxYcojXLq", "depth": 1, "left_node_id": None,
                        "right_node_id": None,
                        "split_info": None, "is_leaf": True, "weight": -1.5, "linkage": "right"
                    }
                }
            }
        ],
        "version": "1.0", "loss_method": "BCEWithLogitsLoss", "num_trees": 1,
        "node_id_group": {
            "0_4lN0P7QTwWq25Eei": {"node_id_list": ["0_4lN0P7QTwWq25Eei"]}
        }
    }
    xgb = XGBoostModel()
    json_format.ParseDict(d, xgb)
    xgb_output = xgb.SerializeToString()
    with open(tmp_factory.join("vertical_xgboost_guest.pmodel"), 'wb') as f:
        f.write(xgb_output)
    # Host-side model: one categorical split node owned by node-2.
    d = {"nodes": {"4_WTqDQjPt39iMc7Ug": {"id": "4_WTqDQjPt39iMc7Ug",
                                          "split_info": {"owner_id": "node-2", "feature_idx": 0, "is_category": True,
                                                         "split_point": None, "left_cat": [1, 0, 2, 5]}}}}
    xgb = NodeModel()
    json_format.ParseDict(d, xgb)
    xgb_output = xgb.SerializeToString()
    with open(tmp_factory.join("vertical_xgboost_host.pmodel"), 'wb') as f:
        f.write(xgb_output)
@pytest.fixture()
def get_label_trainer_infer_conf(tmp_factory):
    """Yield an inference-mode config for the label trainer (guest) role,
    rooted at tmp_factory and pointing at the protobuf guest model."""
    conf = {
        "identity": "label_trainer",
        "model_info": {
            "name": "vertical_xgboost"
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": str(tmp_factory),
                    "name": "test_guest.csv",
                    "has_label": True,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": str(tmp_factory),
                "name": "vertical_xgboost_guest.pmodel"
            }
        },
        "output": {
            "path": str(tmp_factory),
            "testset": {
                "name": "predicted_probabilities_train.csv"
            }
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                # Covers the whole 80-row test set in one validation batch.
                "batch_size_val": 80
            }
        }
    }
    yield conf
@pytest.fixture()
def get_trainer_infer_conf(tmp_factory):
    """Yield an inference-mode config for the trainer (host) role,
    rooted at tmp_factory and pointing at the protobuf host model."""
    conf = {
        "identity": "trainer",
        "model_info": {
            "name": "vertical_xgboost"
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": str(tmp_factory),
                    "name": "test_host.csv",
                    "has_label": False,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": str(tmp_factory),
                "name": "vertical_xgboost_host.pmodel"
            }
        },
        "output": {
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                "batch_size_val": 99
            }
        }
    }
    yield conf
class TestVerticalXGBoost:
@pytest.mark.parametrize("dataset, empty", [("train", False), ("valid", False), ("test", False),
("train", True), ("valid", True), ("test", True)])
def test_check_dataset(self, get_label_trainer_infer_conf, mocker, tmp_factory, dataset, empty):
with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
conf = json.load(f)
conf["input"]["trainset"][0]["path"] = str(tmp_factory)
conf["input"]["trainset"][0]["name"] = "train_guest.csv"
conf["input"]["valset"][0]["path"] = str(tmp_factory)
conf["input"]["valset"][0]["name"] = "test_guest.csv"
if dataset == "test":
conf["inference"] = True
conf["input"]["testset"][0]["path"] = str(tmp_factory)
conf["input"]["testset"][0]["name"] = "test_guest.csv"
else:
del conf["input"]["testset"]
conf["output"]["path"] = str(tmp_factory)
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
mocker.patch.object(
BroadcastChannel, "broadcast", return_value=None
)
mocker.patch.object(
BroadcastChannel, "collect", return_value=[{}]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
)
xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
def mock_dim():
if empty:
d = {"train": (120, 2), "valid": (80, 2), "test": (80, 2)}
d[dataset] = (d[dataset][0], 0)
dims = [d]
else:
dims = [{"train": (120, 2), "valid": (80, 2), "test": (80, 2)}]
d = {"train": (120, 2), "valid": (80, 2), "test": (80, 2)}
d[dataset] = (1, 2)
dims.append(d)
return dims
mocker.patch.object(
xgb_label_trainer.channels["check_dataset_com"], "collect", side_effect=mock_dim
)
if empty:
if dataset == "train":
xgb_label_trainer.train_features = pd.DataFrame()
elif dataset == "valid":
xgb_label_trainer.val_features = pd.DataFrame()
elif dataset == "test":
xgb_label_trainer.test_features = pd.DataFrame()
with pytest.raises(ValueError) as e:
xgb_label_trainer.check_dataset()
    @pytest.mark.parametrize("num_bins", [2, 8, 128, 1024, 100000])
    def test_cat_label_trainer(self, mocker, tmp_factory, num_bins):
        """
        Parametrized test covering both the case where the number of categories
        is greater or smaller than num_bins, and exercising initialization for
        num_bins falling into each of its three value ranges.
        Args:
            tmp_factory:
            num_bins:
        Returns:
        """
        with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_guest.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_guest.csv"
        conf["output"]["path"] = str(tmp_factory)
        # Mark column "x1" as a categorical feature by name.
        conf["train_info"]["train_params"]["category"]["cat_features"]["col_names"] = [
            "x1"]
        conf["train_info"]["train_params"]["num_bins"] = num_bins
        del conf["input"]["testset"]
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[]
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=None
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
        mocker.patch.object(
            xgb_label_trainer.channels["check_dataset_com"], "collect", return_value=[]
        )
        mocker.patch.object(
            xgb_label_trainer.channels["sync"], "collect", return_value=[{}]
        )
        mocker.patch.object(
            FedNode, "config", return_value={"trainer": {}}
        )
        xgb_label_trainer.fit()
        self.check_label_trainer_output(tmp_factory)
    def test_trainer(self, mocker, tmp_factory):
        """End-to-end fit() of the passive-party trainer with every federation
        channel mocked: config sync, Paillier public key exchange, encrypted
        grad/hess broadcast, tree-node and split-info exchange."""
        # load default config
        with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_host.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_host.csv"
        conf["output"]["path"] = str(tmp_factory)
        # if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
        del conf["input"]["testset"]
        # mocker channels in VerticalXgboostTrainer.__init__
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        def mock_func(*args, **kwargs):
            """
            mock encryption keys
            Args:
                *args:
                **kwargs:
            Returns:
                the paillier context
            """
            config = {
                "train_info": {
                    "interaction_params": {
                        "save_frequency": -1,
                        "echo_training_metrics": True,
                        "write_training_prediction": True,
                        "write_validation_prediction": True
                    },
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "num_trees": 10,
                        "num_bins": 16,
                        "downsampling": {
                            "row": {
                                "run_goss": True
                            }
                        },
                        "encryption": {
                            "paillier": {
                                "key_bit_size": 2048,
                                "precision": 7,
                                "djn_on": True,
                                "parallelize_on": True
                            }
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            # First recv delivers the training config, second the serialized
            # Paillier public key; anything after that returns None.
            if mock_broadcast_recv.call_count == 1:
                return config
            elif mock_broadcast_recv.call_count == 2:
                encryption = config["train_info"]["train_params"]["encryption"]
                if "paillier" in encryption:
                    encryption = encryption["paillier"]
                    private_context = Paillier.context(
                        encryption["key_bit_size"], encryption["djn_on"])
                    return private_context.to_public().serialize()
            else:
                return None
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        xgb_trainer = VerticalXgboostTrainer(conf)
        # mock for iters
        private_context = Paillier.context(2048, True)
        public_context = private_context.to_public()
        xgb_trainer.public_context = public_context
        def mock_grad_hess(*args, **kwargs):
            """
            mock the grad and hess calculation in the label trainer.
            Args:
                *args:
                **kwargs:
            Returns:
                paillier encrypted grad and hess vec
            """
            y = np.array([0, 1] * 60)
            y_pred = np.array([0.5] * 120)
            loss_inst = get_xgb_loss_inst("BCEWithLogitsLoss")
            grad = loss_inst.cal_grad(y, y_pred, after_prediction=True)
            hess = loss_inst.cal_hess(y, y_pred, after_prediction=True)
            # Pack grad and hess into one ciphertext stream before encryption.
            grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
            enc_grad_hess = Paillier.encrypt(context=private_context,
                                             data=grad_hess,
                                             precision=0,  # must be 0
                                             obfuscation=True,
                                             num_cores=999)
            return Paillier.serialize(enc_grad_hess, compression=False)
        def mock_node(*args, **kwargs):
            """
            mock the node passing to the trainer
            Args:
                *args:
                **kwargs:
            Returns:
                an empty None
            """
            if node_mocker.call_count <= 1:
                return Node(id="mock_id")
            else:
                return None
        # mock results from the label trainer according to different channels
        mocker.patch.object(
            xgb_trainer.channels["individual_grad_hess"], "recv", side_effect=mock_grad_hess
        )
        node_mocker = mocker.patch.object(
            xgb_trainer.channels["tree_node"], "recv", side_effect=mock_node
        )
        mocker.patch.object(
            xgb_trainer.channels["min_split_info"], "recv", return_value=[-1, -1, -1]
        )
        mocker.patch.object(
            xgb_trainer.channels["restart_com"], "recv", return_value=0
        )
        mocker.patch.object(
            xgb_trainer.channels["early_stop_com"], "recv", return_value=False
        )
        xgb_trainer.fit()
        self.check_trainer_output(tmp_factory)
    def test_label_trainer(self, mocker, tmp_factory):
        """End-to-end fit() of the label trainer with broadcast channels mocked,
        then verifies every expected output artifact was written."""
        with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_guest.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_guest.csv"
        conf["output"]["path"] = str(tmp_factory)
        del conf["input"]["testset"]
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[{}]
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
        mocker.patch.object(
            xgb_label_trainer.channels["check_dataset_com"], "collect", return_value=[]
        )
        mocker.patch.object(
            FedNode, "config", return_value={"trainer": {}}
        )
        xgb_label_trainer.fit()
        self.check_label_trainer_output(tmp_factory)
        # cover dual channel created in: VerticalXgboostLabelTrainer.__init__
        mocker.patch.object(
            FedConfig, "get_trainer", return_value=["node_id"]
        )
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        VerticalXgboostLabelTrainer(conf)
@staticmethod
def check_label_trainer_output(tmp_factory):
# 检查是否正确输出了预测值文件
assert os.path.exists(tmp_factory.join(
"xgb_prediction_train_[STAGE_ID].csv"))
assert os.path.exists(tmp_factory.join(
"xgb_prediction_val_[STAGE_ID].csv"))
# 检查是否正确输出了模型文件
assert os.path.exists(tmp_factory.join(
"vertical_xgboost_[STAGE_ID].model"))
# 检查是否正确输出了model config
assert os.path.exists(tmp_factory.join("model_config.json"))
with open(tmp_factory.join("model_config.json")) as f:
model_config = json.load(f)
assert model_config[0]["class_name"] == "VerticalXGBooster"
assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].pmodel"
# 检查是否正确输出了feature importance文件
assert os.path.exists(tmp_factory.join(
"xgb_feature_importance_[STAGE_ID].csv"))
@staticmethod
def check_trainer_output(tmp_factory):
# 检查是否正确输出了模型文件
assert os.path.exists(tmp_factory.join(
"vertical_xgboost_[STAGE_ID].pmodel"))
# 检查是否正确输出了model config
assert os.path.exists(tmp_factory.join("model_config.json"))
with open(tmp_factory.join("model_config.json")) as f:
model_config = json.load(f)
assert model_config[0]["class_name"] == "VerticalXGBooster"
assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].pmodel"
    def test_predict_label_trainer(self, get_label_trainer_infer_conf, mocker, tmp_factory):
        """predict() on the label trainer with per-tree leaf assignments mocked;
        checks the number of positive predictions in the written CSV."""
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        def mock_collect(*args, **kwargs):
            # Second collect reports the remote testset dimensions; otherwise empty.
            if collect_mocker.call_count == 2:
                return [{"test": (80, 2)}]
            else:
                return {}
        # Keys are node ids, values are per-sample indicator arrays
        # — presumably leaf-membership masks per tree node; verify against
        # the prediction implementation.
        mocker.patch.object(
            ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
                                              "0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
                                              "0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=None
        )
        collect_mocker = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=None
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(
            get_label_trainer_infer_conf)
        xgb_label_trainer.predict()
        df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
        assert (df["pred"] > 0.5).sum() == 50
    def test_predict_empty_testset(self, get_label_trainer_infer_conf, mocker, tmp_factory):
        """predict() must still produce an output file of the expected shape
        when the inference config carries no testset entry."""
        conf = copy.deepcopy(get_label_trainer_infer_conf)
        del conf["input"]["testset"]
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        def mock_collect(*args, **kwargs):
            # Second collect reports the remote testset dimensions; otherwise empty.
            if collect_mocker.call_count == 2:
                return [{"test": (80, 2)}]
            else:
                return {}
        mocker.patch.object(
            ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
                                              "0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
                                              "0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=None
        )
        collect_mocker = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=None
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
        xgb_label_trainer.predict()
        df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
        assert df.shape == (80, 2)
    def test_predict_trainer(self, get_trainer_infer_conf, mocker, tmp_factory):
        """predict() on the passive-party trainer with the config broadcast and
        all send channels mocked; only checks that it runs to completion."""
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        def mock_func(*args, **kwargs):
            # Minimal training config the trainer expects to receive at startup.
            config = {
                "train_info": {
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            return config
        mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_trainer = VerticalXgboostTrainer(get_trainer_infer_conf)
        xgb_trainer.predict()
| 24,834 | 36.402108 | 117 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_xgb2.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import pytest
import json
from multiprocess.pool import ApplyResult
import pandas as pd
import numpy as np
import service.fed_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.paillier_acceleration import embed
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.paillier.paillier import Paillier
from algorithm.core.tree.tree_structure import Node
from algorithm.framework.vertical.xgboost.label_trainer import VerticalXgboostLabelTrainer
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
from algorithm.framework.vertical.xgboost.decision_tree_label_trainer import VerticalDecisionTreeLabelTrainer
from algorithm.framework.vertical.xgboost.decision_tree_trainer import VerticalDecisionTreeTrainer
@pytest.fixture(scope='module', autouse=True)
def prepare_data(tmp_factory):
    """Write the train/test CSV splits used by every test in this module.

    Builds a 200-row frame with five feature columns and a binary label,
    splits it 120/80 into train/test, and partitions columns between the
    label trainer (guest: y, x0-x2) and the trainer (host: x3-x4). Also
    seeds the federation singletons with a two-node topology.

    Args:
        tmp_factory: pytest tmp-dir fixture; CSVs are written under it.
    """
    df = pd.DataFrame({
        "x0": np.random.random(200),
        # np.round(np.random.random(200) * 10.0),
        "x1": np.random.randint(0, 10, 200),
        # Bug fix: np.random.uniform(200) draws a single scalar (low=200,
        # high=1.0, size=None) which pandas broadcasts into a constant
        # column. Use size=200 so x2 is a genuine per-row random feature.
        "x2": np.random.uniform(size=200) * 2.0,
        "x3": np.random.random(200) * 3.0,
        "x4": np.random.randint(0, 10, 200),  # np.arange(0, 200, 1),
        'y': np.round(np.random.random(200))
    })
    # Guest (label trainer) holds the label and x0-x2.
    df[['y', 'x0', 'x1', 'x2']].head(120).to_csv(
        tmp_factory.join("train_guest.csv"), index=True, index_label='id'
    )
    df[['y', 'x0', 'x1', 'x2']].tail(80).to_csv(
        tmp_factory.join("test_guest.csv"), index=True, index_label='id'
    )
    # Host (passive trainer) holds x3-x4 only.
    df[['x3', 'x4']].head(120).to_csv(
        tmp_factory.join("train_host.csv"), index=True, index_label='id'
    )
    df[['x3', 'x4']].tail(80).to_csv(
        tmp_factory.join("test_host.csv"), index=True, index_label='id'
    )
    # Register this process as node-1 in a two-node federation.
    Commu.node_id = "node-1"
    FedNode.node_id = "node-1"
    FedNode.config = {"trainer": []}
    Commu.trainer_ids = ['node-1', 'node-2']
class TestVerticalXGBoost:
    @pytest.mark.parametrize('feature_index', [(1), (0)])
    def test_decision_tree_trainer(self, mocker, tmp_factory, feature_index):
        """Fit a single VerticalDecisionTreeTrainer under Paillier encryption,
        with the label trainer's grad/hess, tree nodes and split decisions
        mocked. Parametrized over which feature wins the split (the second
        feature, index 1, is categorical)."""
        with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_host.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_host.csv"
        del conf["input"]["testset"]
        conf["output"]["path"] = str(tmp_factory)
        # if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
        conf["train_info"]["train_params"]["category"]["cat_features"]["col_index"] = "1"
        # Force single-column / single-row batching to cover the batched paths.
        conf["train_info"]["train_params"]["advanced"]["col_batch"] = 1
        conf["train_info"]["train_params"]["advanced"]["row_batch"] = 1
        # mocker channels in VerticalXgboostTrainer.__init__
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        def mock_func(*args, **kwargs):
            """
            mock encryption keys
            Args:
                *args:
                **kwargs:
            Returns:
                the paillier context
            """
            config = {
                "train_info": {
                    "interaction_params": {
                        "save_frequency": -1,
                        "echo_training_metrics": True,
                        "write_training_prediction": True,
                        "write_validation_prediction": True
                    },
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "num_trees": 10,
                        "num_bins": 16,
                        "downsampling": {
                            "row": {
                                "run_goss": True
                            }
                        },
                        "encryption": {
                            "paillier": {
                                "key_bit_size": 2048,
                                "precision": 7,
                                "djn_on": True,
                                "parallelize_on": True
                            }
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            # First recv delivers the config, second the serialized public key.
            if mock_broadcast_recv.call_count == 1:
                return config
            elif mock_broadcast_recv.call_count == 2:
                encryption = config["train_info"]["train_params"]["encryption"]
                if "paillier" in encryption:
                    encryption = encryption["paillier"]
                    private_context = Paillier.context(
                        encryption["key_bit_size"], encryption["djn_on"])
                    return private_context.to_public().serialize()
            else:
                return None
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_trainer = VerticalXgboostTrainer(conf)
        # Column sampling determines which features (and split points) this
        # tree actually sees.
        sampled_features, feature_id_mapping = xgb_trainer.col_sample()
        cat_columns_after_sampling = list(filter(
            lambda x: feature_id_mapping[x] in xgb_trainer.cat_columns, list(feature_id_mapping.keys())))
        split_points_after_sampling = [
            xgb_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
        sample_index = [2, 4, 6, 7, 8, 10]
        def mock_grad_hess(*args, **kwargs):
            # Encrypted, embedded grad/hess as the label trainer would send it.
            private_context = Paillier.context(xgb_trainer.xgb_config.encryption_param.key_bit_size,
                                               xgb_trainer.xgb_config.encryption_param.djn_on)
            # grad = np.random.random(xgb_trainer.xgb_config.num_bins)
            # hess = np.random.random(xgb_trainer.xgb_config.num_bins)
            grad = np.random.random(len(sample_index))
            hess = np.random.random(len(sample_index))
            grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
            enc_grad_hess = Paillier.encrypt(private_context,
                                             data=grad_hess,
                                             precision=0,  # must be 0
                                             obfuscation=True,
                                             num_cores=999)
            return Paillier.serialize(enc_grad_hess, compression=False)
        mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_grad_hess
        )
        decision_tree = VerticalDecisionTreeTrainer(tree_param=xgb_trainer.xgb_config,
                                                    features=sampled_features,
                                                    cat_columns=cat_columns_after_sampling,
                                                    split_points=split_points_after_sampling,
                                                    channels=xgb_trainer.channels,
                                                    encryption_context=xgb_trainer.public_context,
                                                    feature_id_mapping=feature_id_mapping,
                                                    tree_index=0)
        def mock_node(*args, **kwargs):
            """
            mock the node passing to the trainer
            Args:
                *args:
                **kwargs:
            Returns:
                an empty None
            """
            if node_mocker.call_count == 1:
                return Node(id="mock_id",
                            depth=1,
                            sample_index=sample_index,
                            )
            elif node_mocker.call_count == 2:
                return Node(id="mock_id_2",
                            depth=1,
                            sample_index=sample_index,
                            )
            else:
                return None
        node_mocker = mocker.patch.object(
            decision_tree.tree_node_chann, "recv", side_effect=mock_node
        )
        # The label trainer always selects `feature_index`, bin 0, left bins [0].
        mocker.patch.object(
            decision_tree.min_split_info_chann, "recv", return_value=[feature_index, 0, [0]]
        )
        decision_tree.fit()
    @pytest.mark.parametrize('feature_index', [(1), (0)])
    def test_decision_tree_trainer_plain(self, mocker, tmp_factory, feature_index):
        """Same as test_decision_tree_trainer but with plain (no) encryption:
        grad/hess arrive as raw numpy arrays instead of Paillier ciphertext."""
        with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_host.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_host.csv"
        del conf["input"]["testset"]
        conf["output"]["path"] = str(tmp_factory)
        # if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
        conf["train_info"]["train_params"]["category"]["cat_features"]["col_index"] = "1"
        # mocker channels in VerticalXgboostTrainer.__init__
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        def mock_func(*args, **kwargs):
            """
            mock encryption keys
            Args:
                *args:
                **kwargs:
            Returns:
                the paillier context
            """
            config = {
                "train_info": {
                    "interaction_params": {
                        "save_frequency": -1,
                        "echo_training_metrics": True,
                        "write_training_prediction": True,
                        "write_validation_prediction": True
                    },
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "num_trees": 10,
                        "num_bins": 16,
                        "downsampling": {
                            "row": {
                                "run_goss": True
                            }
                        },
                        "encryption": {
                            "plain": {
                            }
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            # Plain encryption: the second recv falls through to None since
            # there is no public key to distribute.
            if mock_broadcast_recv.call_count == 1:
                return config
            elif mock_broadcast_recv.call_count == 2:
                encryption = config["train_info"]["train_params"]["encryption"]
                if "paillier" in encryption:
                    encryption = encryption["paillier"]
                    private_context = Paillier.context(
                        encryption["key_bit_size"], encryption["djn_on"])
                    return private_context.to_public().serialize()
            else:
                return None
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_trainer = VerticalXgboostTrainer(conf)
        sampled_features, feature_id_mapping = xgb_trainer.col_sample()
        cat_columns_after_sampling = list(filter(
            lambda x: feature_id_mapping[x] in xgb_trainer.cat_columns, list(feature_id_mapping.keys())))
        split_points_after_sampling = [
            xgb_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
        sample_index = [2, 4, 6, 7, 8, 10]
        def mock_grad_hess(*args, **kwargs):
            # Plain mode: grad/hess arrive unencrypted.
            grad = np.random.random(len(sample_index))
            hess = np.random.random(len(sample_index))
            return [grad, hess]
        mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_grad_hess
        )
        decision_tree = VerticalDecisionTreeTrainer(tree_param=xgb_trainer.xgb_config,
                                                    features=sampled_features,
                                                    cat_columns=cat_columns_after_sampling,
                                                    split_points=split_points_after_sampling,
                                                    channels=xgb_trainer.channels,
                                                    encryption_context=xgb_trainer.public_context,
                                                    feature_id_mapping=feature_id_mapping,
                                                    tree_index=0)
        def mock_node(*args, **kwargs):
            """
            mock the node passing to the trainer
            Args:
                *args:
                **kwargs:
            Returns:
                an empty None
            """
            if node_mocker.call_count == 1:
                return Node(id="mock_id",
                            depth=1,
                            sample_index=sample_index,
                            )
            elif node_mocker.call_count == 2:
                return Node(id="mock_id_2",
                            depth=1,
                            sample_index=sample_index,
                            )
            else:
                return None
        node_mocker = mocker.patch.object(
            decision_tree.tree_node_chann, "recv", side_effect=mock_node
        )
        mocker.patch.object(
            decision_tree.min_split_info_chann, "recv", return_value=[feature_index, 0, [0]]
        )
        decision_tree.fit()
    def test_decision_tree_trainer_exception(self, mocker, tmp_factory):
        """VerticalDecisionTreeTrainer must raise ValueError when the
        encryption method is unrecognized ('palin', a deliberate typo)."""
        with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_host.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_host.csv"
        del conf["input"]["testset"]
        conf["output"]["path"] = str(tmp_factory)
        # if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
        # 	conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
        conf["train_info"]["train_params"]["category"]["cat_features"]["col_index"] = "1"
        # mocker channels in VerticalXgboostTrainer.__init__
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        def mock_func(*args, **kwargs):
            """
            mock encryption keys
            Args:
                *args:
                **kwargs:
            Returns:
                the paillier context
            """
            config = {
                "train_info": {
                    "interaction_params": {
                        "save_frequency": -1,
                        "echo_training_metrics": True,
                        "write_training_prediction": True,
                        "write_validation_prediction": True
                    },
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "num_trees": 10,
                        "num_bins": 16,
                        "downsampling": {
                            "row": {
                                "run_goss": True
                            }
                        },
                        "encryption": {
                            "plain": {
                            }
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            if mock_broadcast_recv.call_count == 1:
                return config
            elif mock_broadcast_recv.call_count == 2:
                encryption = config["train_info"]["train_params"]["encryption"]
                if "paillier" in encryption:
                    encryption = encryption["paillier"]
                    private_context = Paillier.context(
                        encryption["key_bit_size"], encryption["djn_on"])
                    return private_context.to_public().serialize()
            else:
                return None
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_trainer = VerticalXgboostTrainer(conf)
        sampled_features, feature_id_mapping = xgb_trainer.col_sample()
        cat_columns_after_sampling = list(filter(
            lambda x: feature_id_mapping[x] in xgb_trainer.cat_columns, list(feature_id_mapping.keys())))
        split_points_after_sampling = [
            xgb_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
        with pytest.raises(ValueError):
            # Corrupt the method name so the trainer constructor rejects it.
            xgb_trainer.xgb_config.encryption_param.method = 'palin'
            decision_tree = VerticalDecisionTreeTrainer(tree_param=xgb_trainer.xgb_config,
                                                        features=sampled_features,
                                                        cat_columns=cat_columns_after_sampling,
                                                        split_points=split_points_after_sampling,
                                                        channels=xgb_trainer.channels,
                                                        encryption_context=xgb_trainer.public_context,
                                                        feature_id_mapping=feature_id_mapping,
                                                        tree_index=0)
    @pytest.mark.parametrize('run_goss, encryption_method', [(True, "paillier"), (False, "plain")])
    def test_decision_tree_label_trainer(self, mocker, tmp_factory, run_goss, encryption_method):
        """Fit a VerticalDecisionTreeLabelTrainer against a mocked remote
        trainer, covering both GOSS+Paillier and no-GOSS+plain combinations,
        then verifies an invalid encryption method raises ValueError."""
        with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_guest.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_guest.csv"
        conf["output"]["path"] = str(tmp_factory)
        conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"] = run_goss
        if encryption_method == "plain":
            conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
        del conf["input"]["testset"]
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=None
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
        # Start from the constant initial prediction of 0.5.
        train_y_pred = np.zeros_like(xgb_label_trainer.train_label) + 0.5
        sampled_features, feature_id_mapping = xgb_label_trainer.col_sample()
        cat_columns_after_sampling = list(filter(
            lambda x: feature_id_mapping[x] in xgb_label_trainer.cat_columns, list(feature_id_mapping.keys())))
        split_points_after_sampling = [
            xgb_label_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
        decision_tree = VerticalDecisionTreeLabelTrainer(tree_param=xgb_label_trainer.xgb_config,
                                                         y=xgb_label_trainer.train_label,
                                                         y_pred=train_y_pred,
                                                         features=sampled_features,
                                                         cat_columns=cat_columns_after_sampling,
                                                         split_points=split_points_after_sampling,
                                                         channels=xgb_label_trainer.channels,
                                                         encryption_context=xgb_label_trainer.private_context,
                                                         feature_id_mapping=feature_id_mapping,
                                                         tree_index=0)
        mocker_grad_hess = mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker_grad_hess = mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        # Wire up per-peer dual channels for histogram and split exchange.
        decision_tree.summed_grad_hess_channs = {
            "node-2": DualChannel(name="summed_grad_hess_node-2")}
        decision_tree.sample_index_after_split_channs = {
            "node-2": DualChannel(name="sample_index_after_split_node-2")}
        def mock_grad_hess(*args, **kwargs):
            # Remote histogram payload: (continue_flag, grad/hess hists, cat index).
            # Plain mode sends raw arrays; paillier mode sends embedded ciphertext.
            grad = np.random.random(xgb_label_trainer.xgb_config.num_bins)
            hess = np.random.random(xgb_label_trainer.xgb_config.num_bins)
            if encryption_method == "plain":
                if mocker_grad_hess.call_count > 1:
                    return False, [(grad, hess, np.random.randint(1, 10, xgb_label_trainer.xgb_config.num_bins))], [0]
                else:
                    return True, [(grad, hess, np.random.randint(1, 10, xgb_label_trainer.xgb_config.num_bins))], [0]
            grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
            grad_hess_enc = Paillier.encrypt(xgb_label_trainer.private_context,
                                             data=grad_hess,
                                             precision=0,  # must be 0
                                             obfuscation=True,
                                             num_cores=999)
            grad_hess_hist_list = []
            remote_cat_index = []
            grad_hess_hist_list.append(
                (grad_hess_enc, [xgb_label_trainer.xgb_config.num_bins]))
            if mocker_grad_hess.call_count > 1:
                return False, grad_hess_hist_list, remote_cat_index
            else:
                return True, grad_hess_hist_list, remote_cat_index
        mocker_grad_hess = mocker.patch.object(
            decision_tree.summed_grad_hess_channs["node-2"], "recv", side_effect=mock_grad_hess
        )
        mocker.patch.object(
            decision_tree.sample_index_after_split_channs["node-2"],
            "recv",
            return_value=[range(len(decision_tree.y) // 2), [range(len(decision_tree.y) // 2, len(decision_tree.y))]]
        )
        decision_tree.fit()
        with pytest.raises(ValueError):
            # A misspelled encryption method must be rejected at construction.
            xgb_label_trainer.xgb_config.encryption_param.method = 'palin'
            decision_tree = VerticalDecisionTreeLabelTrainer(tree_param=xgb_label_trainer.xgb_config,
                                                             y=xgb_label_trainer.train_label,
                                                             y_pred=train_y_pred,
                                                             features=sampled_features,
                                                             cat_columns=cat_columns_after_sampling,
                                                             split_points=split_points_after_sampling,
                                                             channels=xgb_label_trainer.channels,
                                                             encryption_context=xgb_label_trainer.private_context,
                                                             feature_id_mapping=feature_id_mapping,
                                                             tree_index=0)
# def test_decision_tree_label_trainer(self, mocker, tmp_factory):
# with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
# conf = json.load(f)
# conf["input"]["trainset"][0]["path"] = str(tmp_factory)
# conf["input"]["trainset"][0]["name"] = "train_guest.csv"
# conf["input"]["valset"][0]["path"] = str(tmp_factory)
# conf["input"]["valset"][0]["name"] = "test_guest.csv"
# conf["output"]["path"] = str(tmp_factory)
# del conf["input"]["testset"]
# mocker.patch.object(
# BroadcastChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
# train_y_pred = np.zeros_like(xgb_label_trainer.train_label) + 0.5
# sampled_features, feature_id_mapping = xgb_label_trainer.col_sample()
# cat_columns_after_sampling = list(filter(
# lambda x: feature_id_mapping[x] in xgb_label_trainer.cat_columns, list(feature_id_mapping.keys())))
# split_points_after_sampling = [
# xgb_label_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
# decision_tree = VerticalDecisionTreeLabelTrainer(tree_param=xgb_label_trainer.xgb_config,
# y=xgb_label_trainer.train_label,
# y_pred=train_y_pred,
# features=sampled_features,
# cat_columns=cat_columns_after_sampling,
# split_points=split_points_after_sampling,
# channels=xgb_label_trainer.channels,
# encryption_context=xgb_label_trainer.private_context,
# feature_id_mapping=feature_id_mapping,
# tree_index=0)
# mocker_grad_hess = mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# decision_tree.summed_grad_hess_channs = {
# "node-2": DualChannel(name="summed_grad_hess_node-2")}
# def mock_grad_hess(*args, **kwargs):
# grad = np.random.random(xgb_label_trainer.xgb_config.num_bins)
# hess = np.random.random(xgb_label_trainer.xgb_config.num_bins)
# grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
# grad_hess_enc = Paillier.encrypt(xgb_label_trainer.private_context,
# data=grad_hess,
# precision=0, # must be 0
# obfuscation=True,
# num_cores=999)
# grad_hess_hist_list = []
# remote_cat_index = []
# grad_hess_hist_list.append(
# (grad_hess_enc, [xgb_label_trainer.xgb_config.num_bins]))
# if mocker_grad_hess.call_count > 1:
# return False, grad_hess_hist_list, remote_cat_index
# else:
# return True, grad_hess_hist_list, remote_cat_index
# mocker_grad_hess = mocker.patch.object(
# decision_tree.summed_grad_hess_channs["node-2"], "recv", side_effect=mock_grad_hess
# )
# decision_tree.fit()
# def test_trainer(self, mocker, tmp_factory):
# # load default config
# with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
# conf = json.load(f)
# conf["input"]["trainset"][0]["path"] = str(tmp_factory)
# conf["input"]["trainset"][0]["name"] = "train_host.csv"
# conf["input"]["valset"][0]["path"] = str(tmp_factory)
# conf["input"]["valset"][0]["name"] = "test_host.csv"
# conf["output"]["path"] = str(tmp_factory)
# # if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
# # conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
# # conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
# del conf["input"]["testset"]
# # mocker channels in VerticalXgboostTrainer.__init__
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "send", return_value=None
# )
# mocker.patch.object(
# DualChannel, "send", return_value=None
# )
# def mock_func(*args, **kwargs):
# """
# mock encryption keys
# Args:
# *args:
# **kwargs:
# Returns:
# the paillier context
# """
# config = {
# "train_info": {
# "interaction_params": {
# "save_frequency": -1,
# "echo_training_metrics": True,
# "write_training_prediction": True,
# "write_validation_prediction": True
# },
# "train_params": {
# "lossfunc": {
# "BCEWithLogitsLoss": {}
# },
# "num_trees": 10,
# "num_bins": 16,
# "downsampling": {
# "row": {
# "run_goss": True
# }
# },
# "encryption": {
# "paillier": {
# "key_bit_size": 2048,
# "precision": 7,
# "djn_on": True,
# "parallelize_on": True
# }
# },
# "batch_size_val": 40960
# }
# }
# }
# if mock_broadcast_recv.call_count == 1:
# return config
# elif mock_broadcast_recv.call_count == 2:
# encryption = config["train_info"]["train_params"]["encryption"]
# if "paillier" in encryption:
# encryption = encryption["paillier"]
# private_context = Paillier.context(
# encryption["key_bit_size"], encryption["djn_on"])
# return private_context.to_public().serialize()
# else:
# return None
# mocker.patch.object(
# BroadcastChannel, "__init__", return_value=None
# )
# mock_broadcast_recv = mocker.patch.object(
# BroadcastChannel, "recv", side_effect=mock_func
# )
# xgb_trainer = VerticalXgboostTrainer(conf)
# # mock for iters
# private_context = Paillier.context(2048, True)
# public_context = private_context.to_public()
# xgb_trainer.public_context = public_context
# def mock_grad_hess(*args, **kwargs):
# """
# mock the grad and hess calculation in the label trainer.
# Args:
# *args:
# **kwargs:
# Returns:
# paillier encrypted grad and hess vec
# """
# y = np.array([0, 1] * 60)
# y_pred = np.array([0.5] * 120)
# loss_inst = get_xgb_loss_inst("BCEWithLogitsLoss")
# grad = loss_inst.cal_grad(y, y_pred, after_prediction=True)
# hess = loss_inst.cal_hess(y, y_pred, after_prediction=True)
# grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
# enc_grad_hess = Paillier.encrypt(context=private_context,
# data=grad_hess,
# precision=0, # must be 0
# obfuscation=True,
# num_cores=999)
# return Paillier.serialize(enc_grad_hess, compression=False)
# def mock_node(*args, **kwargs):
# """
# mock the node passing to the trainer
# Args:
# *args:
# **kwargs:
# Returns:
# an empty None
# """
# if node_mocker.call_count <= 1:
# return Node(id="mock_id")
# else:
# return None
# # mock results from the label trainer according to difference channels
# mocker.patch.object(
# xgb_trainer.channels["individual_grad_hess"], "recv", side_effect=mock_grad_hess
# )
# node_mocker = mocker.patch.object(
# xgb_trainer.channels["tree_node"], "recv", side_effect=mock_node
# )
# mocker.patch.object(
# xgb_trainer.channels["min_split_info"], "recv", return_value=[-1, -1, -1]
# )
# mocker.patch.object(
# xgb_trainer.channels["restart_com"], "recv", return_value=0
# )
# mocker.patch.object(
# xgb_trainer.channels["early_stop_com"], "recv", return_value=False
# )
# xgb_trainer.fit()
# self.check_trainer_output(tmp_factory)
# def test_label_trainer(self, mocker, tmp_factory):
# with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
# conf = json.load(f)
# conf["input"]["trainset"][0]["path"] = str(tmp_factory)
# conf["input"]["trainset"][0]["name"] = "train_guest.csv"
# conf["input"]["valset"][0]["path"] = str(tmp_factory)
# conf["input"]["valset"][0]["name"] = "test_guest.csv"
# conf["output"]["path"] = str(tmp_factory)
# del conf["input"]["testset"]
# mocker.patch.object(
# BroadcastChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
# mocker.patch.object(
# xgb_label_trainer.channels["check_dataset_com"], "collect", return_value=[]
# )
# xgb_label_trainer.fit()
# self.check_label_trainer_output(tmp_factory)
# # cover dual channel created in: VerticalXgboostLabelTrainer.__init__
# mocker.patch.object(
# FedConfig, "get_trainer", return_value=["node_id"]
# )
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# VerticalXgboostLabelTrainer(conf)
# @staticmethod
# def check_label_trainer_output(tmp_factory):
# # 检查是否正确输出了预测值文件
# assert os.path.exists(tmp_factory.join(
# "xgb_prediction_train_[STAGE_ID].csv"))
# assert os.path.exists(tmp_factory.join(
# "xgb_prediction_val_[STAGE_ID].csv"))
# # 检查是否正确输出了模型文件
# assert os.path.exists(tmp_factory.join(
# "vertical_xgboost_[STAGE_ID].model"))
# # 检查是否正确输出了model config
# assert os.path.exists(tmp_factory.join("model_config.json"))
# with open(tmp_factory.join("model_config.json")) as f:
# model_config = json.load(f)
# assert model_config[0]["class_name"] == "VerticalXGBooster"
# assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].model"
# # 检查是否正确输出了feature importance文件
# assert os.path.exists(tmp_factory.join(
# "xgb_feature_importance_[STAGE_ID].csv"))
# @staticmethod
# def check_trainer_output(tmp_factory):
# # 检查是否正确输出了模型文件
# assert os.path.exists(tmp_factory.join(
# "vertical_xgboost_[STAGE_ID].model"))
# # 检查是否正确输出了model config
# assert os.path.exists(tmp_factory.join("model_config.json"))
# with open(tmp_factory.join("model_config.json")) as f:
# model_config = json.load(f)
# assert model_config[0]["class_name"] == "VerticalXGBooster"
# assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].model"
# def test_predict_label_trainer(self, get_label_trainer_infer_conf, mocker, tmp_factory):
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
# "0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
# "0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "collect", return_value=[{"test": (80, 2)}]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(
# get_label_trainer_infer_conf)
# xgb_label_trainer.predict()
# df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
# assert (df["pred"] > 0.5).sum() == 50
# def test_predict_empty_testset(self, get_label_trainer_infer_conf, mocker, tmp_factory):
# conf = copy.deepcopy(get_label_trainer_infer_conf)
# del conf["input"]["testset"]
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
# "0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
# "0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "collect", return_value=[{"test": (80, 2)}]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
# xgb_label_trainer.predict()
# df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
# assert df.shape == (80, 2)
# def test_predict_trainer(self, get_trainer_infer_conf, mocker, tmp_factory):
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# DualChannel, "send", return_value=0
# )
# mocker.patch.object(
# BroadcastChannel, "send", return_value=0
# )
# def mock_func(*args, **kwargs):
# config = {
# "train_info": {
# "train_params": {
# "lossfunc": {
# "BCEWithLogitsLoss": {}
# },
# "batch_size_val": 40960
# }
# }
# }
# return config
# mocker.patch.object(
# BroadcastChannel, "recv", side_effect=mock_func
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
# )
# xgb_trainer = VerticalXgboostTrainer(get_trainer_infer_conf)
# xgb_trainer.predict()
| 43,089 | 43.560496 | 118 | py |
XFL | XFL-master/test/algorithm/framework/local/test_local_data_statistic.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from algorithm.framework.local.data_statistic.label_trainer import \
LocalDataStatisticLabelTrainer as LocalDataStatistic
from algorithm.framework.local.data_statistic.trainer import \
LocalDataStatisticTrainer as LocalDataStatisticTrainer
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the module's test CSVs before any test runs; clean up after.

    The frame deliberately contains missing-like cells in ``x00`` and a
    binary column ``x02`` so the statistic operator has something to report.
    """
    # Ensure the working directories exist (same effect as exists()+makedirs()).
    for needed_dir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(needed_dir, exist_ok=True)
    # Synthetic 1000-row frame; x00 starts with five missing-like values.
    case_df = pd.DataFrame({
        'x01': np.random.random(1000),
        'x00': [np.NaN, '', None, ' ', 'nan'] + [0] * 995,
        'x03': 2 * np.random.random(1000) + 1.0,
        'x02': [0] * 300 + [1] * 700
    })
    case_df['y'] = np.where(case_df['x01'] + case_df['x02'] > 2.5, 1, 0)
    ordered = case_df[['y', 'x00', 'x01', 'x02', 'x03']]
    # Two identical copies: tests exercise single- and multi-dataset input.
    for csv_path in ("/opt/dataset/unit_test/data.csv",
                     "/opt/dataset/unit_test/data_opt.csv"):
        ordered.to_csv(csv_path, index=True, index_label='id')
    yield
    # Tear down everything the tests may have produced.
    for stale_dir in ("/opt/dataset/unit_test",
                      "/opt/checkpoints/unit_test",
                      "/opt/checkpoints/unit_test_1"):
        if os.path.exists(stale_dir):
            shutil.rmtree(stale_dir)
@pytest.fixture()
def get_conf():
    """Load the stock data-statistic config and point it at the fixture data."""
    with open("python/algorithm/config/local_data_statistic/label_trainer.json") as cfg_file:
        conf = json.load(cfg_file)
    dataset_entry = conf["input"]["dataset"][0]
    dataset_entry["path"] = "/opt/dataset/unit_test"
    dataset_entry["name"] = "data.csv"
    conf["output"]["path"] = "/opt/checkpoints/unit_test_1"
    yield conf
class TestLocalDataStatistic:
    """Tests for the local data-statistic operator (label-trainer + trainer)."""

    @pytest.mark.parametrize('dataset', [
        [
            {
                "type": "csv",
                "path": "/opt/dataset/unit_test",
                "name": "data.csv",
                "has_label": True,
                "has_id": True
            }
        ],
        [
            {
                "type": "csv",
                "path": "/opt/dataset/unit_test",
                "name": "data.csv",
                "has_label": True,
                "has_id": True
            },
            {
                "type": "csv",
                "path": "/opt/dataset/unit_test",
                "name": "data_opt.csv",
                "has_label": True,
                "has_id": True
            }
        ]
    ])
    def test_default(self, get_conf, dataset):
        # Each configured CSV contributes 1000 rows after loading.
        cfg = copy.deepcopy(get_conf)
        cfg["input"]["dataset"] = dataset
        stat = LocalDataStatistic(cfg)
        expected_rows = 1000 if len(dataset) == 1 else 2000
        assert len(stat.data) == expected_rows

    @pytest.mark.parametrize('train_info_params', [{}, {"quantile": [0.5, 0.8, 0.9]}])
    def test_fit(self, get_conf, train_info_params, mocker):
        cfg = copy.deepcopy(get_conf)
        cfg["train_info"]["train_params"] = train_info_params
        mocker.patch("service.fed_control._send_progress")
        stat = LocalDataStatistic(cfg)
        if not train_info_params:
            # Empty params fall back to the default quartiles.
            assert stat.quantile == [0.25, 0.5, 0.75]
            # Re-run without a label column: label stats must be absent and
            # 'y' is then treated as an ordinary feature.
            no_label_cfg = copy.deepcopy(get_conf)
            no_label_cfg["input"]["dataset"][0]["has_label"] = False
            stat = LocalDataStatistic(no_label_cfg)
            stat.fit()
            summary = stat.summary_dict
            assert "label_num" not in summary.keys()
            assert summary["row_num"] == 1000
            assert summary["column_num"] == 5
            assert summary["feature_names"] == ["y", "x00", "x01", "x02", "x03"]
            # x00 holds exactly 5 missing-like cells out of 1000 rows.
            assert summary["missing_ratio"]["x00"] == float("%.6f" % (5 / 1000))
        else:
            assert stat.quantile == [0.5, 0.8, 0.9]
            stat.fit()
            # Only known statistic keys may appear in the summary.
            allowed_keys = {
                "mean", "median", "missing_ratio", "min", "max", "variance",
                "std", "quantile", "skewness", "kurtosis", "row_num",
                "label_num", "column_num", "feature_names"
            }
            assert set(stat.summary_dict.keys()) <= allowed_keys
            assert stat.summary_dict["column_num"] == 4
            assert stat.summary_dict["feature_names"] == ["x00", "x01", "x02", "x03"]

    def test_trainer(self, get_conf):
        # The trainer role only needs to construct successfully.
        LocalDataStatisticTrainer(get_conf)
| 5,100 | 34.671329 | 120 | py |
XFL | XFL-master/test/algorithm/framework/local/test_local_data_split.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from algorithm.framework.local.data_split.label_trainer import \
LocalDataSplitLabelTrainer as LocalDataSplit
from algorithm.framework.local.data_split.trainer import \
LocalDataSplitTrainer as LocalDataSplitTrainer
@pytest.fixture(scope="module", autouse=True)
def env():
    """Write the data-split fixture CSVs for this module and remove them after."""
    # Ensure the working directories exist.
    for needed_dir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(needed_dir, exist_ok=True)
    # Synthetic 1000-row frame; x1 is a constant-zero column.
    case_df = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': np.random.random(1000)
    })
    case_df['y'] = np.where(case_df['x1'] + case_df['x2'] > 2.5, 1, 0)
    ordered = case_df[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
    # Two identical files so the "split every file in the dir" path is covered.
    for csv_path in ("/opt/dataset/unit_test/dataset.csv",
                     "/opt/dataset/unit_test/dataset_opt.csv"):
        ordered.to_csv(csv_path, index=True, index_label='id')
    yield
    # Tear down everything the tests may have produced.
    for stale_dir in ("/opt/dataset/unit_test",
                      "/opt/checkpoints/unit_test",
                      "/opt/checkpoints/unit_test_1"):
        if os.path.exists(stale_dir):
            shutil.rmtree(stale_dir)
@pytest.fixture()
def get_conf():
    """Stock data-split config retargeted at the fixture dataset and outputs."""
    with open("python/algorithm/config/local_data_split/label_trainer.json") as cfg_file:
        conf = json.load(cfg_file)
    dataset_entry = conf["input"]["dataset"][0]
    dataset_entry["path"] = "/opt/dataset/unit_test"
    dataset_entry["name"] = "dataset.csv"
    out = conf["output"]
    out["path"] = "/opt/checkpoints/unit_test_1"
    out["trainset"]["name"] = "data_train.csv"
    out["valset"]["name"] = "data_test.csv"
    yield conf
class TestLocalDataSplit:
    # Tests for the local train/val data-split operator.  The expected row
    # counts below follow from the fixture: 1000 data rows (+1 header row per
    # file), split 80/20, and two files when no dataset name is given.
    @pytest.mark.parametrize('dataset_name', ["dataset.csv", None])
    def test_default(self, get_conf, dataset_name):
        # A concrete name selects that one file; name=None makes the operator
        # pick up every file in the directory (two CSVs in the fixture).
        conf = copy.deepcopy(get_conf)
        conf["input"]["dataset"][0]["name"] = dataset_name
        if dataset_name == "dataset.csv":
            lds = LocalDataSplit(conf)
            assert lds.files == ["/opt/dataset/unit_test/dataset.csv"]
        else:
            lds = LocalDataSplit(conf)
            assert len(lds.files) == 2
        # An empty dataset list must be rejected at construction time.
        conf1 = copy.deepcopy(get_conf)
        conf1["input"]["dataset"] = []
        with pytest.raises(NotImplementedError) as ee:
            lds = LocalDataSplit(conf1)
        exec_msg = ee.value.args[0]
        assert exec_msg == "Dataset was not configured."
    @pytest.mark.parametrize('dataset_name, shuffle_params, header, batchSize',
                             [("dataset.csv", True, True, 1000),
                              ("dataset.csv", True, False, 1000),
                              ("dataset.csv", False, False, 1000),
                              ("dataset.csv", False, True, 1000),
                              (None, True, True, 1000),
                              (None, True, False, 1000),
                              (None, False, False, 1000),
                              (None, False, True, 1000)])
    def test_fit(self, get_conf, shuffle_params, dataset_name, header, batchSize, mocker):
        # Exercises every combination of single/multi file, shuffle on/off and
        # header present/absent, and checks the written train/val CSVs.
        conf = copy.deepcopy(get_conf)
        conf["train_info"]["train_params"]["shuffle"] = shuffle_params
        conf["input"]["dataset"][0]["name"] = dataset_name
        conf["input"]["dataset"][0]["has_header"] = header
        conf["train_info"]["train_params"]["batch_size"] = batchSize
        output_train = Path(conf["output"]["path"], conf["output"]["trainset"]["name"])
        output_val = Path(conf["output"]["path"], conf["output"]["valset"]["name"])
        lds = LocalDataSplit(conf)
        mocker.patch("service.fed_control._send_progress")
        if dataset_name == "dataset.csv":
            # Single input file: 1000 rows -> 800 train / 200 val (+ header).
            if not shuffle_params:
                if header:
                    lds.fit()
                    # Outputs read with header=None so the header row counts.
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 801 and len(val) == 201
                    assert train.iloc[0, 0] == "id" and val.iloc[0, 0] == "id"
                    # No shuffle: val starts exactly where train stopped.
                    assert train.iloc[1, 0] == '0' and val.iloc[1, 0] == '800'
                else:
                    lds.fit()
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 800 and len(val) == 201
                    assert train.iloc[0, 0] == "id" and val.iloc[0, 0] != "id"
                    assert train.iloc[1, 0] == '0' and val.iloc[0, 0] == 799
            else:
                if header:
                    lds.fit()
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 801 and len(val) == 201
                    assert train.iloc[0, 0] == "id" and val.iloc[0, 0] == "id"
                    # Shuffle: the id column must not be the identity order.
                    assert list(train.iloc[:, 0]) != list(range(800))
                else:
                    lds.fit()
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 800 and len(val) == 201
                    assert train.iloc[0, 0] != "id" and val.iloc[0, 0] != "id"
                    assert list(train.iloc[:, 0]) != list(range(799)) + ["id"]
        else:
            # Two input files: 2000 rows -> 1600 train / 400 val (+ header).
            if not shuffle_params:
                if header:
                    lds.fit()
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 1601 and len(val) == 401
                    assert train.iloc[0, 0] == "id" and val.iloc[0, 0] == "id"
                    assert train.iloc[1, 0] == '0' and val.iloc[1, 0] == '600'
                else:
                    lds.fit()
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 1601 and len(val) == 401
                    assert train.iloc[0, 0] == "id" and val.iloc[0, 0] != "id"
                    assert train.iloc[1, 0] == '0' and val.iloc[0, 0] == 599
            else:
                if header:
                    lds.fit()
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 1601 and len(val) == 401
                    assert train.iloc[0, 0] == "id" and val.iloc[0, 0] == "id"
                    assert list(train.iloc[:, 0]) != list(range(1600))
                else:
                    lds.fit()
                    train = pd.read_csv(output_train, header=None)
                    val = pd.read_csv(output_val, header=None)
                    assert len(train) == 1601 and len(val) == 401
                    assert train.iloc[0, 0] != "id" and val.iloc[0, 0] != "id"
                    assert list(train.iloc[:, 0]) != list(range(1599)) + ["id"]
    def test_trainer(self, get_conf):
        # The trainer role only needs to construct successfully.
        LocalDataSplitTrainer(get_conf)
| 8,194 | 45.039326 | 109 | py |
XFL | XFL-master/test/algorithm/framework/local/test_local_standard_scaler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
from algorithm.framework.local.standard_scaler.label_trainer import \
LocalStandardScalerLabelTrainer as LocalStandardScaler
@pytest.fixture(scope="module", autouse=True)
def env():
    """Prepare the 800/200 train/test CSVs; remove the dataset dir afterwards."""
    # Prepare directories (note: the checkpoints root, not a per-test subdir).
    for needed_dir in ("/opt/dataset/unit_test", "/opt/checkpoints"):
        os.makedirs(needed_dir, exist_ok=True)
    # Test case: x1 is constant zero, y depends on x0 + x2.
    case_df = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0
    })
    case_df['y'] = np.where(case_df['x0'] + case_df['x2'] > 2.5, 1, 0)
    ordered = case_df[['y', 'x0', 'x1', 'x2']]
    ordered.head(800).to_csv(
        "/opt/dataset/unit_test/train.csv", index=True
    )
    ordered.tail(200).to_csv(
        "/opt/dataset/unit_test/test.csv", index=True
    )
    yield
    # Clean up the generated data; the checkpoints dir is not removed here.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
@pytest.fixture()
def get_conf():
    """Config for the scaler tests, based on the local_normalization template.

    NOTE(review): this reads the local_normalization config file — the schema
    is presumably shared with the standard scaler; confirm against the repo.
    """
    with open("python/algorithm/config/local_normalization/label_trainer.json") as cfg_file:
        conf = json.load(cfg_file)
    # Point both the inputs and outputs at the fixture CSVs in place.
    for section, name in (("trainset", "train.csv"), ("valset", "test.csv")):
        conf["input"][section][0]["path"] = "/opt/dataset/unit_test"
        conf["input"][section][0]["name"] = name
        conf["output"][section]["path"] = "/opt/dataset/unit_test"
        conf["output"][section]["name"] = name
    yield conf
class TestLocalStandardScaler:
    """Tests for the local standard-scaler operator."""

    def test_init(self, get_conf):
        # The 800/200 split comes from the module-level fixture data.
        scaler = LocalStandardScaler(get_conf)
        assert len(scaler.train_data) == 800
        assert len(scaler.valid_data) == 200

    @pytest.mark.parametrize('with_mean, with_std', [
        (True, True), (True, False), (False, True), (False, False)
    ])
    def test_fit(self, get_conf, with_mean, with_std, mocker):
        cfg = copy.deepcopy(get_conf)
        params = cfg["train_info"]["train_params"]
        params["with_mean"] = with_mean
        params["with_std"] = with_std
        mocker.patch("service.fed_control._send_progress")
        scaler = LocalStandardScaler(cfg)
        scaler.fit()
        if with_mean:
            # Centering drives every feature mean to ~0.
            assert (np.abs(scaler.train_data[['x0', 'x1', 'x2']].mean()) < 1e-6).all()
        if with_std:
            # Scaling drives the non-constant feature stds to ~1
            # (x1 is constant zero, so it is excluded here).
            assert (np.abs(scaler.train_data[['x0', 'x2']].std() - 1.0) < 1e-6).all()

    @pytest.mark.parametrize('feature_name', ['x0', 'myf'])
    def test_feature_wise(self, get_conf, feature_name, mocker):
        cfg = copy.deepcopy(get_conf)
        params = cfg["train_info"]["train_params"]
        params["with_mean"] = False
        params["with_std"] = False
        params["feature_standard"] = {
            feature_name: {"with_mean": True, "with_std": True}
        }
        mocker.patch("service.fed_control._send_progress")
        scaler = LocalStandardScaler(cfg)
        if feature_name in scaler.train_data.columns:
            scaler.fit()
            # Only the targeted column is centered; others keep their mean.
            assert (np.abs(scaler.train_data['x0'].mean()) < 1e-6).all()
            assert (np.abs(scaler.train_data['x2'].mean()) > 1e-6).all()
        else:
            # An unknown feature name surfaces as KeyError during fit().
            with pytest.raises(KeyError):
                scaler.fit()
| 3,677 | 32.436364 | 82 | py |
XFL | XFL-master/test/algorithm/framework/local/test_local_feature_preprocess.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
from algorithm.framework.local.feature_preprocess.label_trainer import \
LocalFeaturePreprocessLabelTrainer as LocalPreprocess
from algorithm.framework.local.feature_preprocess.trainer import \
LocalFeaturePreprocessTrainer as LocalPreprocessTrainer
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create an 800/200 train/test split with missing-like cells in x00."""
    # Ensure the working directories exist.
    for needed_dir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(needed_dir, exist_ok=True)
    # x00 carries five missing-like leading values; x02 is a binary feature.
    case_df = pd.DataFrame({
        'x01': np.random.random(1000),
        'x00': [np.NaN, '', None, ' ', 'nan'] + [0] * 995,
        'x03': 2 * np.random.random(1000) + 1.0,
        'x02': [0] * 300 + [1] * 700
    })
    case_df['y'] = np.where(case_df['x01'] + case_df['x02'] > 2.5, 1, 0)
    ordered = case_df[['y', 'x00', 'x01', 'x02', 'x03']]
    ordered.head(800).to_csv(
        "/opt/dataset/unit_test/train.csv", index=True, index_label='id'
    )
    ordered.tail(200).to_csv(
        "/opt/dataset/unit_test/test.csv", index=True, index_label='id'
    )
    yield
    # Tear down everything the tests may have produced.
    for stale_dir in ("/opt/dataset/unit_test",
                      "/opt/checkpoints/unit_test",
                      "/opt/checkpoints/unit_test_1"):
        if os.path.exists(stale_dir):
            shutil.rmtree(stale_dir)
@pytest.fixture()
def get_conf():
    """Preprocess config pointed at the fixture CSVs and a scratch output dir."""
    with open("python/algorithm/config/local_feature_preprocess/label_trainer.json") as cfg_file:
        conf = json.load(cfg_file)
    for section, name in (("trainset", "train.csv"), ("valset", "test.csv")):
        conf["input"][section][0]["path"] = "/opt/dataset/unit_test"
        conf["input"][section][0]["name"] = name
    conf["output"]["path"] = "/opt/checkpoints/unit_test_1"
    conf["output"]["trainset"]["name"] = "preprocessed_train.csv"
    conf["output"]["valset"]["name"] = "preprocessed_test.csv"
    yield conf
class TestLocalFeturePreprocess:
@pytest.mark.parametrize('datatype', ["csv", "json"])
def test_default(self, get_conf, datatype):
conf = copy.deepcopy(get_conf)
conf["input"]["trainset"][0]["type"] = datatype
if datatype == "csv":
lp = LocalPreprocess(conf)
assert len(lp.train) == 800
assert len(lp.val) == 200
else:
with pytest.raises(NotImplementedError) as ee:
lp = LocalPreprocess(conf)
exec_msg = ee.value.args[0]
assert exec_msg == "Dataset type {} is not supported.".format(lp.input["trainset"]["type"])
#
conf1 = copy.deepcopy(get_conf)
conf1["input"]["trainset"] = [{}, {}]
try:
LocalPreprocess(conf1)
except:
pass
#
conf2 = copy.deepcopy(get_conf)
conf2["input"]["trainset"] = []
with pytest.raises(NotImplementedError) as ee:
lp = LocalPreprocess(conf2)
exec_msg = ee.value.args[0]
assert exec_msg == "Trainset was not configured."
    @pytest.mark.parametrize('missing_params, outlier_params', [
        ({}, {"outlier_features": {"x03": {"outlier_values": 999}, "x01": {}},
              "outlier_values": ["", " ", "nan", "none", "null", "na", "None"]}),
        ({}, {"outlier_features": {"x03": {"outlier_values": 999}, "x01": {}}}),
        ({"fill_value": None, "missing_features":
            {"x01": {"fill_value": None, "missing_values": None, "strategy": "median"}, "x00": {}},
          "missing_values": None, "strategy": "mean"}, {}),
        ({"fill_value": None, "missing_features": {}, "missing_values": None, "strategy": "mean"},
         {"outlier_features": {}, "outlier_values": 999}),
        ({"fill_value": None, "missing_features": {"x01": {"fill_value": 1, "missing_values": 999,
                                                           "strategy": "constant"}, "x00": {}},
          "missing_values": None, "strategy": "mean"},
         {"outlier_features": {"x03": {"outlier_values": 999}, "x01": {}}, "outlier_values": 999}),
        ({}, {}),
        ({"fill_value": 1, "missing_values": 'nan', "strategy": "constant"},
         {"outlier_features": {"x03": {"outlier_values": 999}, "x01": {}}, "outlier_values": [999, -999]}),
        ({"fill_value": 1, "missing_values": 'nan', "strategy": "constant"},
         {"outlier_features": {"x03": {"outlier_values": 999}, "x01": {}}, "outlier_values": 999})
    ])
    def test_fit(self, get_conf, missing_params, outlier_params, mocker):
        """Exercise fit() over combinations of missing-value and outlier configs.

        Covers: empty configs, overall-only outlier values, per-feature
        overrides, and constant/mean/median imputation strategies. After a
        successful fit, x02 is one-hot expanded into x02_0/x02_1; a second run
        with onehot disabled keeps x02 as a single column.
        """
        conf = copy.deepcopy(get_conf)
        conf["train_info"]["train_params"]["missing"] = missing_params
        conf["train_info"]["train_params"]["outlier"] = outlier_params
        mocker.patch("service.fed_control._send_progress")
        lp = LocalPreprocess(conf)
        if missing_params == {}:
            if outlier_params == {}:
                # No imputation configured: NaNs in x00 survive fit().
                assert lp.imputer_values_overall == []
                assert lp.impute_dict == {}
                lp.fit()
                assert np.sum(lp.train["x00"].isna()) > 0
            else:
                if lp.outlier_values_overall:
                    # Overall outlier values are folded into the imputer value set.
                    assert len(set(lp.imputer_values_overall).difference({np.NaN, '', None, ' ', 'nan', 'none', 'null',
                                                                          'na', 'None', 999})) == 0
                    assert len(set(lp.impute_dict["x03"]["missing_values"]).difference(
                        {np.NaN, '', None, ' ', 'nan', 'none', 'null', 'na', 'None', 999})) == 0
                    lp.fit()
                    assert np.sum(lp.train["x00"].isna()) == 0
                else:
                    # Only the per-feature x03 entry is configured.
                    assert len(lp.impute_dict) == 1
                    lp.fit()
                    assert np.sum(lp.train["x00"].isna()) > 0
        else:
            if outlier_params == {}:
                assert lp.outlier_conf == {}
                lp.impute_dict["x01"]["strategy"] = "median"
            else:
                if lp.missing_feat_conf == {} and lp.outlier_feat_conf == {}:
                    # Overall-only config applies the same rule to every column.
                    assert not lp.feature_flag
                    assert len(lp.impute_dict) == len(lp.train.columns)
                else:
                    assert lp.feature_flag
            lp.fit()
            assert np.sum(lp.train["x00"].isna()) == 0
        assert len(set(lp.train.columns).difference({'y', 'x00', 'x01', 'x03', 'x02_0', 'x02_1'})) == 0
        # test no onehot
        conf1 = copy.deepcopy(get_conf)
        conf1["train_info"]["train_params"]["onehot"] = {}
        lp = LocalPreprocess(conf1)
        lp.fit()
        assert len(set(lp.train.columns).difference({'y', 'x00', 'x01', 'x03', 'x02'})) == 0
    def test_trainer(self, get_conf):
        """Smoke test: the trainer-side class constructs from the default config."""
        LocalPreprocessTrainer(get_conf)
| 7,597 | 43.432749 | 119 | py |
XFL | XFL-master/test/algorithm/framework/local/test_local_normalization.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
from scipy.linalg import norm
from google.protobuf import json_format
from algorithm.framework.local.normalization.label_trainer import \
LocalNormalizationLabelTrainer as LocalNormalization
from common.model.python.feature_model_pb2 import NormalizationModel
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-scoped fixture: build the unit-test dataset/checkpoint dirs and
    synthetic train/test csv splits, then remove them after all tests ran."""
    data_dir = "/opt/dataset/unit_test"
    ckpt_dir = "/opt/checkpoints/unit_test"
    for directory in (data_dir, ckpt_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)
    # Synthetic case: x1 is constant zero, the label depends on x0 + x2.
    frame = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0
    })
    frame['y'] = np.where(frame['x0'] + frame['x2'] > 2.5, 1, 0)
    columns = ['y', 'x0', 'x1', 'x2']
    # 800/200 head/tail split into train and test files, index written as 'id'.
    frame[columns].head(800).to_csv(
        os.path.join(data_dir, "train.csv"), index=True, index_label='id'
    )
    frame[columns].tail(200).to_csv(
        os.path.join(data_dir, "test.csv"), index=True, index_label="id"
    )
    yield
    # Teardown: drop everything the fixture created.
    for directory in (data_dir, ckpt_dir):
        if os.path.exists(directory):
            shutil.rmtree(directory)
@pytest.fixture()
def get_conf():
    """Load the stock local-normalization config and redirect all of its
    input/output paths at the unit-test directories."""
    conf_path = "python/algorithm/config/local_normalization/label_trainer.json"
    with open(conf_path) as fp:
        conf = json.load(fp)
    trainset = conf["input"]["trainset"][0]
    valset = conf["input"]["valset"][0]
    trainset["path"] = "/opt/dataset/unit_test"
    trainset["name"] = "train.csv"
    valset["path"] = "/opt/dataset/unit_test"
    valset["name"] = "test.csv"
    output = conf["output"]
    output["path"] = "/opt/checkpoints/unit_test"
    output["trainset"]["name"] = "normalized_train.csv"
    output["valset"]["name"] = "normalized_test.csv"
    yield conf
class TestLocalNormalization:
    """Tests for LocalNormalizationLabelTrainer: default run, per-axis norms,
    invalid parameters, and per-feature norm overrides."""
    def test_default(self, get_conf, mocker):
        """Default config: 800/200 split is loaded and both output csvs are written."""
        mocker.patch("service.fed_control._send_progress")
        ln = LocalNormalization(get_conf)
        assert len(ln.train_data) == 800
        assert len(ln.valid_data) == 200
        ln.fit()
        assert os.path.exists("/opt/checkpoints/unit_test/normalized_train.csv")
        assert os.path.exists("/opt/checkpoints/unit_test/normalized_test.csv")
        assert len(pd.read_csv("/opt/checkpoints/unit_test/normalized_train.csv")) == 800
        assert len(pd.read_csv("/opt/checkpoints/unit_test/normalized_test.csv")) == 200
    @pytest.mark.parametrize('axis, norm_', [
        (1, 'l1'), (1, 'l2'), (1, 'max'), (1, 'other'), (0, 'l1'), (0, 'l2'), (0, 'max'), (0, 'other2'), (2, 'l1')
    ])
    def test_fit(self, get_conf, axis, norm_, mocker):
        """fit() over column-wise (axis=0) and row-wise (axis=1) l1/l2/max norms.

        Unknown norms raise NotImplementedError, an unknown axis raises
        ValueError, and on success the serialized NormalizationModel protobuf
        must round-trip with the configured axis/norm.
        """
        conf = copy.deepcopy(get_conf)
        conf["train_info"]["train_params"]["axis"] = axis
        conf["train_info"]["train_params"]["norm"] = norm_
        mocker.patch("service.fed_control._send_progress")
        ln = LocalNormalization(conf)
        check_output = True
        if axis == 0:
            # Column-wise: x1 is all zeros, so its norm stays 0 for every variant.
            if norm_ == 'l1':
                ln.fit()
                assert (np.abs(ln.train_data[['x1']].apply(lambda x: norm(x, ord=1)) - 0.0) < 1e-6).all()
            elif norm_ == 'l2':
                ln.fit()
                assert (np.abs(ln.train_data[['x1']].apply(lambda x: norm(x, ord=2)) - 0.0) < 1e-6).all()
                assert (np.abs(ln.train_data[['x0', 'x2']].apply(lambda x: norm(x, ord=2)) - 1.0) < 1e-6).all()
            elif norm_ == 'max':
                ln.fit()
                assert (np.abs(ln.train_data[['x1']].apply(lambda x: np.max(np.abs(x))) - 0.0) < 1e-6).all()
                assert (ln.train_data[['x0', 'x2']].apply(lambda x: np.max(np.abs(x))).to_numpy() <= 1.0).all()
            else:
                with pytest.raises(NotImplementedError) as e:
                    ln.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "norm {} is invalid.".format(norm_)
                check_output = False
        elif axis == 1:
            # Row-wise: every sample vector is normalized to unit norm.
            if norm_ == 'l1':
                ln.fit()
                assert (abs(
                    ln.train_data[['x1', 'x2', 'x0']].apply(lambda x: norm(x, ord=1),
                                                            axis=1).to_numpy() - 1.0) < 1e-6).all()
            elif norm_ == 'l2':
                ln.fit()
                assert (abs(
                    ln.train_data[['x1', 'x2', 'x0']].apply(lambda x: norm(x, ord=2),
                                                            axis=1).to_numpy() - 1.0) < 1e-6).all()
            elif norm_ == 'max':
                ln.fit()
                assert (ln.train_data[['x1', 'x2', 'x0']].apply(lambda x: np.max(np.abs(x)),
                                                                axis=1).to_numpy() <= 1.0).all()
            else:
                with pytest.raises(NotImplementedError) as e:
                    ln.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "norm {} is invalid.".format(norm_)
                check_output = False
        else:
            with pytest.raises(ValueError) as e:
                ln.fit()
            exec_msg = e.value.args[0]
            assert exec_msg == "axis {} is invalid.".format(axis)
            check_output = False
        if check_output:
            # Successful fits must serialize a protobuf model we can parse back.
            with open(conf["output"]["path"] + '/' + conf["output"]["proto_model"]["name"], 'rb') as f:
                byte_str = f.read()
            m = NormalizationModel()
            m.ParseFromString(byte_str)
            d = json_format.MessageToDict(m,
                                          including_default_value_fields=True,
                                          preserving_proto_field_name=True)
            assert d.get("axis") == axis
            if axis == 0:
                assert len(d.get("normalizer")) == 3
            elif axis == 1:
                assert d.get("norm") == norm_
    @pytest.mark.parametrize('feature_name', ['x0', 'myf'])
    def test_feature_wise(self, get_conf, feature_name, mocker):
        """Per-feature override: x0 gets l1 while the default stays l2; an
        override for a nonexistent column makes fit() raise KeyError."""
        conf = copy.deepcopy(get_conf)
        conf["train_info"]["train_params"]["norm"] = 'l2'
        conf["train_info"]["train_params"]["feature_norm"] = {feature_name: {"norm": 'l1'}}
        mocker.patch("service.fed_control._send_progress")
        ln = LocalNormalization(conf)
        if feature_name in ln.train_data.columns:
            ln.fit()
            assert np.abs(norm(ln.train_data['x0'].to_numpy(), ord=1) - 1.0) < 1e-6
            assert np.abs(norm(ln.train_data['x0'].to_numpy(), ord=2) - 1.0) >= 1e-6
            assert np.abs(norm(ln.train_data['x2'].to_numpy(), ord=2) - 1.0) < 1e-6
        else:
            with pytest.raises(KeyError):
                ln.fit()
| 6,376 | 36.075581 | 108 | py |
XFL | XFL-master/demo/horizontal/linear_regression/data_preprocess.py | import random
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 — this script only runs on older scikit-learn versions; verify pin.
bostonDataset = load_boston()
features = pd.DataFrame(bostonDataset['data'])
features.columns = bostonDataset['feature_names']
label = pd.DataFrame(bostonDataset['target'])
# First column is the target, followed by the named feature columns.
data = label.join(features)
num = len(data)
# Shuffle the row order before splitting so each party gets an i.i.d. sample.
index = list(range(num))
random.shuffle(index)
tt = data.iloc[index]
# Split 5/11 + 5/11 to the two parties and the remaining ~1/11 as test set.
thre = num / 11
data_g = tt[:int(np.floor(thre * 5))]
data_h = tt[int(np.floor(thre * 5)):int(np.floor(thre * 10))]
data_t = tt[int(np.floor(thre * 10)):]
data_g.to_csv("./dataset/horizontal_house_price/house_price_1.csv")
data_h.to_csv("./dataset/horizontal_house_price/house_price_2.csv")
data_t.to_csv("./dataset/horizontal_house_price/house_price_test.csv")
| 746 | 28.88 | 70 | py |
XFL | XFL-master/demo/horizontal/linear_regression/2party/config/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/demo/horizontal/linear_regression/3party/config/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/demo/horizontal/nbafl/2party/config/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/demo/horizontal/nbafl/3party/config/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/demo/horizontal/chatglm/chatglm-demo/tokenization_chatglm.py | """Tokenization classes for ChatGLM."""
from typing import List, Optional, Union
import os
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging, PaddingStrategy
from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
from typing import Dict
import sentencepiece as spm
import numpy as np
logger = logging.get_logger(__name__)
# Maximum input length per pretrained checkpoint (positional-embedding limit),
# consumed via PreTrainedTokenizer.max_model_input_sizes below.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "THUDM/chatglm-6b": 2048,
}
class TextTokenizer:
    """Thin wrapper around a SentencePiece model loaded from `model_path`.

    Exposes encode/decode between text, piece strings, and piece ids; the id
    space here is the raw SentencePiece vocabulary (no image-token offset).
    """
    def __init__(self, model_path):
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(model_path)
        # Cache the vocabulary size once; also used by __len__.
        self.num_tokens = self.sp.vocab_size()
    def encode(self, text):
        """Text -> list of SentencePiece ids."""
        return self.sp.EncodeAsIds(text)
    def decode(self, ids: List[int]):
        """List of SentencePiece ids -> text."""
        return self.sp.DecodeIds(ids)
    def tokenize(self, text):
        """Text -> list of piece strings."""
        return self.sp.EncodeAsPieces(text)
    def convert_tokens_to_string(self, tokens):
        """List of piece strings -> text."""
        return self.sp.DecodePieces(tokens)
    def convert_tokens_to_ids(self, tokens):
        return [self.sp.PieceToId(token) for token in tokens]
    def convert_token_to_id(self, token):
        return self.sp.PieceToId(token)
    def convert_id_to_token(self, idx):
        return self.sp.IdToPiece(idx)
    def __len__(self):
        return self.num_tokens
class SPTokenizer:
    """ChatGLM SentencePiece tokenizer with an image-token id offset.

    Text ids are shifted up by `num_image_tokens`: ids in
    [0, num_image_tokens) denote image tokens, ids at or above it map into the
    underlying SentencePiece vocabulary. Newlines and runs of whitespace are
    rewritten to sentinel tokens (<n>, <|tab|>, <|blank_k|>) before encoding
    and restored on decode.
    """
    def __init__(
            self,
            vocab_file,
            num_image_tokens=20000,
            max_blank_length=80,
            byte_fallback=True,
    ):
        assert vocab_file is not None
        self.vocab_file = vocab_file
        self.num_image_tokens = num_image_tokens
        self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
        self.max_blank_length = max_blank_length
        self.byte_fallback = byte_fallback
        self.text_tokenizer = TextTokenizer(vocab_file)
    def _get_text_tokenizer(self):
        return self.text_tokenizer
    @staticmethod
    def get_blank_token(length: int):
        # Sentinel for a run of `length` spaces; runs shorter than 2 keep plain spaces.
        assert length >= 2
        return f"<|blank_{length}|>"
    @staticmethod
    def get_tab_token():
        return f"<|tab|>"
    @property
    def num_text_tokens(self):
        return self.text_tokenizer.num_tokens
    @property
    def num_tokens(self):
        # Total id space: image tokens first, then the text vocabulary.
        return self.num_image_tokens + self.num_text_tokens
    @staticmethod
    def _encode_whitespaces(text: str, max_len: int = 80):
        # Longest runs first so shorter sentinels don't split longer runs.
        text = text.replace("\t", SPTokenizer.get_tab_token())
        for i in range(max_len, 1, -1):
            text = text.replace(" " * i, SPTokenizer.get_blank_token(i))
        return text
    def _preprocess(self, text: str, linebreak=True, whitespaces=True):
        if linebreak:
            text = text.replace("\n", "<n>")
        if whitespaces:
            text = self._encode_whitespaces(text, max_len=self.max_blank_length)
        return text
    def encode(
            self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[int]:
        """
        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            # Prepend <n> then drop its two leading pieces below, which also
            # removes SentencePiece's implicit dummy-prefix piece.
            text = "<n>" + text
        tmp = self._get_text_tokenizer().encode(text)
        # Shift raw SentencePiece ids past the image-token range.
        tokens = [x + self.num_image_tokens for x in tmp]
        return tokens if add_dummy_prefix else tokens[2:]
    def postprocess(self, text):
        # Undo the whitespace/newline sentinels introduced by _preprocess.
        text = text.replace("<n>", "\n")
        text = text.replace(SPTokenizer.get_tab_token(), "\t")
        for i in range(2, self.max_blank_length + 1):
            text = text.replace(self.get_blank_token(i), " " * i)
        return text
    def decode(self, text_ids: List[int]) -> str:
        # Un-shift ids and silently drop image-token ids (they map below 0).
        ids = [int(_id) - self.num_image_tokens for _id in text_ids]
        ids = [_id for _id in ids if _id >= 0]
        text = self._get_text_tokenizer().decode(ids)
        text = self.postprocess(text)
        return text
    def decode_tokens(self, tokens: List[str]) -> str:
        text = self._get_text_tokenizer().convert_tokens_to_string(tokens)
        text = self.postprocess(text)
        return text
    def tokenize(
            self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[str]:
        """
        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            text = "<n>" + text
        tokens = self._get_text_tokenizer().tokenize(text)
        return tokens if add_dummy_prefix else tokens[2:]
    def __getitem__(self, x: Union[int, str]):
        # int -> token string; str -> id. Image tokens use "<image_k>" names.
        if isinstance(x, int):
            if x < self.num_image_tokens:
                return "<image_{}>".format(x)
            else:
                return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
        elif isinstance(x, str):
            if x.startswith("<image_") and x.endswith(">") and x[7:-1].isdigit():
                return int(x[7:-1])
            else:
                return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
        else:
            raise ValueError("The key should be str or int.")
class ChatGLMTokenizer(PreTrainedTokenizer):
    """
    Construct a ChatGLM tokenizer. Based on byte-level Byte-Pair-Encoding.

    Wraps SPTokenizer and implements the HuggingFace PreTrainedTokenizer
    interface, including left-side padding with a 3D attention mask and 2D
    position ids (see `_pad`).

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """
    vocab_files_names = {"vocab_file": "ice_text.model"}
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask", "position_ids"]
    def __init__(
            self,
            vocab_file,
            do_lower_case=False,
            remove_space=False,
            bos_token='<sop>',
            eos_token='<eop>',
            end_token='</s>',
            mask_token='[MASK]',
            gmask_token='[gMASK]',
            padding_side="left",
            pad_token="<pad>",
            unk_token="<unk>",
            num_image_tokens=20000,
            **kwargs
    ) -> None:
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            padding_side=padding_side,
            bos_token=bos_token,
            eos_token=eos_token,
            end_token=end_token,
            mask_token=mask_token,
            gmask_token=gmask_token,
            pad_token=pad_token,
            unk_token=unk_token,
            num_image_tokens=num_image_tokens,
            **kwargs
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.vocab_file = vocab_file
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.end_token = end_token
        self.mask_token = mask_token
        self.gmask_token = gmask_token
        # Backing SentencePiece tokenizer with the image-token id offset.
        self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)
        """ Initialisation """
    @property
    def gmask_token_id(self) -> Optional[int]:
        if self.gmask_token is None:
            return None
        return self.convert_tokens_to_ids(self.gmask_token)
    @property
    def end_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been
        set.
        """
        if self.end_token is None:
            return None
        return self.convert_tokens_to_ids(self.end_token)
    @property
    def vocab_size(self):
        """ Returns vocab size """
        return self.sp_tokenizer.num_tokens
    def get_vocab(self):
        """ Returns vocab as a dict """
        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def preprocess_text(self, inputs):
        # Optional whitespace collapsing and lower-casing per constructor flags.
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text, **kwargs):
        """ Returns a tokenized string. """
        text = self.preprocess_text(text)
        seq = self.sp_tokenizer.tokenize(text)
        return seq
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return self.sp_tokenizer.decode_tokens(tokens)
    def _decode(
            self,
            token_ids: Union[int, List[int]],
            **kwargs
    ) -> str:
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if len(token_ids) == 0:
            return ""
        if self.pad_token_id in token_ids:  # remove pad
            token_ids = list(filter((self.pad_token_id).__ne__, token_ids))
        return super()._decode(token_ids, **kwargs)
    def _convert_token_to_id(self, token):
        """ Converts a token (str) in an id using the vocab. """
        return self.sp_tokenizer[token]
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_tokenizer[index]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """
        Save the vocabulary and special tokens file to a directory.
        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the named of the saved files.
        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, self.vocab_files_names["vocab_file"]
            )
        else:
            vocab_file = save_directory
        # Copy the SentencePiece model file byte-for-byte to the target path.
        with open(self.vocab_file, 'rb') as fin:
            proto_str = fin.read()
        with open(vocab_file, "wb") as writer:
            writer.write(proto_str)
        return (vocab_file,)
    def build_inputs_with_special_tokens(
            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. The ChatGLM layout produced here is:

        - single sequence: `A [gMASK] <sop>`
        - pair of sequences: `A [gMASK] <sop> B <eop>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        gmask_id = self.sp_tokenizer[self.gmask_token]
        eos_id = self.sp_tokenizer[self.eos_token]
        token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]]
        if token_ids_1 is not None:
            token_ids_0 = token_ids_0 + token_ids_1 + [eos_id]
        return token_ids_0
    def _pad(
            self,
            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
            max_length: Optional[int] = None,
            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
            pad_to_multiple_of: Optional[int] = None,
            return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:
                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        bos_token_id = self.sp_tokenizer[self.bos_token]
        mask_token_id = self.sp_tokenizer[self.mask_token]
        gmask_token_id = self.sp_tokenizer[self.gmask_token]
        # This implementation only supports ChatGLM's left-side padding.
        assert self.padding_side == "left"
        required_input = encoded_inputs[self.model_input_names[0]]
        seq_length = len(required_input)
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
        # Initialize attention mask if not present.
        if max_length is not None:
            if "attention_mask" not in encoded_inputs:
                # Context = everything before <sop>; causal (lower-triangular)
                # mask elsewhere. After the `< 0.5` comparison, True marks
                # positions that must NOT be attended to.
                if bos_token_id in required_input:
                    context_length = required_input.index(bos_token_id)
                else:
                    context_length = seq_length
                attention_mask = np.ones((1, seq_length, seq_length))
                attention_mask = np.tril(attention_mask)
                attention_mask[:, :, :context_length] = 1
                attention_mask = np.bool_(attention_mask < 0.5)
                encoded_inputs["attention_mask"] = attention_mask
            if "position_ids" not in encoded_inputs:
                if bos_token_id in required_input:
                    context_length = required_input.index(bos_token_id)
                else:
                    context_length = seq_length
                # 2D positional encoding: row 0 pins generated tokens to the
                # [MASK]/[gMASK] position; row 1 counts 1..k within the
                # generated block and is 0 over the context.
                position_ids = np.arange(seq_length, dtype=np.int64)
                mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id
                if mask_token in required_input:
                    mask_position = required_input.index(mask_token)
                    position_ids[context_length:] = mask_position
                block_position_ids = np.concatenate(
                    [np.zeros(context_length, dtype=np.int64),
                     np.arange(1, seq_length - context_length + 1, dtype=np.int64)])
                encoded_inputs["position_ids"] = np.stack([position_ids, block_position_ids], axis=0)
        if needs_to_be_padded:
            difference = max_length - len(required_input)
            # Left-pad every produced field: padded positions are fully masked
            # (constant True) in the attention mask and zero in position ids.
            if "attention_mask" in encoded_inputs:
                encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"],
                                                          pad_width=[(0, 0), (difference, 0), (difference, 0)],
                                                          mode='constant', constant_values=True)
            if "token_type_ids" in encoded_inputs:
                encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                    "token_type_ids"
                ]
            if "special_tokens_mask" in encoded_inputs:
                encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
            if "position_ids" in encoded_inputs:
                encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"],
                                                        pad_width=[(0, 0), (difference, 0)])
            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
        return encoded_inputs
| 17,047 | 37.396396 | 119 | py |
XFL | XFL-master/demo/horizontal/chatglm/chatglm-demo/configuration_chatglm.py | """ ChatGLM model configuration """
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class ChatGLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`~ChatGLMModel`].
    It is used to instantiate an ChatGLM model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used
    to control the model outputs. Read the documentation from [`PretrainedConfig`]
    for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 150528):
            Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`~ChatGLMModel`] or
            [`~TFChatGLMModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers and the pooler layer.
        num_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        inner_hidden_size (`int`, *optional*, defaults to 16384):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        max_sequence_length (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
            Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
        layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether the model should return the last key/values attentions (not used by all models).
    Example:
    ```python
    >>> from configuration_chatglm import ChatGLMConfig
    >>> from modeling_chatglm import ChatGLMModel
    >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
    >>> configuration = ChatGLMConfig()
    >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
    >>> model = ChatGLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """
    model_type = "chatglm"
    def __init__(
            self,
            vocab_size=150528,
            hidden_size=4096,
            num_layers=28,
            num_attention_heads=32,
            layernorm_epsilon=1e-5,
            use_cache=False,
            bos_token_id=150004,
            eos_token_id=150005,
            mask_token_id=150000,
            gmask_token_id=150001,
            pad_token_id=0,
            max_sequence_length=2048,
            inner_hidden_size=16384,
            position_encoding_2d=True,
            quantization_bit=0,
            pre_seq_len=None,
            prefix_projection=False,
            **kwargs
    ):
        self.num_layers = num_layers
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.max_sequence_length = max_sequence_length
        self.layernorm_epsilon = layernorm_epsilon
        self.inner_hidden_size = inner_hidden_size
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.gmask_token_id = gmask_token_id
        # ChatGLM-specific knobs: 2D positional encoding, optional weight
        # quantization, and P-Tuning v2 prefix settings.
        self.position_encoding_2d = position_encoding_2d
        self.quantization_bit = quantization_bit
        self.pre_seq_len = pre_seq_len
        self.prefix_projection = prefix_projection
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs
        )
| 4,276 | 40.125 | 122 | py |
XFL | XFL-master/demo/horizontal/chatglm/chatglm-demo/quantization.py | from torch.nn import Linear
from torch.nn.parameter import Parameter
import bz2
import torch
import base64
import ctypes
from transformers.utils import logging
from typing import List
from functools import partial
logger = logging.get_logger(__name__)
try:
from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
    class Kernel:
        """Lazily compiled CUDA kernel module: each name in `function_names`
        becomes a callable attribute bound to that kernel entry point."""
        def __init__(self, code: bytes, function_names: List[str]):
            self.code = code
            self._function_names = function_names
            # Compilation is deferred until a kernel is first invoked.
            self._cmodule = LazyKernelCModule(self.code)
            for name in self._function_names:
                setattr(self, name, KernelFunction(self._cmodule, name))
quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrd
NtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Ru
dm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYm
RhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
kernels = Kernel(
bz2.decompress(base64.b64decode(quantization_code)),
[
"int4WeightCompression",
"int4WeightExtractionFloat",
"int4WeightExtractionHalf",
"int8WeightExtractionFloat",
"int8WeightExtractionHalf",
],
)
except Exception as exception:
kernels = None
logger.warning("Failed to load cpm_kernels:" + str(exception))
class W8A16Linear(torch.autograd.Function):
    """Linear op whose weight is stored quantized (int8/int4) and
    dequantized to fp16 on the fly in both forward and backward."""

    @staticmethod
    def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
        # Remember the original input shape so the output can be reshaped back.
        ctx.inp_shape = inp.size()
        ctx.weight_bit_width = weight_bit_width
        out_features = quant_w.size(0)
        # Collapse all leading dims into one batch dim for the matmul.
        inp = inp.contiguous().view(-1, inp.size(-1))
        # Dequantize the weight to fp16 (CUDA kernel).
        weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
        ctx.weight_shape = weight.size()
        output = inp.mm(weight.t())
        # Save the *flattened* input and the quantized weight/scale; the fp16
        # weight is re-derived in backward instead of being cached (saves memory).
        ctx.save_for_backward(inp, quant_w, scale_w)
        return output.view(*(ctx.inp_shape[:-1] + (out_features,)))

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        inp, quant_w, scale_w = ctx.saved_tensors
        weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
        grad_output = grad_output.contiguous().view(-1, weight.size(0))
        grad_input = grad_output.mm(weight)
        grad_weight = grad_output.t().mm(inp)
        # Slots: (inp, quant_w, scale_w, weight_bit_width). The dequantized-weight
        # gradient is returned in quant_w's slot; scale and bit width get None.
        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
def compress_int4_weight(weight: torch.Tensor): # (n, m)
    """Pack int8-stored 4-bit values two-per-byte along dim 1.

    Input is (n, m) int8 where each element holds a 4-bit value; output is
    (n, m // 2) int8 on CUDA. Requires m to be even.
    """
    with torch.cuda.device(weight.device):
        n, m = weight.size(0), weight.size(1)
        assert m % 2 == 0
        m = m // 2
        out = torch.empty(n, m, dtype=torch.int8, device="cuda")
        stream = torch.cuda.current_stream()

        # One block row per output row; threads cover the packed columns.
        gridDim = (n, 1, 1)
        blockDim = (min(round_up(m, 32), 1024), 1, 1)

        # Argument order must match the compiled kernel's signature exactly.
        kernels.int4WeightCompression(
            gridDim,
            blockDim,
            0,
            stream,
            [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
        )
        return out
def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
    """Dequantize an int8/int4-packed weight to fp16 on CUDA.

    ``weight`` is (n, m) int8 (for 4-bit, two values per byte); ``scale_list``
    holds one fp16 scale per row. Returns (n, m * 8 // source_bit_width) half.
    """
    if source_bit_width == 8:
        func = kernels.int8WeightExtractionHalf
    elif source_bit_width == 4:
        func = kernels.int4WeightExtractionHalf
    else:
        assert False, "Unsupported bit-width"

    with torch.cuda.device(weight.device):
        n, m = weight.size(0), weight.size(1)
        # Output is wider than the packed input for 4-bit sources.
        out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device="cuda")
        stream = torch.cuda.current_stream()

        gridDim = (n, 1, 1)
        blockDim = (min(round_up(m, 32), 1024), 1, 1)

        # Argument order must match the compiled kernel's signature exactly.
        func(
            gridDim,
            blockDim,
            0,
            stream,
            [
                ctypes.c_void_p(weight.data_ptr()),
                ctypes.c_void_p(scale_list.data_ptr()),
                ctypes.c_void_p(out.data_ptr()),
                ctypes.c_int32(n),
                ctypes.c_int32(m),
            ],
        )
        return out
class QuantizedLinear(Linear):
    """Linear layer whose weight is stored quantized to int8 or int4.

    If ``weight_tensor`` is given (and ``empty_init`` is False) it is quantized
    here: per-row absmax scales in fp16, values rounded to int8 and, for
    4-bit, packed two-per-byte. Otherwise empty buffers are allocated, to be
    filled later from a pre-quantized checkpoint.
    """
    def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):
        super(QuantizedLinear, self).__init__(*args, **kwargs)
        self.weight_bit_width = weight_bit_width

        shape = self.weight.shape
        # Discard the float weight created by nn.Linear; replaced below.
        del self.weight

        # NOTE(review): requires kwargs["device"] (and, on the empty path,
        # kwargs["dtype"]) — quantize() always passes both; verify other callers.
        if weight_tensor is None or empty_init:
            self.weight = torch.empty(
                shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"]
            )
            self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"])
        else:
            # Symmetric per-row quantization: scale = absmax / (2^(bits-1) - 1).
            self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()
            self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)
            if weight_bit_width == 4:
                self.weight = compress_int4_weight(self.weight)

        # Register as frozen Parameters so they move with the module / show in state_dict.
        self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False)
        self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False)
        if bias_tensor is not None:
            self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False)
        else:
            self.bias = None

    def forward(self, input):
        # Dequantizing matmul with autograd support.
        output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
        if self.bias is not None:
            output = output + self.bias
        return output
def quantize(model, weight_bit_width, empty_init=False, **kwargs):
    """Replace fp16 linear with quantized linear.

    Swaps the four Linear sub-modules of every transformer layer (the
    attention QKV and output projections, and the two MLP projections)
    for ``QuantizedLinear`` copies built from the existing weights.

    Args:
        model: ChatGLM model exposing ``model.layers``.
        weight_bit_width: target weight precision, 8 or 4 bits.
        empty_init: if True, allocate empty quantized buffers instead of
            quantizing the current weights (used when a pre-quantized
            checkpoint will be loaded afterwards).

    Returns:
        The same ``model``, modified in place.
    """
    def _quantized_copy(linear):
        # Build a QuantizedLinear mirroring `linear`'s geometry and weights.
        # Note: `device` reads the weight's original device (evaluated before
        # the `.to(...)` copy), matching the previous inline construction.
        return QuantizedLinear(
            weight_bit_width=weight_bit_width,
            weight_tensor=linear.weight.to(torch.cuda.current_device()),
            bias_tensor=linear.bias,
            in_features=linear.in_features,
            out_features=linear.out_features,
            bias=True,
            dtype=torch.half,
            device=linear.weight.device,
            empty_init=empty_init,
        )

    for layer in model.layers:
        layer.attention.query_key_value = _quantized_copy(layer.attention.query_key_value)
        layer.attention.dense = _quantized_copy(layer.attention.dense)
        layer.mlp.dense_h_to_4h = _quantized_copy(layer.mlp.dense_h_to_4h)
        layer.mlp.dense_4h_to_h = _quantized_copy(layer.mlp.dense_4h_to_h)

    return model
| 15,054 | 73.529703 | 7,375 | py |
XFL | XFL-master/demo/horizontal/chatglm/chatglm-demo/modeling_chatglm.py | """ PyTorch ChatGLM model. """
import math
import copy
import os
import warnings
import re
import sys
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss, LayerNorm
from torch.nn.utils import skip_init
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from transformers.utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
BaseModelOutputWithPastAndCrossAttentions,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.generation.logits_process import LogitsProcessor
from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
from .configuration_chatglm import ChatGLMConfig
# flags required to enable jit fusion kernels
if sys.platform != 'darwin':
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B"
_CONFIG_FOR_DOC = "ChatGLM6BConfig"
CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
"THUDM/chatglm-6b",
# See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm
]
class InvalidScoreLogitsProcessor(LogitsProcessor):
    """Guard against degenerate logits during generation.

    If any score is NaN or Inf, the whole tensor is reset in place and a
    large logit is placed on token id 5 so sampling remains well-defined.
    """

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        contains_invalid = torch.isnan(scores).any() or torch.isinf(scores).any()
        if contains_invalid:
            # Zero everything, then force token id 5 to dominate.
            scores.zero_()
            scores[..., 5] = 5e4
        return scores
def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its slash-separated
    scope path onto the PyTorch module tree, and copies the array into
    the matching parameter. Optimizer-state variables are skipped.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        # Descend into the module tree one scope component at a time.
        for m_name in name:
            # "layer_3" style components carry an index into a ModuleList.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    # NOTE(review): this `continue` only skips the current scope
                    # component (inner loop), not the whole variable — mirrors
                    # the upstream BERT loader this was adapted from.
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch Linear.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class PrefixEncoder(torch.nn.Module):
    """
    The torch.nn model to encode the prefix
    Input shape: (batch-size, prefix-length)
    Output shape: (batch-size, prefix-length, 2*layers*hidden)
    """

    def __init__(self, config):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        # Width of the stacked past key/values: 2 (k and v) per layer.
        kv_dim = config.num_layers * config.hidden_size * 2
        if self.prefix_projection:
            # Small embedding table followed by a two-layer MLP.
            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
            self.trans = torch.nn.Sequential(
                torch.nn.Linear(config.hidden_size, config.hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(config.hidden_size, kv_dim)
            )
        else:
            # Learn the past key/value activations directly.
            self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_dim)

    def forward(self, prefix: torch.Tensor):
        embedded = self.embedding(prefix)
        return self.trans(embedded) if self.prefix_projection else embedded
@torch.jit.script
def gelu_impl(x):
    """OpenAI's gelu implementation."""
    # tanh approximation of GELU; 0.7978845608028654 == sqrt(2/pi).
    # Kept in this exact grouping: jit-scripted and float-order sensitive.
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
                                       (1.0 + 0.044715 * x * x)))

def gelu(x):
    # Thin alias so callers don't depend on the jit-scripted symbol directly.
    return gelu_impl(x)
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding (RoPE) with cached cos/sin tables.

    ``forward`` returns (cos, sin) tables of shape [seq_len, 1, dim]; for the
    non-learnable case they are cached and regrown whenever a longer
    sequence is seen.
    """
    def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
        super().__init__()
        # Standard RoPE inverse frequencies: base^(-2i/dim).
        inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
        inv_freq = inv_freq.half()
        self.learnable = learnable
        if learnable:
            self.inv_freq = torch.nn.Parameter(inv_freq)
            self.max_seq_len_cached = None
        else:
            self.register_buffer('inv_freq', inv_freq)
            self.max_seq_len_cached = None
            self.cos_cached = None
            self.sin_cached = None
        self.precision = precision

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
                              error_msgs):
        # Intentionally a no-op: inv_freq is recomputed, never loaded.
        pass

    def forward(self, x, seq_dim=1, seq_len=None):
        if seq_len is None:
            seq_len = x.shape[seq_dim]
        # (Re)build the tables when none are cached or the sequence grew.
        if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
            # Learnable mode never caches (tables must track the parameter).
            self.max_seq_len_cached = None if self.learnable else seq_len
            t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum('i,j->ij', t, self.inv_freq)
            # Different from paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            if self.precision == torch.bfloat16:
                # cos/sin computed in fp32 for accuracy, cast back below.
                emb = emb.float()
            # [sx, 1 (b * np), hn]
            cos_cached = emb.cos()[:, None, :]
            sin_cached = emb.sin()[:, None, :]
            if self.precision == torch.bfloat16:
                cos_cached = cos_cached.bfloat16()
                sin_cached = sin_cached.bfloat16()
            if self.learnable:
                return cos_cached, sin_cached
            self.cos_cached, self.sin_cached = cos_cached, sin_cached
        return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]

    def _apply(self, fn):
        # Keep the cached tables on the right device/dtype when the module moves.
        if self.cos_cached is not None:
            self.cos_cached = fn(self.cos_cached)
        if self.sin_cached is not None:
            self.sin_cached = fn(self.sin_cached)
        return super()._apply(fn)
def rotate_half(x):
    """Rotate the last dimension: halves (a, b) become (-b, a)."""
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    # dim spelled via ndim because dim=-1 triggers a bug in earlier torch versions
    return torch.cat((-second_half, first_half), dim=first_half.ndim - 1)
@torch.jit.script
def apply_rotary_pos_emb_index(q, k, cos, sin, position_id):
    """Apply rotary embeddings to q and k, gathering cos/sin rows per position id."""
    # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
    cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
        F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
    # Standard RoPE rotation, broadcast over the heads dimension.
    q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
    return q, k
def attention_fn(
        self,
        query_layer,
        key_layer,
        value_layer,
        attention_mask,
        hidden_size_per_partition,
        layer_id,
        layer_past=None,
        scaling_attention_score=True,
        use_cache=False,
):
    """Scaled dot-product attention with ChatGLM's per-layer scaling.

    q/k/v are [sq, b, np, hn]. Returns (context_layer [sq, b, hp],
    present kv cache or None, attention_probs). Scores are divided by an
    extra per-layer coefficient (layer_id + 1) and multiplied back after
    the fp32 softmax for numerical stability in fp16.
    """
    # Prepend cached keys/values from previous decoding steps.
    if layer_past is not None:
        past_key, past_value = layer_past[0], layer_past[1]
        key_layer = torch.cat((past_key, key_layer), dim=0)
        value_layer = torch.cat((past_value, value_layer), dim=0)

    # seqlen, batch, num_attention_heads, hidden_size_per_attention_head
    seq_len, b, nh, hidden_size = key_layer.shape

    if use_cache:
        present = (key_layer, value_layer)
    else:
        present = None

    query_key_layer_scaling_coeff = float(layer_id + 1)
    if scaling_attention_score:
        query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)

    # ===================================
    # Raw attention scores. [b, np, s, s]
    # ===================================

    # [b, np, sq, sk]
    output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))

    # [sq, b, np, hn] -> [sq, b * np, hn]
    query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
    # [sk, b, np, hn] -> [sk, b * np, hn]
    key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)

    # Dummy (broadcastable) input for baddbmm; beta=0 so it never contributes.
    matmul_result = torch.zeros(
        1, 1, 1,
        dtype=query_layer.dtype,
        device=query_layer.device,
    )

    matmul_result = torch.baddbmm(
        matmul_result,
        query_layer.transpose(0, 1),  # [b * np, sq, hn]
        key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
        beta=0.0,
        alpha=1.0,
    )

    # change view to [b, np, sq, sk]
    attention_scores = matmul_result.view(*output_size)

    if self.scale_mask_softmax:
        self.scale_mask_softmax.scale = query_key_layer_scaling_coeff
        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())
    else:
        if not (attention_mask == 0).all():
            # if auto-regressive, skip
            attention_scores.masked_fill_(attention_mask, -10000.0)
        dtype = attention_scores.dtype
        # Softmax in fp32, rescaled by the per-layer coefficient, then cast back.
        attention_scores = attention_scores.float()
        attention_scores = attention_scores * query_key_layer_scaling_coeff

        attention_probs = F.softmax(attention_scores, dim=-1)

        attention_probs = attention_probs.type(dtype)

    # =========================
    # Context layer. [sq, b, hp]
    # =========================

    # value_layer -> context layer.
    # [sk, b, np, hn] --> [b, np, sq, hn]

    # context layer shape: [b, np, sq, hn]
    output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))

    # change view [sk, b * np, hn]
    value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)

    # change view [b * np, sq, sk]
    attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)

    # matmul: [b * np, sq, hn]
    context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))

    # change view [b, np, sq, hn]
    context_layer = context_layer.view(*output_size)

    # [b, np, sq, hn] --> [sq, b, np, hn]
    context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

    # [sq, b, np, hn] --> [sq, b, hp]
    new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)
    context_layer = context_layer.view(*new_context_layer_shape)

    outputs = (context_layer, present, attention_probs)

    return outputs
def default_init(cls, *args, **kwargs):
    """Instantiate ``cls`` with the given arguments.

    Drop-in counterpart to ``torch.nn.utils.skip_init`` used when real
    (non-empty) parameter initialization is wanted.
    """
    instance = cls(*args, **kwargs)
    return instance
class SelfAttention(torch.nn.Module):
    """ChatGLM self-attention block with (optionally 2D) rotary embeddings.

    Projects hidden states to fused QKV, applies rotary position encoding
    (split across two position channels when ``position_encoding_2d``),
    runs ``attention_fn`` and projects the context back to hidden size.
    """
    def __init__(self, hidden_size, num_attention_heads,
                 layer_id, hidden_size_per_attention_head=None, bias=True,
                 params_dtype=torch.float, position_encoding_2d=True, empty_init=True):
        # empty_init skips weight initialization (weights loaded from checkpoint).
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        super(SelfAttention, self).__init__()

        self.layer_id = layer_id
        self.hidden_size = hidden_size
        self.hidden_size_per_partition = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_attention_heads_per_partition = num_attention_heads
        self.position_encoding_2d = position_encoding_2d
        # In 2D mode each head's dim is split over two rotary channels,
        # so the rotary table covers half the per-head size.
        self.rotary_emb = RotaryEmbedding(
            self.hidden_size // (self.num_attention_heads * 2)
            if position_encoding_2d
            else self.hidden_size // self.num_attention_heads,
            base=10000,
            precision=torch.half,
            learnable=False,
        )

        self.scale_mask_softmax = None

        if hidden_size_per_attention_head is None:
            self.hidden_size_per_attention_head = hidden_size // num_attention_heads
        else:
            self.hidden_size_per_attention_head = hidden_size_per_attention_head

        self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head

        # Strided linear layer.
        self.query_key_value = init_method(
            torch.nn.Linear,
            hidden_size,
            3 * self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

        self.dense = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    @staticmethod
    def attention_mask_func(attention_scores, attention_mask):
        # Mask out disallowed positions with a large negative logit.
        attention_scores.masked_fill_(attention_mask, -10000.0)
        return attention_scores

    def split_tensor_along_last_dim(self, tensor, num_partitions,
                                    contiguous_split_chunks=False):
        """Split a tensor along its last dimension.
        Arguments:
            tensor: input tensor.
            num_partitions: number of partitions to split the tensor
            contiguous_split_chunks: If True, make each chunk contiguous
                in memory.
        """
        # Get the size and dimension.
        last_dim = tensor.dim() - 1
        last_dim_size = tensor.size()[last_dim] // num_partitions
        # Split.
        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
        # Note: torch.split does not create contiguous tensors by default.
        if contiguous_split_chunks:
            return tuple(chunk.contiguous() for chunk in tensor_list)

        return tensor_list

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_ids,
            attention_mask: torch.Tensor,
            layer_id,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """
        hidden_states: [seq_len, batch, hidden_size]
        attention_mask: [(1, 1), seq_len, seq_len]
        """

        # [seq_len, batch, 3 * hidden_size]
        mixed_raw_layer = self.query_key_value(hidden_states)

        # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
        new_tensor_shape = mixed_raw_layer.size()[:-1] + (
            self.num_attention_heads_per_partition,
            3 * self.hidden_size_per_attention_head,
        )
        mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)

        # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
        (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)

        if self.position_encoding_2d:
            # Split each head in two: one half rotated by absolute position,
            # the other by the block (intra-span) position.
            q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
            k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
            cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
            position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
                position_ids[:, 1, :].transpose(0, 1).contiguous()
            q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
            q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
            query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
            key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
        else:
            position_ids = position_ids.transpose(0, 1)
            cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
            # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
            query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)

        # [seq_len, batch, hidden_size]
        context_layer, present, attention_probs = attention_fn(
            self=self,
            query_layer=query_layer,
            key_layer=key_layer,
            value_layer=value_layer,
            attention_mask=attention_mask,
            hidden_size_per_partition=self.hidden_size_per_partition,
            layer_id=layer_id,
            layer_past=layer_past,
            use_cache=use_cache
        )

        output = self.dense(context_layer)

        outputs = (output, present)

        if output_attentions:
            outputs += (attention_probs,)

        return outputs  # output, present, attention_probs
class GEGLU(torch.nn.Module):
    """GELU-gated linear unit: splits the last dimension in half and
    multiplies the first half by GELU of the second half."""

    def __init__(self):
        super().__init__()
        self.activation_fn = F.gelu

    def forward(self, x):
        # dim computed from ndim because dim=-1 breaks in jit for pt<1.10
        last_dim = x.ndim - 1
        value, gate = x.chunk(2, dim=last_dim)
        return value * self.activation_fn(gate)
class GLU(torch.nn.Module):
    """Feed-forward block: project up to the inner size, apply the
    activation, project back down to hidden size."""

    def __init__(self, hidden_size, inner_hidden_size=None,
                 layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float, empty_init=True):
        super(GLU, self).__init__()
        # empty_init skips weight initialization (weights loaded from checkpoint).
        init_method = skip_init if empty_init else default_init
        self.layer_id = layer_id
        self.activation_func = activation_func

        self.hidden_size = hidden_size
        # Conventional 4x expansion when no inner size is given.
        self.inner_hidden_size = 4 * hidden_size if inner_hidden_size is None else inner_hidden_size
        self.dense_h_to_4h = init_method(
            torch.nn.Linear,
            self.hidden_size,
            self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )
        self.dense_4h_to_h = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            self.hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    def forward(self, hidden_states):
        """
        hidden_states: [seq_len, batch, hidden_size]
        """
        projected = self.dense_h_to_4h(hidden_states)
        activated = self.activation_func(projected)
        return self.dense_4h_to_h(activated)
class GLMBlock(torch.nn.Module):
    """One ChatGLM transformer layer: LN -> self-attention -> LN -> GLU MLP,
    with alpha-scaled residual connections (alpha = sqrt(2 * num_layers))."""
    def __init__(
            self,
            hidden_size,
            num_attention_heads,
            layernorm_epsilon,
            layer_id,
            inner_hidden_size=None,
            hidden_size_per_attention_head=None,
            layernorm=LayerNorm,
            use_bias=True,
            params_dtype=torch.float,
            num_layers=28,
            position_encoding_2d=True,
            empty_init=True
    ):
        super(GLMBlock, self).__init__()
        # Set output layer initialization if not provided.
        self.layer_id = layer_id

        # Layernorm on the input data.
        self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)

        self.position_encoding_2d = position_encoding_2d

        # Self attention.
        self.attention = SelfAttention(
            hidden_size,
            num_attention_heads,
            layer_id,
            hidden_size_per_attention_head=hidden_size_per_attention_head,
            bias=use_bias,
            params_dtype=params_dtype,
            position_encoding_2d=self.position_encoding_2d,
            empty_init=empty_init
        )

        # Layernorm on the input data.
        self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)

        # Used for the residual scaling factor below.
        self.num_layers = num_layers

        # GLU
        self.mlp = GLU(
            hidden_size,
            inner_hidden_size=inner_hidden_size,
            bias=use_bias,
            layer_id=layer_id,
            params_dtype=params_dtype,
            empty_init=empty_init
        )

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_ids,
            attention_mask: torch.Tensor,
            layer_id,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """
        hidden_states: [seq_len, batch, hidden_size]
        attention_mask: [(1, 1), seq_len, seq_len]
        """

        # Layer norm at the begining of the transformer layer.
        # [seq_len, batch, hidden_size]
        attention_input = self.input_layernorm(hidden_states)

        # Self attention.
        attention_outputs = self.attention(
            attention_input,
            position_ids,
            attention_mask=attention_mask,
            layer_id=layer_id,
            layer_past=layer_past,
            use_cache=use_cache,
            output_attentions=output_attentions
        )

        attention_output = attention_outputs[0]

        # (present, [attention_probs]) from the attention module.
        outputs = attention_outputs[1:]

        # Residual connection.
        # GLM-style scaled residual: the *normed* input is amplified by alpha.
        alpha = (2 * self.num_layers) ** 0.5
        hidden_states = attention_input * alpha + attention_output

        mlp_input = self.post_attention_layernorm(hidden_states)

        # MLP.
        mlp_output = self.mlp(mlp_input)

        # Second residual connection.
        output = mlp_input * alpha + mlp_output

        if use_cache:
            outputs = (output,) + outputs
        else:
            # Drop `present` (None when not caching), keep any attention probs.
            outputs = (output,) + outputs[1:]

        return outputs  # hidden_states, present, attentions
class ChatGLMPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
is_parallelizable = False
supports_gradient_checkpointing = True
config_class = ChatGLMConfig
base_model_prefix = "transformer"
_no_split_modules = ["GLMBlock"]
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
return
def get_masks(self, input_ids, device):
batch_size, seq_length = input_ids.shape
context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device)
attention_mask.tril_()
for i, context_length in enumerate(context_lengths):
attention_mask[i, :, :context_length] = 1
attention_mask.unsqueeze_(1)
attention_mask = (attention_mask < 0.5).bool()
return attention_mask
def get_position_ids(self, input_ids, mask_positions, device, use_gmasks=None):
batch_size, seq_length = input_ids.shape
if use_gmasks is None:
use_gmasks = [False] * batch_size
context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
if self.position_encoding_2d:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
for i, context_length in enumerate(context_lengths):
position_ids[i, context_length:] = mask_positions[i]
block_position_ids = [torch.cat((
torch.zeros(context_length, dtype=torch.long, device=device),
torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1
)) for context_length in context_lengths]
block_position_ids = torch.stack(block_position_ids, dim=0)
position_ids = torch.stack((position_ids, block_position_ids), dim=1)
else:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
for i, context_length in enumerate(context_lengths):
if not use_gmasks[i]:
position_ids[i, context_length:] = mask_positions[i]
return position_ids
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, ChatGLMModel):
module.gradient_checkpointing = value
CHATGLM_6B_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CHATGLM_6B_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ChatGLM6BTokenizer`].
See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert *input_ids* indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
CHATGLM_6B_START_DOCSTRING,
)
class ChatGLMModel(ChatGLMPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in [Attention is
all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the
`is_decoder` argument of the configuration set to `True`.
To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder`
argument and `add_cross_attention` set to `True`; an
`encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config: ChatGLMConfig, empty_init=True):
super().__init__(config)
if empty_init:
init_method = skip_init
else:
init_method = default_init
# recording parameters
self.max_sequence_length = config.max_sequence_length
self.hidden_size = config.hidden_size
self.params_dtype = torch.half
self.num_attention_heads = config.num_attention_heads
self.vocab_size = config.vocab_size
self.num_layers = config.num_layers
self.layernorm_epsilon = config.layernorm_epsilon
self.inner_hidden_size = config.inner_hidden_size
self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
self.position_encoding_2d = config.position_encoding_2d
self.pre_seq_len = config.pre_seq_len
self.prefix_projection = config.prefix_projection
self.word_embeddings = init_method(
torch.nn.Embedding,
num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
dtype=self.params_dtype
)
self.gradient_checkpointing = False
def get_layer(layer_id):
return GLMBlock(
self.hidden_size,
self.num_attention_heads,
self.layernorm_epsilon,
layer_id,
inner_hidden_size=self.inner_hidden_size,
hidden_size_per_attention_head=self.hidden_size_per_attention_head,
layernorm=LayerNorm,
use_bias=True,
params_dtype=self.params_dtype,
position_encoding_2d=self.position_encoding_2d,
empty_init=empty_init
)
self.layers = torch.nn.ModuleList(
[get_layer(layer_id) for layer_id in range(self.num_layers)]
)
# Final layer norm before output.
self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)
if self.pre_seq_len is not None:
for param in self.parameters():
param.requires_grad = False
self.prefix_tokens = torch.arange(self.pre_seq_len).long()
self.prefix_encoder = PrefixEncoder(config)
self.dropout = torch.nn.Dropout(0.1)
# total_params = sum(p.numel() for p in self.parameters())
# trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
# print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params))
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, new_embeddings: torch.Tensor):
self.word_embeddings = new_embeddings
def get_prompt(self, batch_size, device, dtype=torch.half):
prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
past_key_values = past_key_values.view(
batch_size,
self.pre_seq_len,
self.num_layers * 2,
self.num_attention_heads,
self.hidden_size // self.num_attention_heads
)
# seq_len, b, nh, hidden_size
past_key_values = self.dropout(past_key_values)
past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
# past_key_values = [(v[0], v[1]) for v in past_key_values]
return past_key_values
@add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPastAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape[:2]
elif inputs_embeds is not None:
batch_size, seq_length = inputs_embeds.shape[:2]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if past_key_values is None:
if self.pre_seq_len is not None:
past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
dtype=inputs_embeds.dtype)
else:
past_key_values = tuple([None] * len(self.layers))
if attention_mask is None:
attention_mask = self.get_masks(
input_ids,
device=input_ids.device
)
if position_ids is None:
MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
seqs = input_ids.tolist()
mask_positions, use_gmasks = [], []
for seq in seqs:
mask_token = gMASK if gMASK in seq else MASK
use_gmask = mask_token == gMASK
mask_positions.append(seq.index(mask_token))
use_gmasks.append(use_gmask)
position_ids = self.get_position_ids(
input_ids,
mask_positions=mask_positions,
device=input_ids.device,
use_gmasks=use_gmasks
)
if self.pre_seq_len is not None and attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
attention_mask.device)
prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)
# [seq_len, batch, hidden_size]
hidden_states = inputs_embeds.transpose(0, 1)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if attention_mask is None:
attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
else:
attention_mask = attention_mask.to(hidden_states.device)
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_past = past_key_values[i]
if self.gradient_checkpointing and self.training:
layer_ret = torch.utils.checkpoint.checkpoint(
layer,
hidden_states,
position_ids,
attention_mask,
torch.tensor(i),
layer_past,
use_cache,
output_attentions
)
else:
layer_ret = layer(
hidden_states,
position_ids=position_ids,
attention_mask=attention_mask,
layer_id=torch.tensor(i),
layer_past=layer_past,
use_cache=use_cache,
output_attentions=output_attentions
)
hidden_states = layer_ret[0]
if use_cache:
presents = presents + (layer_ret[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)
# Final layer norm.
hidden_states = self.final_layernorm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
def __init__(self, config: ChatGLMConfig, empty_init=True):
super().__init__(config)
if empty_init:
init_method = skip_init
else:
init_method = default_init
# self.hidden_size = config.hidden_size
# self.params_dtype = torch.half
# self.vocab_size = config.vocab_size
self.max_sequence_length = config.max_sequence_length
self.position_encoding_2d = config.position_encoding_2d
self.transformer = ChatGLMModel(config, empty_init=empty_init)
self.lm_head = init_method(
nn.Linear,
config.hidden_size,
config.vocab_size,
bias=False,
dtype=torch.half
)
self.config = config
self.quantized = False
if self.config.quantization_bit:
self.quantize(self.config.quantization_bit, empty_init=True)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def _update_model_kwargs_for_generation(
self,
outputs: ModelOutput,
model_kwargs: Dict[str, Any],
is_encoder_decoder: bool = False,
standardize_cache_format: bool = False,
) -> Dict[str, Any]:
# update past_key_values
model_kwargs["past_key_values"] = self._extract_past_from_model_output(
outputs, standardize_cache_format=standardize_cache_format
)
# update attention mask
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
if attention_mask is not None and attention_mask.dtype == torch.bool:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
new_attention_mask = attention_mask[:, :, -1:].clone()
new_attention_mask[..., -1] = False
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, new_attention_mask], dim=2
)
# update position ids
if "position_ids" in model_kwargs:
position_ids = model_kwargs["position_ids"]
new_position_id = position_ids[..., -1:].clone()
new_position_id[:, 1, :] += 1
model_kwargs["position_ids"] = torch.cat(
[position_ids, new_position_id], dim=-1
)
return model_kwargs
def prepare_inputs_for_generation(
self,
input_ids: torch.LongTensor,
past: Optional[torch.Tensor] = None,
past_key_values: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs
) -> dict:
batch_size, seq_length = input_ids.shape
MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
seqs = input_ids.tolist()
mask_positions, use_gmasks = [], []
for seq in seqs:
mask_token = gMASK if gMASK in seq else MASK
use_gmask = mask_token == gMASK
mask_positions.append(seq.index(mask_token))
use_gmasks.append(use_gmask)
# only last token for input_ids if past is not None
if past is not None or past_key_values is not None:
last_token = input_ids[:, -1].unsqueeze(-1)
if attention_mask is not None and attention_mask.dtype == torch.bool:
attention_mask = attention_mask[:, :, -1:]
else:
attention_mask = None
if position_ids is not None:
position_ids = position_ids[..., -1:]
else:
context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
if self.position_encoding_2d:
position_ids = torch.tensor(
[[mask_position, seq_length - context_length] for mask_position, context_length in
zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
else:
position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
device=input_ids.device).unsqueeze(-1)
if past is None:
past = past_key_values
return {
"input_ids": last_token,
"past_key_values": past,
"position_ids": position_ids,
"attention_mask": attention_mask
}
else:
if attention_mask is not None and attention_mask.dtype != torch.bool:
logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
attention_mask = None
if attention_mask is None:
attention_mask = self.get_masks(
input_ids,
device=input_ids.device
)
if position_ids is None:
position_ids = self.get_position_ids(
input_ids,
device=input_ids.device,
mask_positions=mask_positions,
use_gmasks=use_gmasks
)
return {
"input_ids": input_ids,
"past_key_values": past,
"position_ids": position_ids,
"attention_mask": attention_mask
}
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids=input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()
loss = None
if labels is not None:
lm_logits = lm_logits.to(torch.float32)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
lm_logits = lm_logits.to(hidden_states.dtype)
loss = loss.to(hidden_states.dtype)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@staticmethod
def _reorder_cache(
past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
"""
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
Output shares the same memory storage as `past`.
"""
return tuple(
(
layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
)
for layer_past in past
)
def process_response(self, response):
response = response.strip()
response = response.replace("[[训练时间]]", "2023年")
punkts = [
[",", ","],
["!", "!"],
[":", ":"],
[";", ";"],
["\?", "?"],
]
for item in punkts:
response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response)
response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response)
return response
@torch.no_grad()
def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
if history is None:
history = []
if logits_processor is None:
logits_processor = LogitsProcessorList()
logits_processor.append(InvalidScoreLogitsProcessor())
gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
"temperature": temperature, "logits_processor": logits_processor, **kwargs}
if not history:
prompt = query
else:
prompt = ""
for i, (old_query, response) in enumerate(history):
prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
inputs = tokenizer([prompt], return_tensors="pt")
inputs = inputs.to(self.device)
outputs = self.generate(**inputs, **gen_kwargs)
outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
response = tokenizer.decode(outputs)
response = self.process_response(response)
history = history + [(query, response)]
return response, history
@torch.no_grad()
def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,
do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
if history is None:
history = []
if logits_processor is None:
logits_processor = LogitsProcessorList()
logits_processor.append(InvalidScoreLogitsProcessor())
gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
"temperature": temperature, "logits_processor": logits_processor, **kwargs}
if not history:
prompt = query
else:
prompt = ""
for i, (old_query, response) in enumerate(history):
prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
inputs = tokenizer([prompt], return_tensors="pt")
inputs = inputs.to(self.device)
for outputs in self.stream_generate(**inputs, **gen_kwargs):
outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
response = tokenizer.decode(outputs)
response = self.process_response(response)
new_history = history + [(query, response)]
yield response, new_history
@torch.no_grad()
def stream_generate(
self,
input_ids,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
**kwargs,
):
batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
if generation_config is None:
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs)
bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
if has_default_max_length and generation_config.max_new_tokens is None:
warnings.warn(
f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
" recommend using `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif generation_config.max_new_tokens is not None:
generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
if not has_default_max_length:
logger.warn(
f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
"Please refer to the documentation for more information. "
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
UserWarning,
)
if input_ids_seq_length >= generation_config.max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
" increasing `max_new_tokens`."
)
# 2. Set generation parameters if not already defined
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
encoder_input_ids=input_ids,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
)
stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria
)
logits_warper = self._get_logits_warper(generation_config)
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
scores = None
while True:
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=False,
output_hidden_states=False,
)
next_token_logits = outputs.logits[:, -1, :]
# pre-process distribution
next_token_scores = logits_processor(input_ids, next_token_logits)
next_token_scores = logits_warper(input_ids, next_token_scores)
# sample
probs = nn.functional.softmax(next_token_scores, dim=-1)
if generation_config.do_sample:
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
next_tokens = torch.argmax(probs, dim=-1)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())
# stop when each sentence is finished, or if we exceed the maximum length
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
break
yield input_ids
def quantize(self, bits: int, empty_init=False, **kwargs):
if bits == 0:
return
from .quantization import quantize
if self.quantized:
logger.info("Already quantized.")
return self
self.quantized = True
self.config.quantization_bit = bits
self.transformer = quantize(self.transformer, bits, empty_init=empty_init, **kwargs)
return self
| 57,568 | 39.089833 | 121 | py |
XFL | XFL-master/demo/horizontal/logistic_regression/3party/config/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/docs/en/source/conf.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'XFL'
copyright = '2022, The XFL Authors.'
author = 'chi.zhang'
# The full version, including alpha/beta/rc tags
release = '1.2.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_markdown_tables',
'recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
extensions.append('sphinx_rtd_theme')
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
'logo_only': True,
'navigation_depth': 5,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
} | 3,375 | 32.76 | 79 | py |
XFL | XFL-master/docs/zh_CN/source/conf.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# Project identity shown in the rendered docs and in the HTML footer.
project = 'XFL'
copyright = '2022, The XFL Authors.'
author = 'chi.zhang'
# The full version, including alpha/beta/rc tags
release = '1.2.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx_markdown_tables',
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Root document of the toctree (index.rst).
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
extensions.append('sphinx_rtd_theme')
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    'logo_only': True,
    'navigation_depth': 5,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Parse Markdown sources with recommonmark's CommonMark parser.
from recommonmark.parser import CommonMarkParser
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
} | 3,378 | 32.79 | 79 | py |
benchmark | benchmark-main/setup.py | import contextlib
import os
import platform
import shutil
import sysconfig
from pathlib import Path
import setuptools
from setuptools.command import build_ext
PYTHON_INCLUDE_PATH_PLACEHOLDER = "<PYTHON_INCLUDE_PATH>"
IS_WINDOWS = platform.system() == "Windows"
IS_MAC = platform.system() == "Darwin"
@contextlib.contextmanager
def temp_fill_include_path(fp: str):
    """Temporarily substitute the Python include directory into *fp*.

    On entry, every occurrence of PYTHON_INCLUDE_PATH_PLACEHOLDER in the
    file is replaced with the current interpreter's C header directory
    (as a POSIX-style path). On exit — normal or exceptional — the file's
    original content is written back.
    """
    with open(fp, "r+") as handle:
        try:
            original = handle.read()
            patched = original.replace(
                PYTHON_INCLUDE_PATH_PLACEHOLDER,
                Path(sysconfig.get_paths()['include']).as_posix(),
            )
            handle.seek(0)
            handle.write(patched)
            handle.truncate()
            yield
        finally:
            # Always restore the pristine placeholder text.
            handle.seek(0)
            handle.write(original)
            handle.truncate()
class BazelExtension(setuptools.Extension):
    """A C/C++ extension that is defined as a Bazel BUILD target."""

    def __init__(self, name: str, bazel_target: str):
        # No sources: the actual build is delegated entirely to Bazel.
        super().__init__(name=name, sources=[])
        self.bazel_target = bazel_target
        # "//bindings/python/pkg:mod" -> relpath="bindings/python/pkg",
        # target_name="mod".
        without_repo_prefix = bazel_target.split("//")[-1]
        self.relpath, self.target_name = without_repo_prefix.split(":")
class BuildBazelExtension(build_ext.build_ext):
    """A command that runs Bazel to build a C/C++ extension."""
    def run(self):
        # Build every registered Bazel extension, then let the stock
        # setuptools build_ext finish its own bookkeeping.
        for ext in self.extensions:
            self.bazel_build(ext)
        build_ext.build_ext.run(self)
    def bazel_build(self, ext: BazelExtension):
        """Runs the bazel build to create the package."""
        # WORKSPACE carries a placeholder for the Python include dir; patch
        # it in for the duration of the build, restore it afterwards.
        with temp_fill_include_path("WORKSPACE"):
            temp_path = Path(self.build_temp)
            bazel_argv = [
                "bazel",
                "build",
                ext.bazel_target,
                f"--symlink_prefix={temp_path / 'bazel-'}",
                f"--compilation_mode={'dbg' if self.debug else 'opt'}",
                # C++17 is required by nanobind
                f"--cxxopt={'/std:c++17' if IS_WINDOWS else '-std=c++17'}",
            ]
            if IS_WINDOWS:
                # Link with python*.lib.
                for library_dir in self.library_dirs:
                    bazel_argv.append("--linkopt=/LIBPATH:" + library_dir)
            elif IS_MAC:
                if platform.machine() == "x86_64":
                    # C++17 needs macOS 10.14 at minimum
                    bazel_argv.append("--macos_minimum_os=10.14")
                    # cross-compilation for Mac ARM64 on GitHub Mac x86 runners.
                    # ARCHFLAGS is set by cibuildwheel before macOS wheel builds.
                    archflags = os.getenv("ARCHFLAGS", "")
                    if "arm64" in archflags:
                        bazel_argv.append("--cpu=darwin_arm64")
                        bazel_argv.append("--macos_cpus=arm64")
                elif platform.machine() == "arm64":
                    bazel_argv.append("--macos_minimum_os=11.0")
            self.spawn(bazel_argv)
            # Copy the built shared library from bazel-bin into the location
            # setuptools expects for the extension module.
            shared_lib_suffix = '.dll' if IS_WINDOWS else '.so'
            ext_name = ext.target_name + shared_lib_suffix
            ext_bazel_bin_path = temp_path / 'bazel-bin' / ext.relpath / ext_name
            ext_dest_path = Path(self.get_ext_fullpath(ext.name))
            shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
            # explicitly call `bazel shutdown` for graceful exit
            self.spawn(["bazel", "shutdown"])
# Register the Bazel-backed build_ext command and declare the extension
# module that is produced by the Bazel target below.
setuptools.setup(
    cmdclass=dict(build_ext=BuildBazelExtension),
    ext_modules=[
        BazelExtension(
            name="google_benchmark._benchmark",
            bazel_target="//bindings/python/google_benchmark:_benchmark",
        )
    ],
)
| 3,777 | 32.140351 | 81 | py |
benchmark | benchmark-main/.ycm_extra_conf.py | import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Fallback compile flags used when no compilation database is configured.
flags = [
'-Wall',
'-Werror',
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# Load the compilation database if the folder above exists; otherwise fall
# back to the static 'flags' list (database stays None).
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source extensions tried when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.cc' ]
def DirectoryOfThisScript():
  """Absolute path of the directory holding this configuration file."""
  return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
  """Rewrite relative include/sysroot paths in *flags* as absolute paths
  anchored at *working_directory*. Returns a new list; *flags* is untouched.
  An empty working directory returns a plain copy of the flags.
  """
  if not working_directory:
    return list(flags)
  path_flags = ('-isystem', '-I', '-iquote', '--sysroot=')
  absolute = []
  next_is_path = False
  for flag in flags:
    rewritten = flag
    if next_is_path:
      next_is_path = False
      if not flag.startswith('/'):
        rewritten = os.path.join(working_directory, flag)
    for prefix in path_flags:
      if flag == prefix:
        # The path arrives as the *next* separate argument.
        next_is_path = True
        break
      if flag.startswith(prefix):
        # The path is glued onto the flag itself, e.g. '-Iinclude'.
        rewritten = prefix + os.path.join(working_directory, flag[len(prefix):])
        break
    if rewritten:
      absolute.append(rewritten)
  return absolute
def IsHeaderFile(filename):
  """True when *filename* carries a C/C++ header extension."""
  return os.path.splitext(filename)[1] in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
  """Look up compile flags for *filename* in the compilation database.

  The compilation database generated by CMake has no entries for headers,
  so for a header we fall back to the flags of a sibling source file with
  the same basename, if one exists on disk. Returns None when no usable
  entry is found for a header.
  """
  if not IsHeaderFile(filename):
    return database.GetCompilationInfoForFile(filename)
  stem = os.path.splitext(filename)[0]
  for extension in SOURCE_EXTENSIONS:
    candidate = stem + extension
    if os.path.exists(candidate):
      info = database.GetCompilationInfoForFile(candidate)
      if info.compiler_flags_:
        return info
  return None
def FlagsForFile(filename, **kwargs):
  """YouCompleteMe entry point: compile flags for *filename*.

  Uses the compilation database when one was loaded, otherwise the static
  'flags' list made absolute relative to this script's directory. Returns
  None when the database has no usable entry.
  """
  if not database:
    resolved = MakeRelativePathsInFlagsAbsolute(flags, DirectoryOfThisScript())
    return {
      'flags': resolved,
      'do_cache': True
    }
  # compilation_info.compiler_flags_ is a "list-like" StringVec, not a
  # real Python list.
  compilation_info = GetCompilationInfoForFile(filename)
  if not compilation_info:
    return None
  resolved = MakeRelativePathsInFlagsAbsolute(
    compilation_info.compiler_flags_,
    compilation_info.compiler_working_dir_)
  return {
    'flags': resolved,
    'do_cache': True
  }
| 3,640 | 30.387931 | 79 | py |
benchmark | benchmark-main/tools/compare.py | #!/usr/bin/env python3
import unittest
"""
compare.py - versatile benchmark output compare tool
"""
import argparse
from argparse import ArgumentParser
import json
import sys
import os
import gbench
from gbench import util, report
def check_inputs(in1, in2, flags):
    """Sanity-check the two benchmark inputs and the shared flag list,
    warning about flags that are ignored or harmful and aborting on an
    unsupported output format."""
    kind1, _ = util.classify_input_file(in1)
    kind2, _ = util.classify_input_file(in2)
    output_file = util.find_benchmark_flag('--benchmark_out=', flags)
    output_type = util.find_benchmark_flag('--benchmark_out_format=', flags)
    both_executables = (kind1 == util.IT_Executable
                        and kind2 == util.IT_Executable)
    if both_executables and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % output_file)
    if kind1 == util.IT_JSON and kind2 == util.IT_JSON:
        # When both sides are JSON the only supported flag is
        # --benchmark_filter=
        for flag in util.remove_benchmark_flags('--benchmark_filter=', flags):
            print("WARNING: passing %s has no effect since both "
                  "inputs are JSON" % flag)
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
               " is not supported.") % output_type)
        sys.exit(1)
def create_parser():
    """Build the argparse CLI: global options plus the three operating
    modes 'benchmarks', 'filters' and 'benchmarksfiltered'."""
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')
    # -- Global options shared by every mode ---------------------------------
    parser.add_argument(
        '-a',
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default, we display everything - the"
        " actual runs, and the aggregates computed. Sometimes, it is "
        "desirable to only view the aggregates. E.g. when there are a lot "
        "of repetitions. Do note that only the display is affected. "
        "Internally, all the actual runs are still used, e.g. for U test.")
    parser.add_argument(
        '--no-color',
        dest='color',
        default=True,
        action="store_false",
        help="Do not use colors in the terminal output"
    )
    parser.add_argument(
        '-d',
        '--dump_to_json',
        dest='dump_to_json',
        help="Additionally, dump benchmark comparison output to this file in JSON format.")
    # -- Mann-Whitney U test tuning ------------------------------------------
    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
        alpha_default)
    # -- Mode subcommands ----------------------------------------------------
    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')
    # Mode 1: compare two whole benchmark runs.
    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The most simple use-case, compare all the output of these two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')
    # Mode 2: compare two filters over a single run.
    parser_b = subparsers.add_parser(
        'filters', help='Compare filter one with the filter two of benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')
    # Mode 3: compare one filter of one run against another filter of another run.
    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of first benchmark with filter two of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')
    return parser
def main():
    """CLI entry point: parse args, run/load both benchmark sides, print
    the difference report and optionally dump it to JSON."""
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options
    # Resolve (test file, filter) pairs per mode; filters are empty in the
    # plain 'benchmarks' mode.
    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''
        # NOTE: if test_baseline == test_contender, you are analyzing the stdev
        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]
        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev
        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]
        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev
        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)
    check_inputs(test_baseline, test_contender, benchmark_options)
    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']
    options_baseline = []
    options_contender = []
    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]
    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline))
    json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender))
    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)
    diff_report = gbench.report.get_difference_report(
        json1, json2, args.utest)
    output_lines = gbench.report.print_difference_report(
        diff_report,
        args.display_aggregates_only,
        args.utest, args.utest_alpha, args.color)
    print(description)
    for ln in output_lines:
        print(ln)
    # Optionally, diff and output to JSON
    if args.dump_to_json is not None:
        with open(args.dump_to_json, 'w') as f_json:
            json.dump(diff_report, f_json)
class TestParser(unittest.TestCase):
    """Self-tests for create_parser(): each case checks the parsed mode,
    inputs, filters, utest settings and trailing benchmark options."""
    def setUp(self):
        self.parser = create_parser()
        # Sample JSON result files shipped under gbench/Inputs.
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)
    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)
    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ['-a', 'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)
    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)
    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)
    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])
    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])
    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)
    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])
    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])
    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)
    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')
    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')
# Entry point: run the comparison tool. Swap the comments below to run the
# TestParser self-tests via unittest instead.
if __name__ == '__main__':
    # unittest.main()
    main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| 18,479 | 41.678984 | 513 | py |
benchmark | benchmark-main/tools/strip_asm.py | #!/usr/bin/env python3
"""
strip_asm.py - Cleanup ASM output for the specified file
"""
from argparse import ArgumentParser
import sys
import os
import re
def find_used_labels(asm):
    """Return the set of '.L<name>' labels that are jump targets in *asm*.

    Only labels referenced by a jump mnemonic ('j' followed by letters)
    count as used; declaration-only labels are not collected here.
    """
    found = set()
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on Python 3.12+).
    label_re = re.compile(r"\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
    for line in asm.splitlines():
        m = label_re.match(line)
        if m:
            found.add('.L%s' % m.group(1))
    return found
def normalize_labels(asm):
    """Ensure every local label in *asm* is spelled with a leading dot.

    Some toolchains declare labels as 'L<name>:' rather than '.L<name>:'.
    When the declared labels lack the dot, rewrite every declaration and
    reference to the dotted form so later passes can treat labels
    uniformly. Raw strings fix the invalid escape sequences that warn on
    Python 3.12+.
    """
    decls = set()
    label_decl = re.compile(r"^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    for line in asm.splitlines():
        m = label_decl.match(line)
        if m:
            decls.add(m.group(0))
    if len(decls) == 0:
        return asm
    # All declarations are assumed to share one spelling; inspect any one.
    needs_dot = next(iter(decls))[0] != '.'
    if not needs_dot:
        return asm
    for ld in decls:
        asm = re.sub(r"(^|\s+)" + ld + r"(?=:|\s)", '\\1.' + ld, asm)
    return asm
def transform_labels(asm):
    """Normalize label spelling, then drop every label declaration that no
    jump instruction references."""
    asm = normalize_labels(asm)
    used = find_used_labels(asm)
    decl_re = re.compile(r"^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    kept = []
    for line in asm.splitlines():
        m = decl_re.match(line)
        if m is None or m.group(0) in used:
            kept.append(line)
    return ''.join(line + '\n' for line in kept)
def is_identifier(tk):
    """True when *tk* is a non-empty identifier: a letter or underscore
    followed only by alphanumerics and underscores."""
    if not tk:
        return False
    if not (tk[0].isalpha() or tk[0] == '_'):
        return False
    return all(ch.isalnum() or ch == '_' for ch in tk[1:])
def process_identifiers(l):
    """Rewrite identifiers in line *l* so names match across ELF and MachO.

    MachO prefixes symbol names with an extra underscore; strip it from
    both mangled ('__Z...') and plain ('_name') symbols so output is
    comparable across platforms.
    """
    rebuilt = []
    for token in re.split(r'([a-zA-Z0-9_]+)', l):
        if is_identifier(token):
            if token.startswith('__Z'):
                # Itanium-mangled MachO symbol: drop the extra underscore.
                token = token[1:]
            elif (token.startswith('_') and len(token) > 1
                    and token[1].isalpha() and token[1] != 'Z'):
                # Plain MachO symbol: drop the leading underscore.
                token = token[1:]
        rebuilt.append(token)
    return ''.join(rebuilt)
def process_asm(asm):
    """Strip *asm* of unwanted directives, comments and data definitions,
    returning cleaned assembly with normalized labels and identifiers.

    All regex literals are raw strings; the originals relied on invalid
    escape sequences such as '\\s', which warn on Python 3.12+.
    """
    new_contents = ''
    asm = transform_labels(asm)
    # TODO: Add more things we want to remove
    discard_regexes = [
        re.compile(r"\s+\..*$"),  # directive
        re.compile(r"\s*#(NO_APP|APP)$"),  # inline ASM
        re.compile(r"\s*#.*$"),  # comment line
        re.compile(r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"),  # global directive
        re.compile(r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
    keep_regexes = [
    ]
    fn_label_def = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_.]*:")
    for line in asm.splitlines():
        # Remove Mach-O attribute
        line = line.replace('@GOTPCREL', '')
        add_line = True
        for reg in discard_regexes:
            if reg.match(line) is not None:
                add_line = False
                break
        for reg in keep_regexes:
            if reg.match(line) is not None:
                add_line = True
                break
        if add_line:
            # Separate consecutive function bodies with a blank line.
            if fn_label_def.match(line) and len(new_contents) != 0:
                new_contents += '\n'
            line = process_identifiers(line)
            new_contents += line
            new_contents += '\n'
    return new_contents
def main():
    """CLI entry point: read an assembly file, strip it, write the result."""
    parser = ArgumentParser(
        description='generate a stripped assembly file')
    parser.add_argument(
        'input', metavar='input', type=str, nargs=1,
        help='An input assembly file')
    parser.add_argument(
        'out', metavar='output', type=str, nargs=1,
        help='The output file')
    args, unknown_args = parser.parse_known_args()
    # Renamed locals: the original bound 'input', shadowing the builtin.
    input_path = args.input[0]
    output_path = args.out[0]
    if not os.path.isfile(input_path):
        print(("ERROR: input file '%s' does not exist") % input_path)
        sys.exit(1)
    with open(input_path, 'r') as f:
        contents = f.read()
    new_contents = process_asm(contents)
    with open(output_path, 'w') as f:
        f.write(new_contents)
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| 4,419 | 28.078947 | 92 | py |
benchmark | benchmark-main/tools/gbench/report.py | """report.py - Utilities for reporting statistics about benchmark results
"""
import unittest
import os
import re
import copy
import random
from scipy.stats import mannwhitneyu, gmean
from numpy import array
class BenchmarkColor(object):
    """A named ANSI color code that can be interpolated into format strings."""

    def __init__(self, name, code):
        # Human-readable name (e.g. 'FAIL') and the ANSI escape sequence.
        self.name = name
        self.code = code

    def __repr__(self):
        return '{}({!r}, {!r})'.format(
            self.__class__.__name__, self.name, self.code)

    def __format__(self, format):
        # Formatting a color yields its escape code, so colors drop
        # straight into '{}'.format(...) templates.
        return self.code
# Benchmark Colors Enumeration: ANSI escape sequences used when colored
# terminal output is enabled; BC_NONE is the no-op color.
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
# Minimum repetitions for the Mann-Whitney U test to run at all.
UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better.
# Suffix for the p-value column in reports.
UTEST_COL_NAME = "_pvalue"
# Conversion factors from a benchmark 'time_unit' to seconds.
_TIME_UNIT_TO_SECONDS_MULTIPLIER = {
    "s": 1.0,
    "ms": 1e-3,
    "us": 1e-6,
    "ns": 1e-9,
}
def color_format(use_color, fmt_str, *args, **kwargs):
    """
    Like fmt_str.format(*args, **kwargs), except that when use_color is
    False every BenchmarkColor argument is swapped for BC_NONE (the empty
    escape code), producing uncolored output.
    """
    assert use_color is True or use_color is False
    if use_color:
        return fmt_str.format(*args, **kwargs)
    def plain(value):
        return BC_NONE if isinstance(value, BenchmarkColor) else value
    stripped_args = [plain(arg) for arg in args]
    stripped_kwargs = {key: plain(arg) for key, arg in kwargs.items()}
    return fmt_str.format(*stripped_args, **stripped_kwargs)
def find_longest_name(benchmark_list):
    """
    Return the length of the longest benchmark name in a given list of
    benchmark JSON objects (never less than 1).
    """
    name_lengths = [len(bc['name']) for bc in benchmark_list]
    return max([1] + name_lengths)
def calculate_change(old_val, new_val):
    """
    Return a float representing the decimal change between old_val and new_val.
    """
    if old_val == 0 and new_val == 0:
        return 0.0
    if old_val == 0:
        # Division by zero is undefined, so measure against the midpoint.
        midpoint = float(old_val + new_val) / 2
        return float(new_val - old_val) / midpoint
    return float(new_val - old_val) / abs(old_val)
def filter_benchmark(json_orig, family, replacement=""):
    """
    Keep only the benchmarks whose name matches the 'family' regex,
    renaming the matched portion to *replacement*. The input JSON is
    never mutated.
    """
    pattern = re.compile(family)
    kept = []
    for entry in json_orig['benchmarks']:
        if not pattern.search(entry['name']):
            continue
        clone = copy.deepcopy(entry)  # Do NOT modify the old name!
        clone['name'] = pattern.sub(replacement, clone['name'])
        kept.append(clone)
    return {'benchmarks': kept}
def get_unique_benchmark_names(json):
    """
    While *keeping* the order, give all the unique 'names' used for benchmarks.
    """
    # dict.fromkeys preserves first-seen order while deduplicating.
    return list(dict.fromkeys(x['name'] for x in json['benchmarks']))
def intersect(list1, list2):
    """
    Return the elements of list1 that also occur in list2, preserving
    list1's ordering.
    """
    common = []
    for item in list1:
        if item in list2:
            common.append(item)
    return common
def is_potentially_comparable_benchmark(x):
    """A benchmark entry is comparable when it reports both times and a unit."""
    return all(key in x for key in ('time_unit', 'real_time', 'cpu_time'))
def partition_benchmarks(json1, json2):
    """
    Pair up benchmarks appearing (by name) in both inputs, preserving
    order. Returns a list of [lhs_runs, rhs_runs] groups, each holding all
    repetitions that share the name and a common time unit.
    """
    common_names = intersect(get_unique_benchmark_names(json1),
                             get_unique_benchmark_names(json2))
    partitions = []
    for name in common_names:
        # Pick the time unit from the first comparable lhs entry; skip the
        # name entirely when no lhs entry is comparable.
        time_unit = None
        for candidate in json1['benchmarks']:
            if (candidate['name'] == name
                    and is_potentially_comparable_benchmark(candidate)):
                time_unit = candidate['time_unit']
                break
        if time_unit is None:
            continue
        # Filter both sides by name and time unit; all repetitions are
        # assumed to be comparable.
        lhs = [e for e in json1['benchmarks']
               if e['name'] == name and e['time_unit'] == time_unit]
        rhs = [e for e in json2['benchmarks']
               if e['name'] == name and e['time_unit'] == time_unit]
        partitions.append([lhs, rhs])
    return partitions
def get_timedelta_field_as_seconds(benchmark, field_name):
    """
    Read benchmark[field_name] (a duration expressed in the benchmark's
    'time_unit', defaulting to seconds) and convert it to seconds.
    """
    unit = benchmark.get('time_unit', 's')
    multiplier = _TIME_UNIT_TO_SECONDS_MULTIPLIER.get(unit)
    return benchmark[field_name] * multiplier
def calculate_geomean(json):
    """
    Gather [real_time, cpu_time] pairs (converted to seconds) from every
    non-aggregate benchmark and return their column-wise geometric mean.
    Returns an empty array when there are no such benchmarks.
    """
    samples = []
    for entry in json['benchmarks']:
        # Aggregate rows (mean/median/...) would skew the geomean; skip them.
        if entry.get('run_type') == 'aggregate':
            continue
        samples.append([get_timedelta_field_as_seconds(entry, 'real_time'),
                        get_timedelta_field_as_seconds(entry, 'cpu_time')])
    if not samples:
        return array([])
    return gmean(samples)
def extract_field(partition, field_name):
    """Pull field_name from every entry on each side of a partition.

    The two sides may have different repetition counts; *all* values are kept.
    """
    left = [entry[field_name] for entry in partition[0]]
    right = [entry[field_name] for entry in partition[1]]
    return [left, right]
def calc_utest(timings_cpu, timings_time):
    """
    Run a two-sided Mann-Whitney U test over both timing series.

    Returns (have_optimal_repetitions, cpu_pvalue, time_pvalue); the
    p-values are None when any series has too few repetitions to test.
    """
    repetition_counts = [len(timings_time[0]), len(timings_time[1]),
                         len(timings_cpu[0]), len(timings_cpu[1])]
    fewest = min(repetition_counts)
    # Every series must reach the minimum repetition count for the test
    # to be meaningful at all.
    if fewest < UTEST_MIN_REPETITIONS:
        return False, None, None
    pvalue_time = mannwhitneyu(
        timings_time[0], timings_time[1], alternative='two-sided').pvalue
    pvalue_cpu = mannwhitneyu(
        timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
    return (fewest >= UTEST_OPTIMAL_REPETITIONS), pvalue_cpu, pvalue_time
def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
    """Render one benchmark's U-test results as colored console lines.

    Returns an empty list when no p-values could be computed at all;
    otherwise returns a single formatted line (as a one-element list).
    """
    def get_utest_color(pval):
        # Green when the difference is significant at utest_alpha, red otherwise.
        return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
    # Check if we failed miserably with minimum required repetitions for utest
    if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None:
        return []
    dsc = "U Test, Repetitions: {} vs {}".format(
        utest['nr_of_repetitions'], utest['nr_of_repetitions_other'])
    dsc_color = BC_OKGREEN
    # We still got some results to show but issue a warning about it.
    if not utest['have_optimal_repetitions']:
        dsc_color = BC_WARNING
        dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
            UTEST_OPTIMAL_REPETITIONS)
    # Columns: padded "<name>_pvalue", time p-value, cpu p-value, description.
    special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
    return [color_format(use_color,
                         special_str,
                         BC_HEADER,
                         "{}{}".format(bc_name, UTEST_COL_NAME),
                         first_col_width,
                         get_utest_color(
                             utest['time_pvalue']), utest['time_pvalue'],
                         get_utest_color(
                             utest['cpu_pvalue']), utest['cpu_pvalue'],
                         dsc_color, dsc,
                         endc=BC_ENDC)]
def get_difference_report(
        json1,
        json2,
        utest=False):
    """
    Calculate and report the difference between each test of two benchmarks
    runs specified as 'json1' and 'json2'. Output is another json containing
    relevant details for each test run, plus a final OVERALL_GEOMEAN entry
    when both runs yielded comparable timings.
    """
    assert utest is True or utest is False
    diff_report = []
    partitions = partition_benchmarks(json1, json2)
    for partition in partitions:
        # All entries in a partition share the same name and time unit,
        # so the first lhs entry is representative.
        benchmark_name = partition[0][0]['name']
        label = partition[0][0]['label'] if 'label' in partition[0][0] else ''
        time_unit = partition[0][0]['time_unit']
        measurements = []
        utest_results = {}
        # Careful, we may have different repetition count.
        for i in range(min(len(partition[0]), len(partition[1]))):
            bn = partition[0][i]
            other_bench = partition[1][i]
            measurements.append({
                'real_time': bn['real_time'],
                'cpu_time': bn['cpu_time'],
                'real_time_other': other_bench['real_time'],
                'cpu_time_other': other_bench['cpu_time'],
                'time': calculate_change(bn['real_time'], other_bench['real_time']),
                'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time'])
            })
        # After processing the whole partition, if requested, do the U test.
        if utest:
            timings_cpu = extract_field(partition, 'cpu_time')
            timings_time = extract_field(partition, 'real_time')
            have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(
                timings_cpu, timings_time)
            if cpu_pvalue and time_pvalue:
                utest_results = {
                    'have_optimal_repetitions': have_optimal_repetitions,
                    'cpu_pvalue': cpu_pvalue,
                    'time_pvalue': time_pvalue,
                    'nr_of_repetitions': len(timings_cpu[0]),
                    'nr_of_repetitions_other': len(timings_cpu[1])
                }
        # Store only if we had any measurements for given benchmark.
        # E.g. partition_benchmarks will filter out the benchmarks having
        # time units which are not compatible with other time units in the
        # benchmark suite.
        if measurements:
            run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else ''
            aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else ''
            diff_report.append({
                'name': benchmark_name,
                'label': label,
                'measurements': measurements,
                'time_unit': time_unit,
                'run_type': run_type,
                'aggregate_name': aggregate_name,
                'utest': utest_results
            })
    # Summarize both runs with a geomean row when each produced at least
    # one non-aggregate, comparable timing.
    lhs_gmean = calculate_geomean(json1)
    rhs_gmean = calculate_geomean(json2)
    if lhs_gmean.any() and rhs_gmean.any():
        diff_report.append({
            'name': 'OVERALL_GEOMEAN',
            'label': '',
            'measurements': [{
                'real_time': lhs_gmean[0],
                'cpu_time': lhs_gmean[1],
                'real_time_other': rhs_gmean[0],
                'cpu_time_other': rhs_gmean[1],
                'time': calculate_change(lhs_gmean[0], rhs_gmean[0]),
                'cpu': calculate_change(lhs_gmean[1], rhs_gmean[1])
            }],
            'time_unit': 's',
            'run_type': 'aggregate',
            'aggregate_name': 'geomean',
            'utest': {}
        })
    return diff_report
def print_difference_report(
        json_diff_report,
        include_aggregates_only=False,
        utest=False,
        utest_alpha=0.05,
        use_color=True):
    """
    Pretty-print a diff report produced by get_difference_report() and
    return it as a list of console lines: two header lines followed by one
    line per measurement, plus optional U-test rows per benchmark.
    """
    assert utest is True or utest is False
    def get_color(res):
        # Red for slowdowns, cyan for clear speedups, white for noise.
        if res > 0.05:
            return BC_FAIL
        elif res > -0.07:
            return BC_WHITE
        else:
            return BC_CYAN
    # Size the first column to the longest name plus the p-value suffix.
    first_col_width = find_longest_name(json_diff_report)
    first_col_width = max(
        first_col_width,
        len('Benchmark'))
    first_col_width += len(UTEST_COL_NAME)
    first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
        'Benchmark', 12 + first_col_width)
    output_strs = [first_line, '-' * len(first_line)]
    fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
    for benchmark in json_diff_report:
        # *If* we were asked to only include aggregates,
        # and if it is non-aggregate, then don't print it.
        if not include_aggregates_only or not 'run_type' in benchmark or benchmark['run_type'] == 'aggregate':
            for measurement in benchmark['measurements']:
                output_strs += [color_format(use_color,
                                             fmt_str,
                                             BC_HEADER,
                                             benchmark['name'],
                                             first_col_width,
                                             get_color(measurement['time']),
                                             measurement['time'],
                                             get_color(measurement['cpu']),
                                             measurement['cpu'],
                                             measurement['real_time'],
                                             measurement['real_time_other'],
                                             measurement['cpu_time'],
                                             measurement['cpu_time_other'],
                                             endc=BC_ENDC)]
        # After processing the measurements, if requested and
        # if applicable (e.g. u-test exists for given benchmark),
        # print the U test.
        if utest and benchmark['utest']:
            output_strs += print_utest(benchmark['name'],
                                       benchmark['utest'],
                                       utest_alpha=utest_alpha,
                                       first_col_width=first_col_width,
                                       use_color=use_color)
    return output_strs
###############################################################################
# Unit tests
class TestGetUniqueBenchmarkNames(unittest.TestCase):
    """Tests for get_unique_benchmark_names() on the test3_run0.json fixture."""
    def load_results(self):
        """Load and return the parsed test3_run0.json fixture."""
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput = os.path.join(testInputs, 'test3_run0.json')
        with open(testOutput, 'r') as f:
            # Use a distinct name: rebinding 'json' here would shadow the
            # json module just imported above.
            results = json.load(f)
        return results
    def test_basic(self):
        expect_lines = [
            'BM_One',
            'BM_Two',
            'short', # These two are not sorted
            'medium', # These two are not sorted
        ]
        results = self.load_results()
        output_lines = get_unique_benchmark_names(results)
        print("\n")
        print("\n".join(output_lines))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            self.assertEqual(expect_lines[i], output_lines[i])
class TestReportDifference(unittest.TestCase):
    """End-to-end checks of get_difference_report()/print_difference_report()
    over the test1_run1.json vs test1_run2.json fixtures."""
    @classmethod
    def setUpClass(cls):
        # Build the diff report once; both tests below consume it read-only.
        def load_results():
            import json
            testInputs = os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)),
                'Inputs')
            testOutput1 = os.path.join(testInputs, 'test1_run1.json')
            testOutput2 = os.path.join(testInputs, 'test1_run2.json')
            with open(testOutput1, 'r') as f:
                json1 = json.load(f)
            with open(testOutput2, 'r') as f:
                json2 = json.load(f)
            return json1, json2
        json1, json2 = load_results()
        cls.json_diff_report = get_difference_report(json1, json2)
    def test_json_diff_report_pretty_printing(self):
        # Expected columns: name, time delta, cpu delta, old/new real time,
        # old/new cpu time.
        expect_lines = [
            ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
            ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
            ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
            ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
            ['BM_100xSlower', '+99.0000', '+99.0000',
                '100', '10000', '100', '10000'],
            ['BM_100xFaster', '-0.9900', '-0.9900',
                '10000', '100', '10000', '100'],
            ['BM_10PercentCPUToTime', '+0.1000',
                '-0.1000', '100', '110', '100', '90'],
            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
            ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
            ['BM_hasLabel', '+0.0000', '+0.0000', '1', '1', '1', '1'],
            ['OVERALL_GEOMEAN', '-0.8113', '-0.7779', '0', '0', '0', '0']
        ]
        output_lines_with_header = print_difference_report(
            self.json_diff_report, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(expect_lines[i], parts)
    def test_json_diff_report_output(self):
        # Structural check of the raw diff-report dictionaries.
        expected_output = [
            {
                'name': 'BM_SameTimes',
                'label': '',
                'measurements': [{'time': 0.0000, 'cpu': 0.0000,
                                  'real_time': 10, 'real_time_other': 10,
                                  'cpu_time': 10, 'cpu_time_other': 10}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_2xFaster',
                'label': '',
                'measurements': [{'time': -0.5000, 'cpu': -0.5000,
                                  'real_time': 50, 'real_time_other': 25,
                                  'cpu_time': 50, 'cpu_time_other': 25}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_2xSlower',
                'label': '',
                'measurements': [{'time': 1.0000, 'cpu': 1.0000,
                                  'real_time': 50, 'real_time_other': 100,
                                  'cpu_time': 50, 'cpu_time_other': 100}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_1PercentFaster',
                'label': '',
                'measurements': [{'time': -0.0100, 'cpu': -0.0100,
                                  'real_time': 100, 'real_time_other': 98.9999999,
                                  'cpu_time': 100, 'cpu_time_other': 98.9999999}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_1PercentSlower',
                'label': '',
                'measurements': [{'time': 0.0100, 'cpu': 0.0100,
                                  'real_time': 100, 'real_time_other': 101,
                                  'cpu_time': 100, 'cpu_time_other': 101}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_10PercentFaster',
                'label': '',
                'measurements': [{'time': -0.1000, 'cpu': -0.1000,
                                  'real_time': 100, 'real_time_other': 90,
                                  'cpu_time': 100, 'cpu_time_other': 90}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_10PercentSlower',
                'label': '',
                'measurements': [{'time': 0.1000, 'cpu': 0.1000,
                                  'real_time': 100, 'real_time_other': 110,
                                  'cpu_time': 100, 'cpu_time_other': 110}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_100xSlower',
                'label': '',
                'measurements': [{'time': 99.0000, 'cpu': 99.0000,
                                  'real_time': 100, 'real_time_other': 10000,
                                  'cpu_time': 100, 'cpu_time_other': 10000}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_100xFaster',
                'label': '',
                'measurements': [{'time': -0.9900, 'cpu': -0.9900,
                                  'real_time': 10000, 'real_time_other': 100,
                                  'cpu_time': 10000, 'cpu_time_other': 100}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_10PercentCPUToTime',
                'label': '',
                'measurements': [{'time': 0.1000, 'cpu': -0.1000,
                                  'real_time': 100, 'real_time_other': 110,
                                  'cpu_time': 100, 'cpu_time_other': 90}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_ThirdFaster',
                'label': '',
                'measurements': [{'time': -0.3333, 'cpu': -0.3334,
                                  'real_time': 100, 'real_time_other': 67,
                                  'cpu_time': 100, 'cpu_time_other': 67}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'BM_NotBadTimeUnit',
                'label': '',
                'measurements': [{'time': -0.9000, 'cpu': 0.2000,
                                  'real_time': 0.4, 'real_time_other': 0.04,
                                  'cpu_time': 0.5, 'cpu_time_other': 0.6}],
                'time_unit': 's',
                'utest': {}
            },
            {
                'name': 'BM_hasLabel',
                'label': 'a label',
                'measurements': [{'time': 0.0000, 'cpu': 0.0000,
                                  'real_time': 1, 'real_time_other': 1,
                                  'cpu_time': 1, 'cpu_time_other': 1}],
                'time_unit': 's',
                'utest': {}
            },
            {
                'name': 'OVERALL_GEOMEAN',
                'label': '',
                'measurements': [{'real_time': 3.1622776601683826e-06, 'cpu_time': 3.2130844755623912e-06,
                                  'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07,
                                  'time': -0.8112976497120911, 'cpu': -0.7778551721181174}],
                'time_unit': 's',
                'run_type': 'aggregate',
                'aggregate_name': 'geomean', 'utest': {}
            },
        ]
        self.assertEqual(len(self.json_diff_report), len(expected_output))
        for out, expected in zip(
                self.json_diff_report, expected_output):
            self.assertEqual(out['name'], expected['name'])
            self.assertEqual(out['label'], expected['label'])
            self.assertEqual(out['time_unit'], expected['time_unit'])
            assert_utest(self, out, expected)
            assert_measurements(self, out, expected)
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
    """Diff-report checks across two benchmark families carved out of a
    single run (test2_run.json) via filter_benchmark()."""
    @classmethod
    def setUpClass(cls):
        def load_result():
            import json
            testInputs = os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)),
                'Inputs')
            testOutput = os.path.join(testInputs, 'test2_run.json')
            with open(testOutput, 'r') as f:
                json = json.load(f)
            return json
        json = load_result()
        # Rename both families to "." so the diff lines them up by name.
        json1 = filter_benchmark(json, "BM_Z.ro", ".")
        json2 = filter_benchmark(json, "BM_O.e", ".")
        cls.json_diff_report = get_difference_report(json1, json2)
    def test_json_diff_report_pretty_printing(self):
        expect_lines = [
            ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
            ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
            ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
            ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
            ['OVERALL_GEOMEAN', '-0.5000', '-0.5000', '0', '0', '0', '0']
        ]
        output_lines_with_header = print_difference_report(
            self.json_diff_report, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(expect_lines[i], parts)
    def test_json_diff_report(self):
        expected_output = [
            {
                'name': u'.',
                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': u'./4',
                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}],
                'time_unit': 'ns',
                'utest': {},
            },
            {
                'name': u'Prefix/.',
                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': u'Prefix/./3',
                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'OVERALL_GEOMEAN',
                'measurements': [{'real_time': 2.213363839400641e-08, 'cpu_time': 2.213363839400641e-08,
                                  'real_time_other': 1.1066819197003185e-08, 'cpu_time_other': 1.1066819197003185e-08,
                                  'time': -0.5000000000000009, 'cpu': -0.5000000000000009}],
                'time_unit': 's',
                'run_type': 'aggregate',
                'aggregate_name': 'geomean',
                'utest': {}
            }
        ]
        self.assertEqual(len(self.json_diff_report), len(expected_output))
        for out, expected in zip(
                self.json_diff_report, expected_output):
            self.assertEqual(out['name'], expected['name'])
            self.assertEqual(out['time_unit'], expected['time_unit'])
            assert_utest(self, out, expected)
            assert_measurements(self, out, expected)
class TestReportDifferenceWithUTest(unittest.TestCase):
    """Diff-report checks with the U test enabled, over the
    test3_run0.json vs test3_run1.json fixtures (repeated benchmarks)."""
    @classmethod
    def setUpClass(cls):
        def load_results():
            import json
            testInputs = os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)),
                'Inputs')
            testOutput1 = os.path.join(testInputs, 'test3_run0.json')
            testOutput2 = os.path.join(testInputs, 'test3_run1.json')
            with open(testOutput1, 'r') as f:
                json1 = json.load(f)
            with open(testOutput2, 'r') as f:
                json2 = json.load(f)
            return json1, json2
        json1, json2 = load_results()
        cls.json_diff_report = get_difference_report(
            json1, json2, utest=True)
    def test_json_diff_report_pretty_printing(self):
        # Benchmarks with >= 2 repetitions get an extra '<name>_pvalue' row.
        expect_lines = [
            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
            ['BM_Two_pvalue',
             '1.0000',
             '0.6667',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '2.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
            ['short_pvalue',
             '0.7671',
             '0.2000',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '3.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
            ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
        ]
        output_lines_with_header = print_difference_report(
            self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)
    def test_json_diff_report_pretty_printing_aggregates_only(self):
        # With include_aggregates_only, per-repetition rows are suppressed
        # but the U-test rows still appear.
        expect_lines = [
            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
            ['BM_Two_pvalue',
             '1.0000',
             '0.6667',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '2.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
            ['short_pvalue',
             '0.7671',
             '0.2000',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '3.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
        ]
        output_lines_with_header = print_difference_report(
            self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)
    def test_json_diff_report(self):
        expected_output = [
            {
                'name': u'BM_One',
                'measurements': [
                    {'time': -0.1,
                     'cpu': 0.1,
                     'real_time': 10,
                     'real_time_other': 9,
                     'cpu_time': 100,
                     'cpu_time_other': 110}
                ],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': u'BM_Two',
                'measurements': [
                    {'time': 0.1111111111111111,
                     'cpu': -0.011111111111111112,
                     'real_time': 9,
                     'real_time_other': 10,
                     'cpu_time': 90,
                     'cpu_time_other': 89},
                    {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
                     'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
                ],
                'time_unit': 'ns',
                'utest': {
                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0
                }
            },
            {
                'name': u'short',
                'measurements': [
                    {'time': -0.125,
                     'cpu': -0.0625,
                     'real_time': 8,
                     'real_time_other': 7,
                     'cpu_time': 80,
                     'cpu_time_other': 75},
                    {'time': -0.4325,
                     'cpu': -0.13506493506493514,
                     'real_time': 8,
                     'real_time_other': 4.54,
                     'cpu_time': 77,
                     'cpu_time_other': 66.6}
                ],
                'time_unit': 'ns',
                'utest': {
                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
                }
            },
            {
                'name': u'medium',
                'measurements': [
                    {'time': -0.375,
                     'cpu': -0.3375,
                     'real_time': 8,
                     'real_time_other': 5,
                     'cpu_time': 80,
                     'cpu_time_other': 53}
                ],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': 'OVERALL_GEOMEAN',
                'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
                                  'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
                                  'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
                'time_unit': 's',
                'run_type': 'aggregate',
                'aggregate_name': 'geomean',
                'utest': {}
            }
        ]
        self.assertEqual(len(self.json_diff_report), len(expected_output))
        for out, expected in zip(
                self.json_diff_report, expected_output):
            self.assertEqual(out['name'], expected['name'])
            self.assertEqual(out['time_unit'], expected['time_unit'])
            assert_utest(self, out, expected)
            assert_measurements(self, out, expected)
class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
        unittest.TestCase):
    """U-test diff-report checks on the test3 fixtures.

    NOTE(review): this class largely duplicates TestReportDifferenceWithUTest
    (same fixtures, same report); only minor expected-output details differ.
    """
    @classmethod
    def setUpClass(cls):
        def load_results():
            import json
            testInputs = os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)),
                'Inputs')
            testOutput1 = os.path.join(testInputs, 'test3_run0.json')
            testOutput2 = os.path.join(testInputs, 'test3_run1.json')
            with open(testOutput1, 'r') as f:
                json1 = json.load(f)
            with open(testOutput2, 'r') as f:
                json2 = json.load(f)
            return json1, json2
        json1, json2 = load_results()
        cls.json_diff_report = get_difference_report(
            json1, json2, utest=True)
    def test_json_diff_report_pretty_printing(self):
        expect_lines = [
            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
            ['BM_Two_pvalue',
             '1.0000',
             '0.6667',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '2.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
            ['short_pvalue',
             '0.7671',
             '0.2000',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '3.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
            ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
        ]
        output_lines_with_header = print_difference_report(
            self.json_diff_report,
            utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)
    def test_json_diff_report(self):
        expected_output = [
            {
                'name': u'BM_One',
                'measurements': [
                    {'time': -0.1,
                     'cpu': 0.1,
                     'real_time': 10,
                     'real_time_other': 9,
                     'cpu_time': 100,
                     'cpu_time_other': 110}
                ],
                'time_unit': 'ns',
                'utest': {}
            },
            {
                'name': u'BM_Two',
                'measurements': [
                    {'time': 0.1111111111111111,
                     'cpu': -0.011111111111111112,
                     'real_time': 9,
                     'real_time_other': 10,
                     'cpu_time': 90,
                     'cpu_time_other': 89},
                    {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
                     'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
                ],
                'time_unit': 'ns',
                'utest': {
                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0
                }
            },
            {
                'name': u'short',
                'measurements': [
                    {'time': -0.125,
                     'cpu': -0.0625,
                     'real_time': 8,
                     'real_time_other': 7,
                     'cpu_time': 80,
                     'cpu_time_other': 75},
                    {'time': -0.4325,
                     'cpu': -0.13506493506493514,
                     'real_time': 8,
                     'real_time_other': 4.54,
                     'cpu_time': 77,
                     'cpu_time_other': 66.6}
                ],
                'time_unit': 'ns',
                'utest': {
                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
                }
            },
            {
                'name': u'medium',
                'measurements': [
                    {'real_time_other': 5,
                     'cpu_time': 80,
                     'time': -0.375,
                     'real_time': 8,
                     'cpu_time_other': 53,
                     'cpu': -0.3375
                     }
                ],
                'utest': {},
                'time_unit': u'ns',
                'aggregate_name': ''
            },
            {
                'name': 'OVERALL_GEOMEAN',
                'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
                                  'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
                                  'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
                'time_unit': 's',
                'run_type': 'aggregate',
                'aggregate_name': 'geomean',
                'utest': {}
            }
        ]
        self.assertEqual(len(self.json_diff_report), len(expected_output))
        for out, expected in zip(
                self.json_diff_report, expected_output):
            self.assertEqual(out['name'], expected['name'])
            self.assertEqual(out['time_unit'], expected['time_unit'])
            assert_utest(self, out, expected)
            assert_measurements(self, out, expected)
class TestReportDifferenceForPercentageAggregates(
        unittest.TestCase):
    """Diff-report checks over percentage-style aggregates
    (test4_run0.json vs test4_run1.json)."""
    @classmethod
    def setUpClass(cls):
        def load_results():
            import json
            testInputs = os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)),
                'Inputs')
            testOutput1 = os.path.join(testInputs, 'test4_run0.json')
            testOutput2 = os.path.join(testInputs, 'test4_run1.json')
            with open(testOutput1, 'r') as f:
                json1 = json.load(f)
            with open(testOutput2, 'r') as f:
                json2 = json.load(f)
            return json1, json2
        json1, json2 = load_results()
        cls.json_diff_report = get_difference_report(
            json1, json2, utest=True)
    def test_json_diff_report_pretty_printing(self):
        expect_lines = [
            ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0']
        ]
        output_lines_with_header = print_difference_report(
            self.json_diff_report,
            utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)
    def test_json_diff_report(self):
        expected_output = [
            {
                'name': u'whocares',
                'measurements': [
                    {'time': -0.5,
                     'cpu': 0.5,
                     'real_time': 0.01,
                     'real_time_other': 0.005,
                     'cpu_time': 0.10,
                     'cpu_time_other': 0.15}
                ],
                'time_unit': 'ns',
                'utest': {}
            }
        ]
        self.assertEqual(len(self.json_diff_report), len(expected_output))
        for out, expected in zip(
                self.json_diff_report, expected_output):
            self.assertEqual(out['name'], expected['name'])
            self.assertEqual(out['time_unit'], expected['time_unit'])
            assert_utest(self, out, expected)
            assert_measurements(self, out, expected)
class TestReportSorting(unittest.TestCase):
    """Checks that util.sort_benchmark_results() restores the canonical
    family/instance/repetition order regardless of input shuffling."""
    @classmethod
    def setUpClass(cls):
        def load_result():
            import json
            testInputs = os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)),
                'Inputs')
            testOutput = os.path.join(testInputs, 'test4_run.json')
            with open(testOutput, 'r') as f:
                json = json.load(f)
            return json
        cls.json = load_result()
    def test_json_diff_report_pretty_printing(self):
        import util
        # The fixture names encode the expected post-sort position.
        expected_names = [
            "99 family 0 instance 0 repetition 0",
            "98 family 0 instance 0 repetition 1",
            "97 family 0 instance 0 aggregate",
            "96 family 0 instance 1 repetition 0",
            "95 family 0 instance 1 repetition 1",
            "94 family 0 instance 1 aggregate",
            "93 family 1 instance 0 repetition 0",
            "92 family 1 instance 0 repetition 1",
            "91 family 1 instance 0 aggregate",
            "90 family 1 instance 1 repetition 0",
            "89 family 1 instance 1 repetition 1",
            "88 family 1 instance 1 aggregate"
        ]
        # Shuffle repeatedly to exercise many permutations of the input.
        for n in range(len(self.json['benchmarks']) ** 2):
            random.shuffle(self.json['benchmarks'])
            sorted_benchmarks = util.sort_benchmark_results(self.json)[
                'benchmarks']
            self.assertEqual(len(expected_names), len(sorted_benchmarks))
            for out, expected in zip(sorted_benchmarks, expected_names):
                self.assertEqual(out['name'], expected)
def assert_utest(unittest_instance, lhs, rhs):
    """Assert that the 'utest' sub-dicts of lhs and rhs agree.

    p-values are compared approximately; an empty lhs['utest'] must be
    matched by an equally empty rhs['utest'].
    """
    left = lhs['utest']
    right = rhs['utest']
    if not left:
        # lhs is empty; rhs must be too.
        unittest_instance.assertEqual(left, right)
        return
    unittest_instance.assertAlmostEqual(
        left['cpu_pvalue'],
        right['cpu_pvalue'])
    unittest_instance.assertAlmostEqual(
        left['time_pvalue'],
        right['time_pvalue'])
    unittest_instance.assertEqual(
        left['have_optimal_repetitions'],
        right['have_optimal_repetitions'])
def assert_measurements(unittest_instance, lhs, rhs):
    """Pairwise-compare the 'measurements' lists of lhs and rhs.

    Raw timings must match exactly; the derived 'time'/'cpu' ratios are
    computed values, so they are only compared to four decimal places.
    """
    for actual, expected in zip(lhs['measurements'], rhs['measurements']):
        for exact_key in ('real_time', 'cpu_time'):
            unittest_instance.assertEqual(actual[exact_key], expected[exact_key])
        for approx_key in ('time', 'cpu'):
            unittest_instance.assertAlmostEqual(
                actual[approx_key], expected[approx_key], places=4)
# Run the unit tests above when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import re
import subprocess
import sys
import tempfile
# Input file type enumeration (returned by classify_input_file).
IT_Invalid = 0  # not a benchmark executable or JSON results file
IT_JSON = 1  # JSON benchmark-results file
IT_Executable = 2  # benchmark binary
# Leading bytes needed to identify an executable: 'MZ' (2 bytes) on
# Windows, a 4-byte magic number elsewhere.
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
def is_executable_file(filename):
    """
    Return 'True' if 'filename' names a valid file which is likely
    an executable: one starting with the magic bytes of a PE ('MZ'),
    Mach-O, or ELF image.
    """
    if not os.path.isfile(filename):
        return False
    # 'MZ' is only two bytes; the other formats use four-byte magics.
    num_magic_bytes = 2 if sys.platform.startswith('win') else 4
    with open(filename, mode='rb') as f:
        magic_bytes = f.read(num_magic_bytes)
    if sys.platform.startswith('win'):
        return magic_bytes == b'MZ'
    if sys.platform == 'darwin':
        mach_o_magics = [
            b'\xfe\xed\xfa\xce',  # MH_MAGIC
            b'\xce\xfa\xed\xfe',  # MH_CIGAM
            b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
            b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
            b'\xca\xfe\xba\xbe',  # FAT_MAGIC
            b'\xbe\xba\xfe\xca'   # FAT_CIGAM
        ]
        return magic_bytes in mach_o_magics
    return magic_bytes == b'\x7FELF'
def is_json_file(filename):
    """
    Returns 'True' if 'filename' names a valid JSON output file.
    'False' otherwise.
    """
    try:
        with open(filename, 'r') as f:
            json.load(f)
        return True
    except (OSError, ValueError):
        # Unreadable/missing file, or malformed JSON/text
        # (json.JSONDecodeError and UnicodeDecodeError are ValueError
        # subclasses). The previous bare 'except BaseException' also
        # swallowed KeyboardInterrupt/SystemExit; those now propagate.
        return False
def classify_input_file(filename):
    """
    Return a tuple (type, msg) where 'type' is one of the IT_* constants
    classifying 'filename'. When 'type' is IT_Invalid, 'msg' carries a
    human-readable description of the problem; otherwise it is None.
    """
    if not os.path.exists(filename):
        return IT_Invalid, "'%s' does not exist" % filename
    if not os.path.isfile(filename):
        return IT_Invalid, "'%s' does not name a file" % filename
    if is_executable_file(filename):
        return IT_Executable, None
    if is_json_file(filename):
        return IT_JSON, None
    return IT_Invalid, \
        "'%s' does not name a valid benchmark executable or JSON file" % filename
def check_input_file(filename):
    """
    Classify the file named by 'filename' and return the classification.
    If the file is classified as 'IT_Invalid' print an error message and
    terminate the program with exit status 1.
    """
    ftype, msg = classify_input_file(filename)
    if ftype != IT_Invalid:
        return ftype
    print("Invalid input file: %s" % msg)
    sys.exit(1)
def find_benchmark_flag(prefix, benchmark_flags):
    """
    Search 'benchmark_flags' for flags of the form '<prefix><arg>' and
    return the arg of the *last* match, or None when nothing matches.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    value = None
    cut = len(prefix)
    for flag in benchmark_flags:
        if flag.startswith(prefix):
            value = flag[cut:]
    return value
def remove_benchmark_flags(prefix, benchmark_flags):
    """
    Return a new list holding every flag from 'benchmark_flags' except
    those starting with 'prefix'.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    kept = []
    for flag in benchmark_flags:
        if not flag.startswith(prefix):
            kept.append(flag)
    return kept
def load_benchmark_results(fname, benchmark_filter):
    """
    Read benchmark output from a file and return the JSON object.
    Apply benchmark_filter, a regular expression, with nearly the same
    semantics of the --benchmark_filter argument. May be None.
    Note: the Python regular expression engine is used instead of the
    one used by the C++ code, which may produce different results
    in complex cases.
    REQUIRES: 'fname' names a file containing JSON benchmark output.
    """
    def benchmark_wanted(benchmark):
        if benchmark_filter is None:
            return True
        # Prefer 'run_name' when present; fall back to 'name'.
        name = benchmark.get('run_name', None) or benchmark['name']
        return re.search(benchmark_filter, name) is not None
    with open(fname, 'r') as f:
        results = json.load(f)
    if 'benchmarks' in results:
        results['benchmarks'] = [b for b in results['benchmarks']
                                 if benchmark_wanted(b)]
    return results
def sort_benchmark_results(result):
    """
    Order result['benchmarks'] for display: by family index, then per-family
    instance index, then aggregates after plain runs, then repetition index.
    Relies on sorted() being stable and sorts from the innermost key to the
    outermost one. Mutates and returns `result`.
    """
    benchmarks = result['benchmarks']
    # From inner key to the outer key!
    benchmarks = sorted(
        benchmarks, key=lambda b: b.get('repetition_index', -1))
    benchmarks = sorted(
        benchmarks, key=lambda b: 1 if b.get('run_type') == "aggregate" else 0)
    benchmarks = sorted(
        benchmarks, key=lambda b: b.get('per_family_instance_index', -1))
    benchmarks = sorted(
        benchmarks, key=lambda b: b.get('family_index', -1))
    result['benchmarks'] = benchmarks
    return result
def run_benchmark(exe_name, benchmark_flags):
    """
    Run a benchmark specified by 'exe_name' with the specified
    'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
    real time console output.
    RETURNS: A JSON object representing the benchmark output
    """
    output_name = find_benchmark_flag('--benchmark_out=',
                                      benchmark_flags)
    is_temp_output = False
    if output_name is None:
        # Caller did not request an output file: capture results in a
        # temporary file and append the flag ourselves.
        is_temp_output = True
        thandle, output_name = tempfile.mkstemp()
        os.close(thandle)
        benchmark_flags = list(benchmark_flags) + \
            ['--benchmark_out=%s' % output_name]

    cmd = [exe_name] + benchmark_flags
    print("RUNNING: %s" % ' '.join(cmd))
    try:
        exitCode = subprocess.call(cmd)
        if exitCode != 0:
            print('TEST FAILED...')
            sys.exit(exitCode)
        json_res = load_benchmark_results(output_name, None)
    finally:
        # BUGFIX: always remove the temporary file. Previously a failing
        # benchmark exited via sys.exit() (and a failed JSON load raised)
        # before the unlink, leaking the temp file.
        if is_temp_output:
            os.unlink(output_name)
    return json_res
def run_or_load_benchmark(filename, benchmark_flags):
    """
    Get the results for a specified benchmark. If 'filename' specifies
    an executable benchmark then the results are generated by running the
    benchmark. Otherwise 'filename' must name a valid JSON output file,
    which is loaded and the result returned.
    """
    ftype = check_input_file(filename)
    if ftype == IT_Executable:
        return run_benchmark(filename, benchmark_flags)
    if ftype == IT_JSON:
        wanted = find_benchmark_flag('--benchmark_filter=', benchmark_flags)
        return load_benchmark_results(filename, wanted)
    raise ValueError('Unknown file type %s' % ftype)
| 6,858 | 32.622549 | 133 | py |
benchmark | benchmark-main/tools/gbench/__init__.py | """Google Benchmark tooling"""
# Package metadata for the gbench tooling.
__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
# Version tuple, rendered below as "<major>.<minor>.<patch>dev".
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
# No public names are re-exported from this package module.
__all__ = []
| 194 | 20.666667 | 63 | py |
benchmark | benchmark-main/bindings/python/google_benchmark/example.py | # Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Python using C++ benchmark framework.
To run this example, you must first install the `google_benchmark` Python package.
To install using `setup.py`, download and extract the `google_benchmark` source.
In the extracted directory, execute:
python setup.py install
"""
import random
import time
import google_benchmark as benchmark
from google_benchmark import Counter
@benchmark.register
def empty(state):
    """Measure the benchmark framework's per-iteration overhead."""
    while state:
        pass
@benchmark.register
def sum_million(state):
    """Time summing the integers 0..999,999 once per iteration."""
    while state:
        sum(range(1_000_000))
@benchmark.register
def pause_timing(state):
    """Pause timing every iteration.
    Only the in-place sort is measured; building the random input list is
    excluded via pause_timing()/resume_timing().
    """
    while state:
        # Construct a list of random ints every iteration without timing it
        state.pause_timing()
        random_list = [random.randint(0, 100) for _ in range(100)]
        state.resume_timing()
        # Time the in place sorting algorithm
        random_list.sort()
@benchmark.register
def skipped(state):
    """Show how to abort a benchmark run via state.skip_with_error()."""
    if True: # Test some predicate here.
        state.skip_with_error("some error")
        return # NOTE: You must explicitly return, or benchmark will continue.
    ... # Benchmark code would be here.
@benchmark.register
def manual_timing(state):
    """Report manually measured times via state.set_iteration_time()."""
    while state:
        # Manually count Python CPU time
        start = time.perf_counter() # perf_counter_ns() in Python 3.7+
        # Something to benchmark
        time.sleep(0.01)
        end = time.perf_counter()
        state.set_iteration_time(end - start)
@benchmark.register
def custom_counters(state):
    """Collect custom metric using benchmark.Counter.
    Demonstrates plain numeric counters plus the kIsRate, kInvert and
    kAvgThreads counter flags.
    """
    num_foo = 0.0
    while state:
        # Benchmark some code here
        pass
        # Collect some custom metric named foo
        num_foo += 0.13
    # Automatic Counter from numbers.
    state.counters["foo"] = num_foo
    # Set a counter as a rate.
    state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
    # Set a counter as an inverse of rate.
    state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
    # Set a counter as a thread-average quantity.
    state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
    # There's also a combined flag:
    state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)
@benchmark.register
@benchmark.option.measure_process_cpu_time()
@benchmark.option.use_real_time()
def with_options(state):
    """Show stacking benchmark.option decorators on one benchmark."""
    while state:
        sum(range(1_000_000))
@benchmark.register(name="sum_million_microseconds")
@benchmark.option.unit(benchmark.kMicrosecond)
def with_options2(state):
    """Same workload as sum_million, but reported in microseconds."""
    while state:
        sum(range(1_000_000))
@benchmark.register
@benchmark.option.arg(100)
@benchmark.option.arg(1000)
def passing_argument(state):
    """Run once per registered argument; read it with state.range(0)."""
    while state:
        sum(range(state.range(0)))
@benchmark.register
@benchmark.option.range(8, limit=8 << 10)
def using_range(state):
    """Sweep the argument from 8 up to 8<<10 (framework-default steps)."""
    while state:
        sum(range(state.range(0)))
@benchmark.register
@benchmark.option.range_multiplier(2)
@benchmark.option.range(1 << 10, 1 << 18)
@benchmark.option.complexity(benchmark.oN)
def computing_complexity(state):
    """Fit measured times against an O(N) complexity model."""
    while state:
        sum(range(state.range(0)))
    # Tell the framework what N was for the complexity fit.
    state.complexity_n = state.range(0)
if __name__ == "__main__":
    # Parse benchmark flags and run every registered benchmark.
    benchmark.main()
| 3,858 | 27.167883 | 88 | py |
benchmark | benchmark-main/bindings/python/google_benchmark/__init__.py | # Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python benchmarking utilities.
Example usage:
import google_benchmark as benchmark
@benchmark.register
def my_benchmark(state):
... # Code executed outside `while` loop is not timed.
while state:
... # Code executed within `while` loop is timed.
if __name__ == '__main__':
benchmark.main()
"""
import atexit
from absl import app
from google_benchmark import _benchmark
from google_benchmark._benchmark import (
Counter,
kNanosecond,
kMicrosecond,
kMillisecond,
kSecond,
oNone,
o1,
oN,
oNSquared,
oNCubed,
oLogN,
oNLogN,
oAuto,
oLambda,
State,
)
__all__ = [
"register",
"main",
"Counter",
"kNanosecond",
"kMicrosecond",
"kMillisecond",
"kSecond",
"oNone",
"o1",
"oN",
"oNSquared",
"oNCubed",
"oLogN",
"oNLogN",
"oAuto",
"oLambda",
"State",
]
__version__ = "1.8.2"
class __OptionMaker:
    """A stateless class to collect benchmark options.
    Collect all decorator calls like @option.range(start=0, limit=1<<5).
    """
    class Options:
        """Pure data class to store options calls, along with the benchmarked function."""
        def __init__(self, func):
            self.func = func
            self.builder_calls = []
    @classmethod
    def make(cls, func_or_options):
        """Make Options from Options or the benchmarked function."""
        # Idempotent: passing an existing Options through returns it as-is.
        if isinstance(func_or_options, cls.Options):
            return func_or_options
        return cls.Options(func_or_options)
    def __getattr__(self, builder_name):
        """Append option call in the Options."""
        # Any attribute access (e.g. option.range) yields a builder that only
        # records (name, args, kwargs); register() replays the recorded calls
        # on the real benchmark object later.
        # The function that get returned on @option.range(start=0, limit=1<<5).
        def __builder_method(*args, **kwargs):
            # The decorator that gets called, either with the benchmarked
            # function or the previous Options
            def __decorator(func_or_options):
                options = self.make(func_or_options)
                options.builder_calls.append((builder_name, args, kwargs))
                # The decorator returns Options so it is not technically a decorator
                # and needs a final call to @register
                return options
            return __decorator
        return __builder_method
# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use __getattr__
# on option.range
option = __OptionMaker()
def register(undefined=None, *, name=None):
    """Register function for benchmarking.
    Works both as a bare decorator (@register) and parameterized
    (@register(name=...)). `undefined` is either the benchmarked function,
    an __OptionMaker.Options built by @option.* decorators, or None.
    """
    if undefined is None:
        # Decorator is called without parenthesis so we return a decorator
        return lambda f: register(f, name=name)
    # We have either the function to benchmark (simple case) or an instance of Options
    # (@option._ case).
    options = __OptionMaker.make(undefined)
    if name is None:
        name = options.func.__name__
    # We register the benchmark and reproduce all the @option._ calls onto the
    # benchmark builder pattern; reversed because decorators apply bottom-up.
    benchmark = _benchmark.RegisterBenchmark(name, options.func)
    for name, args, kwargs in options.builder_calls[::-1]:
        getattr(benchmark, name)(*args, **kwargs)
    # return the benchmarked function because the decorator does not modify it
    return options.func
def _flags_parser(argv):
    # Let the C++ benchmark library consume its own flags first, then hand
    # the remaining argv to absl's flag parsing.
    argv = _benchmark.Initialize(argv)
    return app.parse_flags_with_usage(argv)
def _run_benchmarks(argv):
    # Positional arguments are not supported; everything is flag-driven.
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    return _benchmark.RunSpecifiedBenchmarks()
def main(argv=None):
    # absl entry point: wire benchmark flag parsing to the benchmark runner.
    return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
# Methods for use with custom main function.
initialize = _benchmark.Initialize
run_benchmarks = _benchmark.RunSpecifiedBenchmarks
atexit.register(_benchmark.ClearRegisteredBenchmarks)
| 4,463 | 26.386503 | 90 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/utils/ExportToPX4/SIP2PX4.py | # ___SIPtoPX4___
# StructuralInspectionPlanner Path Exporter to PX4 Mission
#
# This script reads the output *.csv file of an inspection path generated by
# the StructuralInspectionPlanner Toolbox and exports a *.txt file that can
# be loaded at QGroudControl or any other service to import it as a PX4/Pixhawk
# autopilot mission.
#
# More Info on PX/Pixhawk and QGroundControl:
# http://pixhawk.org/
# http://qgroundcontrol.org/
#
# Thanks: the coordinate transforms are based on the Mathworks Aerospace references
# and relevant work at pydoc.net
#
import math
import sys, getopt
import numpy as np
from math import pow, cos, sin, sqrt, degrees, radians, atan2, pi
from scipy import cos, sin, arctan, sqrt, arctan2
# Change based on your files and LonLatAlt Origin
# Order is (lon[deg], lat[deg], alt[m]) — the lla/enu helpers below take
# longitude first.
LLA_ENU_Point = np.array([8.688611111111111, 47.135388888888890, 919.5254262437646] )
# Acceptance Radius and Auto flag (PX4-mission related)
AcceptanceRadius = 50 # radius to consider that the UAV crossed the waypoint
AutoContFlag = 1 # flag to declare if the UAV should enter auto-cont. mode (eg. loiter after the waypoint list is executed)
# Sparsify mission
SparsifyFactor = 1 # 1 - no Sparsification
# assume WGS84: semi-major axis [m], flattening, eccentricity squared
wgs84_a = 6378137.0
wgs84_f = 1.0 / 298.257223563
wgs84_e2 = 2 * wgs84_f - np.power(wgs84_f,2)
# Coordinate conversion functions
def lla2ecef(lonLatAlt):
    # LLA2ECEF Conversion (meters)
    # Geodetic (lon[deg], lat[deg], alt[m]) -> WGS84 ECEF (x, y, z) [m].
    lon_deg, lat_deg, altitude = lonLatAlt
    semi_major, ecc_sq = wgs84_a, wgs84_e2
    lam = radians(lon_deg)
    phi = radians(lat_deg)
    chi = sqrt(1 - ecc_sq * sin(phi) ** 2)
    xy_radius = (semi_major / chi + altitude) * cos(phi)
    z = ((semi_major * (1 - ecc_sq) / chi) + altitude) * sin(phi)
    return (xy_radius * cos(lam), xy_radius * sin(lam), z)
def ecef2lla(ecef):
    # ECEF2LLA (degrees)
    # Convert ECEF (x, y, z) [m] back to geodetic (lon[deg], lat[deg],
    # alt[m]) on WGS84 using an iterative (Bowring-style) latitude update.
    x, y, z = ecef
    a, e2, f = wgs84_a, wgs84_e2, wgs84_f
    lon = atan2(y, x)
    s = sqrt(x ** 2 + y ** 2)
    step = 0
    lat = None
    latPrev = 0
    converged = False
    while not converged:
        if step == 0:
            beta = atan2(z, (1 - f) * s)  # initial guess
        else:
            beta = atan2((1 - f) * sin(lat), cos(lat))  # improved guess
        lat = atan2(z + (e2 * (1 - f) / (1 - e2)) * a * sin(beta) ** 3,
                    s - e2 * a * cos(beta) ** 3)
        # BUGFIX: compare the magnitude of the update. The original test
        # `(lat - latPrev) < 1e-4` accepted any negative update immediately,
        # so southern-hemisphere points stopped after a single iteration.
        if abs(lat - latPrev) < 1e-4:
            converged = True
        latPrev = lat
        step += 1
    N = a / sqrt(1 - e2 * sin(lat) ** 2)
    alt = s * cos(lat) + (z + e2 * N * sin(lat)) * sin(lat) - N
    return (degrees(lon),
            degrees(lat),
            alt)
def ecef2enu(LonLatAlt_orig, ecef):
    # ECEF2ENU (meters)
    # Rotate/translate an ECEF point into the local East-North-Up frame
    # anchored at the geodetic origin LonLatAlt_orig.
    origin = lla2ecef(LonLatAlt_orig)
    dx = ecef[0] - origin[0]
    dy = ecef[1] - origin[1]
    dz = ecef[2] - origin[2]
    lonDeg, latDeg, _ = LonLatAlt_orig
    lon = radians(lonDeg)
    lat = radians(latDeg)
    east = -sin(lon) * dx + cos(lon) * dy
    north = -sin(lat) * cos(lon) * dx - sin(lat) * sin(lon) * dy + cos(lat) * dz
    up = cos(lat) * cos(lon) * dx + cos(lat) * sin(lon) * dy + sin(lat) * dz
    return (east, north, up)
def enu2ecef(LonLatAlt_orig, enu):
    # ENU2ECEF (meters)
    # Local East-North-Up offsets at LonLatAlt_orig -> absolute ECEF point.
    east, north, up = enu
    lonDeg, latDeg, _ = LonLatAlt_orig
    lon = radians(lonDeg)
    lat = radians(latDeg)
    base_x, base_y, base_z = lla2ecef(LonLatAlt_orig)
    x = base_x - sin(lon) * east - cos(lon) * sin(lat) * north + cos(lon) * cos(lat) * up
    y = base_y + cos(lon) * east - sin(lon) * sin(lat) * north + cos(lat) * sin(lon) * up
    z = base_z + cos(lat) * north + sin(lat) * up
    return (x, y, z)
def lla2enu(LonLatAlt_orig, lonLatAlt):
    # Geodetic -> local ENU via an intermediate ECEF conversion.
    return ecef2enu(LonLatAlt_orig, lla2ecef(lonLatAlt))
def enu2lla(LonLatAlt_orig, enu):
    # Local ENU -> geodetic via an intermediate ECEF conversion.
    return ecef2lla(enu2ecef(LonLatAlt_orig, enu))
def main(argv):
    # Parse -i/-o arguments, convert each ENU waypoint of the input CSV back
    # to lon/lat/alt, and write a QGroundControl "QGC WPL 120" mission file.
    # Python 2 script (print statements).
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print 'test.py -i <inputfile> -o <outputfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'test.py -i <inputfile> -o <outputfile>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    global px4_mission_file
    px4_mission_file = open(outputfile, "w")
    global PathENU
    PathENU = np.genfromtxt(inputfile, delimiter = ',')
    Nx,Ny = PathENU.shape
    ECEF_X = np.zeros(Nx); ECEF_Y = np.zeros(Nx); ECEF_Z = np.zeros(Nx)
    lat = np.zeros(Nx); lon = np.zeros(Nx); alt = np.zeros(Nx)
    for i in range(0,Nx):
        # Waypoints are stored ENU relative to LLA_ENU_Point; convert back
        # to geodetic coordinates for the autopilot.
        lon[i], lat[i], alt[i] = enu2lla(LLA_ENU_Point,PathENU[i,0:3])
    # PX4 mission head
    px4_mission_file.write("QGC WPL 120")
    px4_mission_file.write("\r")
    px4_mission_file.write("\r\n")
    # write the PX4 mission waypoints
    count = 0
    for i in range(0,Nx,SparsifyFactor):
        if i == 0:
            # First row carries the "current waypoint" flag (second field = 1).
            px4_mission_file.write("%i\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%.17f\t%.17f\t%.0f\t%i" % (count,1,0,16,0,AcceptanceRadius,0,0,lat[i],lon[i],alt[i],AutoContFlag))
            px4_mission_file.write("\r\r\n")
        else:
            px4_mission_file.write("%i\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%.17f\t%.17f\t%.0f\t%i" % (count,0,0,16,0,AcceptanceRadius,0,0,lat[i],lon[i],alt[i],AutoContFlag))
            px4_mission_file.write("\r\r\n")
        count = count + 1
    px4_mission_file.close()
| 5,131 | 29.188235 | 158 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/utils/ExportToPX4/KML2PX4.py | # __KML2PX4__
# KML (.kml) to PX4/Pixhawk Mission File Parsing and Export
#
# This script accepts a *.kml file as an input and exports the
# corresponding PX4/Pixhawk autopilot mission to be loaded using
# QGroundControl or other compative software
#
# More Info on PX4/Pixhawk and QGroundControl:
# http://pixhawk.org/
# http://qgroundcontrol.org/
#
# Example Syntax:
# python KML2PX4.py --file=ExampleKML.kml --output=KMLmissionPX4.txt
# --radius=30 --auto=1 --sparsify=1 --height=100
#
# note: set height=-1 to retrieve altitude reference form the KML file
#
# This script is part of the "utils" section of the StructuralInspectionPlanner
# Toolbox. A set of elementary components are released together with this
# path-planning toolbox in order to make further developments easier.
#
import os
import csv
import getpass
import logging
import numpy as np
import re
from optparse import OptionParser
from BeautifulSoup import BeautifulSoup
class PX4MissionPars(object):
    """Bundle of PX4 mission export settings."""
    def __init__(self, AcceptanceRadius, AutoContFlag, SparsifyFactor, HeightRef):
        # Waypoint acceptance radius, auto-continue flag, keep-every-n-th
        # sparsification factor and fixed altitude reference (-1 = take the
        # altitude from the KML track).
        self.AcceptanceRadius = AcceptanceRadius
        self.AutoContFlag = AutoContFlag
        self.SparsifyFactor = SparsifyFactor
        self.HeightRef = HeightRef
class KMLParser(object):
    """Parse placemark coordinate tracks from a KML file and export the
    first track as a PX4/QGroundControl ("QGC WPL 120") mission file.
    Python 2 only (tuple-unpacking except clauses, print statements).
    """
    def __init__(self, kml_file, px4_file, mission_pars):
        # Input KML path, output mission filename, PX4MissionPars bundle.
        self.kml_file = kml_file
        self.px4_file = px4_file
        self.outputfile = ''
        self.outputdata = []
        self.MissionPars = mission_pars
    def ParseKml(self):
        """Extract every <Placemark> coordinate track into self.outputdata,
        one (N, 3) array per placemark.
        """
        count = 0
        try:
            handler = open(self.kml_file).read()
            soup = BeautifulSoup(handler)
            for message in soup.findAll('placemark'):
                locationdata = {}
                coordinates = message.find('coordinates')
                locationdata['geometry'] = '<LineString> %s </LineString>' % (coordinates)
                names = message.findAll('name')
                for name in names:
                    text = name.find(text = True)
                    locationdata['name'] = text
                # Split the raw coordinate string on separators and drop the
                # surrounding markup tokens before parsing the comma-separated
                # triples.
                coordinates = str(coordinates)
                coordinates = re.split(', | |\n |\t',coordinates)
                coordinates = coordinates[4:(len(coordinates)-4)]
                lla_coords = np.zeros((len(coordinates),3))
                for i in range(0,len(coordinates)):
                    lla_coords[i] = np.array(coordinates[i].split(','))
                self.outputdata.append(lla_coords)
        except IOError as (errno, strerror):
            logging.error("I/O error(%d): %s" %(errno, strerror))
    def WritePX4(self):
        """Write the first parsed track as a PX4 mission file in the current
        working directory and return the output file path.
        """
        self.outputfile = os.getcwd() + '/' + self.px4_file
        try:
            out = open(self.outputfile,'w')
            print 'Writing output to file ', self.outputfile
            try:
                # Mission header expected by QGroundControl.
                out.write("QGC WPL 120")
                out.write("\r")
                out.write("\r\n")
                count = 0
                for i in range(0,len(self.outputdata[0]),self.MissionPars.SparsifyFactor):
                    if i == 0:
                        # First waypoint row carries the "current" flag (1).
                        if self.MissionPars.HeightRef == -1:
                            z_ref = self.outputdata[0][i][2]
                        else:
                            z_ref = self.MissionPars.HeightRef
                        out.write("%i\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%.17f\t%.17f\t%.0f\t%i" % (count,1,0,16,0,self.MissionPars.AcceptanceRadius,0,0,self.outputdata[0][i][0],self.outputdata[0][i][1],z_ref,self.MissionPars.AutoContFlag))
                        out.write("\r\r\n")
                    else:
                        if self.MissionPars.HeightRef == -1:
                            z_ref = self.outputdata[0][i][2]
                        else:
                            z_ref = self.MissionPars.HeightRef
                        out.write("%i\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%.17f\t%.17f\t%.0f\t%i" % (count,0,0,16,0,self.MissionPars.AcceptanceRadius,0,0,self.outputdata[0][i][0],self.outputdata[0][i][1],z_ref,self.MissionPars.AutoContFlag))
                        out.write("\r\r\n")
                    count = count + 1
                print 'Output file ', self.outputfile, ' written'
            finally:
                out.close()
        except IOError as (errno, strerror):
            logging.error("I/O error(%d): %s" % (errno, strerror))
        return self.outputfile
def main():
    """
    Main method: wire up the CLI, parse the KML file and export the PX4
    mission. Python 2 script (print statements).
    """
    parser = OptionParser()
    parser.add_option("-f", "--file", dest = "kmlfile",
                      help = "KML file to be parsed",
                      metavar = "FILE")
    parser.add_option("-a", "--radius", dest = "AcceptanceRadius",
                      help = "PX4 Mission Acceptance Radius",
                      type = "float")
    parser.add_option("-b", "--auto", dest = "AutoContFlag",
                      help = "PX4 Mission Auto Continue Flag",
                      type = "int")
    parser.add_option("-c", "--sparsify", dest = "SparsifyFactor",
                      help = "KML 2 PX4 Sparsify Factor",
                      type = "int")
    parser.add_option("-e", "--height", dest = "HeightRef",
                      help = "Height ref (-1 to get height from KML)",
                      type = "float")
    parser.add_option("-d", "--output", dest = "px4file",
                      help = "PX4 Mission output file",
                      metavar = "FILE")
    (options, args) = parser.parse_args()
    if not options.kmlfile:
        print "please type python " \
            "KML2PX4.py --file=[kmlfilename] --output=[px4filename]"
    elif not options.px4file:
        print "please type python " \
            "KML2PX4.py --file=[kmlfilename] --output=[px4filename]"
    else:
        # Parse first, then export; WritePX4 returns the output path.
        MissionsPars = PX4MissionPars(options.AcceptanceRadius,options.AutoContFlag,options.SparsifyFactor,options.HeightRef)
        kmlparser = KMLParser(kml_file=options.kmlfile,
                              px4_file=options.px4file,
                              mission_pars=MissionsPars)
        kmlparser.ParseKml()
        upload_file = kmlparser.WritePX4()
if __name__ == "__main__":
    # Script entry point.
    main()
| 6,553 | 38.245509 | 234 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/utils/ExportToRotorS/SIP2RotorS.py | # ___SIPtoRotorS___
# StructuralInspectionPlanner Path Exporter to RotorS missions
#
# This script reads the output *.csv file of an inspection path generated by
# the StructuralInspectionPlanner Toolbox and exports a *.txt file that can
# be loaded by the waypoint sampler of the RotorS simulator. Note that speed
# constraints as well as sampling time must be chosen by the user.
#
# More info on RotorS simulator:
# https://github.com/ethz-asl/rotors_simulator.git
#
#
import math
import sys, getopt
import numpy as np
from math import pow, cos, sin, sqrt, degrees, radians, atan2, pi, ceil, floor
from scipy import cos, sin, arctan, sqrt, arctan2
# RotorS Mission Parameters
Speed = 0.25 # m/s
RotationalSpeed = 0.5 # rad/s
TimeStep = 0.1 # s, approximately
def main(argv):
    # Parse -i/-o, then resample the 4-DoF CSV path (x, y, z, yaw) into
    # fixed-step waypoints respecting Speed/RotationalSpeed, and write a
    # RotorS waypoint file with one "<dt> <x> <y> <z> <yaw>" row per sample.
    # Python 2 script (print statements).
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print 'test.py -i <inputfile> -o <outputfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'test.py -i <inputfile> -o <outputfile>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    rotors_mission_file = open(outputfile, "wb")
    PathENU = np.genfromtxt(inputfile, delimiter = ',')
    Nx,Ny = PathENU.shape
    if Nx < 2:
        print "Number of waypoints must at least be 2"
        sys.exit(2)
    time = np.zeros(Nx); time[0] = 0.0;
    distance = np.zeros(Nx-1);
    rotation = np.zeros(Nx-1);
    for i in range(0, Nx-1):
        # Per-segment translation distance and wrapped yaw change; segment
        # duration is limited by the slower of the two motions.
        distance[i] = sqrt((PathENU[i,0]-PathENU[i+1,0])*(PathENU[i,0]-PathENU[i+1,0]) + (PathENU[i,1]-PathENU[i+1,1])*(PathENU[i,1]-PathENU[i+1,1]) + (PathENU[i,2]-PathENU[i+1,2])*(PathENU[i,2]-PathENU[i+1,2]));
        rotation[i] = np.abs(PathENU[i,3] - PathENU[i+1,3]);
        if rotation[i] > pi:
            rotation[i] = 2.0*pi-rotation[i];
        time[i+1] = max(distance[i]/Speed, rotation[i]/RotationalSpeed);
    # First row: start pose, with dt capped at TimeStep.
    if TimeStep < time[1]:
        rotors_mission_file.write(str(TimeStep) + " ");
    else:
        rotors_mission_file.write(str(time[1]) + " ");
    rotors_mission_file.write(str(PathENU[0,0]) + " ");
    rotors_mission_file.write(str(PathENU[0,1]) + " ");
    rotors_mission_file.write(str(PathENU[0,2]) + " ");
    rotors_mission_file.write(str(PathENU[0,3]) + "\n");
    for i in range(0, Nx-1):
        # Linearly interpolate each segment at roughly TimeStep intervals,
        # unwrapping the yaw difference into [-pi, pi].
        numDisc = math.floor(time[i+1]/TimeStep);
        dt = time[i+1]/numDisc;
        for j in range(1, int(numDisc+1)):
            jfloat = float(j);
            dyaw = PathENU[i,3] - PathENU[i+1,3];
            if dyaw > pi:
                dyaw = dyaw - 2.0*pi;
            if dyaw < -pi:
                dyaw = dyaw + 2.0*pi;
            rotors_mission_file.write(str(dt) + " ");
            rotors_mission_file.write(str(PathENU[i+1,0]*jfloat/numDisc + PathENU[i,0]*(1-jfloat/numDisc)) + " ");
            rotors_mission_file.write(str(PathENU[i+1,1]*jfloat/numDisc + PathENU[i,1]*(1-jfloat/numDisc)) + " ");
            rotors_mission_file.write(str(PathENU[i+1,2]*jfloat/numDisc + PathENU[i,2]*(1-jfloat/numDisc)) + " ");
            rotors_mission_file.write(str(PathENU[i,3] - dyaw*jfloat/numDisc) + "\n");
    rotors_mission_file.close()
    print "### RotorS Mission file written :: please make sure you have respected all the platform mission constraints!"
if __name__ == "__main__":
    # CLI entry point: forward the arguments that follow the script name.
    main(sys.argv[1:])
| 3,188 | 34.043956 | 206 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/utils/Tools/LKH_Python_Interface/InvokeLKH.py | # __InvokeLKH__
# Interface the TSP LKH Solver
#
# This script is a simple python interface to a compiled
# version of the LKH TSP Solver. It requires that the
# solver is compiled at the given directories.
#
#
# Example Syntax:
# python InvokeLKH.py
#
# This script is part of the "utils" section of the StructuralInspectionPlanner
# Toolbox. A set of elementary components are released together with this
# path-planning toolbox in order to make further developments easier.
#
import os
import math
import numpy as np
# Change with the Cost Matrix of your problem or
# consider using it as an argument
CostMatrix = np.ones((10,10))*100
fname_tsp = "test"
user_comment = "a comment by the user"
# Change these directories based on where you have
# a compiled executable of the LKH TSP Solver
lkh_dir = '/LKH/LKH-2.0.7/'
tsplib_dir = '/TSPLIB/'
lkh_cmd = 'LKH'
pwd= os.path.dirname(os.path.abspath(__file__))
def writeTSPLIBfile_FE(fname_tsp,CostMatrix,user_comment):
    # Emit a TSPLIB .tsp problem file (explicit FULL_MATRIX edge weights,
    # costs truncated to int) and a matching LKH .par parameter file.
    # Returns the two (already closed) file objects. Python 2 (print stmt).
    dims_tsp = len(CostMatrix)
    name_line = 'NAME : ' + fname_tsp + '\n'
    # NOTE(review): type_line and display_data_type_line below are built but
    # never written to the file; tsp_line is used instead.
    type_line = 'TYPE: TSP' + '\n'
    comment_line = 'COMMENT : ' + user_comment + '\n'
    tsp_line = 'TYPE : ' + 'TSP' + '\n'
    dimension_line = 'DIMENSION : ' + str(dims_tsp) + '\n'
    edge_weight_type_line = 'EDGE_WEIGHT_TYPE : ' + 'EXPLICIT' + '\n' # explicit only
    edge_weight_format_line = 'EDGE_WEIGHT_FORMAT: ' + 'FULL_MATRIX' + '\n'
    display_data_type_line ='DISPLAY_DATA_TYPE: ' + 'NO_DISPLAY' + '\n' # 'NO_DISPLAY'
    edge_weight_section_line = 'EDGE_WEIGHT_SECTION' + '\n'
    eof_line = 'EOF\n'
    Cost_Matrix_STRline = []
    for i in range(0,dims_tsp):
        # Render row i of the cost matrix as space-separated integers.
        cost_matrix_strline = ''
        for j in range(0,dims_tsp-1):
            cost_matrix_strline = cost_matrix_strline + str(int(CostMatrix[i][j])) + ' '
        j = dims_tsp-1
        cost_matrix_strline = cost_matrix_strline + str(int(CostMatrix[i][j]))
        cost_matrix_strline = cost_matrix_strline + '\n'
        Cost_Matrix_STRline.append(cost_matrix_strline)
    fileID = open((pwd + tsplib_dir + fname_tsp + '.tsp'), "w")
    print name_line
    fileID.write(name_line)
    fileID.write(comment_line)
    fileID.write(tsp_line)
    fileID.write(dimension_line)
    fileID.write(edge_weight_type_line)
    fileID.write(edge_weight_format_line)
    fileID.write(edge_weight_section_line)
    for i in range(0,len(Cost_Matrix_STRline)):
        fileID.write(Cost_Matrix_STRline[i])
    fileID.write(eof_line)
    fileID.close()
    fileID2 = open((pwd + tsplib_dir + fname_tsp + '.par'), "w")
    problem_file_line = 'PROBLEM_FILE = ' + pwd + tsplib_dir + fname_tsp + '.tsp' + '\n' # remove pwd + tsplib_dir
    # NOTE(review): hard-coded OPTIMUM looks like a leftover from a specific
    # instance — confirm LKH ignores it or remove it for general problems.
    optimum_line = 'OPTIMUM 378032' + '\n'
    move_type_line = 'MOVE_TYPE = 5' + '\n'
    patching_c_line = 'PATCHING_C = 3' + '\n'
    patching_a_line = 'PATCHING_A = 2' + '\n'
    runs_line = 'RUNS = 10' + '\n'
    tour_file_line = 'TOUR_FILE = ' + fname_tsp + '.txt' + '\n'
    fileID2.write(problem_file_line)
    fileID2.write(optimum_line)
    fileID2.write(move_type_line)
    fileID2.write(patching_c_line)
    fileID2.write(patching_a_line)
    fileID2.write(runs_line)
    fileID2.write(tour_file_line)
    fileID2.close()
    return fileID, fileID2
def copy_toTSPLIBdir_cmd(fname_basis):
    # Copy the solver's tour output from the working directory into the
    # TSPLIB folder via a shell `cp`.
    copy_toTSPLIBdir_cmd = 'cp' + ' ' + pwd + '/' + fname_basis + '.txt' + ' ' + pwd + tsplib_dir
    os.system(copy_toTSPLIBdir_cmd)
def run_LKHsolver_cmd(fname_basis):
    # Invoke the compiled LKH binary on the generated .par parameter file.
    run_lkh_cmd = pwd + lkh_dir + lkh_cmd  + ' ' + pwd + tsplib_dir + fname_basis + '.par'
    os.system(run_lkh_cmd)
def rm_solution_file_cmd(fname_basis):
    # Remove the tour file left in the working directory after it was copied.
    rm_sol_cmd = 'rm' + ' ' + pwd + '/' + fname_basis + '.txt'
    os.system(rm_sol_cmd)
def main():
    # Write the TSPLIB problem/parameter files, run LKH, then move the
    # resulting tour file into the TSPLIB directory and clean up.
    [fileID1,fileID2] = writeTSPLIBfile_FE(fname_tsp,CostMatrix,user_comment)
    run_LKHsolver_cmd(fname_tsp)
    copy_toTSPLIBdir_cmd(fname_tsp)
    rm_solution_file_cmd(fname_tsp)
if __name__ == "__main__":
    # Script entry point.
    main()
| 3,731 | 30.627119 | 111 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/utils/Tools/Airplane2p5D/Airplane2p5D.py | # __Airplane2p5D__
# 2.5D Airplane Point to Point Connections
# A Python script for simplified Aircraft Kinematics
#
# Computes Point - to - Point Aircraft connections based on
# 2D dubins curves for the XY-plane and constrained linear
# interpolation for the altitude component.
#
# For the dubins curve solution, the code uses the dubins
# package available at: https://github.com/AndrewWalker/pydubins
#
# Example Syntax:
# python Airplane2p5D.py --output=airplane_solution.txt
#
# This script is part of the "utils" section of the StructuralInspectionPlanner
# Toolbox. A set of elementary components are released together with this
# path-planning toolbox in order to make further developments easier.
#
import dubins
import numpy as np
import math
import matplotlib.pyplot as mpplot
from optparse import OptionParser
class VehicleParameters(object):
    """Platform limits used by the 2.5D connection solver."""
    def __init__(self, MinTurnRadius, MaxAscDescRate, V_travel):
        # Minimum turn radius, max climb/sink rate, constant travel speed.
        self.MinTurnRadius, self.MaxAscDescRate = MinTurnRadius, MaxAscDescRate
        self.V_travel = V_travel
class VehicleConfiguration(object):
    """A 2.5D vehicle state: position (x0, y0, z0) plus heading yaw0."""
    def __init__(self, x0, y0, z0, yaw0):
        self.x0, self.y0 = x0, y0
        self.z0, self.yaw0 = z0, yaw0
class SolverParameters(object):
    """Settings for sampling the Dubins solution."""
    def __init__(self, StepSize):
        # Arc-length step between consecutive path samples.
        self.StepSize = StepSize
class VehicleSolution(object):
    """Container for a solved path: the raw pose array plus per-axis views."""
    def __init__(self, path_3D, yaw):
        # Keep the (Ns, 3) position array and expose its columns by name.
        self.Path = path_3D
        self.Xvec, self.Yvec, self.Zvec = (
            path_3D[:, 0], path_3D[:, 1], path_3D[:, 2])
        self.YAWvec = yaw
# define the problem parameters
q0 = VehicleConfiguration(0.0, 0.0, 100.0, np.pi/4)
q1 = VehicleConfiguration(0.0, 0.0, 50.0, -np.pi/4)
AircraftParameters = VehicleParameters(1.0,10.0,1.0)
DubinsSolver = SolverParameters(0.5)
def DubinsPathLength(dubinsPath):
    # Total XY arc length of a sampled 2D path: sum of Euclidean distances
    # between consecutive samples (first two columns only).
    samples = np.asarray(dubinsPath)
    num_samples, _ = samples.shape
    total = 0.0
    for idx in range(1, num_samples):
        dx = samples[idx, 0] - samples[idx - 1, 0]
        dy = samples[idx, 1] - samples[idx - 1, 1]
        total = total + np.sqrt(dx * dx + dy * dy)
    return total
def MaxAscDesc(q0, q1, Ns, MaxAscDescRate, Ts):
    """Altitude profile that moves from q0.z0 toward q1.z0 at the maximum
    ascend/descend rate (no clamping at the target altitude).

    q0, q1 -- start/end configurations; only their .z0 altitudes are used
    Ns -- number of samples to generate
    MaxAscDescRate -- maximum climb/sink rate [m/s]
    Ts -- sampling period [s]
    Returns a numpy array of Ns altitudes starting at q0.z0.
    """
    z_vec = np.zeros(Ns)
    # BUGFIX: the original read q0[2] (configurations are attribute objects,
    # not sequences, so this raised), ignored the MaxAscDescRate argument in
    # favour of the global AircraftParameters, and stepped *away* from the
    # target altitude because of an inverted sign.
    z_vec[0] = q0.z0
    direction = np.sign(q1.z0 - q0.z0)
    for i in range(1, Ns):
        z_vec[i] = z_vec[i - 1] + direction * MaxAscDescRate * Ts
    return z_vec
def LinearAscDesc(q0, q1, Ns):
    # Linearly interpolate altitude from q0.z0 (sample 0) toward q1.z0,
    # which would be reached at index Ns (one step past the returned range).
    sample_indices = range(Ns)
    return np.interp(sample_indices, np.array([0, Ns]), np.array([q0.z0, q1.z0]))
def plot3(a,b,c,mark="o",col="r"):
    # mimic matlab plot3
    # Opens a Matplotlib 3D figure of the (a, b, c) samples; pylab.ion()
    # enables interactive mode, so fig.show() returns without blocking.
    from matplotlib import pyplot
    import pylab
    from mpl_toolkits.mplot3d import Axes3D
    pylab.ion()
    fig = pylab.figure()
    ax = Axes3D(fig)
    ax.plot(a, b,c,color=col,marker=mark)
    fig.show()
def main():
    # Compute a 2D Dubins path between the module-level q0 and q1, overlay a
    # rate-limited altitude profile, save the (x, y, z, yaw) samples to the
    # requested file and plot them. Python 2 (raw_input); needs `dubins`.
    parser = OptionParser()
    parser.add_option("-d", "--output", dest = "pathfile",
                      help = "Path output file",
                      metavar = "FILE")
    (options, args) = parser.parse_args()
    # 2D Dubins Path Part
    q0_2d = np.array([q0.x0, q0.y0, q0.yaw0])
    q1_2d = np.array([q1.x0, q1.y0, q1.yaw0])
    qs, _ = dubins.path_sample(q0_2d, q1_2d, AircraftParameters.MinTurnRadius, DubinsSolver.StepSize)
    sol_2d = np.asarray(qs); [Ns,Ndim] = sol_2d.shape;
    AirplaneSolution = VehicleSolution(np.zeros((Ns,3)),np.zeros((Ns,1)))
    AirplaneSolution.Path[:,0] = sol_2d[:,0]; AirplaneSolution.Path[:,1] = sol_2d[:,1]; AirplaneSolution.YAWvec = sol_2d[:,2];
    PathLength = DubinsPathLength(sol_2d)
    TimeToDest = PathLength/AircraftParameters.V_travel
    # altitude path: pick max-rate profile if the required rate exceeds the
    # platform limit, else a straight linear interpolation.
    RateDes = abs(q0.z0-q1.z0)/TimeToDest
    if RateDes > AircraftParameters.MaxAscDescRate:
        # the system will not arrive to the desired altitude
        # it will rather ascend/descend with the maximum rate
        AirplaneSolution.Path[:,2] = MaxAscDesc(q0,q1,Ns,AircraftParameters.MaxAscDescRate,(PathLength/Ns)/AircraftParameters.V_travel)
    else:
        AirplaneSolution.Path[:,2] = LinearAscDesc(q0,q1,Ns)
    # save to file
    solution_mat = np.zeros((Ns,4))
    solution_mat[:,0] = AirplaneSolution.Path[:,0]; solution_mat[:,1] = AirplaneSolution.Path[:,1];
    solution_mat[:,2] = AirplaneSolution.Path[:,2]; solution_mat[:,3] = AirplaneSolution.YAWvec;
    np.savetxt(options.pathfile,solution_mat)
    # plot option
    plot3(AirplaneSolution.Path[:,0],AirplaneSolution.Path[:,1],AirplaneSolution.Path[:,2],'o','g')
    raw_input()
if __name__ == "__main__":
    # Script entry point.
    main()
| 4,392 | 31.540741 | 129 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/utils/ExportToDJI/SIP2DJI.py | # ___SIPtoDJI___
# StructuralInspectionPlanner Path Exporter to DJI Missions
#
# This script reads the output *.csv file of an inspection path generated by
# the StructuralInspectionPlanner Toolbox and exports a *.awm file that can
# be loaded at the Groud Control Station of the DJI Drones. Note that all constraints
# of the platform or the GCS software must be respected by the user.
#
# More Info on PX/Pixhawk and QGroundControl:
# http://www.dji.com/product/pc-ground-station
#
# Thanks: the coordinate transforms are based on the Mathworks Aerospace references
# and relevant work at pydoc.net
#
import math
import sys, getopt
import numpy as np
from math import pow, cos, sin, sqrt, degrees, radians, atan2, pi
from scipy import cos, sin, arctan, sqrt, arctan2
# Change based on your files and LLA ENU Origin
# Origin of the local ENU frame.  The values look like (lat, lon, alt) for a
# site in Switzerland.
# NOTE(review): the conversion helpers below unpack their geodetic argument as
# (lon, lat, alt) -- confirm the intended component ordering of this origin.
LLA_ENU_Point = np.array([47.135388888888890, 8.688611111111111, 919.5254262437646] )
# DJI Mission Parameters
Speed = 5                  # waypoint travel speed [m/s]
TimeLimitStep = 100        # per-waypoint increment of the mission time limit [s]
HoldTime = 3               # hover time at each waypoint [s]
StartDelay = 0
Period = 0
RepeatTime = 0
RepeatDistance = 0
V_z_max = 2                # vertical speed limit [m/s]
TurnMode = 0 # 0 for StopAndTurn, 1 for BankTurn
# DJI GCS runs on windows
EOL = "\r\n"
# Sparsify mission
SparsifyFactor = 2 # 1 - no Sparsification
# assume WGS84
wgs84_a = 6378137.0                            # semi-major axis [m]
wgs84_f = 1.0 / 298.257223563                  # flattening
wgs84_e2 = 2 * wgs84_f - np.power(wgs84_f,2)   # first eccentricity squared
# Coordinate conversion functions
def lla2ecef(lonLatAlt):
    """Convert geodetic (lon [deg], lat [deg], alt [m]) to ECEF (x, y, z) in meters."""
    lon_deg, lat_deg, h = lonLatAlt
    lam = radians(lon_deg)
    phi = radians(lat_deg)
    semi_major, ecc2 = wgs84_a, wgs84_e2
    # chi: shrink factor from the ellipsoid's eccentricity at this latitude
    denom = sqrt(1 - ecc2 * sin(phi) ** 2)
    # radius of the latitude circle through the point
    r_xy = (semi_major / denom + h) * cos(phi)
    x = r_xy * cos(lam)
    y = r_xy * sin(lam)
    z = ((semi_major * (1 - ecc2) / denom) + h) * sin(phi)
    return (x, y, z)
def ecef2lla(ecef):
    """Convert ECEF (x, y, z) in meters to geodetic (lon [deg], lat [deg], alt [m]).

    Iterates Bowring-style updates of the latitude until the update magnitude
    drops below 1e-4 rad, with a 100-step safety cap against non-convergence.
    """
    x, y, z = ecef
    a, e2, f = wgs84_a, wgs84_e2, wgs84_f
    lon = atan2(y, x)
    s = sqrt(x ** 2 + y ** 2)  # distance from the rotation axis
    step = 0
    lat = None
    latPrev = 0
    converged = False
    while not converged:
        if step == 0:
            beta = atan2(z, (1 - f) * s) # initial guess
        else:
            beta = atan2((1 - f) * sin(lat), cos(lat)) # improved guess
        lat = atan2(z + (e2 * (1 - f) / (1 - e2)) * a * sin(beta) ** 3,
                    s - e2 * a * cos(beta) ** 3)
        # BUG FIX: compare the magnitude of the update.  The original signed
        # comparison `(lat - latPrev) < 1e-4` exited immediately whenever the
        # update was negative, regardless of how inaccurate the latitude was.
        # A step cap guards against pathological non-convergence.
        if abs(lat - latPrev) < 1e-4 or step >= 100:
            converged = True
        latPrev = lat
        step += 1
    # prime-vertical radius of curvature at the converged latitude
    N = a / sqrt(1 - e2 * sin(lat) ** 2)
    alt = s * cos(lat) + (z + e2 * N * sin(lat)) * sin(lat) - N
    return (degrees(lon),
            degrees(lat),
            alt)
def ecef2enu(LonLatAlt_orig, ecef):
    """Express an ECEF point (meters) in the local ENU frame anchored at LonLatAlt_orig."""
    origin = lla2ecef(LonLatAlt_orig)
    dx = ecef[0] - origin[0]
    dy = ecef[1] - origin[1]
    dz = ecef[2] - origin[2]
    lon_deg, lat_deg, _ = LonLatAlt_orig
    lam = radians(lon_deg)
    phi = radians(lat_deg)
    sl, cl = sin(lam), cos(lam)
    sp, cp = sin(phi), cos(phi)
    # rotate the ECEF offset into east / north / up components
    east = -sl * dx + cl * dy
    north = -sp * cl * dx - sp * sl * dy + cp * dz
    up = cp * cl * dx + cp * sl * dy + sp * dz
    return (east, north, up)
def enu2ecef(LonLatAlt_orig, enu):
    """Convert a local ENU offset (meters) back to absolute ECEF coordinates."""
    east, north, up = enu
    lon_deg, lat_deg, _ = LonLatAlt_orig
    lam = radians(lon_deg)
    phi = radians(lat_deg)
    ox, oy, oz = lla2ecef(LonLatAlt_orig)
    sl, cl = sin(lam), cos(lam)
    sp, cp = sin(phi), cos(phi)
    # inverse of the ENU rotation, then translate by the frame origin
    x = ox - sl * east - cl * sp * north + cl * cp * up
    y = oy + cl * east - sl * sp * north + cp * sl * up
    z = oz + cp * north + sp * up
    return (x, y, z)
def lla2enu(LonLatAlt_orig, lonLatAlt):
    """Convert geodetic coordinates to local ENU via an ECEF intermediate."""
    ecef = lla2ecef(lonLatAlt)
    return ecef2enu(LonLatAlt_orig, ecef)
def enu2lla(LonLatAlt_orig, enu):
    """Convert local ENU coordinates to geodetic via an ECEF intermediate."""
    ecef = enu2ecef(LonLatAlt_orig, enu)
    return ecef2lla(ecef)
def main(argv):
    # Convert a SIP path file (ENU x, y, z, yaw per row) into a DJI ground
    # control station *.awm mission file.
    # Usage: SIP2DJI.py -i <inputfile> -o <outputfile>
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print 'test.py -i <inputfile> -o <outputfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'test.py -i <inputfile> -o <outputfile>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    global dji_mission_file
    dji_mission_file = open(outputfile, "wb")
    global PathENU
    PathENU = np.genfromtxt(inputfile, delimiter = ',')
    Nx,Ny = PathENU.shape
    # NOTE(review): the ECEF_* arrays below are allocated but never used.
    ECEF_X = np.zeros(Nx); ECEF_Y = np.zeros(Nx); ECEF_Z = np.zeros(Nx)
    lat = np.zeros(Nx); lon = np.zeros(Nx); alt = np.zeros(Nx); yaw = np.zeros(Nx);
    # convert every ENU waypoint to geodetic coordinates
    for i in range(0,Nx):
        lon[i], lat[i], alt[i] = enu2lla(LLA_ENU_Point,PathENU[i,0:3]);
        yaw[i] = PathENU[i,3];
    # XML header and mission-wide limits
    dji_mission_file.write("<?xml version=\"1.0\" encoding=\"utf-16\" standalone=\"yes\"?>" + EOL);
    next_line = "<Mission MissionTimeLmt=\"65535\" IsPatrol=\"Continuous\"" + " StartWayPointIndex=\"0\""+ " VerticalSpeedLimit=\"" + str(V_z_max)+ "\">" + EOL
    dji_mission_file.write(next_line)
    if TurnMode ==0:
        TurnModeStr = "StopAndTurn"
    else:
        TurnModeStr = "Bank_turn"
    # one <WayPoint> element per path sample
    for i in range(0,Nx):
        dji_mission_file.write("  <WayPoint id=\"" + str(i) + "\">" + EOL)
        dji_mission_file.write("    <Latitude>" + str(lat[i]) + "</Latitude>" + EOL)
        dji_mission_file.write("    <Longitude>" + str(lon[i]) + "</Longitude>" + EOL)
        dji_mission_file.write("    <Altitude>" + str(alt[i]) + "</Altitude>" + EOL)
        dji_mission_file.write("    <Speed>" + str(Speed) + "</Speed>" + EOL)
        dji_mission_file.write("    <TimeLimit>" + str( (i+1)*TimeLimitStep ) + "</TimeLimit>" + EOL)
        dji_mission_file.write("    <YawDegree>" + str(int(degrees(yaw[i]))) + "</YawDegree>" + EOL)
        dji_mission_file.write("    <HoldTime>" + str(HoldTime) + "</HoldTime>" + EOL)
        dji_mission_file.write("    <StartDelay>" + str(StartDelay) + "</StartDelay>" + EOL)
        dji_mission_file.write("    <Period>" + str(Period) + "</Period>" + EOL)
        dji_mission_file.write("    <RepeatTime>" + str(RepeatTime) + "</RepeatTime>" + EOL)
        dji_mission_file.write("    <RepeatDistance>" + str(RepeatDistance) + "</RepeatDistance>" + EOL)
        dji_mission_file.write("    <TurnMode>" + TurnModeStr + "</TurnMode>" + EOL)
        dji_mission_file.write("  </WayPoint>" + EOL)
    dji_mission_file.write("</Mission>")
    dji_mission_file.close()
    print "### DJI Mission file written :: please make sure you have respected all the platform mission constraints!"
if __name__ == "__main__":
main(sys.argv[1:])
| 6,266 | 32.15873 | 156 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/utils/PathToTrajectory/PATH2TRAJ.py | # __PATH2TRAJ__
# SIP Path to Trajectory based on simple second-order linear model
# and an LQ controller with saturated control actions
#
# This script accepts an input file with the path and exports a
# trajectory based on second-order closed-loop simulation of simplified
# system dynamics and an LQ-controller. Its purpose is to stand as a
# simple way to derive timed trajectories from paths produced by
# the StructuralInspectionPlanner. The extracted trajectory is by no
# means optimal and depends heavily on the tuning parameters. However,
# it provided as a quick way to derived a time trajectory as typically
# required by several controllers on unmanned aerial vehicles.
#
# Example Syntax:
# python py_compile -O -m PATH2TRAJ.py # to enable some of the optimization flags
# python PATH2TRAJ.pyo -i <inputfile> -o <outputfile> # to run the script
#
# This script is part of the "utils" section of the StructuralInspectionPlanner
# Toolbox. A set of elementary components are released together with this
# path-planning toolbox in order to make further developments easier.
#
from scipy import signal
from scipy import cos, sin, arctan, sqrt, arctan2, radians, degrees
from scipy.linalg import block_diag
import numpy as np
import matplotlib.pyplot as plt
import pydare as dare
import sys, getopt
# LQ tuning matrices : these should be retuned
# Q penalizes the [position, velocity] error; R penalizes the control effort.
Q_x = np.zeros((2,2)); Q_x[0,0] = 10; Q_x[1,1] = 2000;
R_x = np.zeros((1,1)); R_x[0,0] = 2;
Q_y = np.zeros((2,2)); Q_y[0,0] = 10; Q_y[1,1] = 2000;
R_y = np.zeros((1,1)); R_y[0,0] = 2;
Q_z = np.zeros((2,2)); Q_z[0,0] = 20; Q_z[1,1] = 2000;
R_z = np.zeros((1,1)); R_z[0,0] = 2;
# max sliding error
max_error_xyz = 2;
# accuracy range specifying when a waypoint has been reached
accuracy_range = .1;
# vehicle parameters
mass = 1.2; max_xy_vel = 2; max_z_vel = 2; maxRollPitch = 15; maxDeltaT = 6;
# simulation parameters
sample_time = 0.01;
# initial [position; velocity] state per axis, and the initial yaw
x_init_x = np.zeros((2,1));
x_init_y = np.zeros((2,1));
x_init_z = np.zeros((2,1));
x_init_yaw = np.zeros((1,1));
class VehicleParameters(object):
    """Physical limits of the simulated vehicle."""
    def __init__(self, mass, max_xy_vel, max_z_vel, maxRollPitch, maxDeltaT):
        # vehicle mass [kg]
        self.mass = mass
        # velocity bounds [m/s]; x and y share one bound
        self.xvel_max = max_xy_vel
        self.yvel_max = max_xy_vel
        self.zvel_max = max_z_vel
        # attitude bounds; roll and pitch share one bound
        self.maxRoll = maxRollPitch
        self.maxPitch = maxRollPitch
        # maximum collective-thrust offset
        self.maxDeltaT = maxDeltaT
class EnvironmentParameters(object):
    """Environment constants (currently only gravitational acceleration)."""
    def __init__(self, g):
        # gravitational acceleration [m/s^2]
        self.g = g
class TuningParameters(object):
    """Controller tuning: sample time, per-axis LQ weights, and the error clamp."""
    def __init__(self, Ts, Qx, Rx, Qy, Ry, Qz, Rz, max_err):
        # sampling period [s]
        self.Ts = Ts
        # state/input LQ weight pairs for the x, y and z regulators
        self.Qx, self.Rx = Qx, Rx
        self.Qy, self.Ry = Qy, Ry
        self.Qz, self.Rz = Qz, Rz
        # saturation applied to the position error fed to the controllers
        self.max_err = max_err
class SimulationParameters(object):
    """Open-loop simulation inputs plus the per-axis reference paths."""
    def __init__(self, Ts, t_vec, u_vec, ref_x, ref_y, ref_z):
        # sampling period [s], time grid and input sequence
        self.Ts = Ts
        self.t_vec = t_vec
        self.u_vec = u_vec
        # reference positions per axis
        self.ref_x = ref_x
        self.ref_y = ref_y
        self.ref_z = ref_z
class SimulationResults(object):
    """Bundle of time, output and state trajectories for the three axis simulations."""
    def __init__(self, t_out_x, t_out_y, t_out_z, y_out_x, y_out_y, y_out_z, x_out_x, x_out_y, x_out_z):
        # time vectors per axis
        self.t_out_x = t_out_x
        self.t_out_y = t_out_y
        self.t_out_z = t_out_z
        # output (measured) trajectories per axis
        self.y_out_x = y_out_x
        self.y_out_y = y_out_y
        self.y_out_z = y_out_z
        # internal state trajectories per axis
        self.x_out_x = x_out_x
        self.x_out_y = x_out_y
        self.x_out_z = x_out_z
class Filter2ndOrder(object):
    """Second-order low-pass filter defined by two time constants and a DC gain."""
    def __init__(self, Ts1, Ts2, dc_gain, Ts):
        # continuous-time time constants [s]
        self.Ts1 = Ts1
        self.Ts2 = Ts2
        # steady-state gain
        self.dc_gain = dc_gain
        # sampling period used for discretization [s]
        self.Ts = Ts
    def RunFilter(self, x_vec, t_vec):
        """Discretize the filter (zero-order hold) and run it over x_vec sampled at t_vec."""
        # continuous-time denominator: (Ts1*s + 1)(Ts2*s + 1)
        denom = signal.convolve(np.array([self.Ts1, 1]), np.array([self.Ts2, 1]))
        numer = self.dc_gain
        discrete_tf = signal.cont2discrete((numer, denom), self.Ts, method='zoh')
        _, filtered = signal.dlsim(discrete_tf, x_vec, t=t_vec)
        return filtered
class SystemDynamics(object):
    """Per-axis double-integrator translational dynamics, discretized with ZOH.

    Builds three independent 2-state (position, velocity) systems for x, y, z.
    The x/y inputs are attitude angles scaled by gravity (note the opposite
    signs for x and y); the z input is a thrust-like acceleration.
    """
    def __init__(self, VehicleParameters, EnvironmentParameters,TuningParameters,SimulationParameters):
        # x-axis
        A_x = np.zeros((2,2)); A_x[0,1] = 1;
        B_x = np.zeros((2,1)); B_x[1] = -EnvironmentParameters.g;
        C_x = np.zeros((2,2)); C_x[0,0] = 1; C_x[1,1] = 1;
        D_x = np.zeros((2,1));
        sys_x_d = signal.cont2discrete((A_x,B_x,C_x,D_x),TuningParameters.Ts,"zoh");
        # y-axis
        A_y = np.zeros((2,2)); A_y[0,1] = 1;
        B_y = np.zeros((2,1)); B_y[1] = EnvironmentParameters.g;
        C_y = np.zeros((2,2)); C_y[0,0] = 1; C_y[1,1] = 1;
        D_y = np.zeros((2,1));
        sys_y_d = signal.cont2discrete((A_y,B_y,C_y,D_y),TuningParameters.Ts,"zoh");
        # z-axis
        A_z = np.zeros((2,2)); A_z[0,1] = 1;
        B_z = np.zeros((2,1)); B_z[1] = 1;
        C_z = np.zeros((2,2)); C_z[0,0] = 1; C_z[1,1] = 1;
        D_z = np.zeros((2,1));
        sys_z_d = signal.cont2discrete((A_z,B_z,C_z,D_z),TuningParameters.Ts,"zoh");
        # keep the parameter bundles and the discretized systems on the instance
        self.VehicleParameters = VehicleParameters;
        self.EnvironmentParameters = EnvironmentParameters;
        self.TuningParameters = TuningParameters;
        self.SimulationParameters = SimulationParameters;
        self.SysX_d = sys_x_d;
        self.SysY_d = sys_y_d;
        self.SysZ_d = sys_z_d;
    def SimulateDynamics(self):
        # Open-loop simulation of all three axes using the shared input and
        # time vectors; results are stored in self.SimOL.
        t_out_x, y_out_x, x_out_x = signal.dlsim(self.SysX_d, self.SimulationParameters.u_vec, t=self.SimulationParameters.t_vec);
        t_out_y, y_out_y, x_out_y = signal.dlsim(self.SysY_d, self.SimulationParameters.u_vec, t=self.SimulationParameters.t_vec);
        t_out_z, y_out_z, x_out_z = signal.dlsim(self.SysZ_d, self.SimulationParameters.u_vec, t=self.SimulationParameters.t_vec);
        self.SimOL = SimulationResults(t_out_x,t_out_y,t_out_z,y_out_x,y_out_y,y_out_z,x_out_x,x_out_y,x_out_z);
    def PlotOL(self):
        # Plot the open-loop position/velocity responses for all three axes
        # (blocks until the figure window is closed, then waits for Enter).
        plt.subplot(321);
        plt.plot(self.SimOL.t_out_x,self.SimOL.y_out_x[:,0],'ro');
        plt.xlabel('Time (s)'); plt.ylabel('x (m)');
        plt.subplot(322);
        plt.plot(self.SimOL.t_out_x,self.SimOL.y_out_x[:,1],'ro');
        plt.xlabel('Time (s)'); plt.ylabel('v_x (m/s)');
        plt.subplot(323);
        plt.plot(self.SimOL.t_out_y,self.SimOL.y_out_y[:,0],'bo');
        plt.xlabel('Time (s)'); plt.ylabel('y (m)');
        plt.subplot(324);
        plt.plot(self.SimOL.t_out_y,self.SimOL.y_out_y[:,1],'bo');
        plt.xlabel('Time (s)'); plt.ylabel('v_y (m/s)');
        plt.subplot(325);
        plt.plot(self.SimOL.t_out_z,self.SimOL.y_out_z[:,0],'go');
        plt.xlabel('Time (s)'); plt.ylabel('z (m)');
        plt.subplot(326);
        plt.plot(self.SimOL.t_out_z,self.SimOL.y_out_z[:,1],'go');
        plt.xlabel('Time (s)'); plt.ylabel('v_z (m/s)');
        print "Close the window to continue ...";
        plt.show();
        raw_input("... and press Enter");
class LQctrl(object):
    """Discrete-time LQ regulator; the gain comes from a DARE solution."""
    def __init__(self, sys, Q, R):
        A, B = sys[0], sys[1]
        Qm = np.matrix(Q)
        Rm = np.matrix(R)
        # solve the discrete algebraic Riccati equation for P
        P = dare.DareSolver(A, B, Qm, Rm).solve_direct()
        # state-feedback gain: K = (R + B'PB)^-1 (B'PA)
        self.K = (Rm + B.transpose() * P * B).I * (B.transpose() * P * A)
    def ControlAction(self, x):
        """Return the control action u = -K x."""
        return -np.dot(self.K, x)
def plot3(a,b,c,mark="o",col="r"):
    """MATLAB-like 3D plot of the three coordinate vectors."""
    # imports kept local so headless runs without plotting never load matplotlib
    from matplotlib import pyplot
    import pylab
    from mpl_toolkits.mplot3d import Axes3D
    pylab.ion()
    figure = pylab.figure()
    axes3d = Axes3D(figure)
    axes3d.plot(a, b, c, color=col, marker=mark)
    figure.show()
def plot_result(SimResults,ref_xyz,mark="o",col="r",mark_ref="*",col_ref="b"):
    """Overlay the simulated 3D trajectory and the reference waypoints."""
    # imports kept local so headless runs without plotting never load matplotlib
    from matplotlib import pyplot
    import pylab
    from mpl_toolkits.mplot3d import Axes3D
    pylab.ion()
    figure = pylab.figure()
    axes3d = Axes3D(figure)
    # simulated trajectory: columns 0/2/4 hold the x/y/z positions
    axes3d.plot(SimResults[:, 0], SimResults[:, 2], SimResults[:, 4], color=col, marker=mark)
    # reference waypoints
    axes3d.plot(ref_xyz[:, 0], ref_xyz[:, 1], ref_xyz[:, 2], color=col_ref, marker=mark_ref)
    figure.show()
class ClosedLoopSim(object):
    """Simulate one axis in closed loop until the reference position is reached.

    Each step applies the saturated LQ control action to the discretized axis
    dynamics; the loop stops when the position is within `accur` of `ref_pos`
    or the iteration cap is hit.
    """
    def __init__(self,sys,ctrl,x_init,ref_pos,Ts,accur,max_ctrl,max_err):
        self.sys = sys;            # discretized system tuple (A, B, C, D, dt)
        self.ctrl = ctrl;          # controller exposing ControlAction(x)
        self.x_init = x_init;      # initial [position; velocity] -- mutated in place
        self.ref_pos = ref_pos;    # target position
        self.Ts = Ts;              # sampling period [s]
        self.accur = accur;        # convergence radius around ref_pos
        self.max_ctrl = max_ctrl;  # control saturation bound
        self.max_err = max_err;    # position-error saturation bound
    def Simulate(self):
        """Run the loop; returns (states, times) truncated to the simulated span."""
        A = self.sys[0];
        B = self.sys[1];
        C = self.sys[2];
        D = self.sys[3];
        x_init = self.x_init;
        error_vec = np.zeros((2,1))
        # NOTE(review): the buffers hold 1e6 samples but the loop caps at 1e5
        # iterations, so most of this allocation is never used.
        x_tmp = np.zeros((1000000,2));
        t_tmp = np.zeros((1000000,1));
        cnt = 2;
        x_tmp[0,0] = x_init[0]; x_tmp[0,1] = x_init[1];
        x_tmp[1,0] = x_init[0]; x_tmp[1,1] = x_init[1];
        while (abs(x_init[0] - self.ref_pos) > self.accur) & (cnt<= 100000):
            # position error clamped to +/- max_err; velocity reference is zero
            error_vec[0] = self.ref_pos - x_init[0]; error_vec[1] = 0 - x_init[1];
            error_vec[0] = max(error_vec[0],-self.max_err); error_vec[0] = min(error_vec[0],self.max_err);
            u_ctrl = self.ctrl.ControlAction(-error_vec);
            u_ctrl = min(u_ctrl,self.max_ctrl); u_ctrl = max(u_ctrl,-self.max_ctrl);
            # one discrete step: x[k+1] = A x[k] + B u[k]
            # NOTE(review): B comes from cont2discrete and already embeds the
            # sample time; the extra *self.Ts factor looks like double scaling
            # -- confirm against the controller tuning before changing it.
            x_tmp[cnt,1] = A[1,0]*x_init[0] + A[1,1]*x_init[1] + B[1]*u_ctrl*self.Ts;
            x_tmp[cnt,0] = A[0,0]*x_init[0] + A[0,1]*x_init[1] + B[0]*u_ctrl*self.Ts;
            x_init[0] = x_tmp[cnt,0]; x_init[1] = x_tmp[cnt,1];
            t_tmp[cnt] = cnt*self.Ts;
            cnt = cnt + 1;
        x_out = x_tmp[:(cnt-1),:];
        t_out = t_tmp[:(cnt-1)];
        return x_out, t_out;
def main(argv):
"""
Main method
"""
# read the input file
inputfile = '';
outputfile = '';
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="]);
except getopt.GetoptError:
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit(2);
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit();
elif opt in ("-i", "--ifile"):
inputfile = arg;
elif opt in ("-o", "--ofile"):
outputfile = arg;
global exported_trajectory_file
exported_trajectory_file = open(outputfile,"w");
global ref_xyz
ref_xyz = np.genfromtxt(inputfile, delimiter = ',');
x_init_x[0] = ref_xyz[0,0]; x_init_y[0] = ref_xyz[0,1]; x_init_z[0] = ref_xyz[0,2];
Vehicle = VehicleParameters(mass,max_xy_vel,max_z_vel,radians(maxRollPitch),maxDeltaT);
Environment = EnvironmentParameters(9.8065);
Tuning = TuningParameters(sample_time,Q_x,R_x,Q_y,R_y,Q_z,R_z,max_error_xyz);
filt = Filter2ndOrder(1,2,1,Tuning.Ts);
t_vec = np.arange(0,10,Tuning.Ts); # used only for open-loop simulations
u_vec = np.ones((len(t_vec),1)); u_vec[0] = 0; # used only for open-loop simulations
SimulationPars = SimulationParameters(Tuning.Ts,t_vec,u_vec,ref_xyz[:,0],ref_xyz[:,1],ref_xyz[:,2]);
OverallSys = SystemDynamics(VehicleParameters = Vehicle, EnvironmentParameters = Environment, TuningParameters = Tuning, SimulationParameters = SimulationPars);
Xctrl = LQctrl(OverallSys.SysX_d,Tuning.Qx,Tuning.Rx);
Yctrl = LQctrl(OverallSys.SysY_d,Tuning.Qy,Tuning.Ry);
Zctrl = LQctrl(OverallSys.SysZ_d,Tuning.Qz,Tuning.Rz);
[Nr,Ny] = ref_xyz.shape;
X_vec = np.zeros((1,2)); X_vec[0,0] = x_init_x[0]; X_vec[0,1] = x_init_x[1];
Y_vec = np.zeros((1,2)); Y_vec[0,0] = x_init_y[0]; Y_vec[0,1] = x_init_y[1];
Z_vec = np.zeros((1,2)); Z_vec[0,0] = x_init_z[0]; Z_vec[0,1] = x_init_z[1];
Yaw_vec = np.zeros((1,1)); Yaw_vec[0,0] = x_init_yaw[0];
SimResults = np.zeros((1,7));
SimResults[0,0] = x_init_x[0]; SimResults[0,1] = x_init_x[1];
SimResults[0,2] = x_init_y[0]; SimResults[0,3] = x_init_y[1];
SimResults[0,4] = x_init_z[0]; SimResults[0,5] = x_init_z[1];
SimResults[0,6] = x_init_yaw[0];
two_indices = np.zeros((1,2)); two_yaw_refs = np.zeros((1,2));
# simulate the whole path and extract the trajectory
for i in range(1,Nr):
SysX_Cl = ClosedLoopSim(OverallSys.SysX_d,Xctrl,x_init_x,ref_xyz[i,0],Tuning.Ts,accuracy_range,Vehicle.maxPitch,max_error_xyz);
SysY_Cl = ClosedLoopSim(OverallSys.SysY_d,Yctrl,x_init_y,ref_xyz[i,1],Tuning.Ts,accuracy_range,Vehicle.maxRoll,max_error_xyz);
SysZ_Cl = ClosedLoopSim(OverallSys.SysZ_d,Zctrl,x_init_z,ref_xyz[i,2],Tuning.Ts,accuracy_range,Vehicle.maxDeltaT,max_error_xyz);
x_vec, t_x = SysX_Cl.Simulate(); x_init_x[0] = x_vec[len(t_x)-1,0]; x_init_x[1] = x_vec[len(t_x)-1,1];
y_vec, t_y = SysY_Cl.Simulate(); x_init_y[0] = y_vec[len(t_y)-1,0]; x_init_y[1] = y_vec[len(t_y)-1,1];
z_vec, t_z = SysZ_Cl.Simulate(); x_init_z[0] = z_vec[len(t_z)-1,0]; x_init_z[1] = z_vec[len(t_z)-1,1];
max_len = max(len(x_vec),len(y_vec)); max_len = max(max_len,len(z_vec));
val_vec_x = np.linspace(0,len(x_vec),len(x_vec));
val_vec_y = np.linspace(0,len(y_vec),len(y_vec));
val_vec_z = np.linspace(0,len(z_vec),len(z_vec));
x_pos_interp = np.interp(np.linspace(0,len(x_vec),max_len),val_vec_x,x_vec[:,0]);
x_vel_interp = np.interp(np.linspace(0,len(x_vec),max_len),val_vec_x,x_vec[:,1]);
y_pos_interp = np.interp(np.linspace(0,len(y_vec),max_len),val_vec_y,y_vec[:,0]);
y_vel_interp = np.interp(np.linspace(0,len(y_vec),max_len),val_vec_y,y_vec[:,1]);
z_pos_interp = np.interp(np.linspace(0,len(z_vec),max_len),val_vec_z,z_vec[:,0]);
z_vel_interp = np.interp(np.linspace(0,len(z_vec),max_len),val_vec_z,z_vec[:,1]);
two_indices[0,0] = 0; two_indices[0,1] = max_len; two_yaw_refs[0,0] = ref_xyz[i-1,3]; two_yaw_refs[0,1] = ref_xyz[i,3];
yaw_vec_interp = np.interp(np.linspace(0,max_len,max_len),two_indices[:,0],two_yaw_refs[:,0]);
sim_results = np.zeros((len(x_pos_interp),7));
sim_results[:,0] = x_pos_interp; sim_results[:,1] = x_vel_interp;
sim_results[:,2] = y_pos_interp; sim_results[:,3] = y_vel_interp;
sim_results[:,4] = z_pos_interp; sim_results[:,5] = z_vel_interp;
sim_results[:,6] = yaw_vec_interp;
SimResults = np.vstack((SimResults,sim_results));
t_out = np.linspace(0,len(SimResults)*Tuning.Ts,len(SimResults));
# pass all states from a second order filter
SimResults_Filter = np.zeros((len(SimResults)+1,8));
SimResults_Filter[:,0] = filt.RunFilter(SimResults[:,0],t_out).transpose();
SimResults_Filter[:,1] = filt.RunFilter(SimResults[:,1],t_out).transpose();
SimResults_Filter[:,2] = filt.RunFilter(SimResults[:,2],t_out).transpose();
SimResults_Filter[:,3] = filt.RunFilter(SimResults[:,3],t_out).transpose();
SimResults_Filter[:,4] = filt.RunFilter(SimResults[:,4],t_out).transpose();
SimResults_Filter[:,5] = filt.RunFilter(SimResults[:,5],t_out).transpose();
SimResults_Filter[:,6] = filt.RunFilter(SimResults[:,6],t_out).transpose();
SimResults_Filter[:,7] = np.linspace(0,len(SimResults)*Tuning.Ts,len(SimResults)+1);
np.savetxt(exported_trajectory_file,SimResults_Filter, delimiter = ',')
# plot the results
plot_result(SimResults_Filter[1:len(SimResults_Filter),:],ref_xyz,'o','g','*','b');
raw_input("Press Enter to close");
if __name__ == "__main__":
main(sys.argv[1:])
| 15,749 | 39.076336 | 164 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/setup.py | #!/usr/bin/env python
##
## This file is part of qpOASES.
##
## qpOASES -- An Implementation of the Online Active Set Strategy.
## Copyright (C) 2007-2014 by Hans Joachim Ferreau, Andreas Potschka,
## Christian Kirches et al. All rights reserved.
##
## qpOASES is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## qpOASES is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with qpOASES; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## author: Sebastian F. Walter, Manuel Kudruss
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import os
import numpy as np
# Climb three directory levels (tests -> python -> interfaces) to reach the
# qpOASES source root, which holds include/ and bin/.
BASEDIR = os.path.dirname(os.path.abspath(__file__))
BASEDIR = os.path.dirname(BASEDIR)
BASEDIR = os.path.dirname(BASEDIR)
print(('BASEDIR=', BASEDIR))
extra_params = {}
extra_params['include_dirs'] = [
    '/usr/include',
    os.path.join(BASEDIR, 'include'),
    os.path.join(BASEDIR, 'include', 'qpOASES'),
    np.get_include()]
extra_params['extra_compile_args'] = ["-O2", "-Wno-unused-variable"]
extra_params['extra_link_args'] = ["-Wl,-O1", "-Wl,--as-needed"]
# NOTE(review): copying extra_params onto itself has no effect here; it looks
# like a leftover from a variant that built several extension configurations.
extra_params = extra_params.copy()
extra_params['libraries'] = ['qpOASES']
extra_params['library_dirs'] = ['/usr/lib', os.path.join(BASEDIR, 'bin')]
extra_params['language'] = 'c++'
if os.name == 'posix':
    extra_params['runtime_library_dirs'] = extra_params['library_dirs']
ext_modules = [
    Extension("qpoases", ["qpoases.pyx", "qpoases.pxd"], **extra_params),
]
setup(
    name='qpOASES interface',
    cmdclass={'build_ext': build_ext},
    ext_modules=cythonize(ext_modules),
)
| 2,182 | 31.58209 | 82 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/examples/example2.py | ##
## This file is part of qpOASES.
##
## qpOASES -- An Implementation of the Online Active Set Strategy.
## Copyright (C) 2007-2014 by Hans Joachim Ferreau, Andreas Potschka,
## Christian Kirches et al. All rights reserved.
##
## qpOASES is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## qpOASES is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with qpOASES; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Example adapted from examples/example2.cpp.
## author of this file: Sebastian F. Walter
import numpy as np
from qpoases import PySQProblem as SQProblem
from qpoases import PySolutionAnalysis as SolutionAnalysis
# Setup data of first QP.
H = np.array([ 1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))
A = np.array([ 1.0, 1.0 ]).reshape((2,1))
g = np.array([ 1.5, 1.0 ])
lb = np.array([ 0.5, -2.0 ])
ub = np.array([ 5.0, 2.0 ])
lbA = np.array([ -1.0 ])
ubA = np.array([ 2.0 ])
# Setup data of second QP.
H_new = np.array([ 1.0, 0.5, 0.5, 0.5 ]).reshape((2,2))
A_new = np.array([ 1.0, 5.0 ]).reshape((2,1))
g_new = np.array([ 1.0, 1.5 ])
lb_new = np.array([ 0.0, -1.0 ])
ub_new = np.array([ 5.0, -0.5 ])
lbA_new = np.array([ -2.0 ])
ubA_new = np.array([ 1.0 ])
# Setting up SQProblem object and solution analyser.
example = SQProblem(2, 1)
analyser = SolutionAnalysis()
# Solve first QP ...
nWSR = 10
example.init(H, g, A, lb, ub, lbA, ubA, nWSR)
# ... and analyse it.
maxKKTviolation = np.zeros(1)
analyser.getMaxKKTviolation(example, maxKKTviolation)
print("maxKKTviolation: %e\n"%maxKKTviolation)
# Solve second QP (hotstarted with new matrices) ...
nWSR = 10;
example.hotstart(H_new, g_new, A_new, lb_new, ub_new,
                 lbA_new, ubA_new, nWSR)
# ... and analyse it.
analyser.getMaxKKTviolation(example, maxKKTviolation)
print("maxKKTviolation: %e\n"%maxKKTviolation)
# ------------ VARIANCE-COVARIANCE EVALUATION --------------------
Var = np.zeros(5*5)
Primal_Dual_Var = np.zeros(5*5)
# reshape() on this contiguous array returns a view, so these writes land in Var
Var.reshape((5,5))[0,0] = 1.
Var.reshape((5,5))[1,1] = 1.
#                  (  1   0   0   0   0   )
#                  (  0   1   0   0   0   )
#     Var     =    (  0   0   0   0   0   )
#                  (  0   0   0   0   0   )
#                  (  0   0   0   0   0   )
analyser.getVarianceCovariance(example, Var, Primal_Dual_Var)
print('Primal_Dual_Var=\n', Primal_Dual_Var.reshape((5,5)))
print(maxKKTviolation)
| 2,876 | 30.615385 | 81 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/examples/example1.py | ##
## This file is part of qpOASES.
##
## qpOASES -- An Implementation of the Online Active Set Strategy.
## Copyright (C) 2007-2014 by Hans Joachim Ferreau, Andreas Potschka,
## Christian Kirches et al. All rights reserved.
##
## qpOASES is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## qpOASES is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with qpOASES; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Example adapted from examples/example1.cpp.
## author of this file: Sebastian F. Walter
import numpy as np
from qpoases import PyQProblem as QProblem
from qpoases import PyOptions as Options
from qpoases import PyPrintLevel as PrintLevel
#Setup data of first QP.
H = np.array([1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))
A = np.array([1.0, 1.0 ]).reshape((2,1))
g = np.array([1.5, 1.0 ])
lb = np.array([0.5, -2.0])
ub = np.array([5.0, 2.0 ])
lbA = np.array([-1.0 ])
ubA = np.array([2.0])
# Setup data of second QP.
g_new = np.array([1.0, 1.5])
lb_new = np.array([0.0, -1.0])
ub_new = np.array([5.0, -0.5])
lbA_new = np.array([-2.0])
ubA_new = np.array([1.0])
# Setting up QProblem object.
example = QProblem(2, 1)
options = Options()
options.printLevel = PrintLevel.NONE
example.setOptions(options)
# Solve first QP.
nWSR = 10;
example.init(H, g, A, lb, ub, lbA, ubA, nWSR)
# Solve second QP.
nWSR = 10
# Stress/benchmark loop: hotstart the same problem repeatedly while
# perturbing the first gradient entry.
for i in range(100000):
    for j in range(1, 100):
        g_new[0] = i%j
        example.hotstart( g_new, lb_new, ub_new, lbA_new, ubA_new, nWSR)
# Get and print solution of second QP.
xOpt = np.zeros(2)
example.getPrimalSolution(xOpt);
print("\nxOpt = [ %e, %e ]; objVal = %e\n\n"%(xOpt[0],xOpt[1],example.getObjVal()))
example.printOptions();
| 2,227 | 27.935065 | 84 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/examples/example1b.py | ##
## This file is part of qpOASES.
##
## qpOASES -- An Implementation of the Online Active Set Strategy.
## Copyright (C) 2007-2014 by Hans Joachim Ferreau, Andreas Potschka,
## Christian Kirches et al. All rights reserved.
##
## qpOASES is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## qpOASES is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with qpOASES; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Example adapted from examples/example1b.cpp.
## author of this file: Sebastian F. Walter
import numpy as np
from qpoases import PyQProblemB as QProblemB
from qpoases import PyBooleanType as BooleanType
from qpoases import PySubjectToStatus as SubjectToStatus
from qpoases import PyOptions as Options
# Example for qpOASES main function using the QProblemB class
# (bound-constrained QPs only -- no general linear constraints).
#Setup data of first QP.
H = np.array([1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))
g = np.array([1.5, 1.0 ])
lb = np.array([0.5, -2.0])
ub = np.array([5.0, 2.0 ])
# Setup data of second QP.
g_new = np.array([1.0, 1.5])
lb_new = np.array([0.0, -1.0])
ub_new = np.array([5.0, -0.5])
# Setting up QProblemB object.
example = QProblemB(2)
options = Options()
options.enableFlippingBounds = BooleanType.FALSE
options.initialStatusBounds = SubjectToStatus.INACTIVE
options.numRefinementSteps = 1
example.setOptions(options)
# Solve first QP.
nWSR = 10
example.init(H, g, lb, ub, nWSR);
print("\nnWSR = %d\n\n"%nWSR)
# Solve second QP (hotstarted from the first solution).
nWSR = 10;
example.hotstart(g_new, lb_new, ub_new, nWSR)
print("\nnWSR = %d\n\n"% nWSR)
# Get and print solution of second QP.
xOpt = np.zeros(2)
example.getPrimalSolution(xOpt)
print("\nxOpt = [ %e, %e ]; objVal = %e\n\n" %(xOpt[0], xOpt[1],
                                               example.getObjVal()))
| 2,282 | 30.273973 | 81 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/examples/cython/setup.py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# Build the example1 Cython extension with OpenMP enabled at both compile
# and link time.
ext_module = Extension(
    "example1",
    ["example1.pyx"],
    extra_compile_args=['-fopenmp'],
    extra_link_args=['-fopenmp'],
)
setup(
    name = 'Hello world app',
    cmdclass = {'build_ext': build_ext},
    ext_modules = [ext_module],
)
| 363 | 20.411765 | 41 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/tests/test_idhessian.py | """
This file is part of qpOASES.
qpOASES -- An Implementation of the Online Active Set Strategy.
Copyright (C) 2007-2009 by Hans Joachim Ferreau et al. All rights reserved.
qpOASES is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
qpOASES is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with qpOASES; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
#TODO add doxygen support
# \author Manuel Kudruss
# \version 3.0beta
# \date 2013
import os
import numpy as np
from numpy.testing import *
from qpoases import PyQProblem as QProblem
from qpoases import PyBooleanType as BooleanType
from qpoases import PyOptions as Options
from qpoases import PyPrintLevel as PrintLevel
# get qpOASES path: ascend four levels (tests -> python -> interfaces -> root)
qpoases_path = os.path.dirname(os.path.abspath(__file__))
qpoases_path = os.path.dirname(qpoases_path)
qpoases_path = os.path.dirname(qpoases_path)
qpoases_path = os.path.dirname(qpoases_path)
# set qpOASES testing path (holds the QP data files loaded by the tests)
testing_path = os.path.join(qpoases_path, "testing")
class TestIdHessian(TestCase):
    """Regression test: solve a 72-variable / 144-constraint QP loaded from disk."""
    def test_id_hessian(self):
        """Very simple example for testing qpOASES (using QProblem class)."""
        # NOTE(review): this test only checks that init() runs without
        # raising; the computed solution is never asserted (see FIXME below).
        path = os.path.join(testing_path, "dev_idhessian_data")
        #Setup data for QP.
        H   = np.loadtxt(os.path.join(path, "H.txt"))
        g   = np.loadtxt(os.path.join(path, "g.txt"))
        A   = np.loadtxt(os.path.join(path, "A.txt"))
        lb  = np.loadtxt(os.path.join(path, "lb.txt"))
        ub  = np.loadtxt(os.path.join(path, "ub.txt"))
        lbA = np.loadtxt(os.path.join(path, "lbA.txt"))
        ubA = np.loadtxt(os.path.join(path, "ubA.txt"))
        #Setting up QProblem object.
        qp = QProblem(72,144)
        options = Options()
        options.numRefinementSteps   = 1
        options.printLevel           = PrintLevel.NONE
        # Commented lines below are kept as a record of option combinations
        # tried while debugging this problem instance.
        #options.setToMPC()
        #options.setToReliable()
        #options.enableFlippingBounds = BooleanType.FALSE
        options.enableRamping = BooleanType.FALSE
        #options.enableRamping      = BooleanType.TRUE
        #options.enableFarBounds    = BooleanType.FALSE
        #options.enableRamping      = BooleanType.FALSE
        #options.printLevel         = PL_LOW
        #options.enableFullLITests  = BooleanType.FALSE
        #options.boundRelaxation    = 1.0e-1
        qp.setOptions( options )
        #Solve QP.
        nWSR = 1200
        qp.init(H, g, A, lb, ub, lbA, ubA, nWSR)
        # FIXME check against what?
        # Where can I find solution?
if __name__=="__main__":
    try:
        import nose
        nose.runmodule()
    except ImportError:
        # BUG FIX: `sys` was referenced here without ever being imported in
        # this file, so a missing nose install raised NameError instead of
        # printing the install hint.  Import locally to keep the fix contained.
        import sys
        sys.stderr.write("Please install nosestests for python unittesting.\n")
| 3,128 | 31.936842 | 79 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/tests/test_examples.py | """
This file is part of qpOASES.
qpOASES -- An Implementation of the Online Active Set Strategy.
Copyright (C) 2007-2009 by Hans Joachim Ferreau et al. All rights reserved.
qpOASES is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
qpOASES is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with qpOASES; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
author Manuel Kudruss
version 3.0beta
date 2013
"""
import os
import re
import numpy as np
from numpy.testing import *
from subprocess import Popen, PIPE, STDOUT
from qpoases import PyQProblem as QProblem
from qpoases import PyQProblemB as QProblemB
from qpoases import PySQProblem as SQProblem
from qpoases import PySolutionAnalysis as SolutionAnalysis
from qpoases import PyBooleanType as BooleanType
from qpoases import PySubjectToStatus as SubjectToStatus
from qpoases import PyOptions as Options
from qpoases import PyPrintLevel as PrintLevel
# get qpOASES path
qpoases_path = os.path.dirname(os.path.abspath(__file__))
qpoases_path = os.path.dirname(qpoases_path)
qpoases_path = os.path.dirname(qpoases_path)
qpoases_path = os.path.dirname(qpoases_path)
# set qpOASES binary path
bin_path = os.path.join(qpoases_path, "bin")
class TestExamples(TestCase):
    """Compare the Python qpOASES bindings against the compiled C++ example
    binaries: each test solves the same QP via the bindings and parses the
    reference solution from the example executable's stdout."""

    def test_example1(self):
        """QProblem example; compares against the `example1` binary."""
        # NOTE(review): this early return disables the whole test body below
        # (dead code) -- presumably deliberate; confirm before removing.
        return 0
        # Example for qpOASES main function using the QProblem class.
        #Setup data of first QP.
        H = np.array([1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))
        A = np.array([1.0, 1.0 ]).reshape((2,1))
        g = np.array([1.5, 1.0 ])
        lb = np.array([0.5, -2.0])
        ub = np.array([5.0, 2.0 ])
        lbA = np.array([-1.0 ])
        ubA = np.array([2.0])
        # Setup data of second QP.
        g_new = np.array([1.0, 1.5])
        lb_new = np.array([0.0, -1.0])
        ub_new = np.array([5.0, -0.5])
        lbA_new = np.array([-2.0])
        ubA_new = np.array([1.0])
        # Setting up QProblemB object.
        qp = QProblem(2, 1)
        options = Options()
        options.printLevel = PrintLevel.NONE
        qp.setOptions(options)
        # Solve first QP.
        nWSR = 10
        qp.init(H, g, A, lb, ub, lbA, ubA, nWSR)
        # Solve second QP.
        nWSR = 10
        qp.hotstart(g_new, lb_new, ub_new, lbA_new, ubA_new, nWSR)
        # Get and print solution of second QP.
        xOpt_actual = np.zeros(2)
        qp.getPrimalSolution(xOpt_actual)
        xOpt_actual = np.asarray(xOpt_actual, dtype=float)
        objVal_actual = qp.getObjVal()
        objVal_actual = np.asarray(objVal_actual, dtype=float)
        # Run the reference C++ example and capture its stdout.
        cmd = os.path.join(bin_path, "example1")
        p = Popen(cmd, shell=True, stdout=PIPE)
        stdout, stderr = p.communicate()
        stdout = str(stdout).replace('\\n', '\n')
        stdout = stdout.replace("'", '')
        print(stdout)
        # get c++ solution from std
        pattern = re.compile(r'xOpt\s*=\s*\[\s+(?P<xOpt>([0-9., e+-])*)\];')
        match = pattern.search(stdout)
        xOpt_expected = match.group('xOpt')
        xOpt_expected = xOpt_expected.split(",")
        xOpt_expected = np.asarray(xOpt_expected, dtype=float)
        pattern = re.compile(r'objVal = (?P<objVal>[0-9-+e.]*)')
        match = pattern.search(stdout)
        objVal_expected = match.group('objVal')
        objVal_expected = np.asarray(objVal_expected, dtype=float)
        print("xOpt_actual =", xOpt_actual)
        print("xOpt_expected =", xOpt_expected)
        print("objVal_actual = ", objVal_actual)
        print("objVal_expected = ", objVal_expected)
        assert_almost_equal(xOpt_actual, xOpt_expected, decimal=7)
        assert_almost_equal(objVal_actual, objVal_expected, decimal=7)

    def test_example1b(self):
        """QProblemB (bounds-only) example; compares against `example1b`."""
        # Example for qpOASES main function using the QProblemB class.
        #Setup data of first QP.
        H = np.array([1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))
        g = np.array([1.5, 1.0 ])
        lb = np.array([0.5, -2.0])
        ub = np.array([5.0, 2.0 ])
        # Setup data of second QP.
        g_new = np.array([1.0, 1.5])
        lb_new = np.array([0.0, -1.0])
        ub_new = np.array([5.0, -0.5])
        # Setting up QProblemB object.
        qp = QProblemB(2)
        options = Options()
        options.enableFlippingBounds = BooleanType.FALSE
        options.initialStatusBounds = SubjectToStatus.INACTIVE
        options.numRefinementSteps = 1
        options.printLevel = PrintLevel.NONE
        qp.setOptions(options)
        # Solve first QP.
        nWSR = 10
        qp.init(H, g, lb, ub, nWSR)
        # Solve second QP.
        nWSR = 10;
        qp.hotstart(g_new, lb_new, ub_new, nWSR)
        # Get and print solution of second QP.
        xOpt_actual = np.zeros(2)
        qp.getPrimalSolution(xOpt_actual)
        xOpt_actual = np.asarray(xOpt_actual, dtype=float)
        objVal_actual = qp.getObjVal()
        objVal_actual = np.asarray(objVal_actual, dtype=float)
        # Run the reference C++ example and capture its stdout.
        cmd = os.path.join(bin_path, "example1b")
        p = Popen(cmd, shell=True, stdout=PIPE)
        stdout, stderr = p.communicate()
        stdout = str(stdout).replace('\\n', '\n')
        stdout = stdout.replace("'", '')
        print(stdout)
        # get c++ solution from std
        pattern = re.compile(r'xOpt\s*=\s*\[\s+(?P<xOpt>([0-9., e+-])*)\];')
        match = pattern.search(stdout)
        xOpt_expected = match.group('xOpt')
        xOpt_expected = xOpt_expected.split(",")
        xOpt_expected = np.asarray(xOpt_expected, dtype=float)
        pattern = re.compile(r'objVal = (?P<objVal>[0-9-+e.]*)')
        match = pattern.search(stdout)
        objVal_expected = match.group('objVal')
        objVal_expected = np.asarray(objVal_expected, dtype=float)
        print("xOpt_actual =", xOpt_actual)
        print("xOpt_expected =", xOpt_expected)
        print("objVal_actual = ", objVal_actual)
        print("objVal_expected = ", objVal_expected)
        assert_almost_equal(xOpt_actual, xOpt_expected, decimal=7)
        assert_almost_equal(objVal_actual, objVal_expected, decimal=7)

    def test_example2(self):
        """SQProblem example with KKT-violation and variance-covariance
        analysis; compares against `example2`."""
        # Example for qpOASES main function using the SQProblem class.
        # Setup data of first QP.
        H = np.array([ 1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))
        A = np.array([ 1.0, 1.0 ]).reshape((2,1))
        g = np.array([ 1.5, 1.0 ])
        lb = np.array([ 0.5, -2.0 ])
        ub = np.array([ 5.0, 2.0 ])
        lbA = np.array([ -1.0 ])
        ubA = np.array([ 2.0 ])
        # Setup data of second QP.
        H_new = np.array([ 1.0, 0.5, 0.5, 0.5 ]).reshape((2,2))
        A_new = np.array([ 1.0, 5.0 ]).reshape((2,1))
        g_new = np.array([ 1.0, 1.5 ])
        lb_new = np.array([ 0.0, -1.0 ])
        ub_new = np.array([ 5.0, -0.5 ])
        lbA_new = np.array([ -2.0 ])
        ubA_new = np.array([ 1.0 ])
        # Setting up SQProblem object and solution analyser.
        qp = SQProblem(2, 1)
        options = Options()
        options.printLevel = PrintLevel.NONE
        qp.setOptions(options)
        analyser = SolutionAnalysis()
        # get c++ solution from std
        cmd = os.path.join(bin_path, "example2")
        p = Popen(cmd, shell=True, stdout=PIPE)
        stdout, stderr = p.communicate()
        stdout = str(stdout).replace('\\n', '\n')
        stdout = stdout.replace("'", '')
        print(stdout)
        # Solve first QP ...
        nWSR = 10
        qp.init(H, g, A, lb, ub, lbA, ubA, nWSR)
        # ... and analyse it.
        maxKKTviolation = np.zeros(1)
        analyser.getMaxKKTviolation(qp, maxKKTviolation)
        print("maxKKTviolation: %e\n"%maxKKTviolation)
        actual = np.asarray(maxKKTviolation)
        pattern = re.compile(r'maxKKTviolation: (?P<maxKKTviolation>[0-9+-e.]*)')
        match = pattern.findall(stdout)
        expected = np.asarray(match[0], dtype=float)
        assert_almost_equal(actual, expected, decimal=7)
        # Solve second QP ...
        nWSR = 10
        qp.hotstart(H_new, g_new, A_new,
                    lb_new, ub_new,
                    lbA_new, ubA_new, nWSR)
        # ... and analyse it.
        analyser.getMaxKKTviolation(qp, maxKKTviolation)
        print("maxKKTviolation: %e\n"%maxKKTviolation)
        actual = np.asarray(maxKKTviolation)
        expected = np.asarray(match[1], dtype=float)
        assert_almost_equal(actual, expected, decimal=7)
        # ------------ VARIANCE-COVARIANCE EVALUATION --------------------
        Var = np.zeros(5*5)
        Primal_Dual_Var = np.zeros(5*5)
        Var.reshape((5,5))[0,0] = 1.
        Var.reshape((5,5))[1,1] = 1.
        # ( 1 0 0 0 0 )
        # ( 0 1 0 0 0 )
        # Var = ( 0 0 0 0 0 )
        # ( 0 0 0 0 0 )
        # ( 0 0 0 0 0 )
        analyser.getVarianceCovariance(qp, Var, Primal_Dual_Var)
        print('Primal_Dual_Var=\n', Primal_Dual_Var.reshape((5,5)))
        actual = Primal_Dual_Var.reshape((5,5))
        # Parse the 5x5 reference matrix printed by the C++ example.
        pattern = re.compile(r'Primal_Dual_VAR = (?P<VAR>.*)',
                             re.DOTALL)
        print(stdout)
        match = pattern.search(stdout)
        expected = match.group('VAR').strip().split("\n")
        expected = [x.strip().split() for x in expected]
        print(expected)
        expected = np.asarray(expected, dtype=float)
        assert_almost_equal(actual, expected, decimal=7)

    def test_example7(self):
        """5x5 dense QP with box-like constraints; solve-only smoke test."""
        H = np.array([ 0.8514828085899353, -0.15739890933036804, -0.081726007163524628, -0.530426025390625, 0.16773293912410736,
                       -0.15739890933036804, 1.1552412509918213, 0.57780224084854126, -0.0072606131434440613, 0.010559185408055782,
                       -0.081726007163524628, 0.57780224084854126, 0.28925251960754395, 5.324830453901086e-006, -3.0256599075073609e-006,
                       -0.530426025390625, -0.0072606131434440613, 5.324830453901086e-006, 0.35609596967697144, -0.15124998986721039,
                       0.16773293912410736, 0.010559185408055782, -3.0256599075073609e-006, -0.15124998986721039,
                       0.15129712224006653], dtype=float).reshape((5, 5))
        g = np.array([0.30908384919166565, 0.99325823783874512, 0.49822014570236206, -0.26309865713119507, 0.024296050891280174], dtype=float).reshape((5,))
        A = np.array([1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], dtype=float).reshape((5, 5))
        lb = np.array([-0.052359879016876221, -0.052359879016876221, -0.052359879016876221, -0.052359879016876221, -0.052359938621520996], dtype=float).reshape((5,))
        ub = np.array([ 0.052359879016876221, 0.052359879016876221, 0.052359879016876221, 0, 0], dtype=float).reshape((5,))
        lbA = np.array([-0.052359879016876221, -0.052359879016876221, -0.052359879016876221, -0.052359879016876221, -0.052359938621520996], dtype=float).reshape((5,))
        ubA = np.array([0.052359879016876221, 0.052359879016876221, 0.052359879016876221, 0, 0], dtype=float).reshape((5,))
        # Setting up QProblem object.
        qp = QProblem(5, 5)
        options = Options()
        options.printLevel = PrintLevel.NONE
        qp.setOptions(options)
        # Solve first QP.
        nWSR = 100
        qp.init(H, g, A, lb, ub, lbA, ubA, nWSR)
        result = np.zeros((5,))
        qp.getPrimalSolution(result)
        # TODO check against what?
        # Where can I find solution?
if __name__=="__main__":
    try:
        import nose
        nose.runmodule()
    except ImportError:
        # Bug fix: `sys` was never imported in this module, so reaching this
        # branch raised a NameError instead of printing the install hint.
        import sys
        sys.stderr.write("Please install nosestests for python unittesting.\n")
| 12,207 | 36.106383 | 166 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/tests/test_testbench.py | """
This file is part of qpOASES.
qpOASES -- An Implementation of the Online Active Set Strategy.
Copyright (C) 2007-2009 by Hans Joachim Ferreau et al. All rights reserved.
qpOASES is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
qpOASES is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with qpOASES; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
author Manuel Kudruss
version 3.0beta
date 2013
"""
import os
import numpy as np
from numpy.testing import *
from qpoases import py_runOQPbenchmark as runOQPbenchmark
from qpoases import PyQProblem as QProblem
from qpoases import PyBooleanType as BooleanType
from qpoases import PyReturnValue as ReturnValue
from qpoases import PyOptions as Options
from qpoases import PyPrintLevel as PrintLevel
# get qpOASES path
qpoases_path = os.path.dirname(os.path.abspath(__file__))
qpoases_path = os.path.dirname(qpoases_path)
qpoases_path = os.path.dirname(qpoases_path)
qpoases_path = os.path.dirname(qpoases_path)
# set qpOASES testing path
testing_path = os.path.join(qpoases_path, 'testing')
benchmarks = ('CVXQP1_S',
'CVXQP2_S',
'CVXQP3_S',
'DPKLO1',
'DUAL1',
'DUAL2',
'DUAL3',
'DUAL4',
'DUALC1',
'DUALC2',
'DUALC5',
'DUALC8',
'GENHS28',
'HS118',
'HS21',
'HS268',
'HS35',
'HS35MOD',
'HS51',
'HS52',
'HS53',
'HS76',
'LOTSCHD',
'PRIMALC1',
'PRIMALC2',
'PRIMALC5',
'QADLITTL',
'QAFIRO',
'QBEACONF',
'QBRANDY',
'QE226',
'QISRAEL',
'QPCBLEND',
'QPCBOEI2',
'QPTEST',
'QRECIPE',
'QSC205',
'QSCAGR7',
'QSHARE1B',
'QSHARE2B',
'S268',
'TAME',
'VALUES',
'ZECEVIC2',
)
def results2str(results):
    """Render a benchmark-results dictionary as a human-readable table,
    followed by a pass/fail summary and pass ratio."""
    separator = '{0:->11}|{0:-<12}|{0:-<12}|{0:-<12}|{0:-<8}|{0:-<8}\n'.format("")
    header = '{:<10} | {: <10} | {: <10} | {: <10} | {: <6} | {:<6}\n'.format('problem', 'stat', 'feas', 'compl', 'nWSR', 'result')
    row_template = '{name:<10} | {stat: >10.4e} | {feas: >10.4e} | {comp: >10.4e} | {nwsr: >6d} | {pass!s:<6}\n'

    parts = [header, separator]
    passed = 0
    failed = 0
    for key in results:
        parts.append(row_template.format(**results[key]))
        if results[key]['pass']:
            passed += 1
        else:
            failed += 1
    parts.append(separator)
    parts.append('\n')
    parts.append('Testbench results:\n')
    parts.append('==================\n')
    parts.append('Pass: {: >10d}\n'.format(passed))
    parts.append('Fail: {: >10d}\n'.format(failed))
    parts.append('------------------\n')
    parts.append('Ratio: {: >10.2%}\n'.format(passed / float(len(results))))
    return "".join(parts)
def write_results(name, string):
    """Persist a result report under a 'results' directory next to this file.

    The directory is created on first use; *string* is written verbatim into
    the file called *name*.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    out_dir = os.path.join(here, 'results')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    target = os.path.join(out_dir, name)
    with open(target, 'w') as handle:
        handle.write(string)
def get_nfail(results):
    """Count the benchmark entries whose 'pass' flag is False."""
    return sum(1 for entry in results.values() if not entry['pass'])
def run_benchmarks(benchmarks, options, isSparse, useHotstarts,
                   nWSR, cpu_time, TOL):
    """Run every benchmark problem and collect per-problem statistics.

    Returns a dict mapping each benchmark name to a record holding the
    maximum stationarity / feasibility / complementarity violations, the
    maximum number of working-set recalculations, and a pass/fail verdict.
    """
    results = {}
    for item in benchmarks:
        # NOTE: the C/C++ function expects a trailing slash in the path!
        problem_path = os.path.join(testing_path, 'problems', item, '')
        (returnvalue, maxNWSR, avgNWSR, maxCPUtime, avgCPUtime,
         maxStationarity, maxFeasibility, maxComplementarity) = runOQPbenchmark(
            problem_path, isSparse, useHotstarts, options, nWSR, cpu_time)
        # A problem passes when the solver succeeded and every KKT violation
        # stays below the tolerance.
        passed = (returnvalue == ReturnValue.SUCCESSFUL_RETURN
                  and maxStationarity < TOL
                  and maxFeasibility < TOL
                  and maxComplementarity < TOL)
        results[item] = {'name': item,
                         'stat': maxStationarity,
                         'feas': maxFeasibility,
                         'comp': maxComplementarity,
                         'nwsr': int(maxNWSR),
                         'pass': bool(passed),
                         }
    return results
class Testbench(TestCase):
    """Run the qpOASES benchmark collection under several option presets.

    Each test configures the solver (default / MPC / reliable options with
    dense or sparse matrices), runs all benchmark problems, writes a report,
    and asserts that the number of failing problems stays within a
    preset-specific limit.

    Refactor: the six test methods were near-identical copies; the shared
    body now lives in ``_run_case``.  This also normalizes the one method
    that printed the bare test name without the "Test: " prefix.
    """

    def setUp(self):
        # Setup global options for every problem
        self.TOL = 1e-5
        self.nWSR = 3500
        self.cpu_time = 300
        self.decimal = 7  # number of decimals for assert

    def _run_case(self, test_name, preset, isSparse, max_nfail):
        """Run all benchmarks for one option preset and check the fail count.

        `preset` selects which Options.setTo*() method is applied;
        `max_nfail` is the largest tolerated number of failing problems.
        """
        print("Test: ", test_name)
        # QP Options
        options = Options()
        if preset == 'mpc':
            options.setToMPC()
        elif preset == 'reliable':
            options.setToReliable()
        else:
            options.setToDefault()
        options.printLevel = PrintLevel.NONE

        useHotstarts = False

        # run QP benchmarks
        results = run_benchmarks(benchmarks, options, isSparse, useHotstarts,
                                 self.nWSR, self.cpu_time, self.TOL)
        # print and write results
        string = results2str(results)
        print(string)
        write_results(test_name, string)

        assert get_nfail(results) <= max_nfail, 'One ore more tests failed.'

    def test_m44_default_dense(self):
        self._run_case('mm44_default_dense.txt', 'default', False, 0)

    def test_m44_default_sparse(self):
        self._run_case('mm44_default_sparse.txt', 'default', True, 0)

    def test_m44_mpc_dense(self):
        self._run_case('mm44_mpc_dense.txt', 'mpc', False, 2)

    def test_m44_mpc_sparse(self):
        self._run_case('mm44_mpc_sparse.txt', 'mpc', True, 19)

    def test_m44_reliable_dense(self):
        self._run_case('mm44_reliable_dense.txt', 'reliable', False, 0)

    def test_m44_reliable_sparse(self):
        self._run_case('mm44_reliable_sparse.txt', 'reliable', True, 0)
if __name__=='__main__':
    try:
        import nose
        nose.runmodule(argv=['', '-s', '-v'])
    except ImportError:
        # Bug fix: `sys` was never imported in this module, so reaching this
        # branch raised a NameError instead of printing the install hint.
        import sys
        sys.stderr.write('Please install nosestests for python unittesting.\n')
| 9,796 | 28.42042 | 132 | py |
StructuralInspectionPlanner | StructuralInspectionPlanner-master/optec/interfaces/python/tests/__init__.py | 1 | 0 | 0 | py | |
ultrasound-nerve-segmentation | ultrasound-nerve-segmentation-master/data.py | from __future__ import print_function
import os
import numpy as np
from skimage.io import imsave, imread
data_path = 'raw/'
image_rows = 420
image_cols = 580
def create_train_data():
    """Read the training images + masks from raw/train and cache them as .npy.

    Files come in pairs (<id>.tif and <id>_mask.tif); both are loaded as
    grayscale uint8 arrays and written to imgs_train.npy /
    imgs_mask_train.npy in the current directory.
    """
    train_data_path = os.path.join(data_path, 'train')
    images = os.listdir(train_data_path)
    # Bug fix: use integer division -- plain `/` yields a float under
    # Python 3 and np.ndarray() rejects a float dimension.  Each sample has
    # two files (image + mask), hence the halving.
    total = len(images) // 2
    imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
    imgs_mask = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
    i = 0
    print('-'*30)
    print('Creating training images...')
    print('-'*30)
    for image_name in images:
        # Mask files are handled together with their paired image.
        if 'mask' in image_name:
            continue
        image_mask_name = image_name.split('.')[0] + '_mask.tif'
        img = imread(os.path.join(train_data_path, image_name), as_grey=True)
        img_mask = imread(os.path.join(train_data_path, image_mask_name), as_grey=True)
        img = np.array([img])
        img_mask = np.array([img_mask])
        imgs[i] = img
        imgs_mask[i] = img_mask
        if i % 100 == 0:
            print('Done: {0}/{1} images'.format(i, total))
        i += 1
    print('Loading done.')
    np.save('imgs_train.npy', imgs)
    np.save('imgs_mask_train.npy', imgs_mask)
    print('Saving to .npy files done.')
def load_train_data():
    """Load the cached training images and masks (see create_train_data)."""
    return np.load('imgs_train.npy'), np.load('imgs_mask_train.npy')
def create_test_data():
    """Read the test images from raw/test and cache them as .npy files.

    Stores the pixel data in imgs_test.npy and the numeric image ids (taken
    from the file names) in imgs_id_test.npy.
    """
    test_data_path = os.path.join(data_path, 'test')
    file_names = os.listdir(test_data_path)
    total = len(file_names)

    imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
    imgs_id = np.ndarray((total, ), dtype=np.int32)

    print('-'*30)
    print('Creating test images...')
    print('-'*30)
    for i, image_name in enumerate(file_names):
        # The file name (without extension) is the numeric image id.
        imgs_id[i] = int(image_name.split('.')[0])
        img = imread(os.path.join(test_data_path, image_name), as_grey=True)
        imgs[i] = np.array([img])
        if i % 100 == 0:
            print('Done: {0}/{1} images'.format(i, total))
    print('Loading done.')

    np.save('imgs_test.npy', imgs)
    np.save('imgs_id_test.npy', imgs_id)
    print('Saving to .npy files done.')
def load_test_data():
    """Load the cached test images and their ids (see create_test_data)."""
    return np.load('imgs_test.npy'), np.load('imgs_id_test.npy')
if __name__ == '__main__':
    # Build both .npy caches when executed as a script.
    create_train_data()
    create_test_data()
| 2,446 | 25.031915 | 87 | py |
ultrasound-nerve-segmentation | ultrasound-nerve-segmentation-master/submission.py | from __future__ import print_function
import numpy as np
from skimage.transform import resize
from data import image_cols, image_rows
def prep(img):
    """Binarize a predicted mask and resize it for submission encoding.

    NOTE(review): the resize target is (image_cols, image_rows) = (580, 420),
    i.e. column/row order -- this matches what run_length_enc's transpose
    expects downstream, but confirm the intended orientation before changing.
    """
    img = img.astype('float32')
    img = (img > 0.5).astype(np.uint8)  # threshold
    img = resize(img, (image_cols, image_rows), preserve_range=True)
    return img
def run_length_enc(label):
    """Run-length encode a mask in column-major order (Kaggle RLE format).

    Returns a space-separated string of alternating 1-based start positions
    and run lengths.  Masks with fewer than 10 foreground pixels are treated
    as empty and encoded as ''.
    """
    flat = label.transpose().flatten()
    nonzero = np.where(flat > 0)[0]
    if len(nonzero) < 10:  # consider as empty
        return ''
    # Indices where a new run begins (gap in the foreground index sequence).
    breaks = np.where(np.diff(nonzero) > 1)[0]
    starts = np.insert(nonzero[breaks + 1], 0, nonzero[0])
    ends = np.append(nonzero[breaks], nonzero[-1])
    lengths = ends - starts
    tokens = []
    for s, l in zip(list(starts), list(lengths)):
        tokens.append(str(s + 1))
        tokens.append(str(l + 1))
    return ' '.join(tokens)
def submission():
    """Encode the predicted test masks and write submission.csv.

    Loads the cached test ids, replaces the pixel data with the predicted
    masks from imgs_mask_test.npy, sorts everything by image id, and writes
    one `id,rle` row per image.
    """
    from data import load_test_data
    imgs_test, imgs_id_test = load_test_data()
    # The cached test pixels are not needed -- only the ids; the actual
    # masks come from the prediction file.
    imgs_test = np.load('imgs_mask_test.npy')

    order = np.argsort(imgs_id_test)
    imgs_id_test = imgs_id_test[order]
    imgs_test = imgs_test[order]

    total = imgs_test.shape[0]
    ids = []
    rles = []
    for i in range(total):
        mask = prep(imgs_test[i, 0])
        rles.append(run_length_enc(mask))
        ids.append(imgs_id_test[i])
        if i % 100 == 0:
            print('{}/{}'.format(i, total))

    with open('submission.csv', 'w+') as f:
        f.write('img,pixels' + '\n')
        for img_id, rle in zip(ids, rles):
            f.write(str(img_id) + ',' + rle + '\n')
if __name__ == '__main__':
    # Generate submission.csv when run as a script.
    submission()
| 1,643 | 24.292308 | 68 | py |
ultrasound-nerve-segmentation | ultrasound-nerve-segmentation-master/train.py | from __future__ import print_function
import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from data import load_train_data, load_test_data
K.set_image_data_format('channels_last') # TF dimension ordering in this code
img_rows = 96
img_cols = 96
smooth = 1.
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two masks, smoothed to avoid 0/0."""
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    return (2. * overlap + smooth) / (K.sum(flat_true) + K.sum(flat_pred) + smooth)
def dice_coef_loss(y_true, y_pred):
    # Negate the Dice coefficient so maximizing overlap minimizes the loss.
    return -dice_coef(y_true, y_pred)
def get_unet():
    """Build and compile the U-Net model used for mask segmentation.

    Encoder: four conv/conv/pool stages (32 -> 256 filters) plus a 512-filter
    bottleneck.  Decoder: four transposed-conv upsampling stages, each
    concatenated with the matching encoder feature map (skip connections).
    Compiled with Adam and the negative-Dice loss defined above.
    """
    inputs = Input((img_rows, img_cols, 1))
    # --- encoder ---
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    # --- bottleneck ---
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
    # --- decoder with skip connections (axis=3 -> channels_last) ---
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
    # Per-pixel sigmoid gives a probability mask.
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def preprocess(imgs):
    """Resize a stack of grayscale images to the network input size.

    Returns a uint8 array of shape (N, img_rows, img_cols, 1).
    """
    resized = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)
    for idx, img in enumerate(imgs):
        resized[idx] = resize(img, (img_cols, img_rows), preserve_range=True)
    # Append a trailing channel axis for Keras' channels_last layout.
    return resized[..., np.newaxis]
def train_and_predict():
    """End-to-end pipeline: train the U-Net, predict test masks, save PNGs.

    Normalizes the training images with their own mean/std, fits the model
    with checkpointing on validation loss, then applies the same
    normalization to the test set, predicts, and writes one *_pred.png per
    test image into ./preds.
    """
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()
    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    imgs_train -= mean
    imgs_train /= std
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    # Keep only the best weights seen so far (lowest validation loss).
    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, imgs_mask_train, batch_size=32, nb_epoch=20, verbose=1, shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])
    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    imgs_test = imgs_test.astype('float32')
    # Reuse the training-set statistics for test normalization.
    imgs_test -= mean
    imgs_test /= std
    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('weights.h5')
    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        # Convert the [0,1] probability mask back to 8-bit grayscale.
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
if __name__ == '__main__':
    # Run the full train/predict pipeline when executed as a script.
    train_and_predict()
| 5,352 | 33.75974 | 107 | py |
deep-video-mvs | deep-video-mvs-master/setup.py | from setuptools import setup
# Minimal packaging configuration for the project; only the top-level
# `dvmvs` package is declared (no sub-packages or dependencies listed).
setup(
    name='deep-video-mvs',
    version='1.0',
    description='Deep Video Multi-View Stereo',
    author='Arda Düzceker',
    packages=['dvmvs']
)
| 184 | 17.5 | 47 | py |
deep-video-mvs | deep-video-mvs-master/dataset/utils.py | import re
import numpy as np
def write_point_cloud(ply_filename, points):
    """Write colored 3D points to an ASCII PLY file.

    Each entry of `points` is a 6-sequence (x, y, z, c0, c1, c2); the alpha
    column is always written as 0.  The header declares the color channels
    in blue/green/red order, matching callers that pass OpenCV-ordered
    channels.
    """
    formatted_points = []
    for point in points:
        formatted_points.append("%f %f %f %d %d %d 0\n" % (point[0], point[1], point[2], point[3], point[4], point[5]))
    # Use a context manager so the handle is closed even if the (potentially
    # large) write fails part-way through; the original leaked it on error.
    with open(ply_filename, "w") as out_file:
        out_file.write('''ply
format ascii 1.0
element vertex %d
property float x
property float y
property float z
property uchar blue
property uchar green
property uchar red
property uchar alpha
end_header
%s
''' % (len(points), "".join(formatted_points)))
def depth_image_to_point_cloud(rgb, depth, scale, K, pose):
    """Back-project a depth image into a list of colored 3D points.

    Pixels with non-positive depth are dropped.  Each returned entry is
    [x, y, z, c0, c1, c2], with the color channels taken from `rgb` in its
    stored channel order and coordinates transformed by `pose`.
    """
    height, width = rgb.shape[0], rgb.shape[1]
    cols, rows = np.meshgrid(range(width), range(height))
    cols = cols.astype(float)
    rows = rows.astype(float)

    # Pinhole back-projection using the intrinsics K.
    z_cam = depth.astype(float) / scale
    x_cam = (cols - K[0, 2]) * z_cam / K[0, 0]
    y_cam = (rows - K[1, 2]) * z_cam / K[1, 1]

    x_flat = np.ravel(x_cam)
    y_flat = np.ravel(y_cam)
    z_flat = np.ravel(z_cam)

    valid = z_flat > 0
    x_flat = x_flat[valid]
    y_flat = y_flat[valid]
    z_flat = z_flat[valid]

    # Homogeneous coordinates, transformed into the target frame by `pose`.
    homogeneous = np.vstack((x_flat, y_flat, z_flat, np.ones(len(x_flat))))
    transformed = np.dot(pose, homogeneous)

    channel0 = np.ravel(rgb[:, :, 0])[valid]
    channel1 = np.ravel(rgb[:, :, 1])[valid]
    channel2 = np.ravel(rgb[:, :, 2])[valid]

    return np.transpose(np.vstack((transformed[0:3, :], channel0, channel1, channel2))).tolist()
def create_depth_map_from_disparity(disp, focal_length, baseline):
    """Convert a disparity map to depth via depth = baseline * f / disparity.

    Returns (depth, mask) where mask is True at pixels whose disparity was
    zero (depth == +inf there, i.e. no valid measurement).
    """
    # Suppress the divide-by-zero RuntimeWarning: zero disparity is expected
    # in real data and is reported through the returned mask instead.
    with np.errstate(divide='ignore'):
        depth = baseline * focal_length / disp
    mask = depth == np.inf
    return depth, mask
def read_pfm(file):
    """Read a PFM (Portable Float Map) file.

    Returns (data, scale) where data is an (H, W, 3) array for color files
    ('PF' header) or (H, W) for grayscale ('Pf'), and scale is the absolute
    value of the header scale field.  The scale's sign encodes endianness
    and is consumed here; raises Exception on a malformed header.
    """
    file = open(file, 'rb')
    color = None
    width = None
    height = None
    scale = None
    endian = None
    # First header line: 'PF' = color, 'Pf' = grayscale.
    header = file.readline().rstrip()
    header = str(bytes.decode(header, encoding='utf-8'))
    if header == 'PF':
        color = True
    elif header == 'Pf':
        color = False
    else:
        raise Exception('Not a PFM file.')
    # Second line: "<width> <height>".
    temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
    dim_match = re.match(r'^(\d+)\s(\d+)\s$', temp_str)
    if dim_match:
        width, height = map(int, dim_match.groups())
    else:
        raise Exception('Malformed PFM header.')
    # Third line: scale; a negative value means little-endian data.
    scale = float(file.readline().rstrip())
    if scale < 0: # little-endian
        endian = '<'
        scale = -scale
    else:
        endian = '>' # big-endian
    data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # DEY: I don't know why this was there.
    file.close()
    return data, scale
| 2,575 | 22.633028 | 119 | py |
deep-video-mvs | deep-video-mvs-master/dataset/build_point_cloud.py | import os
import sys
import numpy as np
import cv2
from path import Path
from tqdm import tqdm
sys.path.append('.')
from utils import depth_image_to_point_cloud, write_point_cloud
def build_point_cloud(dataset_folder, scene_name, is_test=True):
    """Fuse the RGB-D frames of one scene into colored PLY point-cloud parts.

    Every 10th frame is back-projected using the scene intrinsics (K.txt)
    and per-frame poses (poses.txt).  Accumulated points are flushed to a
    numbered "*_point_cloud_partN.ply" file every 30 processed frames, with
    the remainder written to "*_point_cloud_part_last.ply".

    Bug fix: the original only defined the image/depth file lists inside the
    `is_test` branch and then used them unconditionally, crashing with a
    NameError for is_test=False; that unsupported mode now fails with an
    explicit ValueError instead.
    """
    scene_folder = Path(dataset_folder) / scene_name
    poses = np.fromfile(os.path.join(scene_folder, "poses.txt"), dtype=float, sep="\n ")
    poses = np.reshape(poses, newshape=(-1, 4, 4))

    K = np.fromfile(os.path.join(scene_folder, "K.txt"), dtype=float, sep="\n ")
    K = np.reshape(K, newshape=(3, 3))

    if not is_test:
        raise ValueError("Only is_test=True is supported; the non-test folder layout is not implemented.")

    image_files = sorted(Path(os.path.join(scene_folder, "images")).files('*.png'))
    depth_files = sorted(Path(os.path.join(scene_folder, "depth")).files('*.png'))

    scene_points_3D = []
    counter = 1
    for i in tqdm(range(0, len(image_files), 10), desc=scene_name):
        image_file = image_files[i]
        depth_file = depth_files[i]

        rgb = cv2.imread(image_file)
        # Depth PNGs store millimeters; convert to meters.
        depth = cv2.imread(depth_file, -1).astype(np.float32) / 1000.0

        current_points_3D = depth_image_to_point_cloud(rgb, depth, scale=1.0, K=K, pose=poses[i])
        scene_points_3D.extend(current_points_3D)

        # Flush to disk every 30 processed frames to bound memory usage.
        if counter % 30 == 0:
            part = str((counter + 1) // 30)
            write_point_cloud(scene_name + "_point_cloud_part" + part + ".ply", scene_points_3D)
            scene_points_3D.clear()
        counter = counter + 1

    write_point_cloud(scene_name + "_point_cloud_part_last.ply", scene_points_3D)
build_point_cloud("/media/ardaduz/T5/test/7scenes", "chess-seq-01", is_test=True)
| 1,662 | 32.938776 | 101 | py |
deep-video-mvs | deep-video-mvs-master/dataset/rgbdscenes-export/rgbdscenes-export.py | import os
import shutil
from functools import partial
from multiprocessing.pool import Pool
import cv2
import numpy as np
from path import Path
from scipy.spatial.transform.rotation import Rotation
def process_scene(scene_no, input_directory, output_folder):
    """Export one RGB-D Scenes v2 sequence into the common images/depth/poses layout.

    Reads the raw color/depth PNGs and the quaternion pose file of scene
    ``scene_no``, converts poses to flattened 4x4 matrices, rescales/cleans the
    depth maps, and writes everything under ``output_folder/scene_<scene_no>``.

    :param scene_no: zero-padded scene number string, e.g. "01"
    :param input_directory: root of the raw rgbd-scenes-v2 dataset
    :param output_folder: destination root for the exported scene
    :return: the processed scene number (for progress reporting by the pool)
    """
    image_filenames = sorted((input_directory / 'imgs' / "scene_" + scene_no).files("*color*.png"))
    depth_filenames = sorted((input_directory / 'imgs' / "scene_" + scene_no).files("*depth*.png"))
    # each row: quaternion w, x, y, z followed by translation x, y, z
    extrinsics = np.loadtxt(input_directory / 'pc' / scene_no + ".pose")
    # fixed intrinsics published for the dataset's 640x480 frames
    K = np.array([[570.3, 0.0, 320.0],
                  [0.0, 570.3, 240.0],
                  [0.0, 0.0, 1.0]])
    homogen = np.zeros((1, 4))
    homogen[0, 3] = 1
    poses = []
    for extrinsic in extrinsics:
        w = extrinsic[0]
        xyz = extrinsic[1:4]
        # scipy expects (x, y, z, w) ordering
        rot = Rotation.from_quat(np.hstack((xyz, w))).as_matrix()
        tra = np.reshape(extrinsic[4:], (3, 1))
        extrinsic = np.hstack((rot, tra))
        extrinsic = np.vstack((extrinsic, homogen))
        poses.append(extrinsic)
    current_output_dir = output_folder / "scene_" + scene_no
    if os.path.isdir(current_output_dir):
        # start from a clean slate on re-runs
        shutil.rmtree(current_output_dir)
    os.mkdir(current_output_dir)
    os.mkdir(os.path.join(current_output_dir, "images"))
    os.mkdir(os.path.join(current_output_dir, "depth"))
    output_poses = []
    for current_index in range(len(image_filenames)):
        image = cv2.imread(image_filenames[current_index])
        depth = cv2.imread(depth_filenames[current_index], cv2.IMREAD_ANYDEPTH)
        depth = np.float32(depth)
        # raw depth appears to be in 0.1mm units; /10000 -> meters (TODO confirm)
        depth = depth / 10000.0
        depth[depth > 50.0] = 0.0
        depth[np.isnan(depth)] = 0.0
        depth[np.isinf(depth)] = 0.0
        # re-encode as 16-bit millimeters for the common export format
        depth = (depth * 1000.0).astype(np.uint16)
        output_poses.append(poses[current_index].ravel().tolist())
        cv2.imwrite("{}/images/{}.png".format(current_output_dir, str(current_index).zfill(6)), image, [cv2.IMWRITE_PNG_COMPRESSION, 3])
        cv2.imwrite("{}/depth/{}.png".format(current_output_dir, str(current_index).zfill(6)), depth, [cv2.IMWRITE_PNG_COMPRESSION, 3])
    output_poses = np.array(output_poses)
    np.savetxt("{}/poses.txt".format(current_output_dir), output_poses)
    np.savetxt("{}/K.txt".format(current_output_dir), K)
    return scene_no
def main():
    """Export the selected RGB-D Scenes v2 sequences using 12 parallel workers."""
    destination_root = Path("/media/ardaduz/T5/test/rgbdscenes")
    source_root = Path("/home/ardaduz/HDD/deep-mvs-dataset/raw-data/rgbd-scenes-v2-official")
    # the subset of scenes used for evaluation, as zero-padded number strings
    scene_nos = ["%02d" % number for number in (1, 2, 5, 6, 9, 10, 13, 14)]
    worker = partial(process_scene,
                     input_directory=source_root,
                     output_folder=destination_root)
    pool = Pool(12)
    for finished_scene in pool.imap_unordered(worker, scene_nos):
        print("finished", finished_scene)
if __name__ == '__main__':
main()
| 2,918 | 35.037037 | 136 | py |
deep-video-mvs | deep-video-mvs-master/dataset/tum-rgbd-export/tum-rgbd-export.py | import os
import shutil
from functools import partial
from multiprocessing.pool import Pool
import cv2
import numpy as np
from path import Path
from scipy.spatial.transform import Rotation
def get_closest_index(target_timestamp, other_timestamps):
    """Return the index into *other_timestamps* whose value is nearest to *target_timestamp*."""
    gaps = np.abs(other_timestamps - target_timestamp)
    nearest_index = np.argmin(gaps)
    return nearest_index
def process_scene(input_directory, output_folder):
    """Export one TUM RGB-D sequence into the common images/depth/poses layout.

    RGB frames, depth frames and groundtruth poses have independent timestamps
    in this dataset, so depth frames drive the iteration and the closest RGB
    frame and pose (by timestamp) are associated with each one.

    Skips sequences that were already fully exported (poses.txt and K.txt
    both present); a partial export is wiped and redone.

    :param input_directory: raw TUM sequence folder (rgb/, depth/, groundtruth.txt)
    :param output_folder: destination root; output goes to output_folder/<sequence>
    :return: the sequence name (for progress reporting by the pool)
    """
    # nominal intrinsics for the 640x480 Kinect frames
    K = np.array([[525.0, 0.0, 320.0],
                  [0.0, 525.0, 240.0],
                  [0.0, 0.0, 1.0]])
    print("processing", input_directory)
    image_filenames = sorted((input_directory / "rgb").files("*.png"))
    image_timestamps = np.loadtxt(input_directory / "rgb.txt", usecols=0)
    depth_filenames = sorted((input_directory / "depth").files("*.png"))
    depth_timestamps = np.loadtxt(input_directory / "depth.txt", usecols=0)
    # groundtruth.txt rows: timestamp tx ty tz qx qy qz qw
    poses_with_quat = np.loadtxt(input_directory / "groundtruth.txt")
    pose_timestamps = poses_with_quat[:, 0]
    pose_locations = poses_with_quat[:, 1:4]
    pose_quaternions = poses_with_quat[:, 4:]
    sequence = input_directory.split("/")[-1]
    current_output_dir = output_folder / sequence
    if os.path.isdir(current_output_dir):
        if os.path.exists("{}/poses.txt".format(current_output_dir)) and os.path.exists("{}/K.txt".format(current_output_dir)):
            # already exported completely; nothing to do
            return sequence
        else:
            # partial export from an interrupted run: redo from scratch
            shutil.rmtree(current_output_dir)
    os.mkdir(current_output_dir)
    os.mkdir(os.path.join(current_output_dir, "images"))
    os.mkdir(os.path.join(current_output_dir, "depth"))
    output_poses = []
    for i in range(len(depth_filenames)):
        depth_timestamp = depth_timestamps[i]
        # associate the temporally closest pose and RGB frame with this depth frame
        pose_index = get_closest_index(depth_timestamp, pose_timestamps)
        image_index = get_closest_index(depth_timestamp, image_timestamps)
        depth_filename = depth_filenames[i]
        image_filename = image_filenames[image_index]
        pose_location = pose_locations[pose_index]
        pose_quaternion = pose_quaternions[pose_index]
        rot = Rotation.from_quat(pose_quaternion).as_matrix()
        pose = np.eye(4)
        pose[0:3, 0:3] = rot
        pose[0:3, 3] = pose_location
        image = cv2.imread(image_filename, -1)
        # TUM stores depth at 5000 units per meter; /5 converts to millimeters
        depth = (cv2.imread(depth_filename, -1).astype(float) / 5).astype(np.uint16)
        output_poses.append(pose.ravel().tolist())
        cv2.imwrite("{}/images/{}.png".format(current_output_dir, str(i).zfill(6)), image, [cv2.IMWRITE_PNG_COMPRESSION, 3])
        cv2.imwrite("{}/depth/{}.png".format(current_output_dir, str(i).zfill(6)), depth, [cv2.IMWRITE_PNG_COMPRESSION, 3])
    output_poses = np.array(output_poses)
    np.savetxt("{}/poses.txt".format(current_output_dir), output_poses)
    np.savetxt("{}/K.txt".format(current_output_dir), K)
    return sequence
def main():
    """Export the hand-picked TUM RGB-D sequences with 6 parallel workers."""
    source_root = Path("/home/ardaduz/HDD/Downloads/tum-rgbd-raw")
    destination_root = Path("/media/ardaduz/T5/test/tumrgbd")
    # evaluation subset of the TUM RGB-D benchmark
    sequence_names = [
        "rgbd_dataset_freiburg1_desk",
        "rgbd_dataset_freiburg1_plant",
        "rgbd_dataset_freiburg1_room",
        "rgbd_dataset_freiburg1_teddy",
        "rgbd_dataset_freiburg2_desk",
        "rgbd_dataset_freiburg2_dishes",
        "rgbd_dataset_freiburg2_large_no_loop",
        "rgbd_dataset_freiburg3_cabinet",
        "rgbd_dataset_freiburg3_long_office_household",
        "rgbd_dataset_freiburg3_nostructure_notexture_far",
        "rgbd_dataset_freiburg3_nostructure_texture_far",
        "rgbd_dataset_freiburg3_structure_notexture_far",
        "rgbd_dataset_freiburg3_structure_texture_far",
    ]
    input_directories = [source_root / name for name in sequence_names]
    worker = partial(process_scene, output_folder=destination_root)
    pool = Pool(6)
    for finished_scene in pool.imap_unordered(worker, input_directories):
        print("finished", finished_scene)
if __name__ == '__main__':
main()
| 4,003 | 38.643564 | 127 | py |
deep-video-mvs | deep-video-mvs-master/dataset/augmented-iclnuim-export/iclnuim-export.py | import os
import shutil
from functools import partial
from multiprocessing.pool import Pool
import cv2
import numpy as np
from path import Path
def process_scene(input_directory, output_folder):
    """Export one Augmented ICL-NUIM sequence into the common images/depth/poses layout.

    Reads the ``<seq>-color`` JPEGs, ``<seq>-depth-clean`` PNGs and the
    ``<seq>-traj.txt`` trajectory (rows of 4 values; three consecutive header
    tokens separate 4x4 matrices), then re-saves color/depth as PNGs next to
    flattened poses and intrinsics.

    Skips sequences that were already fully exported (poses.txt and K.txt
    both present); a partial export is wiped and redone.

    :param input_directory: raw sequence prefix path (no extension)
    :param output_folder: destination root; output goes to output_folder/<sequence>
    :return: the sequence name (for progress reporting by the pool)
    """
    # For K, https://github.com/intel-isl/Open3D/issues/540
    K = np.array([[525.0, 0.0, 320.0],
                  [0.0, 525.0, 240.0],
                  [0.0, 0.0, 1.0]])
    print("processing", input_directory)
    image_filenames = sorted((input_directory + "-color").files("*.jpg"))
    depth_filenames = sorted((input_directory + "-depth-clean").files("*.png"))
    pose_filename = input_directory + "-traj.txt"
    f = open(pose_filename)
    lines = f.readlines()
    f.close()
    poses = []
    for line in lines:
        elements = line.strip("\n").split(" ")
        if len(elements) < 4:
            # header lines (frame indices) have fewer tokens than matrix rows
            continue
        poses.append(elements)
    # every 4 kept rows form one 4x4 camera-to-world matrix
    poses = np.array(poses, dtype=float).reshape((-1, 4, 4))
    sequence = input_directory.split("/")[-1]
    current_output_dir = output_folder / sequence
    if os.path.isdir(current_output_dir):
        if os.path.exists("{}/poses.txt".format(current_output_dir)) and os.path.exists("{}/K.txt".format(current_output_dir)):
            # already exported completely; nothing to do
            return sequence
        else:
            # partial export from an interrupted run: redo from scratch
            shutil.rmtree(current_output_dir)
    os.mkdir(current_output_dir)
    os.mkdir(os.path.join(current_output_dir, "images"))
    os.mkdir(os.path.join(current_output_dir, "depth"))
    output_poses = []
    for i in range(len(poses)):
        depth_filename = depth_filenames[i]
        image_filename = image_filenames[i]
        image = cv2.imread(image_filename, -1)
        depth = cv2.imread(depth_filename, -1)
        output_poses.append(poses[i].ravel().tolist())
        cv2.imwrite("{}/images/{}.png".format(current_output_dir, str(i).zfill(6)), image, [cv2.IMWRITE_PNG_COMPRESSION, 3])
        cv2.imwrite("{}/depth/{}.png".format(current_output_dir, str(i).zfill(6)), depth, [cv2.IMWRITE_PNG_COMPRESSION, 3])
    output_poses = np.array(output_poses)
    np.savetxt("{}/poses.txt".format(current_output_dir), output_poses)
    np.savetxt("{}/K.txt".format(current_output_dir), K)
    return sequence
def main():
    """Export the four Augmented ICL-NUIM sequences using 4 parallel workers.

    Iterating ``imap_unordered`` drains all results; the pool is then shut
    down cleanly before joining.
    """
    input_folder = Path("/home/ardaduz/HDD/Downloads/iclnuim-aug-raw")
    output_folder = Path("/media/ardaduz/T5/test/iclnuim")
    input_directories = [
        input_folder / "livingroom1",
        input_folder / "livingroom2",
        input_folder / "office1",
        input_folder / "office2"]
    pool = Pool(4)
    for finished_scene in pool.imap_unordered(partial(process_scene, output_folder=output_folder), input_directories):
        print("finished", finished_scene)
    # BUGFIX: close() must precede join() — calling join() on a pool that has
    # not been closed/terminated raises ValueError in multiprocessing.Pool.
    pool.close()
    pool.join()
if __name__ == '__main__':
main()
| 2,725 | 30.697674 | 127 | py |
deep-video-mvs | deep-video-mvs-master/dataset/7scenes-export/7scenes-export-depth.py | import shutil
import cv2
import numpy as np
from path import Path
# Each tuple: rendered-depth folder name followed by the sequence numbers
# to export from it (one or two sequences per scene).
scenes = [("7scenes_chess", "01", "02"),
          ("7scenes_fire", "01", "02"),
          ("7scenes_heads", "02"),
          ("7scenes_office", "01", "03"),
          ("7scenes_pumpkin", "03", "06"),
          ("7scenes_redkitchen", "01", "07"),
          ("7scenes_stairs", "02", "06")]
input_folder = Path("/home/ardaduz/HDD/deep-mvs-dataset/raw-data/7scenes-depth")
output_folder = Path("/media/ardaduz/T5/test/7scenes")
# Re-save the rendered depth maps as 16-bit PNGs under
# <output>/<room>-seq-<NN>/depth/NNNNNN.png, matching the color export layout.
for scene in scenes:
    if len(scene) == 3:
        folder_name, seq1, seq2 = scene
        seqs = [seq1, seq2]
    else:
        folder_name, seq1 = scene
        seqs = [seq1]
    scene_input_folder = input_folder / folder_name / "train" / "depth"
    for seq in seqs:
        files = sorted(scene_input_folder.files("seq" + seq + "*"))
        # "7scenes_chess" -> "chess"; output folder becomes e.g. "chess-seq-01"
        room_name = folder_name.split("_")[-1]
        scene_name = room_name + "-seq-" + seq
        scene_output_folder = output_folder / scene_name / 'depth'
        if scene_output_folder.exists():
            # wipe any previous export for this sequence
            shutil.rmtree(scene_output_folder)
        scene_output_folder.mkdir()
        for index, file in enumerate(files):
            depth = cv2.imread(file, -1)
            # round to integer millimeters and store as 16-bit PNG
            depth_uint = np.round(depth).astype(np.uint16)
            save_filename = scene_output_folder / (str(index).zfill(6) + ".png")
            cv2.imwrite(save_filename, depth_uint, [cv2.IMWRITE_PNG_COMPRESSION, 3])
| 1,434 | 33.166667 | 84 | py |
deep-video-mvs | deep-video-mvs-master/dataset/7scenes-export/7scenes-export-color.py | import os
import shutil
from multiprocessing.pool import Pool
import cv2
import numpy as np
from functools import partial
from path import Path
def process_scene(input_directory, output_folder):
    """Export the color frames and poses of one 7-Scenes sequence.

    Reads ``*color.png`` frames and per-frame ``*pose.txt`` 4x4 matrices,
    re-saves the images as zero-padded PNGs and the poses flattened into a
    single poses.txt; depth is exported separately by the depth script.

    Skips sequences that were already fully exported (poses.txt and K.txt
    both present); a partial export is wiped and redone.

    :param input_directory: raw sequence folder, e.g. .../chess/seq-01
    :param output_folder: destination root; output goes to output_folder/<scene>-<seq>
    :return: the scene name (for progress reporting by the pool)
    """
    # nominal intrinsics for the 640x480 Kinect frames
    K = np.array([[525.0, 0.0, 320.0],
                  [0.0, 525.0, 240.0],
                  [0.0, 0.0, 1.0]])
    print("processing", input_directory)
    image_filenames = sorted(input_directory.files("*color.png"))
    pose_filenames = sorted(input_directory.files("*pose.txt"))
    poses = []
    for pose_filename in pose_filenames:
        pose = np.loadtxt(pose_filename)
        poses.append(pose)
    # .../chess/seq-01 -> output folder "chess-seq-01"
    scene = input_directory.split("/")[-2]
    seq = input_directory.split("/")[-1]
    current_output_dir = output_folder / scene + "-" + seq
    if os.path.isdir(current_output_dir):
        if os.path.exists("{}/poses.txt".format(current_output_dir)) and os.path.exists("{}/K.txt".format(current_output_dir)):
            # already exported completely; nothing to do
            return scene
        else:
            # partial export from an interrupted run: redo from scratch
            shutil.rmtree(current_output_dir)
    os.mkdir(current_output_dir)
    os.mkdir(os.path.join(current_output_dir, "images"))
    output_poses = []
    for current_index in range(len(image_filenames)):
        image = cv2.imread(image_filenames[current_index])
        output_poses.append(poses[current_index].ravel().tolist())
        cv2.imwrite("{}/images/{}.png".format(current_output_dir, str(current_index).zfill(6)), image, [cv2.IMWRITE_PNG_COMPRESSION, 3])
    output_poses = np.array(output_poses)
    np.savetxt("{}/poses.txt".format(current_output_dir), output_poses)
    np.savetxt("{}/K.txt".format(current_output_dir), K)
    return scene
def main():
    """Export the 7-Scenes test sequences using 6 parallel workers.

    Iterating ``imap_unordered`` drains all results; the pool is then shut
    down cleanly before joining.
    """
    input_folder = Path("/home/ardaduz/HDD/deep-mvs-dataset/raw-data/7scenes-official")
    output_folder = Path("/media/ardaduz/T5/test/7scenes")
    input_directories = [
        input_folder / "redkitchen/seq-01",
        input_folder / "redkitchen/seq-07",
        input_folder / "chess/seq-01",
        input_folder / "chess/seq-02",
        input_folder / "heads/seq-02",
        input_folder / "fire/seq-01",
        input_folder / "fire/seq-02",
        input_folder / "office/seq-01",
        input_folder / "office/seq-03",
        input_folder / "pumpkin/seq-03",
        input_folder / "pumpkin/seq-06",
        input_folder / "stairs/seq-02",
        input_folder / "stairs/seq-06"]
    pool = Pool(6)
    for finished_scene in pool.imap_unordered(partial(process_scene, output_folder=output_folder), input_directories):
        print("finished", finished_scene)
    # BUGFIX: close() must precede join() — calling join() on a pool that has
    # not been closed/terminated raises ValueError in multiprocessing.Pool.
    pool.close()
    pool.join()
if __name__ == '__main__':
main()
| 2,596 | 31.4625 | 136 | py |
deep-video-mvs | deep-video-mvs-master/dataset/scannet-export/scannet-export.py | import os
import random
import numpy as np
from multiprocessing import Pool
import copy
import os
import struct
import zlib
from itertools import groupby
import cv2
import imageio
import numpy as np
import torch
COMPRESSION_TYPE_COLOR = {-1: 'unknown', 0: 'raw', 1: 'png', 2: 'jpeg'}
COMPRESSION_TYPE_DEPTH = {-1: 'unknown', 0: 'raw_ushort', 1: 'zlib_ushort', 2: 'occi_ushort'}
def process_color_image(color, depth, K_color, K_depth):
    """Resample the color image onto the depth camera's pixel grid.

    Builds the homography K_color @ inv(K_depth) mapping depth pixels to color
    pixels, then nearest-neighbor samples the color image at those locations
    with ``torch.nn.functional.grid_sample``. Out-of-bounds samples are zero.

    :param color: HxWx3 uint8 color image
    :param depth: depth map whose height/width define the output resolution
    :param K_color: 3x3 intrinsics of the color camera
    :param K_depth: 3x3 intrinsics of the depth camera
    :return: color image warped to the depth resolution, HxWx3 uint8
    """
    src_height, src_width = np.shape(color)[0:2]
    dst_height, dst_width = np.shape(depth)

    # homogeneous pixel coordinates of the destination (depth) grid
    xs = np.linspace(0, dst_width - 1, num=dst_width)
    ys = np.linspace(0, dst_height - 1, num=dst_height)
    grid_x, grid_y = np.meshgrid(xs, ys)
    homogeneous = np.stack((grid_x, grid_y, np.ones(shape=(dst_height, dst_width))), axis=-1)
    pixel_grid = torch.from_numpy(homogeneous).float().view(-1, 3).t().unsqueeze(0)

    # depth-pixel -> color-pixel homography (pure intrinsics change)
    homography = torch.from_numpy(K_color.dot(np.linalg.inv(K_depth))).float().unsqueeze(0)

    mapped = homography.bmm(pixel_grid).transpose(dim0=1, dim1=2)
    mapped = mapped[:, :, 0:2] / (mapped[:, :, 2].unsqueeze(-1) + 1e-8)
    mapped = mapped.view(1, dst_height, dst_width, 2)

    # normalize to grid_sample's [-1, 1] range using the source image extents
    half_width = src_width / 2.0
    half_height = src_height / 2.0
    mapped[:, :, :, 0] = (mapped[:, :, :, 0] - half_width) / half_width
    mapped[:, :, :, 1] = (mapped[:, :, :, 1] - half_height) / half_height

    source = torch.from_numpy(np.transpose(color, axes=(2, 0, 1))).float().unsqueeze(0)
    resampled = torch.nn.functional.grid_sample(input=source,
                                                grid=mapped,
                                                mode='nearest',
                                                padding_mode='zeros',
                                                align_corners=True)
    resampled = resampled.squeeze(0).numpy().astype(np.uint8)
    return np.transpose(resampled, axes=(1, 2, 0))
class RGBDFrame():
    """One frame record of a ScanNet .sens stream (Python 2 code).

    Holds the camera-to-world pose, color/depth timestamps and the raw
    compressed color (JPEG) and depth (zlib'd ushort) payloads.
    """

    def load(self, file_handle):
        """Read one frame record from the open binary .sens file handle."""
        self.camera_to_world = np.asarray(struct.unpack('f' * 16, file_handle.read(16 * 4)), dtype=np.float32).reshape(4, 4)
        self.timestamp_color = struct.unpack('Q', file_handle.read(8))[0]
        self.timestamp_depth = struct.unpack('Q', file_handle.read(8))[0]
        self.color_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
        self.depth_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
        # NOTE: joining 'c' unpack results into a str only works on Python 2,
        # where bytes and str are the same type.
        self.color_data = ''.join(struct.unpack('c' * self.color_size_bytes, file_handle.read(self.color_size_bytes)))
        self.depth_data = ''.join(struct.unpack('c' * self.depth_size_bytes, file_handle.read(self.depth_size_bytes)))
    def decompress_depth(self, compression_type):
        """Decompress the raw depth payload; only 'zlib_ushort' is supported."""
        if compression_type == 'zlib_ushort':
            return self.decompress_depth_zlib()
        else:
            # NOTE(review): bare `raise` with no active exception is itself an
            # error; effectively "unsupported compression type" aborts here.
            raise
    def decompress_depth_zlib(self):
        # returns the raw little-endian uint16 depth buffer
        return zlib.decompress(self.depth_data)
    def decompress_color(self, compression_type):
        """Decode the raw color payload; only 'jpeg' is supported."""
        if compression_type == 'jpeg':
            return self.decompress_color_jpeg()
        else:
            # see note in decompress_depth about the bare raise
            raise
    def decompress_color_jpeg(self):
        return imageio.imread(self.color_data)
def find_longest_reliable_subsequence(is_ok):
    """Find the longest contiguous run of truthy values in *is_ok*.

    :param is_ok: iterable of boolean-like flags
    :return: ``[start, end)`` indices of the longest truthy run, or ``None``
        when no truthy value exists (including empty input). Ties keep the
        earliest run.
    """
    longest_interval_length = 0
    longest_interval = None
    index = 0
    # groupby collapses consecutive equal flags into (value, run) pairs
    for flag, run in groupby(is_ok):
        # sum() counts the run lazily; no need to materialize it as a list
        length = sum(1 for _ in run)
        if flag and length > longest_interval_length:
            longest_interval_length = length
            longest_interval = [index, index + length]
        index += length
    return longest_interval
class SensorData:
    """Parser/exporter for ScanNet .sens binary streams (Python 2 code).

    On construction the whole stream is parsed: header (sensor name,
    color/depth intrinsics+extrinsics, compression types, resolutions,
    depth shift) followed by all frame records.
    """

    def __init__(self, filename):
        # only .sens format version 4 is supported
        self.version = 4
        with open(filename, 'rb') as f:
            version = struct.unpack('I', f.read(4))[0]
            assert self.version == version
            strlen = struct.unpack('Q', f.read(8))[0]
            # Python-2-only join of unpacked chars (see RGBDFrame.load)
            self.sensor_name = ''.join(struct.unpack('c' * strlen, f.read(strlen)))
            # 4x4 matrices stored row-major as 16 float32s each
            self.intrinsic_color = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.extrinsic_color = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.intrinsic_depth = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.extrinsic_depth = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.color_compression_type = COMPRESSION_TYPE_COLOR[struct.unpack('i', f.read(4))[0]]
            self.depth_compression_type = COMPRESSION_TYPE_DEPTH[struct.unpack('i', f.read(4))[0]]
            self.color_width = struct.unpack('I', f.read(4))[0]
            self.color_height = struct.unpack('I', f.read(4))[0]
            self.depth_width = struct.unpack('I', f.read(4))[0]
            self.depth_height = struct.unpack('I', f.read(4))[0]
            self.depth_shift = struct.unpack('f', f.read(4))[0]
            self.num_frames = struct.unpack('Q', f.read(8))[0]
            self.frames = []
            for i in range(self.num_frames):
                frame = RGBDFrame()
                frame.load(f)
                self.frames.append(frame)
    def export_train(self, output_path, frame_skip):
        """Export every *frame_skip*-th frame as compressed .npz (image+depth),
        plus poses.txt and K.txt. Frames with invalid poses are skipped."""
        counter = 0
        poses = []
        for index in range(0, len(self.frames), frame_skip):
            pose = self.frames[index].camera_to_world
            if np.any(np.isnan(pose)) or np.any(np.isinf(pose)) or np.any(np.isneginf(pose)):
                print("Pose NaN, Inf or -Inf encountered!, Skipping...")
                continue
            poses.append(np.ravel(pose).tolist())
            depth = self.frames[index].decompress_depth(self.depth_compression_type)
            # np.fromstring is deprecated in favor of np.frombuffer (py2 code)
            depth = np.fromstring(depth, dtype=np.uint16).reshape(self.depth_height, self.depth_width)
            color = self.frames[index].decompress_color(self.color_compression_type)
            # align the color image to the depth camera's pixel grid
            color = process_color_image(color=color,
                                        depth=depth,
                                        K_color=self.intrinsic_color[0:3, 0:3],
                                        K_depth=self.intrinsic_depth[0:3, 0:3])
            output_file = os.path.join(output_path, str(counter).zfill(6))
            np.savez_compressed(output_file, image=color, depth=depth)
            counter += 1
        np.savetxt(fname=os.path.join(output_path, "poses.txt"), X=np.array(poses), fmt='%.8e')
        np.savetxt(fname=os.path.join(output_path, "K.txt"), X=self.intrinsic_depth[0:3, 0:3])
    def export_test(self, output_path, frame_skip):
        """Export frames as PNGs under images/ and depth/, plus poses.txt and
        K.txt. Unlike export_train, ALL poses are written (no validity check)
        while only every *frame_skip*-th frame's images are exported."""
        poses = []
        for f in range(0, len(self.frames)):
            pose = self.frames[f].camera_to_world
            poses.append(np.ravel(pose).tolist())
        poses = np.array(poses)
        np.savetxt(fname=os.path.join(output_path, "poses.txt"), X=poses, fmt='%.8e')
        np.savetxt(fname=os.path.join(output_path, "K.txt"), X=self.intrinsic_depth[0:3, 0:3])
        print 'exporting', self.num_frames // frame_skip, ' frames to', output_path
        image_folder = os.path.join(output_path, 'images')
        depth_folder = os.path.join(output_path, 'depth')
        os.mkdir(image_folder)
        os.mkdir(depth_folder)
        for f in range(0, self.num_frames, frame_skip):
            depth = self.frames[f].decompress_depth(self.depth_compression_type)
            depth = np.fromstring(depth, dtype=np.uint16).reshape(self.depth_height, self.depth_width)
            color = self.frames[f].decompress_color(self.color_compression_type)
            color = process_color_image(color=color,
                                        depth=depth,
                                        K_color=self.intrinsic_color[0:3, 0:3],
                                        K_depth=self.intrinsic_depth[0:3, 0:3])
            # imageio decodes RGB; OpenCV writes BGR, so convert before saving
            color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
            cv2.imwrite(os.path.join(image_folder, str(f).zfill(6) + '.png'), color, [cv2.IMWRITE_PNG_COMPRESSION, 3])
            cv2.imwrite(os.path.join(depth_folder, str(f).zfill(6) + '.png'), depth, [cv2.IMWRITE_PNG_COMPRESSION, 3])
def export_samples(scene_path):
    """Pool worker: export one ScanNet scene from its .sens file.

    Uses the module-level globals ``output_path``, ``is_train`` and
    ``frame_skip``. An existing output directory is treated as "already done"
    and skipped entirely (no completeness check).
    """
    scene_name = scene_path.split("/")[-1]
    scene_output_path = os.path.join(output_path, scene_name)
    if not os.path.exists(scene_output_path):
        # load the data
        print 'loading sensor data for %s...' % scene_path
        sd = SensorData(os.path.join(scene_path, scene_name + ".sens"))
        os.mkdir(scene_output_path)
        if is_train:
            sd.export_train(scene_output_path, frame_skip=frame_skip)
        else:
            sd.export_test(scene_output_path, frame_skip=frame_skip)
    else:
        print 'existing scene %s, skipping...' % scene_path
def sanity_check_test():
    """Verify each exported test scene has equal image/depth/pose counts.

    Reads the module-level ``output_path`` and prints the name of any scene
    whose images/, depth/ and poses.txt disagree in length.
    """
    exported_scenes = sorted(os.listdir(output_path))
    for exported_scene in exported_scenes:
        n_images = len(os.listdir(os.path.join(output_path, exported_scene, "images")))
        n_depths = len(os.listdir(os.path.join(output_path, exported_scene, "depth")))
        n_poses = len(np.loadtxt(os.path.join(output_path, exported_scene, "poses.txt")))
        if n_images != n_poses or n_images != n_depths or n_depths != n_poses:
            print exported_scene, "is problematic"
def sanity_check_train():
    """Verify each exported train scene has one .npz per pose.

    A scene folder holds N sample files plus poses.txt and K.txt, so the
    expected relation is n_files - 2 == n_poses. Prints mismatching scenes.
    """
    exported_scenes = sorted(os.listdir(output_path))
    for exported_scene in exported_scenes:
        if ".txt" not in exported_scene:
            n_poses = len(np.loadtxt(os.path.join(output_path, exported_scene, "poses.txt")))
            K = np.loadtxt(os.path.join(output_path, exported_scene, "K.txt"))
            n_files = len(os.listdir(os.path.join(output_path, exported_scene)))
            if n_files - 2 != n_poses:
                print exported_scene, "is problematic"
# --- script configuration (read as globals by the pool workers) ---
frame_skip = 1          # export every frame_skip-th frame
is_train = False        # train layout (.npz samples) vs test layout (PNG folders)
is_sanity_check = False # only run the consistency checks, no export
if is_train:
    input_path = "/home/ardaduz/HDD/Downloads/ScanNet/scans"
    output_path = "/media/ardaduz/T5/train"
else:
    input_path = "/home/ardaduz/HDD/Downloads/ScanNet/scans_test"
    output_path = "/media/ardaduz/T5/test/scannet"

if __name__ == '__main__':
    if is_sanity_check:
        if is_train:
            sanity_check_train()
        else:
            sanity_check_test()
        exit(0)
    sequence_names = sorted(os.listdir(input_path))
    if is_train:
        # group sequences ("sceneXXXX_YY") by scene so the train/validation
        # split never puts sequences of the same scene on both sides
        scene_names_dict = dict()
        for sequence_name in sequence_names:
            scene_name, idx = sequence_name.split('_')
            if scene_name in scene_names_dict:
                scene_names_dict[scene_name].append(idx)
            else:
                scene_names_dict[scene_name] = [idx]
        scene_names = scene_names_dict.keys()
        # fixed seed for a reproducible 90/10 split
        random.seed(123)
        random.shuffle(scene_names)
        n_scenes = len(scene_names)
        n_training = int(n_scenes * 0.9)
        training_scenes = scene_names[:n_training]
        validation_scenes = scene_names[n_training:]
        training_sequences = []
        for training_scene in training_scenes:
            idxs = scene_names_dict[training_scene]
            for idx in idxs:
                training_sequences.append(training_scene + "_" + idx)
        validation_sequences = []
        for validation_scene in validation_scenes:
            idxs = scene_names_dict[validation_scene]
            for idx in idxs:
                validation_sequences.append(validation_scene + "_" + idx)
        np.savetxt(os.path.join(output_path, "train.txt"), np.array(training_sequences), fmt='%s')
        np.savetxt(os.path.join(output_path, "validation.txt"), np.array(validation_sequences), fmt='%s')
    sequence_names.sort()
    sequence_paths = []
    for index, sequence_name in enumerate(sequence_names):
        sequence_paths.append(os.path.join(input_path, sequence_name))
    pool = Pool(6)
    pool.map(export_samples, sequence_paths)
    # BUGFIX: close() must precede join() — calling join() on a pool that has
    # not been closed/terminated raises ValueError in multiprocessing.Pool.
    pool.close()
    pool.join()
| 12,059 | 40.586207 | 124 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/keyframe_buffer.py | from collections import deque
import numpy as np
from dvmvs.utils import is_pose_available, pose_distance
class KeyframeBuffer:
    """Bounded buffer of keyframes with pose-distance-based selection.

    New frames are admitted only when their pose has moved at least
    ``keyframe_pose_distance`` (combined rotation+translation measure) away
    from the last kept keyframe. Measurement frames for a reference frame are
    the buffered keyframes with the lowest pose penalty relative to it.
    """

    def __init__(self, buffer_size, keyframe_pose_distance, optimal_t_score, optimal_R_score, store_return_indices):
        # deque drops the oldest keyframe automatically once full
        self.buffer = deque([], maxlen=buffer_size)
        self.keyframe_pose_distance = keyframe_pose_distance
        self.optimal_t_score = optimal_t_score
        self.optimal_R_score = optimal_R_score
        # consecutive frames without a valid pose; reset on every valid pose
        self.__tracking_lost_counter = 0
        self.__store_return_indices = store_return_indices # mostly required for simulation of the frame selection
    def calculate_penalty(self, t_score, R_score):
        """Penalty of a candidate measurement frame: squared deviation from the
        optimal rotation/translation scores; too-small translation is penalized
        5x harder than too-large (nearly identical viewpoints are useless)."""
        degree = 2.0
        R_penalty = np.abs(R_score - self.optimal_R_score) ** degree
        t_diff = t_score - self.optimal_t_score
        if t_diff < 0.0:
            t_penalty = 5.0 * (np.abs(t_diff) ** degree)
        else:
            t_penalty = np.abs(t_diff) ** degree
        return R_penalty + t_penalty
    def try_new_keyframe(self, pose, image, index=None):
        """Offer a frame to the buffer; returns a status code 0-5.

        0: first keyframe accepted (buffer was empty, no prediction yet)
        1: keyframe accepted, prediction should follow
        2: pose valid but too close to the last keyframe, frame ignored
        3: tracking just declared lost (no pose for >30 frames), buffer cleared
        4: still lost (buffer already empty)
        5: pose missing, but not yet long enough to declare tracking lost
        """
        if self.__store_return_indices and index is None:
            raise ValueError("Storing and returning the frame indices is requested in the constructor, but index=None is passed to the function")
        if is_pose_available(pose):
            self.__tracking_lost_counter = 0
            if len(self.buffer) == 0:
                if self.__store_return_indices:
                    self.buffer.append((pose, image, index))
                else:
                    self.buffer.append((pose, image))
                return 0 # pose is available, new frame added but buffer was empty, this is the first frame, no depth map prediction will be done
            else:
                if self.__store_return_indices:
                    last_pose, last_image, last_index = self.buffer[-1]
                else:
                    last_pose, last_image = self.buffer[-1]
                combined_measure, R_measure, t_measure = pose_distance(pose, last_pose)
                if combined_measure >= self.keyframe_pose_distance:
                    if self.__store_return_indices:
                        self.buffer.append((pose, image, index))
                    else:
                        self.buffer.append((pose, image))
                    return 1 # pose is available, new frame added, everything is perfect, and we will predict a depth map later
                else:
                    return 2 # pose is available but not enough change has happened since the last keyframe
        else:
            self.__tracking_lost_counter += 1
            if self.__tracking_lost_counter > 30:
                if len(self.buffer) > 0:
                    self.buffer.clear()
                    return 3 # a pose reading has not arrived for over a second, tracking is now lost
                else:
                    return 4 # we are still very lost
            else:
                return 5 # pose is not available right now, but not enough time has passed to consider lost, there is still hope :)
    def get_best_measurement_frames(self, n_requested_measurement_frames):
        """Return up to n buffered keyframes with the lowest pose penalty
        relative to the newest keyframe (the reference)."""
        buffer_array = list(self.buffer)
        if self.__store_return_indices:
            reference_pose, reference_image, reference_index = buffer_array[-1]
        else:
            reference_pose, reference_image = buffer_array[-1]
        # never request more frames than are available besides the reference
        n_requested_measurement_frames = min(n_requested_measurement_frames, len(buffer_array) - 1)
        penalties = []
        for i in range(len(buffer_array) - 1):
            measurement_pose = buffer_array[i][0]
            combined_measure, R_measure, t_measure = pose_distance(reference_pose, measurement_pose)
            penalty = self.calculate_penalty(t_measure, R_measure)
            penalties.append(penalty)
        # argpartition: k smallest penalties without a full sort (order arbitrary)
        indices = np.argpartition(penalties, n_requested_measurement_frames - 1)[:n_requested_measurement_frames]
        measurement_frames = []
        for index in indices:
            measurement_frames.append(buffer_array[index])
        return measurement_frames
class SimpleBuffer:
    """Sliding-window frame buffer without keyframe selection.

    Every frame with a valid pose is kept; the newest frame acts as the
    reference and the rest of the window are the measurement frames. Mirrors
    KeyframeBuffer's tracking-lost handling with its own status codes.
    """

    def __init__(self, buffer_size, store_return_indices):
        # one extra slot so the reference frame fits next to the measurements
        self.buffer = deque([], maxlen=buffer_size + 1)
        self.__tracking_lost_counter = 0
        self.__store_return_indices = store_return_indices # mostly required for simulation of the frame selection

    def try_new_keyframe(self, pose, image, index=None):
        """Offer a frame to the buffer; returns a status code 0-4.

        0: first frame accepted (buffer was empty, no prediction yet)
        1: frame accepted, prediction should follow
        2: tracking just declared lost (no pose for >30 frames), buffer cleared
        3: still lost (buffer already empty)
        4: pose missing, but not yet long enough to declare tracking lost
        """
        if self.__store_return_indices and index is None:
            raise ValueError("Storing and returning the frame indices is requested in the constructor, but index=None is passed to the function")
        if not is_pose_available(pose):
            self.__tracking_lost_counter += 1
            if self.__tracking_lost_counter <= 30:
                return 4  # pose missing, but too early to declare tracking lost
            if len(self.buffer) > 0:
                self.buffer.clear()
                return 2  # no pose for over a second: tracking lost, buffer flushed
            return 3  # still very lost
        self.__tracking_lost_counter = 0
        was_empty = len(self.buffer) == 0
        if self.__store_return_indices:
            self.buffer.append((pose, image, index))
        else:
            self.buffer.append((pose, image))
        return 0 if was_empty else 1

    def get_measurement_frames(self):
        """All buffered frames except the newest one (the reference)."""
        return list(self.buffer)[:-1]
| 6,058 | 45.607692 | 146 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/losses.py | from __future__ import division
import torch
from torch import nn
class LossMeter(object):
    """Running accumulator for a summed loss.

    Tracks the global average over all counted items (``avg``) as well as the
    per-item average of the most recent update (``item_average``).
    """

    def __init__(self):
        self.count = 0.0         # total number of items seen so far
        self.sum = 0.0           # total accumulated loss
        self.avg = 0.0           # sum / count over all updates
        self.item_average = 0.0  # per-item average of the latest update only

    def update(self, loss, count):
        """Accumulate a summed *loss* computed over *count* items."""
        self.sum = self.sum + loss
        self.count = self.count + count
        self.avg = self.sum / self.count
        self.item_average = loss / count

    def __repr__(self):
        return f'{self.item_average:.4f} ({self.avg:.4f})'
def update_losses(predictions, weights, groundtruth, is_training, l1_meter, huber_meter, l1_inv_meter, l1_rel_meter, loss_type):
    """Accumulate the optimizer loss and update the running loss meters.

    During training, each prediction scale contributes its selected loss type
    weighted by ``weights[j]``; during evaluation only the final prediction is
    scored. In both cases the meters are updated with the losses of the LAST
    prediction processed (the loop leaves its final iteration's values in the
    sample_* variables).

    :param predictions: list of depth predictions, coarse to fine
    :param weights: per-scale weights for the optimizer loss (training only)
    :param groundtruth: target depth map, (batch, H, W)
    :param is_training: True to build a weighted multi-scale optimizer loss
    :param l1_meter, huber_meter, l1_inv_meter, l1_rel_meter: LossMeter objects
    :param loss_type: one of "L1", "L1-inv", "L1-rel", "Huber"
    :return: scalar optimizer loss (0 when not training)
    """
    optimizer_loss = 0
    sample_l1_loss, sample_huber_loss, sample_l1_inv_loss, sample_l1_rel_loss, sample_valid_count = None, None, None, None, None
    if is_training:
        for j, prediction in enumerate(predictions):
            sample_l1_loss, sample_huber_loss, sample_l1_inv_loss, sample_l1_rel_loss, sample_valid_count = \
                calculate_loss(groundtruth=groundtruth, prediction=prediction)
            # losses come back summed; divide by valid pixel count to average
            if loss_type == "L1":
                optimizer_loss = optimizer_loss + weights[j] * (sample_l1_loss / sample_valid_count)
            elif loss_type == "L1-inv":
                optimizer_loss = optimizer_loss + weights[j] * (sample_l1_inv_loss / sample_valid_count)
            elif loss_type == "L1-rel":
                optimizer_loss = optimizer_loss + weights[j] * (sample_l1_rel_loss / sample_valid_count)
            elif loss_type == "Huber":
                optimizer_loss = optimizer_loss + weights[j] * (sample_huber_loss / sample_valid_count)
    else:
        # evaluation: only the finest (last) prediction is scored
        sample_l1_loss, sample_huber_loss, sample_l1_inv_loss, sample_l1_rel_loss, sample_valid_count = calculate_loss(groundtruth=groundtruth,
                                                                                                                      prediction=predictions[-1])
    l1_meter.update(sample_l1_loss.item(), sample_valid_count)
    huber_meter.update(sample_huber_loss.item(), sample_valid_count)
    l1_inv_meter.update(sample_l1_inv_loss.item(), sample_valid_count)
    l1_rel_meter.update(sample_l1_rel_loss.item(), sample_valid_count)
    return optimizer_loss
def calculate_loss(groundtruth, prediction):
    """Compute summed masked depth losses at the prediction's resolution.

    The groundtruth is nearest-neighbor resized to the prediction's size and
    zero-depth pixels are masked out. All losses are SUMS over the valid
    pixels; divide by the returned count to obtain means.

    :param groundtruth: target depth, (batch, H_gt, W_gt)
    :param prediction: predicted depth, (batch, H_pred, W_pred)
    :return: (l1, smooth_l1, l1_inverse, l1_relative, valid_pixel_count)
    """
    batch_size, gt_height, gt_width = groundtruth.size()
    gt = groundtruth.view(batch_size, 1, gt_height, gt_width)

    batch_size, pred_height, pred_width = prediction.size()
    pred = prediction.view(batch_size, 1, pred_height, pred_width)

    # bring the groundtruth down/up to the prediction's resolution
    gt_resized = nn.functional.interpolate(gt, size=(pred_height, pred_width), mode='nearest')

    # zero depth marks missing measurements
    valid_mask = gt_resized != 0
    valid_count = valid_mask.nonzero().size()[0]

    gt_values = gt_resized[valid_mask]
    pred_values = pred[valid_mask]

    absolute_difference = torch.abs(gt_values - pred_values)
    huber_loss = torch.sum(torch.nn.functional.smooth_l1_loss(pred_values, gt_values, reduction='none'))
    l1_loss = torch.sum(absolute_difference)
    l1_inv_loss = torch.sum(torch.abs(1.0 / gt_values - 1.0 / pred_values))
    l1_rel_loss = torch.sum(absolute_difference / gt_values)
    return l1_loss, huber_loss, l1_inv_loss, l1_rel_loss, valid_count
| 3,529 | 41.53012 | 146 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/errors.py | import numpy as np
def compute_errors(gt, pred, max_depth=np.inf):
    """Compute the standard depth-estimation error metrics.

    Pixels with groundtruth below 0.5 or above ``max_depth`` are ignored.

    :param gt: groundtruth depth array
    :param pred: predicted depth array, same shape as *gt*
    :param max_depth: upper bound of the evaluated depth range
    :return: tuple of (abs_error, abs_relative_error, abs_inverse_error,
        squared_relative_error, rmse, delta<1.25, delta<1.25^2, delta<1.25^3),
        or eight NaNs when no pixel is valid
    """
    valid = (gt >= 0.5) & (gt <= max_depth)
    gt = gt[valid]
    pred = pred[valid]

    # computed once and reused for the inlier ratios below
    # (the original recomputed it redundantly after masking)
    n_valid = np.float32(len(gt))
    if n_valid == 0:
        return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan

    differences = gt - pred
    abs_differences = np.abs(differences)
    squared_differences = np.square(differences)

    abs_error = np.mean(abs_differences)
    abs_relative_error = np.mean(abs_differences / gt)
    abs_inverse_error = np.mean(np.abs(1 / gt - 1 / pred))
    squared_relative_error = np.mean(squared_differences / gt)
    rmse = np.sqrt(np.mean(squared_differences))

    # symmetric per-pixel ratio; delta < 1.25^k inlier percentages
    ratios = np.maximum(gt / pred, pred / gt)
    ratio_125 = np.count_nonzero(ratios < 1.25) / n_valid
    ratio_125_2 = np.count_nonzero(ratios < 1.25 ** 2) / n_valid
    ratio_125_3 = np.count_nonzero(ratios < 1.25 ** 3) / n_valid
    return abs_error, abs_relative_error, abs_inverse_error, squared_relative_error, rmse, ratio_125, ratio_125_2, ratio_125_3
def sanity_check_compute_errors():
    """Eyeball-check compute_errors on a noisy constant-depth map."""
    groundtruth = np.ones((256, 256), dtype=float) / 2.0
    noise = np.random.normal(loc=0.0, scale=0.1, size=(np.shape(groundtruth)))
    print(compute_errors(groundtruth, groundtruth + noise))
if __name__ == '__main__':
sanity_check_compute_errors()
| 1,369 | 34.128205 | 126 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/simulate_keyframe_buffer.py | import numpy as np
from path import Path
from dvmvs.keyframe_buffer import KeyframeBuffer, SimpleBuffer
def simulate_keyframe_buffer(test_dataset_path, output_folder, n_measurement_frames):
test_dataset_path = Path(test_dataset_path)
scene_folders = sorted(test_dataset_path.listdir())
test_keyframe_buffer_size = 30
test_keyframe_pose_distance = 0.1
test_optimal_t_measure = 0.15
test_optimal_R_measure = 0.0
for scene_index in range(0, len(scene_folders)):
scene_folder = scene_folders[scene_index]
scene = scene_folder.split("/")[-1]
print("Simulating scene:", scene, " - ", scene_index, "/", len(scene_folders))
keyframe_buffer = KeyframeBuffer(buffer_size=test_keyframe_buffer_size,
keyframe_pose_distance=test_keyframe_pose_distance,
optimal_t_score=test_optimal_t_measure,
optimal_R_score=test_optimal_R_measure,
store_return_indices=True)
poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
image_filenames = sorted((scene_folder / 'images').files("*.png"))
output_lines = []
for i in range(0, len(poses)):
reference_pose = poses[i]
# POLL THE KEYFRAME BUFFER
response = keyframe_buffer.try_new_keyframe(reference_pose, None, index=i)
if response == 3:
output_lines.append("TRACKING LOST")
elif response == 1:
measurement_frames = keyframe_buffer.get_best_measurement_frames(n_measurement_frames)
output_line = image_filenames[i].split("/")[-1]
for (measurement_pose, measurement_image, measurement_index) in measurement_frames:
output_line += (" " + image_filenames[measurement_index].split("/")[-1])
output_line = output_line.strip(" ")
output_lines.append(output_line)
output_lines = np.array(output_lines)
dataset_name = test_dataset_path.split("/")[-1]
np.savetxt('{}/keyframe+{}+{}+nmeas+{}'.format(output_folder, dataset_name, scene, n_measurement_frames), output_lines, fmt='%s')
def simulate_simple_buffer(test_dataset_path, output_folder, n_skip, n_measurement_frames):
test_dataset_path = Path(test_dataset_path)
scene_folders = sorted(test_dataset_path.listdir())
for scene_index in range(0, len(scene_folders)):
scene_folder = scene_folders[scene_index]
scene = scene_folder.split("/")[-1]
print("Simulating scene:", scene, " - ", scene_index, "/", len(scene_folders))
simple_buffer = SimpleBuffer(n_measurement_frames, store_return_indices=True)
poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
image_filenames = sorted((scene_folder / 'images').files("*.png"))
output_lines = []
i = 0
while i < len(poses):
reference_pose = poses[i]
# POLL THE KEYFRAME BUFFER
response = simple_buffer.try_new_keyframe(reference_pose, None, index=i)
if response == 0:
i += n_skip
continue
elif response == 2:
output_lines.append("TRACKING LOST")
i += 1
continue
elif response == 3 or response == 4:
i += 1
else:
measurement_frames = simple_buffer.get_measurement_frames()
output_line = image_filenames[i].split("/")[-1]
for (measurement_pose, measurement_image, measurement_index) in measurement_frames:
output_line += (" " + image_filenames[measurement_index].split("/")[-1])
output_line = output_line.strip(" ")
output_lines.append(output_line)
i += n_skip
output_lines = np.array(output_lines)
dataset_name = test_dataset_path.split("/")[-1]
n_skip_str = str(n_skip)
np.savetxt('{}/simple{}+{}+{}+nmeas+{}'.format(output_folder, n_skip_str, dataset_name, scene, n_measurement_frames), output_lines,
fmt='%s')
def main():
output_folder = "../sample-data/indices"
test_dataset_path = "../sample-data/hololens-dataset"
simulate_keyframe_buffer(test_dataset_path, output_folder, n_measurement_frames=1)
simulate_keyframe_buffer(test_dataset_path, output_folder, n_measurement_frames=2)
simulate_keyframe_buffer(test_dataset_path, output_folder, n_measurement_frames=3)
# for evaluation of simple selection (comment out the rest if only our keyframe selection method is desired)
simulate_simple_buffer(test_dataset_path, output_folder, n_skip=10, n_measurement_frames=2)
simulate_simple_buffer(test_dataset_path, output_folder, n_skip=20, n_measurement_frames=2)
main()
| 5,005 | 42.155172 | 139 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/utils.py | from __future__ import division
import os
import zipfile
import cv2
import kornia
import numpy as np
import torch
from path import Path
from pytorch3d import structures, renderer
from dvmvs.errors import compute_errors
# GEOMETRIC UTILS
def pose_distance(reference_pose, measurement_pose):
"""
:param reference_pose: 4x4 numpy array, reference frame camera-to-world pose (not extrinsic matrix!)
:param measurement_pose: 4x4 numpy array, measurement frame camera-to-world pose (not extrinsic matrix!)
:return combined_measure: float, combined pose distance measure
:return R_measure: float, rotation distance measure
:return t_measure: float, translation distance measure
"""
rel_pose = np.dot(np.linalg.inv(reference_pose), measurement_pose)
R = rel_pose[:3, :3]
t = rel_pose[:3, 3]
R_measure = np.sqrt(2 * (1 - min(3.0, np.matrix.trace(R)) / 3))
t_measure = np.linalg.norm(t)
combined_measure = np.sqrt(t_measure ** 2 + R_measure ** 2)
return combined_measure, R_measure, t_measure
def get_warp_grid_for_cost_volume_calculation(width, height, device):
x = np.linspace(0, width - 1, num=int(width))
y = np.linspace(0, height - 1, num=int(height))
ones = np.ones(shape=(height, width))
x_grid, y_grid = np.meshgrid(x, y)
warp_grid = np.stack((x_grid, y_grid, ones), axis=-1)
warp_grid = torch.from_numpy(warp_grid).float()
warp_grid = warp_grid.view(-1, 3).t().to(device)
return warp_grid
def calculate_cost_volume_by_warping(image1, image2, pose1, pose2, K, warp_grid, min_depth, max_depth, n_depth_levels, device, dot_product):
batch_size, channels, height, width = image1.size()
warp_grid = torch.cat(batch_size * [warp_grid.unsqueeze(dim=0)])
cost_volume = torch.empty(size=(batch_size, n_depth_levels, height, width), dtype=torch.float32).to(device)
extrinsic2 = torch.inverse(pose2).bmm(pose1)
R = extrinsic2[:, 0:3, 0:3]
t = extrinsic2[:, 0:3, 3].unsqueeze(-1)
Kt = K.bmm(t)
K_R_Kinv = K.bmm(R).bmm(torch.inverse(K))
K_R_Kinv_UV = K_R_Kinv.bmm(warp_grid)
inverse_depth_base = 1.0 / max_depth
inverse_depth_step = (1.0 / min_depth - 1.0 / max_depth) / (n_depth_levels - 1)
width_normalizer = width / 2.0
height_normalizer = height / 2.0
for depth_i in range(n_depth_levels):
this_depth = 1 / (inverse_depth_base + depth_i * inverse_depth_step)
warping = K_R_Kinv_UV + (Kt / this_depth)
warping = warping.transpose(dim0=1, dim1=2)
warping = warping[:, :, 0:2] / (warping[:, :, 2].unsqueeze(-1) + 1e-8)
warping = warping.view(batch_size, height, width, 2)
warping[:, :, :, 0] = (warping[:, :, :, 0] - width_normalizer) / width_normalizer
warping[:, :, :, 1] = (warping[:, :, :, 1] - height_normalizer) / height_normalizer
warped_image2 = torch.nn.functional.grid_sample(input=image2,
grid=warping,
mode='bilinear',
padding_mode='zeros',
align_corners=True)
if dot_product:
cost_volume[:, depth_i, :, :] = torch.sum(image1 * warped_image2, dim=1) / channels
else:
cost_volume[:, depth_i, :, :] = torch.sum(torch.abs(image1 - warped_image2), dim=1)
return cost_volume
def cost_volume_fusion(image1, image2s, pose1, pose2s, K, warp_grid, min_depth, max_depth, n_depth_levels, device, dot_product):
batch_size, channels, height, width = image1.size()
fused_cost_volume = torch.zeros(size=(batch_size, n_depth_levels, height, width), dtype=torch.float32).to(device)
for pose2, image2 in zip(pose2s, image2s):
cost_volume = calculate_cost_volume_by_warping(image1=image1,
image2=image2,
pose1=pose1,
pose2=pose2,
K=K,
warp_grid=warp_grid,
min_depth=min_depth,
max_depth=max_depth,
n_depth_levels=n_depth_levels,
device=device,
dot_product=dot_product)
fused_cost_volume += cost_volume
fused_cost_volume /= len(pose2s)
return fused_cost_volume
def get_non_differentiable_rectangle_depth_estimation(reference_pose_torch,
measurement_pose_torch,
previous_depth_torch,
full_K_torch,
half_K_torch,
original_width,
original_height):
batch_size, _, _ = reference_pose_torch.shape
half_width = int(original_width / 2)
half_height = int(original_height / 2)
trans = torch.bmm(torch.inverse(reference_pose_torch), measurement_pose_torch)
points_3d_src = kornia.depth_to_3d(previous_depth_torch, full_K_torch, normalize_points=False)
points_3d_src = points_3d_src.permute(0, 2, 3, 1)
points_3d_dst = kornia.transform_points(trans[:, None], points_3d_src)
points_3d_dst = points_3d_dst.view(batch_size, -1, 3)
z_values = points_3d_dst[:, :, -1]
z_values = torch.relu(z_values)
sorting_indices = torch.argsort(z_values, descending=True)
z_values = torch.gather(z_values, dim=1, index=sorting_indices)
sorting_indices_for_points = torch.stack([sorting_indices] * 3, dim=-1)
points_3d_dst = torch.gather(points_3d_dst, dim=1, index=sorting_indices_for_points)
projections = torch.round(kornia.project_points(points_3d_dst, half_K_torch.unsqueeze(1))).long()
is_valid_below = (projections[:, :, 0] >= 0) & (projections[:, :, 1] >= 0)
is_valid_above = (projections[:, :, 0] < half_width) & (projections[:, :, 1] < half_height)
is_valid = is_valid_below & is_valid_above
depth_hypothesis = torch.zeros(size=(batch_size, 1, half_height, half_width)).cuda()
for projection_index in range(0, batch_size):
valid_points_zs = z_values[projection_index][is_valid[projection_index]]
valid_projections = projections[projection_index][is_valid[projection_index]]
i_s = valid_projections[:, 1]
j_s = valid_projections[:, 0]
ij_combined = i_s * half_width + j_s
_, ij_combined_unique_indices = np.unique(ij_combined.cpu().numpy(), return_index=True)
ij_combined_unique_indices = torch.from_numpy(ij_combined_unique_indices).long().cuda()
i_s = i_s[ij_combined_unique_indices]
j_s = j_s[ij_combined_unique_indices]
valid_points_zs = valid_points_zs[ij_combined_unique_indices]
torch.index_put_(depth_hypothesis[projection_index, 0], (i_s, j_s), valid_points_zs)
return depth_hypothesis
def get_differentiable_square_depth_estimation(reference_pose_torch,
measurement_pose_torch,
previous_depth_torch,
full_K_torch,
half_K_torch,
original_image_size,
device):
batch_size, _, _ = full_K_torch.size()
R_render = torch.eye(3, dtype=torch.float, device=device)
T_render = torch.zeros(3, dtype=torch.float, device=device)
R_render = torch.stack(batch_size * [R_render], dim=0)
T_render = torch.stack(batch_size * [T_render], dim=0)
R_render[:, 0, 0] *= -1
R_render[:, 1, 1] *= -1
trans = torch.bmm(torch.inverse(reference_pose_torch), measurement_pose_torch)
points_3d_src = kornia.depth_to_3d(previous_depth_torch, full_K_torch, normalize_points=False)
points_3d_src = points_3d_src.permute(0, 2, 3, 1)
points_3d_dst = kornia.transform_points(trans[:, None], points_3d_src).view(batch_size, -1, 3)
point_cloud_p3d = structures.Pointclouds(points=points_3d_dst, features=None)
width_normalizer = original_image_size / 4.0
height_normalizer = original_image_size / 4.0
px_ndc = (half_K_torch[:, 0, 2] - width_normalizer) / width_normalizer
py_ndc = (half_K_torch[:, 1, 2] - height_normalizer) / height_normalizer
fx_ndc = half_K_torch[:, 0, 0] / width_normalizer
fy_ndc = half_K_torch[:, 1, 1] / height_normalizer
principal_point = torch.stack([px_ndc, py_ndc], dim=-1)
focal_length = torch.stack([fx_ndc, fy_ndc], dim=-1)
cameras = renderer.SfMPerspectiveCameras(focal_length=focal_length,
principal_point=principal_point,
R=R_render,
T=T_render,
device=torch.device('cuda'))
raster_settings = renderer.PointsRasterizationSettings(
image_size=int(original_image_size / 2.0),
radius=0.02,
points_per_pixel=3)
depth_renderer = renderer.PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
rendered_depth = torch.min(depth_renderer(point_cloud_p3d).zbuf, dim=-1)[0]
depth_hypothesis = torch.relu(rendered_depth).unsqueeze(1)
return depth_hypothesis
def warp_frame_depth(
image_src: torch.Tensor,
depth_dst: torch.Tensor,
src_trans_dst: torch.Tensor,
camera_matrix: torch.Tensor,
normalize_points: bool = False,
sampling_mode='bilinear') -> torch.Tensor:
# TAKEN FROM KORNIA LIBRARY
if not isinstance(image_src, torch.Tensor):
raise TypeError(f"Input image_src type is not a torch.Tensor. Got {type(image_src)}.")
if not len(image_src.shape) == 4:
raise ValueError(f"Input image_src musth have a shape (B, D, H, W). Got: {image_src.shape}")
if not isinstance(depth_dst, torch.Tensor):
raise TypeError(f"Input depht_dst type is not a torch.Tensor. Got {type(depth_dst)}.")
if not len(depth_dst.shape) == 4 and depth_dst.shape[-3] == 1:
raise ValueError(f"Input depth_dst musth have a shape (B, 1, H, W). Got: {depth_dst.shape}")
if not isinstance(src_trans_dst, torch.Tensor):
raise TypeError(f"Input src_trans_dst type is not a torch.Tensor. "
f"Got {type(src_trans_dst)}.")
if not len(src_trans_dst.shape) == 3 and src_trans_dst.shape[-2:] == (3, 3):
raise ValueError(f"Input src_trans_dst must have a shape (B, 3, 3). "
f"Got: {src_trans_dst.shape}.")
if not isinstance(camera_matrix, torch.Tensor):
raise TypeError(f"Input camera_matrix type is not a torch.Tensor. "
f"Got {type(camera_matrix)}.")
if not len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3):
raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
f"Got: {camera_matrix.shape}.")
# unproject source points to camera frame
points_3d_dst: torch.Tensor = kornia.depth_to_3d(depth_dst, camera_matrix, normalize_points) # Bx3xHxW
# transform points from source to destination
points_3d_dst = points_3d_dst.permute(0, 2, 3, 1) # BxHxWx3
# apply transformation to the 3d points
points_3d_src = kornia.transform_points(src_trans_dst[:, None], points_3d_dst) # BxHxWx3
points_3d_src[:, :, :, 2] = torch.relu(points_3d_src[:, :, :, 2])
# project back to pixels
camera_matrix_tmp: torch.Tensor = camera_matrix[:, None, None] # Bx1x1xHxW
points_2d_src: torch.Tensor = kornia.project_points(points_3d_src, camera_matrix_tmp) # BxHxWx2
# normalize points between [-1 / 1]
height, width = depth_dst.shape[-2:]
points_2d_src_norm: torch.Tensor = kornia.normalize_pixel_coordinates(points_2d_src, height, width) # BxHxWx2
return torch.nn.functional.grid_sample(image_src, points_2d_src_norm, align_corners=True, mode=sampling_mode)
def is_pose_available(pose):
is_nan = np.isnan(pose).any()
is_inf = np.isinf(pose).any()
is_neg_inf = np.isneginf(pose).any()
if is_nan or is_inf or is_neg_inf:
return False
else:
return True
# TRAINING UTILS
def freeze_batchnorm(module):
if isinstance(module, torch.nn.BatchNorm1d) or isinstance(module, torch.nn.BatchNorm2d) or isinstance(module, torch.nn.BatchNorm3d):
module.eval()
module.weight.requires_grad = False
module.bias.requires_grad = False
def zip_code(run_directory):
zip_file_path = os.path.join(run_directory, "code.zip")
zip_handle = zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED)
files = Path("./").files("*.py")
for file in files:
zip_handle.write(file)
files = Path("../").files("*.py")
for file in files:
zip_handle.write(file)
zip_handle.close()
def save_checkpoint(save_path, models, step, loss, filename='checkpoint.pth.tar'):
save_path = Path(save_path)
for model in models:
prefix = model['name']
model_state = model['state_dict']
torch.save(model_state, save_path / '{}_{}_epoch:{}_l1:{:.4f}_l1-inv:{:.4f}_l1-rel:{:.4f}_huber:{:.4f}'.format(prefix,
filename,
step,
loss[0],
loss[1],
loss[2],
loss[3]))
def save_optimizer(save_path, optimizer, step, loss, filename='checkpoint.pth.tar'):
save_path = Path(save_path)
optimizer_state = optimizer.state_dict()
torch.save(optimizer_state, save_path / 'optimizer_{}_epoch:{}_l1:{:.4f}_l1-inv:{:.4f}_l1-rel:{:.4f}_huber:{:.4f}'.format(filename,
step,
loss[0],
loss[1],
loss[2],
loss[3]))
def print_number_of_trainable_parameters(optimizer):
parameter_counter = 0
for param_group in optimizer.param_groups:
for parameter in param_group['params']:
if parameter.requires_grad:
parameter_counter += parameter.nelement()
print("Number of trainable parameters:", f"{parameter_counter:,d}")
# TESTING UTILS
def save_results(predictions, groundtruths, system_name, scene_name, save_folder, max_depth=np.inf):
if groundtruths is not None:
errors = []
for i, prediction in enumerate(predictions):
errors.append(compute_errors(groundtruths[i], prediction, max_depth))
error_names = ['abs_error', 'abs_relative_error', 'abs_inverse_error',
'squared_relative_error', 'rmse', 'ratio_125', 'ratio_125_2', 'ratio_125_3']
errors = np.array(errors)
mean_errors = np.nanmean(errors, 0)
print("Metrics of {} for scene {}:".format(system_name, scene_name))
print("{:>25}, {:>25}, {:>25}, {:>25}, {:>25}, {:>25}, {:>25}, {:>25}".format(*error_names))
print("{:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}".format(*mean_errors))
np.savez_compressed(Path(save_folder) / system_name + "_errors_" + scene_name, errors)
predictions = np.array(predictions)
np.savez_compressed(Path(save_folder) / system_name + "_predictions_" + scene_name, predictions)
def save_predictions(predictions, system_name, scene_name, save_folder):
np.savez_compressed(Path(save_folder) / system_name + "_predictions_" + scene_name, predictions)
def visualize_predictions(numpy_reference_image, numpy_measurement_image, numpy_predicted_depth, normalization_mean, normalization_std, normalization_scale,
depth_multiplier_for_visualization=5000):
numpy_reference_image = numpy_reference_image * np.array(normalization_std) + np.array(normalization_mean)
numpy_reference_image = (numpy_reference_image * normalization_scale).astype(np.uint8)
numpy_measurement_image = numpy_measurement_image * np.array(normalization_std) + np.array(normalization_mean)
numpy_measurement_image = (numpy_measurement_image * normalization_scale).astype(np.uint8)
cv2.imshow("Reference Image", cv2.cvtColor(numpy_reference_image, cv2.COLOR_RGB2BGR))
cv2.imshow("A Measurement Image", cv2.cvtColor(numpy_measurement_image, cv2.COLOR_RGB2BGR))
cv2.imshow("Predicted Depth", (depth_multiplier_for_visualization * numpy_predicted_depth).astype(np.uint16))
cv2.waitKey()
class InferenceTimer:
def __init__(self, n_skip=20):
self.times = []
self.n_skip = n_skip
self.forward_pass_start = torch.cuda.Event(enable_timing=True)
self.forward_pass_end = torch.cuda.Event(enable_timing=True)
def record_start_time(self):
self.forward_pass_start.record()
def record_end_time_and_elapsed_time(self):
self.forward_pass_end.record()
torch.cuda.synchronize()
elapsed_time = self.forward_pass_start.elapsed_time(self.forward_pass_end)
self.times.append(elapsed_time)
def print_statistics(self):
times = np.array(self.times[self.n_skip:])
if len(times) > 0:
mean_time = np.mean(times)
std_time = np.std(times)
min_time = np.min(times)
max_time = np.max(times)
median_time = np.median(times)
print("Number of Forward Passes:", len(times))
print("--- Mean Inference Time:", mean_time)
print("--- Std Inference Time:", std_time)
print("--- Median Inference Time:", median_time)
print("--- Min Inference Time:", min_time)
print("--- Max Inference Time:", max_time)
else:
print("Not enough time measurements are taken!")
| 19,349 | 47.014888 | 156 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/layers.py | import torch
def down_conv_layer(input_channels, output_channels, kernel_size):
return torch.nn.Sequential(
torch.nn.Conv2d(
input_channels,
output_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
stride=1,
bias=False),
torch.nn.BatchNorm2d(output_channels),
torch.nn.ReLU(),
torch.nn.Conv2d(
output_channels,
output_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
stride=2,
bias=False),
torch.nn.BatchNorm2d(output_channels),
torch.nn.ReLU())
def up_conv_layer(input_channels, output_channels, kernel_size):
return torch.nn.Sequential(
torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
torch.nn.Conv2d(
input_channels,
output_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
bias=False),
torch.nn.BatchNorm2d(output_channels),
torch.nn.ReLU())
def conv_layer(input_channels, output_channels, kernel_size, stride, apply_bn_relu):
if apply_bn_relu:
return torch.nn.Sequential(
torch.nn.Conv2d(
input_channels,
output_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
stride=stride,
bias=False),
torch.nn.BatchNorm2d(output_channels),
torch.nn.ReLU(inplace=True))
else:
return torch.nn.Sequential(
torch.nn.Conv2d(
input_channels,
output_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
stride=stride,
bias=False))
def depth_layer_3x3(input_channels):
return torch.nn.Sequential(
torch.nn.Conv2d(input_channels, 1, 3, padding=1),
torch.nn.Sigmoid())
| 1,984 | 29.075758 | 84 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/config.py | import time
class Config:
# training settings
train_image_width = 256
train_image_height = 256
train_min_depth = 0.25
train_max_depth = 20.0
train_n_depth_levels = 64
train_minimum_pose_distance = 0.125
train_maximum_pose_distance = 0.325
train_crawl_step = 3
train_subsequence_length = None
train_predict_two_way = None
train_freeze_batch_normalization = False
train_data_pipeline_workers = 8
train_epochs = 100000
train_print_frequency = 5000
train_validate = True
train_seed = int(round(time.time()))
# test settings
test_image_width = 320
test_image_height = 256
test_distortion_crop = 0
test_perform_crop = False
test_visualize = True
test_n_measurement_frames = 2
test_keyframe_buffer_size = 30
test_keyframe_pose_distance = 0.1
test_optimal_t_measure = 0.15
test_optimal_R_measure = 0.0
# SET THESE: TRAINING FOLDER LOCATIONS
dataset = "/media/ardaduz/T5/train"
train_run_directory = "/home/ardaduz/Workspace/git/deep-video-mvs/training-runs"
# SET THESE: TESTING FOLDER LOCATIONS
# for run-testing-online.py (evaluate a single scene, WITHOUT keyframe indices, online selection)
test_online_scene_path = "/home/ardaduz/Workspace/git/deep-video-mvs/sample-data/hololens-dataset/000"
# for run-testing.py (evaluate all available scenes, WITH pre-calculated keyframe indices)
test_offline_data_path = "/home/ardaduz/Workspace/git/deep-video-mvs/sample-data"
# below give a dataset name like tumrgbd, i.e. folder or None
# if None, all datasets will be evaluated given that
# their keyframe index files are in Config.test_offline_data_path/indices folder
test_dataset_name = "hololens-dataset" # or None
test_result_folder = "/media/ardaduz/T5/results/"
| 1,830 | 34.211538 | 106 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/dataset_loader.py | import copy
import random
from functools import partial
from multiprocessing import Manager
from multiprocessing.pool import Pool
import cv2
import numpy as np
import torch
from kornia import adjust_brightness, adjust_gamma, adjust_contrast
from path import Path
from torch.utils.data import Dataset, DataLoader
from dvmvs.config import Config
from dvmvs.utils import pose_distance
def is_valid_pair(reference_pose, measurement_pose, pose_dist_min, pose_dist_max, t_norm_threshold=0.05, return_measure=False):
combined_measure, R_measure, t_measure = pose_distance(reference_pose, measurement_pose)
if pose_dist_min <= combined_measure <= pose_dist_max and t_measure >= t_norm_threshold:
result = True
else:
result = False
if return_measure:
return result, combined_measure
else:
return result
def gather_pairs_train(poses, used_pairs, is_backward, initial_pose_dist_min, initial_pose_dist_max):
sequence_length = len(poses)
while_range = range(0, sequence_length)
pose_dist_min = copy.deepcopy(initial_pose_dist_min)
pose_dist_max = copy.deepcopy(initial_pose_dist_max)
used_measurement_indices = set()
# Gather pairs
check_future = False
pairs = []
if is_backward:
i = sequence_length - 1
step = -1
first_limit = 5
second_limit = sequence_length - 5
else:
i = 0
step = 1
first_limit = sequence_length - 5
second_limit = 5
loosening_counter = 0
while i in while_range:
pair = (i, -1)
if check_future:
for j in range(i + step, first_limit, step):
if j not in used_measurement_indices and (i, j) not in used_pairs:
valid = is_valid_pair(poses[i], poses[j], pose_dist_min, pose_dist_max)
if valid:
pair = (i, j)
pairs.append(pair)
used_pairs.add(pair)
used_pairs.add((pair[1], pair[0]))
used_measurement_indices.add(j)
pose_dist_min = copy.deepcopy(initial_pose_dist_min)
pose_dist_max = copy.deepcopy(initial_pose_dist_max)
i += step
check_future = False
loosening_counter = 0
break
else:
for j in range(i - step, second_limit, -step):
if j not in used_measurement_indices and (i, j) not in used_pairs:
valid = is_valid_pair(poses[i], poses[j], pose_dist_min, pose_dist_max)
if valid:
pair = (i, j)
pairs.append(pair)
used_pairs.add(pair)
used_pairs.add((pair[1], pair[0]))
used_measurement_indices.add(j)
pose_dist_min = copy.deepcopy(initial_pose_dist_min)
pose_dist_max = copy.deepcopy(initial_pose_dist_max)
i += step
check_future = False
loosening_counter = 0
break
if pair[1] == -1:
if check_future:
pose_dist_min = pose_dist_min / 1.1
pose_dist_max = pose_dist_max * 1.1
check_future = False
loosening_counter += 1
if loosening_counter > 1:
i += step
loosening_counter = 0
else:
check_future = True
else:
check_future = False
return pairs
def crawl_subprocess_short(scene, dataset_path, count, progress):
scene_path = Path(dataset_path) / scene
poses = np.reshape(np.loadtxt(scene_path / "poses.txt"), newshape=(-1, 4, 4))
samples = []
used_pairs = set()
for multiplier in [(1.0, False), (0.666, True), (1.5, False)]:
pairs = gather_pairs_train(poses, used_pairs,
is_backward=multiplier[1],
initial_pose_dist_min=multiplier[0] * Config.train_minimum_pose_distance,
initial_pose_dist_max=multiplier[0] * Config.train_maximum_pose_distance)
for pair in pairs:
i, j = pair
sample = {'scene': scene,
'indices': [i, j]}
samples.append(sample)
progress.value += 1
print(progress.value, "/", count, end='\r')
return samples
def crawl_subprocess_long(scene, dataset_path, count, progress, subsequence_length):
scene_path = Path(dataset_path) / scene
poses = np.reshape(np.loadtxt(scene_path / "poses.txt"), newshape=(-1, 4, 4))
sequence_length = np.shape(poses)[0]
used_pairs = set()
usage_threshold = 1
used_nodes = dict()
for i in range(sequence_length):
used_nodes[i] = 0
calculated_step = Config.train_crawl_step
samples = []
for offset, multiplier, is_backward in [(0 % calculated_step, 1.0, False),
(1 % calculated_step, 0.666, True),
(2 % calculated_step, 1.5, False),
(3 % calculated_step, 0.8, True),
(4 % calculated_step, 1.25, False),
(5 % calculated_step, 1.0, True),
(6 % calculated_step, 0.666, False),
(7 % calculated_step, 1.5, True),
(8 % calculated_step, 0.8, False),
(9 % calculated_step, 1.25, True)]:
if is_backward:
start = sequence_length - 1 - offset
step = -calculated_step
limit = subsequence_length
else:
start = offset
step = calculated_step
limit = sequence_length - subsequence_length + 1
for i in range(start, limit, step):
if used_nodes[i] > usage_threshold:
continue
sample = {'scene': scene,
'indices': [i]}
previous_index = i
valid_counter = 1
any_counter = 1
reached_sequence_limit = False
while valid_counter < subsequence_length:
if is_backward:
j = i - any_counter
reached_sequence_limit = j < 0
else:
j = i + any_counter
reached_sequence_limit = j >= sequence_length
if not reached_sequence_limit:
current_index = j
check1 = used_nodes[current_index] <= usage_threshold
check2 = (previous_index, current_index) not in used_pairs
check3 = is_valid_pair(poses[previous_index],
poses[current_index],
multiplier * Config.train_minimum_pose_distance,
multiplier * Config.train_maximum_pose_distance,
t_norm_threshold=multiplier * Config.train_minimum_pose_distance * 0.5)
if check1 and check2 and check3:
sample['indices'].append(current_index)
previous_index = copy.deepcopy(current_index)
valid_counter += 1
any_counter += 1
else:
break
if not reached_sequence_limit:
previous_node = sample['indices'][0]
used_nodes[previous_node] += 1
for current_node in sample['indices'][1:]:
used_nodes[current_node] += 1
used_pairs.add((previous_node, current_node))
used_pairs.add((current_node, previous_node))
previous_node = copy.deepcopy(current_node)
samples.append(sample)
progress.value += 1
print(progress.value, "/", count, end='\r')
return samples
def crawl(dataset_path, scenes, subsequence_length, num_workers=1):
pool = Pool(num_workers)
manager = Manager()
count = len(scenes)
progress = manager.Value('i', 0)
samples = []
if subsequence_length == 2:
for scene_samples in pool.imap_unordered(partial(crawl_subprocess_short,
dataset_path=dataset_path,
count=count,
progress=progress), scenes):
samples.extend(scene_samples)
else:
for scene_samples in pool.imap_unordered(partial(crawl_subprocess_long,
dataset_path=dataset_path,
count=count,
progress=progress,
subsequence_length=subsequence_length), scenes):
samples.extend(scene_samples)
random.shuffle(samples)
return samples
def read_split(path):
scenes_txt = np.loadtxt(path, dtype=str, delimiter="\n")
return scenes_txt
def load_image(path):
image = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def load_depth(path, scaling=1000.0):
depth = np.load(path).astype(np.float32) / scaling
return depth
class PreprocessImage:
def __init__(self, K, old_width, old_height, new_width, new_height, distortion_crop=0, perform_crop=True):
self.fx = K[0, 0]
self.fy = K[1, 1]
self.cx = K[0, 2]
self.cy = K[1, 2]
self.new_width = new_width
self.new_height = new_height
self.perform_crop = perform_crop
original_height = np.copy(old_height)
original_width = np.copy(old_width)
if self.perform_crop:
old_height -= 2 * distortion_crop
old_width -= 2 * distortion_crop
old_aspect_ratio = float(old_width) / float(old_height)
new_aspect_ratio = float(new_width) / float(new_height)
if old_aspect_ratio > new_aspect_ratio:
# we should crop horizontally to decrease image width
target_width = old_height * new_aspect_ratio
self.crop_x = int(np.floor((old_width - target_width) / 2.0)) + distortion_crop
self.crop_y = distortion_crop
else:
# we should crop vertically to decrease image height
target_height = old_width / new_aspect_ratio
self.crop_x = distortion_crop
self.crop_y = int(np.floor((old_height - target_height) / 2.0)) + distortion_crop
self.cx -= self.crop_x
self.cy -= self.crop_y
intermediate_height = original_height - 2 * self.crop_y
intermediate_width = original_width - 2 * self.crop_x
factor_x = float(new_width) / float(intermediate_width)
factor_y = float(new_height) / float(intermediate_height)
self.fx *= factor_x
self.fy *= factor_y
self.cx *= factor_x
self.cy *= factor_y
else:
self.crop_x = 0
self.crop_y = 0
factor_x = float(new_width) / float(original_width)
factor_y = float(new_height) / float(original_height)
self.fx *= factor_x
self.fy *= factor_y
self.cx *= factor_x
self.cy *= factor_y
def apply_depth(self, depth):
raw_height, raw_width = depth.shape
cropped_depth = depth[self.crop_y:raw_height - self.crop_y, self.crop_x:raw_width - self.crop_x]
resized_cropped_depth = cv2.resize(cropped_depth, (self.new_width, self.new_height), interpolation=cv2.INTER_NEAREST)
return resized_cropped_depth
def apply_rgb(self, image, scale_rgb, mean_rgb, std_rgb, normalize_colors=True):
raw_height, raw_width, _ = image.shape
cropped_image = image[self.crop_y:raw_height - self.crop_y, self.crop_x:raw_width - self.crop_x, :]
cropped_image = cv2.resize(cropped_image, (self.new_width, self.new_height), interpolation=cv2.INTER_LINEAR)
if normalize_colors:
cropped_image = cropped_image / scale_rgb
cropped_image[:, :, 0] = (cropped_image[:, :, 0] - mean_rgb[0]) / std_rgb[0]
cropped_image[:, :, 1] = (cropped_image[:, :, 1] - mean_rgb[1]) / std_rgb[1]
cropped_image[:, :, 2] = (cropped_image[:, :, 2] - mean_rgb[2]) / std_rgb[2]
return cropped_image
def get_updated_intrinsics(self):
return np.array([[self.fx, 0, self.cx],
[0, self.fy, self.cy],
[0, 0, 1]])
class MVSDataset(Dataset):
    """Multi-view-stereo dataset yielding short, preprocessed subsequences.

    Each sample is a fixed-length subsequence of one scene. ``__getitem__``
    returns lists of normalized images, metric depth maps (0 = invalid),
    camera poses, plus the intrinsics matrix matching the training size.
    """

    def __init__(self, root, seed, split, subsequence_length, scale_rgb, mean_rgb, std_rgb, geometric_scale_augmentation=False):
        np.random.seed(seed)
        random.seed(seed)
        self.subsequence_length = subsequence_length
        self.geometric_scale_augmentation = geometric_scale_augmentation
        self.root = Path(root)
        self.split = split
        if split == "TRAINING":
            self.scenes = read_split(self.root / "train.txt")
        elif split == "VALIDATION":
            self.scenes = read_split(self.root / "validation.txt")
        else:
            # Previously an unknown split only failed later with an
            # AttributeError on self.scenes; fail fast with a clear message.
            raise ValueError("Unknown split: {}".format(split))
        # self.scenes = self.scenes[0:20]
        self.samples = crawl(dataset_path=self.root,
                             scenes=self.scenes,
                             subsequence_length=self.subsequence_length,
                             num_workers=Config.train_data_pipeline_workers)
        self.scale_rgb = scale_rgb
        self.mean_rgb = mean_rgb
        self.std_rgb = std_rgb

    def __getitem__(self, sample_index):
        """Load, preprocess and (for training) augment one subsequence.

        Returns:
            output_images: list of (3, H, W) float tensors.
            output_depths: list of (H, W) float tensors in meters (0 = invalid).
            output_poses: list of (4, 4) pose tensors (translation rescaled by
                the geometric augmentation factor to stay metrically consistent).
            K: (3, 3) intrinsics tensor for the preprocessed image size.
        """
        sample = self.samples[sample_index]
        scene = sample['scene']
        # Copy so that the in-place reverse() below cannot mutate the cached
        # sample across epochs (the original code reversed the shared list).
        indices = list(sample['indices'])
        scene_path = self.root / scene
        K = np.loadtxt(scene_path / 'K.txt', dtype=np.float32)
        scene_poses = np.reshape(np.loadtxt(scene_path / 'poses.txt', dtype=np.float32), newshape=(-1, 4, 4))
        scene_npzs = sorted(scene_path.files('*.npz'))
        # Randomly play the subsequence backwards during training.
        if self.split == "TRAINING" and np.random.random() > 0.5:
            indices.reverse()
        raw_poses = []
        raw_images = []
        raw_depths = []
        for i in indices:
            data = np.load(scene_npzs[i])
            raw_images.append(data['image'])
            raw_depths.append(data['depth'])
            raw_poses.append(scene_poses[i])
        preprocessor = PreprocessImage(K=K,
                                       old_width=raw_images[0].shape[1],
                                       old_height=raw_depths[0].shape[0],
                                       new_width=Config.train_image_width,
                                       new_height=Config.train_image_height,
                                       distortion_crop=0)
        output_images = []
        output_depths = []
        output_poses = []
        rgb_sum = 0
        min_depth_in_sequence = Config.train_max_depth
        max_depth_in_sequence = Config.train_min_depth
        intermediate_depths = []
        intermediate_images = []
        for i in range(len(raw_images)):
            # Stored depths are millimeters; convert to meters.
            depth = (raw_depths[i]).astype(np.float32) / 1000.0
            # BUGFIX: `depth == np.nan` is always False (NaN never compares
            # equal, even to itself), so invalid readings were never zeroed.
            # Use np.isnan / np.isinf instead.
            depth_nan = np.isnan(depth)
            depth_inf = np.isinf(depth)
            depth_nan_or_inf = np.logical_or(depth_inf, depth_nan)
            depth[depth_nan_or_inf] = 0
            depth = preprocessor.apply_depth(depth)
            intermediate_depths.append(depth)
            valid_mask = depth > 0
            valid_depth_values = depth[valid_mask]
            if len(valid_depth_values) > 0:
                current_min_depth = np.min(valid_depth_values)
                current_max_depth = np.max(valid_depth_values)
                min_depth_in_sequence = min(min_depth_in_sequence, current_min_depth)
                max_depth_in_sequence = max(max_depth_in_sequence, current_max_depth)
            image = raw_images[i]
            # Crop/resize only here; color normalization happens later so the
            # color augmentation can operate in [0, 1] space.
            image = preprocessor.apply_rgb(image=image,
                                           scale_rgb=1.0,
                                           mean_rgb=[0.0, 0.0, 0.0],
                                           std_rgb=[1.0, 1.0, 1.0],
                                           normalize_colors=False)
            rgb_sum += np.sum(image)
            intermediate_images.append(image)
        rgb_average = rgb_sum / (len(raw_images) * Config.train_image_height * Config.train_image_width * 3)
        # GEOMETRIC AUGMENTATION: scale the whole scene (depths + camera
        # translations) by one factor, bounded so depths stay in the
        # [train_min_depth, train_max_depth] range.
        geometric_scale_factor = 1.0
        if self.geometric_scale_augmentation:
            possible_low_scale_value = Config.train_min_depth / min_depth_in_sequence
            possible_high_scale_value = Config.train_max_depth / max_depth_in_sequence
            if np.random.random() > 0.5:
                low = max(possible_low_scale_value, 0.666)
                high = min(possible_high_scale_value, 1.5)
            else:
                low = max(possible_low_scale_value, 0.8)
                high = min(possible_high_scale_value, 1.25)
            geometric_scale_factor = np.random.uniform(low=low, high=high)
        # COLOR AUGMENTATION: random gamma/contrast/brightness in random order.
        color_transforms = []
        brightness = random.uniform(-0.03, 0.03)
        contrast = random.uniform(0.8, 1.2)
        gamma = random.uniform(0.8, 1.2)
        color_transforms.append((adjust_gamma, gamma))
        color_transforms.append((adjust_contrast, contrast))
        color_transforms.append((adjust_brightness, brightness))
        random.shuffle(color_transforms)
        K = preprocessor.get_updated_intrinsics()
        for i in range(len(raw_images)):
            image = intermediate_images[i]
            depth = intermediate_depths[i] * geometric_scale_factor
            image = np.transpose(image, (2, 0, 1))
            image = torch.from_numpy(image.astype(np.float32))
            image = image / 255.0
            # Apply color jitter only for training and only for frames with a
            # moderate average brightness (avoids clipping very dark/bright ones).
            if self.split == "TRAINING" and (55.0 < rgb_average < 200.0):
                for (color_transform_function, color_transform_value) in color_transforms:
                    image = color_transform_function(image, color_transform_value)
            image = (image * 255.0) / self.scale_rgb
            image[0, :, :] = (image[0, :, :] - self.mean_rgb[0]) / self.std_rgb[0]
            image[1, :, :] = (image[1, :, :] - self.mean_rgb[1]) / self.std_rgb[1]
            image[2, :, :] = (image[2, :, :] - self.mean_rgb[2]) / self.std_rgb[2]
            pose = raw_poses[i].astype(np.float32)
            # Keep geometry consistent: translations scale with the depths.
            pose[0:3, 3] = pose[0:3, 3] * geometric_scale_factor
            pose = torch.from_numpy(pose)
            depth = torch.from_numpy(depth.astype(np.float32))
            output_poses.append(pose)
            output_depths.append(depth)
            output_images.append(image)
        K = torch.from_numpy(K.astype(np.float32))
        return output_images, output_depths, output_poses, K

    def __len__(self):
        """Number of crawled subsequences."""
        return len(self.samples)
def main():
    """Visual sanity check: iterate the training split and display consecutive
    (reference, measurement) image/depth pairs with OpenCV windows."""
    subsequence_length = 8
    dataset = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="TRAINING",
        subsequence_length=subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.0, 0.0, 0.0],
        std_rgb=[1.0, 1.0, 1.0],
        geometric_scale_augmentation=False)
    print("Number of samples:", len(dataset))
    loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=12, pin_memory=True)
    for i, (images, depths, poses, K) in enumerate(loader):
        # Walk consecutive frame pairs inside each subsequence.
        for j in range(1, len(images)):
            current_image = images[j]
            current_depth = depths[j].unsqueeze(1)
            previous_image = images[j - 1]
            previous_depth = depths[j - 1].unsqueeze(1)
            print(np.max(current_depth.squeeze(1).numpy()[0]))
            print(np.min(current_depth.squeeze(1).numpy()[0]))
            # Undo the [0, 1] scaling for display; depths are shown in the
            # common 16-bit "millimeters * 5" PNG convention.
            current_image = (np.transpose(current_image.numpy()[0], (1, 2, 0)) * 255).astype(np.uint8)
            current_depth = (current_depth.squeeze(1).numpy()[0] * 5000).astype(np.uint16)
            measurement_image = (np.transpose(previous_image.numpy()[0], (1, 2, 0)) * 255).astype(np.uint8)
            measurement_depth = (previous_depth.squeeze(1).numpy()[0] * 5000).astype(np.uint16)
            cv2.imshow("Reference Image", cv2.cvtColor(current_image, cv2.COLOR_BGR2RGB))
            cv2.imshow("Reference Depth", current_depth)
            cv2.imshow("Measurement Image", cv2.cvtColor(measurement_image, cv2.COLOR_BGR2RGB))
            cv2.imshow("Measurement Depth", measurement_depth)
            cv2.waitKey()


if __name__ == '__main__':
    main()
| 21,202 | 38.192237 | 128 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/convlstm.py | import torch
import torch.nn as nn
from dvmvs.utils import warp_frame_depth
class MVSLayernormConvLSTMCell(nn.Module):
    """Convolutional LSTM cell with layer normalization whose hidden state is
    geometrically warped into the current frame before the gate computation.

    The hidden state from the previous time step is reprojected using the
    relative camera pose and the current depth estimate so that spatial
    features stay aligned across frames.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, activation_function=None):
        super(MVSLayernormConvLSTMCell, self).__init__()
        self.activation_function = activation_function
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # "Same" padding for the (kh, kw) gate convolution.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        # One convolution produces all four gates (i, f, o, g) at once.
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=False)

    def forward(self, input_tensor, cur_state, previous_pose, current_pose, estimated_current_depth, camera_matrix):
        """One LSTM step; returns (h_next, c_next).

        previous_pose may be None for the first frame, in which case no
        warping of the hidden state is performed.
        """
        h_cur, c_cur = cur_state
        if previous_pose is not None:
            # Relative transform from current to previous camera.
            transformation = torch.bmm(torch.inverse(previous_pose), current_pose)
            # Pixels with (near-)zero depth cannot be warped reliably.
            non_valid = estimated_current_depth <= 0.01
            h_cur = warp_frame_depth(image_src=h_cur,
                                     depth_dst=estimated_current_depth,
                                     src_trans_dst=transformation,
                                     camera_matrix=camera_matrix,
                                     normalize_points=False,
                                     sampling_mode='bilinear')
            b, c, h, w = h_cur.size()
            non_valid = torch.cat([non_valid] * c, dim=1)
            # Zero out hidden features at invalid-depth pixels.
            h_cur.data[non_valid] = 0.0
        combined = torch.cat([input_tensor, h_cur], dim=1)  # concatenate along channel axis
        combined_conv = self.conv(combined)
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        b, c, h, w = h_cur.size()
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        # Layer-normalize the candidate cell input and new cell state over
        # the spatial dimensions.
        cc_g = torch.layer_norm(cc_g, [h, w])
        g = self.activation_function(cc_g)
        c_next = f * c_cur + i * g
        c_next = torch.layer_norm(c_next, [h, w])
        h_next = o * self.activation_function(c_next)
        return h_next, c_next

    def init_hidden(self, batch_size, image_size):
        """Zero-initialized (h, c) pair on the same device as the cell weights."""
        height, width = image_size
        return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
                torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
| 2,554 | 38.307692 | 116 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/__init__.py | 0 | 0 | 0 | py | |
deep-video-mvs | deep-video-mvs-master/dvmvs/train.py | import torch
import torchvision
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.losses import LossMeter
from dvmvs.utils import save_checkpoint, save_optimizer, freeze_batchnorm
def switch_mode(model, mode):
    """Put every sub-network in `model` into 'train' or 'eval' mode.

    In training mode, BatchNorm layers are optionally re-frozen according to
    Config.train_freeze_batch_normalization.
    """
    for module in model:
        if mode == 'train':
            module.train()
            if Config.train_freeze_batch_normalization:
                module.apply(freeze_batchnorm)
        elif mode == 'eval':
            module.eval()
def train(train_loader, val_loader, model, optimizer, summary_writer, epoch, best_loss, run_directory, forward_pass_function):
    """Run one training epoch, then (optionally) validate and checkpoint.

    `model` is an iterable of sub-networks; `best_loss` is a 4-element list
    [l1, huber, l1_inv, l1_rel] that is updated in place when validation
    improves. `forward_pass_function` computes losses and predictions for a
    batch and is shared with validate().
    """
    training_l1_meter = LossMeter()
    training_huber_meter = LossMeter()
    training_l1_inv_meter = LossMeter()
    training_l1_rel_meter = LossMeter()
    info_printer = tqdm(total=0, position=1, bar_format='{desc}')
    info = 'L1 Loss: {} --- L1-inv Loss: {} --- L1-rel Loss: {} --- Huber Loss: {}'
    # switch to train mode
    switch_mode(model=model, mode='train')
    for i, (images, depths, poses, K) in enumerate(tqdm(train_loader)):
        batch_l1_meter, batch_huber_meter, batch_l1_inv_meter, batch_l1_rel_meter, \
        optimizer_loss, predictions, predictions_names = forward_pass_function(images=images,
                                                                               depths=depths,
                                                                               poses=poses,
                                                                               K=K,
                                                                               model=model,
                                                                               is_training=True)
        # record losses
        training_l1_meter.update(loss=batch_l1_meter.sum, count=batch_l1_meter.count)
        training_huber_meter.update(loss=batch_huber_meter.sum, count=batch_huber_meter.count)
        training_l1_inv_meter.update(loss=batch_l1_inv_meter.sum, count=batch_l1_inv_meter.count)
        training_l1_rel_meter.update(loss=batch_l1_rel_meter.sum, count=batch_l1_rel_meter.count)
        # Periodically log a grid of input / ground truth / predictions.
        if i > 0 and i % Config.train_print_frequency == 0:
            rgb_debug_image = images[-1][0].cpu().detach()
            depth_debug_image = depths[-1][0].cpu().repeat(3, 1, 1).detach()
            debug_images = [rgb_debug_image, depth_debug_image]
            debug_names = "input_image ground_truth"
            for index, prediction in enumerate(predictions):
                debug_names += " " + predictions_names[index]
                prediction = prediction[0].cpu().repeat(3, 1, 1).detach().unsqueeze(0)
                _, channel, height, width = prediction.size()
                # Upsample each prediction to the full training width for display.
                scale_factor = Config.train_image_width / width
                prediction = torch.nn.functional.interpolate(prediction, scale_factor=scale_factor, mode='bilinear', align_corners=True)
                prediction = prediction.squeeze(0)
                debug_images.append(prediction)
            debug_images_grid = torchvision.utils.make_grid(debug_images,
                                                            nrow=3,
                                                            normalize=True,
                                                            scale_each=True)
            summary_writer.add_image(debug_names, debug_images_grid, epoch * len(train_loader) + i)
        # compute gradient and do Adam step
        optimizer.zero_grad()
        optimizer_loss.backward()
        optimizer.step()
        summary_writer.add_scalar('Batch Loss/L1', training_l1_meter.item_average, epoch * len(train_loader) + i)
        summary_writer.add_scalar('Batch Loss/Huber', training_huber_meter.item_average, epoch * len(train_loader) + i)
        summary_writer.add_scalar('Batch Loss/L1-inv', training_l1_inv_meter.item_average, epoch * len(train_loader) + i)
        summary_writer.add_scalar('Batch Loss/L1-rel', training_l1_rel_meter.item_average, epoch * len(train_loader) + i)
        info_printer.set_description_str(info.format(training_l1_meter, training_l1_inv_meter, training_l1_rel_meter, training_huber_meter))
    if Config.train_validate:
        validation_l1_loss, validation_huber_loss, validation_l1_inv_loss, validation_l1_rel_loss = validate(val_loader=val_loader,
                                                                                                             model=model,
                                                                                                             forward_pass_function=forward_pass_function)
        summary_writer.add_scalar('L1 Loss/Training', training_l1_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1 Loss/Validation', validation_l1_loss, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('Huber Loss/Training', training_huber_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('Huber Loss/Validation', validation_huber_loss, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-inv Loss/Training', training_l1_inv_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-inv Loss/Validation', validation_l1_inv_loss, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-rel Loss/Training', training_l1_rel_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-rel Loss/Validation', validation_l1_rel_loss, (epoch + 1) * len(train_loader))
        # Checkpoint when ANY of the four validation metrics improved.
        if validation_l1_loss < best_loss[0] or validation_huber_loss < best_loss[1] or \
                validation_l1_inv_loss < best_loss[2] or validation_l1_rel_loss < best_loss[3]:
            best_loss[0] = min(validation_l1_loss, best_loss[0])
            best_loss[1] = min(validation_huber_loss, best_loss[1])
            best_loss[2] = min(validation_l1_inv_loss, best_loss[2])
            best_loss[3] = min(validation_l1_rel_loss, best_loss[3])
            # save best checkpoint
            checkpoint_list = []
            for k, module in enumerate(model):
                entry = {
                    'name': "module_" + str(k),
                    'epoch': epoch + 1,
                    'state_dict': module.state_dict()
                }
                checkpoint_list.append(entry)
            save_checkpoint(run_directory,
                            checkpoint_list,
                            step=(epoch + 1) * len(train_loader),
                            loss=[validation_l1_loss, validation_l1_inv_loss, validation_l1_rel_loss, validation_huber_loss])
            save_optimizer(run_directory,
                           optimizer=optimizer,
                           step=(epoch + 1) * len(train_loader),
                           loss=[validation_l1_loss, validation_l1_inv_loss, validation_l1_rel_loss, validation_huber_loss])
        # switch back to train mode !!!
        switch_mode(model=model, mode='train')
def validate(val_loader, model, forward_pass_function):
    """Evaluate the model over the validation loader without gradients.

    Returns the average (l1, huber, l1_inv, l1_rel) losses. Leaves the model
    in eval mode; the caller is responsible for switching back to training.
    """
    validation_l1_meter = LossMeter()
    validation_huber_meter = LossMeter()
    validation_l1_inv_meter = LossMeter()
    validation_l1_rel_meter = LossMeter()
    # switch to evaluate mode
    switch_mode(model=model, mode='eval')
    with torch.no_grad():
        for i, (images, depths, poses, K) in enumerate(tqdm(val_loader)):
            batch_l1_meter, batch_huber_meter, batch_l1_inv_meter, batch_l1_rel_meter, \
            optimizer_loss, predictions, predictions_names = forward_pass_function(images=images,
                                                                                   depths=depths,
                                                                                   poses=poses,
                                                                                   K=K,
                                                                                   model=model,
                                                                                   is_training=False)
            # record losses
            validation_l1_meter.update(loss=batch_l1_meter.sum, count=batch_l1_meter.count)
            validation_huber_meter.update(loss=batch_huber_meter.sum, count=batch_huber_meter.count)
            validation_l1_inv_meter.update(loss=batch_l1_inv_meter.sum, count=batch_l1_inv_meter.count)
            validation_l1_rel_meter.update(loss=batch_l1_rel_meter.sum, count=batch_l1_rel_meter.count)
    return validation_l1_meter.avg, validation_huber_meter.avg, validation_l1_inv_meter.avg, validation_l1_rel_meter.avg
| 8,579 | 56.583893 | 153 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/mvdepthnet/encoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two conv+BN+ReLU stages; the second uses stride 2 to halve resolution."""
    same_pad = (kernel_size - 1) // 2
    first_stage = [
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=same_pad, stride=1, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    downsampling_stage = [
        nn.Conv2d(output_channels, output_channels, kernel_size,
                  padding=same_pad, stride=2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*(first_stage + downsampling_stage))
def conv_layer(input_channels, output_channels, kernel_size):
    """Stride-1 'same'-padding convolution (no bias) + BatchNorm + ReLU."""
    same_pad = (kernel_size - 1) // 2
    convolution = nn.Conv2d(input_channels,
                            output_channels,
                            kernel_size,
                            padding=same_pad,
                            bias=False)
    return nn.Sequential(convolution, nn.BatchNorm2d(output_channels), nn.ReLU())
def depth_layer(input_channels):
    """3x3 conv down to one channel followed by sigmoid -> values in (0, 1)."""
    prediction_conv = nn.Conv2d(input_channels, 1, 3, padding=1)
    return nn.Sequential(prediction_conv, nn.Sigmoid())
def refine_layer(input_channels):
    """Single 3x3 conv emitting a one-channel refinement map (no activation)."""
    return nn.Conv2d(in_channels=input_channels, out_channels=1,
                     kernel_size=3, padding=1)
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsampling followed by conv (no bias) + BatchNorm + ReLU.

    align_corners=False is stated explicitly: it matches PyTorch's default
    behavior when the argument is omitted, but omitting it for bilinear mode
    triggers a UserWarning on every forward pass. Output is unchanged.
    """
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
        nn.Conv2d(
            input_channels,
            output_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU())
def get_trainable_number(variable):
    """Return the total number of scalar elements in `variable`.

    Works for anything exposing a `.shape` iterable of ints (torch tensors,
    numpy arrays). Uses math.prod instead of a hand-rolled multiply loop;
    an empty shape (0-d scalar) yields 1, matching the original behavior.
    """
    from math import prod
    return prod(variable.shape)
class Encoder(nn.Module):
    """MVDepthNet encoder: 5 downsampling conv stages over image + cost volume.

    Input is the concatenation of a 3-channel RGB image and a 64-channel
    plane-sweep cost volume (3 + 64 = 67 input channels).
    """

    def __init__(self):
        super(Encoder, self).__init__()
        self.conv1 = down_conv_layer(67, 128, 7)
        self.conv2 = down_conv_layer(128, 256, 5)
        self.conv3 = down_conv_layer(256, 512, 3)
        self.conv4 = down_conv_layer(512, 512, 3)
        self.conv5 = down_conv_layer(512, 512, 3)

    def getVolume(self, left_image, right_image, KRKiUV_T, KT_T):
        """Build a 64-level plane-sweep absolute-difference cost volume.

        Depth hypotheses are uniform in INVERSE depth between 0.5m and 50m.
        CUDA-only (allocates torch.cuda.FloatTensor); image size is hard-coded
        to 320x256.
        """
        idepth_base = 1.0 / 50.0
        idepth_step = (1.0 / 0.5 - 1.0 / 50.0) / 63.0
        costvolume = Variable(
            torch.cuda.FloatTensor(left_image.shape[0], 64,
                                   left_image.shape[2], left_image.shape[3]))
        image_height = 256
        image_width = 320
        batch_number = left_image.shape[0]
        # Half-size base used to map pixel coordinates to [-1, 1] for grid_sample.
        normalize_base = torch.cuda.FloatTensor(
            [image_width / 2.0, image_height / 2.0])
        normalize_base = normalize_base.unsqueeze(0).unsqueeze(-1)
        for depth_i in range(64):
            this_depth = 1.0 / (idepth_base + depth_i * idepth_step)
            # Project reference pixels into the measurement frame at this depth.
            transformed = KRKiUV_T * this_depth + KT_T
            demon = transformed[:, 2, :].unsqueeze(1)
            warp_uv = transformed[:, 0: 2, :] / (demon + 1e-6)
            warp_uv = (warp_uv - normalize_base) / normalize_base
            # NOTE(review): view is (B, 2, W, H) then permuted to (B, H, W, 2)
            # — width-major layout inherited from the original MVDepthNet code.
            warp_uv = warp_uv.view(
                batch_number, 2, image_width,
                image_height)
            warp_uv = Variable(warp_uv.permute(
                0, 3, 2, 1))
            warped = F.grid_sample(right_image, warp_uv)
            # Per-pixel sum of absolute color differences at this depth plane.
            costvolume[:, depth_i, :, :] = torch.sum(
                torch.abs(warped - left_image), dim=1)
        return costvolume

    def forward(self, image, plane_sweep_volume):
        """Return the 5 feature maps (deepest first) for the skip-connected decoder."""
        x = torch.cat((image, plane_sweep_volume), 1)
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        return [conv5, conv4, conv3, conv2, conv1]

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Encoder, self).train(mode)
        self.apply(freeze_batchnorm)
| 3,979 | 28.051095 | 77 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/mvdepthnet/run-testing.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.baselines.mvdepthnet.decoder import Decoder
from dvmvs.baselines.mvdepthnet.encoder import Encoder
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import cost_volume_fusion, save_results, InferenceTimer, visualize_predictions, get_warp_grid_for_cost_volume_calculation
def predict():
    """Run MVDepthNet depth prediction over precomputed keyframe index files.

    For each scene listed in the index directory, loads reference/measurement
    frames, fuses a plane-sweep cost volume, runs encoder+decoder, and saves
    predictions next to their ground-truth depths. CUDA-only.
    """
    # Toggle between the fine-tuned weights and the original release weights.
    predict_with_finetuned = True
    if predict_with_finetuned:
        extension = "finetuned"
    else:
        extension = "without_ft"
    input_image_width = 320
    input_image_height = 256
    print("System: MVDEPTHNET, is_finetuned = ", predict_with_finetuned)
    device = torch.device('cuda')
    encoder = Encoder()
    decoder = Decoder()
    if predict_with_finetuned:
        encoder_weights = torch.load(Path("finetuned-weights").files("*encoder*")[0])
        decoder_weights = torch.load(Path("finetuned-weights").files("*decoder*")[0])
    else:
        # The original release ships a single combined state dict; split it
        # into the keys each sub-network expects.
        mvdepth_weights = torch.load(Path("original-weights") / "pretrained_mvdepthnet_combined")
        pretrained_dict = mvdepth_weights['state_dict']
        encoder_weights = encoder.state_dict()
        pretrained_dict_encoder = {k: v for k, v in pretrained_dict.items() if k in encoder_weights}
        encoder_weights.update(pretrained_dict_encoder)
        decoder_weights = decoder.state_dict()
        pretrained_dict_decoder = {k: v for k, v in pretrained_dict.items() if k in decoder_weights}
        decoder_weights.update(pretrained_dict_decoder)
    encoder.load_state_dict(encoder_weights)
    decoder.load_state_dict(decoder_weights)
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    encoder.eval()
    decoder.eval()
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=input_image_width,
                                                          height=input_image_height,
                                                          device=device)
    min_depth = 0.5
    max_depth = 50.0
    n_depth_levels = 64
    # Color normalization used by the MVDepthNet weights.
    scale_rgb = 1.0
    mean_rgb = [81.0, 81.0, 81.0]
    std_rgb = [35.0, 35.0, 35.0]
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                # Lines marking tracking failure carry no frames to predict on.
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # First filename is the reference; the rest are measurements.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                reference_pose = poses[reference_index]
                reference_image = load_image(image_filenames[reference_index])
                # Stored depth PNGs are millimeters; convert to meters.
                reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                preprocessor = PreprocessImage(K=K,
                                               old_width=reference_image.shape[1],
                                               old_height=reference_image.shape[0],
                                               new_width=input_image_width,
                                               new_height=input_image_height,
                                               distortion_crop=0,
                                               perform_crop=False)
                reference_image = preprocessor.apply_rgb(image=reference_image,
                                                         scale_rgb=scale_rgb,
                                                         mean_rgb=mean_rgb,
                                                         std_rgb=std_rgb)
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
                measurement_poses_torch = []
                measurement_images_torch = []
                for measurement_index in measurement_indices:
                    measurement_image = load_image(image_filenames[measurement_index])
                    measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                               scale_rgb=scale_rgb,
                                                               mean_rgb=mean_rgb,
                                                               std_rgb=std_rgb)
                    measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                    measurement_images_torch.append(measurement_image_torch)
                    measurement_poses_torch.append(measurement_pose_torch)
                full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                inference_timer.record_start_time()
                cost_volume = cost_volume_fusion(image1=reference_image_torch,
                                                 image2s=measurement_images_torch,
                                                 pose1=reference_pose_torch,
                                                 pose2s=measurement_poses_torch,
                                                 K=full_K_torch,
                                                 warp_grid=warp_grid,
                                                 min_depth=min_depth,
                                                 max_depth=max_depth,
                                                 n_depth_levels=n_depth_levels,
                                                 device=device,
                                                 dot_product=False)
                conv5, conv4, conv3, conv2, conv1 = encoder(reference_image_torch, cost_volume)
                prediction, _, _, _ = decoder(conv5, conv4, conv3, conv2, conv1)
                # Network outputs inverse depth; clamp to [1/50, 1/0.5] then invert.
                prediction = torch.clamp(prediction, min=0.02, max=2.0)
                prediction = 1 / prediction
                inference_timer.record_end_time_and_elapsed_time()
                prediction = prediction.cpu().numpy().squeeze()
                reference_depths.append(reference_depth)
                predictions.append(prediction)
                if Config.test_visualize:
                    visualize_predictions(numpy_reference_image=reference_image,
                                          numpy_measurement_image=measurement_image,
                                          numpy_predicted_depth=prediction,
                                          normalization_mean=mean_rgb,
                                          normalization_std=std_rgb,
                                          normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_mvdepthnet_{}".format(keyframing_type,
                                                            dataset_name,
                                                            input_image_width,
                                                            input_image_height,
                                                            n_measurement_frames,
                                                            extension)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)


if __name__ == '__main__':
    predict()
| 9,324 | 48.078947 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/mvdepthnet/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Conv+BN+ReLU at stride 1, then conv+BN+ReLU at stride 2 (downsamples 2x)."""
    pad = (kernel_size - 1) // 2
    modules = []
    modules.append(nn.Conv2d(input_channels, output_channels, kernel_size,
                             padding=pad, stride=1, bias=False))
    modules.append(nn.BatchNorm2d(output_channels))
    modules.append(nn.ReLU())
    modules.append(nn.Conv2d(output_channels, output_channels, kernel_size,
                             padding=pad, stride=2, bias=False))
    modules.append(nn.BatchNorm2d(output_channels))
    modules.append(nn.ReLU())
    return nn.Sequential(*modules)
def conv_layer(input_channels, output_channels, kernel_size):
    """Resolution-preserving convolution (no bias) + BatchNorm + ReLU."""
    stages = [
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=(kernel_size - 1) // 2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*stages)
def depth_layer(input_channels):
    """Predict a single-channel map squashed to (0, 1) by a sigmoid."""
    return nn.Sequential(
        nn.Conv2d(in_channels=input_channels, out_channels=1,
                  kernel_size=3, padding=1),
        nn.Sigmoid())
def refine_layer(input_channels):
    """3x3 conv producing a one-channel refinement map (no activation)."""
    refinement_conv = nn.Conv2d(input_channels, 1, 3, padding=1)
    return refinement_conv
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsampling (align_corners=True) then conv+BN+ReLU."""
    stages = [
        nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=(kernel_size - 1) // 2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*stages)
def get_trainable_number(variable):
    """Return the total number of scalar elements in `variable`.

    Works for anything exposing a `.shape` iterable of ints. Replaces the
    hand-rolled multiply loop with math.prod; an empty shape yields 1,
    matching the original accumulator start value.
    """
    from math import prod
    return prod(variable.shape)
class Decoder(nn.Module):
    """MVDepthNet decoder: upsamples encoder features with skip connections
    and predicts inverse-depth maps at four scales (disp1 is the finest).

    Each disp output is a sigmoid scaled by 2, i.e. inverse depth in (0, 2),
    which corresponds to depths of 0.5m and above.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        self.upconv5 = up_conv_layer(512, 512, 3)
        self.iconv5 = conv_layer(1024, 512, 3)  # input upconv5 + conv4
        self.upconv4 = up_conv_layer(512, 512, 3)
        self.iconv4 = conv_layer(1024, 512, 3)  # input upconv4 + conv3
        self.disp4 = depth_layer(512)
        self.upconv3 = up_conv_layer(512, 256, 3)
        self.iconv3 = conv_layer(
            513, 256, 3)  # input upconv3 + conv2 + disp4 = 256 + 256 + 1 = 513
        self.disp3 = depth_layer(256)
        self.upconv2 = up_conv_layer(256, 128, 3)
        self.iconv2 = conv_layer(
            257, 128, 3)  # input upconv2 + conv1 + disp3 = 128 + 128 + 1 = 257
        self.disp2 = depth_layer(128)
        self.upconv1 = up_conv_layer(128, 64, 3)
        self.iconv1 = conv_layer(65, 64,
                                 3)  # input upconv1 + disp2 = 64 + 1 = 65
        self.disp1 = depth_layer(64)

    def forward(self, conv5, conv4, conv3, conv2, conv1):
        """Takes encoder features (deepest first); returns [disp1..disp4],
        finest to coarsest. Coarser predictions are upsampled and fed into
        the next finer stage."""
        upconv5 = self.upconv5(conv5)
        iconv5 = self.iconv5(torch.cat((upconv5, conv4), 1))
        upconv4 = self.upconv4(iconv5)
        iconv4 = self.iconv4(torch.cat((upconv4, conv3), 1))
        disp4 = 2.0 * self.disp4(iconv4)
        udisp4 = F.interpolate(disp4, scale_factor=2)
        upconv3 = self.upconv3(iconv4)
        iconv3 = self.iconv3(torch.cat((upconv3, conv2, udisp4), 1))
        disp3 = 2.0 * self.disp3(iconv3)
        udisp3 = F.interpolate(disp3, scale_factor=2)
        upconv2 = self.upconv2(iconv3)
        iconv2 = self.iconv2(torch.cat((upconv2, conv1, udisp3), 1))
        disp2 = 2.0 * self.disp2(iconv2)
        udisp2 = F.interpolate(disp2, scale_factor=2)
        upconv1 = self.upconv1(iconv2)
        iconv1 = self.iconv1(torch.cat((upconv1, udisp2), 1))
        disp1 = 2.0 * self.disp1(iconv1)
        return [disp1, disp2, disp3, disp4]

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Decoder, self).train(mode)
        self.apply(freeze_batchnorm)
| 3,920 | 28.044444 | 80 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/base_model.py | import collections
from abc import ABCMeta, abstractmethod
from torch import nn
def dict_update(d, u):
    """Recursively merge `u` into a shallow copy of `d` (nested dict update).

    Neither input dictionary is mutated; nested mappings in `u` are merged
    recursively, all other values overwrite.

    Arguments:
        d: The dictionary to be updated.
        u: The update dictionary.
    Returns:
        The updated dictionary.
    """
    # BUGFIX: `collections.Mapping` was deprecated since Python 3.3 and
    # removed in 3.10 — the ABC lives in collections.abc.
    from collections.abc import Mapping
    d = d.copy()
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = dict_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Base Model.

    Abstract nn.Module with a layered configuration scheme: subclasses
    declare `default_config`, which is merged over `base_config`, then the
    user-supplied config is merged over both. Subclasses must implement
    _init, _forward, loss and metrics.
    """
    # Keys every model accepts; subclass defaults extend these.
    base_config = {
        'name': None,
        'trainable': True,
    }
    # Overridden by subclasses with their model-specific defaults.
    default_config = {}
    # Keys that must be present in the `data` dict passed to forward().
    required_data_keys = []

    def __init__(self, config):
        nn.Module.__init__(self)
        default_config = dict_update(self.base_config, self.default_config)
        # Reject config keys the model does not declare — catches typos early.
        new_keys = set(config.keys()) - set(default_config.keys())
        if len(new_keys) > 0:
            raise ValueError(
                'Detected new keys in config: {}'.format(new_keys))
        self.config = dict_update(default_config, config)
        self._init()
        # Non-trainable models have all parameters frozen at construction.
        if not self.config['trainable']:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, data, **kwarg):
        """Validate required data keys, then delegate to the subclass _forward."""
        for key in self.required_data_keys:
            assert key in data, 'Missing key {} in data'.format(key)
        return self._forward(data, **kwarg)

    @abstractmethod
    def _init(self):
        raise NotImplementedError

    @abstractmethod
    def _forward(self, data):
        raise NotImplementedError

    @abstractmethod
    def loss(self, pred, data):
        raise NotImplementedError

    @abstractmethod
    def metrics(self):
        raise NotImplementedError
| 1,752 | 24.042857 | 75 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/utils.py | import torch
def reorder_desc(desc, batch_sz):
    """Reorders Descriptors: regroup a stacked (G*batch_sz, C, H, W) tensor
    into shape (batch_sz, G, C, H, W)."""
    total, channels, height, width = desc.shape
    grouped = desc.view(-1, batch_sz, channels, height, width)
    return grouped.transpose(1, 0)
def pose_square(pose):
    """Converts a batched pose of shape (B, N, 3, 4) to homogeneous (B, N, 4, 4)
    by appending a [0, 0, 0, 1] row; poses already 4x4 pass through unchanged."""
    shape = pose.shape
    if shape[2] != 3:
        return pose
    homogeneous_row = torch.tensor([0., 0., 0., 1.])
    if pose.is_cuda:
        homogeneous_row = homogeneous_row.to(pose.device)
    homogeneous_row = homogeneous_row.repeat(shape[0], shape[1], 1, 1)
    return torch.cat((pose, homogeneous_row), 2)
def make_symmetric(anc, ref):
    """Concatenate the anchor tensor with all stacked reference views along
    the batch dimension; returns None if either input is None."""
    if anc is None or ref is None:
        return None
    anchor_shape = anc.shape
    stacked_views = torch.stack(ref, 0)
    if len(anchor_shape) == 3:
        stacked_views = stacked_views.view(-1, anchor_shape[1], anchor_shape[2])
    else:
        stacked_views = stacked_views.view(-1, anchor_shape[1], anchor_shape[2], anchor_shape[3])
    return torch.cat((anc, stacked_views), 0)
| 1,006 | 24.175 | 73 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/superpoint.py | import torch
import torchvision
from .base_model import BaseModel
def simple_nms(scores, radius):
    """Performs non maximum suppression on the heatmap using max-pooling.
    This method does not suppress contiguous points that have the same score.
    Arguments:
        scores: the score heatmap, with shape `[B, H, W]`.
        radius: an integer scalar, the radius of the NMS window.
    """
    def window_max(heatmap):
        return torch.nn.functional.max_pool2d(
            heatmap, kernel_size=radius * 2 + 1, stride=1, padding=radius)

    suppressed = torch.zeros_like(scores)
    is_local_max = scores == window_max(scores)
    # Two refinement passes: re-run the max test with already-found maxima
    # removed, so points shadowed by a stronger neighbor of a neighbor survive.
    for _ in range(2):
        neighborhood_taken = window_max(is_local_max.float()) > 0
        remaining_scores = torch.where(neighborhood_taken, suppressed, scores)
        new_local_max = remaining_scores == window_max(remaining_scores)
        is_local_max = is_local_max | (new_local_max & (~neighborhood_taken))
    return torch.where(is_local_max, scores, suppressed)
def remove_borders(keypoints, scores, b, h, w):
    """Discard keypoints (row, col) closer than `b` pixels to the image border."""
    inside_rows = (keypoints[:, 0] >= b) & (keypoints[:, 0] < (h - b))
    inside_cols = (keypoints[:, 1] >= b) & (keypoints[:, 1] < (w - b))
    keep = inside_rows & inside_cols
    return keypoints[keep], scores[keep]
def top_k_keypoints(keypoints, scores, k):
    """Keep the k highest-scoring keypoints (everything if k >= count)."""
    if k >= len(keypoints):
        return keypoints, scores
    top_scores, top_indices = torch.topk(scores, k, dim=0)
    return keypoints[top_indices], top_scores
class Superpoint(BaseModel):
default_config = {
'has_detector': True,
'has_descriptor': True,
'descriptor_dim': 128,
# Inference for Anchor
'sparse_outputs': True,
'nms_radius': 9,
'detection_threshold': 0.0005,
'top_k_keypoints': 128,
'force_num_keypoints': True,
'remove_borders': 4,
'unique_keypoints': True,
'frac_superpoint': 1.,
'dense_depth': True,
'min_depth': 0.5,
'max_depth': 10.0,
'model_type': 'resnet50',
'align_corners': False,
'height': 240,
'width': 320,
}
def _init(self):
    """Build the ResNet-50 backbone plus detector and descriptor heads.

    The backbone is torchvision's resnet50 (weights loaded separately, not
    via `pretrained`). Intermediate layer outputs are kept as skip
    connections for the dense-depth path and the descriptor head.
    """
    self.relu = torch.nn.ReLU(inplace=True)
    self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
    pretrained_features = torchvision.models.resnet50(pretrained=False)
    # c_out[0] = 2048 is the resnet50 layer4 channel count; the purpose of
    # c_out[1]/c_out[2] is not evident from this block — verify upstream.
    c_out = [2048, 8, 10]
    c_d = 512   # channels of the eighth-resolution skip concatenated in the descriptor head
    c_k = 64 + 256  # channels of the half + quarter resolution skips
    self.conv1 = pretrained_features.conv1
    self.bn1 = pretrained_features.bn1
    self.maxpool = pretrained_features.maxpool
    self.layer1 = pretrained_features.layer1
    self.layer2 = pretrained_features.layer2
    self.layer3 = pretrained_features.layer3
    self.layer4 = pretrained_features.layer4
    # Fixed color conversion / normalization constants kept as buffers-on-the-fly.
    self.rgb_to_gray = torch.tensor([0.299, 0.587, 0.114])
    self.rgb_to_gray = self.rgb_to_gray.view(1, -1, 1, 1)
    self.mean_add_rgb = torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
    self.std_mul_rgb = torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)
    self.mean_add = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
    self.std_mul = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
    if self.config['has_detector']:
        # Detector head: 2048 -> 256 -> 128 -> 65 (8x8 cells + dustbin).
        c_1, c_2 = 256, 128
        self.convPa = torch.nn.Conv2d(c_out[0], c_1, kernel_size=3, stride=1, padding=1)
        self.bnPa = torch.nn.BatchNorm2d(c_1)
        self.scale_factorPa = 4
        self.convPb = torch.nn.Conv2d(c_1, c_2, kernel_size=3, stride=1, padding=1)
        self.bnPb = torch.nn.BatchNorm2d(c_2)
        self.convPc = torch.nn.Conv2d(c_2, 65, kernel_size=1, stride=1, padding=0)
    if self.config['has_descriptor']:
        # Descriptor head: fuses backbone output with skip features before
        # projecting down to descriptor_dim channels.
        c_3, c_4 = 128, 256
        self.convDa = torch.nn.Conv2d(c_out[0], c_3, kernel_size=3, stride=1, padding=1)
        self.bnDa = torch.nn.BatchNorm2d(c_3)
        self.convDb = torch.nn.Conv2d(c_3 + c_d, c_4, kernel_size=1, stride=1, padding=0)
        self.bnDb = torch.nn.BatchNorm2d(c_4)
        self.convDc = torch.nn.Conv2d(c_4, c_4, kernel_size=3, stride=1, padding=1)
        self.bnDc = torch.nn.BatchNorm2d(c_4)
        self.convDd = torch.nn.Conv2d(c_4 + c_k, self.config['descriptor_dim'], kernel_size=1, stride=1, padding=0)
    def _forward(self, data):
        """Run the backbone and the optional detector / descriptor heads.

        Args:
            data: dict with
                'img': RGB batch, normalised here with ImageNet statistics.
                'process_tsp': flag string; 't' enables detector outputs,
                    's' enables descriptor outputs.

        Returns:
            dict of intermediate feature maps, dense score/descriptor maps
            and, when sparse outputs are enabled, per-image keypoints and
            their scores.
        """
        img_rgb = data['img']
        tsp = data['process_tsp']
        # Normalise with the ImageNet mean/std set up in `_init`.
        img_rgb = (img_rgb - self.mean_add_rgb.to(img_rgb.device)) / self.std_mul_rgb.to(img_rgb.device)
        img = img_rgb
        ##Run superpoint
        pred = {}
        pred['img_rgb'] = img_rgb
        x = self.relu(self.bn1(self.conv1(img)))
        # Stash intermediate feature maps as skip connections for the
        # downstream dense-depth decoder.
        if self.config['dense_depth']:
            pred['skip_half'] = x
        x = self.maxpool(x)
        x = self.layer1(x)
        if self.config['dense_depth']:
            pred['skip_quarter'] = x
        x = self.layer2(x)
        if self.config['dense_depth']:
            pred['skip_eight'] = x
        x = self.layer3(x)
        if self.config['dense_depth']:
            pred['skip_sixteenth'] = x
        x = self.layer4(x)
        if self.config['dense_depth']:
            pred['features'] = x
        # Detector Head.
        if self.config['has_detector'] and ('t' in tsp):
            cPa = self.relu(self.bnPa(self.convPa(x)))
            cPa = torch.nn.functional.interpolate(cPa, size=(self.config['height'] // 8, self.config['width'] // 8), mode='bilinear',
                                                  align_corners=self.config['align_corners'])
            cPa = self.relu(self.bnPb(self.convPb(cPa)))
            pred['scores'] = self.convPc(cPa)
        # Descriptor Head.
        if self.config['has_descriptor'] and ('s' in tsp):
            cDa = self.relu(self.bnDa(self.convDa(x)))
            cDa = torch.nn.functional.interpolate(cDa, size=(self.config['height'] // 8, self.config['width'] // 8), mode='bilinear',
                                                  align_corners=self.config['align_corners'])
            # Fuse skip features resized to 1/8 resolution before the final
            # descriptor projection.
            cDa = torch.cat((cDa, pred['skip_eight']), 1)
            cDa = self.relu(self.bnDb(self.convDb(cDa)))
            cDa = self.relu(self.bnDc(self.convDc(cDa)))
            skip_4 = torch.nn.functional.interpolate(pred['skip_quarter'], scale_factor=0.5, mode='bilinear', align_corners=self.config['align_corners'])
            skip_2 = torch.nn.functional.interpolate(pred['skip_half'], scale_factor=0.25, mode='bilinear', align_corners=self.config['align_corners'])
            cDa = torch.cat((cDa, skip_4, skip_2), 1)
            desc = self.convDd(cDa)
            # L2-normalise descriptors along the channel dimension.
            desc = torch.nn.functional.normalize(desc, p=2, dim=1)
            pred['descriptors'] = desc
        # Sparse Key-Points
        if self.config['sparse_outputs'] and ('t' in tsp):
            st = 8 # encoder stride
            if self.config['has_detector']:
                # Drop the last softmax channel, then unfold the remaining 64
                # channels into 8x8 cells to get a full-resolution score map.
                scores = torch.nn.functional.softmax(pred['scores'], 1)[:, :-1]
                b, c, h, w = scores.shape
                scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, st, st)
                scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h * st, w * st)
                dense_scores = scores
                if self.config['nms_radius']:
                    scores = simple_nms(scores, self.config['nms_radius'])
                # Threshold into per-image (row, col) keypoint lists.
                keypoints = [torch.nonzero(s > self.config['detection_threshold'], as_tuple=False) for s in scores]
                scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)]
                if self.config['remove_borders']:
                    keypoints, scores = list(zip(*[
                        remove_borders(
                            k, s, self.config['remove_borders'], h * st, w * st)
                        for k, s in zip(keypoints, scores)]))
                if self.config['top_k_keypoints']:
                    keypoints, scores = list(zip(*[
                        top_k_keypoints(k, s, int(self.config['frac_superpoint'] * self.config['top_k_keypoints']))
                        for k, s in zip(keypoints, scores)]))
                if self.config['force_num_keypoints']:
                    # Pad each image up to exactly top_k_keypoints entries with
                    # random locations carrying zero score.
                    new_keypoints, new_scores = [], []
                    for k, sc in zip(keypoints, scores):
                        num = self.config['top_k_keypoints'] - len(k)
                        new_x = torch.randint_like(k.new_empty(num), w * st)
                        new_y = torch.randint_like(k.new_empty(num), h * st)
                        new_k = torch.stack([new_y, new_x], -1)
                        if self.config['unique_keypoints']:
                            # Re-draw random pads until no duplicates remain.
                            # NOTE(review): torch.unique(..., dim=1) dedupes the
                            # two coordinate columns of the (N, 2) tensor, not
                            # keypoint rows — dim=0 looks intended; confirm.
                            curr_k = torch.cat([k, new_k])
                            not_all_unique = True
                            while not_all_unique:
                                unique_k = torch.unique(curr_k, dim=1)
                                if unique_k.shape[0] == curr_k.shape[0]:
                                    not_all_unique = False
                                else:
                                    new_x = torch.randint_like(k.new_empty(num), w * st)
                                    new_y = torch.randint_like(k.new_empty(num), h * st)
                                    new_k = torch.stack([new_y, new_x], -1)
                                    curr_k = torch.cat([k, new_k])
                        new_sc = sc.new_zeros(num)
                        new_keypoints.append(torch.cat([k, new_k], 0))
                        new_scores.append(torch.cat([sc, new_sc], 0))
                    keypoints, scores = new_keypoints, new_scores
                # Flip (row, col) -> (x, y) and stack into a single tensor.
                keypoints = [torch.flip(k, [1]).float() for k in keypoints]
                keypoints = torch.stack(keypoints, 0)
                pred['keypoints'] = keypoints
                pred['scores_sparse'] = scores
        return pred
    def loss(self, pred, data):
        """Training loss is not implemented for this inference-only module."""
        raise NotImplementedError
    def metrics(self):
        """Evaluation metrics are not implemented for this inference-only module."""
        raise NotImplementedError
| 9,883 | 38.536 | 153 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/densedepth.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
from .resnet_s2d import resnet50
def conv3x3(in_planes, out_planes, stride=1):
    """Create a bias-free 3x3 convolution with padding 1 (size-preserving at stride 1)."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class Unpool(nn.Module):
    """Unpool: 2*2 unpooling with zero padding.

    Each input pixel is copied to the top-left corner of a stride x stride
    output cell; all other cells are zero.  Implemented as a grouped
    transposed convolution with the fixed per-channel kernel [[1, 0], [0, 0]].
    """

    def __init__(self, num_channels, stride=2):
        super(Unpool, self).__init__()
        self.num_channels = num_channels
        self.stride = stride
        # create kernel [1, 0; 0, 0]
        # Fix: torch.autograd.Variable is deprecated (a no-op wrapper since
        # PyTorch 0.4); a plain tensor is equivalent.  The kernel is a fixed
        # constant, deliberately not a Parameter/buffer so state_dict keys
        # stay unchanged.
        self.weights = torch.zeros(num_channels, 1, stride, stride)
        self.weights[:, :, 0, 0] = 1

    def forward(self, x):
        # groups=num_channels keeps channels independent (pure spatial unpool).
        return F.conv_transpose2d(x, self.weights.to(x.device), stride=self.stride, groups=self.num_channels)
class Gudi_UpProj_Block(nn.Module):
    """Up-projection block (CSPN, Cheng et al.).

    Upsamples the input by two — either a sparse nearest-neighbour
    "unpooling" pattern or plain bilinear interpolation, depending on
    ``interp_nearest`` — then applies a residual conv pair: main path
    conv-bn-relu-conv-bn, shortcut conv-bn, fused by a final ReLU.
    ``side_channels`` is accepted for signature parity with
    ``Gudi_UpProj_Block_Cat`` but is unused here.
    """

    def __init__(self, in_channels, out_channels, oheight=0, owidth=0, side_channels=0, do_5x5=True, interp_nearest=True):
        super(Gudi_UpProj_Block, self).__init__()

        def _entry_conv():
            # 5x5 (padding 2) or pointwise entry convolution, both bias-free.
            if do_5x5:
                return nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2, bias=False)
            return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)

        self.conv1 = _entry_conv()
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sc_conv1 = _entry_conv()
        self.sc_bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.oheight = oheight
        self.owidth = owidth
        self.interp_nearest = interp_nearest

    def _up_pooling(self, x, scale):
        # Nearest-neighbour upsample, optional crop to (oheight, owidth),
        # then zero everything except the top-left pixel of each 2x2 cell.
        x = F.interpolate(x, scale_factor=scale, mode='nearest')
        if self.oheight != 0 and self.owidth != 0:
            x = x[:, :, 0:self.oheight, 0:self.owidth]
        mask = torch.zeros_like(x)
        mask[:, :, 0:self.oheight:2, 0:self.owidth:2] = 1
        return mask * x

    def forward(self, x):
        if self.interp_nearest:
            x = self._up_pooling(x, 2)
        else:
            x = F.interpolate(x, scale_factor=2, mode='bilinear')
        main = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        shortcut = self.sc_bn1(self.sc_conv1(x))
        return self.relu(main + shortcut)
class Gudi_UpProj_Block_Cat(nn.Module):
    """Up-projection block with skip-feature concatenation (CSPN, Cheng et al.).

    Like ``Gudi_UpProj_Block``, but after the entry convolution the features
    are concatenated with ``side_input`` (``side_channels`` wide) and fused
    through an extra conv-bn-relu before the residual pair.  The output is
    resized to ``side_input``'s spatial size.
    """

    def __init__(self, in_channels, out_channels, oheight=0, owidth=0, side_channels=0, do_5x5=True, interp_nearest=True):
        super(Gudi_UpProj_Block_Cat, self).__init__()

        def _entry_conv():
            # 5x5 (padding 2) or pointwise entry convolution, both bias-free.
            if do_5x5:
                return nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2, bias=False)
            return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)

        self.conv1 = _entry_conv()
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv1_1 = nn.Conv2d(out_channels + side_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sc_conv1 = _entry_conv()
        self.sc_bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.oheight = oheight
        self.owidth = owidth
        self._up_pool = Unpool(in_channels)
        self.interp_nearest = interp_nearest

    def _up_pooling(self, x, scale):
        # Zero-padded 2x2 unpool, then crop to the configured target size.
        x = self._up_pool(x)
        if self.oheight != 0 and self.owidth != 0:
            x = x[:, :, :self.oheight, :self.owidth]
        return x

    def forward(self, x, side_input):
        target_size = (side_input.shape[2], side_input.shape[3])
        if self.interp_nearest:
            # Exact unpool when the sizes line up; nearest resize otherwise.
            if side_input.shape[2] % x.shape[2] == 0:
                x = self._up_pooling(x, 2)
            else:
                x = F.interpolate(x, size=target_size, mode='nearest')
        else:
            x = F.interpolate(x, size=target_size, mode='bilinear')
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn1_1(self.conv1_1(torch.cat((out, side_input), 1))))
        out = self.bn2(self.conv2(out))
        return self.relu(out + self.sc_bn1(self.sc_conv1(x)))
class dilated_conv3x3(nn.Module):
    """Two-stage dilated convolution unit: 1x1 projection, then dilated 3x3.

    Padding equals the dilation rate, so the spatial size is preserved.
    """

    def __init__(self, in_channels, out_channels, dilation_rate=1):
        super(dilated_conv3x3, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels,
                               kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=3, padding=dilation_rate,
                               dilation=dilation_rate, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        h = self.relu1(self.bn1(self.conv1(x)))
        h = self.relu2(self.bn2(self.conv2(h)))
        return h
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling block.

    Five dilated-conv branches (rates 3/6/12/18/24) are applied in a densely
    cascaded fashion: each branch consumes the input concatenated with every
    previous branch output.  The input plus all five branch outputs are then
    fused back to ``in_channels`` by a 3x3 convolution.
    """

    def __init__(self, in_channels):
        super(ASPP, self).__init__()
        out_channels = in_channels
        half = out_channels // 2
        self.daspp_1 = dilated_conv3x3(out_channels, half, dilation_rate=3)
        self.relu = nn.ReLU(inplace=True)
        self.daspp_2 = dilated_conv3x3(int(1.5 * out_channels), half, dilation_rate=6)
        self.daspp_3 = dilated_conv3x3(int(2 * out_channels), half, dilation_rate=12)
        self.daspp_4 = dilated_conv3x3(int(2.5 * out_channels), half, dilation_rate=18)
        self.daspp_5 = dilated_conv3x3(int(3 * out_channels), half, dilation_rate=24)
        self.convf = nn.Conv2d(int(3.5 * out_channels), out_channels, kernel_size=3, padding=1, bias=False)
        self.bnf = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        # Each branch sees the input concatenated with all earlier branch
        # outputs; channel counts grow by out_channels // 2 per step.
        collected = [x]
        for branch in (self.daspp_1, self.daspp_2, self.daspp_3, self.daspp_4, self.daspp_5):
            collected.append(branch(torch.cat(collected, 1)))
        fused = torch.cat(collected, 1)
        return self.relu(self.bnf(self.convf(fused)))
class SparsetoDenseNet(BaseModel):
    """Sparse-to-Dense network.

    Densifies sparse triangulated keypoint depths into a full-resolution
    depth map: the sparse depths are splatted into an image, encoded with a
    single-channel ResNet-50 (`resnet_s2d.resnet50`), fused at every scale
    with the image-encoder skip features, and decoded with CSPN-style
    up-projection blocks around an ASPP bottleneck.  Optionally also emits
    auxiliary predictions at 1/2, 1/4 and 1/8 resolution.
    """
    default_config = {
        'model_type': 'resnet50',
        'input_shape': (240, 320, 1),
        'min_depth': 0.5,
        'max_depth': 10.0,
        'multiscale': True,
        'do_5x5': True,
        'interp_n': True,
    }

    def _init(self):
        ##Encoder for sparse depth
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        pretrained_features = resnet50()
        # Channel widths of the decoder skip inputs after concatenating the
        # depth-encoder features with the image-encoder skips (1.25x the
        # plain ResNet-50 widths).
        c_out = [int(1.25 * 2048), int(1.25 * 1024), int(1.25 * 512), int(1.25 * 256), int(1.25 * 64)]
        self.conv1 = pretrained_features.conv1
        self.bn1 = pretrained_features.bn1
        self.maxpool = pretrained_features.maxpool
        self.layer1 = pretrained_features.layer1
        self.layer2 = pretrained_features.layer2
        self.layer3 = pretrained_features.layer3
        self.layer4 = pretrained_features.layer4
        d0, d1, d2, d3, d4 = 512, 256, 128, 64, 32
        # Decoder for sparse to dense
        block = Gudi_UpProj_Block_Cat
        block_simple = Gudi_UpProj_Block
        h = self.config['input_shape'][0]
        w = self.config['input_shape'][1]
        self.gud_up_proj_layer1 = self._make_gud_up_conv_layer(block, c_out[0], d0, math.ceil(h / 16), math.ceil(w / 16), c_out[1], self.config['do_5x5'],
                                                               self.config['interp_n'])
        self.gud_up_proj_layer2 = self._make_gud_up_conv_layer(block, d0, d1, math.ceil(h / 8), math.ceil(w / 8), c_out[2], self.config['do_5x5'],
                                                               self.config['interp_n'])
        self.ASPP = ASPP(d1)
        self.gud_up_proj_layer3 = self._make_gud_up_conv_layer(block, d1, d2, math.ceil(h / 4), math.ceil(w / 4), c_out[3], self.config['do_5x5'],
                                                               self.config['interp_n'])
        self.gud_up_proj_layer4 = self._make_gud_up_conv_layer(block, d2, d3, math.ceil(h / 2), math.ceil(w / 2), c_out[4], self.config['do_5x5'],
                                                               self.config['interp_n'])
        # Fix: pass side_ch=0 explicitly.  Previously config['do_5x5'] was
        # consumed positionally as side_ch and config['interp_n'] as do_5x5,
        # leaving interp_nearest stuck at its default.  With the default
        # config (both flags True) the constructed module is unchanged, so
        # shipped checkpoints still load.
        self.gud_up_proj_layer5 = self._make_gud_up_conv_layer(block_simple, d3, d4, h, w, 0,
                                                               self.config['do_5x5'], self.config['interp_n'])
        self.conv_final = nn.Conv2d(d4, 1, kernel_size=3, stride=1, padding=1, bias=True)
        if self.config['multiscale']:
            # 1-channel heads for the auxiliary 1/8, 1/4 and 1/2 predictions.
            self.conv_scale8 = nn.Conv2d(d1, 1, kernel_size=1, stride=1, padding=0, bias=True)
            self.conv_scale4 = nn.Conv2d(d2, 1, kernel_size=1, stride=1, padding=0, bias=True)
            self.conv_scale2 = nn.Conv2d(d3, 1, kernel_size=1, stride=1, padding=0, bias=True)

    def _make_gud_up_conv_layer(self, up_proj_block, in_channels, out_channels, oheight, owidth, side_ch=0, do_5x5=True, interp_nearest=True):
        """Instantiate one up-projection block of the requested type."""
        return up_proj_block(in_channels, out_channels, oheight, owidth, side_ch, do_5x5, interp_nearest)

    def _forward(self, data):
        """Densify sparse keypoint depths into a dense depth map.

        Args:
            data: dict with keypoint pixel positions ('anchor_keypoints'),
                triangulated 3-D points ('keypoints_3d'), a validity mask
                ('range_mask') and the image-encoder outputs
                ('features', 'skip_half/quarter/eight/sixteenth').

        Returns:
            dict with 'dense_depth' and, when enabled, 'multiscale'
            auxiliary predictions [1/2, 1/4, 1/8].
        """
        # Inputs from previous modules
        anchor_keypoints = data['anchor_keypoints']
        keypoints_3d = data['keypoints_3d']
        range_mask = data['range_mask']
        features = data['features']
        skip_half = data['skip_half']
        skip_quarter = data['skip_quarter']
        skip_eight = data['skip_eight']
        skip_sixteenth = data['skip_sixteenth']
        del data
        # Impute learnt sparse depth into a sparse image
        sparse_depth_learnt = torch.zeros((anchor_keypoints.shape[0], self.config['input_shape'][0], self.config['input_shape'][1])).to(anchor_keypoints.device)
        anchor_keypoints_index = anchor_keypoints.long()
        # Build per-keypoint (batch, y, x) index triplets for the scatter.
        bselect = torch.arange(anchor_keypoints.shape[0], dtype=torch.long)
        bselect = bselect.unsqueeze(1).unsqueeze(1)
        bselect = bselect.repeat(1, anchor_keypoints_index.shape[1], 1).to(anchor_keypoints.device)
        anchor_keypoints_indexchunk = torch.cat((bselect, anchor_keypoints_index[:, :, [1]], anchor_keypoints_index[:, :, [0]]), 2)
        anchor_keypoints_indexchunk = anchor_keypoints_indexchunk.view(-1, 3).t()
        # Keep only depths inside (min_depth, max_depth) that passed the
        # triangulation range check; rejected entries are written as 0.
        kp3d_val = keypoints_3d[:, :, 2].view(-1, 1).t()
        kp3d_val = torch.clamp(kp3d_val, min=0.0, max=self.config['max_depth'])
        kp3d_filter = (range_mask > 0).view(-1, 1).t()
        kp3d_filter = (kp3d_filter) & (kp3d_val > self.config['min_depth']) & (kp3d_val < self.config['max_depth'])
        kp3d_val = kp3d_val * kp3d_filter.float()
        sparse_depth_learnt[anchor_keypoints_indexchunk.chunk(chunks=3, dim=0)] = kp3d_val
        sparse_depth_learnt = sparse_depth_learnt.unsqueeze(1)
        pred = {}
        # Forward pass: depth encoder, fusing the image-encoder skips by
        # concatenation at every scale.
        x = self.relu(self.bn1(self.conv1(sparse_depth_learnt)))
        skip_half = torch.cat((x, skip_half), 1)
        x = self.maxpool(x)
        x = self.layer1(x)
        skip_quarter = torch.cat((x, skip_quarter), 1)
        x = self.layer2(x)
        skip_eight = torch.cat((x, skip_eight), 1)
        x = self.layer3(x)
        skip_sixteenth = torch.cat((x, skip_sixteenth), 1)
        x = self.layer4(x)
        x = torch.cat((features, x), 1)  # 160
        # Decoder: up-project with skip fusion, ASPP bottleneck at 1/8 scale.
        x = self.gud_up_proj_layer1(x, skip_sixteenth)
        x = self.gud_up_proj_layer2(x, skip_eight)
        x = self.ASPP(x)
        if self.config['multiscale']:
            x_8 = self.conv_scale8(x)
        x = self.gud_up_proj_layer3(x, skip_quarter)
        if self.config['multiscale']:
            x_4 = self.conv_scale4(x)
        x = self.gud_up_proj_layer4(x, skip_half)
        if self.config['multiscale']:
            x_2 = self.conv_scale2(x)
        x = self.gud_up_proj_layer5(x)
        x = self.conv_final(x)
        if self.config['multiscale']:
            pred['multiscale'] = [x_2, x_4, x_8]
        depth_dense = x
        pred['dense_depth'] = depth_dense
        return pred

    def loss(self, pred, data):
        """Training loss is not implemented for this inference-only module."""
        raise NotImplementedError

    def metrics(self):
        """Evaluation metrics are not implemented for this inference-only module."""
        raise NotImplementedError
| 13,604 | 37.109244 | 160 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/run-testing.py | import argparse
import cv2
import numpy as np
import torch.backends.cudnn as cudnn
import torch.utils.data
from path import Path
from tqdm import tqdm
from dvmvs.baselines.deltas import superpoint, triangulation, densedepth
from dvmvs.baselines.deltas.utils import *
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import InferenceTimer, visualize_predictions, save_results
# Input geometry and number of extra (measurement) frames per keyframe.
input_image_width = 320
input_image_height = 240
n_measurement_frames = 1
# Command-line interface; defaults reproduce the DELTAS evaluation setting.
# NOTE(review): argparse `type=bool` flags (--resume, --do_confidence,
# --depth_range, --align_corners) treat any non-empty string as True, so
# e.g. "--resume False" still enables the flag — confirm intended usage.
parser = argparse.ArgumentParser(description='DELTAS Inference',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset-format', default='sequential', metavar='STR',
                    help='dataset format, stacked: sequential: sequential folders')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers')
parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size')
parser.add_argument('--print-freq', default=200, type=int, metavar='N', help='print frequency')
parser.add_argument('--seed', default=1, type=int, help='seed for random functions, and network initialization')
parser.add_argument('--mindepth', type=float, default=0.5, help='minimum depth')
parser.add_argument('--maxdepth', type=float, default=10., help='maximum depth')
parser.add_argument('--width', type=int, default=input_image_width, help='image width')
parser.add_argument('--height', type=int, default=input_image_height, help='image height')
parser.add_argument('--seq_length', default=n_measurement_frames + 1, type=int, help='length of sequence')
parser.add_argument('--seq_gap', default=1, type=int, help='gap between frames for ScanNet dataset')
parser.add_argument('--resume', type=bool, default=True, help='Use pretrained network')
parser.add_argument('--pretrained', dest='pretrained', default='weights/pretrained_deltas', metavar='PATH', help='path to pre-trained model')
parser.add_argument('--do_confidence', type=bool, default=True, help='confidence in triangulation')
parser.add_argument('--dist_orthogonal', type=int, default=1, help='offset distance in pixels')
parser.add_argument('--kernel_size', type=int, default=1, help='kernel size')
parser.add_argument('--out_length', type=int, default=100, help='output length of epipolar patch')
parser.add_argument('--depth_range', type=bool, default=True, help='clamp using range of depth')
parser.add_argument('--num_kps', default=512, type=int, help='number of interest keypoints')
parser.add_argument('--model_type', type=str, default='resnet50', help='network backbone')
parser.add_argument('--align_corners', type=bool, default=False, help='align corners')
parser.add_argument('--descriptor_dim', type=int, default=128, help='dimension of descriptor')
parser.add_argument('--detection_threshold', type=float, default=0.0005, help='threshold for interest point detection')
parser.add_argument('--frac_superpoint', type=float, default=.5, help='fraction of interest points')
parser.add_argument('--nms_radius', type=int, default=9, help='radius for nms')
# Global iteration counter, read by predict_for_subsequence().
n_iter = 0
def get_model():
    """Parse CLI arguments and build the three-stage DELTAS pipeline.

    Returns:
        (args, supernet, trinet, depthnet): parsed arguments plus the
        interest-point network, the triangulation network and the
        sparse-to-dense depth network, each moved to CUDA (and wrapped in
        DataParallel) when a GPU is available, with pretrained weights
        loaded when --resume is set.
    """
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    # create model
    print("=> creating model")
    # step 1 using superpoint
    config_sp = {
        'top_k_keypoints': args.num_kps,
        'height': args.height,
        'width': args.width,
        'align_corners': args.align_corners,
        'detection_threshold': args.detection_threshold,
        'frac_superpoint': args.frac_superpoint,
        'nms_radius': args.nms_radius,
    }
    cudnn.benchmark = True
    supernet = superpoint.Superpoint(config_sp)
    supernet = supernet.cuda() if torch.cuda.is_available() else supernet
    # step 2 using differentiable triangulation
    # NOTE(review): key 'dist_ortogonal' (sic) must match the spelling
    # TriangulationNet expects in its config — confirm.
    config_tri = {
        'dist_ortogonal': args.dist_orthogonal,
        'kernel_size': args.kernel_size,
        'out_length': args.out_length,
        'depth_range': args.depth_range,
        'has_confidence': args.do_confidence,
        'align_corners': args.align_corners,
    }
    trinet = triangulation.TriangulationNet(config_tri)
    trinet = trinet.cuda() if torch.cuda.is_available() else trinet
    # step 3 using sparse-to-dense
    config_depth = {
        'min_depth': args.mindepth,
        'max_depth': args.maxdepth,
        'input_shape': (args.height, args.width, 1),
    }
    depthnet = densedepth.SparsetoDenseNet(config_depth)
    depthnet = depthnet.cuda() if torch.cuda.is_available() else depthnet
    # load pre-trained weights
    if args.resume:
        if torch.cuda.is_available():
            weights = torch.load(args.pretrained)
        else:
            weights = torch.load(args.pretrained, map_location=torch.device('cpu'))
        # Strict loading: the checkpoint must match the module structure.
        supernet.load_state_dict(weights['state_dict'], strict=True)
        trinet.load_state_dict(weights['state_dict_tri'], strict=True)
        depthnet.load_state_dict(weights['state_dict_depth'], strict=True)
    if torch.cuda.is_available():
        depthnet = torch.nn.DataParallel(depthnet).cuda()
        supernet = torch.nn.DataParallel(supernet).cuda()
        trinet = torch.nn.DataParallel(trinet).cuda()
    return args, supernet, trinet, depthnet
def predict_for_subsequence(args, supernet, trinet, depthnet, tgt_img, tgt_depth, ref_imgs, ref_depths, poses, intrinsics):
    """Run the full DELTAS pipeline on one keyframe + measurement frames.

    Steps: (1) detect/describe interest points on all frames with `supernet`,
    (2) match and triangulate them with `trinet`, (3) densify the sparse
    depths with `depthnet`.  Returns the dense depth prediction for the
    target (keyframe) image.
    """
    global n_iter
    tgt_img_var = tgt_img
    ref_imgs_var = ref_imgs
    # Stack target and reference frames into one batch for the shared encoder.
    img_var = make_symmetric(tgt_img_var, ref_imgs_var)
    batch_sz = tgt_img_var.shape[0]
    ##Pose and intrinsics
    poses_var = [pose for pose in poses]
    intrinsics_var = intrinsics
    seq_val = args.seq_length - 1
    pose = torch.cat(poses_var, 1)
    pose = pose_square(pose)
    ##Depth
    tgt_depth_var = tgt_depth
    ref_depths_var = [ref_depth for ref_depth in ref_depths]
    depth = tgt_depth_var
    depth_ref = torch.stack(ref_depths_var, 1)
    ##Step 1: Detect and Describe Points
    data_sp = {'img': img_var, 'process_tsp': 'ts'} # t is detector, s is descriptor
    pred_sp = supernet(data_sp)
    # Keypoints and descriptor logic
    # The first batch_sz entries belong to the target frame; the remainder
    # are the reference views.
    keypoints = pred_sp['keypoints'][:batch_sz]
    features = pred_sp['features'][:batch_sz]
    skip_half = pred_sp['skip_half'][:batch_sz]
    skip_quarter = pred_sp['skip_quarter'][:batch_sz]
    skip_eight = pred_sp['skip_eight'][:batch_sz]
    skip_sixteenth = pred_sp['skip_sixteenth'][:batch_sz]
    scores = pred_sp['scores'][:batch_sz]
    desc = pred_sp['descriptors']
    desc_anc = desc[:batch_sz, :, :, :]
    desc_view = desc[batch_sz:, :, :, :]
    desc_view = reorder_desc(desc_view, batch_sz)
    ## Step 2: Match & Triangulate Points
    data_sd = {'iter': n_iter, 'intrinsics': intrinsics_var, 'pose': pose, 'depth': depth, 'ref_depths': depth_ref, 'scores': scores,
               'keypoints': keypoints, 'descriptors': desc_anc, 'descriptors_views': desc_view, 'img_shape': tgt_img_var.shape,
               'sequence_length': seq_val}
    pred_sd = trinet(data_sd)
    view_matches = pred_sd['multiview_matches']
    # NOTE(review): anchor_keypoints is extracted here but data_dd below
    # passes the step-1 `keypoints` as 'anchor_keypoints' — confirm which
    # tensor the sparse-to-dense stage is meant to receive.
    anchor_keypoints = pred_sd['keypoints']
    keypoints3d_gt = pred_sd['keypoints3d_gt']
    range_mask_view = pred_sd['range_kp']
    range_mask = torch.sum(range_mask_view, 1)
    d_shp = tgt_depth_var.shape
    keypoints_3d = pred_sd['keypoints_3d']
    # NOTE(review): d_shp, kp3d_val and kp3d_filter are computed but unused
    # here — the same filtering is redone inside SparsetoDenseNet; looks
    # like dead code, confirm before removing.
    kp3d_val = keypoints_3d[:, :, 2].view(-1, 1).t()
    kp3d_filter = (range_mask > 0).view(-1, 1).t()
    kp3d_filter = (kp3d_filter) & (kp3d_val > args.mindepth) & (kp3d_val < args.maxdepth)
    ## Step 3: Densify using Sparse-to-Dense
    data_dd = {'anchor_keypoints': keypoints, 'keypoints_3d': keypoints_3d, 'sequence_length': args.seq_length, 'skip_sixteenth': skip_sixteenth,
               'range_mask': range_mask, 'features': features, 'skip_half': skip_half, 'skip_quarter': skip_quarter, 'skip_eight': skip_eight}
    pred_dd = depthnet(data_dd)
    output = pred_dd['dense_depth']
    return output
def predict():
    """Offline DELTAS evaluation loop.

    For every keyframe-index file under Config.test_offline_data_path, loads
    the scene's images/poses/intrinsics, runs `predict_for_subsequence` on
    each keyframe with its measurement frames, and saves the predicted depth
    maps alongside the ground truth via `save_results`.

    Note: requires CUDA — the device is hard-coded to 'cuda'.
    """
    print("System: DELTAS")
    device = torch.device('cuda')
    cudnn.benchmark = True
    args, supernet, trinet, depthnet = get_model()
    supernet.eval()
    trinet.eval()
    depthnet.eval()
    # Normalisation applied to the 8-bit input images.
    scale_rgb = 255.0
    mean_rgb = [0.5, 0.5, 0.5]
    std_rgb = [0.5, 0.5, 0.5]
    # Placeholder depth tensors; the network interface expects depth inputs
    # even at inference time.
    dummy_input = torch.empty(size=(1, input_image_height, input_image_width), dtype=torch.float).to(device)
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*nmeas+{}*".format(n_measurement_frames)))
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*nmeas+{}*".format(n_measurement_frames)))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # Index filenames encode metadata as '+'-separated fields.
        keyframing_type, dataset_name, scene_name, _, _ = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # Line format: reference filename followed by measurement
                    # frame filenames, space-separated.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                    reference_pose = poses[reference_index]
                    reference_image = load_image(image_filenames[reference_index])
                    # Depth PNGs store millimetres; convert to metres.
                    reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                    preprocessor = PreprocessImage(K=K,
                                                   old_width=reference_image.shape[1],
                                                   old_height=reference_image.shape[0],
                                                   new_width=input_image_width,
                                                   new_height=input_image_height,
                                                   distortion_crop=0,
                                                   perform_crop=False)
                    reference_image = preprocessor.apply_rgb(image=reference_image,
                                                             scale_rgb=scale_rgb,
                                                             mean_rgb=mean_rgb,
                                                             std_rgb=std_rgb)
                    reference_depth = preprocessor.apply_depth(reference_depth)
                    reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    # DELTAS ALWAYS REQUIRE A PREDETERMINED NUMBER OF MEASUREMENT FRAMES, SO FAKE IT
                    while len(measurement_indices) < n_measurement_frames:
                        measurement_indices.append(measurement_indices[0])
                    measurement_poses_torch = []
                    measurement_images_torch = []
                    for measurement_index in measurement_indices:
                        measurement_image = load_image(image_filenames[measurement_index])
                        measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                                   scale_rgb=scale_rgb,
                                                                   mean_rgb=mean_rgb,
                                                                   std_rgb=std_rgb)
                        measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                        # Relative pose: measurement frame w.r.t. reference.
                        measurement_pose = poses[measurement_index]
                        measurement_pose = (np.linalg.inv(measurement_pose) @ reference_pose)
                        measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0).unsqueeze(0)
                        measurement_poses_torch.append(measurement_pose_torch)
                        measurement_images_torch.append(measurement_image_torch)
                    K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                    tgt_depth = dummy_input
                    ref_depths = [dummy_input for _ in range(n_measurement_frames)]
                    inference_timer.record_start_time()
                    prediction = predict_for_subsequence(args, supernet, trinet, depthnet,
                                                         tgt_img=reference_image_torch,
                                                         tgt_depth=tgt_depth,
                                                         ref_imgs=measurement_images_torch,
                                                         ref_depths=ref_depths,
                                                         poses=measurement_poses_torch,
                                                         intrinsics=K_torch)
                    inference_timer.record_end_time_and_elapsed_time()
                    prediction = prediction.cpu().numpy().squeeze()
                    reference_depths.append(reference_depth)
                    predictions.append(prediction)
                    if Config.test_visualize:
                        visualize_predictions(numpy_reference_image=reference_image,
                                              numpy_measurement_image=measurement_image,
                                              numpy_predicted_depth=prediction,
                                              normalization_mean=mean_rgb,
                                              normalization_std=std_rgb,
                                              normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_deltas".format(keyframing_type,
                                                     dataset_name,
                                                     input_image_width,
                                                     input_image_height,
                                                     n_measurement_frames)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run DELTAS offline inference over the configured scenes.
if __name__ == '__main__':
    predict()
| 15,377 | 46.462963 | 170 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/resnet_s2d.py | import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution; padding tracks the dilation rate, so the
    spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free pointwise (1x1) convolution."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
    return layer
class BasicBlock(nn.Module):
    """Standard two-conv residual block (3x3 -> 3x3) with identity shortcut."""

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Only the vanilla ResNet configuration is supported here.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample downsample the input when stride != 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut only when a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck; output width is planes * 4."""

    expansion = 4
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 stage, scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample downsample the input when stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut only when a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.input_ch = 1
self.dilation = 1
c1, c2, c3, c4, c5 = 16, 16, 32, 64, 128
self.inplanes = c1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(self.input_ch, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, c2, layers[0])
self.layer2 = self._make_layer(block, c3, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, c4, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, c5, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage made of `blocks` residual blocks.

        The first block may change resolution (stride != 1) or channel count,
        in which case a 1x1-conv + norm shortcut is attached; the remaining
        blocks keep resolution and channels fixed.

        Side effects: updates ``self.inplanes`` (input channels of the next
        stage) and, when `dilate` is set, multiplies ``self.dilation`` by the
        requested stride instead of actually striding.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade striding for dilation: keep resolution, grow receptive field.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut so identity and residual shapes match.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
        """nn.Module entry point; all the work happens in _forward_impl."""
        features = self._forward_impl(x)
        return features
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Instantiate a ResNet and optionally load the pretrained weights
    registered under *arch* in ``model_urls``."""
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict)
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
    """ResNet-18 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    layers_per_stage = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, layers_per_stage, pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    """ResNet-34 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    layers_per_stage = [3, 4, 6, 3]
    return _resnet('resnet34', BasicBlock, layers_per_stage, pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    """ResNet-50 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    layers_per_stage = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, layers_per_stage, pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    """ResNet-101 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    layers_per_stage = [3, 4, 23, 3]
    return _resnet('resnet101', Bottleneck, layers_per_stage, pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    """ResNet-152 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    layers_per_stage = [3, 8, 36, 3]
    return _resnet('resnet152', Bottleneck, layers_per_stage, pretrained, progress, **kwargs)
| 9,946 | 36.394737 | 106 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/triangulation.py | import numpy as np
import torch
from torch import svd
from .base_model import BaseModel
def homogeneous_to_euclidean(points):
    """Convert homogeneous points to euclidean coordinates.

    Args:
        points: numpy array or torch tensor of shape (N, M + 1) --
            N homogeneous points of dimension M.

    Returns:
        Array/tensor of shape (N, M): each row divided by its last entry.

    Raises:
        TypeError: for anything that is neither numpy nor torch.
    """
    if torch.is_tensor(points):
        flipped = points.transpose(1, 0)
        return (flipped[:-1] / flipped[-1]).transpose(1, 0)
    if isinstance(points, np.ndarray):
        transposed = points.T
        return (transposed[:-1] / transposed[-1]).T
    raise TypeError("Works only with numpy arrays and PyTorch tensors.")
def triangulate_point_from_multiple_views_linear_torch_batch(proj_matricies, points, confidences=None):
    """Batched DLT triangulation (PyTorch analogue of
    triangulate_point_from_multiple_views_linear()).

    Args:
        proj_matricies torch tensor of shape (N, 3, 4): sequence of projection matricies (3x4)
        points torch tensor of shape (N, P, 2): 2D observations of P points in
            each of the N views (the code below handles multiple points at
            once, despite the original single-point docstring)
        confidences None or torch tensor of shape (P, N): confidences of points [0.0, 1.0].
            If None, all confidences are supposed to be 1.0
    Returns:
        points_3d torch tensor of shape (P, 3): triangulated points
    """
    assert len(proj_matricies) == len(points)
    n_views = len(proj_matricies)
    if confidences is None:
        confidences = torch.ones(points.shape[1], n_views, dtype=torch.float32, device=points.device)
    ##multiple points
    points_t = points.transpose(0, 1)
    # Build the DLT system A x = 0 per point: rows are (u * P_3 - P_1) and
    # (v * P_3 - P_2) for each view, weighted by the per-view confidence.
    proj_mat = proj_matricies[:, 2:3].expand(n_views, 2, 4).unsqueeze(0)
    points_tview = points_t.view(points_t.size(0), n_views, 2, 1).expand(points_t.size(0), n_views, 2, 4)
    A_all = proj_mat * points_tview
    A_all -= proj_matricies[:, :2].unsqueeze(0)
    A_all *= confidences.view(confidences.size(0), n_views, 1, 1)
    A_all = A_all.contiguous().view(A_all.size(0), A_all.size(1) * A_all.size(2), 4)
    U, S, V = svd(A_all)
    # Solution = right-singular vector of the smallest singular value; the
    # sign flip is irrelevant for homogeneous coordinates.
    points_3d_homo_all = -V[:, :, 3]
    points_3d = homogeneous_to_euclidean(points_3d_homo_all)
    return points_3d
def triangulate_batch_of_points(proj_matricies_batch, points_batch, confidences_batch=None):
    """Triangulate every element of a batch of point sets.

    Args:
        proj_matricies_batch: (B, N, 3, 4) projection matrices per batch element.
        points_batch: per-batch 2D observations.
        confidences_batch: optional per-batch confidences.

    Returns:
        List of length B with the triangulated 3D points of each element.
    """
    points_3d_batch = []
    for batch_i in range(proj_matricies_batch.shape[0]):
        if confidences_batch is None:
            conf = None
        else:
            conf = confidences_batch[batch_i]
        triangulated = triangulate_point_from_multiple_views_linear_torch_batch(
            proj_matricies_batch[batch_i], points_batch[batch_i], confidences=conf)
        points_3d_batch.append(triangulated)
    return points_3d_batch
def integrate_tensor_2d(heatmaps, softmax=True):
    """Soft-argmax: reduce each heatmap to the 2D "center of mass" of its
    activations.

    Args:
        heatmaps: tensor of shape (batch_size, n_heatmaps, h, w).
        softmax: when True, normalise with a spatial softmax (mass already
            sums to one); otherwise use ReLU activations and divide by the
            total mass explicitly.

    Returns:
        Tensor of shape (batch_size, n_heatmaps, 2) with (x, y) coordinates.
    """
    batch_size, n_heatmaps, h, w = heatmaps.shape
    flat = heatmaps.reshape((batch_size, n_heatmaps, -1))
    if softmax:
        flat = torch.nn.functional.softmax(flat, dim=2)
    else:
        flat = torch.nn.functional.relu(flat)
    normalized = flat.reshape((batch_size, n_heatmaps, h, w))
    # Marginal masses along each spatial axis.
    mass_x = normalized.sum(dim=2)
    mass_y = normalized.sum(dim=3)
    coord_x = torch.arange(w).type(torch.float).to(mass_x.device)
    coord_y = torch.arange(h).type(torch.float).to(mass_y.device)
    x = (mass_x * coord_x).sum(dim=2, keepdim=True)
    y = (mass_y * coord_y).sum(dim=2, keepdim=True)
    if not softmax:
        # ReLU path: normalise by the total mass explicitly.
        x = x / mass_x.sum(dim=2, keepdim=True)
        y = y / mass_y.sum(dim=2, keepdim=True)
    return torch.cat((x, y), dim=2).reshape((batch_size, n_heatmaps, 2))
def unproject_ij(keypoints_2d, z, camera_matrix):
    """Back-project pixel coordinates into 3D camera coordinates.

    Args:
        keypoints_2d: (B, N, 2) pixel coordinates.
        z: depth values; squeezed along dims 2 and 1 down to shape (B, N).
        camera_matrix: (B, 3, 3) intrinsics.

    Returns:
        (B, 3, N) tensor of (X, Y, Z) points.
    """
    depth = z.squeeze(2).squeeze(1)
    fx, fy = camera_matrix[:, [0], [0]], camera_matrix[:, [1], [1]]
    cx, cy = camera_matrix[:, [0], [2]], camera_matrix[:, [1], [2]]
    x = ((keypoints_2d[:, :, 0] - cx) / fx) * depth
    y = ((keypoints_2d[:, :, 1] - cy) / fy) * depth
    return torch.stack((x, y, depth), dim=1)
def reproject_points(pose, pts, intrinsic, Z):
    """Project reference-view pixels into other views for a fixed depth Z.

    Args:
        pose: (B, V, 4, 4) relative camera poses.
        pts: (B, N, 2) pixel coordinates in the reference view.
        intrinsic: (B, 3, 3) camera intrinsics shared across views.
        Z: scalar depth hypothesis.

    Returns:
        (B, V, 2, N) reprojected pixel coordinates.
    """
    homogeneous = torch.ones((pts.shape[0], pts.shape[1], 3)).to(pts.device)
    homogeneous[:, :, :2] = pts
    homogeneous = homogeneous.unsqueeze(1)
    K = intrinsic.unsqueeze(1)
    rotation = pose[:, :, :3, :3]
    translation = pose[:, :, :3, 3:]
    # Infinite-homography term plus the depth-scaled translation term.
    rotated = ((K @ rotation) @ (torch.inverse(K))) @ homogeneous.transpose(3, 2)
    shifted = K @ translation / Z
    projected = rotated + shifted
    projected = projected / projected[:, :, 2:, :]
    return projected[:, :, :2, :]
def patch_for_kp(keypoints, ker_size, out_length, roi_patch):
    """Build anchor patches (xc, yc, h, w, theta) centred on each keypoint.

    Height is fixed to `ker_size`, width scales the epipolar patch width
    stored in `roi_patch[..., 3]` by `ker_size / out_length`, and the
    rotation is zero.
    """
    n_view = roi_patch.shape[1]
    tiled = keypoints.unsqueeze(1).repeat(1, n_view, 1, 1)
    batch, n_points = tiled.shape[0], tiled.shape[2]
    center_x = tiled[:, :, :, 0]
    center_y = tiled[:, :, :, 1]
    height = torch.ones((batch, n_view, n_points)).to(roi_patch.device) * ker_size  # 3 #kernel_size
    width = ker_size * roi_patch[:, :, :, 3] / out_length
    rotation = torch.zeros((batch, n_view, n_points)).to(roi_patch.device)
    return torch.stack((center_x, center_y, height, width, rotation), 3)
def match_corr(embed_ref, embed_srch):
    """Match the two embeddings with a correlation (grouped conv) layer.

    Expects tensors of the form [B, C, H, W].

    Args:
        embed_ref: (torch.Tensor) reference embeddings, used as the conv
            kernels (one kernel per batch element via groups=B).
        embed_srch: (torch.Tensor) search-image embeddings.

    Returns:
        match_map: (torch.Tensor) per-batch correlation maps, shape
            (B, 1, H', W').
    """
    _, _, k1, k2 = embed_ref.shape
    b, c, h, w = embed_srch.shape
    # 1x1 kernels need no padding; larger kernels pad the width only.
    pad_img = (0, 0) if (k1 == 1 and k2 == 1) else (0, 1)
    stacked = embed_srch.contiguous().view(1, b * c, h, w)
    match_map = torch.nn.functional.conv2d(stacked, embed_ref, groups=b, padding=pad_img)
    return match_map.permute(1, 0, 2, 3)
def create_transform_matrix(roi_patch):
    """Build 3x3 rigid 2D transforms for the patches.

    Each matrix rotates by theta (`roi_patch[..., 4]`) and translates by
    (xc, yc) (`roi_patch[..., 0:2]`); output shape is (B, V, N, 3, 3).
    """
    b, v, n = roi_patch.shape[0], roi_patch.shape[1], roi_patch.shape[2]
    matrices = torch.zeros((b, v, n, 3, 3)).to(roi_patch.device)
    cos_t = torch.cos(roi_patch[:, :, :, 4])
    sin_t = torch.sin(roi_patch[:, :, :, 4])
    matrices[:, :, :, 0, 0] = cos_t
    matrices[:, :, :, 0, 1] = -sin_t
    matrices[:, :, :, 0, 2] = roi_patch[:, :, :, 0]
    matrices[:, :, :, 1, 0] = sin_t
    matrices[:, :, :, 1, 1] = cos_t
    matrices[:, :, :, 1, 2] = roi_patch[:, :, :, 1]
    matrices[:, :, :, 2, 2] = 1.0
    return matrices
def patch_sampler(roi_patch, out_length=640, distance=2, do_img=True, align_corners=False):
    """Creates, scales and aligns the patch.

    Samples a regular (out_length x (2*distance+1)) grid in the local frame
    of each patch and maps it into image coordinates via the patch's rigid
    transform.

    Args:
        roi_patch: (B, V, N, 5) patches as (xc, yc, h, w, theta).
        out_length: number of samples along the patch width.
        distance: half-extent of samples orthogonal to the patch axis.
        do_img, align_corners: accepted but unused in this function
            (kept for API symmetry with the callers).

    Returns:
        (patch_kp, transform_matrix): sampled 2D image locations and the
        (3, 3) per-patch transforms used to place them.
    """
    ##create a regular grid centered at xc,yc
    if out_length > 1:
        width_sample = torch.linspace(-0.5, 0.5, steps=out_length)
    else:
        width_sample = torch.tensor([0.])
    height_sample = torch.linspace(-distance, distance, steps=2 * distance + 1)
    xv, yv = torch.meshgrid([width_sample, height_sample])
    zv = torch.ones(xv.shape)
    patch_sample = torch.stack((xv, yv, zv), 2).to(roi_patch.device)
    arange_array = patch_sample.repeat(roi_patch.shape[0], roi_patch.shape[1], roi_patch.shape[2], 1, 1, 1)
    ## scaling the x dimension to ensure unform sampling
    arange_array[:, :, :, :, :, 0] = (roi_patch[:, :, :, [3]].unsqueeze(4)) * arange_array[:, :, :, :, :, 0]
    aras = arange_array.shape
    # Flatten the grid so each patch becomes a (3, out_length*(2d+1)) matrix.
    arange_array = arange_array.contiguous().view(aras[0], aras[1], aras[2], aras[3] * aras[4], aras[5]).transpose(4, 3)
    # create matrix transform
    transform_matrix = create_transform_matrix(roi_patch)
    # transform
    patch_kp = transform_matrix @ arange_array
    patch_kp = patch_kp.view(aras[0], aras[1], aras[2], aras[5], aras[3], aras[4])
    # Drop the homogeneous row, keep (x, y) per sample.
    patch_kp = patch_kp[:, :, :, :2, :, :].transpose(5, 3)
    return patch_kp, transform_matrix
def patch_for_depth_guided_range(keypoints, pose, intrinsic, img_shape, distance=2, min_depth=0.5, max_depth=10.0, align_corners=False):
    """Represents search patch for a key-point using xc,yc, h,w, theta.

    The patch endpoints come from reprojecting each keypoint at min_depth and
    max_depth into the other views; the patch angle follows the epipolar
    line. Patches whose endpoints fall outside the image are zeroed out
    (width 0), which downstream code uses as an invalid-patch marker.
    """
    # get epilines
    n_view = pose.shape[1]
    pts = keypoints
    kp_arr = torch.ones((pts.shape[0], pts.shape[1], 3)).to(pts.device)
    kp_arr[:, :, :2] = pts
    kp_arr = kp_arr.unsqueeze(1)
    Fund, _ = get_fundamental_matrix(pose, intrinsic, intrinsic)
    lines_epi = (Fund @ (kp_arr.transpose(3, 2))).transpose(3, 2)
    # image shape
    height = img_shape[2]
    width = img_shape[3]
    # default intercepts
    array_zeros = torch.zeros((pts.shape[0], n_view, pts.shape[1])).to(pts.device)
    array_ones = torch.ones((pts.shape[0], n_view, pts.shape[1])).to(pts.device)  # NOTE(review): unused
    x2ord = array_zeros.clone().detach()
    y2ord = array_zeros.clone().detach()
    x3ord = array_zeros.clone().detach()
    y3ord = array_zeros.clone().detach()
    x0_f = array_zeros.clone().detach()
    y0_f = array_zeros.clone().detach()
    x1_f = array_zeros.clone().detach()
    y1_f = array_zeros.clone().detach()
    ##get x2,x3 and order
    # Endpoints of the depth-bounded epipolar segment.
    x2_y2 = reproject_points(pose, keypoints, intrinsic, min_depth)
    x2 = x2_y2[:, :, 0, :]
    y2 = x2_y2[:, :, 1, :]
    x3_y3 = reproject_points(pose, keypoints, intrinsic, max_depth)
    x3 = x3_y3[:, :, 0, :]
    y3 = x3_y3[:, :, 1, :]
    # Order the endpoints so (x2ord, y2ord) is always the left one.
    x_ord = x3 >= x2
    x2ord[x_ord] = x2[x_ord]
    y2ord[x_ord] = y2[x_ord]
    x3ord[x_ord] = x3[x_ord]
    y3ord[x_ord] = y3[x_ord]
    cx_ord = x2 > x3
    x2ord[cx_ord] = x3[cx_ord]
    y2ord[cx_ord] = y3[cx_ord]
    x3ord[cx_ord] = x2[cx_ord]
    y3ord[cx_ord] = y2[cx_ord]
    # Keep only segments whose endpoints both lie inside the image bounds;
    # align_corners toggles the pixel-center convention.
    if align_corners:
        x_ord0 = (x2ord >= 0) & (x2ord < width)
        x_ord1 = (x3ord >= 0) & (x3ord < width)
        y_ord0 = (y2ord >= 0) & (y2ord < height)
        y_ord1 = (y3ord >= 0) & (y3ord < height)
    else:
        x_ord0 = (x2ord >= -0.5) & (x2ord < (width - 0.5))
        x_ord1 = (x3ord >= -0.5) & (x3ord < (width - 0.5))
        y_ord0 = (y2ord >= -0.5) & (y2ord < (height - 0.5))
        y_ord1 = (y3ord >= -0.5) & (y3ord < (height - 0.5))
    all_range = x_ord0 & x_ord1 & y_ord0 & y_ord1
    x0_f[all_range] = x2ord[all_range]
    y0_f[all_range] = y2ord[all_range]
    x1_f[all_range] = x3ord[all_range]
    y1_f[all_range] = y3ord[all_range]
    cond_null = ~all_range
    x0_f[cond_null] = array_zeros.clone().detach()[cond_null]
    y0_f[cond_null] = array_zeros.clone().detach()[cond_null]
    x1_f[cond_null] = array_zeros.clone().detach()[cond_null]
    y1_f[cond_null] = array_zeros.clone().detach()[cond_null]
    ## find box representation using #xc,yc, h,w, theta
    xc = (x0_f + x1_f) / 2.
    yc = (y0_f + y1_f) / 2.
    h = torch.ones((pts.shape[0], n_view, pts.shape[1])).to(pts.device) * max(2 * distance, 1)
    w = torch.sqrt((x1_f - x0_f) ** 2 + (y1_f - y0_f) ** 2)
    theta = torch.atan2(-lines_epi[:, :, :, 0], lines_epi[:, :, :, 1])
    # NOTE(review): debugger left in the production path -- fires whenever
    # theta contains NaNs; consider replacing with an assertion or logging.
    if torch.sum(torch.isnan(theta)):
        import pdb;
        pdb.set_trace()
    roi_patch = torch.stack((xc, yc, h, w, theta), 3)
    return roi_patch
def sample_descriptors_epi(keypoints, descriptors, s, normalize=True, align_corner=False):
    """Samples descriptors at point locations.

    Bilinearly samples the dense descriptor map at (sub-pixel) keypoint
    locations, accounting for the map's downsampling stride `s`, and
    optionally L2-normalises the sampled descriptors channel-wise.
    """
    b, c, h, w = descriptors.shape
    # Shift to the descriptor grid's pixel centers, then normalise to the
    # [-1, 1] range expected by grid_sample.
    keypoints = keypoints - s / 2 + 0.5
    keypoints /= torch.tensor([(w * s - s / 2 - 0.5), (h * s - s / 2 - 0.5)], device=keypoints.device)[None]
    keypoints = keypoints * 2 - 1
    # 4-D keypoints keep their own sample grid; 3-D ones are flattened.
    if len(keypoints.shape) == 4:
        descriptors = torch.nn.functional.grid_sample(descriptors, keypoints.view(b, keypoints.shape[1], keypoints.shape[2], 2), mode='bilinear',
                                                      align_corners=align_corner) ##pythorch 1.3+
    elif len(keypoints.shape) == 3:
        descriptors = torch.nn.functional.grid_sample(descriptors, keypoints.view(b, 1, -1, 2), mode='bilinear', align_corners=align_corner) ##pythorch 1.3+
    if normalize:
        descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1)
    return descriptors
def vec_to_skew_symmetric(v):
    """Return the (B, 3, 3) skew-symmetric cross-product matrices [v]_x of
    a batch of (B, 3) vectors."""
    zero = torch.zeros_like(v[:, 0])
    rows = [zero, -v[:, 2], v[:, 1],
            v[:, 2], zero, -v[:, 0],
            -v[:, 1], v[:, 0], zero]
    return torch.stack(rows, dim=1).reshape(-1, 3, 3)
def get_fundamental_matrix(T_10, K0, K1):
    """Generates fundamental matrix.

    Builds E = [t]_x R from the relative pose T_10 and maps it through the
    intrinsics: F = K1^-T E K0^-1, then normalises each F by its bottom-right
    entry.

    Returns:
        (Fu, E): the fundamental and essential matrices.
    """
    ##Expects BX3x3 matrix
    k0 = torch.inverse(K0)
    k1 = torch.inverse(K1).transpose(1, 2)
    k0 = k0.unsqueeze(1)
    k1 = k1.unsqueeze(1)
    T_10 = T_10.view(-1, 4, 4)
    t_skew = vec_to_skew_symmetric(T_10[:, :3, 3])
    E = t_skew @ T_10[:, :3, :3] ##Essential matrix
    E = E.view(k0.shape[0], -1, 3, 3)
    Fu = (k1 @ E) @ k0 ##Fundamental matrix
    # F_norm is a view into Fu; replacing zeros with 1 in place avoids a
    # division by zero in the normalisation below.
    F_norm = Fu[:, :, 2:, 2:]
    F_norm[F_norm == 0.] = 1.
    Fu = Fu / F_norm ##normalize it
    return Fu, E
class TriangulationNet(BaseModel):
    """Triangulation module.

    Matches each reference keypoint along depth-bounded epipolar search
    patches in the other views (via descriptor cross-correlation and a
    soft-argmax), then triangulates the matches into 3D points with a
    confidence-weighted DLT.
    """
    default_config = {
        'depth_range': True,
        'arg_max_weight': 1.0,  # temperature multiplier for the soft-argmax
        'dist_ortogonal': 1,  # half-width of the search patch orthogonal to the epipolar line
        'kernel_size': 1,  # size of the anchor descriptor patch
        'out_length': 100,  # number of samples along the epipolar segment
        'has_confidence': True,
        'min_depth': 0.5,
        'max_depth': 10.0,
        'align_corners': False,
    }
    def _init(self):
        # Lightweight heads; the heavy lifting happens in _forward.
        self.relu = torch.nn.ReLU(inplace=False)
        self.bn_match_convD = torch.nn.BatchNorm2d(1)
        ##confidence layers
        pool_shape = (self.config['out_length'], 1 + (5 - self.config['kernel_size']))
        pad_shape = (0, 1) if self.config['dist_ortogonal'] == 2 else (1, 1)
        if self.config['has_confidence']:
            self.convD_confa = torch.nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=pad_shape)
            self.bnconvD_confa = torch.nn.BatchNorm2d(1)
            self.pool_convD_conf = torch.nn.MaxPool2d(pool_shape, stride=self.config['out_length'], return_indices=False)
    def _forward(self, data):
        """Match keypoints across views and triangulate them.

        Consumes a dict with pose/intrinsics/descriptors/keypoints/depth
        entries and returns a dict with matches, confidences, triangulated
        points and ground-truth 3D keypoints.
        """
        pose = data['pose']
        intrinsic = data['intrinsics']
        img_shape = data['img_shape']
        desc = data['descriptors']
        desc_views = data['descriptors_views']
        sequence_length = data['sequence_length']
        keypoints = data['keypoints']
        depth_all = data['depth']
        depth_ref = data['ref_depths']
        del data
        # Stride between image resolution and descriptor-map resolution.
        st = img_shape[2] // desc.shape[2]
        dist = self.config['dist_ortogonal']
        ker_size = self.config['kernel_size']
        out_length = self.config['out_length']
        pred = {}
        pred['keypoints'] = keypoints
        ## Creates patches for matching
        depth_at_kp = sample_descriptors_epi(keypoints, depth_all.unsqueeze(1), 1, False, self.config['align_corners'])
        roi_patch = patch_for_depth_guided_range(keypoints, pose, intrinsic, img_shape, distance=dist, min_depth=self.config['min_depth'],
                                                 max_depth=self.config['max_depth'], align_corners=self.config['align_corners'])
        keypoint_patch = patch_for_kp(keypoints, ker_size, out_length, roi_patch)
        ## Extract sampled keypoints
        kp_image, transform_matrix = patch_sampler(roi_patch, out_length=out_length, distance=dist, do_img=True, align_corners=self.config['align_corners'])
        kp_anchor, _ = patch_sampler(keypoint_patch, out_length=ker_size, distance=ker_size // 2, do_img=False, align_corners=self.config['align_corners'])
        ## Reshape along batch dimenstion
        kp_image_shp = kp_image.shape
        kp_image = kp_image.contiguous().view(kp_image_shp[0] * kp_image_shp[1], kp_image_shp[2], kp_image_shp[3] * kp_image_shp[4], kp_image_shp[5])
        kp_anchor_shp = kp_anchor.shape
        kp_anchor = kp_anchor.contiguous().view(kp_anchor_shp[0] * kp_anchor_shp[1], kp_image_shp[2], kp_anchor_shp[3] * kp_anchor_shp[4], kp_anchor_shp[5])
        ## Sample
        desc_views_shp = desc_views.shape
        desc_views = desc_views.reshape(desc_views_shp[0] * desc_views_shp[1], desc_views_shp[2], desc_views_shp[3], desc_views_shp[4])
        descriptor_at_image = sample_descriptors_epi(kp_image.detach(), desc_views, st, True, self.config['align_corners'])
        descriptor_at_anchor = sample_descriptors_epi(kp_anchor.detach(), desc.repeat_interleave(sequence_length, dim=0), st, True,
                                                      self.config['align_corners'])
        del kp_image, kp_anchor, keypoint_patch, desc, desc_views
        descriptor_at_anchor = descriptor_at_anchor.contiguous().view(descriptor_at_anchor.shape[0], descriptor_at_anchor.shape[1], kp_anchor_shp[2],
                                                                      kp_anchor_shp[3], kp_anchor_shp[4])
        descriptor_at_image = descriptor_at_image.contiguous().view(descriptor_at_image.shape[0], descriptor_at_image.shape[1], kp_image_shp[2],
                                                                    kp_image_shp[3], kp_image_shp[4])
        descriptor_at_anchor = descriptor_at_anchor.transpose(2, 1)
        descriptor_at_image = descriptor_at_image.transpose(2, 1)
        dancs = descriptor_at_anchor.shape
        dimgs = descriptor_at_image.shape
        descriptor_at_anchor = descriptor_at_anchor.contiguous().view(dancs[0] * dancs[1], dancs[2], dancs[3], dancs[4])
        descriptor_at_image = descriptor_at_image.contiguous().view(dimgs[0] * dimgs[1], dimgs[2], dimgs[3], dimgs[4])
        ## Do cross correlation
        match_map = match_corr(descriptor_at_anchor, descriptor_at_image)
        match_map = self.bn_match_convD(match_map)
        match_map = self.relu(match_map)
        del descriptor_at_anchor, descriptor_at_image
        if self.config['has_confidence']:
            # Confidence = sigmoid of the peak correlation, zeroed (almost)
            # for patches whose width is 0, i.e. invalid search ranges.
            conf_da = match_map
            conf_da = torch.nn.functional.adaptive_max_pool2d(conf_da, (1, 1))
            conf_da = conf_da.contiguous().view(kp_image_shp[0], kp_image_shp[1], -1)
            sc_factor = 1.0
            conf_da = torch.sigmoid(sc_factor * conf_da)
            conf_damp = roi_patch[:, :, :, 3] > 0.
            conf_da = conf_da * (conf_damp.float() + 0.001)
            # The reference view always gets full confidence.
            self_confidence = torch.ones((conf_da.shape[0], 1, conf_da.shape[2])).to(conf_da.device)
            conf_da = torch.cat((self_confidence, conf_da), 1)
            conf_da = conf_da.transpose(2, 1)
            pred['confidence'] = conf_da
        else:
            pred['confidence'] = None
        ## SOFTARGMAX
        out_kp_match = integrate_tensor_2d(match_map * self.config['arg_max_weight'], True)
        ## Change from local coordinates to image coordinates
        out_kp_match /= torch.tensor([match_map.shape[3] - 1., max(match_map.shape[2] - 1., 1.)], device=out_kp_match.device)[None]
        if match_map.shape[2] == 1:
            sub_roi = (torch.tensor([0.5, 0.]).unsqueeze(0).unsqueeze(1)).to(out_kp_match.device)
        else:
            sub_roi = 0.5
        out_kp_match -= sub_roi
        out_ones = torch.ones((out_kp_match.shape[0], 1, 1)).to(out_kp_match.device)
        out_kp_match = torch.cat((out_kp_match, out_ones), 2)
        out_kp_match = out_kp_match.view(kp_image_shp[0], kp_image_shp[1], kp_image_shp[2], 3)
        ## scale the local x coordinate to match sampling frequency
        mult_0 = roi_patch[:, :, :, [3]]
        mult_1 = torch.ones_like(mult_0)
        mult_1[mult_0 == 0.] = 0.
        roi_mult = torch.cat((mult_0, mult_1, mult_1), 3)
        out_kp_match *= roi_mult
        range_kp = roi_patch[:, :, :, 3] > 0.
        pred['range_kp'] = range_kp
        ##global coordinates
        val_kp_match = ((transform_matrix @ out_kp_match.unsqueeze(4))[:, :, :, :2, :]).squeeze(4)
        pred['multiview_matches'] = val_kp_match
        del out_kp_match, transform_matrix, match_map
        ## 3d GT
        keypoints_3d_gt = unproject_ij(keypoints, depth_at_kp, intrinsic)
        pred['keypoints3d_gt'] = keypoints_3d_gt.transpose(2, 1)
        #### Triangulation
        pose_tiled = pose[:, :, :3, :]
        intrinsic_tiled = intrinsic
        confidence = pred['confidence']
        anchor_keypoints = keypoints.unsqueeze(1)
        multiview_matches = torch.cat((anchor_keypoints, val_kp_match), 1)
        # Projection matrices: identity for the reference view, K [R|t] for
        # each measurement view.
        projection_mat = []
        projection_ref = []
        proj_identity = torch.tensor([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.]])
        if torch.cuda.is_available():
            proj_identity = proj_identity.cuda()
        for batch_idx in range(pose_tiled.size(0)):
            proj_ref_idx = torch.mm(intrinsic_tiled[batch_idx], proj_identity).unsqueeze(0)
            projection_ref.append(proj_ref_idx)
            projection_mat_view = []
            for j in range(sequence_length):
                proj_mat_idx = torch.mm(intrinsic_tiled[batch_idx], pose_tiled[batch_idx][j]).unsqueeze(0)
                projection_mat_view.append(proj_mat_idx)
            projection_mat_view = torch.cat(projection_mat_view, 0).unsqueeze(0)
            projection_mat.append(projection_mat_view)
        projection_mat = torch.cat(projection_mat, 0)
        projection_ref = torch.cat(projection_ref, 0).unsqueeze(1)
        proj_matrices = torch.cat([projection_ref, projection_mat], 1)
        del projection_ref, projection_mat
        if self.config['has_confidence']:
            keypoints_3d = triangulate_batch_of_points(proj_matrices, multiview_matches, confidence)
        else:
            keypoints_3d = triangulate_batch_of_points(proj_matrices, multiview_matches)
        keypoints_3d = torch.stack(keypoints_3d, 0)
        # Guard against degenerate triangulations blowing up downstream.
        if torch.sum(torch.isinf(keypoints_3d)) > 0:
            keypoints_3d = torch.clamp(keypoints_3d, min=-1000.0, max=1000.0)
        pred['keypoints_3d'] = keypoints_3d
        return pred
    def loss(self, pred, data):
        # Training loss is not implemented for this inference-oriented module.
        raise NotImplementedError
    def metrics(self):
        # Metrics are not implemented for this inference-oriented module.
        raise NotImplementedError
| 22,576 | 37.527304 | 157 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/encoder.py | import torch
import torch.nn as nn
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two 'same'-padded convolutions (the second with stride 2), each
    followed by BatchNorm and ReLU; halves the spatial resolution once."""
    pad = (kernel_size - 1) // 2
    stages = [
        nn.Conv2d(input_channels, output_channels, kernel_size, padding=pad, stride=1, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
        nn.Conv2d(output_channels, output_channels, kernel_size, padding=pad, stride=2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*stages)
def conv_layer(input_channels, output_channels, kernel_size):
    """One 'same'-padded convolution (no bias) followed by BN and ReLU."""
    pad = (kernel_size - 1) // 2
    conv = nn.Conv2d(input_channels, output_channels, kernel_size, padding=pad, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(output_channels), nn.ReLU())
def depth_layer(input_channels):
    """3x3 convolution down to one channel, squashed to (0, 1) by a sigmoid."""
    conv = nn.Conv2d(input_channels, 1, 3, padding=1)
    return nn.Sequential(conv, nn.Sigmoid())
def refine_layer(input_channels):
    """Single-channel 3x3 refinement convolution (no activation, unbounded)."""
    return nn.Conv2d(in_channels=input_channels, out_channels=1, kernel_size=3, padding=1)
def up_conv_layer(input_channels, output_channels, kernel_size):
    """2x bilinear upsampling followed by a 'same' convolution, BN and ReLU."""
    pad = (kernel_size - 1) // 2
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn.Conv2d(input_channels, output_channels, kernel_size, padding=pad, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    )
def get_trainable_number(variable):
    """Return the number of scalar elements in *variable*.

    Computes the product of the dimensions of ``variable.shape`` -- works for
    any object exposing a shape sequence (torch tensors, numpy arrays).
    An empty shape (a 0-d scalar) yields 1, matching the previous loop-based
    implementation.
    """
    import math  # local import keeps this helper self-contained
    return math.prod(variable.shape)
class Encoder(nn.Module):
    """Five-stage downsampling encoder over the image + cost-volume stack.

    The input is the image concatenated with the plane-sweep volume
    (67 channels total -- presumably 3 image + 64 cost-volume channels;
    confirm against the caller). Each stage halves the spatial resolution.
    """

    def __init__(self):
        super(Encoder, self).__init__()
        # (in_channels, out_channels, kernel_size) for conv1..conv5.
        channel_plan = [(67, 128, 7), (128, 256, 5), (256, 512, 3), (512, 512, 3), (512, 512, 3)]
        for index, (c_in, c_out, kernel) in enumerate(channel_plan, start=1):
            setattr(self, "conv%d" % index, down_conv_layer(c_in, c_out, kernel))

    def forward(self, image, plane_sweep_volume):
        """Return the five stage outputs as a list, deepest first."""
        features = torch.cat((image, plane_sweep_volume), 1)
        skips = []
        for index in range(1, 6):
            features = getattr(self, "conv%d" % index)(features)
            skips.append(features)
        return skips[::-1]

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Encoder, self).train(mode)
        self.apply(freeze_batchnorm)
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/run-testing.py | from copy import deepcopy
import cv2
import numpy as np
import torch
from path import Path
from scipy.linalg import expm
from tqdm import tqdm
from dvmvs.baselines.gpmvs.decoder import Decoder
from dvmvs.baselines.gpmvs.encoder import Encoder
from dvmvs.baselines.gpmvs.gplayer import GPlayer
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import cost_volume_fusion, pose_distance, save_results, InferenceTimer, visualize_predictions, get_warp_grid_for_cost_volume_calculation
def predict():
    """Run GPMVS offline depth prediction over the configured test scenes.

    Loads the encoder/decoder (optionally finetuned) weights, then for each
    keyframe index file: builds a cost volume per keyframe, encodes it,
    fuses the bottleneck features over time with a pose-distance-driven
    Kalman filter (the GP prior in state-space form), decodes a depth map,
    and saves per-scene predictions via save_results().
    """
    predict_with_finetuned = True
    if predict_with_finetuned:
        extension = "finetuned"
    else:
        extension = "without_ft"
    input_image_width = 320
    input_image_height = 256
    print("System: GPMVS, is_finetuned = ", predict_with_finetuned)
    device = torch.device('cuda')
    # Finetuned checkpoints store the state dict directly; the original
    # release wraps it in a 'state_dict' key.
    if predict_with_finetuned:
        encoder_weights = torch.load(Path("finetuned-weights").files("*encoder*")[0])
        gp_weights = torch.load(Path("finetuned-weights").files("*gplayer*")[0])
        decoder_weights = torch.load(Path("finetuned-weights").files("*decoder*")[0])
    else:
        encoder_weights = torch.load(Path("original-weights").files("*encoder*")[0])['state_dict']
        gp_weights = torch.load(Path("original-weights").files("*gplayer*")[0])['state_dict']
        decoder_weights = torch.load(Path("original-weights").files("*decoder*")[0])['state_dict']
    encoder = Encoder()
    encoder = torch.nn.DataParallel(encoder)
    encoder.load_state_dict(encoder_weights)
    encoder.eval()
    encoder = encoder.to(device)
    decoder = Decoder()
    decoder = torch.nn.DataParallel(decoder)
    decoder.load_state_dict(decoder_weights)
    decoder.eval()
    decoder = decoder.to(device)
    # load GP values
    # NOTE(review): gplayer itself is never called below -- only the scalar
    # hyperparameters extracted from its weights are used.
    gplayer = GPlayer(device=device)
    gplayer.load_state_dict(gp_weights)
    gplayer.eval()
    # GP hyperparameters are stored in log space.
    gamma2 = np.exp(gp_weights['gamma2'][0].item())
    ell = np.exp(gp_weights['ell'][0].item())
    sigma2 = np.exp(gp_weights['sigma2'][0].item())
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=input_image_width,
                                                          height=input_image_height,
                                                          device=device)
    min_depth = 0.5
    max_depth = 50.0
    n_depth_levels = 64
    scale_rgb = 1.0
    mean_rgb = [81.0, 81.0, 81.0]
    std_rgb = [35.0, 35.0, 35.0]
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    # NOTE(review): only files from index 20 onward are processed, and the
    # progress print below still reports the unsliced total -- this looks
    # like a resume/debug leftover; confirm before relying on full results.
    for iteration, keyframe_index_file in enumerate(keyframe_index_files[20:]):
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        # State-space form of the GP prior (companion-form SDE given
        # lam = sqrt(3)/ell -- presumably a Matern-3/2 kernel; confirm
        # against the GPMVS paper).
        lam = np.sqrt(3) / ell
        F = np.array([[0, 1], [-lam ** 2, -2 * lam]])
        Pinf = np.array([[gamma2, 0], [0, gamma2 * lam ** 2]])
        h = np.array([[1], [0]])
        # State mean and covariance
        M = np.zeros((F.shape[0], 512 * 8 * 10))
        P = Pinf
        inference_timer = InferenceTimer()
        previous_index = None
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                reference_index = current_indices[0]
                measurement_indices = current_indices[1:]
                reference_pose = poses[reference_index]
                reference_image = load_image(image_filenames[reference_index])
                # Depth PNGs store millimetres; convert to metres.
                reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                preprocessor = PreprocessImage(K=K,
                                               old_width=reference_image.shape[1],
                                               old_height=reference_image.shape[0],
                                               new_width=input_image_width,
                                               new_height=input_image_height,
                                               distortion_crop=0,
                                               perform_crop=False)
                reference_image = preprocessor.apply_rgb(image=reference_image,
                                                         scale_rgb=scale_rgb,
                                                         mean_rgb=mean_rgb,
                                                         std_rgb=std_rgb)
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
                measurement_poses_torch = []
                measurement_images_torch = []
                for measurement_index in measurement_indices:
                    measurement_image = load_image(image_filenames[measurement_index])
                    measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                               scale_rgb=scale_rgb,
                                                               mean_rgb=mean_rgb,
                                                               std_rgb=std_rgb)
                    measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                    measurement_images_torch.append(measurement_image_torch)
                    measurement_poses_torch.append(measurement_pose_torch)
                full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                inference_timer.record_start_time()
                cost_volume = cost_volume_fusion(image1=reference_image_torch,
                                                 image2s=measurement_images_torch,
                                                 pose1=reference_pose_torch,
                                                 pose2s=measurement_poses_torch,
                                                 K=full_K_torch,
                                                 warp_grid=warp_grid,
                                                 min_depth=min_depth,
                                                 max_depth=max_depth,
                                                 n_depth_levels=n_depth_levels,
                                                 device=device,
                                                 dot_product=False)
                conv5, conv4, conv3, conv2, conv1 = encoder(reference_image_torch, cost_volume)
                batch, channel, height, width = conv5.size()
                # Flattened bottleneck features are the Kalman observation.
                y = np.expand_dims(conv5.cpu().numpy().flatten(), axis=0)
                if previous_index is None:
                    previous_index = measurement_index
                # Kalman predict step: transition over the pose distance dt.
                dt, _, _ = pose_distance(poses[reference_index], poses[previous_index])
                A = expm(F * dt)
                Q = Pinf - A.dot(Pinf).dot(A.T)
                M = A.dot(M)
                P = A.dot(P).dot(A.T) + Q
                # Update step
                v = y - h.T.dot(M)
                s = h.T.dot(P).dot(h) + sigma2
                k = P.dot(h) / s
                M += k.dot(v)
                P -= k.dot(h.T).dot(P)
                # Decode the fused (filtered) bottleneck back into depth.
                Z = torch.from_numpy(M[0]).view(batch, channel, height, width).float().to(device)
                Z = torch.nn.functional.relu(Z)
                prediction, _, _, _ = decoder(Z, conv4, conv3, conv2, conv1)
                # Decoder outputs inverse depth; clamp then invert.
                prediction = torch.clamp(prediction, min=0.02, max=2.0)
                prediction = 1 / prediction
                inference_timer.record_end_time_and_elapsed_time()
                prediction = prediction.cpu().numpy().squeeze()
                previous_index = deepcopy(reference_index)
                reference_depths.append(reference_depth)
                predictions.append(prediction)
                if Config.test_visualize:
                    visualize_predictions(numpy_reference_image=reference_image,
                                          numpy_measurement_image=measurement_image,
                                          numpy_predicted_depth=prediction,
                                          normalization_mean=mean_rgb,
                                          normalization_std=std_rgb,
                                          normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_gpmvs_{}".format(keyframing_type,
                                                       dataset_name,
                                                       input_image_width,
                                                       input_image_height,
                                                       n_measurement_frames,
                                                       extension)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
if __name__ == '__main__':
    # Script entry point: run offline depth prediction over the test scenes.
    predict()
| 10,783 | 45.283262 | 153 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two conv->BN->ReLU stages; the second convolution downsamples by 2.

    Spatial padding keeps the stride-1 stage size-preserving.
    """
    same_padding = (kernel_size - 1) // 2
    stages = [
        nn.Conv2d(input_channels,
                  output_channels,
                  kernel_size,
                  padding=same_padding,
                  stride=1,
                  bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
        nn.Conv2d(output_channels,
                  output_channels,
                  kernel_size,
                  padding=same_padding,
                  stride=2,
                  bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*stages)
def conv_layer(input_channels, output_channels, kernel_size):
    """Single conv->BN->ReLU stage that preserves the spatial resolution."""
    same_padding = (kernel_size - 1) // 2
    stage = nn.Sequential(
        nn.Conv2d(input_channels,
                  output_channels,
                  kernel_size,
                  padding=same_padding,
                  bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU())
    return stage
def depth_layer(input_channels):
    """Predict a single-channel map squashed into (0, 1) by a sigmoid."""
    head = nn.Sequential(
        nn.Conv2d(input_channels, 1, 3, padding=1),
        nn.Sigmoid())
    return head
def refine_layer(input_channels):
    """Plain 3x3 convolution producing an unbounded single-channel map."""
    refinement_head = nn.Conv2d(input_channels, 1, 3, padding=1)
    return refinement_head
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsampling followed by a conv->BN->ReLU stage."""
    same_padding = (kernel_size - 1) // 2
    stages = [
        nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
        nn.Conv2d(input_channels,
                  output_channels,
                  kernel_size,
                  padding=same_padding,
                  bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*stages)
def get_trainable_number(variable):
    """Return the number of scalar elements implied by *variable*'s shape."""
    total = 1
    for dim_size in list(variable.shape):
        total = total * dim_size
    return total
class Decoder(nn.Module):
    """Multi-scale depth decoder with skip connections from the encoder.

    forward() consumes the five encoder feature maps (conv5 is the deepest)
    and returns four disparity maps at increasing resolution, each predicted
    by a sigmoid head scaled by 2.0.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        self.upconv5 = up_conv_layer(512, 512, 3)
        self.iconv5 = conv_layer(1024, 512, 3)  # input upconv5 + conv4
        self.upconv4 = up_conv_layer(512, 512, 3)
        self.iconv4 = conv_layer(1024, 512, 3)  # input upconv4 + conv3
        self.disp4 = depth_layer(512)
        self.upconv3 = up_conv_layer(512, 256, 3)
        self.iconv3 = conv_layer(
            513, 256, 3)  # input upconv3 + conv2 + disp4 = 256 + 256 + 1 = 513
        self.disp3 = depth_layer(256)
        self.upconv2 = up_conv_layer(256, 128, 3)
        self.iconv2 = conv_layer(
            257, 128, 3)  # input upconv2 + conv1 + disp3 = 128 + 128 + 1 = 257
        self.disp2 = depth_layer(128)
        self.upconv1 = up_conv_layer(128, 64, 3)
        self.iconv1 = conv_layer(65, 64,
                                 3)  # input upconv1 + disp2 = 64 + 1 = 65
        self.disp1 = depth_layer(64)

    def forward(self, conv5, conv4, conv3, conv2, conv1):
        """Return [disp1, disp2, disp3, disp4], finest resolution first.

        Coarser disparities are upsampled (udispN) and fed into the next
        finer stage together with the matching encoder skip feature.
        """
        upconv5 = self.upconv5(conv5)
        iconv5 = self.iconv5(torch.cat((upconv5, conv4), 1))
        upconv4 = self.upconv4(iconv5)
        iconv4 = self.iconv4(torch.cat((upconv4, conv3), 1))
        disp4 = 2.0 * self.disp4(iconv4)
        udisp4 = F.interpolate(disp4, scale_factor=2)
        upconv3 = self.upconv3(iconv4)
        iconv3 = self.iconv3(torch.cat((upconv3, conv2, udisp4), 1))
        disp3 = 2.0 * self.disp3(iconv3)
        udisp3 = F.interpolate(disp3, scale_factor=2)
        upconv2 = self.upconv2(iconv3)
        iconv2 = self.iconv2(torch.cat((upconv2, conv1, udisp3), 1))
        disp2 = 2.0 * self.disp2(iconv2)
        udisp2 = F.interpolate(disp2, scale_factor=2)
        upconv1 = self.upconv1(iconv2)
        iconv1 = self.iconv1(torch.cat((upconv1, udisp2), 1))
        disp1 = 2.0 * self.disp1(iconv1)
        return [disp1, disp2, disp3, disp4]

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Decoder, self).train(mode)
        self.apply(freeze_batchnorm)
| 3,920 | 28.044444 | 80 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/gplayer.py | import math
import torch
from dvmvs.utils import freeze_batchnorm
class GPlayer(torch.nn.Module):
    """Gaussian-process fusion layer over a sequence of encoder latents.

    Learns log-parameterized Matern-3/2 kernel hyperparameters (gamma2, ell)
    and a noise variance (sigma2); forward() smooths the stacked latents
    through the GP posterior mean.
    """

    def __init__(self, device):
        super(GPlayer, self).__init__()
        # Parameters are stored in log-space and exponentiated in forward()
        # to keep them positive.
        self.gamma2 = torch.nn.Parameter(torch.randn(1).to(device).float(), requires_grad=True)
        self.ell = torch.nn.Parameter(torch.randn(1).to(device).float(), requires_grad=True)
        self.sigma2 = torch.nn.Parameter(torch.randn(1).to(device).float(), requires_grad=True)
        self.device = device

    def forward(self, D, Y):
        """
        :param D: Distance matrix
        :param Y: Stacked outputs from encoder
        :return: Z: transformed latent space
            NOTE: returned with the spatial dims flattened,
            shape [batch, latents, channel*height*width].
        """
        # Support for these operations on Half precision is low at the moment, handle everything in Float precision
        batch, latents, channel, height, width = Y.size()
        Y = Y.view(batch, latents, -1).float()
        D = D.to(self.device).float()
        # MATERN CLASS OF COVARIANCE FUNCTION
        # ell > 0, gamma2 > 0, sigma2 > 0 : EXPONENTIATE THEM !!!
        K = torch.exp(self.gamma2) * (1 + math.sqrt(3) * D / torch.exp(self.ell)) * torch.exp(-math.sqrt(3) * D / torch.exp(self.ell))
        I = torch.eye(latents, device=self.device, dtype=torch.float32).expand(batch, latents, latents)
        C = K + torch.exp(self.sigma2) * I
        Cinv = C.inverse()
        # GP posterior mean: Z = K (K + sigma2*I)^-1 Y, then clamp to >= 0.
        Z = K.bmm(Cinv).bmm(Y)
        Z = torch.nn.functional.relu(Z)
        return Z

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(GPlayer, self).train(mode)
        self.apply(freeze_batchnorm)
| 1,637 | 37.093023 | 134 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/dpsnet/run-testing.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.baselines.dpsnet.dpsnet import PSNet
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import save_results, InferenceTimer, visualize_predictions
def predict():
    """Run DPSNet offline over every keyframe-index file and save results.

    For each scene: loads intrinsics, poses, images and ground-truth depths,
    builds reference/measurement batches per keyframe line, runs the network
    on CUDA, and stores predictions with `save_results`. No return value.
    """
    predict_with_finetuned = True
    if predict_with_finetuned:
        extension = "finetuned"
    else:
        extension = "without_ft"
    input_image_width = 320
    input_image_height = 240
    print("System: DPSNET, is_finetuned = ", predict_with_finetuned)
    device = torch.device('cuda')
    dpsnet = PSNet(64, 0.5)
    # Finetuned checkpoints store the raw state dict; original ones wrap it.
    if predict_with_finetuned:
        weights = torch.load(Path("finetuned-weights").files("*dpsnet*")[0])
    else:
        weights = torch.load(Path("original-weights").files("*dpsnet*")[0])['state_dict']
    dpsnet.load_state_dict(weights)
    dpsnet = dpsnet.to(device)
    dpsnet.eval()
    scale_rgb = 255.0
    mean_rgb = [0.5, 0.5, 0.5]
    std_rgb = [0.5, 0.5, 0.5]
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # Index filename encodes: keyframing+dataset+scene+_+n_measurement_frames
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # First filename on the line is the reference frame,
                    # the rest are measurement frames.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                    reference_pose = poses[reference_index]
                    reference_image = load_image(image_filenames[reference_index])
                    # Depth PNGs are stored in millimeters; convert to meters.
                    reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                    preprocessor = PreprocessImage(K=K,
                                                   old_width=reference_image.shape[1],
                                                   old_height=reference_image.shape[0],
                                                   new_width=input_image_width,
                                                   new_height=input_image_height,
                                                   distortion_crop=0,
                                                   perform_crop=False)
                    reference_image = preprocessor.apply_rgb(image=reference_image,
                                                             scale_rgb=scale_rgb,
                                                             mean_rgb=mean_rgb,
                                                             std_rgb=std_rgb)
                    reference_depth = preprocessor.apply_depth(reference_depth)
                    reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    measurement_poses_torch = []
                    measurement_images_torch = []
                    for measurement_index in measurement_indices:
                        measurement_image = load_image(image_filenames[measurement_index])
                        measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                                   scale_rgb=scale_rgb,
                                                                   mean_rgb=mean_rgb,
                                                                   std_rgb=std_rgb)
                        measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                        measurement_pose = poses[measurement_index]
                        # Relative pose reference->measurement, truncated to 3x4.
                        measurement_pose = (np.linalg.inv(measurement_pose) @ reference_pose)[0:3, :]
                        measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0)
                        measurement_poses_torch.append(measurement_pose_torch)
                        measurement_images_torch.append(measurement_image_torch)
                    camera_k = preprocessor.get_updated_intrinsics()
                    camera_k_inv = np.linalg.inv(camera_k)
                    camera_k_torch = torch.from_numpy(camera_k).float().to(device).unsqueeze(0)
                    camera_k_inv_torch = torch.from_numpy(camera_k_inv).float().to(device).unsqueeze(0)
                    inference_timer.record_start_time()
                    _, prediction = dpsnet(reference_image_torch,
                                           measurement_images_torch,
                                           measurement_poses_torch,
                                           camera_k_torch,
                                           camera_k_inv_torch)
                    inference_timer.record_end_time_and_elapsed_time()
                    prediction = prediction.cpu().numpy().squeeze()
                    reference_depths.append(reference_depth)
                    predictions.append(prediction)
                    if Config.test_visualize:
                        visualize_predictions(numpy_reference_image=reference_image,
                                              numpy_measurement_image=measurement_image,
                                              numpy_predicted_depth=prediction,
                                              normalization_mean=mean_rgb,
                                              normalization_std=std_rgb,
                                              normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_dpsnet_{}".format(keyframing_type,
                                                        dataset_name,
                                                        input_image_width,
                                                        input_image_height,
                                                        n_measurement_frames,
                                                        extension)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run offline DPSNet depth prediction.
if __name__ == '__main__':
    predict()
| 7,607 | 46.849057 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/dpsnet/dpsnet.py | from __future__ import print_function
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data
from torch.autograd import Variable
from dvmvs.utils import freeze_batchnorm
pixel_coords = None
def set_id_grid(depth):
    """Rebuild the global ``pixel_coords`` cache as a [1, 3, H, W] grid of
    homogeneous pixel coordinates (x, y, 1) matching *depth*'s spatial size."""
    global pixel_coords
    b, h, w = depth.size()
    i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1, h, w)).type_as(depth)  # [1, H, W]
    j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1, h, w)).type_as(depth)  # [1, H, W]
    ones = Variable(torch.ones(1, h, w)).type_as(depth)
    # Channel order is (x, y, 1): j_range indexes columns, i_range rows.
    pixel_coords = torch.stack((j_range, i_range, ones), dim=1)  # [1, 3, H, W]
def check_sizes(input, input_name, expected):
    """Assert that *input* has the rank described by *expected* and that every
    digit position in *expected* matches the corresponding dimension size.

    Non-digit characters (e.g. 'B', 'H', 'W') only constrain the rank.
    """
    checks = [input.ndimension() == len(expected)]
    for axis, spec in enumerate(expected):
        if spec.isdigit():
            checks.append(input.size(axis) == int(spec))
    assert (all(checks)), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected), list(input.size()))
def pixel2cam(depth, intrinsics_inv):
    """Transform coordinates in the pixel frame to the camera frame.
    Args:
        depth: depth maps -- [B, H, W]
        intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]
    Returns:
        array of (u,v,1) cam coordinates -- [B, 3, H, W]
    """
    # Fix: the docstring above must be the FIRST statement of the function to
    # be recognized as a docstring (PEP 257); it previously sat after the
    # `global` statement and was a dead string expression.
    global pixel_coords
    b, h, w = depth.size()
    # Rebuild the cached grid when missing or too small for this input.
    if (pixel_coords is None) or pixel_coords.size(2) < h:
        set_id_grid(depth)
    current_pixel_coords = pixel_coords[:, :, :h, :w].expand(b, 3, h, w).contiguous().view(b, 3, -1).cuda()  # [B, 3, H*W]
    cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, h, w)
    # Back-project: rays scaled by per-pixel depth.
    return cam_coords * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Transform coordinates in the camera frame to the pixel frame.
    Args:
        cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]
        proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]
        proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]
        padding_mode: 'zeros' marks out-of-frame points with coordinate 2
    Returns:
        array of [-1,1] coordinates -- [B, 2, H, W]
    """
    b, _, h, w = cam_coords.size()
    cam_coords_flat = cam_coords.view(b, 3, -1)  # [B, 3, H*W]
    # Rotation / translation are optional; identity transform when None.
    if proj_c2p_rot is not None:
        pcoords = proj_c2p_rot.bmm(cam_coords_flat)
    else:
        pcoords = cam_coords_flat
    if proj_c2p_tr is not None:
        pcoords = pcoords + proj_c2p_tr  # [B, 3, H*W]
    X = pcoords[:, 0]
    Y = pcoords[:, 1]
    # Clamp depth away from zero to avoid division blow-up behind the camera.
    Z = pcoords[:, 2].clamp(min=1e-3)
    X_norm = 2 * (X / Z) / (w - 1) - 1  # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
    Y_norm = 2 * (Y / Z) / (h - 1) - 1  # Idem [B, H*W]
    if padding_mode == 'zeros':
        X_mask = ((X_norm > 1) + (X_norm < -1)).detach()
        X_norm[X_mask] = 2  # make sure that no point in warped image is a combination of im and gray
        Y_mask = ((Y_norm > 1) + (Y_norm < -1)).detach()
        Y_norm[Y_mask] = 2
    pixel_coords = torch.stack([X_norm, Y_norm], dim=2)  # [B, H*W, 2]
    return pixel_coords.view(b, h, w, 2)
def inverse_warp(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
    """
    Inverse warp a source image to the target image plane.
    Args:
        feat: the source feature (where to sample pixels) -- [B, CH, H, W]
        depth: depth map of the target image -- [B, H, W]
        pose: 6DoF pose parameters from target to source -- [B, 6]
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
        intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
        padding_mode: passed through to grid_sample / cam2pixel
    Returns:
        Source image warped to the target image plane
    """
    check_sizes(depth, 'depth', 'BHW')
    # NOTE(review): despite the docstring, the pose check requires a [B, 3, 4]
    # matrix, not 6DoF parameters.
    check_sizes(pose, 'pose', 'B34')
    check_sizes(intrinsics, 'intrinsics', 'B33')
    # NOTE(review): the error label below says 'intrinsics' for intrinsics_inv.
    check_sizes(intrinsics_inv, 'intrinsics', 'B33')
    assert (intrinsics_inv.size() == intrinsics.size())
    batch_size, _, feat_height, feat_width = feat.size()
    # Back-project target pixels to 3-D using the target depth map.
    cam_coords = pixel2cam(depth, intrinsics_inv)
    pose_mat = pose
    pose_mat = pose_mat.cuda()
    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics.bmm(pose_mat)  # [B, 3, 4]
    src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:, :, :3], proj_cam_to_src_pixel[:, :, -1:], padding_mode)  # [B,H,W,2]
    projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode, align_corners=True)
    return projected_feat
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    """Bias-free 2-D convolution followed by batch normalization.

    When dilation > 1 the dilation value itself is used as the padding,
    otherwise the explicit *pad* argument is used.
    """
    effective_pad = dilation if dilation > 1 else pad
    convolution = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                            stride=stride, padding=effective_pad,
                            dilation=dilation, bias=False)
    return nn.Sequential(convolution, nn.BatchNorm2d(out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
    """Bias-free 3-D convolution followed by 3-D batch normalization."""
    layers = [
        nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                  padding=pad, stride=stride, bias=False),
        nn.BatchNorm3d(out_planes),
    ]
    return nn.Sequential(*layers)
class BasicBlock(nn.Module):
    """Residual block: two conv-BN stages plus an (optionally downsampled)
    identity shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(convbn(inplanes, planes, 3, stride, pad, dilation),
                                   nn.ReLU(inplace=True))
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
        # downsample may be None (identity shortcut) or a module adapting the
        # shortcut to the new channel count / stride.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample is not None:
            x = self.downsample(x)
        # Note: no activation is applied after the residual addition.
        out += x
        return out
class matchshifted(nn.Module):
    """Concatenate left/right feature maps after shifting them *shift* pixels
    horizontally (left cropped from the left, right from the right)."""

    def __init__(self):
        super(matchshifted, self).__init__()

    def forward(self, left, right, shift):
        # Returns shape [batch, 2*filters, 1, height, width]; indices are
        # built on .cuda(), so this module requires a CUDA device.
        batch, filters, height, width = left.size()
        shifted_left = F.pad(torch.index_select(left, 3, Variable(torch.LongTensor([i for i in range(shift, width)])).cuda()), (shift, 0, 0, 0))
        shifted_right = F.pad(torch.index_select(right, 3, Variable(torch.LongTensor([i for i in range(width - shift)])).cuda()), (shift, 0, 0, 0))
        out = torch.cat((shifted_left, shifted_right), 1).view(batch, filters * 2, 1, height, width)
        return out
class disparityregression(nn.Module):
    """Soft-argmin disparity regression: expectation of the disparity index
    under the input probability volume (PSMNet-style)."""

    def __init__(self, maxdisp):
        super(disparityregression, self).__init__()
        # Constant [1, maxdisp, 1, 1] ramp of disparity indices; allocated on
        # CUDA at construction time, so the module requires a GPU.
        self.disp = Variable(torch.Tensor(np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])).cuda(), requires_grad=False)

    def forward(self, x):
        # x holds per-pixel probabilities over disparities; output is the
        # probability-weighted mean disparity, shape [B, H, W].
        disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
        out = torch.sum(x * disp, 1)
        return out
class feature_extraction(nn.Module):
    """Feature extractor with a spatial pyramid pooling head.

    Two stride-2 stages (firstconv and layer2) reduce the input resolution by
    a factor of 4; four average-pooling branches at different scales are
    upsampled back and concatenated before a final 1x1 projection to 32
    channels.
    """

    def __init__(self):
        super(feature_extraction, self).__init__()
        self.inplanes = 32
        self.firstconv = nn.Sequential(convbn(3, 32, 3, 2, 1, 1),
                                       nn.ReLU(inplace=True),
                                       convbn(32, 32, 3, 1, 1, 1),
                                       nn.ReLU(inplace=True),
                                       convbn(32, 32, 3, 1, 1, 1),
                                       nn.ReLU(inplace=True))
        self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
        self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
        self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
        self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
        self.branch1 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch2 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch3 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch4 = nn.Sequential(nn.AvgPool2d((4, 4), stride=(4, 4)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.lastconv = nn.Sequential(convbn(320, 128, 3, 1, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))

    def _make_layer(self, block, planes, blocks, stride, pad, dilation):
        """Stack *blocks* residual blocks; a 1x1 conv shortcut is added when
        the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion), )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        output = self.firstconv(x)
        output = self.layer1(output)
        output_raw = self.layer2(output)
        output = self.layer3(output_raw)
        output_skip = self.layer4(output)
        # Each pyramid branch is pooled then bilinearly resized back to the
        # skip feature's resolution before concatenation.
        output_branch1 = self.branch1(output_skip)
        output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_branch2 = self.branch2(output_skip)
        output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_branch3 = self.branch3(output_skip)
        output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_branch4 = self.branch4(output_skip)
        output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_feature = torch.cat((output_raw, output_skip, output_branch4, output_branch3, output_branch2, output_branch1), 1)
        output_feature = self.lastconv(output_feature)
        return output_feature
def convtext(in_planes, out_planes, kernel_size=3, stride=1, dilation=1):
    """Context-network block: dilated, bias-free 2-D conv + LeakyReLU(0.1)."""
    same_padding = ((kernel_size - 1) * dilation) // 2
    convolution = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                            stride=stride, dilation=dilation,
                            padding=same_padding, bias=False)
    return nn.Sequential(convolution, nn.LeakyReLU(0.1, inplace=True))
class PSNet(nn.Module):
    """Plane-sweep stereo network (DPSNet).

    Builds a cost volume over *nlabel* fronto-parallel depth planes by
    warping measurement features onto the reference view, regularizes it
    with 3-D convolutions, refines it with a 2-D context network, and
    regresses depth as mindepth * nlabel / soft-argmin(disparity).
    """

    def __init__(self, nlabel, mindepth):
        super(PSNet, self).__init__()
        self.nlabel = nlabel          # number of sampled depth planes
        self.mindepth = mindepth      # depth of the nearest plane
        self.feature_extraction = feature_extraction()
        # 2-D context network applied per depth slice (32 feature + 1 cost ch).
        self.convs = nn.Sequential(
            convtext(33, 128, 3, 1, 1),
            convtext(128, 128, 3, 1, 2),
            convtext(128, 128, 3, 1, 4),
            convtext(128, 96, 3, 1, 8),
            convtext(96, 64, 3, 1, 16),
            convtext(64, 32, 3, 1, 1),
            convtext(32, 1, 3, 1, 1)
        )
        # 3-D cost-volume regularization tower.
        self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True))
        self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        self.dres2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        self.dres3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        self.dres4 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        self.classify = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        # He-style weight initialization for conv layers; BN to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def init_weights(self):
        """Alternative Xavier initialization for (transposed) conv layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.xavier_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, ref, targets, pose, intrinsics, intrinsics_inv):
        """Return (initial_depth, refined_depth), each [B, 1, H, W].

        ref: reference image; targets: list of measurement images;
        pose: list of [B, 3, 4] relative poses (reference -> measurement).
        """
        # Features are at quarter resolution, so scale the intrinsics by 1/4.
        intrinsics4 = intrinsics.clone()
        intrinsics_inv4 = intrinsics_inv.clone()
        intrinsics4[:, :2, :] = intrinsics4[:, :2, :] / 4
        intrinsics_inv4[:, :2, :2] = intrinsics_inv4[:, :2, :2] * 4
        refimg_fea = self.feature_extraction(ref)
        disp2depth = Variable(torch.ones(refimg_fea.size(0), refimg_fea.size(2), refimg_fea.size(3))).cuda() * self.mindepth * self.nlabel
        for j, target in enumerate(targets):
            # Cost volume: reference features stacked with warped target
            # features for each of the nlabel depth planes.
            cost = Variable(
                torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1] * 2, self.nlabel, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
            targetimg_fea = self.feature_extraction(target)
            for i in range(self.nlabel):
                depth = torch.div(disp2depth, i + 1e-16)
                targetimg_fea_t = inverse_warp(targetimg_fea, depth, pose[j], intrinsics4, intrinsics_inv4)
                cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea
                cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea_t
            cost = cost.contiguous()
            cost0 = self.dres0(cost)
            cost0 = self.dres1(cost0) + cost0
            cost0 = self.dres2(cost0) + cost0
            cost0 = self.dres3(cost0) + cost0
            cost0 = self.dres4(cost0) + cost0
            cost0 = self.classify(cost0)
            if j == 0:
                costs = cost0
            else:
                costs = costs + cost0
        # Average the per-measurement cost volumes.
        costs = costs / len(targets)
        # Slice-wise context refinement with the 2-D network.
        costss = Variable(torch.FloatTensor(refimg_fea.size()[0], 1, self.nlabel, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
        for i in range(self.nlabel):
            costt = costs[:, :, i, :, :]
            costss[:, :, i, :, :] = self.convs(torch.cat([refimg_fea, costt], 1)) + costt
        costs = F.interpolate(costs, [self.nlabel, ref.size()[2], ref.size()[3]], mode='trilinear', align_corners=False)
        costs = torch.squeeze(costs, 1)
        pred0 = F.softmax(costs, dim=1)
        pred0 = disparityregression(self.nlabel)(pred0)
        depth0 = self.mindepth * self.nlabel / (pred0.unsqueeze(1) + 1e-16)
        costss = F.interpolate(costss, [self.nlabel, ref.size()[2], ref.size()[3]], mode='trilinear', align_corners=False)
        costss = torch.squeeze(costss, 1)
        pred = F.softmax(costss, dim=1)
        pred = disparityregression(self.nlabel)(pred)
        depth = self.mindepth * self.nlabel / (pred.unsqueeze(1) + 1e-16)
        return depth0, depth

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(PSNet, self).train(mode)
        self.apply(freeze_batchnorm)
| 16,381 | 40.684478 | 157 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/run-training.py | import datetime
import itertools
import os
import numpy as np
from path import Path
from tensorboardX import SummaryWriter
from torch.backends import cudnn
from torch.utils.data import DataLoader
from dvmvs.dataset_loader import MVSDataset
from dvmvs.losses import LossMeter, update_losses
from dvmvs.pairnet.model import *
from dvmvs.train import train
from dvmvs.utils import zip_code, print_number_of_trainable_parameters, calculate_cost_volume_by_warping
class TrainingHyperparameters:
    # Namespace of hyperparameters for pair-wise (2-frame) training.
    # NOTE: the two Config assignments below run at class-definition time and
    # mutate the global Config as a side effect.
    Config.train_subsequence_length = 2
    Config.train_predict_two_way = True
    batch_size = 14
    learning_rate = 1e-4
    momentum = 0.9   # Adam beta1
    beta = 0.999     # Adam beta2
    weight_decay = 0
    # loss_type = "Huber"
    # loss_type = "L1"
    loss_type = "L1-inv"
    # loss_type = "L1-rel"
    finetune_epochs = 2          # epochs training only the non-backbone parts
    use_augmentation = True      # random horizontal flip in forward_pass
    use_checkpoint = False       # resume module_i weights from cwd if True
# Precompute a homogeneous pixel-coordinate grid at half of the training
# resolution (scaling = 0.5); reshaped to [3, H*W] and moved to the GPU for
# plane-sweep warping in forward_pass.
scaling = 0.5
x = np.linspace(0, Config.train_image_width * scaling - 1, num=int(Config.train_image_width * scaling))
y = np.linspace(0, Config.train_image_height * scaling - 1, num=int(Config.train_image_height * scaling))
ones = np.ones(shape=(int(Config.train_image_height * scaling), int(Config.train_image_width * scaling)))
x_grid, y_grid = np.meshgrid(x, y)
warp_grid = np.stack((x_grid, y_grid, ones), axis=-1)
warp_grid = torch.from_numpy(warp_grid).float()
warp_grid = warp_grid.view(-1, 3).t().cuda()
def main():
    """Train PairNet in two phases.

    Phase 1 (finetune_epochs): optimize only shrinker/encoder/decoder.
    Phase 2 (remaining epochs): optimize everything incl. the feature
    extractor. Each run gets a timestamped directory with zipped code and
    TensorBoard logs.
    """
    # set the manual seed for reproducibility
    torch.manual_seed(Config.train_seed)
    # create the directory for this run of the training
    run_directory = os.path.join(Config.train_run_directory, datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    os.mkdir(run_directory)
    # zip every code file
    zip_code(run_directory)
    summary_writer = SummaryWriter(run_directory)
    print("=> fetching scenes in '{}'".format(Config.dataset))
    train_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="TRAINING",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225],
        geometric_scale_augmentation=True
    )
    val_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="VALIDATION",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225]
    )
    print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
    print('{} samples found in {} valid scenes'.format(len(val_set), len(val_set.scenes)))
    train_loader = DataLoader(dataset=train_set,
                              batch_size=TrainingHyperparameters.batch_size,
                              shuffle=True,
                              num_workers=Config.train_data_pipeline_workers,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(dataset=val_set,
                            batch_size=TrainingHyperparameters.batch_size,
                            shuffle=False,
                            num_workers=Config.train_data_pipeline_workers,
                            pin_memory=True,
                            drop_last=True)
    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    cost_volume_decoder = CostVolumeDecoder()
    feature_extractor = feature_extractor.cuda()
    feature_shrinker = feature_shrinker.cuda()
    cost_volume_encoder = cost_volume_encoder.cuda()
    cost_volume_decoder = cost_volume_decoder.cuda()
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]
    # Optionally resume: weights files named module_<i>* in the working dir.
    if TrainingHyperparameters.use_checkpoint:
        for i in range(len(model)):
            try:
                checkpoint = Path(".").files("module_" + str(i) + "*")[0]
                weights = torch.load(checkpoint)
                model[i].load_state_dict(weights)
                print("Loaded weights for", checkpoint)
            except Exception as e:
                print(e)
                print("Skipping...")
    cudnn.benchmark = True
    best_loss = [np.inf, np.inf, np.inf, np.inf]
    # TRAIN MY PARTS
    parameters = itertools.chain(feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)
    # TRAIN EVERYTHING
    parameters = itertools.chain(feature_extractor.parameters(),
                                 feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs, Config.train_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)
def forward_pass(images, depths, poses, K, model, is_training):
    """One training/validation pass over a subsequence of frames.

    For each adjacent frame pair, builds a plane-sweep cost volume (in both
    directions when Config.train_predict_two_way), predicts multi-scale
    depths, and accumulates the selected loss. Returns loss meters, the
    optimizer loss, and the last predictions (for visualization).
    NOTE(review): mutates K in place (scaled to the half-resolution grid).
    """
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    cost_volume_decoder = model[3]
    # Intrinsics are scaled to match the half-resolution warp grid.
    K[:, 0:2, :] = K[:, 0:2, :] * scaling
    K = K.cuda()
    images_cuda = []
    depths_cuda = []
    poses_cuda = []
    feature_halfs = []
    feature_quarters = []
    feature_one_eights = []
    feature_one_sixteens = []
    # Extract image features
    for i in range(0, len(images)):
        images_cuda.append(images[i].cuda())
        poses_cuda.append(poses[i].cuda())
        depths_cuda.append(depths[i].cuda())
        feature_half, feature_quarter, feature_one_eight, feature_one_sixteen = feature_shrinker(*feature_extractor(images_cuda[i]))
        feature_halfs.append(feature_half)
        feature_quarters.append(feature_quarter)
        feature_one_eights.append(feature_one_eight)
        feature_one_sixteens.append(feature_one_sixteen)
    optimizer_loss = 0
    predictions = None
    l1_meter = LossMeter()
    huber_meter = LossMeter()
    l1_inv_meter = LossMeter()
    l1_rel_meter = LossMeter()
    for i in range(1, len(images)):
        reference_index = i
        measurement_index = i - 1
        if Config.train_predict_two_way:
            iterations = [[measurement_index, reference_index],
                          [reference_index, measurement_index]]
        else:
            iterations = [[reference_index, measurement_index]]
        for [index1, index2] in iterations:
            initial_cost_volume = calculate_cost_volume_by_warping(image1=feature_halfs[index1],
                                                                   image2=feature_halfs[index2],
                                                                   pose1=poses_cuda[index1],
                                                                   pose2=poses_cuda[index2],
                                                                   K=K,
                                                                   warp_grid=warp_grid,
                                                                   min_depth=Config.train_min_depth,
                                                                   max_depth=Config.train_max_depth,
                                                                   n_depth_levels=Config.train_n_depth_levels,
                                                                   device=torch.device('cuda'),
                                                                   dot_product=True)
            flipped = False
            to_be_used_feature_one_sixteen = feature_one_sixteens[index1]
            to_be_used_feature_one_eight = feature_one_eights[index1]
            to_be_used_feature_quarter = feature_quarters[index1]
            to_be_used_feature_half = feature_halfs[index1]
            to_be_used_image = images_cuda[index1]
            to_be_used_depth = depths_cuda[index1]
            to_be_used_cost_volume = initial_cost_volume
            # Random horizontal-flip augmentation: features, image, depth and
            # cost volume are all flipped consistently.
            if is_training and TrainingHyperparameters.use_augmentation and np.random.random() > 0.5:
                to_be_used_feature_one_sixteen = torch.flip(feature_one_sixteens[index1], dims=[-1])
                to_be_used_feature_one_eight = torch.flip(feature_one_eights[index1], dims=[-1])
                to_be_used_feature_quarter = torch.flip(feature_quarters[index1], dims=[-1])
                to_be_used_feature_half = torch.flip(feature_halfs[index1], dims=[-1])
                to_be_used_image = torch.flip(images_cuda[index1], dims=[-1])
                to_be_used_depth = torch.flip(depths_cuda[index1], dims=[-1])
                to_be_used_cost_volume = torch.flip(initial_cost_volume, dims=[-1])
                flipped = True
            skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=to_be_used_feature_half,
                                                                     features_quarter=to_be_used_feature_quarter,
                                                                     features_one_eight=to_be_used_feature_one_eight,
                                                                     features_one_sixteen=to_be_used_feature_one_sixteen,
                                                                     cost_volume=to_be_used_cost_volume)
            depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen = cost_volume_decoder(to_be_used_image,
                                                                                                            skip0,
                                                                                                            skip1,
                                                                                                            skip2,
                                                                                                            skip3,
                                                                                                            bottom)
            predictions = [depth_one_sixteen, depth_one_eight, depth_quarter, depth_half, depth_full]
            weights = [1, 1, 1, 1, 1]
            optimizer_loss = optimizer_loss + update_losses(predictions=predictions,
                                                            weights=weights,
                                                            groundtruth=to_be_used_depth,
                                                            is_training=is_training,
                                                            l1_meter=l1_meter,
                                                            huber_meter=huber_meter,
                                                            l1_inv_meter=l1_inv_meter,
                                                            l1_rel_meter=l1_rel_meter,
                                                            loss_type=TrainingHyperparameters.loss_type)
            # Un-flip the last frame's predictions so visualizations match.
            if flipped and index1 == len(images) - 1:
                depth_quarter = torch.flip(depth_quarter, dims=[-1])
                depth_half = torch.flip(depth_half, dims=[-1])
                depth_full = torch.flip(depth_full, dims=[-1])
    predictions = [depth_quarter, depth_half, depth_full]
    predictions_names = ["prediction_quarter", "prediction_half", "prediction_full"]
    return l1_meter, huber_meter, l1_inv_meter, l1_rel_meter, optimizer_loss, predictions, predictions_names
# Script entry point: run the two-phase PairNet training.
if __name__ == '__main__':
    main()
| 12,924 | 45.160714 | 132 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/model.py | from collections import OrderedDict
import torch
from torchvision import models
from torchvision.ops import FeaturePyramidNetwork
from dvmvs.config import Config
from dvmvs.layers import conv_layer, depth_layer_3x3
fpn_output_channels = 32
hyper_channels = 32
class StandardLayer(torch.nn.Module):
    """Two stacked same-resolution convolutions.

    The first convolution always applies BN + ReLU; whether the second one
    does is controlled by ``apply_bn_relu``.
    """

    def __init__(self, channels, kernel_size, apply_bn_relu):
        super(StandardLayer, self).__init__()
        self.conv1 = conv_layer(input_channels=channels, output_channels=channels,
                                stride=1, kernel_size=kernel_size,
                                apply_bn_relu=True)
        self.conv2 = conv_layer(input_channels=channels, output_channels=channels,
                                stride=1, kernel_size=kernel_size,
                                apply_bn_relu=apply_bn_relu)

    def forward(self, inp):
        return self.conv2(self.conv1(inp))
class DownconvolutionLayer(torch.nn.Module):
    """Single stride-2 convolution that halves the spatial resolution."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(DownconvolutionLayer, self).__init__()
        self.down_conv = conv_layer(input_channels=input_channels,
                                    output_channels=output_channels,
                                    kernel_size=kernel_size,
                                    stride=2,
                                    apply_bn_relu=True)

    def forward(self, inp):
        return self.down_conv(inp)
class UpconvolutionLayer(torch.nn.Module):
    """Bilinear 2x upsampling followed by a stride-1 convolution."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(UpconvolutionLayer, self).__init__()
        self.conv = conv_layer(input_channels=input_channels,
                               output_channels=output_channels,
                               kernel_size=kernel_size,
                               stride=1,
                               apply_bn_relu=True)

    def forward(self, inp):
        upsampled = torch.nn.functional.interpolate(input=inp, scale_factor=2,
                                                    mode='bilinear', align_corners=True)
        return self.conv(upsampled)
class EncoderBlock(torch.nn.Module):
    """Downsampling stage: a stride-2 convolution followed by a StandardLayer."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(EncoderBlock, self).__init__()
        self.down_convolution = DownconvolutionLayer(input_channels=input_channels,
                                                     output_channels=output_channels,
                                                     kernel_size=kernel_size)
        self.standard_convolution = StandardLayer(channels=output_channels,
                                                  kernel_size=kernel_size,
                                                  apply_bn_relu=True)

    def forward(self, inp):
        return self.standard_convolution(self.down_convolution(inp))
class DecoderBlock(torch.nn.Module):
    """Upsampling stage that fuses the decoder path with a skip connection
    and, optionally, the depth map predicted at the previous (coarser) scale.
    """

    def __init__(self, input_channels, output_channels, kernel_size, apply_bn_relu, plus_one):
        super(DecoderBlock, self).__init__()
        # Upsample the input coming from the previous layer.
        self.up_convolution = UpconvolutionLayer(input_channels=input_channels,
                                                 output_channels=output_channels,
                                                 kernel_size=kernel_size)
        # When plus_one is set, one extra channel is reserved for the
        # upsampled coarse depth concatenated in forward().
        next_input_channels = input_channels + 1 if plus_one else input_channels
        # Aggregate the skip connection with the upsampled input.
        self.convolution1 = conv_layer(input_channels=next_input_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=True)
        # Learn from the aggregation.
        self.convolution2 = conv_layer(input_channels=output_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=apply_bn_relu)

    def forward(self, inp, skip, depth):
        inp = self.up_convolution(inp)
        if depth is None:
            fused = torch.cat([inp, skip], dim=1)
        else:
            coarse_depth = torch.nn.functional.interpolate(depth, scale_factor=2,
                                                           mode='bilinear', align_corners=True)
            fused = torch.cat([inp, skip, coarse_depth], dim=1)
        return self.convolution2(self.convolution1(fused))
class FeatureExtractor(torch.nn.Module):
    """MnasNet-1.0 backbone split into five sequential stages whose
    intermediate activations are all returned for the feature pyramid."""

    def __init__(self):
        super(FeatureExtractor, self).__init__()
        backbone_mobile_layers = list(models.mnasnet1_0(pretrained=True).layers.children())

        # Stage boundaries follow the original slicing of the backbone layers.
        self.layer1 = torch.nn.Sequential(*backbone_mobile_layers[0:8])
        self.layer2 = torch.nn.Sequential(*backbone_mobile_layers[8:9])
        self.layer3 = torch.nn.Sequential(*backbone_mobile_layers[9:10])
        self.layer4 = torch.nn.Sequential(*backbone_mobile_layers[10:12])
        self.layer5 = torch.nn.Sequential(*backbone_mobile_layers[12:14])

    def forward(self, image):
        stage_outputs = []
        current = image
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            current = stage(current)
            stage_outputs.append(current)
        return tuple(stage_outputs)
class FeatureShrinker(torch.nn.Module):
    """Feature pyramid network mapping the five backbone stages to a common
    channel width; returns the four finest pyramid levels."""

    def __init__(self):
        super(FeatureShrinker, self).__init__()
        self.fpn = FeaturePyramidNetwork(in_channels_list=[16, 24, 40, 96, 320],
                                         out_channels=fpn_output_channels,
                                         extra_blocks=None)

    def forward(self, layer1, layer2, layer3, layer4, layer5):
        names = ('layer1', 'layer2', 'layer3', 'layer4', 'layer5')
        fpn_input = OrderedDict(zip(names, (layer1, layer2, layer3, layer4, layer5)))
        fpn_output = self.fpn(fpn_input)
        # The coarsest level ('layer5') is produced by the FPN but not returned.
        return (fpn_output['layer1'], fpn_output['layer2'],
                fpn_output['layer3'], fpn_output['layer4'])
class CostVolumeEncoder(torch.nn.Module):
    """Encoder that injects image features at every scale while progressively
    downsampling the cost volume; returns the four aggregated skip tensors
    plus the bottleneck output."""

    def __init__(self):
        super(CostVolumeEncoder, self).__init__()
        # Half resolution: fuse image features with the raw cost volume.
        self.aggregator0 = conv_layer(input_channels=Config.train_n_depth_levels + fpn_output_channels,
                                      output_channels=hyper_channels,
                                      kernel_size=5, stride=1, apply_bn_relu=True)
        self.encoder_block0 = EncoderBlock(input_channels=hyper_channels,
                                           output_channels=hyper_channels * 2,
                                           kernel_size=5)
        # Quarter resolution.
        self.aggregator1 = conv_layer(input_channels=hyper_channels * 2 + fpn_output_channels,
                                      output_channels=hyper_channels * 2,
                                      kernel_size=3, stride=1, apply_bn_relu=True)
        self.encoder_block1 = EncoderBlock(input_channels=hyper_channels * 2,
                                           output_channels=hyper_channels * 4,
                                           kernel_size=3)
        # One-eighth resolution.
        self.aggregator2 = conv_layer(input_channels=hyper_channels * 4 + fpn_output_channels,
                                      output_channels=hyper_channels * 4,
                                      kernel_size=3, stride=1, apply_bn_relu=True)
        self.encoder_block2 = EncoderBlock(input_channels=hyper_channels * 4,
                                           output_channels=hyper_channels * 8,
                                           kernel_size=3)
        # One-sixteenth resolution.
        self.aggregator3 = conv_layer(input_channels=hyper_channels * 8 + fpn_output_channels,
                                      output_channels=hyper_channels * 8,
                                      kernel_size=3, stride=1, apply_bn_relu=True)
        self.encoder_block3 = EncoderBlock(input_channels=hyper_channels * 8,
                                           output_channels=hyper_channels * 16,
                                           kernel_size=3)

    def forward(self, features_half, features_quarter, features_one_eight, features_one_sixteen, cost_volume):
        stages = ((features_half, self.aggregator0, self.encoder_block0),
                  (features_quarter, self.aggregator1, self.encoder_block1),
                  (features_one_eight, self.aggregator2, self.encoder_block2),
                  (features_one_sixteen, self.aggregator3, self.encoder_block3))
        skips = []
        current = cost_volume
        for features, aggregator, encoder in stages:
            # Concatenate this scale's image features with the running volume,
            # aggregate them (this becomes the skip connection), then downsample.
            aggregated = aggregator(torch.cat([features, current], dim=1))
            skips.append(aggregated)
            current = encoder(aggregated)
        return skips[0], skips[1], skips[2], skips[3], current
class CostVolumeDecoder(torch.nn.Module):
    """Decoder that regresses depth at five scales (1/16 up to full
    resolution), refining the final prediction with the reference image."""

    def __init__(self):
        super(CostVolumeDecoder, self).__init__()
        # Sigmoid activations are mapped linearly onto the inverse-depth
        # range [1/max_depth, 1/min_depth].
        self.inverse_depth_base = 1 / Config.train_max_depth
        self.inverse_depth_multiplier = 1 / Config.train_min_depth - 1 / Config.train_max_depth

        self.decoder_block1 = DecoderBlock(input_channels=hyper_channels * 16,
                                           output_channels=hyper_channels * 8,
                                           kernel_size=3, apply_bn_relu=True,
                                           plus_one=False)
        self.decoder_block2 = DecoderBlock(input_channels=hyper_channels * 8,
                                           output_channels=hyper_channels * 4,
                                           kernel_size=3, apply_bn_relu=True,
                                           plus_one=True)
        self.decoder_block3 = DecoderBlock(input_channels=hyper_channels * 4,
                                           output_channels=hyper_channels * 2,
                                           kernel_size=3, apply_bn_relu=True,
                                           plus_one=True)
        self.decoder_block4 = DecoderBlock(input_channels=hyper_channels * 2,
                                           output_channels=hyper_channels,
                                           kernel_size=5, apply_bn_relu=True,
                                           plus_one=True)
        # Full-resolution refinement over decoder features, coarse depth and image.
        self.refine = torch.nn.Sequential(conv_layer(input_channels=hyper_channels + 4,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5, stride=1,
                                                     apply_bn_relu=True),
                                          conv_layer(input_channels=hyper_channels,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5, stride=1,
                                                     apply_bn_relu=True))
        self.depth_layer_one_sixteen = depth_layer_3x3(hyper_channels * 8)
        self.depth_layer_one_eight = depth_layer_3x3(hyper_channels * 4)
        self.depth_layer_quarter = depth_layer_3x3(hyper_channels * 2)
        self.depth_layer_half = depth_layer_3x3(hyper_channels)
        self.depth_layer_full = depth_layer_3x3(hyper_channels)

    def forward(self, image, skip0, skip1, skip2, skip3, bottom):
        def to_inverse_depth(sigmoid_depth):
            # Map a sigmoid activation onto the inverse-depth range.
            return self.inverse_depth_multiplier * sigmoid_depth + self.inverse_depth_base

        # Decode coarse-to-fine; each scale's sigmoid depth is fed into the
        # next decoder block.
        decoded = self.decoder_block1(bottom, skip3, None)
        sigmoid_one_sixteen = self.depth_layer_one_sixteen(decoded)
        inverse_one_sixteen = to_inverse_depth(sigmoid_one_sixteen)

        decoded = self.decoder_block2(decoded, skip2, sigmoid_one_sixteen)
        sigmoid_one_eight = self.depth_layer_one_eight(decoded)
        inverse_one_eight = to_inverse_depth(sigmoid_one_eight)

        decoded = self.decoder_block3(decoded, skip1, sigmoid_one_eight)
        sigmoid_quarter = self.depth_layer_quarter(decoded)
        inverse_quarter = to_inverse_depth(sigmoid_quarter)

        decoded = self.decoder_block4(decoded, skip0, sigmoid_quarter)
        sigmoid_half = self.depth_layer_half(decoded)
        inverse_half = to_inverse_depth(sigmoid_half)

        # Full resolution: upsample depth and features, concatenate the
        # reference image, refine, then predict the final inverse depth.
        scaled_depth = torch.nn.functional.interpolate(sigmoid_half, scale_factor=2,
                                                       mode='bilinear', align_corners=True)
        scaled_decoder = torch.nn.functional.interpolate(decoded, scale_factor=2,
                                                         mode='bilinear', align_corners=True)
        refined = self.refine(torch.cat([scaled_decoder, scaled_depth, image], dim=1))
        inverse_full = to_inverse_depth(self.depth_layer_full(refined))

        # Convert inverse depth back to metric depth and drop the channel dim.
        return (1.0 / inverse_full.squeeze(1),
                1.0 / inverse_half.squeeze(1),
                1.0 / inverse_quarter.squeeze(1),
                1.0 / inverse_one_eight.squeeze(1),
                1.0 / inverse_one_sixteen.squeeze(1))
| 14,393 | 46.193443 | 127 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/run-testing.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.pairnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, CostVolumeDecoder
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_warp_grid_for_cost_volume_calculation
def predict():
    """Run offline pairnet inference over pre-exported keyframe index files.

    Loads the four pairnet sub-networks from ./weights, then for every
    keyframe index file under Config.test_offline_data_path predicts one
    depth map per reference frame and saves predictions together with the
    ground-truth depths via save_results().
    """
    print("System: PAIRNET")

    device = torch.device("cuda")

    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    cost_volume_decoder = CostVolumeDecoder()

    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)

    model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]

    # One checkpoint per sub-network; the sorted file order in ./weights must
    # match the module order in `model`.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)

    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    cost_volume_decoder = model[3]

    # Cost volumes are computed at half of the test image resolution.
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)

    # ImageNet-style normalization constants and the depth sampling range.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]

    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64

    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))

    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # Index filename encodes: keyframing type, dataset, scene, _, and the
        # number of measurement frames, joined with '+'.
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")

        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))

        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")

        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))

        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])

        inference_timer = InferenceTimer()

        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]

                # Skip frames where keyframe selection reported a failure.
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]

                # First entry is the reference frame, the rest are measurements.
                reference_index = current_indices[0]
                measurement_indices = current_indices[1:]

                reference_pose = poses[reference_index]
                reference_image = load_image(image_filenames[reference_index])
                # Depth PNGs store millimeters; convert to meters.
                reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0

                preprocessor = PreprocessImage(K=K,
                                               old_width=reference_image.shape[1],
                                               old_height=reference_image.shape[0],
                                               new_width=Config.test_image_width,
                                               new_height=Config.test_image_height,
                                               distortion_crop=Config.test_distortion_crop,
                                               perform_crop=Config.test_perform_crop)

                reference_image = preprocessor.apply_rgb(image=reference_image,
                                                         scale_rgb=scale_rgb,
                                                         mean_rgb=mean_rgb,
                                                         std_rgb=std_rgb)
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)

                measurement_poses_torch = []
                measurement_images_torch = []
                for measurement_index in measurement_indices:
                    measurement_image = load_image(image_filenames[measurement_index])
                    measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                               scale_rgb=scale_rgb,
                                                               mean_rgb=mean_rgb,
                                                               std_rgb=std_rgb)
                    measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                    measurement_images_torch.append(measurement_image_torch)
                    measurement_poses_torch.append(measurement_pose_torch)

                full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)

                # Intrinsics for the half-resolution feature maps.
                half_K_torch = full_K_torch.clone().cuda()
                half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0

                inference_timer.record_start_time()

                measurement_feature_halfs = []
                for measurement_image_torch in measurement_images_torch:
                    measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                    measurement_feature_halfs.append(measurement_feature_half)

                reference_feature_half, reference_feature_quarter, \
                    reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))

                cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                                 image2s=measurement_feature_halfs,
                                                 pose1=reference_pose_torch,
                                                 pose2s=measurement_poses_torch,
                                                 K=half_K_torch,
                                                 warp_grid=warp_grid,
                                                 min_depth=min_depth,
                                                 max_depth=max_depth,
                                                 n_depth_levels=n_depth_levels,
                                                 device=device,
                                                 dot_product=True)

                skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                         features_quarter=reference_feature_quarter,
                                                                         features_one_eight=reference_feature_one_eight,
                                                                         features_one_sixteen=reference_feature_one_sixteen,
                                                                         cost_volume=cost_volume)

                # Only the full-resolution prediction is kept for evaluation.
                prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, bottom)
                inference_timer.record_end_time_and_elapsed_time()

                prediction = prediction.cpu().numpy().squeeze()
                reference_depths.append(reference_depth)
                predictions.append(prediction)

                if Config.test_visualize:
                    visualize_predictions(numpy_reference_image=reference_image,
                                          numpy_measurement_image=measurement_image,
                                          numpy_predicted_depth=prediction,
                                          normalization_mean=mean_rgb,
                                          normalization_std=std_rgb,
                                          normalization_scale=scale_rgb)

        inference_timer.print_statistics()

        system_name = "{}_{}_{}_{}_{}_dvmvs_pairnet".format(keyframing_type,
                                                            dataset_name,
                                                            Config.test_image_width,
                                                            Config.test_image_height,
                                                            n_measurement_frames)

        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run offline pairnet evaluation.
if __name__ == '__main__':
    predict()
| 10,187 | 50.715736 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/run-testing-online.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.keyframe_buffer import KeyframeBuffer
from dvmvs.pairnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, CostVolumeDecoder
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_warp_grid_for_cost_volume_calculation
def predict():
    """Run pairnet inference in the online setting.

    Frames of a single scene are streamed in order through a KeyframeBuffer,
    which decides whether each frame becomes a keyframe and supplies the
    measurement frames; predictions are saved next to this script.
    """
    dataset_name = Config.test_online_scene_path.split("/")[-2]
    # NOTE(review): this is the pairnet script but the system name says
    # "fusionnet_online" — looks like a copy-paste from the fusionnet script;
    # confirm whether the results should be labeled pairnet instead.
    system_name = "keyframe_{}_{}_{}_{}_dvmvs_fusionnet_online".format(dataset_name,
                                                                      Config.test_image_width,
                                                                      Config.test_image_height,
                                                                      Config.test_n_measurement_frames)

    print("Predicting with System:", system_name)
    print("# of Measurement Frames:", Config.test_n_measurement_frames)

    device = torch.device("cuda")

    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    cost_volume_decoder = CostVolumeDecoder()

    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)

    model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]

    # One checkpoint per sub-network; the sorted file order in ./weights must
    # match the module order in `model`.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)

    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    cost_volume_decoder = model[3]

    # Cost volumes are computed at half of the test image resolution.
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)

    # ImageNet-style normalization constants and the depth sampling range.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]

    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64

    scene_folder = Path(Config.test_online_scene_path)
    scene = scene_folder.split("/")[-1]
    print("Predicting for scene:", scene)

    keyframe_buffer = KeyframeBuffer(buffer_size=Config.test_keyframe_buffer_size,
                                     keyframe_pose_distance=Config.test_keyframe_pose_distance,
                                     optimal_t_score=Config.test_optimal_t_measure,
                                     optimal_R_score=Config.test_optimal_R_measure,
                                     store_return_indices=False)

    K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
    poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
    image_filenames = sorted((scene_folder / 'images').files("*.png"))
    depth_filenames = sorted((scene_folder / 'depth').files("*.png"))

    inference_timer = InferenceTimer()

    predictions = []
    reference_depths = []
    with torch.no_grad():
        for i in tqdm(range(0, len(poses))):
            reference_pose = poses[i]
            reference_image = load_image(image_filenames[i])
            # Depth PNGs store millimeters; convert to meters.
            reference_depth = cv2.imread(depth_filenames[i], -1).astype(float) / 1000.0

            # POLL THE KEYFRAME BUFFER
            response = keyframe_buffer.try_new_keyframe(reference_pose, reference_image)
            # Only frames accepted as keyframes (response == 1) are predicted.
            if response != 1:
                continue

            preprocessor = PreprocessImage(K=K,
                                           old_width=reference_image.shape[1],
                                           old_height=reference_image.shape[0],
                                           new_width=Config.test_image_width,
                                           new_height=Config.test_image_height,
                                           distortion_crop=Config.test_distortion_crop,
                                           perform_crop=Config.test_perform_crop)

            reference_image = preprocessor.apply_rgb(image=reference_image,
                                                     scale_rgb=scale_rgb,
                                                     mean_rgb=mean_rgb,
                                                     std_rgb=std_rgb)
            reference_depth = preprocessor.apply_depth(reference_depth)
            reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
            reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)

            full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)

            # Intrinsics for the half-resolution feature maps.
            half_K_torch = full_K_torch.clone().cuda()
            half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0

            measurement_poses_torch = []
            measurement_images_torch = []
            measurement_frames = keyframe_buffer.get_best_measurement_frames(Config.test_n_measurement_frames)
            for (measurement_pose, measurement_image) in measurement_frames:
                measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                           scale_rgb=scale_rgb,
                                                           mean_rgb=mean_rgb,
                                                           std_rgb=std_rgb)
                measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0)
                measurement_images_torch.append(measurement_image_torch)
                measurement_poses_torch.append(measurement_pose_torch)

            inference_timer.record_start_time()

            measurement_feature_halfs = []
            for measurement_image_torch in measurement_images_torch:
                measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                measurement_feature_halfs.append(measurement_feature_half)

            reference_feature_half, reference_feature_quarter, \
                reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))

            cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                             image2s=measurement_feature_halfs,
                                             pose1=reference_pose_torch,
                                             pose2s=measurement_poses_torch,
                                             K=half_K_torch,
                                             warp_grid=warp_grid,
                                             min_depth=min_depth,
                                             max_depth=max_depth,
                                             n_depth_levels=n_depth_levels,
                                             device=device,
                                             dot_product=True)

            skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                     features_quarter=reference_feature_quarter,
                                                                     features_one_eight=reference_feature_one_eight,
                                                                     features_one_sixteen=reference_feature_one_sixteen,
                                                                     cost_volume=cost_volume)

            # Only the full-resolution prediction is kept for evaluation.
            prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, bottom)
            inference_timer.record_end_time_and_elapsed_time()

            prediction = prediction.cpu().numpy().squeeze()
            reference_depths.append(reference_depth)
            predictions.append(prediction)

            if Config.test_visualize:
                visualize_predictions(numpy_reference_image=reference_image,
                                      numpy_measurement_image=measurement_image,
                                      numpy_predicted_depth=prediction,
                                      normalization_mean=mean_rgb,
                                      normalization_std=std_rgb,
                                      normalization_scale=scale_rgb)

    inference_timer.print_statistics()

    save_results(predictions=predictions,
                 groundtruths=reference_depths,
                 system_name=system_name,
                 scene_name=scene,
                 save_folder=".")
# Script entry point: run online pairnet evaluation.
if __name__ == '__main__':
    predict()
| 9,280 | 48.897849 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/run-training.py | import datetime
import itertools
import os
import numpy as np
from path import Path
from tensorboardX import SummaryWriter
from torch.backends import cudnn
from torch.utils.data import DataLoader
from dvmvs.dataset_loader import MVSDataset
from dvmvs.fusionnet.model import *
from dvmvs.losses import LossMeter, update_losses
from dvmvs.train import train
from dvmvs.utils import zip_code, print_number_of_trainable_parameters, calculate_cost_volume_by_warping
class TrainingHyperparameters:
    """Hyperparameters for the staged fusionnet training run."""
    # NOTE(review): this mutates the global Config at class-definition time
    # as an import side effect — confirm that overriding
    # train_subsequence_length here is intentional.
    Config.train_subsequence_length = 8
    batch_size = 4
    learning_rate = 1e-4
    # Adam beta1 / beta2.
    momentum = 0.9
    beta = 0.999
    weight_decay = 0
    # Alternative loss types, selectable by swapping the comments below.
    # loss_type = "Huber"
    # loss_type = "L1"
    loss_type = "L1-inv"
    # loss_type = "L1-rel"
    # Number of epochs for each of the first two (partial) training stages.
    finetune_epochs = 1
    # Load per-module checkpoints from ./weights before training.
    use_checkpoint = True
# Cost volumes are computed at half of the training image resolution.
scaling = 0.5

# Precompute the homogeneous pixel-coordinate grid handed to
# calculate_cost_volume_by_warping(): (x, y, 1) per pixel, reshaped to
# 3 x (H * W) and kept on the GPU.
x = np.linspace(0, Config.train_image_width * scaling - 1, num=int(Config.train_image_width * scaling))
y = np.linspace(0, Config.train_image_height * scaling - 1, num=int(Config.train_image_height * scaling))
ones = np.ones(shape=(int(Config.train_image_height * scaling), int(Config.train_image_width * scaling)))
x_grid, y_grid = np.meshgrid(x, y)

warp_grid = np.stack((x_grid, y_grid, ones), axis=-1)
warp_grid = torch.from_numpy(warp_grid).float()
warp_grid = warp_grid.view(-1, 3).t().cuda()
def main():
    """Train the fusionnet in three stages.

    Stage 1 trains only the LSTM fusion cell and the decoder, stage 2 adds
    the shrinker and cost-volume encoder, and stage 3 trains everything
    including the backbone feature extractor.
    """
    # set the manual seed for reproducibility
    torch.manual_seed(Config.train_seed)

    # create the directory for this run of the training
    run_directory = os.path.join(Config.train_run_directory, datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    os.mkdir(run_directory)

    # zip every code file
    zip_code(run_directory)

    summary_writer = SummaryWriter(run_directory)

    print("=> fetching scenes in '{}'".format(Config.dataset))
    train_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="TRAINING",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225],
        geometric_scale_augmentation=True
    )

    val_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="VALIDATION",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225]
    )

    print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
    print('{} samples found in {} valid scenes'.format(len(val_set), len(val_set.scenes)))

    train_loader = DataLoader(dataset=train_set,
                              batch_size=TrainingHyperparameters.batch_size,
                              shuffle=True,
                              num_workers=Config.train_data_pipeline_workers,
                              pin_memory=True,
                              drop_last=True)

    val_loader = DataLoader(dataset=val_set,
                            batch_size=TrainingHyperparameters.batch_size,
                            shuffle=False,
                            num_workers=Config.train_data_pipeline_workers,
                            pin_memory=True,
                            drop_last=True)

    feature_extractor = FeatureExtractor().cuda()
    feature_shrinker = FeatureShrinker().cuda()
    cost_volume_encoder = CostVolumeEncoder().cuda()
    lstm_fusion = LSTMFusion().cuda()
    cost_volume_decoder = CostVolumeDecoder().cuda()

    model = [feature_extractor, feature_shrinker, cost_volume_encoder, lstm_fusion, cost_volume_decoder]

    # Optionally warm-start each sub-network; the sorted file order in
    # ./weights must match the module order in `model`.
    if TrainingHyperparameters.use_checkpoint:
        checkpoints = sorted(Path("weights").files())
        for i in range(len(model)):
            try:
                weights = torch.load(checkpoints[i])
                model[i].load_state_dict(weights)
                print("Loaded weights for", checkpoints[i])
            except Exception as e:
                print(e)
                print("Skipping...")

    cudnn.benchmark = True

    # Best validation losses, updated in-place by train() (one slot per metric).
    best_loss = [np.inf, np.inf, np.inf, np.inf]

    # TRAIN LSTM, DECODER
    parameters = itertools.chain(lstm_fusion.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)

    # TRAIN MY PARTS
    parameters = itertools.chain(feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 lstm_fusion.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs, 2 * TrainingHyperparameters.finetune_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)

    # TRAIN EVERYTHING
    parameters = itertools.chain(feature_extractor.parameters(),
                                 feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 lstm_fusion.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(2 * TrainingHyperparameters.finetune_epochs, Config.train_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)
def forward_pass(images, depths, poses, K, model, is_training):
    """Run the fusionnet over one subsequence and accumulate the losses.

    Each frame (from the second on) uses its immediate predecessor as the
    measurement frame; the LSTM state is carried across the subsequence.
    Returns the four loss meters, the summed optimizer loss, the last
    frame's predictions and their names (for visualization by train()).
    """
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    lstm_fusion = model[3]
    cost_volume_decoder = model[4]

    full_K = K.clone().cuda()

    # Intrinsics for the half-resolution cost volume (module-level `scaling`).
    half_K = K.clone().cuda()
    half_K[:, 0:2, :] = half_K[:, 0:2, :] * scaling

    # Intrinsics at 1/32 resolution, where the LSTM cell operates.
    lstm_K = K.clone().cuda()
    lstm_K[:, 0:2, :] = lstm_K[:, 0:2, :] / 32.0

    images_cuda = []
    depths_cuda = []
    poses_cuda = []
    feature_halfs = []
    feature_quarters = []
    feature_one_eights = []
    feature_one_sixteens = []

    # Extract image features
    for i in range(0, len(images)):
        images_cuda.append(images[i].cuda())
        depths_cuda.append(depths[i].cuda())
        poses_cuda.append(poses[i].cuda())

        feature_half, feature_quarter, feature_one_eight, feature_one_sixteen = feature_shrinker(*feature_extractor(images_cuda[i]))
        feature_halfs.append(feature_half)
        feature_quarters.append(feature_quarter)
        feature_one_eights.append(feature_one_eight)
        feature_one_sixteens.append(feature_one_sixteen)

    optimizer_loss = 0
    predictions = None
    l1_meter = LossMeter()
    huber_meter = LossMeter()
    l1_inv_meter = LossMeter()
    l1_rel_meter = LossMeter()

    batch_size, _, _ = full_K.size()

    lstm_state_bottom = None
    for i in range(1, len(images_cuda)):
        # Each frame uses its immediate predecessor as the measurement frame.
        reference_index = i
        measurement_index = i - 1

        initial_cost_volume = calculate_cost_volume_by_warping(image1=feature_halfs[reference_index],
                                                               image2=feature_halfs[measurement_index],
                                                               pose1=poses_cuda[reference_index],
                                                               pose2=poses_cuda[measurement_index],
                                                               K=half_K,
                                                               warp_grid=warp_grid,
                                                               min_depth=Config.train_min_depth,
                                                               max_depth=Config.train_max_depth,
                                                               n_depth_levels=Config.train_n_depth_levels,
                                                               device=torch.device('cuda'),
                                                               dot_product=True)

        skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=feature_halfs[reference_index],
                                                                 features_quarter=feature_quarters[reference_index],
                                                                 features_one_eight=feature_one_eights[reference_index],
                                                                 features_one_sixteen=feature_one_sixteens[reference_index],
                                                                 cost_volume=initial_cost_volume)

        # NOTE(review): the LSTM cell is conditioned on the ground-truth
        # depth downsampled to 1/32 resolution (teacher forcing during
        # training); at test time an estimated depth would have to be used
        # instead — confirm against the inference script.
        depth_estimation = depths_cuda[reference_index].view(batch_size, 1, Config.train_image_height, Config.train_image_width)
        depth_estimation = torch.nn.functional.interpolate(input=depth_estimation,
                                                           scale_factor=(1.0 / 32.0),
                                                           mode="nearest")

        lstm_state_bottom = lstm_fusion(current_encoding=bottom,
                                        current_state=lstm_state_bottom,
                                        previous_pose=poses_cuda[measurement_index],
                                        current_pose=poses_cuda[reference_index],
                                        estimated_current_depth=depth_estimation,
                                        camera_matrix=lstm_K)

        # The decoder consumes the LSTM hidden state as the bottleneck input.
        depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen = cost_volume_decoder(images_cuda[reference_index],
                                                                                                       skip0,
                                                                                                       skip1,
                                                                                                       skip2,
                                                                                                       skip3,
                                                                                                       lstm_state_bottom[0])

        # All five prediction scales are weighted equally in the loss.
        weights = [1, 1, 1, 1, 1]
        optimizer_loss = optimizer_loss + update_losses(predictions=[depth_one_sixteen, depth_one_eight, depth_quarter, depth_half, depth_full],
                                                        weights=weights,
                                                        groundtruth=depths_cuda[reference_index],
                                                        is_training=is_training,
                                                        l1_meter=l1_meter,
                                                        l1_inv_meter=l1_inv_meter,
                                                        l1_rel_meter=l1_rel_meter,
                                                        huber_meter=huber_meter,
                                                        loss_type=TrainingHyperparameters.loss_type)

        # Keep the predictions of the last processed frame for visualization.
        predictions = [depth_quarter, depth_half, depth_full]

    predictions_names = ["prediction_quarter", "prediction_half", "prediction_full"]
    return l1_meter, huber_meter, l1_inv_meter, l1_rel_meter, optimizer_loss, predictions, predictions_names
# Script entry point: run the staged fusionnet training.
if __name__ == '__main__':
    main()
| 13,088 | 44.290657 | 144 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/model.py | from collections import OrderedDict
import torch
from torchvision import models
from torchvision.ops import FeaturePyramidNetwork
from dvmvs.config import Config
from dvmvs.convlstm import MVSLayernormConvLSTMCell
from dvmvs.layers import conv_layer, depth_layer_3x3
fpn_output_channels = 32
hyper_channels = 32
class StandardLayer(torch.nn.Module):
    """Two stride-1 convolutions at a constant channel width.

    The first convolution always applies BN+ReLU; whether the second one
    does is controlled by ``apply_bn_relu``.
    """

    def __init__(self, channels, kernel_size, apply_bn_relu):
        super(StandardLayer, self).__init__()
        # First convolution always ends with BN+ReLU.
        self.conv1 = conv_layer(input_channels=channels,
                                output_channels=channels,
                                kernel_size=kernel_size,
                                stride=1,
                                apply_bn_relu=True)
        # Second convolution's BN+ReLU is controlled by the caller.
        self.conv2 = conv_layer(input_channels=channels,
                                output_channels=channels,
                                kernel_size=kernel_size,
                                stride=1,
                                apply_bn_relu=apply_bn_relu)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class DownconvolutionLayer(torch.nn.Module):
    """Single stride-2 convolution (with BN+ReLU) that halves resolution."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(DownconvolutionLayer, self).__init__()
        self.down_conv = conv_layer(input_channels=input_channels,
                                    output_channels=output_channels,
                                    kernel_size=kernel_size,
                                    stride=2,
                                    apply_bn_relu=True)

    def forward(self, x):
        return self.down_conv(x)
class UpconvolutionLayer(torch.nn.Module):
    """Bilinear 2x upsampling followed by a stride-1 convolution."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(UpconvolutionLayer, self).__init__()
        self.conv = conv_layer(input_channels=input_channels,
                               output_channels=output_channels,
                               kernel_size=kernel_size,
                               stride=1,
                               apply_bn_relu=True)

    def forward(self, x):
        upsampled = torch.nn.functional.interpolate(input=x, scale_factor=2, mode='bilinear', align_corners=True)
        return self.conv(upsampled)
class EncoderBlock(torch.nn.Module):
    """Encoder stage: stride-2 downsampling followed by a StandardLayer."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(EncoderBlock, self).__init__()
        self.down_convolution = DownconvolutionLayer(input_channels=input_channels,
                                                     output_channels=output_channels,
                                                     kernel_size=kernel_size)
        self.standard_convolution = StandardLayer(channels=output_channels,
                                                  kernel_size=kernel_size,
                                                  apply_bn_relu=True)

    def forward(self, x):
        return self.standard_convolution(self.down_convolution(x))
class DecoderBlock(torch.nn.Module):
    """Decoder stage: upsample, merge with skip (and optional depth), convolve.

    When ``plus_one`` is set, the aggregation convolution expects one extra
    input channel carrying a depth map predicted at the coarser scale.
    """

    def __init__(self, input_channels, output_channels, kernel_size, apply_bn_relu, plus_one):
        super(DecoderBlock, self).__init__()
        # Upsample the tensor coming from the previous decoder stage.
        self.up_convolution = UpconvolutionLayer(input_channels=input_channels,
                                                 output_channels=output_channels,
                                                 kernel_size=kernel_size)
        aggregation_channels = input_channels + 1 if plus_one else input_channels
        # Aggregate the upsampled tensor, the skip connection and
        # (optionally) the previous depth prediction.
        self.convolution1 = conv_layer(input_channels=aggregation_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=True)
        # Refine the aggregated features.
        self.convolution2 = conv_layer(input_channels=output_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=apply_bn_relu)

    def forward(self, x, skip, depth):
        x = self.up_convolution(x)
        inputs = [x, skip]
        if depth is not None:
            # Bring the coarser depth prediction up to the current resolution.
            inputs.append(torch.nn.functional.interpolate(depth, scale_factor=2, mode='bilinear', align_corners=True))
        x = self.convolution1(torch.cat(inputs, dim=1))
        return self.convolution2(x)
class FeatureExtractor(torch.nn.Module):
    """Pretrained MnasNet-1.0 backbone split into five sequential stages."""

    def __init__(self):
        super(FeatureExtractor, self).__init__()
        # Slice the pretrained MnasNet feature stack into five stages.
        backbone_stages = list(models.mnasnet1_0(pretrained=True).layers.children())
        self.layer1 = torch.nn.Sequential(*backbone_stages[0:8])
        self.layer2 = torch.nn.Sequential(*backbone_stages[8:9])
        self.layer3 = torch.nn.Sequential(*backbone_stages[9:10])
        self.layer4 = torch.nn.Sequential(*backbone_stages[10:12])
        self.layer5 = torch.nn.Sequential(*backbone_stages[12:14])

    def forward(self, image):
        """Return the activations of all five stages, shallowest first."""
        activations = []
        current = image
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            current = stage(current)
            activations.append(current)
        return tuple(activations)
class FeatureShrinker(torch.nn.Module):
    """Projects the five backbone stages to a common channel width via an FPN.

    Only the four finest FPN outputs (half down to one-sixteenth resolution)
    are returned; the coarsest ('layer5') output is not returned.
    """

    def __init__(self):
        super(FeatureShrinker, self).__init__()
        self.fpn = FeaturePyramidNetwork(in_channels_list=[16, 24, 40, 96, 320],
                                         out_channels=fpn_output_channels,
                                         extra_blocks=None)

    def forward(self, layer1, layer2, layer3, layer4, layer5):
        names = ['layer1', 'layer2', 'layer3', 'layer4', 'layer5']
        fpn_input = OrderedDict(zip(names, (layer1, layer2, layer3, layer4, layer5)))
        fpn_output = self.fpn(fpn_input)
        return tuple(fpn_output[name] for name in names[:4])
class CostVolumeEncoder(torch.nn.Module):
    """Encoder that fuses the cost volume with multi-scale image features.

    Each stage concatenates the current tensor with FPN features at the
    matching resolution, aggregates them with a stride-1 convolution (the
    result is also returned as a skip connection) and then downsamples
    with an ``EncoderBlock``.
    """

    def __init__(self):
        super(CostVolumeEncoder, self).__init__()
        # Per stage: (channels entering before the FPN concat,
        #             aggregated channels, encoder output channels, kernel).
        stage_specs = [
            (Config.train_n_depth_levels, hyper_channels, hyper_channels * 2, 5),
            (hyper_channels * 2, hyper_channels * 2, hyper_channels * 4, 3),
            (hyper_channels * 4, hyper_channels * 4, hyper_channels * 8, 3),
            (hyper_channels * 8, hyper_channels * 8, hyper_channels * 16, 3),
        ]
        for index, (in_channels, mid_channels, out_channels, kernel) in enumerate(stage_specs):
            # Attribute names must stay aggregatorN / encoder_blockN (and be
            # registered in this order) so state_dict keys match checkpoints.
            setattr(self, "aggregator{}".format(index),
                    conv_layer(input_channels=in_channels + fpn_output_channels,
                               output_channels=mid_channels,
                               kernel_size=kernel,
                               stride=1,
                               apply_bn_relu=True))
            setattr(self, "encoder_block{}".format(index),
                    EncoderBlock(input_channels=mid_channels,
                                 output_channels=out_channels,
                                 kernel_size=kernel))

    def forward(self, features_half, features_quarter, features_one_eight, features_one_sixteen, cost_volume):
        """Return the four aggregated skip tensors plus the bottleneck."""
        stages = [
            (features_half, self.aggregator0, self.encoder_block0),
            (features_quarter, self.aggregator1, self.encoder_block1),
            (features_one_eight, self.aggregator2, self.encoder_block2),
            (features_one_sixteen, self.aggregator3, self.encoder_block3),
        ]
        skips = []
        current = cost_volume
        for features, aggregator, encoder_block in stages:
            aggregated = aggregator(torch.cat([features, current], dim=1))
            skips.append(aggregated)
            current = encoder_block(aggregated)
        return skips[0], skips[1], skips[2], skips[3], current
class CostVolumeDecoder(torch.nn.Module):
    """Decodes the bottleneck + skip connections into depth maps.

    Depth is predicted at five scales (1/16 up to full resolution); each
    scale's sigmoid output is converted to a depth map via an inverse-depth
    parameterization spanning [train_min_depth, train_max_depth].
    """
    def __init__(self):
        super(CostVolumeDecoder, self).__init__()
        # inverse_depth = multiplier * sigmoid + base maps sigmoid output in
        # [0, 1] to inverse depth in [1/max_depth, 1/min_depth].
        self.inverse_depth_base = 1 / Config.train_max_depth
        self.inverse_depth_multiplier = 1 / Config.train_min_depth - 1 / Config.train_max_depth
        self.decoder_block1 = DecoderBlock(input_channels=hyper_channels * 16,
                                           output_channels=hyper_channels * 8,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=False)
        self.decoder_block2 = DecoderBlock(input_channels=hyper_channels * 8,
                                           output_channels=hyper_channels * 4,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=True)
        self.decoder_block3 = DecoderBlock(input_channels=hyper_channels * 4,
                                           output_channels=hyper_channels * 2,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=True)
        self.decoder_block4 = DecoderBlock(input_channels=hyper_channels * 2,
                                           output_channels=hyper_channels,
                                           kernel_size=5,
                                           apply_bn_relu=True,
                                           plus_one=True)
        # Refinement at full resolution: decoder features + depth (1 channel)
        # + RGB image (3 channels) -> hyper_channels + 4 input channels.
        self.refine = torch.nn.Sequential(conv_layer(input_channels=hyper_channels + 4,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5,
                                                     stride=1,
                                                     apply_bn_relu=True),
                                          conv_layer(input_channels=hyper_channels,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5,
                                                     stride=1,
                                                     apply_bn_relu=True))
        # One sigmoid depth head per scale.
        self.depth_layer_one_sixteen = depth_layer_3x3(hyper_channels * 8)
        self.depth_layer_one_eight = depth_layer_3x3(hyper_channels * 4)
        self.depth_layer_quarter = depth_layer_3x3(hyper_channels * 2)
        self.depth_layer_half = depth_layer_3x3(hyper_channels)
        self.depth_layer_full = depth_layer_3x3(hyper_channels)
    def forward(self, image, skip0, skip1, skip2, skip3, bottom):
        """Return depth maps at full, 1/2, 1/4, 1/8 and 1/16 resolution."""
        # work on cost volume
        # Each decoder block consumes the previous block's output, the skip
        # connection at its scale and the previous scale's depth prediction.
        decoder_block1 = self.decoder_block1(bottom, skip3, None)
        sigmoid_depth_one_sixteen = self.depth_layer_one_sixteen(decoder_block1)
        inverse_depth_one_sixteen = self.inverse_depth_multiplier * sigmoid_depth_one_sixteen + self.inverse_depth_base
        decoder_block2 = self.decoder_block2(decoder_block1, skip2, sigmoid_depth_one_sixteen)
        sigmoid_depth_one_eight = self.depth_layer_one_eight(decoder_block2)
        inverse_depth_one_eight = self.inverse_depth_multiplier * sigmoid_depth_one_eight + self.inverse_depth_base
        decoder_block3 = self.decoder_block3(decoder_block2, skip1, sigmoid_depth_one_eight)
        sigmoid_depth_quarter = self.depth_layer_quarter(decoder_block3)
        inverse_depth_quarter = self.inverse_depth_multiplier * sigmoid_depth_quarter + self.inverse_depth_base
        decoder_block4 = self.decoder_block4(decoder_block3, skip0, sigmoid_depth_quarter)
        sigmoid_depth_half = self.depth_layer_half(decoder_block4)
        inverse_depth_half = self.inverse_depth_multiplier * sigmoid_depth_half + self.inverse_depth_base
        # Full resolution: upsample features + depth, concatenate the input
        # image and refine before the final depth head.
        scaled_depth = torch.nn.functional.interpolate(sigmoid_depth_half, scale_factor=2, mode='bilinear', align_corners=True)
        scaled_decoder = torch.nn.functional.interpolate(decoder_block4, scale_factor=2, mode='bilinear', align_corners=True)
        scaled_combined = torch.cat([scaled_decoder, scaled_depth, image], dim=1)
        scaled_combined = self.refine(scaled_combined)
        inverse_depth_full = self.inverse_depth_multiplier * self.depth_layer_full(scaled_combined) + self.inverse_depth_base
        # Convert inverse depth back to metric depth at every scale.
        depth_full = 1.0 / inverse_depth_full.squeeze(1)
        depth_half = 1.0 / inverse_depth_half.squeeze(1)
        depth_quarter = 1.0 / inverse_depth_quarter.squeeze(1)
        depth_one_eight = 1.0 / inverse_depth_one_eight.squeeze(1)
        depth_one_sixteen = 1.0 / inverse_depth_one_sixteen.squeeze(1)
        return depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen
class LSTMFusion(torch.nn.Module):
    """Wraps a convolutional LSTM cell that fuses encodings over time."""

    def __init__(self):
        super(LSTMFusion, self).__init__()
        state_channels = hyper_channels * 16
        self.lstm_cell = MVSLayernormConvLSTMCell(input_dim=state_channels,
                                                  hidden_dim=state_channels,
                                                  kernel_size=(3, 3),
                                                  activation_function=torch.celu)

    def forward(self, current_encoding, current_state, previous_pose, current_pose, estimated_current_depth, camera_matrix):
        """Advance the LSTM by one step and return (hidden, cell) state."""
        batch, _, height, width = current_encoding.size()
        # Lazily create a fresh state for the first frame of a sequence.
        if current_state is None:
            hidden_state, cell_state = self.lstm_cell.init_hidden(batch_size=batch,
                                                                  image_size=(height, width))
        else:
            hidden_state, cell_state = current_state
        next_hidden_state, next_cell_state = self.lstm_cell(input_tensor=current_encoding,
                                                            cur_state=[hidden_state, cell_state],
                                                            previous_pose=previous_pose,
                                                            current_pose=current_pose,
                                                            estimated_current_depth=estimated_current_depth,
                                                            camera_matrix=camera_matrix)
        return next_hidden_state, next_cell_state
| 15,992 | 46.316568 | 127 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/run-testing.py | import cv2
import numpy as np
import torch
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.fusionnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, LSTMFusion, CostVolumeDecoder
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_non_differentiable_rectangle_depth_estimation, \
get_warp_grid_for_cost_volume_calculation
from path import Path
from tqdm import tqdm
def predict():
    """Run offline FusionNet inference driven by precomputed keyframe files.

    Loads the five network modules from ./weights (one checkpoint per
    module, in sorted filename order), then for every keyframe index file
    predicts a depth map per reference frame and stores predictions plus
    ground-truth depths via save_results.
    """
    print("System: FUSIONNET")
    device = torch.device("cuda")
    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    lstm_fusion = LSTMFusion()
    cost_volume_decoder = CostVolumeDecoder()
    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    lstm_fusion = lstm_fusion.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, lstm_fusion, cost_volume_decoder]
    # Load one checkpoint per module; the i-th sorted file in weights/
    # must correspond to the i-th module in `model`.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    lstm_fusion = model[3]
    cost_volume_decoder = model[4]
    # Plane-sweep warping grid at half resolution (cost volume resolution).
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)
    # ImageNet normalization constants and plane-sweep depth range.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]
    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # Index filename encodes: keyframing_type+dataset+scene+_+n_measurement_frames.
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        lstm_state = None
        previous_depth = None
        previous_pose = None
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                # A "TRACKING LOST" marker resets all temporal state
                # (LSTM state and previous frame's depth/pose).
                if keyframe_index_file_line == "TRACKING LOST":
                    lstm_state = None
                    previous_depth = None
                    previous_pose = None
                    continue
                else:
                    # Line format: reference filename followed by its
                    # measurement-frame filenames, space separated.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                reference_pose = poses[reference_index]
                reference_image = load_image(image_filenames[reference_index])
                # Depth PNGs store millimeters; convert to meters.
                reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                preprocessor = PreprocessImage(K=K,
                                               old_width=reference_image.shape[1],
                                               old_height=reference_image.shape[0],
                                               new_width=Config.test_image_width,
                                               new_height=Config.test_image_height,
                                               distortion_crop=Config.test_distortion_crop,
                                               perform_crop=Config.test_perform_crop)
                reference_image = preprocessor.apply_rgb(image=reference_image,
                                                         scale_rgb=scale_rgb,
                                                         mean_rgb=mean_rgb,
                                                         std_rgb=std_rgb)
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
                measurement_poses_torch = []
                measurement_images_torch = []
                for measurement_index in measurement_indices:
                    measurement_image = load_image(image_filenames[measurement_index])
                    measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                               scale_rgb=scale_rgb,
                                                               mean_rgb=mean_rgb,
                                                               std_rgb=std_rgb)
                    measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                    measurement_images_torch.append(measurement_image_torch)
                    measurement_poses_torch.append(measurement_pose_torch)
                # Intrinsics scaled for the half-resolution cost volume and
                # the 1/32-resolution LSTM bottleneck.
                full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                half_K_torch = full_K_torch.clone().cuda()
                half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0
                lstm_K_bottom = full_K_torch.clone().cuda()
                lstm_K_bottom[:, 0:2, :] = lstm_K_bottom[:, 0:2, :] / 32.0
                inference_timer.record_start_time()
                measurement_feature_halfs = []
                for measurement_image_torch in measurement_images_torch:
                    measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                    measurement_feature_halfs.append(measurement_feature_half)
                reference_feature_half, reference_feature_quarter, \
                reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))
                cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                                 image2s=measurement_feature_halfs,
                                                 pose1=reference_pose_torch,
                                                 pose2s=measurement_poses_torch,
                                                 K=half_K_torch,
                                                 warp_grid=warp_grid,
                                                 min_depth=min_depth,
                                                 max_depth=max_depth,
                                                 n_depth_levels=n_depth_levels,
                                                 device=device,
                                                 dot_product=True)
                skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                         features_quarter=reference_feature_quarter,
                                                                         features_one_eight=reference_feature_one_eight,
                                                                         features_one_sixteen=reference_feature_one_sixteen,
                                                                         cost_volume=cost_volume)
                # Hint for the LSTM: previous prediction warped into the
                # current view (zeros for the first frame of a sequence).
                if previous_depth is not None:
                    depth_estimation = get_non_differentiable_rectangle_depth_estimation(reference_pose_torch=reference_pose_torch,
                                                                                         measurement_pose_torch=previous_pose,
                                                                                         previous_depth_torch=previous_depth,
                                                                                         full_K_torch=full_K_torch,
                                                                                         half_K_torch=half_K_torch,
                                                                                         original_height=Config.test_image_height,
                                                                                         original_width=Config.test_image_width)
                    depth_estimation = torch.nn.functional.interpolate(input=depth_estimation,
                                                                       scale_factor=(1.0 / 16.0),
                                                                       mode="nearest")
                else:
                    depth_estimation = torch.zeros(size=(1, 1, int(Config.test_image_height / 32.0), int(Config.test_image_width / 32.0))).to(device)
                lstm_state = lstm_fusion(current_encoding=bottom,
                                         current_state=lstm_state,
                                         previous_pose=previous_pose,
                                         current_pose=reference_pose_torch,
                                         estimated_current_depth=depth_estimation,
                                         camera_matrix=lstm_K_bottom)
                prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, lstm_state[0])
                previous_depth = prediction.view(1, 1, Config.test_image_height, Config.test_image_width)
                previous_pose = reference_pose_torch
                inference_timer.record_end_time_and_elapsed_time()
                prediction = prediction.cpu().numpy().squeeze()
                reference_depths.append(reference_depth)
                predictions.append(prediction)
                if Config.test_visualize:
                    visualize_predictions(numpy_reference_image=reference_image,
                                          numpy_measurement_image=measurement_image,
                                          numpy_predicted_depth=prediction,
                                          normalization_mean=mean_rgb,
                                          normalization_std=std_rgb,
                                          normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_dvmvs_fusionnet".format(keyframing_type,
                                                              dataset_name,
                                                              Config.test_image_width,
                                                              Config.test_image_height,
                                                              n_measurement_frames)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run the offline keyframe-file-driven evaluation.
if __name__ == '__main__':
    predict()
| 12,702 | 53.055319 | 149 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/run-testing-online.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.fusionnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, LSTMFusion, CostVolumeDecoder
from dvmvs.keyframe_buffer import KeyframeBuffer
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_non_differentiable_rectangle_depth_estimation, \
get_warp_grid_for_cost_volume_calculation
def predict(evaluate):
    """Run online FusionNet inference over a single scene.

    Frames are selected on the fly through a KeyframeBuffer rather than
    precomputed index files. If ``evaluate`` is True, ground-truth depth
    maps are collected alongside predictions so save_results can compute
    errors; otherwise groundtruths is passed as None.
    """
    dataset_name = Config.test_online_scene_path.split("/")[-2]
    system_name = "keyframe_{}_{}_{}_{}_dvmvs_fusionnet_online".format(dataset_name,
                                                                       Config.test_image_width,
                                                                       Config.test_image_height,
                                                                       Config.test_n_measurement_frames)
    print("Predicting with System:", system_name)
    print("# of Measurement Frames:", Config.test_n_measurement_frames)
    device = torch.device("cuda")
    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    lstm_fusion = LSTMFusion()
    cost_volume_decoder = CostVolumeDecoder()
    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    lstm_fusion = lstm_fusion.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, lstm_fusion, cost_volume_decoder]
    # Load one checkpoint per module; the i-th sorted file in weights/
    # must correspond to the i-th module in `model`.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    lstm_fusion = model[3]
    cost_volume_decoder = model[4]
    # Plane-sweep warping grid at half resolution (cost volume resolution).
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)
    # ImageNet normalization constants and plane-sweep depth range.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]
    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64
    scene_folder = Path(Config.test_online_scene_path)
    scene = scene_folder.split("/")[-1]
    print("Predicting for scene:", scene)
    keyframe_buffer = KeyframeBuffer(buffer_size=Config.test_keyframe_buffer_size,
                                     keyframe_pose_distance=Config.test_keyframe_pose_distance,
                                     optimal_t_score=Config.test_optimal_t_measure,
                                     optimal_R_score=Config.test_optimal_R_measure,
                                     store_return_indices=False)
    K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
    poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
    image_filenames = sorted((scene_folder / 'images').files("*.png"))
    inference_timer = InferenceTimer()
    lstm_state = None
    previous_depth = None
    previous_pose = None
    predictions = []
    if evaluate:
        reference_depths = []
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
    else:
        # if None the system will not be evaluated and errors will not be calculated
        reference_depths = None
        depth_filenames = None
    with torch.no_grad():
        for i in tqdm(range(0, len(poses))):
            reference_pose = poses[i]
            reference_image = load_image(image_filenames[i])
            # POLL THE KEYFRAME BUFFER
            # Integer response codes come from KeyframeBuffer.try_new_keyframe;
            # 0/2/4/5 mean "skip this frame", 3 means "tracking lost, reset
            # temporal state" — see KeyframeBuffer for the exact semantics.
            response = keyframe_buffer.try_new_keyframe(reference_pose, reference_image)
            if response == 0 or response == 2 or response == 4 or response == 5:
                continue
            elif response == 3:
                previous_depth = None
                previous_pose = None
                lstm_state = None
                continue
            preprocessor = PreprocessImage(K=K,
                                           old_width=reference_image.shape[1],
                                           old_height=reference_image.shape[0],
                                           new_width=Config.test_image_width,
                                           new_height=Config.test_image_height,
                                           distortion_crop=Config.test_distortion_crop,
                                           perform_crop=Config.test_perform_crop)
            reference_image = preprocessor.apply_rgb(image=reference_image,
                                                     scale_rgb=scale_rgb,
                                                     mean_rgb=mean_rgb,
                                                     std_rgb=std_rgb)
            if reference_depths is not None:
                # Depth PNGs store millimeters; convert to meters.
                reference_depth = cv2.imread(depth_filenames[i], -1).astype(float) / 1000.0
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_depths.append(reference_depth)
            reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
            reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
            # Intrinsics scaled for the half-resolution cost volume and the
            # 1/32-resolution LSTM bottleneck.
            full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
            half_K_torch = full_K_torch.clone().cuda()
            half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0
            lstm_K_bottom = full_K_torch.clone().cuda()
            lstm_K_bottom[:, 0:2, :] = lstm_K_bottom[:, 0:2, :] / 32.0
            measurement_poses_torch = []
            measurement_images_torch = []
            measurement_frames = keyframe_buffer.get_best_measurement_frames(Config.test_n_measurement_frames)
            for (measurement_pose, measurement_image) in measurement_frames:
                measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                           scale_rgb=scale_rgb,
                                                           mean_rgb=mean_rgb,
                                                           std_rgb=std_rgb)
                measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0)
                measurement_images_torch.append(measurement_image_torch)
                measurement_poses_torch.append(measurement_pose_torch)
            inference_timer.record_start_time()
            measurement_feature_halfs = []
            for measurement_image_torch in measurement_images_torch:
                measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                measurement_feature_halfs.append(measurement_feature_half)
            reference_feature_half, reference_feature_quarter, \
            reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))
            cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                             image2s=measurement_feature_halfs,
                                             pose1=reference_pose_torch,
                                             pose2s=measurement_poses_torch,
                                             K=half_K_torch,
                                             warp_grid=warp_grid,
                                             min_depth=min_depth,
                                             max_depth=max_depth,
                                             n_depth_levels=n_depth_levels,
                                             device=device,
                                             dot_product=True)
            skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                     features_quarter=reference_feature_quarter,
                                                                     features_one_eight=reference_feature_one_eight,
                                                                     features_one_sixteen=reference_feature_one_sixteen,
                                                                     cost_volume=cost_volume)
            # Hint for the LSTM: previous prediction warped into the current
            # view (zeros for the first processed frame).
            if previous_depth is not None:
                depth_estimation = get_non_differentiable_rectangle_depth_estimation(reference_pose_torch=reference_pose_torch,
                                                                                     measurement_pose_torch=previous_pose,
                                                                                     previous_depth_torch=previous_depth,
                                                                                     full_K_torch=full_K_torch,
                                                                                     half_K_torch=half_K_torch,
                                                                                     original_height=Config.test_image_height,
                                                                                     original_width=Config.test_image_width)
                depth_estimation = torch.nn.functional.interpolate(input=depth_estimation,
                                                                   scale_factor=(1.0 / 16.0),
                                                                   mode="nearest")
            else:
                depth_estimation = torch.zeros(size=(1, 1, int(Config.test_image_height / 32.0), int(Config.test_image_width / 32.0))).to(device)
            lstm_state = lstm_fusion(current_encoding=bottom,
                                     current_state=lstm_state,
                                     previous_pose=previous_pose,
                                     current_pose=reference_pose_torch,
                                     estimated_current_depth=depth_estimation,
                                     camera_matrix=lstm_K_bottom)
            prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, lstm_state[0])
            previous_depth = prediction.view(1, 1, Config.test_image_height, Config.test_image_width)
            previous_pose = reference_pose_torch
            inference_timer.record_end_time_and_elapsed_time()
            prediction = prediction.cpu().numpy().squeeze()
            predictions.append(prediction)
            if Config.test_visualize:
                visualize_predictions(numpy_reference_image=reference_image,
                                      numpy_measurement_image=measurement_image,
                                      numpy_predicted_depth=prediction,
                                      normalization_mean=mean_rgb,
                                      normalization_std=std_rgb,
                                      normalization_scale=scale_rgb,
                                      depth_multiplier_for_visualization=5000)
    inference_timer.print_statistics()
    save_results(predictions=predictions,
                 groundtruths=reference_depths,
                 system_name=system_name,
                 scene_name=scene,
                 save_folder=".")
# Script entry point: run the online (keyframe-buffer driven) evaluation
# with ground-truth error computation enabled.
if __name__ == '__main__':
    predict(evaluate=True)
| 12,118 | 50.351695 | 149 | py |
deep-video-mvs | deep-video-mvs-master/sample-data/run-tsdf-reconstruction.py | ################################################################################################
### This implementation is taken and adapted from https://github.com/andyzeng/tsdf-fusion-python
### Copyright (c) 2018 Andy Zeng
################################################################################################
import time
from argparse import ArgumentParser
import cv2
import gc
import numpy as np
from numba import njit, prange
from path import Path
from skimage import measure
from dvmvs.dataset_loader import PreprocessImage, load_image
try:
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
FUSION_GPU_MODE = 1
except Exception as err:
print('Warning: {}'.format(err))
print('Failed to import PyCUDA. Running fusion in CPU mode.')
FUSION_GPU_MODE = 0
class TSDFVolume:
"""Volumetric TSDF Fusion of RGB-D Images.
"""
def __init__(self, vol_bnds, voxel_size, use_gpu=True):
"""Constructor.
Args:
vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the
xyz bounds (min/max) in meters.
voxel_size (float): The volume discretization in meters.
"""
vol_bnds = np.asarray(vol_bnds)
assert vol_bnds.shape == (3, 2), "[!] `vol_bnds` should be of shape (3, 2)."
# Define voxel volume parameters
self._vol_bnds = vol_bnds
self._voxel_size = float(voxel_size)
self._trunc_margin = 5 * self._voxel_size # truncation on SDF
self._color_const = 256 * 256
# Adjust volume bounds and ensure C-order contiguous
self._vol_dim = np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / self._voxel_size).copy(order='C').astype(int)
self._vol_bnds[:, 1] = self._vol_bnds[:, 0] + self._vol_dim * self._voxel_size
self._vol_origin = self._vol_bnds[:, 0].copy(order='C').astype(np.float32)
print("Voxel volume size: {} x {} x {} - # points: {:,}".format(
self._vol_dim[0], self._vol_dim[1], self._vol_dim[2],
self._vol_dim[0] * self._vol_dim[1] * self._vol_dim[2])
)
# Initialize pointers to voxel volume in CPU memory
self._tsdf_vol_cpu = np.ones(self._vol_dim).astype(np.float32)
# for computing the cumulative moving average of observations per voxel
self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
self.gpu_mode = use_gpu and FUSION_GPU_MODE
# Copy voxel volumes to GPU
if self.gpu_mode:
self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes)
cuda.memcpy_htod(self._tsdf_vol_gpu, self._tsdf_vol_cpu)
self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes)
cuda.memcpy_htod(self._weight_vol_gpu, self._weight_vol_cpu)
self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes)
cuda.memcpy_htod(self._color_vol_gpu, self._color_vol_cpu)
# Cuda kernel function (C++)
self._cuda_src_mod = SourceModule("""
__global__ void integrate(float * tsdf_vol,
float * weight_vol,
float * color_vol,
float * vol_dim,
float * vol_origin,
float * cam_intr,
float * cam_pose,
float * other_params,
float * color_im,
float * depth_im) {
// Get voxel index
int gpu_loop_idx = (int) other_params[0];
int max_threads_per_block = blockDim.x;
int block_idx = blockIdx.z*gridDim.y*gridDim.x+blockIdx.y*gridDim.x+blockIdx.x;
int voxel_idx = gpu_loop_idx*gridDim.x*gridDim.y*gridDim.z*max_threads_per_block+block_idx*max_threads_per_block+threadIdx.x;
int vol_dim_x = (int) vol_dim[0];
int vol_dim_y = (int) vol_dim[1];
int vol_dim_z = (int) vol_dim[2];
if (voxel_idx > vol_dim_x*vol_dim_y*vol_dim_z)
return;
// Get voxel grid coordinates (note: be careful when casting)
float voxel_x = floorf(((float)voxel_idx)/((float)(vol_dim_y*vol_dim_z)));
float voxel_y = floorf(((float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z))/((float)vol_dim_z));
float voxel_z = (float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z-((int)voxel_y)*vol_dim_z);
// Voxel grid coordinates to world coordinates
float voxel_size = other_params[1];
float pt_x = vol_origin[0]+voxel_x*voxel_size;
float pt_y = vol_origin[1]+voxel_y*voxel_size;
float pt_z = vol_origin[2]+voxel_z*voxel_size;
// World coordinates to camera coordinates
float tmp_pt_x = pt_x-cam_pose[0*4+3];
float tmp_pt_y = pt_y-cam_pose[1*4+3];
float tmp_pt_z = pt_z-cam_pose[2*4+3];
float cam_pt_x = cam_pose[0*4+0]*tmp_pt_x+cam_pose[1*4+0]*tmp_pt_y+cam_pose[2*4+0]*tmp_pt_z;
float cam_pt_y = cam_pose[0*4+1]*tmp_pt_x+cam_pose[1*4+1]*tmp_pt_y+cam_pose[2*4+1]*tmp_pt_z;
float cam_pt_z = cam_pose[0*4+2]*tmp_pt_x+cam_pose[1*4+2]*tmp_pt_y+cam_pose[2*4+2]*tmp_pt_z;
// Camera coordinates to image pixels
int pixel_x = (int) roundf(cam_intr[0*3+0]*(cam_pt_x/cam_pt_z)+cam_intr[0*3+2]);
int pixel_y = (int) roundf(cam_intr[1*3+1]*(cam_pt_y/cam_pt_z)+cam_intr[1*3+2]);
// Skip if outside view frustum
int im_h = (int) other_params[2];
int im_w = (int) other_params[3];
if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z<0)
return;
// Skip invalid depth
float depth_value = depth_im[pixel_y*im_w+pixel_x];
if (depth_value == 0)
return;
// Integrate TSDF
float trunc_margin = other_params[4];
float depth_diff = depth_value-cam_pt_z;
if (depth_diff < -trunc_margin)
return;
float dist = fmin(1.0f,depth_diff/trunc_margin);
float w_old = weight_vol[voxel_idx];
float obs_weight = other_params[5];
float w_new = w_old + obs_weight;
weight_vol[voxel_idx] = w_new;
tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx]*w_old+obs_weight*dist)/w_new;
// Integrate color
float old_color = color_vol[voxel_idx];
float old_b = floorf(old_color/(256*256));
float old_g = floorf((old_color-old_b*256*256)/256);
float old_r = old_color-old_b*256*256-old_g*256;
float new_color = color_im[pixel_y*im_w+pixel_x];
float new_b = floorf(new_color/(256*256));
float new_g = floorf((new_color-new_b*256*256)/256);
float new_r = new_color-new_b*256*256-new_g*256;
new_b = fmin(roundf((old_b*w_old+obs_weight*new_b)/w_new),255.0f);
new_g = fmin(roundf((old_g*w_old+obs_weight*new_g)/w_new),255.0f);
new_r = fmin(roundf((old_r*w_old+obs_weight*new_r)/w_new),255.0f);
color_vol[voxel_idx] = new_b*256*256+new_g*256+new_r;
}""")
self._cuda_integrate = self._cuda_src_mod.get_function("integrate")
# Determine block/grid size on GPU
gpu_dev = cuda.Device(0)
self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK
n_blocks = int(np.ceil(float(np.prod(self._vol_dim)) / float(self._max_gpu_threads_per_block)))
grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X, int(np.floor(np.cbrt(n_blocks))))
grid_dim_y = min(gpu_dev.MAX_GRID_DIM_Y, int(np.floor(np.sqrt(n_blocks / grid_dim_x))))
grid_dim_z = min(gpu_dev.MAX_GRID_DIM_Z, int(np.ceil(float(n_blocks) / float(grid_dim_x * grid_dim_y))))
self._max_gpu_grid_dim = np.array([grid_dim_x, grid_dim_y, grid_dim_z]).astype(int)
self._n_gpu_loops = int(np.ceil(float(np.prod(self._vol_dim)) / float(np.prod(self._max_gpu_grid_dim) * self._max_gpu_threads_per_block)))
else:
# Get voxel grid coordinates
xv, yv, zv = np.meshgrid(
range(self._vol_dim[0]),
range(self._vol_dim[1]),
range(self._vol_dim[2]),
indexing='ij'
)
self.vox_coords = np.concatenate([
xv.reshape(1, -1),
yv.reshape(1, -1),
zv.reshape(1, -1)
], axis=0).astype(int).T
@staticmethod
@njit(parallel=True)
def vox2world(vol_origin, vox_coords, vox_size):
"""Convert voxel grid coordinates to world coordinates.
"""
vol_origin = vol_origin.astype(np.float32)
vox_coords = vox_coords.astype(np.float32)
cam_pts = np.empty_like(vox_coords, dtype=np.float32)
for i in prange(vox_coords.shape[0]):
for j in range(3):
cam_pts[i, j] = vol_origin[j] + (vox_size * vox_coords[i, j])
return cam_pts
@staticmethod
@njit(parallel=True)
def cam2pix(cam_pts, intr):
"""Convert camera coordinates to pixel coordinates.
"""
intr = intr.astype(np.float32)
fx, fy = intr[0, 0], intr[1, 1]
cx, cy = intr[0, 2], intr[1, 2]
pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64)
for i in prange(cam_pts.shape[0]):
pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx))
pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy))
return pix
@staticmethod
@njit(parallel=True)
def integrate_tsdf(tsdf_vol, dist, w_old, obs_weight):
"""Integrate the TSDF volume.
"""
tsdf_vol_int = np.empty_like(tsdf_vol, dtype=np.float32)
w_new = np.empty_like(w_old, dtype=np.float32)
for i in prange(len(tsdf_vol)):
w_new[i] = w_old[i] + obs_weight
tsdf_vol_int[i] = (w_old[i] * tsdf_vol[i] + obs_weight * dist[i]) / w_new[i]
return tsdf_vol_int, w_new
    def integrate(self, color_im, depth_im, cam_intr, cam_pose, obs_weight=1.):
        """Integrate an RGB-D frame into the TSDF volume.
        Args:
            color_im (ndarray): An RGB image of shape (H, W, 3).
            depth_im (ndarray): A depth image of shape (H, W).
            cam_intr (ndarray): The camera intrinsics matrix of shape (3, 3).
            cam_pose (ndarray): The camera pose (i.e. extrinsics) of shape (4, 4).
            obs_weight (float): The weight to assign for the current observation. A higher
                value gives this frame more influence in the running weighted
                average stored in the TSDF and color volumes.
        """
        im_h, im_w = depth_im.shape
        # Fold RGB color image into a single channel image
        # (packed per pixel as B * _color_const + G * 256 + R, one float value)
        color_im = color_im.astype(np.float32)
        color_im = np.floor(color_im[..., 2] * self._color_const + color_im[..., 1] * 256 + color_im[..., 0])
        if self.gpu_mode: # GPU mode: integrate voxel volume (calls CUDA kernel)
            # Multiple kernel launches cover volumes with more voxels than one
            # grid of blocks can address (see _n_gpu_loops computed at init).
            for gpu_loop_idx in range(self._n_gpu_loops):
                self._cuda_integrate(self._tsdf_vol_gpu,
                                     self._weight_vol_gpu,
                                     self._color_vol_gpu,
                                     cuda.InOut(self._vol_dim.astype(np.float32)),
                                     cuda.InOut(self._vol_origin.astype(np.float32)),
                                     cuda.InOut(cam_intr.reshape(-1).astype(np.float32)),
                                     cuda.InOut(cam_pose.reshape(-1).astype(np.float32)),
                                     cuda.InOut(np.asarray([
                                         gpu_loop_idx,
                                         self._voxel_size,
                                         im_h,
                                         im_w,
                                         self._trunc_margin,
                                         obs_weight
                                     ], np.float32)),
                                     cuda.InOut(color_im.reshape(-1).astype(np.float32)),
                                     cuda.InOut(depth_im.reshape(-1).astype(np.float32)),
                                     block=(self._max_gpu_threads_per_block, 1, 1),
                                     grid=(
                                         int(self._max_gpu_grid_dim[0]),
                                         int(self._max_gpu_grid_dim[1]),
                                         int(self._max_gpu_grid_dim[2]),
                                     )
                                     )
        else: # CPU mode: integrate voxel volume (vectorized implementation)
            # Convert voxel grid coordinates to pixel coordinates
            cam_pts = self.vox2world(self._vol_origin, self.vox_coords, self._voxel_size)
            cam_pts = TSDFFusion.rigid_transform(cam_pts, np.linalg.inv(cam_pose))
            pix_z = cam_pts[:, 2]
            pix = self.cam2pix(cam_pts, cam_intr)
            pix_x, pix_y = pix[:, 0], pix[:, 1]
            # Eliminate pixels outside view frustum
            valid_pix = np.logical_and(pix_x >= 0,
                        np.logical_and(pix_x < im_w,
                        np.logical_and(pix_y >= 0,
                        np.logical_and(pix_y < im_h,
                        pix_z > 0))))
            depth_val = np.zeros(pix_x.shape)
            depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]]
            # Integrate TSDF
            depth_diff = depth_val - pix_z
            # Keep voxels with a valid depth sample that are not far behind the surface
            valid_pts = np.logical_and(depth_val > 0, depth_diff >= -self._trunc_margin)
            dist = np.minimum(1, depth_diff / self._trunc_margin)
            valid_vox_x = self.vox_coords[valid_pts, 0]
            valid_vox_y = self.vox_coords[valid_pts, 1]
            valid_vox_z = self.vox_coords[valid_pts, 2]
            w_old = self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
            tsdf_vals = self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
            valid_dist = dist[valid_pts]
            tsdf_vol_new, w_new = self.integrate_tsdf(tsdf_vals, valid_dist, w_old, obs_weight)
            self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = w_new
            self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = tsdf_vol_new
            # Integrate color: unpack old packed colors, blend with the new frame, repack
            old_color = self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
            old_b = np.floor(old_color / self._color_const)
            old_g = np.floor((old_color - old_b * self._color_const) / 256)
            old_r = old_color - old_b * self._color_const - old_g * 256
            new_color = color_im[pix_y[valid_pts], pix_x[valid_pts]]
            new_b = np.floor(new_color / self._color_const)
            new_g = np.floor((new_color - new_b * self._color_const) / 256)
            new_r = new_color - new_b * self._color_const - new_g * 256
            new_b = np.minimum(255., np.round((w_old * old_b + obs_weight * new_b) / w_new))
            new_g = np.minimum(255., np.round((w_old * old_g + obs_weight * new_g) / w_new))
            new_r = np.minimum(255., np.round((w_old * old_r + obs_weight * new_r) / w_new))
            self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = new_b * self._color_const + new_g * 256 + new_r
def get_volume(self):
if self.gpu_mode:
cuda.memcpy_dtoh(self._tsdf_vol_cpu, self._tsdf_vol_gpu)
cuda.memcpy_dtoh(self._color_vol_cpu, self._color_vol_gpu)
return self._tsdf_vol_cpu, self._color_vol_cpu
def get_point_cloud(self):
"""Extract a point cloud from the voxel volume.
"""
tsdf_vol, color_vol = self.get_volume()
# Marching cubes
verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]
verts_ind = np.round(verts).astype(int)
verts = verts * self._voxel_size + self._vol_origin
# Get vertex colors
rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
colors_b = np.floor(rgb_vals / self._color_const)
colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
colors = colors.astype(np.uint8)
pc = np.hstack([verts, colors])
return pc
def get_mesh(self):
"""Compute a mesh from the voxel volume using marching cubes.
"""
tsdf_vol, color_vol = self.get_volume()
# Marching cubes
verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)
verts_ind = np.round(verts).astype(int)
verts = verts * self._voxel_size + self._vol_origin # voxel grid coordinates to world coordinates
# Get vertex colors
rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
colors_b = np.floor(rgb_vals / self._color_const)
colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
colors = colors.astype(np.uint8)
return verts, faces, norms, colors
class TSDFFusion:
    """Static helpers to fuse depth maps into a TSDF volume and export .ply results."""

    @staticmethod
    def rigid_transform(xyz, transform):
        """Applies a rigid transform to an (N, 3) pointcloud.

        Args:
            xyz (ndarray): Points of shape (N, 3).
            transform (ndarray): Homogeneous 4x4 transform.
        Returns:
            ndarray: Transformed points, shape (N, 3).
        """
        xyz_h = np.hstack([xyz, np.ones((len(xyz), 1), dtype=np.float32)])
        xyz_t_h = np.dot(transform, xyz_h.T).T
        return xyz_t_h[:, :3]

    @staticmethod
    def get_view_frustum(depth_im, cam_intr, cam_pose):
        """Get corners of 3D camera view frustum of depth image.

        Returns a (3, 5) array: camera center plus the four far-plane corners,
        expressed in world coordinates.
        """
        im_h = depth_im.shape[0]
        im_w = depth_im.shape[1]
        max_depth = np.max(depth_im)
        view_frust_pts = np.array([
            (np.array([0, 0, 0, im_w, im_w]) - cam_intr[0, 2]) * np.array([0, max_depth, max_depth, max_depth, max_depth]) / cam_intr[0, 0],
            (np.array([0, 0, im_h, 0, im_h]) - cam_intr[1, 2]) * np.array([0, max_depth, max_depth, max_depth, max_depth]) / cam_intr[1, 1],
            np.array([0, max_depth, max_depth, max_depth, max_depth])
        ])
        view_frust_pts = TSDFFusion.rigid_transform(view_frust_pts.T, cam_pose).T
        return view_frust_pts

    @staticmethod
    def meshwrite(filename, verts, faces, norms, colors):
        """Save a 3D mesh to a polygon .ply file."""
        # Context manager guarantees the file is flushed and closed.
        with open(filename, 'w') as ply_file:
            # Write header
            ply_file.write("ply\n")
            ply_file.write("format ascii 1.0\n")
            ply_file.write("element vertex %d\n" % (verts.shape[0]))
            ply_file.write("property float x\n")
            ply_file.write("property float y\n")
            ply_file.write("property float z\n")
            ply_file.write("property float nx\n")
            ply_file.write("property float ny\n")
            ply_file.write("property float nz\n")
            ply_file.write("property uchar red\n")
            ply_file.write("property uchar green\n")
            ply_file.write("property uchar blue\n")
            ply_file.write("element face %d\n" % (faces.shape[0]))
            ply_file.write("property list uchar int vertex_index\n")
            ply_file.write("end_header\n")
            # Write vertex list
            for i in range(verts.shape[0]):
                ply_file.write("%f %f %f %f %f %f %d %d %d\n" % (
                    verts[i, 0], verts[i, 1], verts[i, 2],
                    norms[i, 0], norms[i, 1], norms[i, 2],
                    colors[i, 0], colors[i, 1], colors[i, 2],
                ))
            # Write face list
            for i in range(faces.shape[0]):
                ply_file.write("3 %d %d %d\n" % (faces[i, 0], faces[i, 1], faces[i, 2]))

    @staticmethod
    def pcwrite(filename, xyzrgb):
        """Save a point cloud to a polygon .ply file.

        Args:
            filename: output path.
            xyzrgb (ndarray): (N, 6) array of xyz coordinates and rgb values.
        """
        xyz = xyzrgb[:, :3]
        rgb = xyzrgb[:, 3:].astype(np.uint8)
        # BUG FIX: the file handle was previously never closed, so buffered
        # writes could be lost; the context manager flushes and closes it.
        with open(filename, 'w') as ply_file:
            # Write header
            ply_file.write("ply\n")
            ply_file.write("format ascii 1.0\n")
            ply_file.write("element vertex %d\n" % (xyz.shape[0]))
            ply_file.write("property float x\n")
            ply_file.write("property float y\n")
            ply_file.write("property float z\n")
            ply_file.write("property uchar red\n")
            ply_file.write("property uchar green\n")
            ply_file.write("property uchar blue\n")
            ply_file.write("end_header\n")
            # Write vertex list
            for i in range(xyz.shape[0]):
                ply_file.write("%f %f %f %d %d %d\n" % (
                    xyz[i, 0], xyz[i, 1], xyz[i, 2],
                    rgb[i, 0], rgb[i, 1], rgb[i, 2],
                ))

    @staticmethod
    def integrate(tsdf_volume, images, depths, poses, K, mesh_name, save_progressive):
        """Fuse a sequence of RGB-D frames into tsdf_volume and save the final mesh.

        When save_progressive is True, a mesh is additionally written to disk
        after every fused frame (for progressive visualizations).
        """
        n_imgs = len(images)
        # Loop through RGB-D images and fuse them together
        t0_elapse = time.time()
        for i in range(n_imgs):
            print("Fusing frame %d/%d" % (i + 1, n_imgs), "for", mesh_name)
            # Integrate observation into voxel volume (assume color aligned with depth)
            tsdf_volume.integrate(images[i], depths[i], K, poses[i], obs_weight=1.)
            if save_progressive:
                print("Saving for progressive visuals...")
                verts, faces, norms, colors = tsdf_volume.get_mesh()
                TSDFFusion.meshwrite(mesh_name + "_frame_{}.ply".format(str(i).zfill(5)), verts, faces, norms, colors)
        fps = n_imgs / (time.time() - t0_elapse)
        print("Average FPS: {:.2f}".format(fps))
        # Get mesh from voxel volume and save to disk (can be viewed with Meshlab)
        print("Saving mesh to", mesh_name)
        verts, faces, norms, colors = tsdf_volume.get_mesh()
        TSDFFusion.meshwrite(mesh_name + "_complete.ply", verts, faces, norms, colors)

    @staticmethod
    def calculate_volume_bounds(depth_maps, poses, K):
        """Compute axis-aligned (3, 2) min/max bounds covering all view frustums."""
        assert len(depth_maps) == len(poses)
        volume_bounds = np.zeros((3, 2))
        for depth_map, pose in zip(depth_maps, poses):
            view_frust_points = TSDFFusion.get_view_frustum(depth_map, K, pose)
            volume_bounds[:, 0] = np.minimum(volume_bounds[:, 0], np.amin(view_frust_points, axis=1))
            volume_bounds[:, 1] = np.maximum(volume_bounds[:, 1], np.amax(view_frust_points, axis=1))
        return volume_bounds
def run(reconstruction_folder,
        prediction_folder,
        data_folder,
        dataset_name,
        scene_name,
        system_name,
        voxel_size,
        max_depth,
        use_groundtruth_to_anchor,
        save_progressive,
        save_groundtruth):
    """Fuse predicted (and optionally groundtruth) depth maps of one scene into TSDF meshes.

    Args:
        reconstruction_folder: output folder for the .ply reconstructions.
        prediction_folder: folder holding the saved depth predictions (.npz, key "arr_0").
        data_folder: root folder containing <dataset_name>/<scene_name> and 'indices'.
        dataset_name: dataset sub-folder name.
        scene_name: scene sub-folder name.
        system_name: prediction-system identifier; its third '_'-separated token
            is used as the number of measurement frames (nmeas).
        voxel_size: TSDF voxel size (meters).
        max_depth: depth values above this are zeroed out (treated as invalid).
        use_groundtruth_to_anchor: compute volume bounds from groundtruth depths.
        save_progressive: save a mesh after every fused keyframe.
        save_groundtruth: also fuse and save a groundtruth reconstruction.
    """
    #####
    ##### LIST RELEVANT FILENAMES #####
    #####
    # NOTE(review): Path(...).files(...) below is the path.py API, not pathlib —
    # confirm which Path is imported at the top of the file.
    dataset_folder = Path(data_folder) / dataset_name
    scene_folder = dataset_folder / scene_name
    original_K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
    all_poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
    all_image_filenames = sorted((scene_folder / 'images').files("*.png"))
    prediction_filename_regex = "keyframe_{}_{}_predictions_{}*".format(dataset_name, system_name, scene_name)
    prediction_filename = Path(prediction_folder).files(prediction_filename_regex)[0]
    predictions = np.load(prediction_filename)["arr_0"]
    prediction_height, prediction_width = np.shape(predictions[0])
    nmeas = system_name.split('_')[2]
    keyframe_filenames_regex = "keyframe+{}+{}+nmeas+{}".format(dataset_name, scene_name, nmeas)
    keyframe_filenames = np.loadtxt(Path(data_folder) / 'indices' / keyframe_filenames_regex, dtype=str, delimiter="\n")
    keyframe_poses = []
    keyframe_image_filenames = []
    # Collect the pose and image path of every tracked keyframe
    for keyframe_filename in keyframe_filenames:
        if keyframe_filename == "TRACKING LOST":
            continue
        keyframe_filename = keyframe_filename.split(" ")[0]
        image_filename = scene_folder / 'images' / keyframe_filename
        pose_index = all_image_filenames.index(image_filename)
        keyframe_poses.append(all_poses[pose_index])
        keyframe_image_filenames.append(image_filename)
    #####
    ##### PREPARE IMAGES FOR TSDF RECONSTRUCTION #####
    #####
    # Rescale the intrinsics to the prediction resolution
    temp_image = load_image(all_image_filenames[0])
    preprocessor = PreprocessImage(K=original_K,
                                   old_width=temp_image.shape[1],
                                   old_height=temp_image.shape[0],
                                   new_width=prediction_width,
                                   new_height=prediction_height,
                                   distortion_crop=0,
                                   perform_crop=False)
    scaled_K = preprocessor.get_updated_intrinsics()
    # ScanNet has black pixels around the edges of the image due to undistortion,
    # let's prepare a mask to ignore those corresponding predictions
    edge_pixel_amount = 10
    edge_mask = np.zeros((prediction_height, prediction_width), dtype=bool)
    edge_mask[0:edge_pixel_amount, :] = True
    edge_mask[prediction_height - edge_pixel_amount: prediction_height, :] = True
    edge_mask[:, 0:edge_pixel_amount] = True
    edge_mask[:, prediction_width - edge_pixel_amount: prediction_width] = True
    keyframe_images = []
    keyframe_predictions = []
    for index in range(0, len(keyframe_image_filenames)):
        keyframe_image = load_image(keyframe_image_filenames[index])
        keyframe_image = cv2.resize(keyframe_image, dsize=(prediction_width, prediction_height), interpolation=cv2.INTER_NEAREST)
        keyframe_prediction = predictions[index]
        if "scannet" in dataset_name:
            # masking the predictions corresponding to black pixels around the edges of the image
            black_mask = np.mean(keyframe_image.astype(float), axis=-1) < 10.0
            combined_mask = np.logical_and(black_mask, edge_mask)
            keyframe_prediction[combined_mask] = 0.0
        # masking the depth values larger than max_depth
        keyframe_prediction[keyframe_prediction > max_depth] = 0.0
        keyframe_predictions.append(keyframe_prediction)
        keyframe_images.append(keyframe_image.astype(np.uint8))
    #####
    ##### CALCULATE TSDF VOLUME BOUNDS WITH OR WITHOUT GROUNDTRUTH ANCHORING #####
    #####
    all_groundtruths = None
    if use_groundtruth_to_anchor or save_groundtruth:
        # Groundtruth depth is stored as 16-bit PNG in millimeters
        all_groundtruth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        all_groundtruths = []
        for groundtruth_filename in all_groundtruth_filenames:
            groundtruth = cv2.imread(groundtruth_filename, -1).astype(float) / 1000.0
            groundtruth[groundtruth > max_depth] = 0.0
            all_groundtruths.append(groundtruth)
    if use_groundtruth_to_anchor:
        volume_bounds = TSDFFusion.calculate_volume_bounds(all_groundtruths, all_poses, original_K)
    else:
        volume_bounds = TSDFFusion.calculate_volume_bounds(keyframe_predictions, keyframe_poses, scaled_K)
        volume_bounds *= 1.05  # give some margin for the volume bounds accounting for mistakes
    #####
    ##### RUN THE RECONSTRUCTION WITH GROUNDTRUTH DEPTH MAPS #####
    #####
    if save_groundtruth:
        tsdf_volume = TSDFVolume(volume_bounds, voxel_size=voxel_size)
        mesh_name = "{}/reconstruction_voxelsize-{}_maxdepth-{}_anchor-{}_{}_{}_{}".format(reconstruction_folder,
                                                                                           voxel_size,
                                                                                           max_depth,
                                                                                           use_groundtruth_to_anchor,
                                                                                           "GROUNDTRUTH",
                                                                                           dataset_name,
                                                                                           scene_name)
        all_images = []
        for image_filename in all_image_filenames:
            image = load_image(image_filename)
            all_images.append(image)
        TSDFFusion.integrate(tsdf_volume=tsdf_volume,
                             images=all_images,
                             depths=all_groundtruths,
                             poses=all_poses,
                             K=original_K,
                             mesh_name=mesh_name,
                             save_progressive=False)
        # Free the (large) groundtruth volume before allocating the next one
        tsdf_volume = None
        gc.collect()
    #####
    ##### RUN THE RECONSTRUCTION WITH PREDICTED DEPTH MAPS #####
    #####
    tsdf_volume = TSDFVolume(volume_bounds, voxel_size=voxel_size)
    mesh_name = "{}/reconstruction_voxelsize-{}_maxdepth-{}_anchor-{}_{}_{}_{}".format(reconstruction_folder,
                                                                                       voxel_size,
                                                                                       max_depth,
                                                                                       use_groundtruth_to_anchor,
                                                                                       system_name,
                                                                                       dataset_name,
                                                                                       scene_name)
    TSDFFusion.integrate(tsdf_volume=tsdf_volume,
                         images=keyframe_images,
                         depths=keyframe_predictions,
                         poses=keyframe_poses,
                         K=scaled_K,
                         mesh_name=mesh_name,
                         save_progressive=save_progressive)
if __name__ == "__main__":
    # Command-line entry point: parse options and launch the reconstruction.
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--reconstruction_folder', default="./reconstructions", type=str)
    arg_parser.add_argument('--prediction_folder', default="./predictions", type=str)
    arg_parser.add_argument('--data_folder', default=".", type=str)
    arg_parser.add_argument('--dataset_name', default="hololens-dataset", type=str)
    arg_parser.add_argument('--scene_name', default="000", type=str)
    arg_parser.add_argument('--system_name', default='320_256_3_dvmvs_fusionnet_online', type=str)
    arg_parser.add_argument('--voxel_size', default=0.025, type=float)
    arg_parser.add_argument('--max_depth', default=5.0, type=float)
    arg_parser.add_argument('--use_groundtruth_to_anchor', action='store_true',
                            help="flag for using groundtruth depth maps to calculate the volume bounds and the origin of the volume,"
                                 "recommended if groundtruth depth maps are available")
    arg_parser.add_argument('--save_progressive', action='store_true',
                            help="flag for saving the mesh after each fused keyframe to the disk to get the progressive visuals "
                                 "similar to the demo video shown in README")
    arg_parser.add_argument('--save_groundtruth', action='store_true',
                            help="flag for saving the complete reconstruction resulting from groundtruth depth maps")
    # The option names match run()'s parameter names one-to-one, so forward them directly.
    run(**vars(arg_parser.parse_args()))
| 32,077 | 47.383107 | 150 | py |
uncertainty-project | uncertainty-project-master/Code/core/losses.py | '''
losses.py
Includes the losses / error functions for different models and tasks
'''
import sys
from data import predict_label
import numpy as np
from scipy.integrate import nquad
from scipy.stats import multivariate_normal
# === LOSSES FOR AMP WITH THE ORDER PARAMETERS
def mse(x : np.ndarray, xhat : np.ndarray) -> float:
    '''
    Mean squared error between the estimator and the ground-truth parameter.
    '''
    diff = x - xhat
    return np.mean(diff ** 2)
def logistic_loss(y : np.ndarray, yhat : np.ndarray) -> float:
    """
    Mean logistic loss log(1 + exp(-y * yhat)).

    Remark : yhat can be preactivation.

    Uses np.logaddexp(0, -z) == log(1 + exp(-z)), which is numerically stable:
    the previous np.log(1 + np.exp(-z)) overflowed to inf for large negative
    margins z.
    """
    return np.mean(np.logaddexp(0.0, -(yhat * y)))
def square_loss(y : np.ndarray, yhat : np.ndarray):
    """Mean squared difference between labels and predictions."""
    residual = y - yhat
    return np.mean(residual * residual)
def classification_error(y : np.ndarray, yhat : np.ndarray) -> float:
    '''
    Error rate for the classification task; yhat and y must be -1 or 1.
    On +-1 labels (y - yhat)^2 equals 4 exactly when y != yhat, so a quarter
    of the mean square loss is the fraction of misclassified samples.
    '''
    return 0.25 * np.mean((y - yhat) ** 2)
def classification_error_overlap(q : float, rho : float = 1., sig : float = 0.) -> float:
    """
    Sign-mismatch (generalization) error in the Bayes-optimal setting.

    The pair of local fields is a centered bivariate Gaussian with covariance
    [[rho + Delta, q], [q, q]], and for such a pair (Sheppard's formula)
        P(sign(x) != sign(y)) = arccos(r) / pi,
    with correlation r = q / sqrt(q * (rho + Delta)).
    This closed form replaces the previous 2D numerical integration (nquad),
    which was slow and undefined for the degenerate case q = 0.

    arguments :
        - q : teacher-student overlap
        - rho : teacher-teacher overlap
        - sig : noise level (Delta = sig**2)
    """
    Delta = sig**2
    if q == 0.:
        # Uncorrelated fields: the signs agree half of the time.
        return 0.5
    r = q / np.sqrt(q * (rho + Delta))
    # Guard against tiny floating-point excursions outside [-1, 1]
    r = min(1.0, max(-1.0, r))
    return np.arccos(r) / np.pi
'''
Below : dictionaries relating each task to its corresponding loss function
NOTE : For ridge, we could express the generalization error analytically as
a function of w and what, but for now we compute the error by sampling a test set
'''
# Training losses
# Maps each task name to the loss minimized during training.
dic_losses = {
    'logistic' : logistic_loss,
    'ridge' : square_loss,
    'l2_classification' : square_loss,
}
# Remember, we'll always use the preactivations as input !
# They must be already normalized by d
# Maps each task name to the metric used to report train/test error.
# NOTE(review): predict_label is called here with a single argument while
# data.predict_label takes (X, w) — confirm which predict_label is in scope.
dic_error = {
    'ridge' : square_loss,
    'logistic' : lambda y, yhat : classification_error(np.sign(y), np.sign(yhat)),
    'l2_classification' : lambda y, yhat : classification_error(predict_label(y), predict_label(yhat))
}
| 2,392 | 30.486842 | 131 | py |
uncertainty-project | uncertainty-project-master/Code/core/utility.py | # utility.py
# various useful functions
from typing import List
import numpy as np
import scipy.optimize as opt4
from scipy.optimize import minimize_scalar, root_scalar
from scipy.special import erf, erfc
from datetime import datetime
# FUNCTIONS OF THE LOGIT MODEL (all applied elementwise via np.vectorize)
# Logistic function 1 / (1 + exp(-x)).
sigmoid = np.vectorize(lambda x : 1. / (1. + np.exp( -x )))
# Inverse of the sigmoid (logit); only defined for y in (0, 1).
sigmoid_inv = np.vectorize(lambda y : np.log(y/(1-y)))
# Derivative of erf : 2 / sqrt(pi) * exp(-x^2).
erf_prime = np.vectorize(lambda x : 2. / np.sqrt(np.pi) * np.exp(-x**2))
# Derivative of erfc = -erf_prime.
erfc_prime = np.vectorize(lambda x : -2. / np.sqrt(np.pi) * np.exp(-x**2))
# Variance of a +-1 label with P(+1) = p : 4 p (1 - p).
bernoulli_variance = np.vectorize(lambda p : 4 * p * (1. - p))
def probit(lf, sigma):
    """Probit link: Gaussian tail probability of the local field lf at noise scale sigma."""
    denom = np.sqrt(2 * sigma**2)
    return 0.5 * erfc(- lf / denom)
# Different kinds of variances
def compute_vector_variance(w_list : List[np.ndarray]) -> float:
'''
NOTE : Outdated
Compute the quantity E( ||w||^2 ) - || E(w) ||^2
'''
d = len(w_list[0])
w_array = np.array(w_list)
vars = np.var(w_array, axis=0)
return np.mean(vars)
def compute_variances(w_list : List[np.ndarray]) -> dict:
    '''
    NOTE : Outdated
    Argument :
        - w_list : N x d matrix
    Returns a dict with :
        - 'w_variance' : variance of w as computed by 'compute_vector_variance'
        - 'sqd_norm_variance' : variance of the squared norm of w, renormalized by d
    '''
    dim = len(w_list[0])
    w_array = np.array(w_list)
    squared_norms = np.sum(w_array ** 2, axis=1)
    return {
        'w_variance': compute_vector_variance(w_array),
        'sqd_norm_variance': np.var(squared_norms) / dim,
    }
def proximal_operator_by_derivation(func : callable, func_prime : callable, x : float, tau : float) -> float:
    """
    Returns the root of func(z) + (z - x) / tau using a Newton-type solver;
    func_prime (the derivative of func) is needed for the fprime argument.
    """
    def stationarity(z):
        return (z - x) / tau + func(z)

    def stationarity_prime(z):
        return 1 / tau + func_prime(z)

    solution = root_scalar(stationarity, x0=x, fprime=stationarity_prime)
    return solution.root
def proximal_operator(func : callable, x : float, tau : float) -> float:
    """Proximal operator of func at x : argmin_z (z - x)^2 / (2 tau) + func(z)."""
    def objective(z):
        return ((z - x) ** 2) / (2 * tau) + func(z)

    result = minimize_scalar(objective, method='Golden')
    # result['x'] here is an array with a single element inside
    minimizer = result['x']
    if minimizer > 1e10:
        # Warn (by printing) when the golden-section search diverges.
        print(minimizer)
    return minimizer
# ==== VARIANCE FOR THE BAYES-OPTIMAL VARIANCE ====
def Zy(y : int, w : float, V : float, sigma : float = 0.0) -> float:
delta = 1e-10
U = V + sigma**2 + delta
return 0.5 * (1 + erf(y * w / np.sqrt(2*U)))
def y_teacher_proba(w : List[float], x : List[float], y : int, sigma : float = 0.) -> float:
    """
    Probability that the teacher outputs label y (only works for y = -1 or 1).
    """
    local_field = w @ x
    return Zy(y, local_field, 0.0, sigma)
def y_bo_proba(what : List[float], vhat : List[float], x : List[float], y : int, sigma : float = 0.) -> float:
    """
    Bayes-optimal posterior probability of label y (only works for y = -1 or 1).
    """
    mean_field = what @ x
    var_field = vhat @ x**2
    return Zy(y, mean_field, var_field, sigma)
def y_bo_expectation(what : List[float], vhat : List[float], x : List[float], sigma : float = 0.) -> float:
    """Posterior mean of the binary label under the Bayes-optimal predictor."""
    p_plus = y_bo_proba(what, vhat, x, 1, sigma)
    p_minus = y_bo_proba(what, vhat, x, -1, sigma)
    return p_plus - p_minus
def y_bo_variance(what : np.ndarray, vhat : np.ndarray, x : List[float], sigma : float = 0.0) -> float:
    """Posterior variance of the label; E[y^2] = 1 since labels are binary."""
    mean_label = y_bo_expectation(what, vhat, x, sigma)
    return 1. - mean_label ** 2
# ==== INTEGRATE WITH MONTE CARLO W.R.T GAUSSIAN MEASURE ====
def gaussian_mc(func : callable, mean : List[float], cov : List[List[float]], n_samples : int = 10000) -> float:
tmp = []
Xs = np.random.multivariate_normal(mean, cov, size=(n_samples,))
for i in range(n_samples):
tmp.append(func(Xs[i]))
return np.mean(tmp)
# === DAMPING USED E.G. to compute
def damping(q_new : float, q_old : float, coef_damping : float =0.5) -> float:
if q_old == float('inf') or np.isnan(q_old):
return q_new
return (1 - coef_damping) * q_new + coef_damping * q_old | 3,849 | 33.375 | 112 | py |
uncertainty-project | uncertainty-project-master/Code/core/data.py | '''
data.py
File containing the functions to generate the data
So far, only data w/ gaussian covariance matrix
NOTE : For now, all
'''
from typing import Tuple
import numpy as np
import sys
from numpy.core.fromnumeric import size
sys.path.append('..')
sys.path.append('core')
import core.utility as utility
def iid_teacher(d : float) -> np.ndarray:
    """Draw a d-dimensional teacher vector with i.i.d. N(0, 1) components."""
    return np.random.normal(loc=0., scale=1., size=(d, ))
def iid_input(n : int, d : float) -> np.ndarray:
    """Draw an (n, d) design matrix with i.i.d. Gaussian entries scaled by 1/sqrt(d)."""
    samples = np.random.normal(loc=0., scale=1., size=(n, d))
    return samples / np.sqrt(d)
class DataSampler:
    """Base class sampling (teacher, inputs, labels) for a generalized linear model."""

    def __init__(self, **kwargs) -> None:
        # Deterministic channel by default (also keeps backward compatibility).
        self.sig = kwargs.get('sig', 0.)

    def sample_instance(self, d : int, alpha : float) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Sample a teacher x0 together with n = ceil(alpha * d) input/label pairs."""
        teacher = iid_teacher(d)
        inputs, labels = self.sample_data(teacher, alpha)
        return teacher, inputs, labels

    def sample_data_n(self, w0 : np.ndarray, n : int) -> Tuple[np.ndarray, np.ndarray]:
        """Sample exactly n inputs and the corresponding teacher labels."""
        inputs = iid_input(n, len(w0))
        return inputs, self.activation(inputs @ w0)

    def sample_data(self, w0 : np.ndarray, alpha : float) -> Tuple[np.ndarray, np.ndarray]:
        """Sample n = ceil(alpha * d) input/label pairs for teacher w0."""
        n_samples = int(np.ceil(alpha * len(w0)))
        return self.sample_data_n(w0, n_samples)

    def activation(self, preactivation : np.ndarray) -> np.ndarray:
        """Label channel; subclasses must implement it."""
        raise NotImplementedError
class LogitModelData(DataSampler):
    """Labels drawn from a logit channel; sig = 0 degenerates to a deterministic sign."""

    def activation(self, preactivation: np.ndarray) -> np.ndarray:
        assert self.sig >= 0
        if self.sig == 0.:
            # Noiseless limit: deterministic teacher.
            return np.sign(preactivation)
        n_samples = len(preactivation)
        p_plus = utility.sigmoid(preactivation / self.sig)
        # Bernoulli draw mapped to {-1, +1}; same uniform draws keep the RNG stream identical.
        return 2 * np.ceil(p_plus - np.random.uniform(size=n_samples)) - 1.
class ProbitModelData(DataSampler):
    """Labels given by the sign of the preactivation corrupted by Gaussian noise."""

    def activation(self, preactivation : np.ndarray) -> np.ndarray:
        n_samples = len(preactivation)
        noise = self.sig * np.random.normal(0., 1., size=(n_samples, ))
        return np.sign(preactivation + noise)
def predict_label(X : np.ndarray, w : np.ndarray) -> np.ndarray:
    '''
    For classification tasks only.
    DONT PUT THE NORMALIZATION HERE, IT'S DONE AT SAMPLING
    '''
    preactivations = X @ w
    return np.sign(preactivations)
# NOTE : We keep these functions not to break the rest of the (old) code
# Dictionary mapping a data-model name to its sampler class.
data_classes_names = {
    'logit' : LogitModelData,
    'probit' : ProbitModelData,
}
def sample_instance(d : int, alpha : float, data_model : str, **kwargs):
    """
    Sample an instance of the problem:
    F from P(F) and {x, y} from P(x, y | F), using the sampler named data_model.
    """
    sampler_cls = data_classes_names[data_model]
    return sampler_cls(**kwargs).sample_instance(d, alpha)
def sample_data(w0 : np.ndarray, alpha : float, data_model : str, **kwargs):
    """Sample ceil(alpha * d) input/label pairs for teacher w0 with the named data model."""
    sampler_cls = data_classes_names[data_model]
    return sampler_cls(**kwargs).sample_data(w0, alpha)
def sample_data_n(w0 : np.ndarray, n : int, data_model : str, **kwargs):
    """Sample exactly n input/label pairs for teacher w0 with the named data model."""
    sampler_cls = data_classes_names[data_model]
    return sampler_cls(**kwargs).sample_data_n(w0, n)
uncertainty-project | uncertainty-project-master/Code/core/erm.py | '''
erm.py
Compute exact ERM on logistic or ridge regression w/ sklearn
'''
from typing import List, Dict
import pandas as pd
from sklearn.linear_model import LogisticRegression, Ridge
from data import *
import losses
def get_errors(w0 : List[float], X : List[List[float]], y : List[float], w : List[float], task : str, data_model : str, sig : float = 0.) -> Dict[str, float]:
    """
    Evaluate an ERM estimator w against the teacher w0.

    Returns a dict with the training error, an estimated test error (computed
    on a freshly sampled test set of the same size as the training set), the
    training loss and the mse between w0 and w.

    Fix: the test set is now drawn with sample_data_n(w0, n, ...), which
    samples exactly n = len(X) points; the previous call sample_data(w0, n,
    ...) passed the sample COUNT where the sampling RATIO alpha was expected,
    so roughly n * d test points were drawn instead.
    """
    n = len(X)
    # sample as many test points as there are training data
    X_test, y_test = sample_data_n(w0, n, data_model, sig=sig)
    # Always use the preactivations !
    yhat_test, yhat_train = X_test @ w, X @ w
    train_error = losses.dic_error[task](y, yhat_train)
    # Note that we could compute the generalisation error with a closed form expression
    test_error = losses.dic_error[task](y_test, yhat_test)
    train_loss = losses.dic_losses[task](y, yhat_train)
    mse = losses.mse(w0, w)
    return {'train_error' : train_error,
            'test_error' : test_error,
            'train_loss' : train_loss,
            'mse' : mse}
# Functions to do the regressions
def erm_ridge_regression(X : List[List[float]], y : List[float], lamb : float = 1.) -> List[float]:
    """
    Ridge regression estimator with regularization strength lamb.

    Fix: for a 1-D target vector sklearn's Ridge stores coef_ with shape (d,),
    so the previous `lr.coef_[0]` returned only the first coefficient instead
    of the whole vector. We now index only when coef_ is 2-D (the (1, d)
    layout produced by a column target), preserving the old behaviour there.
    """
    lr = Ridge(alpha = lamb, fit_intercept=False, tol=1e-7)
    lr.fit(X, y)
    coef = lr.coef_
    return coef[0] if coef.ndim == 2 else coef
def erm_logistic_regression(X : List[List[float]], y : List[float], lamb : float = 1.) -> List[float]:
    """
    Logistic regression with L2 penalty (no penalty when lamb == 0).
    """
    # CAUTION : Cannot do train / test split because that would change the ratio alpha
    # so we will sample the training set independently
    lamb = float(lamb)
    max_iter = 10000
    tol = 1e-16
    common = dict(solver='lbfgs', fit_intercept=False, max_iter=max_iter, tol=tol, verbose=0)
    if lamb > 0.:
        lr = LogisticRegression(penalty='l2', C=(1. / lamb), **common)
    else:
        lr = LogisticRegression(penalty='none', **common)
    lr.fit(X, y)
    if lr.n_iter_ == max_iter:
        print('Attention : logistic regression reached max number of iterations ({:.2f})'.format(max_iter))
    return lr.coef_[0]
| 2,178 | 34.145161 | 158 | py |
uncertainty-project | uncertainty-project-master/Code/core/calibration.py | # calibration.py
# Functions to compute (empirically and theoretically) the calibration
import numpy as np
from scipy.integrate import nquad, quad
import scipy.linalg
from scipy.special import erfc, erfcinv
import scipy.stats as stats
from core import *
import data
from overlaps import *
import utility
def compute_bo_cond_mean(p : float, qbo : float, qerm : float, Q : float, sigma : float = 0.0) -> float:
    """Mean of the Bayes-optimal label distribution conditioned on the ERM probability p."""
    rho = 1.0
    logit_p = utility.sigmoid_inv(p)
    mu = (Q / qerm) * logit_p
    var = (1.0 - (Q**2 / (qbo * qerm))) * qbo
    # sigma squared accounts for the noise in the prediction
    effective_var = var + (rho - qbo + sigma**2)
    return 0.5 * erfc(- mu / np.sqrt(2 * effective_var))
def compute_bo_cond_variance(p : float, qbo : float, qerm : float, Q : float, sigma : float = 0.0) -> float:
    """
    Variance of the Bayes-optimal label distribution conditioned on the ERM
    probability being p.
    """
    rho = 1.0
    logit_p = utility.sigmoid_inv(p)
    mu = (Q / qerm) * logit_p
    var = (1. - (Q**2 / (qbo * qerm))) * qbo
    std = np.sqrt(var)
    # No closed form for the second moment: integrate over the Gaussian fluctuation.
    def integrand(l):
        shifted = mu + (l * std)
        proba = 0.5 * erfc(- shifted / np.sqrt(2.0 * (rho - qbo + sigma**2)))
        return proba**2 * stats.norm.pdf(l, loc=0.0, scale=1.0)
    second_moment = quad(integrand, float('-inf'), float('inf'))[0]
    first_moment_sq = compute_bo_cond_mean(p, qbo, qerm, Q, sigma)**2
    return second_moment - first_moment_sq
def compute_teacher_cond_variance(p : float, rho : float, qerm : float, m : float, sigma : float = 0.0) -> float:
    """
    Return variance of distribution of y b.o conditioned on proba for ERM is p.
    """
    # NOTE(review): the rho argument is immediately overridden -- the
    # expressions below appear to be valid only for rho = 1; confirm before
    # passing any other value.
    rho = 1.0
    mu = (m / qerm) * utility.sigmoid_inv(p)
    std = np.sqrt(rho - (m**2 / qerm))

    # second moment under a standard-Gaussian reparametrisation; no closed form known
    def integrand(z):
        shifted = mu + std * z
        return (0.5 * erfc(-shifted / np.sqrt(2.0 * (sigma**2))))**2 * stats.norm.pdf(z, loc=0.0, scale=1.0)

    second_moment = quad(integrand, float('-inf'), float('inf'))[0]
    mean = p - compute_teacher_calibration(p, rho, qerm, m, sigma)
    return second_moment - mean**2
def compute_bo_calibration(p : float, qbo : float, qerm : float, Q : float, sigma : float = 0.0) -> float:
    """Calibration gap p - E[y | ERM predicts p] for the Bayes-optimal estimator."""
    cond_mean = compute_bo_cond_mean(p, qbo, qerm, Q, sigma)
    return p - cond_mean
def compute_teacher_bo_calibration(p : float, rho : float, q : float, sigma : float) -> float:
    """Calibration gap of the Bayes-optimal estimator with respect to the teacher."""
    noise = sigma**2
    # NOTE : Expression false if rho not 1
    mean_nu = -np.sqrt(2 * (rho - q + noise)) * erfcinv(2 * p)
    var_nu = rho * (1. - q**2 / (rho * q))
    return p - 0.5 * erfc(-mean_nu / np.sqrt(2. * (var_nu + noise)))
def compute_teacher_calibration(p : float, rho : float, qerm : float, m : float, sigma : float) -> float:
    """
    Calibration of ERM with respect to teacher
    """
    noise = sigma**2
    mean_nu = (m / qerm) * utility.sigmoid_inv(p)
    var_nu = rho * (1. - m**2 / (rho * qerm))
    return p - 0.5 * erfc(-mean_nu / np.sqrt(2. * (var_nu + noise)))
#### EMPIRICAL COMPUTATION OF CALIBRATION FOR EXPERIMENTS
def compute_experimental_teacher_calibration(p, w, werm, Xtest, Ytest, sigma):
    """Empirical teacher calibration at level p, binned with half-width 0.025.

    Ytest is accepted but unused (kept for a uniform signature).
    """
    n, _ = Xtest.shape
    half_width = 0.025  # half-width of the probability bin around p
    erm_probs = utility.sigmoid(Xtest @ werm)
    selected = [i for i in range(n) if p - half_width <= erm_probs[i] <= p + half_width]
    teacher_probs = [utility.probit(w @ Xtest[i], sigma) for i in selected]
    return p - np.mean(teacher_probs)
def compute_experimental_teacher_variance(p, w, werm, Xtest, Ytest, sigma):
    """Empirical variance of the teacher probability within the bin around p.

    Ytest is accepted but unused (kept for a uniform signature).
    """
    n, _ = Xtest.shape
    half_width = 0.025  # half-width of the probability bin around p
    erm_probs = utility.sigmoid(Xtest @ werm)
    selected = [i for i in range(n) if p - half_width <= erm_probs[i] <= p + half_width]
    teacher_probs = [utility.probit(w @ Xtest[i], sigma) for i in selected]
    return np.var(teacher_probs)
def compute_experimental_bo_calibration(p, what, vhat, werm, Xtest, Ytest, sigma):
    """Empirical Bayes-optimal calibration at level p, binned with half-width 0.025.

    Ytest is accepted but unused (kept for a uniform signature).
    """
    n, _ = Xtest.shape
    half_width = 0.025  # half-width of the probability bin around p
    erm_probs = utility.sigmoid(Xtest @ werm)
    selected = [i for i in range(n) if p - half_width <= erm_probs[i] <= p + half_width]
    # Bayes probit: label noise sigma^2 and the per-sample term vhat @ x^2
    # both enter the denominator
    bo_probs = [0.5 * utility.erfc(-(what @ Xtest[i]) / np.sqrt(2 * (sigma**2 + 1. - vhat @ Xtest[i]**2)))
                for i in selected]
    return p - np.mean(bo_probs)
def compute_experimental_bo_variance(p, what, vhat, werm, Xtest, Ytest, sigma):
    """Empirical variance of the Bayes-optimal probability within the bin around p.

    Ytest is accepted but unused (kept for a uniform signature).
    """
    n, _ = Xtest.shape
    half_width = 0.025  # half-width of the probability bin around p
    erm_probs = utility.sigmoid(Xtest @ werm)
    selected = [i for i in range(n) if p - half_width <= erm_probs[i] <= p + half_width]
    # Bayes probit: label noise sigma^2 and the per-sample term vhat @ x^2
    # both enter the denominator
    bo_probs = [0.5 * utility.erfc(-(what @ Xtest[i]) / np.sqrt(2 * (sigma**2 + 1. - vhat @ Xtest[i]**2)))
                for i in selected]
    return np.var(bo_probs)
| 4,936 | 36.976923 | 131 | py |
uncertainty-project | uncertainty-project-master/Code/core/gamp.py | # gamp.py
# File containing the code for GAMP with sign perceptron and gaussian prior
from typing import List
import numpy as np
from scipy.special import erfc
from scipy.integrate import nquad
import matplotlib.pyplot as plt
import pandas as pd
import data
from models.bayes_optimal import BayesOptimal
from models.amp_erm import ERM
# Functions specific to the prior and activation function
def check_increasing(mses : List[float], epochs : int =5) -> bool:
    """Return True iff the last `epochs` entries of mses are strictly increasing.

    Prints a warning and returns False when epochs exceeds len(mses).
    """
    if epochs > len(mses):
        print('Number of epochs must be smaller than length of array!')
        return False
    tail = mses[-epochs:]
    return bool(np.all(np.diff(tail) > 0))
# G-AMP
def iterate_gamp(W : List[List[float]], y : List[float], x0 : List[float] =None, model : type = BayesOptimal, max_iter : int =200, tol : float =1e-7,
damp : float =0.2, early_stopping : bool =False, verbose : bool = True, sig : float = 0.) -> dict:
"""
MAIN FUNCTION : Runs G-AMP and returns the finals parameters. If we study
the variance, we are interested in the vhat quantities. The 'variance' of the vector
w will (normally) be the sum of the vhat.
parameters :
- W : data matrix
- y : funciton output
- x0 : ground truth
returns :
- retour : dictionnary with informations
"""
assert not x0 is None
d = len(x0)
# Preprocessing
y_size, x_size = W.shape
W2 = W * W
# Initialisation
xhat = np.zeros(x_size)
vhat = np.ones(x_size)
g = np.zeros(y_size)
count = 0
mses = np.zeros(max_iter)
status = None
q_list, m_list = [], []
for t in range(max_iter):
q = np.mean(xhat**2)
m = np.mean(xhat * x0)
q_list.append(q)
m_list.append(m)
if verbose:
print(f'q = {q} and m = {m}. Relative difference is {np.abs(m - q) / q}')
# First part: m-dimensional variables
V = W2 @ vhat
# here we see that V is the Onsager term
omega = W @ xhat - V * g
g, dg = model.channel(y, omega, V, sig = sig)
# Second part
A = -W2.T @ dg
b = A*xhat + W.T @ g
xhat_old = xhat.copy() # Keep a copy of xhat to compute diff
vhat_old = vhat.copy()
xhat, vhat = model.prior(b, A)
diff = np.mean(np.abs(xhat-xhat_old))
# Expression of MSE has been changed
mses[t] = 1. - np.mean(xhat * x0)
if count == 5:
status = 'Early stopping'
return mses[:t-4]
# if verbose:
# print('t: {}, diff: {}, mse: {}'.format(t, diff, mses[t]))
if (diff < tol) or (mses[t] < tol):
status = 'Done'
break
if verbose:
print('t : ', t)
retour = {}
retour['mse'] = mses[t]
retour['status'] = status
retour['estimator'] = xhat
retour['variances'] = vhat
retour['q_list'] = q_list
retour['m_list'] = m_list
return retour | 3,045 | 26.196429 | 150 | py |
uncertainty-project | uncertainty-project-master/Code/core/__init__.py | # __all__ = ['bo_state_evolution', 'calibration' 'data', 'erm', 'gamp', 'gcm_erm', 'losses', 'overlaps', 'utility']
| 116 | 57.5 | 115 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.