repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
libai | libai-main/tests/inference/test_text_classification.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from libai.inference.text_classification import TextClassificationPipeline
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/bert-base-chinese-vocab.txt" # noqa
VOCAB_MD5 = "65ac8a72466e859cd3c6b279ed8e532a"
class TestTextClassificationPipeline(flow.unittest.TestCase):
def setUp(self) -> None:
self.texts = ["cat ", "you ", "dog ", "dragon ", "牛 ", "羊 "]
cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_data")
# prepare tokenizer
if dist.get_local_rank() == 0:
# download tokenzier vocab on main process of each node
get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)
@unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
@flow.unittest.skip_unless_1n4d()
def test_pipeline_with_tensor_parallel(self):
self.pipeline = TextClassificationPipeline("configs/bert_classification.py", 1, 4, 1)
text = list(np.random.randint(0, 6, 10))
text = "".join([self.texts[i] for i in text])
dict1 = self.pipeline(text)
dict2 = self.pipeline(text)
if dist.is_main_process():
assert dict1["score"] == dict2["score"]
@unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
@flow.unittest.skip_unless_1n4d()
def test_pipeline_with_pipeline_parallel(self):
self.pipeline = TextClassificationPipeline("configs/bert_classification.py", 1, 1, 4)
text = list(np.random.randint(0, 6, 10))
text = "".join([self.texts[i] for i in text])
dict1 = self.pipeline(text)
dict2 = self.pipeline(text)
if dist.is_main_process():
assert dict1["score"] == dict2["score"]
@unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
@flow.unittest.skip_unless_1n4d()
def test_pipeline_with_tensor_pipeline_parallel(self):
self.pipeline = TextClassificationPipeline("configs/bert_classification.py", 1, 2, 2)
text = list(np.random.randint(0, 6, 10))
text = "".join([self.texts[i] for i in text])
dict1 = self.pipeline(text)
dict2 = self.pipeline(text)
if dist.is_main_process():
assert dict1["score"] == dict2["score"]
if __name__ == "__main__":
unittest.main()
| 3,160 | 38.024691 | 136 | py |
libai | libai-main/tests/structures/test_instance.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import oneflow as flow
from libai.data import DistTensorData, Instance
class TestInstance(unittest.TestCase):
def test_init_args(self):
inst = Instance(images=flow.rand(4, 5))
inst.tokens = flow.rand(5, 10)
self.assertTrue(inst.has("images"))
self.assertTrue(inst.has("tokens"))
inst.remove("images")
self.assertFalse(inst.has("images"))
inst.meta_tensor = DistTensorData(flow.rand(5, 6))
self.assertTrue(inst.has("meta_tensor"))
self.assertTrue(isinstance(inst.get("meta_tensor"), DistTensorData))
def test_order_args(self):
inst = Instance(a=1, b=2, c=3)
inst.d = 4
inst.e = 5
inst_key = []
for key in inst.get_fields():
inst_key.append(key)
self.assertEqual(inst_key, ["a", "b", "c", "d", "e"])
def test_stack(self):
inst_list = [
Instance(images=flow.rand(3, 4), masks=flow.rand(4, 5), bbox=[3, 4, 5, 6])
for _ in range(10)
]
inst = Instance.stack(inst_list)
self.assertTrue(inst.has("images"))
self.assertTrue(inst.has("masks"))
self.assertFalse(inst.has("tokens"))
self.assertEqual(inst.get("images").shape, (10, 3, 4))
self.assertEqual(inst.get("masks").shape, (10, 4, 5))
self.assertEqual(len(inst.get("bbox")), 10)
if __name__ == "__main__":
unittest.main()
| 2,063 | 29.80597 | 86 | py |
libai | libai-main/tests/structures/test_metadata.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import oneflow as flow
from libai.data import DistTensorData
from libai.utils import distributed as dist
class TestMetadata(unittest.TestCase):
@unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
def test_to_global(self):
x = flow.rand(10, 10)
x_meta = DistTensorData(x)
x_meta.to_global()
x_consistent = x.to_global(
sbp=flow.sbp.broadcast,
placement=flow.placement("cuda", [0]),
)
self.assertEqual(x_meta.tensor.sbp, x_consistent.sbp)
self.assertEqual(x_meta.tensor.placement, x_consistent.placement)
self.assertTrue((flow.eq(x_meta.tensor, x_consistent)).sum().item() == 100)
x_meta.to_global(
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
placement=dist.get_layer_placement(5),
)
x_consistent = x.to_global(
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.split(0)]),
placement=dist.get_layer_placement(5),
)
self.assertEqual(x_meta.tensor.sbp, x_consistent.sbp)
self.assertEqual(x_meta.tensor.placement, x_consistent.placement)
self.assertTrue((flow.eq(x_meta.tensor, x_consistent)).sum().item() == 100)
@unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
def test_stack(self):
x_list = [DistTensorData(flow.rand(10, 8)) for _ in range(5)]
x_list.append(DistTensorData(flow.rand(10, 9))) # shape mismatch
with self.assertRaises(Exception):
DistTensorData.stack(x_list)
x_list.pop(-1)
x_list.append(DistTensorData(flow.rand(10, 8), sbp_list=["broadcast"])) # sbp mismatch
with self.assertRaises(Exception):
DistTensorData.stack(x_list)
x_list.pop(-1)
x_list.append(DistTensorData(flow.rand(10, 8), placement_idx=2)) # placement mismatch
with self.assertRaises(Exception):
DistTensorData.stack(x_list)
x_list.pop(-1)
x_stack = DistTensorData.stack(x_list)
self.assertTrue(x_stack.tensor.shape == (5, 10, 8))
if __name__ == "__main__":
unittest.main()
| 2,806 | 35.454545 | 95 | py |
libai | libai-main/tests/structures/__init__.py | 0 | 0 | 0 | py | |
libai | libai-main/configs/resmlp_imagenet.py | from libai.config import LazyCall
from .common.models.resmlp.resmlp_12 import model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.imagenet import dataloader
import oneflow as flow
import flowvision.transforms as transforms
from flowvision.transforms import InterpolationMode
from flowvision.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from flowvision.data import Mixup
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
# Refine output dir
train.output_dir = "./output_resmlp"
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.test[0].dataset.root = "/path/to/imagenet"
# Refine test data augmentation for resmlp model
resmlp_test_aug = LazyCall(transforms.Compose)(
transforms=[
LazyCall(transforms.Resize)(
size=int(224 / 0.9),
interpolation=InterpolationMode.BICUBIC,
),
LazyCall(transforms.CenterCrop)(
size=224,
),
LazyCall(transforms.ToTensor)(),
LazyCall(transforms.Normalize)(
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
),
]
)
dataloader.test[0].dataset.transform = resmlp_test_aug
# Refine model cfg for resmlp training on imagenet
model.cfg.num_classes = 1000
model.cfg.loss_func = SoftTargetCrossEntropy()
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0,
switch_prob=0.5,
mode="batch",
num_classes=model.cfg.num_classes,
)
# Refine optimizer cfg for resmlp model
optim._target_ = flow.optim.LAMB # use lamb optimizer
optim.lr = 5e-3 # default batch size equals to 256 * 8 = 2048
optim.eps = 1e-8
optim.weight_decay = 0.2
optim.params.clip_grad_max_norm = None
optim.params.clip_grad_norm_type = None
optim.params.overrides = {
"alpha": {"weight_decay": 0.0},
"beta": {"weight_decay": 0.0},
"gamma_1": {"weight_decay": 0.0},
"gamma_2": {"weight_decay": 0.0},
}
# Refine train cfg for resmlp model
train.train_micro_batch_size = 256
train.test_micro_batch_size = 64
train.train_epoch = 400
train.warmup_ratio = 5 / 400
train.evaluation.eval_period = 1000
train.log_period = 1
# Scheduler
train.scheduler.warmup_factor = 0.001
train.scheduler.alpha = 0.01
train.scheduler.warmup_method = "linear"
# Set fp16 ON
train.amp.enabled = True
# Distributed Settings
train.dist.pipeline_num_layers = model.cfg.depth
train.dist.data_parallel_size = 1
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
| 2,630 | 28.233333 | 81 | py |
libai | libai-main/configs/vit_imagenet.py | from libai.config import LazyCall
from .common.models.vit.vit_base_patch16_224 import model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.imagenet import dataloader
from flowvision.data import Mixup
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.test[0].dataset.root = "/path/to/imagenet"
# Refine model cfg for vit training on imagenet
model.cfg.num_classes = 1000
model.cfg.loss_func = SoftTargetCrossEntropy()
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0,
switch_prob=0.5,
mode="batch",
num_classes=model.cfg.num_classes,
)
# Refine optimizer cfg for vit model
optim.lr = 1e-3 # 5e-4 * 1024 (batchsize) / 512
optim.eps = 1e-8
optim.weight_decay = 0.05
optim.params.clip_grad_max_norm = None
optim.params.clip_grad_norm_type = None
optim.params.overrides = {"pos_embed": {"weight_decay": 0.0}, "cls_token": {"weight_decay": 0.0}}
# Refine train cfg for vit model
train.train_micro_batch_size = 128
train.test_micro_batch_size = 128
train.train_epoch = 300
train.warmup_ratio = 5 / 300
train.evaluation.eval_period = 1000
train.log_period = 1
# Scheduler
train.scheduler.warmup_factor = 0.001
train.scheduler.alpha = 0.01
train.scheduler.warmup_method = "linear"
# Set fp16 ON
train.amp.enabled = True
# Distributed Settings
train.dist.pipeline_num_layers = model.cfg.depth
train.dist.data_parallel_size = 1
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
| 1,670 | 27.810345 | 97 | py |
libai | libai-main/configs/gpt2_pretrain.py | from libai.config import LazyCall
from libai.evaluation import PPLEvaluator
from .common.models.gpt import pretrain_model as model
from .common.train import train
from .common.optim import optim
from .common.data.gpt_dataset import dataloader, tokenization
from .common.models.graph import graph
vocab_file = "./data_test/gpt_data/gpt2-vocab.json"
merge_files = "./data_test/gpt_data/gpt2-merges.txt"
data_prefix = "./data_test/gpt_data/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
tokenization.tokenizer.merges_file = merge_files
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
dataloader.test[0].dataset.data_prefix = data_prefix
dataloader.test[0].dataset.indexed_dataset.data_prefix = data_prefix
# GPT-2 model config
model.cfg.embedding_dropout_prob = 0.1
model.cfg.attention_dropout_prob = 0.1
model.cfg.num_attention_heads = 16
model.cfg.hidden_size = 384
model.cfg.ffn_hidden_size = 1536
model.cfg.hidden_layers = 6
model.cfg.max_seq_length = 1024
train.input_placement_device = "cpu"
train.dist.pipeline_num_layers = model.cfg.hidden_layers
for ds in dataloader.train.dataset:
ds.max_seq_length = model.cfg.max_seq_length
optim.lr = 1.5e-4
train.train_micro_batch_size = 4
train.amp.enabled = True
train.evaluation.evaluator = LazyCall(PPLEvaluator)()
train.output_dir = "./output/gpt2_output"
| 1,427 | 30.733333 | 69 | py |
libai | libai-main/configs/swin_cifar100.py | from libai.config import LazyCall
from .common.models.swin.swin_tiny_patch4_window7_224 import model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.cifar100 import dataloader
from flowvision.data import Mixup
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0,
switch_prob=0.5,
mode="batch",
num_classes=100,
)
# Refine model cfg for vit training on cifar100
model.cfg.num_classes = 100
model.cfg.loss_func = SoftTargetCrossEntropy()
# Refine optimizer cfg for swin model
optim.lr = 5e-4
optim.eps = 1e-8
optim.weight_decay = 0.05
optim.params.clip_grad_max_norm = None
optim.params.clip_grad_norm_type = None
# Refine train cfg for swin model
train.train_micro_batch_size = 32
train.num_accumulation_steps = 1
train.test_micro_batch_size = 32
train.train_epoch = 300
train.warmup_ratio = 20 / 300
train.evaluation.eval_period = 200
train.log_period = 20
# Scheduler
train.scheduler.warmup_factor = 5e-7
train.scheduler.alpha = 0.0
train.scheduler.warmup_method = "linear"
# parallel strategy settings
train.dist.data_parallel_size = 8
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
train.dist.pipeline_num_layers = sum(model.cfg.depths)
train.output_dir = "./output"
# Set fp16 ON
train.amp.enabled = False
train.activation_checkpoint.enabled = False
# train.zero_optimization.enabled = True
# train.zero_optimization.stage = 1
graph.enabled = False
| 1,606 | 26.237288 | 66 | py |
libai | libai-main/configs/bert_large_pretrain.py | from libai.config import LazyCall
from libai.evaluation import PPLEvaluator
from .common.models.bert import pretrain_model as model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.bert_dataset import dataloader, tokenization
vocab_file = "./data_test/bert_data/bert-base-chinese-vocab.txt"
data_prefix = "./data_test/bert_data/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
dataloader.test[0].dataset.data_prefix = data_prefix
dataloader.test[0].dataset.indexed_dataset.data_prefix = data_prefix
# Bert-large model config
model.cfg.num_attention_heads = 16
model.cfg.hidden_size = 768
model.cfg.hidden_layers = 8
train.input_placement_device = "cpu"
train.dist.pipeline_num_layers = model.cfg.hidden_layers
train.train_micro_batch_size = 16
train.amp.enabled = True
for ds in dataloader.train.dataset:
ds.max_seq_length = model.cfg.max_position_embeddings
train.evaluation.evaluator = LazyCall(PPLEvaluator)()
train.output_dir = "output/bert_output"
| 1,192 | 31.243243 | 69 | py |
libai | libai-main/configs/swin_imagenet.py | from libai.config import LazyCall
from .common.models.swin.swin_tiny_patch4_window7_224 import model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.imagenet import dataloader
from flowvision.data import Mixup
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.test[0].dataset.root = "/path/to/imagenet"
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0,
switch_prob=0.5,
mode="batch",
num_classes=1000,
)
# Refine model cfg for vit training on imagenet
model.cfg.num_classes = 1000
model.cfg.loss_func = SoftTargetCrossEntropy()
# Refine optimizer cfg for vit model
optim.lr = 1e-3
optim.eps = 1e-8
optim.weight_decay = 0.05
optim.params.clip_grad_max_norm = None
optim.params.clip_grad_norm_type = None
# Refine train cfg for vit model
train.train_micro_batch_size = 128
train.test_micro_batch_size = 128
train.train_epoch = 300
train.warmup_ratio = 20 / 300
train.eval_period = 1562
train.log_period = 100
# Scheduler
train.scheduler.warmup_factor = 0.001
train.scheduler.alpha = 0.01
train.scheduler.warmup_method = "linear"
# Set fp16 ON
train.amp.enabled = True
| 1,341 | 25.84 | 66 | py |
libai | libai-main/configs/swinv2_imagenet.py | from libai.config import LazyCall
from .common.models.swinv2.swinv2_tiny_patch4_window8_256 import model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.imagenet import dataloader
from flowvision import transforms
from flowvision.data import Mixup
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
from flowvision.transforms import InterpolationMode
from flowvision.transforms.functional import str_to_interp_mode
from flowvision.data.constants import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
)
from flowvision.data.auto_augment import rand_augment_transform
from flowvision.data.random_erasing import RandomErasing
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.test[0].dataset.root = "/path/to/imagenet"
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0,
switch_prob=0.5,
mode="batch",
num_classes=1000,
)
dataloader.train.dataset[0].transform = LazyCall(transforms.Compose)(
transforms=[
LazyCall(transforms.RandomResizedCrop)(
size=256,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation=InterpolationMode.BICUBIC,
),
LazyCall(transforms.RandomHorizontalFlip)(p=0.5),
LazyCall(rand_augment_transform)(
config_str="rand-m9-mstd0.5-inc1",
hparams=dict(
translate_const=int(256 * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in IMAGENET_DEFAULT_MEAN]),
interpolation=str_to_interp_mode("bicubic"),
),
),
LazyCall(transforms.ToTensor)(),
LazyCall(transforms.Normalize)(
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
),
LazyCall(RandomErasing)(
probability=0.25,
mode="pixel",
max_count=1,
num_splits=0,
device="cpu",
),
]
)
dataloader.test[0].dataset.transform = LazyCall(transforms.Compose)(
transforms=[
LazyCall(transforms.Resize)(
size=256,
interpolation=InterpolationMode.BICUBIC,
),
LazyCall(transforms.CenterCrop)(
size=256,
),
LazyCall(transforms.ToTensor)(),
LazyCall(transforms.Normalize)(
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
),
]
)
# Refine model cfg for vit training on imagenet
model.cfg.num_classes = 1000
model.cfg.loss_func = SoftTargetCrossEntropy()
# Refine optimizer cfg for vit model
optim.lr = 1e-3 # The pytorch version is 1024 as the total batch size, 1e-3 as the learning rate
optim.eps = 1e-8
optim.weight_decay = 0.05
def check_keywords_in_name(name, keywords=()):
isin = False
for keyword in keywords:
if keyword in name:
isin = True
return isin
def set_weight_decay(model, skip_list=(), skip_keywords=()):
has_decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if (
len(param.shape) == 1
or name.endswith(".bias")
or (name in skip_list)
or check_keywords_in_name(name, skip_keywords)
):
no_decay.append(param)
else:
has_decay.append(param)
return [{"params": has_decay}, {"params": no_decay, "weight_decay": 0.0}]
optim.params = LazyCall(set_weight_decay)(
model=model,
skip_list=("absolute_pos_embed"),
skip_keywords=("cpb_mlp", "logit_scale", "relative_position_bias_table"),
)
# Refine train cfg for vit model
train.train_micro_batch_size = 128
train.test_micro_batch_size = 128
train.train_epoch = 300
train.warmup_ratio = 20 / 300
train.eval_period = 1562
train.log_period = 100
graph.enabled = False
train.rdma_enabled = True
# Scheduler
train.scheduler.warmup_factor = 0.001
train.scheduler.alpha = 0.01
train.scheduler.warmup_method = "linear"
# Set fp16 ON
train.amp.enabled = True
| 4,195 | 28.549296 | 97 | py |
libai | libai-main/configs/t5_large_pretrain.py | from libai.config import LazyCall
from libai.evaluation import PPLEvaluator
from .common.models.t5 import pretrain_model as model
from .common.train import train
from .common.optim import optim
from .common.data.t5_dataset import dataloader, tokenization
from .common.models.graph import graph
vocab_file = "./data_test/bert_data/bert-base-chinese-vocab.txt"
data_prefix = "./data_test/bert_data/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
dataloader.test[0].dataset.data_prefix = data_prefix
dataloader.test[0].dataset.indexed_dataset.data_prefix = data_prefix
# T5-large model config
model.cfg.num_attention_heads = 12
model.cfg.hidden_size = 384
model.cfg.hidden_layers = 6
train.input_placement_device = "cpu"
train.dist.pipeline_num_layers = 2 * model.cfg.hidden_layers
train.train_micro_batch_size = 16
train.amp.enabled = True
train.evaluation.evaluator = LazyCall(PPLEvaluator)()
train.output_dir = "./output/t5_output"
| 1,095 | 31.235294 | 69 | py |
libai | libai-main/configs/swinv2_cifar100.py | from libai.config import LazyCall
from .common.models.swinv2.swinv2_tiny_patch4_window8_256 import model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.cifar100 import dataloader
from flowvision import transforms
from flowvision.transforms import InterpolationMode
from flowvision.transforms.functional import str_to_interp_mode
from flowvision.data import Mixup
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0,
switch_prob=0.5,
mode="batch",
num_classes=100,
)
dataloader.train.dataset[0].transform = LazyCall(transforms.Compose)(
transforms=[
LazyCall(transforms.RandomResizedCrop)(
size=(256, 256),
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation=str_to_interp_mode("bicubic"),
),
LazyCall(transforms.RandomHorizontalFlip)(),
LazyCall(transforms.ToTensor)(),
LazyCall(transforms.Normalize)(mean=CIFAR100_TRAIN_MEAN, std=CIFAR100_TRAIN_STD),
]
)
dataloader.test[0].dataset.transform = LazyCall(transforms.Compose)(
transforms=[
LazyCall(transforms.Resize)(
size=256,
interpolation=InterpolationMode.BICUBIC,
),
LazyCall(transforms.CenterCrop)(
size=256,
),
LazyCall(transforms.ToTensor)(),
LazyCall(transforms.Normalize)(
mean=CIFAR100_TRAIN_MEAN,
std=CIFAR100_TRAIN_STD,
),
]
)
# Refine model cfg for vit training on cifar100
model.cfg.num_classes = 100
model.cfg.loss_func = SoftTargetCrossEntropy()
# Refine optimizer cfg for swinv2 model
optim.lr = 5e-4
optim.eps = 1e-8
optim.weight_decay = 0.05
def check_keywords_in_name(name, keywords=()):
isin = False
for keyword in keywords:
if keyword in name:
isin = True
return isin
def set_weight_decay(model, skip_list=(), skip_keywords=()):
has_decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if (
len(param.shape) == 1
or name.endswith(".bias")
or (name in skip_list)
or check_keywords_in_name(name, skip_keywords)
):
no_decay.append(param)
else:
has_decay.append(param)
return [{"params": has_decay}, {"params": no_decay, "weight_decay": 0.0}]
optim.params = LazyCall(set_weight_decay)(
model=model,
skip_list=("absolute_pos_embed"),
skip_keywords=("cpb_mlp", "logit_scale", "relative_position_bias_table"),
)
# Refine train cfg for swin model
train.train_micro_batch_size = 32
train.num_accumulation_steps = 8
train.test_micro_batch_size = 32
train.train_epoch = 300
train.warmup_ratio = 20 / 300
train.evaluation.eval_period = 1562
train.log_period = 10
# Scheduler
train.scheduler.warmup_factor = 5e-7
train.scheduler.alpha = 0.0
train.scheduler.warmup_method = "linear"
# parallel strategy settings
train.dist.data_parallel_size = 1
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
train.dist.pipeline_num_layers = sum(model.cfg.depths)
train.output_dir = "./output"
train.rdma_enabled = False
# Set fp16 ON
train.amp.enabled = False
train.activation_checkpoint.enabled = False
# train.zero_optimization.enabled = True
# train.zero_optimization.stage = 1
graph.enabled = False
| 3,754 | 28.566929 | 89 | py |
libai | libai-main/configs/bert_classification.py | from libai.config import LazyCall
from libai.models.bert_model import BertForClassification
from .common.models.bert import cfg as bert_cfg
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.bert_dataset import tokenization, dataloader
vocab_file = "./data_test/bert_data/bert-base-chinese-vocab.txt"
data_prefix = "./data_test/bert_data/loss_compara_content_sentence"
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
bert_cfg.num_labels = 2
bert_cfg.classifier_dropout = 0.1
model = LazyCall(BertForClassification)(cfg=bert_cfg)
tokenization.tokenizer.vocab_file = vocab_file
model.cfg.vocab_size = 21128
model.cfg.intermediate_size = 3072
model.cfg.num_attention_heads = 12
model.cfg.hidden_layers = 12
model.cfg.hidden_size = 768
train.amp.enabled = True
train.activation_checkpoint.enabled = True
train.dist.pipeline_num_layers = model.cfg.hidden_layers
train.output_dir = "output/bert_classification_output"
| 1,065 | 32.3125 | 69 | py |
libai | libai-main/configs/roberta_pretrain.py | from libai.config import LazyCall
from libai.evaluation import PPLEvaluator
from .common.models.roberta import pretrain_model as model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.roberta_dataset import dataloader, tokenization
vocab_file = "./data_test/roberta_data/roberta-vocab.json"
merge_files = "./data_test/roberta_data/roberta-merges.txt"
data_prefix = "./data_test/roberta_data/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
tokenization.tokenizer.merges_file = merge_files
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
dataloader.test[0].dataset.data_prefix = data_prefix
dataloader.test[0].dataset.indexed_dataset.data_prefix = data_prefix
# RoBERTa model config
model.cfg.num_attention_heads = 12
model.cfg.hidden_size = 768
model.cfg.hidden_layers = 8
train.input_placement_device = "cpu"
# parallel strategy settings
train.dist.data_parallel_size = 8
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
train.dist.pipeline_num_layers = model.cfg.hidden_layers
train.train_micro_batch_size = 2
train.amp.enabled = True
for ds in dataloader.train.dataset:
ds.max_seq_length = model.cfg.max_position_embeddings
train.evaluation.evaluator = LazyCall(PPLEvaluator)()
train.output_dir = "output/roberta_output"
| 1,442 | 31.066667 | 70 | py |
libai | libai-main/configs/common/optim.py | import oneflow as flow
from libai.optim import get_default_optimizer_params
from libai.config import LazyCall
optim = LazyCall(flow.optim.AdamW)(
params=LazyCall(get_default_optimizer_params)(
# params.model is meant to be set to the model object,
# before instantiating the optimizer.
clip_grad_max_norm=1.0,
clip_grad_norm_type=2.0,
weight_decay_norm=0.0,
weight_decay_bias=0.0,
),
lr=1e-4,
weight_decay=0.01,
betas=(0.9, 0.999),
eps=1e-8,
do_bias_correction=True,
)
| 547 | 25.095238 | 62 | py |
libai | libai-main/configs/common/train.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.scheduler import WarmupCosineLR
from libai.evaluation import ClsEvaluator
# Common training configuration shared by all LiBai tasks.  The `train` dict
# collects batch-size/iteration settings, AMP / activation-checkpoint / ZeRO
# switches, checkpointing and evaluation options, the LR scheduler, and the
# distributed-parallelism layout.  Task configs import it and override
# individual fields; it is wrapped in a DictConfig at the bottom of the file.
# fmt: off
train = dict(
    # Directory where output files are written
    output_dir="./output",
    # `train_micro_batch_size` is number of samples per batch on each GPU.
    # train_mini_batch_size = train_micro_batch_size * num_accumulation_steps.
    # This is also the number of training samples per step (i.e. per iteration).
    # If we use 8 GPUs for data parallel groups, `train_micro_batch_size = 2` and
    # `num_accumulation_steps = 4`, then each GPU will see 2 samples per batch and
    # 8 samples per iteration.
    # Total 64 samples will be trained per iteration across all GPUs.
    # global_batch_size = micro_batch_size * num_grad_acc * data_parallel_groups
    train_micro_batch_size=32,
    global_batch_size=None,
    num_accumulation_steps=None,
    # The total training iterations
    train_iter=10000,
    # The total training epochs, will be scaled to training iterations automatically.
    # The actual total training iterations will be calculated by the
    # formula `max(train_iter, train_epoch * iter_per_epoch)`.
    train_epoch=0,
    consumed_train_samples=0,
    consumed_valid_samples=0,
    train_samples=None,
    # Fraction of lr-warmup-iters to use for warmup (as a float)
    warmup_ratio=0,
    # The start iteration, usually needn't set it manually.
    # It can be computed automatically when resuming training.
    start_iter=0,
    # Enable automatic mixed precision for training which does not
    # change model's inference behavior.
    amp=dict(enabled=False),
    # Enable activation checkpointing to allow for training
    # with larger models, sequences, and batch sizes.
    # If enabled, checkpoint the input activations of each transformer layers by default.
    activation_checkpoint=dict(enabled=False),
    # NCCL fusion threshold megabytes, set to 0 to
    # compatible with previous version of OneFlow.
    nccl_fusion_threshold_mb=16,
    # Maximum number of ops of NCCL fusion, set to 0 to
    # compatible with previous version of OneFlow.
    nccl_fusion_max_ops=24,
    # Enable ZeRO Optimization to allow for training with larger models.
    # This optimization will reduce optimizer stages memory consumption
    # as described in ZeRO https://arxiv.org/abs/1910.02054.
    zero_optimization=dict(
        enabled=False,
        stage=1,
    ),
    # Save a model checkpoint after every this number of iterations,
    # and maximum number of checkpoint will be kept.
    checkpointer=dict(period=5000, max_to_keep=100, save_model_after_n_epoch=None),
    # Options for evaluation
    # `test_micro_batch_size` is number of samples per batch on each GPU for testing.
    # If we use 8 GPUs for data parallel groups and `test_micro_batch_size = 2`, then
    # total 16 samples will be used per iteration across all GPUs.
    test_micro_batch_size=32,
    # Enabled evaluation during training, after every `eval_period` number of iterations
    # will perform the evaluation process.
    # You can set the maximum evaluation iterations to run for validation/test.
    # You can also set a customized evaluator for use.
    evaluation=dict(
        enabled=True,
        # evaluator for calculating top-k acc
        evaluator=LazyCall(ClsEvaluator)(topk=(1, 5)),
        eval_period=5000,
        eval_after_n_epoch=None,
        eval_iter=1e5,  # running steps for validation/test
        # Metrics to be used for best model checkpoint.
        eval_metric="Acc@1",
        eval_mode="max",
    ),
    # Path to a checkpoint file to be loaded to the model for training or evaluation.
    load_weight="",
    # Output log to console after every this number of iterations.
    log_period=20,
    # lr_scheduler arguments
    # See libai/scheduler/lr_scheduler.py for definition.
    scheduler=LazyCall(WarmupCosineLR)(
        # In DefaultTrainer we will automatically set `max_iter`
        # and `warmup_iter` by the given train cfg.
        warmup_factor=0.001,
        alpha=0.01,
        warmup_method="linear",
    ),
    # Distributed arguments
    # See https://libai.readthedocs.io/en/latest/tutorials/Getting%20Started.html for more detail.
    dist=dict(
        data_parallel_size=1,
        tensor_parallel_size=1,
        pipeline_parallel_size=1,
        # users must set the `pipeline_num_layers` attribute when `pipeline_parallel_size > 1`
        pipeline_num_layers=None,
        # users could customize the number of layers in different stages
        # by setting the `custom_pipeline_stage_id ` attribute which is used for
        # manually balance calculation between stages when running pipeline parallelism
        # e.g. you can set `custom_pipeline_stage_id=[0, 0, 0, 1]`
        # for `pipeline_num_layers=4 and pipeline_parallel_size=2`
        # which means the first 3 layers will be placed on stage0 and
        # the last layer will be placed on stage1
        # NOTE: if it is None, LiBai will automatically set pipeline_stage_id
        # `auto_pipeline_stage_id` and `actual_pipeline_stage_id` will be saved in `config.yaml`
        custom_pipeline_stage_id=None,
    ),
    # the device type of input tensors for model, defaults to "cuda".
    # if you want to accelerate the model training when pipeline_parallel > 1
    # you can set `input_placement_device="cpu"` then call input_tensor.to_global()
    # inside your model.forward() method
    # see `libai/models/bert_model.py` as reference
    input_placement_device="cuda",
    # set to `True` to enable rdma for improving speed of pipeline_parallel
    rdma_enabled=True,
    # Set seed to positive to use a fixed seed. Note that a fixed seed increases
    # reproducibility but does not guarantee fully deterministic behavior.
    # Disabling all parallelism further increases reproducibility.
    seed=1234,
)
# fmt: on
train = DictConfig(train)
| 5,991 | 38.682119 | 98 | py |
libai | libai-main/configs/common/models/bert.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import BertModel, BertForPreTraining
# Default BERT hyper-parameters shared by the bare encoder (`bert_model`)
# and the pre-training model (`pretrain_model`).
cfg = dict(
    vocab_size=30522,
    hidden_size=768,
    hidden_layers=24,
    num_attention_heads=12,
    intermediate_size=4096,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    num_tokentypes=2,  # number of token-type (segment) embeddings
    add_pooling_layer=True,
    initializer_range=0.02,
    layernorm_eps=1e-5,
    # fused-kernel switches
    bias_gelu_fusion=True,
    bias_dropout_fusion=True,
    scale_mask_softmax_fusion=True,
    apply_query_key_layer_scaling=True,
    apply_residual_post_layernorm=False,
    add_binary_head=True,
    amp_enabled=False,
)
cfg = DictConfig(cfg)
bert_model = LazyCall(BertModel)(cfg=cfg)
pretrain_model = LazyCall(BertForPreTraining)(cfg=cfg)
| 806 | 23.454545 | 54 | py |
libai | libai-main/configs/common/models/roberta.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import RobertaModel, RobertaForPreTraining, RobertaForCausalLM
# Default RoBERTa hyper-parameters shared by the bare encoder, the
# causal-LM variant, and the pre-training model.
cfg = dict(
    vocab_size=50265,
    hidden_size=768,
    hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=514,
    num_tokentypes=1,  # RoBERTa uses a single token type
    add_pooling_layer=True,
    initializer_range=0.02,
    layernorm_eps=1e-5,
    pad_token_id=1,
    # fused-kernel switches
    bias_gelu_fusion=True,
    bias_dropout_fusion=True,
    scale_mask_softmax_fusion=True,
    apply_query_key_layer_scaling=True,
    apply_residual_post_layernorm=False,
    amp_enabled=False,
)
cfg = DictConfig(cfg)
roberta_model = LazyCall(RobertaModel)(cfg=cfg)
roberta_causal_lm = LazyCall(RobertaForCausalLM)(cfg=cfg)
pretrain_model = LazyCall(RobertaForPreTraining)(cfg=cfg)
| 894 | 24.571429 | 80 | py |
libai | libai-main/configs/common/models/graph.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models.utils import GraphBase
# Static-graph execution config: selects graph vs. eager mode, auto-parallel
# options, and the GraphBase wrappers used for training and evaluation.
graph = dict(
    # options for graph or eager mode
    enabled=True,
    debug=-1,  # debug mode for graph (-1 disables debug output)
    auto_parallel=dict(
        enabled=False,
        enable_auto_parallel_ignore_user_sbp_config=False,  # ignore all .to_global() in graph
        trunk_algo=True,  # consider overlapping calculate time and transfer time
        sbp_collector=False,  # use proxy node when one node transfer to many nodes
    ),
    train_graph=LazyCall(GraphBase)(
        is_train=True,
    ),
    global_mode=dict(
        enabled=False,
    ),
    eval_graph=LazyCall(GraphBase)(is_train=False),
)
graph = DictConfig(graph)
| 732 | 28.32 | 94 | py |
libai | libai-main/configs/common/models/gpt.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import GPTModel, GPTForPreTraining
# Default (small) GPT hyper-parameters shared by the bare model and the
# pre-training model.
cfg = dict(
    hidden_layers=6,
    vocab_size=30522,
    hidden_size=384,
    ffn_hidden_size=1536,
    num_attention_heads=12,
    max_seq_length=1024,
    embedding_dropout_prob=0,
    attention_dropout_prob=0,
    output_dropout_prob=0,
    layernorm_epsilon=1e-5,
    initializer_range=0.02,
    use_scaled_init_for_output_weights=True,
    # fused-kernel switches
    bias_gelu_fusion=True,
    bias_dropout_fusion=True,
    scale_mask_softmax_fusion=True,
    apply_query_key_layer_scaling=True,
    apply_residual_post_layernorm=False,
    amp_enabled=False,
)
cfg = DictConfig(cfg)
gpt_model = LazyCall(GPTModel)(cfg=cfg)
pretrain_model = LazyCall(GPTForPreTraining)(cfg=cfg)
| 783 | 23.5 | 53 | py |
libai | libai-main/configs/common/models/t5.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import T5Model, T5ForPreTraining
# Default (small) T5 hyper-parameters shared by the bare model and the
# pre-training model.
cfg = dict(
    vocab_size=30522,
    hidden_size=768,
    hidden_layers=6,
    num_attention_heads=16,
    intermediate_size=1536,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    embedding_dropout_prob=0.1,
    initializer_range=0.02,
    layernorm_eps=1e-5,
    # fused-kernel switches
    bias_gelu_fusion=True,
    bias_dropout_fusion=True,
    scale_mask_softmax_fusion=True,
    apply_query_key_layer_scaling=True,
    apply_residual_post_layernorm=False,
    amp_enabled=False,
)
cfg = DictConfig(cfg)
t5_model = LazyCall(T5Model)(cfg=cfg)
pretrain_model = LazyCall(T5ForPreTraining)(cfg=cfg)
| 751 | 23.258065 | 52 | py |
libai | libai-main/configs/common/models/resmlp/resmlp_24.py | from libai.config import LazyCall
from libai.models import ResMLP
from .resmlp_12 import cfg
# ResMLP-24: reuse the ResMLP-12 base config, with 24 blocks and a smaller
# initial scale.  NOTE: this mutates the imported `cfg` object in place.
cfg.depth = 24
cfg.init_scale = 1e-5
model = LazyCall(ResMLP)(cfg=cfg)
| 168 | 14.363636 | 33 | py |
libai | libai-main/configs/common/models/resmlp/resmlp_12.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import ResMLP
# ResMLP-12 base config; the deeper variants import and override this `cfg`.
cfg = dict(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=384,
    depth=12,
    drop_rate=0.0,
    drop_path_rate=0.05,
    init_scale=0.1,
    num_classes=1000,
    loss_func=None,  # None -> model falls back to its default loss
)
cfg = DictConfig(cfg)
model = LazyCall(ResMLP)(cfg=cfg)
| 365 | 15.636364 | 33 | py |
libai | libai-main/configs/common/models/resmlp/resmlp_36.py | from libai.config import LazyCall
from libai.models import ResMLP
from .resmlp_12 import cfg
cfg.depth = 36
cfg.init_scale = 1e-6
model = LazyCall(ResMLP)(cfg=cfg)
| 168 | 14.363636 | 33 | py |
libai | libai-main/configs/common/models/resmlp/resmlpB_24.py | from libai.config import LazyCall
from libai.models import ResMLP
from .resmlp_12 import cfg
cfg.patch_size = 8
cfg.embed_dim = 768
cfg.depth = 24
cfg.init_scale = 1e-6
model = LazyCall(ResMLP)(cfg=cfg)
| 207 | 15 | 33 | py |
libai | libai-main/configs/common/models/swin/swin_large_patch4_window12_384.py | from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.img_size = 384
cfg.embed_dim = 192
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [6, 12, 24, 48]
cfg.window_size = 12
cfg.drop_path_rate = 0.1
model = LazyCall(SwinTransformer)(cfg=cfg)
| 312 | 19.866667 | 45 | py |
libai | libai-main/configs/common/models/swin/swin_large_patch4_window7_224.py | from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.embed_dim = 192
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [6, 12, 24, 48]
cfg.drop_path_rate = 0.1
model = LazyCall(SwinTransformer)(cfg=cfg)
| 272 | 20 | 45 | py |
libai | libai-main/configs/common/models/swin/swin_tiny_c24_patch4_window8_256.py | from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.img_size = 256
cfg.num_heads = [4, 8, 16, 32]
cfg.window_size = 8
model = LazyCall(SwinTransformer)(cfg=cfg)
| 237 | 20.636364 | 45 | py |
libai | libai-main/configs/common/models/swin/swin_tiny_patch4_window7_224.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import SwinTransformer
# Swin-Tiny base config; the larger Swin variants import and override `cfg`.
cfg = dict(
    img_size=224,
    patch_size=4,
    in_chans=3,
    num_classes=1000,
    embed_dim=96,
    depths=[2, 2, 6, 2],       # blocks per stage
    num_heads=[3, 6, 12, 24],  # attention heads per stage
    window_size=7,
    mlp_ratio=4.0,
    qkv_bias=True,
    qk_scale=None,
    drop_rate=0.0,
    drop_path_rate=0.2,
    ape=False,  # absolute position embedding
    patch_norm=True,
    loss_func=None,  # None -> model falls back to its default loss
)
cfg = DictConfig(cfg)
model = LazyCall(SwinTransformer)(cfg=cfg)
| 513 | 17.357143 | 42 | py |
libai | libai-main/configs/common/models/swin/swin_base_patch4_window12_384.py | from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.img_size = 384
cfg.embed_dim = 128
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [4, 8, 16, 32]
cfg.drop_path_rate = 0.1
model = LazyCall(SwinTransformer)(cfg=cfg)
| 290 | 19.785714 | 45 | py |
libai | libai-main/configs/common/models/swin/swin_base_patch4_window7_224.py | from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.embed_dim = 128
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [4, 8, 16, 32]
cfg.drop_path_rate = 0.5
model = LazyCall(SwinTransformer)(cfg=cfg)
| 271 | 19.923077 | 45 | py |
libai | libai-main/configs/common/models/swin/swin_small_patch4_window7_224.py | from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.depths = [2, 2, 18, 2]
cfg.drop_path_rate = 0.3
model = LazyCall(SwinTransformer)(cfg=cfg)
| 219 | 21 | 45 | py |
libai | libai-main/configs/common/models/vit/vit_base_patch32_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 32
cfg.embed_dim = 768
cfg.num_heads = 12
model = LazyCall(VisionTransformer)(cfg=cfg)
| 223 | 17.666667 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_small_patch32_224.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
# ViT-Small/32: override the shared ViT-Tiny base config in place.
cfg.patch_size = 32
cfg.embed_dim = 384
cfg.num_heads = 6
model = LazyCall(VisionTransformer)(cfg=cfg)
| 255 | 18.692308 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_gigantic_patch14_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 14
cfg.embed_dim = 1664
cfg.mlp_ratio = 64 / 13
cfg.depth = 48
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)
| 263 | 17.857143 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_large_patch32_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 32
cfg.embed_dim = 1024
cfg.depth = 24
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)
| 239 | 17.461538 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_giant_patch14_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 14
cfg.embed_dim = 1408
cfg.mlp_ratio = 48 / 11
cfg.depth = 40
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)
| 263 | 17.857143 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_base_patch16_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 16
cfg.embed_dim = 768
cfg.num_heads = 12
model = LazyCall(VisionTransformer)(cfg=cfg)
| 223 | 17.666667 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_small_patch16_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 16
cfg.embed_dim = 384
cfg.num_heads = 6
model = LazyCall(VisionTransformer)(cfg=cfg)
| 222 | 17.583333 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_large_patch16_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 16
cfg.embed_dim = 1024
cfg.depth = 24
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)
| 239 | 17.461538 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_huge_patch14_224.py | from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
# ViT-Huge/14: override the shared ViT-Tiny base config in place.
# Fix: this file configures the *patch14* variant, so patch_size must be 14.
# The original set 16 (likely copy/paste from a patch16 config), which
# contradicts both the file name and the sibling patch14 configs
# (vit_giant_patch14_224 / vit_gigantic_patch14_224 both use 14).
cfg.patch_size = 14
cfg.embed_dim = 1280
cfg.depth = 32
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)
| 239 | 17.461538 | 44 | py |
libai | libai-main/configs/common/models/vit/vit_tiny_patch16_224.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import VisionTransformer
# ViT-Tiny base config; every other ViT variant imports and overrides `cfg`.
cfg = dict(
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=192,
    depth=12,
    num_heads=3,
    mlp_ratio=4.0,
    drop_rate=0.0,
    attn_drop_rate=0.0,
    drop_path_rate=0.0,
    num_classes=1000,
    loss_func=None,  # None -> model falls back to its default loss
)
cfg = DictConfig(cfg)
model = LazyCall(VisionTransformer)(cfg=cfg)
| 426 | 16.791667 | 44 | py |
libai | libai-main/configs/common/models/swinv2/swinv2_tiny_patch4_window8_256.py | from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import SwinTransformerV2
# SwinV2-Tiny base config; the other SwinV2 variants import and override `cfg`.
cfg = dict(
    img_size=256,
    patch_size=4,
    in_chans=3,
    num_classes=1000,
    embed_dim=96,
    depths=[2, 2, 6, 2],       # blocks per stage
    num_heads=[3, 6, 12, 24],  # attention heads per stage
    window_size=8,
    mlp_ratio=4.0,
    qkv_bias=True,
    drop_rate=0.0,
    drop_path_rate=0.2,
    ape=False,  # absolute position embedding
    patch_norm=True,
    pretrained_window_sizes=[0, 0, 0, 0],
    loss_func=None,  # None -> model falls back to its default loss
)
cfg = DictConfig(cfg)
model = LazyCall(SwinTransformerV2)(cfg=cfg)
libai | libai-main/configs/common/models/swinv2/swinv2_small_patch4_window16_256.py | from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.window_size = 16
cfg.depths = [2, 2, 18, 2]
cfg.drop_path_rate = 0.3
model = LazyCall(SwinTransformerV2)(cfg=cfg)
| 245 | 23.6 | 47 | py |
libai | libai-main/configs/common/models/swinv2/swinv2_base_patch4_window16_256.py | from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.window_size = 16
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [4, 8, 16, 32]
cfg.drop_path_rate = 0.5
model = LazyCall(SwinTransformerV2)(cfg=cfg)
| 276 | 24.181818 | 47 | py |
libai | libai-main/configs/common/models/swinv2/swinv2_base_patch4_window8_256.py | from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [4, 8, 16, 32]
cfg.drop_path_rate = 0.5
model = LazyCall(SwinTransformerV2)(cfg=cfg)
| 255 | 24.6 | 47 | py |
libai | libai-main/configs/common/models/swinv2/swinv2_small_patch4_window8_256.py | from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.depths = [2, 2, 18, 2]
cfg.drop_path_rate = 0.3
model = LazyCall(SwinTransformerV2)(cfg=cfg)
| 224 | 24 | 47 | py |
libai | libai-main/configs/common/models/swinv2/swinv2_tiny_patch4_window16_256.py | from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
# SwinV2-Tiny with window size 16 (only change from the base config).
cfg.window_size = 16
model = LazyCall(SwinTransformerV2)(cfg=cfg)
| 193 | 23.25 | 47 | py |
libai | libai-main/configs/common/data/cifar100.py | from omegaconf import OmegaConf
from flowvision import transforms
from flowvision.data.mixup import Mixup
from flowvision.transforms import InterpolationMode
from flowvision.transforms.functional import str_to_interp_mode
from libai.data.datasets import CIFAR100Dataset
from libai.data.build import build_image_train_loader, build_image_test_loader
from libai.config import LazyCall
# CIFAR-100 dataloader config: random-crop/flip training augmentation with
# Mixup/CutMix, and resize-then-center-crop evaluation augmentation.
# mean and std of cifar100 dataset (used to normalize both train and test)
CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
train_aug = LazyCall(transforms.Compose)(
    transforms=[
        LazyCall(transforms.RandomResizedCrop)(
            size=(224, 224),
            scale=(0.08, 1.0),
            ratio=(3.0 / 4.0, 4.0 / 3.0),
            interpolation=str_to_interp_mode("bicubic"),
        ),
        LazyCall(transforms.RandomHorizontalFlip)(),
        LazyCall(transforms.ToTensor)(),
        LazyCall(transforms.Normalize)(mean=CIFAR100_TRAIN_MEAN, std=CIFAR100_TRAIN_STD),
    ]
)
test_aug = LazyCall(transforms.Compose)(
    transforms=[
        LazyCall(transforms.Resize)(
            size=256,
            interpolation=InterpolationMode.BICUBIC,
        ),
        LazyCall(transforms.CenterCrop)(
            size=224,
        ),
        LazyCall(transforms.ToTensor)(),
        LazyCall(transforms.Normalize)(
            mean=CIFAR100_TRAIN_MEAN,
            std=CIFAR100_TRAIN_STD,
        ),
    ]
)
# Dataloader config
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_image_train_loader)(
    dataset=[
        LazyCall(CIFAR100Dataset)(
            root="./",
            train=True,
            download=True,
            transform=train_aug,
        ),
    ],
    num_workers=4,
    # Mixup/CutMix batch augmentation applied on top of the per-image transforms.
    mixup_func=LazyCall(Mixup)(
        mixup_alpha=0.8,
        cutmix_alpha=1.0,
        prob=1.0,
        switch_prob=0.5,
        mode="batch",
        num_classes=100,
    ),
)
dataloader.test = [
    LazyCall(build_image_test_loader)(
        dataset=LazyCall(CIFAR100Dataset)(
            root="./",
            train=False,
            download=True,
            transform=test_aug,
        ),
        num_workers=4,
    )
]
| 2,223 | 26.8 | 89 | py |
libai | libai-main/configs/common/data/gpt_dataset.py | from libai.config import LazyCall
from omegaconf import OmegaConf
from libai.data import build_nlp_test_loader, build_nlp_train_val_test_loader
from libai.data.datasets import GPT2Dataset
from libai.data.data_utils import get_indexed_dataset
from libai.tokenizer import GPT2Tokenizer
# GPT-2 pre-training data config: BPE tokenizer plus train/val/test loaders
# backed by a memory-mapped indexed dataset.
tokenization = OmegaConf.create()
tokenization.tokenizer = LazyCall(GPT2Tokenizer)(
    vocab_file="/workspace/data/gpt_dataset/gpt2-vocab.json",
    merges_file="/workspace/data/gpt_dataset/gpt2-merges.txt",
    do_lower_case=True,
    do_chinese_wwm=True,
)
tokenization.append_eod = False
tokenization.make_vocab_size_divisible_by = 128  # pad vocab for tensor-parallel friendliness
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_val_test_loader)(
    dataset=[
        LazyCall(GPT2Dataset)(
            name="gpt-2",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_seq_length=1024,
            seed=1234,
        ),
    ],
    train_val_test_num_samples=None,  # a hint for deferred assignment
    splits=[[949.0, 50.0, 1.0]],  # train/val/test split weights
    weights=[1.0],
    num_workers=4,
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(GPT2Dataset)(
            name="gpt-2",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_seq_length=1024,
            max_num_samples=10,
            seed=1234,
        ),
        test_batch_size=4,
    )
]
| 1,912 | 30.883333 | 90 | py |
libai | libai-main/configs/common/data/bert_dataset.py | from libai.config import LazyCall
from omegaconf import OmegaConf
from libai.data import build_nlp_test_loader, build_nlp_train_val_test_loader
from libai.data.datasets import BertDataset
from libai.data.data_utils import get_indexed_dataset
from libai.tokenizer import BertTokenizer
# BERT pre-training data config: Chinese whole-word-masking tokenizer plus
# train/val/test loaders backed by a memory-mapped indexed dataset.
tokenization = OmegaConf.create()
tokenization.tokenizer = LazyCall(BertTokenizer)(
    vocab_file="bert-base-chinese-vocab.txt",
    do_lower_case=True,
    do_chinese_wwm=True,
)
tokenization.append_eod = False
tokenization.make_vocab_size_divisible_by = 128  # pad vocab for tensor-parallel friendliness
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_val_test_loader)(
    dataset=[
        LazyCall(BertDataset)(
            name="bert",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_seq_length=512,
            mask_lm_prob=0.15,
            short_seq_prob=0.1,
            binary_head=True,  # also produce next-sentence-prediction labels
            seed=1234,
            masking_style="bert-cn-wwm",
        ),
    ],
    train_val_test_num_samples=None,  # a hint for deferred assignment
    splits=[[949.0, 50.0, 1.0]],  # train/val/test split weights
    weights=[1.0],
    num_workers=4,
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(BertDataset)(
            name="bert",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_num_samples=10,
            max_seq_length=512,
            mask_lm_prob=0.15,
            short_seq_prob=0.1,
            binary_head=True,
            seed=1234,
            masking_style="bert-cn-wwm",
        ),
        test_batch_size=4,
    )
]
| 2,097 | 30.313433 | 90 | py |
libai | libai-main/configs/common/data/roberta_dataset.py | from libai.config import LazyCall
from omegaconf import OmegaConf
from libai.data import build_nlp_test_loader, build_nlp_train_val_test_loader
from libai.data.datasets import RobertaDataset
from libai.data.data_utils import get_indexed_dataset
from libai.tokenizer import RobertaTokenizer
# RoBERTa pre-training data config: BPE tokenizer plus train/val/test loaders
# backed by a memory-mapped indexed dataset.
tokenization = OmegaConf.create()
tokenization.tokenizer = LazyCall(RobertaTokenizer)(
    vocab_file="roberta-vocab.json", merges_file="roberta-merges.txt"
)
tokenization.append_eod = False
tokenization.make_vocab_size_divisible_by = 128  # pad vocab for tensor-parallel friendliness
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_val_test_loader)(
    dataset=[
        LazyCall(RobertaDataset)(
            name="roberta",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_seq_length=514,  # matches the model's max_position_embeddings
            mask_lm_prob=0.15,
            # NOTE(review): train uses short_seq_prob=0.0 while test uses 0.1
            # below — confirm this asymmetry is intentional.
            short_seq_prob=0.0,
            seed=1234,
            masking_style="bert",
        ),
    ],
    train_val_test_num_samples=None,  # a hint for deferred assignment
    splits=[[949.0, 50.0, 1.0]],  # train/val/test split weights
    weights=[1.0],
    num_workers=4,
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(RobertaDataset)(
            name="roberta",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_num_samples=10,
            max_seq_length=514,
            mask_lm_prob=0.15,
            short_seq_prob=0.1,
            seed=1234,
            masking_style="bert",
        ),
        test_batch_size=4,
    )
]
| 2,019 | 31.063492 | 90 | py |
libai | libai-main/configs/common/data/t5_dataset.py | from libai.config import LazyCall
from omegaconf import OmegaConf
from libai.data import build_nlp_test_loader, build_nlp_train_val_test_loader
from libai.data.datasets import T5Dataset
from libai.data.data_utils import get_indexed_dataset
from libai.tokenizer import BertTokenizer
# T5 pre-training data config: a BERT tokenizer extended with the 100
# <extra_id_N> sentinel tokens, plus train/val/test loaders backed by a
# memory-mapped indexed dataset.
tokenization = OmegaConf.create()
tokenization.setup = True
# T5 sentinel tokens <extra_id_0> ... <extra_id_99> used by span corruption.
special_tokens = []
for i in range(100):
    special_tokens.append(f"<extra_id_{i}>")
tokenization.tokenizer = LazyCall(BertTokenizer)(
    vocab_file="/workspace/data/libai_dataset/bert-base-chinese-vocab.txt",
    do_lower_case=True,
    do_chinese_wwm=True,
    bos_token="[BOS]",
    eos_token="[EOS]",
    additional_special_tokens=special_tokens,
)
tokenization.append_eod = False
tokenization.make_vocab_size_divisible_by = 128  # pad vocab for tensor-parallel friendliness
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_val_test_loader)(
    dataset=[
        LazyCall(T5Dataset)(
            name="t5",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_seq_length=512,      # encoder sequence length
            max_seq_length_dec=128,  # decoder sequence length
            masked_lm_prob=0.15,
            short_seq_prob=0.1,
            seed=1234,
        ),
    ],
    train_val_test_num_samples=None,  # a hint for deferred assignment
    splits=[[949.0, 50.0, 1.0]],  # train/val/test split weights
    weights=[1.0],
    num_workers=4,
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(T5Dataset)(
            name="t5",
            data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
            indexed_dataset=LazyCall(get_indexed_dataset)(
                data_prefix="/workspace/data/libai_dataset/loss_compara_content_sentence",
                data_impl="mmap",
                skip_warmup=False,
            ),
            max_num_samples=10,
            max_seq_length=512,
            max_seq_length_dec=128,
            masked_lm_prob=0.15,
            short_seq_prob=0.1,
            seed=1234,
        ),
        test_batch_size=4,
    )
]
| 2,256 | 29.917808 | 90 | py |
libai | libai-main/configs/common/data/imagenet.py | from omegaconf import OmegaConf
from flowvision import transforms
from flowvision.transforms import InterpolationMode
from flowvision.transforms.functional import str_to_interp_mode
from flowvision.data.constants import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
)
from flowvision.data.auto_augment import rand_augment_transform
from flowvision.data.random_erasing import RandomErasing
from libai.config import LazyCall
from libai.data.datasets import ImageNetDataset
from libai.data.build import build_image_train_loader, build_image_test_loader
# ImageNet dataloader config: RandomResizedCrop + flip + RandAugment +
# RandomErasing for training, resize-then-center-crop for evaluation.
train_aug = LazyCall(transforms.Compose)(
    transforms=[
        LazyCall(transforms.RandomResizedCrop)(
            size=224,
            scale=(0.08, 1.0),
            ratio=(3.0 / 4.0, 4.0 / 3.0),
            interpolation=InterpolationMode.BICUBIC,
        ),
        LazyCall(transforms.RandomHorizontalFlip)(p=0.5),
        LazyCall(rand_augment_transform)(
            config_str="rand-m9-mstd0.5-inc1",  # RandAugment policy string
            hparams=dict(
                translate_const=int(224 * 0.45),
                # fill color for geometric ops, derived from the dataset mean
                img_mean=tuple([min(255, round(255 * x)) for x in IMAGENET_DEFAULT_MEAN]),
                interpolation=str_to_interp_mode("bicubic"),
            ),
        ),
        LazyCall(transforms.ToTensor)(),
        LazyCall(transforms.Normalize)(
            mean=IMAGENET_DEFAULT_MEAN,
            std=IMAGENET_DEFAULT_STD,
        ),
        # Random erasing is applied after normalization, on CPU tensors.
        LazyCall(RandomErasing)(
            probability=0.25,
            mode="pixel",
            max_count=1,
            num_splits=0,
            device="cpu",
        ),
    ]
)
test_aug = LazyCall(transforms.Compose)(
    transforms=[
        LazyCall(transforms.Resize)(
            size=256,
            interpolation=InterpolationMode.BICUBIC,
        ),
        LazyCall(transforms.CenterCrop)(
            size=224,
        ),
        LazyCall(transforms.ToTensor)(),
        LazyCall(transforms.Normalize)(
            mean=IMAGENET_DEFAULT_MEAN,
            std=IMAGENET_DEFAULT_STD,
        ),
    ]
)
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_image_train_loader)(
    dataset=[
        LazyCall(ImageNetDataset)(
            root="./dataset",
            train=True,
            transform=train_aug,
        ),
    ],
    num_workers=4,
    mixup_func=None,  # no Mixup/CutMix by default (cf. cifar100.py)
)
dataloader.test = [
    LazyCall(build_image_test_loader)(
        dataset=LazyCall(ImageNetDataset)(
            root="./dataset",
            train=False,
            transform=test_aug,
        ),
        num_workers=4,
    )
]
| 2,535 | 26.868132 | 90 | py |
libai | libai-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import libai # noqa
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "libai"
copyright = "2021, OneFlow"
author = "OneFlow"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"recommonmark",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "Python"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "furo"
html_theme = "sphinx_rtd_theme"
def setup(app):
    """Sphinx extension hook: register the project's custom CSS overrides."""
    app.add_css_file("css/line_space.css")
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# (The "css/line_space.css" registered in setup() is resolved under this path.)
html_static_path = ["_static"]
from __future__ import absolute_import
import os
import re
import numpy as np
from collections import Counter
#process Path-QA without KB-memory for other models
# kb: h \t r \t t
# form: question \t ans(ans1/ans2/) \t e1#r1#e2#r2#e3#<end>#e3
def process_data_c(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities):#relations is set, other is list(), *2id is dict()
    """Load the conjunctive-question ("WC-C") dataset together with its KB.

    Mutates word2id/rel2id/ent2id (dicts) and words/relations/entities (sets)
    in place, reserving id 0 for '<unk>'/'<end>' padding in each vocabulary.

    Returns:
        Q: (n, sentence_size) int array of padded question word ids.
        A: (n, n_entities) one-hot answer vectors.
        P: (n, 2, 5) ids of the two 5-slot sub-paths [e, r, e, r, e] per question.
        S: list (per question) of acceptable answer entity ids.
        Triples: (n_triples, 3) [head, rel, tail] id array from get_KB.
        sentence_size: max question length in tokens.
    """
    read_KB(KB_file, entities, relations)
    data,sentence_size = read_data(data_file, words)
    #set ids
    # Id 0 is the padding/unknown slot in every vocabulary; relations also
    # share their ids with word2id so a relation token in a question maps
    # consistently.  NOTE: dict.has_key is Python 2 only.
    if len(word2id)==0:
        word2id['<unk>'] = 0
    if len(rel2id)==0:
        rel2id['<end>'] = 0
    if len(ent2id)==0:
        ent2id['<unk>'] = 0
    for r in relations:
        # same r_id in rel2id and word2id
        if not rel2id.has_key(r):
            rel2id[r] = len(rel2id)
        if not word2id.has_key(r):
            word2id[r] = len(word2id)
    for e in entities:
        if not ent2id.has_key(e):
            ent2id[e] = len(ent2id)
    for word in words:
        if not word2id.has_key(word):
            word2id[word] = len(word2id)
    print ('here are %d words in word2id(vocab)' %len(word2id))
    print ('here are %d relations in rel2id(rel_vocab)' %len(rel2id))
    print ('here are %d entities in ent2id(ent_vocab)' %len(ent2id))
    Triples, KBs, tails_size = get_KB(KB_file,ent2id,rel2id)
    print "#records or Triples", len(np.nonzero(KBs)[0])
    Q = []
    QQ = []
    A = []
    AA = []
    P = []
    PP = []
    S = []
    SS = []
    # Encode each (question, answer, path, answer-set) example; the *Q/*A/*P/*S
    # lists hold ids, the QQ/AA/PP/SS lists keep the raw (human-readable) forms.
    for query, answer, path, answerset in data:
        query = query.strip().split()
        ls = max(0, sentence_size-len(query))
        q = [word2id[w] for w in query] + [0] * ls
        Q.append(q)
        QQ.append(query)
        a = np.zeros(len(ent2id)) # if use new ans-vocab, add 0 for 'end'
        a[ent2id[answer]] = 1
        A.append(a)
        AA.append(ent2id[answer])
        #p = [[ent2id[],rel2id[],ent2id[],rel2id[],ent2id[]], [], []]
        # POSITION+'#'+"plays_position_inverse"+'#'+PLAYER+'*'+CLUB+'#'+"plays_in_club_inverse"+'#'+PLAYER
        # '*' separates the two conjunctive sub-paths; '#' separates slots.
        path = path.strip().split('*') #path = [POSITION+'#'+"plays_position_inverse"+'#'+PLAYER, CLUB+'#'+"plays_in_club_inverse"+'#'+PLAYER]
        p=[]
        for subpath in path:
            subpath = subpath.split("#")
            p.append([ent2id[subpath[0]], rel2id[subpath[1]], ent2id[subpath[2]],rel2id[subpath[3]],ent2id[subpath[4]]])
        P.append(p) #N*2*3
        PP.append(path)
        # Answer set is '/'-separated with a trailing '/', hence the [:-1].
        anset = answerset.split('/')
        anset = anset[:-1]
        ass=[]
        for a in anset:
            ass.append(ent2id[a])
        S.append(ass)
        SS.append(anset)
    return np.array(Q),np.array(A),np.array(P),np.array(S),Triples,sentence_size
def process_data(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities): #relations is set, other is list(), *2id is dict()
    """Load a single-chain path-question dataset together with its KB.

    Same vocabulary bookkeeping as process_data_c, but each example has one
    '#'-separated path [e1, r1, e2, r2, ..., eN] instead of two sub-paths.

    Returns:
        Q: (n, sentence_size) int array of padded question word ids.
        A: (n, n_entities) one-hot answer vectors.
        P: (n, path_size) interleaved entity/relation ids of the gold path.
        S: list (per question) of acceptable answer entity ids.
        Triples: (n_triples, 3) [head, rel, tail] id array from get_KB.
        sentence_size: max question length in tokens.
    """
    read_KB(KB_file, entities, relations)
    data,sentence_size = read_data(data_file, words)
    #set ids
    # Id 0 is the padding/unknown slot in every vocabulary; relations also get
    # entries in word2id.  NOTE: dict.has_key is Python 2 only.
    if len(word2id)==0:
        word2id['<unk>'] = 0
    if len(rel2id)==0:
        rel2id['<end>'] = 0
    if len(ent2id)==0:
        ent2id['<unk>'] = 0
    for r in relations:
        # same r_id in rel2id and word2id
        if not rel2id.has_key(r):
            rel2id[r] = len(rel2id)
        if not word2id.has_key(r):
            word2id[r] = len(word2id)
    for e in entities:
        if not ent2id.has_key(e):
            ent2id[e] = len(ent2id)
    for word in words:
        if not word2id.has_key(word):
            word2id[word] = len(word2id)
    print ('here are %d words in word2id(vocab)' %len(word2id)) #75080
    print ('here are %d relations in rel2id(rel_vocab)' %len(rel2id)) #13+1
    print ('here are %d entities in ent2id(ent_vocab)' %len(ent2id)) #13+1
    Triples, KBs,tails_size = get_KB(KB_file,ent2id,rel2id)
    print "#records or Triples", len(np.nonzero(KBs)[0])
    Q = []
    QQ = []
    A = []
    AA = []
    P = []
    PP = []
    S = []
    SS = []
    # Encode each (question, answer, path, answer-set) example; the *Q/*A/*P/*S
    # lists hold ids, the QQ/AA/PP/SS lists keep the raw (human-readable) forms.
    for query, answer, path, answerset in data:
        path = path.strip().split('#') #path = [s,r1,m,r2,t]
        #answer = path[-1]
        query = query.strip().split()
        ls = max(0, sentence_size-len(query))
        q = [word2id[w] for w in query] + [0] * ls
        Q.append(q)
        QQ.append(query)
        a = np.zeros(len(ent2id)) # if use new ans-vocab, add 0 for 'end'
        a[ent2id[answer]] = 1
        A.append(a)
        AA.append(ent2id[answer])
        #p = [ ent2id[path[0]], rel2id[path[1]], ent2id[path[2]], rel2id[path[3]], ent2id[path[4]] ]
        # Even positions of the path are entities, odd positions are relations.
        p=[]
        for i in range(len(path)):
            if i % 2 == 0:
                e = ent2id[path[i]]
                p.append(e)
            else:
                r = rel2id[path[i]]
                p.append(r)
        P.append(p)
        PP.append(path)
        # Answer set is '/'-separated with a trailing '/', hence the [:-1].
        anset = answerset.split('/')
        anset = anset[:-1]
        ass=[]
        for a in anset:
            ass.append(ent2id[a])
        S.append(ass)
        SS.append(anset)
    return np.array(Q),np.array(A),np.array(P),np.array(S),Triples,sentence_size
def read_KB(KB_file, entities, relations):
    """Collect the entity and relation vocabularies from a KB triple file.

    Each line of *KB_file* is a tab-separated triple ``h \t r \t t``.  The
    head and tail strings are added to the ``entities`` set and the relation
    string to the ``relations`` set (both are mutated in place).

    Raises:
        Exception: if *KB_file* does not exist.
    """
    if not os.path.isfile(KB_file):
        raise Exception("!! %s is not found!!" % KB_file)
    with open(KB_file) as kb:
        triple_lines = kb.readlines()
    for raw_line in triple_lines:
        fields = raw_line.strip().split('\t')
        entities.add(fields[0])
        entities.add(fields[2])
        relations.add(fields[1])
def get_KB(KB_file,ent2id,rel2id):
    """Read the KB triples as ids and build a (head, rel) -> tails lookup.

    Returns:
        Triples: (n_triples, 3) int array of [head_id, rel_id, tail_id].
        KBmatrix: int32 matrix of shape (n_ents * n_rels, max_tails); row
            ``h * n_rels + r`` lists the tail ids reachable from (h, r),
            zero-padded on the right.
        max_tails: the largest number of tails for any (head, rel) pair.
    """
    nwords = len(ent2id)
    nrels = len(rel2id)
    # tails[h*nrels+r] counts how many tail slots of that row are filled.
    tails = np.zeros([nwords*nrels,1], 'int32')
    #KBmatrix = np.zeros([nwords, nrels,nwords], 'int32')
    KBmatrix = np.zeros([nwords * nrels,nwords], 'int32')
    Triples = []
    # NOTE(review): the file handle is never closed; a `with open(...)` block
    # would be safer.
    f = open(KB_file)
    # control/b are only used by the commented-out "delete half triples"
    # experiment below; b stays 0 in normal runs.
    control = 1
    b = 0
    for line in f.readlines():
        line = line.strip().split('\t')
        ''' delete half triples
        control += 1
        if control % 2 == 0:
            b += 1
            continue
        '''
        h = ent2id[line[0]]
        r = rel2id[line[1]]
        t = ent2id[line[2]]
        Triples.append([h,r,t])
        #[h,r]->[h*nrels+r]
        lenlist = tails[h*nrels+r]
        KBmatrix[h*nrels+r,lenlist] = t
        tails[h*nrels+r]+=1
    print "delete triples:", b
    return np.array(Triples), KBmatrix[:,:np.max(tails)], np.max(tails)
def read_data(data_file, words):
    """Parse the QA data file into (question, answer, path, answer-set) rows.

    Each tab-separated line holds at least: question, answer, path; the
    answer set is either embedded in the answer column as ``ans(a1/a2/)``
    or given as a fourth column.  Question tokens are added to the *words*
    set (mutated in place).

    Returns:
        data: list of [question, answer, path, answer_set] strings.
        sentence_size: max question length in tokens.
    """
    # q+'\t'+ans+'\t'+p+'\t'+ansset+'\t'+c+'\t'+sub+'\n'
    # question \t ans(ans1/ans2/) \t e1#r1#e2#r2#e3#<end>#e3
    # question \t ans \t e1#r1#e2#r2#e3#<end>#e3 \t ans1/ans2/ \t e1#r1#e2///e2#r2#e3#///s#r#t///s#r#t
    if os.path.isfile(data_file):
        with open(data_file) as f:
            lines = f.readlines()
    else:
        raise Exception("!! %s is not found!!" % data_file)
    data = []
    questions = []
    doc = []
    for line in lines:
        line = line.strip().split('\t')
        qlist = line[0].strip().split()
        # If the answer column embeds the answer set, split it out.  A '(' 
        # preceded by '_' is assumed to belong to the answer name itself
        # (e.g. "foo_(bar)"), so the real set delimiter is searched after it.
        # NOTE(review): this only skips one such '(' — confirm against data.
        k = line[1].find('(')
        if not k == -1:
            if line[1][k-1] == '_':
                k += (line[1][k+1:-1].find('(') + 1)
            asset = line[1][k+1:-1]
            line[1]=line[1][:k]
        else:
            asset = line[3]
        data.append([line[0],line[1],line[2],asset])
        for w in qlist:
            words.add(w)
        questions.append(qlist)
    sentence_size = max(len(i) for i in questions)
    return data, sentence_size
def tokenize(sent):
    '''Return the tokens of a sentence including punctuation.

    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
    '''
    # Split on runs of non-word characters while keeping them as tokens.
    # The previous pattern '(\\W+)?' could match the empty string, which
    # re.split mis-handles on Python >= 3.7 (it then splits between every
    # character); '(\\W+)' is the intended pattern and behaves the same on
    # Python 2.  Empty/whitespace-only pieces are filtered out.
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
import os
import tensorflow as tf
import numpy as np
import time
from data_process import process_data, process_data_c
from utils import MultiAcc, MultiAcc_C, RealAnswer, ScoreRank, InSet, InnerRight
from sklearn import cross_validation, metrics
from model import IRN, IRN_C
# Command-line flags: model hyper-parameters, training schedule and dataset
# selection.  The bracketed value in each help string is the default.
flags = tf.app.flags
flags.DEFINE_integer("edim", 50, "words vector dimension [50]")
flags.DEFINE_integer("nhop", 3, "number of hops [2/3+1]")
flags.DEFINE_integer("batch_size", 50, "batch size to use during training [50]")
flags.DEFINE_integer("nepoch", 5000, "number of epoch to use during training [1000]")
flags.DEFINE_integer("inner_nepoch",3, "PRN inner loop [5]")
flags.DEFINE_float("init_lr", 0.001, "initial learning rate")
flags.DEFINE_float("epsilon", 1e-8, "Epsilon value for Adam Optimizer.")
#flags.DEFINE_float("init_hid", 0.1, "initial internal state value [0.1]")
#flags.DEFINE_float("init_std", 0.05, "weight initialization std [0.05]")
flags.DEFINE_float("max_grad_norm", 20, "clip gradients to this norm [20]")
flags.DEFINE_string("dataset", "pq", "pq2h/pq3h/pql2h/pql3h/wc/")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "checkpoint directory")
flags.DEFINE_boolean("unseen",False,"True to hide 3 relations when training [False]")
flags.DEFINE_boolean("show_case_only",False,"True to show case")
flags.DEFINE_integer("show_case_no",10, "show the case in the test file")
FLAGS = flags.FLAGS
# Map the --dataset shorthand onto the data directory, QA file and KB file.
# Defaults (WorldCup2014) are overridden for the PathQuestion variants.
FLAGS.data_dir = "data/WC2014"
FLAGS.KB_file = "WC2014"
if FLAGS.dataset == 'wc1h':
    FLAGS.data_file = "WC-P1" #"WC-C/P1/P2/P"
elif FLAGS.dataset == 'wc2h':
    FLAGS.data_file = "WC-P2" #"WC-C/P1/P2/P"
elif FLAGS.dataset == 'wcm':
    FLAGS.data_file = "WC-P" #"WC-C/P1/P2/P"
elif FLAGS.dataset == 'wcc':
    FLAGS.data_file = "WC-C" #"WC-C/P1/P2/P"
elif FLAGS.dataset == 'pql2h':
    FLAGS.data_dir = "PathQuestion"
    FLAGS.data_file = 'PQL-2H'
    FLAGS.KB_file = 'PQL2-KB'
elif FLAGS.dataset == 'pql3h':
    FLAGS.data_dir = "PathQuestion"
    FLAGS.data_file = 'PQL-3H'
    FLAGS.KB_file = 'PQL3-KB'
elif FLAGS.dataset == 'pq2h':
    FLAGS.data_dir = "PathQuestion"
    FLAGS.data_file = 'PQ-2H'
    FLAGS.KB_file = '2H-kb'
elif FLAGS.dataset == 'pq3h':
    FLAGS.data_dir = "PathQuestion"
    FLAGS.data_file = 'PQ-3H'
    FLAGS.KB_file = '3H-kb'
def main(_):
    """Load data, restore a trained IRN/IRN_C checkpoint and report test accuracy.

    Evaluation-only entry point: builds the vocabularies, splits off a 10%
    test set with the same seed used in training, restores the latest
    checkpoint, and prints answer accuracy (InSet) plus per-slot path
    accuracy (MultiAcc / MultiAcc_C).  With --show_case_only it just prints
    one decoded example.
    """
    word2id = {}
    ent2id = {}
    rel2id = {}
    words = set()
    relations = set()
    entities = set()
    # Checkpoints are namespaced by data file and KB file.
    FLAGS.checkpoint_dir = os.path.join(FLAGS.checkpoint_dir,FLAGS.data_file)
    FLAGS.checkpoint_dir = os.path.join(FLAGS.checkpoint_dir,FLAGS.KB_file)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    KB_file = '%s/%s.txt' % (FLAGS.data_dir, FLAGS.KB_file)
    data_file = '%s/%s.txt' % (FLAGS.data_dir, FLAGS.data_file)
    start = time.time()
    # WC-C (conjunctive questions) has a different path layout, hence its own
    # loader and model class.
    if FLAGS.data_file == "WC-C":
        Q,A,P,S,Triples,FLAGS.query_size = process_data_c(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities)
        FLAGS.path_size = len(P[0][0]) #5
    else:
        Q,A,P,S,Triples,FLAGS.query_size = process_data(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities)
        FLAGS.path_size = len(P[0]) #5 or 7 or
    # NOTE(review): relies on Python 2 integer division; under Python 3 this
    # would make nhop a float (use // there).
    FLAGS.nhop = FLAGS.path_size / 2
    print ("read data cost %f seconds" %(time.time()-start))
    FLAGS.nwords = len(word2id)
    FLAGS.nrels = len(rel2id)
    FLAGS.nents = len(ent2id)
    # Fixed random_state keeps the split identical to the one used at training
    # time, so the restored checkpoint is evaluated on unseen examples.
    trainQ, testQ, trainA, testA, trainP, testP, trainS, testS = cross_validation.train_test_split(Q, A, P, S, test_size=.1, random_state=123)
    # for UNSEEN relations (incomplete kb setting, change data_utils.py)
    if FLAGS.unseen:
        # Drop training examples whose 2nd-to-last relation id is 1/2/3 so
        # those relations are only ever seen at test time.
        id_c=[]
        for idx in range(trainQ.shape[0]):
            if trainP[idx][-4] == 1 or trainP[idx][-4]==2 or trainP[idx][-4]==3:
                id_c.append(idx)
        trainQ = np.delete(trainQ,id_c,axis=0)
        trainA = np.delete(trainA,id_c,axis=0)
        trainP = np.delete(trainP,id_c,axis=0)
        trainS = np.delete(trainS,id_c,axis=0)
    #
    #other data and some flags
    #
    # Inverse vocabularies for decoding predictions back to strings.
    id2word = dict(zip(word2id.values(), word2id.keys()))
    id2ent = dict(zip(ent2id.values(), ent2id.keys()))
    id2rel = dict(zip(rel2id.values(), rel2id.keys())) #{0: '<end>', 1: 'cause_of_death', 2: 'gender', 3: 'profession', 4: 'institution', 5: 'religion', 6: 'parents', 7: 'location', 8: 'place_of_birth', 9: 'nationality', 10: 'place_of_death', 11: 'spouse', 12: 'children', 13: 'ethnicity'}
    test_labels = np.argmax(testA, axis=1)
    print(flags.FLAGS.__flags)
    with tf.Session() as sess:
        if not FLAGS.data_file == "WC-C":
            model = IRN(FLAGS,sess)
        elif FLAGS.data_file == "WC-C":
            model = IRN_C(FLAGS,sess)
        model.load()
        test_preds = model.predict(Triples,testQ, testP)
        if not FLAGS.data_file == "WC-C":
            test_acc = MultiAcc(testP,test_preds,FLAGS.path_size)
        elif FLAGS.data_file == "WC-C":
            test_acc = MultiAcc_C(testP,test_preds)
        test_true_acc = InSet(testP,testS,test_preds)
        # Decode one example (index --show_case_no, clamped to 0) for display.
        show_k = FLAGS.show_case_no if FLAGS.show_case_no < testQ.shape[0] else 0
        input_q = " ".join([id2word[w] for w in testQ[show_k]])
        #output = test_preds[0][0]
        path_words = []
        for j in range(FLAGS.path_size):
            if j % 2 == 0:
                path_words.append(id2ent[test_preds[show_k][j]])
            else:
                path_words.append(id2rel[test_preds[show_k][j]])
        output = "---".join(path_words)
        if FLAGS.show_case_only:
            print('-----------------------')
            print('test input:',input_q)
            print('test output:',output)
            print('-----------------------')
            return
        print('-----------------------')
        print('Test Data',data_file)
        print('Test Accuracy:', test_true_acc)
        print('Test Accuracy for whole Path:', test_acc)
        print('-----------------------')
if __name__ == '__main__':
    tf.app.run()
import os
import math
import random
import numpy as np
import tensorflow as tf
from sklearn import cross_validation, metrics
def norm(matrix):
    """Row-wise Euclidean (L2) norm of a 2-D tensor, as a (batch, 1) column."""
    squared_sums = tf.reduce_sum(tf.square(matrix), 1)
    return tf.reshape(tf.sqrt(squared_sums), [-1, 1])
def MatrixCos(inputdata,key):
    #inputdata = [batch,embed]
    #key = [slot,embed]
    #return most similar key_id for each inputdata
    # Cosine similarity: dot products normalized by the product of the row
    # norms (1e-8 avoids division by zero), then argmax over the slots.
    addressing = tf.matmul(inputdata, key, transpose_b = True) #(b,e)*(e,slots) -> (b,s)
    norm1 = norm(inputdata) #(b,1)
    norm2 = norm(key) #(s,1)
    n = tf.matmul(norm1,norm2,transpose_b = True) + 1e-8 #(b,s)
    addressing = tf.div(addressing,n)
    index = tf.reshape(tf.argmax(addressing,1),[-1,1]) #(b,1)
    return tf.to_int32(index)
def SimpleMatrixCos(inputdata,key):
    # Same idea as MatrixCos, but normalizes the rows up front with
    # l2_normalize so the dot product is directly the cosine similarity.
    inputdata = tf.nn.l2_normalize(inputdata,1)
    key = tf.nn.l2_normalize(key,1)
    addressing = tf.matmul(inputdata, key, transpose_b = True) #(b,4)*(4,5) -> (b,5)
    index = tf.reshape(tf.argmax(addressing,1),[-1,1]) #(b,1)
    return tf.to_int32(index)
def position_encoding(sentence_size, embedding_size):
    """
    Position Encoding described in section 4.1 [1]
    m_i = sum_j l_ij*A*x_ij /J/d
    l_ij = Jd-jd-iJ+2ij = ij-Ji/2-jd/2+Jd/4
    return l-matrix-transpose (fixed)
    """
    J = sentence_size
    d = embedding_size
    # Centered 1-based indices along each axis; their outer product gives the
    # same values as the original double loop.
    emb_offsets = np.arange(1, d + 1) - d / 2.0
    pos_offsets = np.arange(1, J + 1) - J / 2.0
    weights = np.outer(emb_offsets, pos_offsets).astype(np.float32)
    weights = (1 + 4 * weights / d / J) / 2
    return np.transpose(weights)
def add_gradient_noise(t, stddev=1e-3, name=None):
    """
    Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].

    The input Tensor `t` should be a gradient.
    The output will be `t` + gaussian noise.

    0.001 was said to be a good fixed value for memory networks [2].
    """
    with tf.name_scope(name, "add_gradient_noise",[t, stddev]) as name:
        t = tf.convert_to_tensor(t, name="t")
        gn = tf.random_normal(tf.shape(t), stddev=stddev)
        return tf.add(t, gn, name=name)
def zero_nil_slot(t, name=None):
    """
    Overwrites the nil_slot (first row) of the input Tensor with zeros.

    The nil_slot is a dummy slot (padding id 0) and should not be trained and
    influence the training algorithm, so its gradient row is zeroed out.
    """
    with tf.name_scope(name, "zero_nil_slot",[t]) as name:
        t = tf.convert_to_tensor(t, name="t")
        s = tf.shape(t)[1]
        z = tf.zeros(tf.stack([1, s])) #tf.zeros([1,s])
        return tf.concat(axis=0, values=[z, tf.slice(t, [1, 0], [-1, -1])], name=name)
def MultiAcc_C(labels, preds):
    """Per-slot and final-answer accuracy for conjunctive (two-path) questions.

    Args:
        labels: (batch, 2, 3) gold ids of the two 3-slot sub-paths; the last
            slot of the first sub-path (labels[:, 0, -1]) is the final answer.
        preds: (batch, 2*3+1) predicted ids: first sub-path in columns [0:3],
            second sub-path starting at the midpoint, final answer last.

    Returns:
        List of 7 floats rounded to 3 decimals: 3 slot accuracies for each
        sub-path followed by the final-answer accuracy.
    """
    Acc = []
    batch_size = preds.shape[0]
    pred_len = preds.shape[1]
    # Integer division — `pred_len / 2` is a float on Python 3 and would
    # crash as an index below.
    half = pred_len // 2
    correct = 0.0
    for j in range(batch_size):
        if labels[j, 0, -1] == preds[j, -1]:
            correct += 1.0
    # Elementwise-equality mean is exactly sklearn's accuracy_score, without
    # the sklearn dependency.
    for i in range(3):
        Acc.append(round(float(np.mean(labels[:, 0, i] == preds[:, i])), 3))
    for i in range(3):
        Acc.append(round(float(np.mean(labels[:, 1, i] == preds[:, half + i])), 3))
    Acc.append(round(correct / batch_size, 3))
    return Acc
def MultiAcc(labels,preds,length):
    #length = path = 2 * hop + 1 (hop == path_l + cons_l + final == path_l * 2 + 1 )
    #compare path and final answer accuracy
    # Returns `length` per-slot accuracies followed by the final-answer
    # accuracy, each rounded to 3 decimals.  Shorter gold paths are padded
    # with <end> (id 0), so the real answer slot is found by stepping back
    # two positions (over relation+entity) while the label is 0.
    Acc = []
    for i in range(length):
        Acc.append(round(metrics.accuracy_score(labels[:,i],preds[:,i]),3))
    batch_size = preds.shape[0]
    correct = 0.0
    for j in range(batch_size):
        k = length - 1
        while(labels[j,k]==0):
            k -= 2
        if(labels[j,k]==preds[j,k]):
            correct += 1.0 #final answer accuracy
    Acc.append(round( correct/batch_size ,3))
    return Acc
def RealAnswer(labels, pathpreds):
    """Extract the predicted final answer id from each predicted path.

    Args:
        labels: (batch, path_size) gold paths; trailing <end> padding (id 0)
            marks where the real answer slot is.
        pathpreds: (batch, path_size) predicted paths.

    Returns:
        (batch,) int array of predicted answer entity ids.
    """
    # BUG fix: this previously read `preds.shape[0]` where `preds` is an
    # undefined name (the parameter is `pathpreds`), raising NameError.
    batch_size = pathpreds.shape[0]
    anspreds = np.zeros(batch_size, dtype=int)
    for j in range(batch_size):
        k = len(labels[0]) - 1
        # Step back over trailing padding two slots at a time (relation +
        # entity) so k always lands on an entity position.
        while labels[j, k] == 0:
            k -= 2
        anspreds[j] = pathpreds[j, k]
    return anspreds
def ScoreRank(label, scores):
    """Fraction of rows whose gold id is ranked in the top 3 by score (hits@3).

    label: gold ids, one per row; scores: (batch, n_candidates) score matrix.
    Returns the hit rate rounded to 3 decimals.
    """
    ranked_ids = np.argsort(-scores)  # per row: candidate ids, best first
    hits = 0.0
    for row, gold in enumerate(label):
        position = np.where(ranked_ids[row] == gold)[0][0]
        if position < 3:
            hits += 1
    return round(hits / len(label), 3)
def InSet(labels, anset, preds):
    """Accuracy that counts a prediction correct if it is in the answer set.

    Args:
        labels: gold paths — unused, kept for interface compatibility.
        anset: per-example list of acceptable answer entity ids.
        preds: either a 1-D array of answer ids (np.int64 scalars) or a 2-D
            array of predicted paths whose last column is the answer id.

    Returns:
        Fraction of examples answered correctly, rounded to 3 decimals.
    """
    right = 0.0
    # `range` instead of the Python-2-only `xrange` (identical behavior here).
    for i in range(len(anset)):
        if isinstance(preds[i], np.int64):
            ans_pred = preds[i]
        else:
            ans_pred = preds[i, -1]
        if ans_pred in anset[i]:
            right += 1
    return round(right / len(anset), 3)
def InnerRight(preds, KBs):
    """Per-hop validity of predicted paths against the KB tail-lookup table.

    For each hop, checks that the predicted tail entity is among the tails
    the KB lists for the predicted (head, relation) pair; once a hop is
    invalid, all later hops of that example count as invalid too (flags).
    Returns one rounded accuracy per hop.
    """
    Acc = []
    pl = len(preds[0])-2
    batch = len(preds)
    flags = np.ones(batch)
    for l in range(0,pl,2):
        right = 0.0
        for j in range(batch):
            if flags[j]==0:
                continue
            # Row index into the flattened (head * n_rels + rel) KB table.
            # NOTE(review): the relation-vocabulary size is hard-coded as 7
            # here — confirm it matches len(rel2id) for the dataset in use.
            key = preds[j,l]*7+preds[j,l+1]
            if preds[j,l+2] in KBs[key]:
                right += 1
            else:
                flags[j]=0
        Acc.append(round(right/batch ,3))
    return Acc
import os
import math
import random
import numpy as np
import tensorflow as tf
from utils import add_gradient_noise,zero_nil_slot,MatrixCos,position_encoding, ScoreRank
class IRN(object):
    """Interpretable Reasoning Network for single-chain path questions.

    Embeds a KB (TransE-style pretraining over EE/RE/Mse) and then answers a
    question by iteratively predicting a relation and hopping to the next
    entity for `nhop` steps, supervised with the gold path.
    """
    def __init__(self, config, sess):
        """Build the whole graph: placeholders, variables, the KB-embedding
        (pretraining) loss/train ops and the QA loss/train/predict ops."""
        self._data_file = config.data_file
        self._margin = 4  # margin for the TransE-style ranking loss
        self._batch_size = config.batch_size
        self._vocab_size = config.nwords
        self._rel_size = config.nrels
        self._ent_size = config.nents
        self._sentence_size = config.query_size
        self._embedding_size = config.edim
        self._path_size = config.path_size
        self._memory_size = config.nrels
        self._hops = config.nhop
        self._max_grad_norm = config.max_grad_norm
        self._init = tf.contrib.layers.xavier_initializer()
        #self._init = tf.random_normal_initializer(stddev=config.init_std)
        self._opt = tf.train.AdamOptimizer()
        self._name = "IRN"
        self._checkpoint_dir = config.checkpoint_dir+'/'+self._name
        if not os.path.exists(self._checkpoint_dir):
            os.makedirs(self._checkpoint_dir)
        self._build_inputs()
        self._build_vars()
        self._saver = tf.train.Saver(max_to_keep=1)
        self._encoding = tf.constant(position_encoding(self._sentence_size, self._embedding_size), name="encoding")
        # --- KB-embedding (pretraining) phase: only EE, RE, Mse are trained.
        KB_batch_loss = self._pretranse()
        KB_loss_op = tf.reduce_sum(KB_batch_loss, name="KB_loss_op")
        KB_grads_and_vars = self._opt.compute_gradients(KB_loss_op,[self.EE,self.RE,self.Mse])
        KB_nil_grads_and_vars = []
        for g, v in KB_grads_and_vars:
            if v.name in self._nil_vars:
                KB_nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                KB_nil_grads_and_vars.append((g, v))
        print "KB_grads_and_vars"
        for g,v in KB_nil_grads_and_vars:
            print g, v.name
        # NOTE(review): the nil-masked gradients computed above are NOT used
        # here — the raw KB_grads_and_vars are applied, so the padding row of
        # EE/RE can still be updated in the KB phase.  Confirm whether
        # KB_nil_grads_and_vars was intended.
        KB_train_op = self._opt.apply_gradients(KB_grads_and_vars, name="KB_train_op")
        #cross entropy as loss for QA:
        # --- QA phase: only QE, Mrq, Mrs are trained (KB embeddings frozen).
        batch_loss, p = self._inference() # (b,1), (batch_size, 5)
        QA_loss_op = tf.reduce_sum(batch_loss, name="QA_loss_op")
        QA_params = [self.QE,self.Mrq,self.Mrs]
        QA_grads_and_vars = self._opt.compute_gradients(QA_loss_op,QA_params)
        QA_grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in QA_grads_and_vars if g is not None ]
        QA_grads_and_vars = [(add_gradient_noise(g), v) for g,v in QA_grads_and_vars]
        QA_nil_grads_and_vars = []
        for g, v in QA_grads_and_vars:
            if v.name in self._nil_vars:
                QA_nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                QA_nil_grads_and_vars.append((g, v))
        print "QA_grads_and_vars"
        for g,v in QA_nil_grads_and_vars:
            print g, v.name
        #grads_and_vars = [(tf.Print(g, [v.name,str(g.get_shape()),g], summarize=1e1/2), v) for g, v in grads_and_vars]
        QA_train_op = self._opt.apply_gradients(QA_nil_grads_and_vars, name="QA_train_op")
        # predict ops
        QA_predict_op = p
        # assign ops
        self.KB_loss_op = KB_loss_op
        self.KB_train_op = KB_train_op
        self.QA_loss_op = QA_loss_op
        self.QA_predict_op = QA_predict_op
        self.QA_train_op = QA_train_op
        init_op = tf.global_variables_initializer()
        self._sess = sess
        self._sess.run(init_op)

    def _build_inputs(self):
        """Create the placeholders that feed both training phases."""
        self._KBs = tf.placeholder(tf.int32, [None,3], name="KBs") #_KB
        self._keys = tf.placeholder(tf.int32, [None, self._memory_size],name="keys")
        self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
        self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
        self._answers = tf.placeholder(tf.int32, [None, self._ent_size], name="answers") #id-hot for answer
        self._answers_id = tf.placeholder(tf.int32, [None], name="answers_id") #id for answer
        self._paddings = tf.placeholder(tf.int64, [None], name="paddings") #for id_padding
        self._ones = tf.placeholder(tf.float32, [None], name="ones") #for multiple
        self._zeros = tf.placeholder(tf.float32, [None], name="zeros") #for add
        self._istrain = tf.placeholder(tf.int32,name="ground_truth")

    def _build_vars(self):
        """Create embeddings (entity EE, question-word QE, relation RE) and the
        bilinear maps Mrq/Mrs/Mse; row 0 of each embedding is the nil slot."""
        with tf.variable_scope(self._name):
            nil_word_slot = tf.zeros([1, self._embedding_size])
            nil_rel_slot = tf.zeros([1, self._embedding_size])
            E = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._ent_size-1, self._embedding_size]) ])
            Q = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
            R = tf.concat(axis=0, values=[ nil_rel_slot, self._init([self._rel_size-1, self._embedding_size]) ])
            self.EE = tf.Variable(E, name="EE") # encode entity to vector to calculate weight
            self.QE = tf.Variable(Q, name="QE")# encode question-words to vector
            self.RE = tf.Variable(R, name="RE") # encode relation to vector
            self.Mrq = tf.Variable(self._init([self._embedding_size,self._embedding_size]), name="Mrq")
            self.Mrs = tf.Variable(self._init([self._embedding_size,self._embedding_size]), name="Mrs")
            self.Mse = tf.Variable(self._init([self._embedding_size,self._embedding_size]), name="Mse")
            #self.GT = tf.Variable(self._init([self._rel_size,1]), name="GT")
            self._nil_vars = set([self.EE.name, self.QE.name, self.RE.name]) #need to keep first line 0

    def _pretranse(self):
        """TransE-style margin ranking loss over KB triples:
        max(0, margin + ||M(h+r) - t||^2 - ||M(h+r) - t'||^2) with corrupted
        tails t' supplied through the _paddings placeholder."""
        with tf.variable_scope(self._name):
            h = self._KBs[:,0] #(batch)
            r = self._KBs[:,1] #(batch)
            t = self._KBs[:,2] #(batch)
            tt = self._paddings
            h_emb = tf.nn.embedding_lookup(self.EE, h) #(batch,e)
            r_emb = tf.nn.embedding_lookup(self.RE, r)
            t_emb = tf.nn.embedding_lookup(self.EE, t)
            tt_emb = tf.nn.embedding_lookup(self.EE, tt)
            l_emb = tf.matmul((h_emb+r_emb), self.Mse) #M(h+r)
            s = (l_emb-t_emb)*(l_emb-t_emb)
            ss = (l_emb-tt_emb)*(l_emb-tt_emb)
            loss = self._margin + tf.reduce_sum(s, 1) - tf.reduce_sum(ss, 1)
            loss = tf.maximum(self._zeros,loss)
            return loss

    def _inference(self):
        """Multi-hop reasoning: starting from the topic entity, at each hop
        score relations from the question and current state, hop to the next
        entity in embedding space, and accumulate cross-entropy losses against
        the gold path.  Returns (per-example loss, predicted path ids)."""
        with tf.variable_scope(self._name):
            #initial
            loss = tf.reshape(self._zeros,[-1,1],name='loss') #(none,1)
            s_index = tf.reshape(self._paths[:,0],[-1,1]) #(none,1)
            q_emb = tf.nn.embedding_lookup(self.QE, self._queries) #Ax_ij shape is (batch, sentence_size ,embedding_size)
            q = tf.reduce_sum(q_emb, 1) #shape is (batch,embed)
            state = tf.nn.embedding_lookup(self.EE, s_index) #(b,1)->(b,1,e)
            state = tf.squeeze(state,[1]) #(b,e)
            p = s_index
            for hop in range(self._hops):
                step = 2 * hop
                # Relation logits from both the question and the current state.
                gate = tf.matmul(q, tf.matmul(self.RE, self.Mrq), transpose_b = True) + tf.matmul(state, tf.matmul(self.RE, self.Mrs), transpose_b = True) #(b,e)*(e,14) ->(b,14)
                rel_logits = gate
                r_index = tf.argmax(rel_logits,1) #(b,)
                gate = tf.nn.softmax(gate) #(b,r)
                #gumble-softmax: gate is unnormalized logits,
                #u = tf.random_uniform(shape=tf.shape(gate),minval=0,maxval=1.0) #(b,r)
                #g = -tf.log(-tf.log(u+1e-20)+1e-20)
                #tau = tf.nn.relu(tf.matmul(gate,self.GT))+1e-8 #(batch,1)
                #gate = tf.nn.softmax((gate) / tau) #(batch,v)
                real_rel_onehot = tf.one_hot(self._paths[:,step+1], self._rel_size, on_value=1.0, off_value=0.0, axis=-1) #(b,rel_size)
                predict_rel_onehot = tf.one_hot(r_index, self._rel_size, on_value=1.0, off_value=0.0, axis=-1)
                # Soft update of the state with the expected relation, and
                # removal of the used relation signal from the question.
                state = state + tf.matmul(gate, tf.matmul(self.RE, self.Mrs))
                loss += tf.reshape(tf.nn.softmax_cross_entropy_with_logits(logits=rel_logits, labels=real_rel_onehot),[-1,1]) #(b,1)
                q = q - tf.matmul(gate,tf.matmul(self.RE, self.Mrq))
                value = tf.matmul(state, self.Mse)
                ans = tf.matmul(value, self.EE, transpose_b=True) #(b,ent)
                t_index = tf.argmax(ans,1)
                #if r_index == 0, stop inference, ans = previous ans; if not r_index==0, ans = ans
                # r/(r+eps) is ~1 for any non-zero relation id and 0 for the
                # <end> relation, so a stopped path keeps its previous entity.
                t_index = tf.cast(t_index,tf.float32)
                r_index = tf.cast(r_index,tf.float32)
                t_index = r_index /(r_index+1e-15) * t_index + (1 - r_index /(r_index+1e-15)) * tf.cast(p[:,-1],tf.float32)
                p = tf.concat(axis=1,values=[p,tf.reshape(tf.cast(r_index,tf.int32),[-1,1])])
                p = tf.concat(axis=1,values=[p,tf.reshape(tf.cast(t_index,tf.int32),[-1,1])])
                real_ans_onehot = tf.one_hot(self._paths[:,step+2], self._ent_size, on_value=1.0, off_value=0.0, axis=-1) #(b,rel_size)
                loss += tf.reshape(tf.nn.softmax_cross_entropy_with_logits(logits=ans, labels=real_ans_onehot),[-1,1]) #(b,1)
                #FOR IRN-weak
                #loss += tf.reshape(tf.nn.softmax_cross_entropy_with_logits(logits=ans, labels=tf.cast(self._answers, tf.float32)),[-1,1])
            return loss, p

    def match(self):
        """
        show most similar words_id to each relation embedding

        Returns the top-5 question-word ids for every relation under the
        Mrq mapping (useful for inspecting what words trigger a relation).
        """
        #self.QE = tf.nn.l2_normalize(self.QE,1)
        #self.RE = tf.nn.l2_normalize(self.RE,1)
        Similar = tf.matmul(tf.matmul(self.RE,self.Mrq), self.QE, transpose_b=True) #(R,e) * (e,E)->(R,E)
        self.match_op = tf.nn.top_k(Similar,k=5)
        _,idx = self._sess.run(self.match_op)
        return idx

    def batch_pretrain(self, KBs, queries, answers, answers_id, paths):
        """
        Run one KB-embedding (TransE) training step.

        Args:
            KBs: Tensor (None, 3) [head, rel, tail] id triples
            queries: Tensor (None, sentence_size)
            answers: Tensor (None, ent_size)
            paths: Tensor

        Returns:
            loss: floating-point number, the loss computed for the batch

        Corrupted tails for the ranking loss are drawn uniformly at random
        and fed through the paddings placeholder.
        """
        nexample = KBs.shape[0]
        keys = np.repeat(np.reshape(np.arange(self._rel_size),[1,-1]),nexample,axis=0)
        pad = np.random.randint(low = 0, high = self._ent_size, size = nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._keys: keys, self._KBs: KBs, self._queries: queries, self._answers: answers, self._answers_id: answers_id, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros, self._istrain:0}
        loss, _, = self._sess.run([self.KB_loss_op, self.KB_train_op], feed_dict=feed_dict)
        #self.EE = tf.nn.l2_normalize(self.EE,1)
        #self.RE = tf.nn.l2_normalize(self.RE,1)
        return loss

    def batch_fit(self, KBs, queries, answers, answers_id, paths):
        """
        Run one QA training step.

        Args:
            KBs: Tensor (None, 3)
            queries: Tensor (None, sentence_size)
            answers: Tensor (None, ent_size)
            paths: Tensor

        Returns:
            loss: floating-point number, the loss computed for the batch
        """
        nexample = queries.shape[0]
        keys = np.repeat(np.reshape(np.arange(self._rel_size),[1,-1]),nexample,axis=0)
        pad = np.arange(nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._keys : keys, self._KBs: KBs, self._queries: queries, self._answers: answers, self._answers_id: answers_id, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros, self._istrain:0}
        loss, _ = self._sess.run([self.QA_loss_op, self.QA_train_op], feed_dict=feed_dict)
        # NOTE(review): these rebind the Python attributes to new normalize
        # ops every call; they do not assign back into the tf.Variables, and
        # they add graph nodes per call — confirm this is intended.
        self.EE = tf.nn.l2_normalize(self.EE,1)
        self.RE = tf.nn.l2_normalize(self.RE,1)
        self.QE = tf.nn.l2_normalize(self.QE,1)
        return loss

    def predict(self,KBs, queries, paths):
        """Predicts answers as one-hot encoding.

        Args:
            KBs: Tensor (None, 3)
            queries: Tensor (None, sentence_size)
            paths: Tensor (only the topic entity, column 0, drives inference)

        Returns:
            predicted path ids (None, path_size) from QA_predict_op
        """
        nexample = queries.shape[0]
        keys = np.repeat(np.reshape(np.arange(self._rel_size),[1,-1]),nexample,axis=0)
        pad = np.arange(nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._keys:keys, self._KBs: KBs, self._queries: queries, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros,self._istrain : 1}
        return self._sess.run(self.QA_predict_op, feed_dict=feed_dict)

    def store(self):
        """Save the current parameters to the checkpoint directory."""
        file = os.path.join(self._checkpoint_dir, self._name)
        #print(" [*] save current parameters to %s." % file )
        self._saver.save(self._sess, file)

    def load(self):
        """Restore parameters from the latest checkpoint, if one exists."""
        print(" [*] Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(self._checkpoint_dir,latest_filename = 'checkpoint')
        if ckpt and ckpt.model_checkpoint_path:
            print ("[*] Read from %s" % ckpt.model_checkpoint_path)
            self._saver.restore(self._sess, ckpt.model_checkpoint_path)
        else:
            print (" [!] Test mode but no checkpoint found")
            #raise Exception(" [!] Trest mode but no checkpoint found")
class IRN_C(object):
    def __init__(self, config, sess):
        """Build the IRN_C graph (conjunctive questions): placeholders,
        variables, the KB-embedding ops and QA ops over TWO sub-paths whose
        answer scores are summed for the final prediction."""
        self._data_file = config.data_file
        self._margin = 2  # smaller ranking margin than IRN (4)
        self._batch_size = config.batch_size
        self._vocab_size = config.nwords
        self._rel_size = config.nrels
        self._ent_size = config.nents
        self._sentence_size = config.query_size
        self._embedding_size = config.edim
        self._path_size = config.path_size
        self._memory_size = config.nrels
        self._hops = config.nhop
        self._max_grad_norm = config.max_grad_norm
        self._init = tf.contrib.layers.xavier_initializer()
        #self._init = tf.random_normal_initializer(stddev=config.init_std)
        self._opt = tf.train.AdamOptimizer()
        self._name = "IRN_C"
        self._checkpoint_dir = config.checkpoint_dir+'/'+self._name
        if not os.path.exists(self._checkpoint_dir):
            os.makedirs(self._checkpoint_dir)
        self._build_inputs()
        self._build_vars()
        self._saver = tf.train.Saver(max_to_keep=10)
        self._encoding = tf.constant(position_encoding(self._sentence_size, self._embedding_size), name="encoding")
        # --- KB-embedding (pretraining) phase.
        KB_batch_loss = self._pretranse()
        KB_loss_op = tf.reduce_sum(KB_batch_loss, name="KB_loss_op")
        KB_grads_and_vars = self._opt.compute_gradients(KB_loss_op,[self.EE,self.RE,self.Mse])
        KB_nil_grads_and_vars = []
        for g, v in KB_grads_and_vars:
            if v.name in self._nil_vars:
                KB_nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                KB_nil_grads_and_vars.append((g, v))
        print "KB_grads_and_vars"
        for g,v in KB_nil_grads_and_vars:
            print g, v.name
        # NOTE(review): as in IRN, the nil-masked gradients are computed but
        # the raw KB_grads_and_vars are applied here — confirm intent.
        KB_train_op = self._opt.apply_gradients(KB_grads_and_vars, name="KB_train_op")
        KBE_norm_op = tf.nn.l2_normalize(self.EE,1)
        KBR_norm_op = tf.nn.l2_normalize(self.RE,1)
        #cross entropy as loss for QA:
        # Run inference once per sub-path; the total loss is their sum.
        batch_loss_1, p_1, ans_1 = self._inference(self._paths[:,0,:])
        batch_loss_2, p_2, ans_2 = self._inference(self._paths[:,1,:])
        QA_loss_op = tf.reduce_sum(batch_loss_1+batch_loss_2, name="QA_loss_op")
        # gradient pipeline, seem not affect much
        QA_grads_and_vars = self._opt.compute_gradients(QA_loss_op)
        QA_grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in QA_grads_and_vars if g is not None]
        QA_grads_and_vars = [(add_gradient_noise(g), v) for g,v in QA_grads_and_vars]
        QA_nil_grads_and_vars = []
        for g, v in QA_grads_and_vars:
            if v.name in self._nil_vars:
                QA_nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                QA_nil_grads_and_vars.append((g, v))
        print "QA_grads_and_vars"
        for g,v in QA_nil_grads_and_vars:
            print g, v.name
        #grads_and_vars = [(tf.Print(g, [v.name,str(g.get_shape()),g], summarize=1e1/2), v) for g, v in grads_and_vars]
        QA_train_op = self._opt.apply_gradients(QA_nil_grads_and_vars, name="QA_train_op")
        # The conjunctive answer maximizes the summed entity scores of the
        # two sub-paths.
        fans = ans_1+ans_2
        final_ans = tf.reshape(tf.cast(tf.argmax(fans,1),tf.int32),[-1,1])
        # predict ops
        QA_predict_op = tf.concat(axis=1,values=[p_1,p_2,final_ans]) #(none,11)
        # assign ops
        self.KB_loss_op = KB_loss_op
        self.KB_train_op = KB_train_op
        self.KBE_norm_op = KBE_norm_op
        self.KBR_norm_op = KBR_norm_op
        self.QA_loss_op = QA_loss_op
        self.QA_predict_op = QA_predict_op
        self.QA_train_op = QA_train_op
        init_op = tf.global_variables_initializer()
        self._sess = sess
        self._sess.run(init_op)
def _build_inputs(self):
self._KBs = tf.placeholder(tf.int32, [None,3], name="KBs") #_KB
self._keys = tf.placeholder(tf.int32, [None, self._memory_size],name="keys")
self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
self._paths = tf.placeholder(tf.int32, [None, 2, self._path_size], name="paths") #id for [e1,r1,t, e2,r2,t]
self._answers = tf.placeholder(tf.int32, [None, self._ent_size], name="answers") #id-hot for answer
self._answers_id = tf.placeholder(tf.int32, [None], name="answers_id") #id for answer
self._paddings = tf.placeholder(tf.int64, [None], name="paddings") #for id_padding
self._ones = tf.placeholder(tf.float32, [None], name="paddings") #for multiple
self._zeros = tf.placeholder(tf.float32, [None], name="paddings") #for add
self._istrain = tf.placeholder(tf.int32,name="ground_truth")
def _build_vars(self):
    """Create the trainable embedding tables and projection matrices.

    Row 0 of each embedding table (EE/QE/RE) is the nil/padding slot and is
    initialised to zero; those variables are registered in ``self._nil_vars``
    so the training pipeline can keep their first row at zero
    (via ``zero_nil_slot`` on the gradients).
    """
    with tf.variable_scope(self._name):
        nil_word_slot = tf.zeros([1, self._embedding_size])
        nil_rel_slot = tf.zeros([1, self._embedding_size])
        # prepend the zero row, Xavier-initialise the remaining rows
        E = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._ent_size-1, self._embedding_size]) ])
        Q = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
        R = tf.concat(axis=0, values=[ nil_rel_slot, self._init([self._rel_size-1, self._embedding_size]) ])
        self.EE = tf.Variable(E, name="EE")  # encode entity to vector to calculate weight
        self.QE = tf.Variable(Q, name="QE")  # encode question-words to vector
        self.RE = tf.Variable(R, name="RE")  # encode relation to vector
        #self.RE = self.QE[:self._rel_size]
        # projection matrices between the relation / question / state / entity spaces
        self.Mrq = tf.Variable(self._init([self._embedding_size,self._embedding_size]), name="Mrq")
        self.Mrs = tf.Variable(self._init([self._embedding_size,self._embedding_size]), name="Mrs")
        self.Mse = tf.Variable(self._init([self._embedding_size,self._embedding_size]), name="Mse")
        #self.GT = tf.Variable(self._init([self._rel_size,1]), name="GT")
        self._nil_vars = set([self.EE.name, self.QE.name, self.RE.name])  # need to keep first line 0
def _pretranse(self):
    """TransE-style pretraining loss for the KB embeddings.

    Scores the translated head ``M(h+r)`` against the true tail ``t`` and a
    corrupted tail ``tt`` (fed through ``self._paddings``) and returns the
    per-example hinge loss::

        max(0, margin + ||M(h+r) - t||^2 - ||M(h+r) - tt||^2)
    """
    with tf.variable_scope(self._name):
        h = self._KBs[:,0]   # (batch,) head entity ids
        r = self._KBs[:,1]   # (batch,) relation ids
        t = self._KBs[:,2]   # (batch,) true tail ids
        tt = self._paddings  # (batch,) corrupted (negative-sample) tail ids
        h_emb = tf.nn.embedding_lookup(self.EE, h)  # (batch,e)
        r_emb = tf.nn.embedding_lookup(self.RE, r)
        t_emb = tf.nn.embedding_lookup(self.EE, t)
        tt_emb = tf.nn.embedding_lookup(self.EE, tt)
        l_emb = tf.matmul((h_emb+r_emb), self.Mse)  # M(h+r)
        s = (l_emb-t_emb)*(l_emb-t_emb)     # squared distance to the true tail
        ss = (l_emb-tt_emb)*(l_emb-tt_emb)  # squared distance to the negative tail
        loss = self._margin + tf.reduce_sum(s, 1) - tf.reduce_sum(ss, 1)
        loss = tf.maximum(self._zeros,loss)  # hinge: clip the margin loss at zero
        return loss
def _inference(self, _paths):
    """Multi-hop reasoning over the KB for question answering.

    Starting from the question bag-of-words embedding and the path's start
    entity, each hop (1) scores every relation against the current question
    and state ("gate"), (2) softly follows the chosen relation to update the
    state, (3) subtracts the used relation from the question, and
    (4) accumulates cross-entropy losses against the gold relation and the
    gold intermediate entity taken from ``_paths``.

    Args:
        _paths: int32 (batch, path_size) gold path ids, entities at even
            positions and relations at odd positions.

    Returns:
        (loss, p, ans) where loss is (batch, 1), p is the predicted path ids
        (batch, 1 + 2*hops), and ans is the final answer logits over all
        entities (batch, ent_size).
    """
    with tf.variable_scope(self._name):
        #initial
        loss = tf.reshape(self._zeros,[-1,1],name='loss')  #(none,1)
        s_index = tf.reshape(_paths[:,0],[-1,1])  #(none,1) start entity id
        q_emb = tf.nn.embedding_lookup(self.QE, self._queries)  #Ax_ij shape is (batch, sentence_size ,embedding_size)
        q = tf.reduce_sum(q_emb, 1)  #shape is (batch,embed) -- bag-of-words question
        state = tf.nn.embedding_lookup(self.EE, s_index)  #(b,1)->(b,1,e)
        state = tf.squeeze(state,[1])  #(b,e)
        p = s_index  # predicted path accumulator, one column per step
        for hop in range(self._hops):
            # relation scores from both the remaining question and the current state
            gate = tf.matmul(q, tf.matmul(self.RE, self.Mrq), transpose_b = True) + tf.matmul(state, tf.matmul(self.RE, self.Mrs), transpose_b = True)
            #gate = tf.matmul(q, self.RE, transpose_b = True) + tf.matmul(state, self.RE, transpose_b = True) #(b,e)*(e,14) ->(b,14)
            rel_logits = gate
            r_index = tf.cast(tf.argmax(rel_logits,1),tf.int32)  #(b,) hard relation choice (for the predicted path)
            gate = tf.nn.softmax(gate)  # soft relation choice (for the state update)
            #gumble-softmax: gate is unnormalized logits,
            #u = tf.random_uniform(shape=tf.shape(gate),minval=0,maxval=1.0) #(b,r)
            #g = -tf.log(-tf.log(u+1e-20)+1e-20)
            #tau = tf.nn.relu(tf.matmul(gate,self.GT))+1e-8 #(batch,1)
            #gate = tf.nn.softmax((gate+g) / tau) #(batch,v)
            real_rel_onehot = tf.one_hot(_paths[:,2*hop+1], self._rel_size, on_value=1.0, off_value=0.0, axis=-1)  #(b,rel_size) gold relation
            predict_rel_onehot = tf.one_hot(r_index, self._rel_size, on_value=1.0, off_value=0.0, axis=-1)
            #correct wrong ans
            '''
            train_state = state + tf.matmul(real_rel_onehot, tf.matmul(self.RE, self.Mrs)) #(b,14)*(14,e) (avg with weights) -> (b,e)
            test_state = state + tf.matmul(predict_rel_onehot, tf.matmul(self.RE, self.Mrs)) #(b,14)*(14,e) (avg with weights) -> (b,e)
            state = tf.cond(tf.equal(self._istrain,tf.constant(0)),lambda:train_state,lambda:test_state)
            '''
            # soft state transition: follow all relations weighted by the gate
            state = state + tf.matmul(gate, tf.matmul(self.RE, self.Mrs))
            #state = tf.nn.l2_normalize(state,1)
            # supervision on the chosen relation
            loss += tf.reshape(tf.nn.softmax_cross_entropy_with_logits(logits=rel_logits, labels=real_rel_onehot),[-1,1])  #(b,1)
            #correct wrong ans
            '''
            train_q = q - tf.matmul(tf.nn.embedding_lookup(self.RE, _paths[:,2*hop+1]), self.Mrq)
            test_q = q - tf.matmul(tf.nn.embedding_lookup(self.RE, r_index), self.Mrq)
            q = tf.cond(tf.equal(self._istrain,tf.constant(0)),lambda:train_q,lambda:test_q)
            '''
            # remove the consumed relation from the question representation
            q = q - tf.matmul(gate,tf.matmul(self.RE, self.Mrq))
            # project the state back into entity space and score all entities
            value = tf.matmul(state,self.Mse)
            ans = tf.matmul(value, self.EE, transpose_b=True)  #(b,ent)
            t_index = tf.cast(tf.argmax(ans,1),tf.int32)
            p = tf.concat(axis=1,values=[p,tf.reshape(r_index,[-1,1])])
            p = tf.concat(axis=1,values=[p,tf.reshape(t_index,[-1,1])])
            real_ans_onehot = tf.one_hot(_paths[:,2*hop+2], self._ent_size, on_value=1.0, off_value=0.0, axis=-1)  #(b,rel_size) gold entity for this hop
            # supervision on the intermediate/final entity
            loss += tf.reshape(tf.nn.softmax_cross_entropy_with_logits(logits=ans, labels=real_ans_onehot),[-1,1])  #(b,1)
            #loss += tf.reshape(tf.nn.softmax_cross_entropy_with_logits(logits=ans, labels=tf.cast(self._answers, tf.float32)),[-1,1])
        return loss, p, ans
def batch_pretrain(self, KBs, queries, answers, answers_id, paths):
    """Runs one TransE pretraining step over the passed batch.

    Args:
        KBs: array (None, 3) of (head, relation, tail) id triples
        queries: array (None, sentence_size)
        answers: array (None, ent_size) id-hot answers
        answers_id: array (None,) answer entity ids
        paths: array of gold path ids
    Returns:
        loss: floating-point number, the KB loss computed for the batch
    """
    nexample = KBs.shape[0]
    # one memory key per relation, replicated for every example
    keys = np.repeat(np.reshape(np.arange(self._rel_size),[1,-1]),nexample,axis=0)
    # random tail entities act as negative samples for the margin loss
    pad = np.random.randint(low = 0, high = self._ent_size, size = nexample)
    ones = np.ones(nexample)
    zeros = np.zeros(nexample)
    feed_dict = {self._keys: keys, self._KBs: KBs, self._queries: queries, self._answers: answers, self._answers_id: answers_id, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros, self._istrain :0}
    # run the KB loss/train ops plus the embedding re-normalisation ops
    loss, _, _, _ = self._sess.run([self.KB_loss_op, self.KB_train_op, self.KBE_norm_op, self.KBR_norm_op], feed_dict=feed_dict)
    return loss
def batch_fit(self, KBs, queries, answers, answers_id, paths):
    """Run one QA training step on the given batch.

    Args:
        KBs: array (None, 3) of (head, relation, tail) id triples
        queries: array (None, sentence_size) of padded question word ids
        answers: array (None, ent_size) id-hot answers
        answers_id: array (None,) answer entity ids
        paths: array of gold path ids
    Returns:
        loss: floating-point number, the QA loss computed for the batch
    """
    n = queries.shape[0]
    key_row = np.arange(self._rel_size).reshape(1, -1)
    feed = {
        self._keys: np.repeat(key_row, n, axis=0),
        self._KBs: KBs,
        self._queries: queries,
        self._answers: answers,
        self._answers_id: answers_id,
        self._paths: paths,
        self._paddings: np.arange(n),
        self._ones: np.ones(n),
        self._zeros: np.zeros(n),
        self._istrain: 0,
    }
    batch_loss, _ = self._sess.run([self.QA_loss_op, self.QA_train_op], feed_dict=feed)
    return batch_loss
def predict(self, KBs, queries, paths):
    """Predict answers for a batch (inference mode, _istrain = 1).

    Args:
        KBs: array (None, 3) of (head, relation, tail) id triples
        queries: array (None, sentence_size) of padded question word ids
        paths: array of gold path ids (only the start entity is consumed)
    Returns:
        The QA_predict_op output: per-example predicted path ids
        concatenated with the final answer id.
    """
    n = queries.shape[0]
    key_row = np.arange(self._rel_size).reshape(1, -1)
    feed = {
        self._keys: np.repeat(key_row, n, axis=0),
        self._KBs: KBs,
        self._queries: queries,
        self._paths: paths,
        self._paddings: np.arange(n),
        self._ones: np.ones(n),
        self._zeros: np.zeros(n),
        self._istrain: 1,
    }
    return self._sess.run(self.QA_predict_op, feed_dict=feed)
def store(self):
    """Persist the current session parameters under the checkpoint directory."""
    ckpt_path = os.path.join(self._checkpoint_dir, self._name)
    self._saver.save(self._sess, ckpt_path)
def load(self):
    """Restore the latest checkpoint from self._checkpoint_dir, if one exists.

    Only prints a warning (does not raise) when no checkpoint is found, so a
    fresh run can proceed with freshly initialised variables.
    """
    print(" [*] Reading checkpoints...")
    ckpt = tf.train.get_checkpoint_state(self._checkpoint_dir,latest_filename = 'checkpoint')
    if ckpt and ckpt.model_checkpoint_path:
        print ("[*] Read from %s" % ckpt.model_checkpoint_path)
        self._saver.restore(self._sess, ckpt.model_checkpoint_path)
    else:
        print (" [!] Test mode but no checkpoint found")
        #raise Exception(" [!] Trest mode but no checkpoint found")
| 27,496 | 45.213445 | 230 | py |
IRN | IRN-master/data_utils.py | from __future__ import absolute_import
import os
import re
import numpy as np
from collections import Counter
#process Path-QA or Conj-QA data&KB
# kb: h \t r \t t
# form: question \t ans \t e1#r1#e2#r2#e3#<end>#e3 \t ans1/ans2/ \t e1#r1#e2///e2#r2#e3#///s#r#t///s#r#t
# form: question \t ans \t e1#r1#ans#<end>#ans*e2#r2#ans#<end>#ans \t ans1/ \t e1#r1#e2///e2#r2#e3#///s#r#t///s#r#t \t e1/e2
# form: question \t ans \t e1#r1#e2#rc2#ec2#r2#e3#rc3#ec3#<end>#e3#<end>#e3 \t ans1/ \t e1#r1#e2///e2#r2#e3#///s#r#t///s#r#t
def process_data_c(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities):
    """Load a KB and a conjunctive-question ("WC-C") data file into id tensors.

    Mutates ``word2id``/``rel2id``/``ent2id`` (dicts) and ``words``/
    ``relations``/``entities`` (sets) in place.  Relation names receive the
    same id in ``rel2id`` and are also added to ``word2id``; id 0 of every
    vocabulary is reserved for the nil/unknown slot.

    Data line format (tab separated):
        question \t answer \t subpath1*subpath2 \t ans1/ans2/... \t subgraph \t subjects
    where each subpath is ``e#r#e#r#e`` and the subgraph is
    ``h#r#t///h#r#t///...``.

    Returns:
        (Q, A, P, D, S, QQ, AA, PP, DD, SS, Triples, KBs,
         sentence_size, memory_size, tails_size):
        padded question ids, id-hot answers, path ids (N,2,5), subgraph ids,
        answer-set ids, their raw-string counterparts, and the KB lookups
        produced by ``get_KB``.
    """
    read_KB(KB_file, entities, relations)
    data, sentence_size, memory_size = read_data(data_file, words)
    # reserve id 0 of every vocabulary for the nil/unknown slot
    if len(word2id) == 0:
        word2id['<unk>'] = 0
    if len(rel2id) == 0:
        rel2id['<end>'] = 0
    if len(ent2id) == 0:
        ent2id['<unk>'] = 0
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent on Python 2 and 3.
    for r in relations:
        # keep the same relation id in rel2id and word2id
        if r not in rel2id:
            rel2id[r] = len(rel2id)
        if r not in word2id:
            word2id[r] = len(word2id)
    for e in entities:
        if e not in ent2id:
            ent2id[e] = len(ent2id)
    for word in words:
        if word not in word2id:
            word2id[word] = len(word2id)
    print ('here are %d words in word2id(vocab)' % len(word2id))
    print ('here are %d relations in rel2id(rel_vocab)' % len(rel2id))
    print ('here are %d entities in ent2id(ent_vocab)' % len(ent2id))
    Triples, KBs, tails_size = get_KB(KB_file, ent2id, rel2id)
    # BUG FIX: was a Python-2-only print statement; %-formatting keeps the
    # output byte-identical on both Python versions.
    print('#records or Triples %d' % len(np.nonzero(KBs)[0]))
    Q, QQ = [], []  # padded question ids / raw tokens
    A, AA = [], []  # id-hot answer / answer id
    P, PP = [], []  # path ids / raw path strings
    S, SS = [], []  # answer-set ids / raw strings
    D, DD = [], []  # subgraph triple ids / raw strings
    for query, answer, path, answerset, subgraph, subject in data:
        query = query.strip().split()
        ls = max(0, sentence_size - len(query))
        Q.append([word2id[w] for w in query] + [0] * ls)  # 0-padded question
        QQ.append(query)
        a = np.zeros(len(ent2id))
        a[ent2id[answer]] = 1  # id-hot answer vector
        A.append(a)
        AA.append(ent2id[answer])
        # conjunctive question: two sub-paths joined by '*', each e#r#e#r#e
        path = path.strip().split('*')
        p = []
        for subpath in path:
            sp = subpath.split("#")
            p.append([ent2id[sp[0]], rel2id[sp[1]], ent2id[sp[2]], rel2id[sp[3]], ent2id[sp[4]]])
        P.append(p)  # (N, 2, 5)
        PP.append(path)
        sg = []
        subgraph = subgraph.split('///')
        ls = max(0, memory_size - len(subgraph))
        b = 0
        for t in subgraph:
            t = t.split('#')
            if not len(t) == 3:
                print('subgraph not a triple form!')
                print(t)
            tt = [ent2id[t[0]], rel2id[t[1]], ent2id[t[2]]]
            if tt not in Triples.tolist():
                b += 1  # drop subgraph triples that are not in the loaded KB
                continue
            sg.append(tt)
        for i in range(ls):
            # NOTE(review): pads with 1-element rows while kept entries are
            # 3-element triples, and dropped triples (b) are not re-padded --
            # preserved from the original; confirm downstream expectations.
            sg.append([0])
        D.append(sg)
        DD.append(subgraph)
        anset = answerset.split('/')[:-1]  # trailing '/' leaves an empty last field
        S.append([ent2id[x] for x in anset])
        SS.append(anset)
    return np.array(Q), np.array(A), np.array(P), np.array(D), np.array(S), QQ, AA, PP, DD, SS, Triples, KBs, sentence_size, memory_size, tails_size
def process_data(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities):
    """Load a KB and a path-question data file into id tensors.

    Same contract and in-place vocabulary mutation as ``process_data_c``,
    but for single-path questions: lines have five tab-separated fields
    (question, answer, path, answer set, subgraph) and the path field is one
    ``e1#r1#e2#r2#...#t`` chain (entities at even, relations at odd
    positions).

    Returns:
        (Q, A, P, D, S, QQ, AA, PP, DD, SS, Triples, KBs,
         sentence_size, memory_size, tails_size)
    """
    read_KB(KB_file, entities, relations)
    data, sentence_size, memory_size = read_data(data_file, words)
    # reserve id 0 of every vocabulary for the nil/unknown slot
    if len(word2id) == 0:
        word2id['<unk>'] = 0
    if len(rel2id) == 0:
        rel2id['<end>'] = 0
    if len(ent2id) == 0:
        ent2id['<unk>'] = 0
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent on Python 2 and 3.
    for r in relations:
        # keep the same relation id in rel2id and word2id
        if r not in rel2id:
            rel2id[r] = len(rel2id)
        if r not in word2id:
            word2id[r] = len(word2id)
    for e in entities:
        if e not in ent2id:
            ent2id[e] = len(ent2id)
    for word in words:
        if word not in word2id:
            word2id[word] = len(word2id)
    print ('here are %d words in word2id(vocab)' % len(word2id))
    print ('here are %d relations in rel2id(rel_vocab)' % len(rel2id))
    print ('here are %d entities in ent2id(ent_vocab)' % len(ent2id))
    Triples, KBs, tails_size = get_KB(KB_file, ent2id, rel2id)
    # BUG FIX: was a Python-2-only print statement; %-formatting keeps the
    # output byte-identical on both Python versions.
    print('#records or Triples %d' % len(np.nonzero(KBs)[0]))
    Q, QQ = [], []  # padded question ids / raw tokens
    A, AA = [], []  # id-hot answer / answer id
    P, PP = [], []  # path ids / raw path strings
    S, SS = [], []  # answer-set ids / raw strings
    D, DD = [], []  # subgraph triple ids / raw strings
    for query, answer, path, answerset, subgraph in data:
        path = path.strip().split('#')  # [e1, r1, e2, r2, ..., t]
        query = query.strip().split()
        ls = max(0, sentence_size - len(query))
        Q.append([word2id[w] for w in query] + [0] * ls)  # 0-padded question
        QQ.append(query)
        a = np.zeros(len(ent2id))
        a[ent2id[answer]] = 1  # id-hot answer vector
        A.append(a)
        AA.append(ent2id[answer])
        # even positions are entities, odd positions are relations
        p = []
        for i in range(len(path)):
            if i % 2 == 0:
                p.append(ent2id[path[i]])
            else:
                p.append(rel2id[path[i]])
        P.append(p)
        PP.append(path)
        sg = []
        subgraph = subgraph.split('///')
        ls = max(0, memory_size - len(subgraph))
        for t in subgraph:
            t = t.split('#')
            if not len(t) == 3:
                print('subgraph not a triple form!')
                print(t)
            tt = [ent2id[t[0]], rel2id[t[1]], ent2id[t[2]]]
            if tt not in Triples.tolist():
                ls += 1  # dropped triple: pad its slot instead
                continue
            sg.append(tt)
        for i in range(ls):
            sg.append([0, 0, 0])
        D.append(sg)
        DD.append(subgraph)
        anset = answerset.split('/')[:-1]  # trailing '/' leaves an empty last field
        S.append([ent2id[x] for x in anset])
        SS.append(anset)
    return np.array(Q), np.array(A), np.array(P), np.array(D), np.array(S), QQ, AA, PP, DD, SS, Triples, KBs, sentence_size, memory_size, tails_size
def read_KB(KB_file, entities, relations):
    """Collect entity and relation names from a tab-separated triple file.

    Each line of ``KB_file`` is ``head \\t relation \\t tail``.  Heads and
    tails are added to ``entities`` and relation names to ``relations``;
    both sets are mutated in place.
    """
    if not os.path.isfile(KB_file):
        raise Exception("!! %s is not found!!" % KB_file)
    with open(KB_file) as f:
        triple_lines = f.readlines()
    for raw in triple_lines:
        fields = raw.strip().split('\t')
        entities.add(fields[0])
        entities.add(fields[2])
        relations.add(fields[1])
def get_KB(KB_file, ent2id, rel2id):
    """Load the KB into a flattened (head, relation) -> tails lookup matrix.

    Args:
        KB_file: path to tab-separated ``head \\t relation \\t tail`` triples.
        ent2id: dict mapping entity name -> int id.
        rel2id: dict mapping relation name -> int id.

    Returns:
        (Triples, KBmatrix, tails_size):
            Triples: (ntriples, 3) int array of [h, r, t] ids.
            KBmatrix: (nents*nrels, tails_size) int array; row ``h*nrels+r``
                lists the tail ids reachable from head h via relation r,
                0-padded on the right.
            tails_size: the largest number of tails of any (h, r) pair.
    """
    nwords = len(ent2id)
    nrels = len(rel2id)
    tails = np.zeros([nwords * nrels, 1], 'int32')  # tail counter per (h, r)
    KBmatrix = np.zeros([nwords * nrels, nwords], 'int32')
    Triples = []
    b = 0  # count of deliberately dropped triples (always 0; kept for the log line)
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(KB_file) as f:
        for line in f.readlines():
            line = line.strip().split('\t')
            h = ent2id[line[0]]
            r = rel2id[line[1]]
            t = ent2id[line[2]]
            Triples.append([h, r, t])
            # flatten (h, r) into a single row index: h*nrels + r
            lenlist = tails[h * nrels + r]
            KBmatrix[h * nrels + r, lenlist] = t
            tails[h * nrels + r] += 1
    # BUG FIX: was a Python-2-only print statement; %-formatting keeps the
    # output byte-identical on both Python versions.
    print('delete triples: %d' % b)
    return np.array(Triples), KBmatrix[:, :np.max(tails)], np.max(tails)
def read_data(data_file, words):
    """Parse a tab-separated QA file.

    Each line is ``question \\t answer \\t path \\t answer_set \\t subgraph``
    (extra fields are kept).  Question tokens are added to the ``words`` set
    in place.

    Returns:
        (data, sentence_size, memory_size): the split rows, the longest
        question length in tokens, and the largest subgraph size in
        '///'-separated triples.
    """
    if not os.path.isfile(data_file):
        raise Exception("!! %s is not found!!" % data_file)
    with open(data_file) as f:
        raw_lines = f.readlines()
    data = []
    questions = []
    subgraphs = []
    for raw in raw_lines:
        fields = raw.strip().split('\t')
        data.append(fields)
        q_tokens = fields[0].strip().split()
        words.update(q_tokens)
        questions.append(q_tokens)
        subgraphs.append(fields[4].strip().split('///'))  # subgraph triples for memn2n
    sentence_size = max(len(q) for q in questions)
    memory_size = max(len(sg) for sg in subgraphs)
    return data, sentence_size, memory_size
def tokenize(sent):
    '''Return the tokens of a sentence including punctuation.
    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
    '''
    # BUG FIX: the original pattern '(\W+)?' can match the empty string, which
    # re.split() rejects with ValueError on Python 3.7+ (and warned about
    # before that); r'(\W+)' splits identically without ever matching empty.
    # Also use a raw string so the backslash escape is not interpreted.
    return [tok.strip() for tok in re.split(r'(\W+)', sent) if tok.strip()]
| 10,106 | 28.380814 | 142 | py |
IRN | IRN-master/baseline.py | import os
import math
import random
import numpy as np
import tensorflow as tf
from utils import add_gradient_noise,zero_nil_slot,position_encoding
from tensorflow.contrib.seq2seq import *
from tensorflow.python.layers.core import Dense
class MemN2N(object):
    """End-To-End Memory Network baseline (reference: memn2n_qa).

    Builds a TF1 graph at construction time: multi-hop soft attention over a
    memory of KB triples, trained with softmax cross-entropy against an
    id-hot answer over the entity vocabulary.  ``batch_fit``/``predict``/
    ``predict_proba`` run the graph through the session passed to __init__.
    """
    def __init__(self, config, sess):
        """Read hyper-parameters from config, build the graph, init variables."""
        self._data_file = config.data_file
        self._path_size = config.path_size
        self._batch_size = config.batch_size
        self._vocab_size = config.nwords
        self._rel_size = config.nrels
        self._ent_size = config.nents
        self._sentence_size = config.query_size
        self._memory_size = config.mem_size
        self._embedding_size = config.edim
        self._hops = config.nhop
        self._max_grad_norm = config.max_grad_norm
        self._init = tf.contrib.layers.xavier_initializer()
        # self._nonlin = nonlin
        # self._init = tf.random_normal_initializer(stddev=config.init_std)
        self._opt = tf.train.AdamOptimizer()
        #self._opt = tf.train.GradientDescentOptimizer(learning_rate=config.init_lr)
        self._name = "MemN2N"
        self._checkpoint_dir = config.checkpoint_dir+'/'+self._name
        if not os.path.exists(self._checkpoint_dir):
            os.makedirs(self._checkpoint_dir)
        self._encoder = config.encoder
        self._build_inputs()
        self._build_vars()
        self._saver = tf.train.Saver(max_to_keep=10)
        #encoding_shape = _sentence_size * _embedding_size
        #self._encoding = tf.constant(position_encoding(self._sentence_size, self._embedding_size), name="encoding")
        # cross entropy as loss
        inner_loss, ans_list, logits = self._inference() # (batch_size, ent_size)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self._answers, tf.float32), name="cross_entropy")
        loss_op = tf.reduce_sum(cross_entropy, name="loss_op") + inner_loss
        # gradient pipeline, seem not affect much
        #grads_and_vars = self._opt.compute_gradients(loss_op,[self.A,self.B,self.C,self.R,self.TA,self.TC])
        grads_and_vars = self._opt.compute_gradients(loss_op)
        # clip by norm and perturb with noise to stabilise training
        grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars if g is not None]
        grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
        nil_grads_and_vars = []
        for g, v in grads_and_vars:
            # zero the gradient of row 0 so the nil/padding embedding stays 0
            if v.name in self._nil_vars:
                nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                nil_grads_and_vars.append((g, v))
        for g,v in nil_grads_and_vars:
            print g, v.name
        #grads_and_vars = [(tf.Print(g, [v.name,str(g.get_shape()),g], summarize=1e1/2), v) for g, v in nil_grads_and_vars]
        #train_op = self._opt.apply_gradients(grads_and_vars, name="train_op")
        train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
        # predict ops
        predict_op = tf.argmax(logits, 1, name="predict_op") #(b,)
        predict_proba_op = tf.nn.softmax(logits, name="predict_proba_op")
        # assign ops
        self.loss_op = loss_op
        self.predict_op = predict_op
        self.predict_list_op = ans_list
        self.predict_proba_op = predict_proba_op
        self.train_op = train_op
        init_op = tf.global_variables_initializer()
        self._sess = sess
        self._sess.run(init_op)
    def _build_inputs(self):
        """Placeholders for one batch: memory triples, question, answer, gold path."""
        self._stories = tf.placeholder(tf.int32, [None, self._memory_size, 3], name="stories")
        #self._stories = tf.placeholder(tf.int32, [None, self._memory_size, 1], name="stories")
        self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
        self._answers = tf.placeholder(tf.int32, [None, self._ent_size], name="answers")
        if self._data_file == "WC-C":
            # conjunctive questions carry two gold sub-paths
            self._paths = tf.placeholder(tf.int32, [None, 2, self._path_size], name="paths") #id for e1,r1,e2,r2,a
        else:
            self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
        self._answers_id = tf.placeholder(tf.int32, [None], name="answers_id") #id for answer
        self._paddings = tf.placeholder(tf.int64, [None], name="paddings") #for id_padding
        self._ones = tf.placeholder(tf.float32, [None], name="paddings") #for multiple
        self._zeros = tf.placeholder(tf.float32, [None], name="paddings") #for add
    def _build_vars(self):
        """Embedding tables (row 0 = nil slot) and output projections."""
        with tf.variable_scope(self._name):
            nil_word_slot = tf.zeros([1, self._embedding_size])
            nil_rel_slot = tf.zeros([1, self._embedding_size])
            A = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._ent_size-1, self._embedding_size]) ])
            B = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
            R = tf.concat(axis=0, values=[ nil_rel_slot, self._init([self._rel_size-1, self._embedding_size]) ])
            self.A = tf.Variable(A, name="A") # encode entity to vector to calculate weight
            self.B = tf.Variable(B, name="B") # encode question-words to vector
            self.C = tf.Variable(A, name="C") # encode entity to vector
            self.R = tf.Variable(R, name="R") # encode relation to vector
            #self.A = tf.Variable(self._init([self._ent_size, self._embedding_size]), name="A") # encode entity to vector to calculate weight
            #self.B = tf.Variable(self._init([self._vocab_size, self._embedding_size]), name="B") # encode question-words to vector
            #self.C = tf.Variable(self._init([self._ent_size, self._embedding_size]), name="C") # encode entity to vector
            #self.R = tf.Variable(self._init([self._rel_size, self._embedding_size]), name="R") # encode relation to vector
            #self.TA = tf.Variable(self._init([self._memory_size, self._embedding_size]), name='TA')
            #self.TC = tf.Variable(self._init([self._memory_size, self._embedding_size]), name='TC')
            self.H = tf.Variable(self._init([self._embedding_size, self._embedding_size]), name="H")
            self.W = tf.Variable(self._init([self._embedding_size, self._ent_size]), name="W")
            self._nil_vars = set([self.A.name, self.C.name, self.B.name, self.R.name]) #need to keep first line 0
    def _inference(self):
        """Multi-hop memory attention.

        Returns (inner_loss, al, logits): an (unused, always-0) auxiliary
        loss, the per-hop argmax prediction ids (batch, 1 + 2*hops), and the
        final answer logits over entities (batch, ent_size).
        """
        with tf.variable_scope(self._name):
            q_emb = tf.nn.embedding_lookup(self.B, self._queries) #Ax_ij shape is (batch, sentence_size ,embedding_size)
            u_0 = tf.reduce_sum(q_emb, 1) #shape is (batch,embed) -- bag-of-words question
            q = tf.transpose(q_emb,[1,0,2]) #(s,b,e)
            q = tf.reshape(q,[-1,self._embedding_size]) #(s*b,e)
            q = tf.split(axis=0,num_or_size_splits=self._sentence_size,value=q) #a list of sentence_size tensors of shape [batch,embed]
            '''
            # Define a lstm cell with tensorflow
            if self._encoder:
                lstm_cell = rnn_cell.BasicLSTMCell(self._embedding_size, forget_bias=1.0)
                # Get lstm cell output
                outputs, states = rnn.rnn(lstm_cell, q, dtype=tf.float32) #s * (b,e) list
                u_0 = outputs[-1] #(b,e)
            '''
            u=[u_0]
            # hop-0 answer guess, kept as the first column of the prediction list
            a_index = tf.argmax(tf.matmul(u_0, self.W),1)
            al = tf.reshape(tf.cast(a_index,tf.int32),[-1,1])
            #d1 = stories.get_shape().as_list()[0] #b = None
            d2 = self._stories.get_shape().as_list()[1] #memory
            d3 = self._stories.get_shape().as_list()[2] #triple_size = 3
            e1 = tf.reshape(self._stories[:,:,0],[-1,d2,1]) #(batch,memory,1)
            r = tf.reshape(self._stories[:,:,1],[-1,d2,1])
            e2 = tf.reshape(self._stories[:,:,2],[-1,d2,1])
            inner_loss = 0
            for hop in range(self._hops):
                #m is attention: memory triples embedded with table A (+R)
                m_1 = tf.nn.embedding_lookup(self.A, e1) #shape is (batch,memory,1,embedding)
                m_2 = tf.nn.embedding_lookup(self.R, r) #shape is (batch,memory,1,embedding)
                m_3 = tf.nn.embedding_lookup(self.A, e2) #shape is (batch,memory,1,embedding)
                m_emb = tf.concat(axis=2,values=[m_1,m_2,m_3]) #shape is (batch,memory,3,embedding)
                m = tf.reduce_sum(m_emb, 2) #+ self.TA #(batch,memory,embed)
                # mm = tf.reduce_sum(tf.nn.embedding_lookup(self.C,stories),2) + self.TC #(b,m,s,e)->(b,m,e)
                # mm is the output memory, embedded with table C (+R)
                mm_1 = tf.nn.embedding_lookup(self.C, e1) #shape is (batch,memory,1,embedding)
                mm_2 = tf.nn.embedding_lookup(self.R, r) #shape is (batch,memory,1,embedding)
                mm_3 = tf.nn.embedding_lookup(self.C, e2) #shape is (batch,memory,1,embedding)
                mm_emb = tf.concat(axis=2,values=[mm_1,mm_2,mm_3]) #shape is (batch,memory,3,embedding)
                mm = tf.reduce_sum(mm_emb, 2) #+ self.TC #(batch,memory,embed)
                '''
                m = tf.squeeze(tf.nn.embedding_lookup(self.A, self._stories)) #(b,m,1,e)->(b,m,e)
                mm = tf.squeeze(tf.nn.embedding_lookup(self.C, self._stories)) #(b,m,1,e)->(b,m,e)
                '''
                # hack to get around no reduce_dot
                u_temp = tf.transpose(tf.expand_dims(u[-1], -1), [0, 2, 1]) #(b,e,1)->(b,1,e)
                dotted = tf.reduce_sum(m * u_temp, 2) #(b,m,e)->(b,m)
                # Calculate probabilities/ weights over slots
                probs = tf.nn.softmax(dotted)
                probs_temp = tf.transpose(tf.expand_dims(probs, -1), [0, 2, 1]) #(batch,m,1)->(batch,1,m)
                c_temp = tf.transpose(mm, [0, 2, 1]) #(b,e,m)
                o_k = tf.reduce_sum(c_temp * probs_temp, 2) #(b,e,m)->(b,e) sum_over_memoryslots
                u_k = tf.matmul(u[-1], self.H) + o_k #(batch, embed) -- controller update
                # nonlinearity
                #if self._nonlin:
                #    u_k = nonlin(u_k)
                a_index = tf.argmax(tf.matmul(u_k, self.W),1)
                u.append(u_k)
                # append a zero (relation placeholder) and this hop's answer guess
                al = tf.concat(axis=1,values=[al,tf.reshape(tf.cast(tf.zeros_like(a_index),tf.int32),[-1,1])])
                al = tf.concat(axis=1,values=[al,tf.reshape(tf.cast(a_index,tf.int32),[-1,1])])
                #additional supervision, wc-c is not applicable
                #logits = tf.matmul(u_k, self.W)
                #real_ans_onehot = tf.one_hot(self._paths[:,2 * hop+2], self._ent_size, on_value=1.0, off_value=0.0, axis=-1) #(b,rel_size)
                #inner_loss = inner_loss + tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=real_ans_onehot)) #(b,1)
            #u_k = tf.matmul(u[-1], self.H)
            return inner_loss, al, tf.matmul(u_k, self.W) #(b,e)*(e,ent) -> (b,ent)
    def batch_fit(self, stories, queries, answers, answers_id, paths):
        #def batch_fit(self, stories, queries, answers):
        """Runs the training algorithm over the passed batch
        Args:
            stories: Tensor (None, memory_size, 3)
            queries: Tensor (None, sentence_size)
            answers: Tensor (None, ent_size)
        Returns:
            loss: floating-point number, the loss computed for the batch
        """
        nexample = queries.shape[0]
        pad = np.arange(nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._stories: stories, self._queries: queries, self._answers: answers, self._answers_id: answers_id, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
        #feed_dict = {self._stories: stories, self._queries: queries, self._answers: answers}
        loss, _ = self._sess.run([self.loss_op, self.train_op], feed_dict=feed_dict)
        return loss
    def predict(self, stories, queries, paths):
        #def predict(self, stories, queries):
        """Predicts answers as one-hot encoding.
        Args:
            stories: Tensor (None, memory_size, 3)
            queries: Tensor (None, sentence_size)
        Returns:
            answers: id (None, 1) ,predict_op = max(1, [None,ent_size])
        """
        nexample = queries.shape[0]
        pad = np.arange(nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._stories: stories, self._queries: queries, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
        #feed_dict = {self._stories: stories, self._queries: queries}
        return self._sess.run([self.predict_op,self.predict_list_op], feed_dict=feed_dict)
    def predict_proba(self, stories, queries, paths):
        #def predict_proba(self, stories):
        """Predicts probabilities of answers.
        Args:
            stories: Tensor (None, memory_size, 3)
            queries: Tensor (None, sentence_size)
        Returns:
            answers: Tensor (None, ent_size)
        """
        nexample = queries.shape[0]
        pad = np.arange(nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._stories: stories, self._queries: queries, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
        #feed_dict = {self._stories: stories, self._queries: queries}
        return self._sess.run(self.predict_proba_op, feed_dict=feed_dict)
    def store(self):
        """Save current session parameters under the checkpoint directory."""
        file = os.path.join(self._checkpoint_dir, self._name)
        print(" [*] save current parameters to %s." % file )
        self._saver.save(self._sess, file)
    def load(self):
        """Restore the latest checkpoint if one exists; warn (not raise) otherwise."""
        print(" [*] Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(self._checkpoint_dir,latest_filename = 'checkpoint')
        if ckpt and ckpt.model_checkpoint_path:
            print ("[*] Read from %s" % ckpt.model_checkpoint_path)
            self._saver.restore(self._sess, ckpt.model_checkpoint_path)
        else:
            print (" [!] Test mode but no checkpoint found")
            #raise Exception(" [!] Trest mode but no checkpoint found")
class KVMemN2N(object):
def __init__(self, config, sess):
self._data_file = config.data_file
self._path_size = config.path_size
self._batch_size = config.batch_size
self._vocab_size = config.nwords
self._rel_size = config.nrels
self._ent_size = config.nents
self._sentence_size = config.query_size
#|key| = |value|
self._memory_key_size = config.mem_size
self._memory_value_size = config.mem_size
self._embedding_size = config.edim
self._feature_size = config.feature
self._hops = config.nhop
self._max_grad_norm = config.max_grad_norm
self._init = tf.contrib.layers.xavier_initializer()
# self._init = tf.random_normal_initializer(stddev=config.init_std)
self._opt = tf.train.AdamOptimizer()
self._name = "KVMemN2N"
self._checkpoint_dir = config.checkpoint_dir+'/'+self._name
if not os.path.exists(self._checkpoint_dir):
os.makedirs(self._checkpoint_dir)
self._build_inputs()
self._build_vars()
self._saver = tf.train.Saver()
#self._encoding = tf.constant(position_encoding(self._sentence_size, self._embedding_size), name="encoding")
# cross entropy as loss
'''
out = self._inference(self._KBcandidates, self._queries) # (f,b)
out = tf.transpose(out) #(b,f)
y_tmp = tf.matmul(self.A, self.V, transpose_b=True) # (feature ,embedding/hidden) * (embedding ,ent_size) -> (f,v)
logits = tf.matmul(out, y_tmp) # (b,f)*(f,ent) -> (b,ent)
'''
out, logits, ans_list, inner_loss = self._inference(self._KBcandidates, self._queries)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self._answers, tf.float32), name="cross_entropy")
loss_op = tf.reduce_sum(cross_entropy, name="loss_op") + inner_loss
# gradient pipeline
grads_and_vars = self._opt.compute_gradients(loss_op)
grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars if g is not None]
#grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
#nil_grads_and_vars = []
#for g, v in grads_and_vars:
# if v.name in self._nil_vars:
# nil_grads_and_vars.append((zero_nil_slot(g), v))
# else:
# nil_grads_and_vars.append((g, v))
#grads_and_vars = [(tf.Print(g, [v.name,g], summarize=1e0), v) if g is not None else None for g, v in grads_and_vars]
train_op = self._opt.apply_gradients(grads_and_vars, name="train_op")
# predict ops
predict_op = tf.argmax(logits, 1, name="predict_op")
predict_proba_op = tf.nn.softmax(logits, name="predict_proba_op")
# assign ops
self.loss_op = loss_op
self.predict_op = predict_op
self.predict_list_op = ans_list
self.predict_proba_op = predict_proba_op
self.train_op = train_op
init_op = tf.global_variables_initializer()
self._sess = sess
self._sess.run(init_op)
def _build_inputs(self):
#self._keys = tf.placeholder(tf.int32, [None, self._memory_key_size, 2], name="memory_key")
#self._values = tf.placeholder(tf.int32, [None, self._memory_value_size,1],name="memory_value")
self._KBcandidates = tf.placeholder(tf.int32, [None, self._memory_key_size,3],name="KBcandidates")
self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
self._answers = tf.placeholder(tf.int32, [None, self._ent_size], name="answers")
if self._data_file == "WC-C":
self._paths = tf.placeholder(tf.int32, [None, 2, self._path_size], name="paths") #id for e1,r1,e2,r2,a
else:
self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
self._answers_id = tf.placeholder(tf.int32, [None], name="answers_id") #id for answer
self._paddings = tf.placeholder(tf.int64, [None], name="paddings") #for id_padding
self._ones = tf.placeholder(tf.float32, [None], name="paddings") #for multiple
self._zeros = tf.placeholder(tf.float32, [None], name="paddings") #for add
def _build_vars(self):
with tf.variable_scope(self._name):
nil_word_slot = tf.zeros([1, self._embedding_size])
nil_rel_slot = tf.zeros([1, self._embedding_size])
Erep = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._ent_size-1, self._embedding_size]) ])
Wrep = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
Rrep = tf.concat(axis=0, values=[ nil_rel_slot, self._init([self._rel_size-1, self._embedding_size]) ])
self.K = tf.Variable(Erep, name="K") # encode key-entity to vector to calculate weight
self.B = tf.Variable(Wrep, name="B") # encode question-words to vector
self.V = tf.Variable(Erep, name="V") # encode value-entity to vector
self.R = tf.Variable(Rrep, name="R") # encode relation to vector
self.TK = tf.Variable(self._init([self._memory_key_size, self._embedding_size]), name='TK')
self.TV = tf.Variable(self._init([self._memory_value_size, self._embedding_size]), name='TV')
self.A = tf.Variable(self._init([self._feature_size, self._embedding_size]), name ='A')
self.H_list=[]
for _ in range(self._hops):
# define R for variables
H = tf.get_variable('H{}'.format(_), shape=[self._feature_size, self._feature_size],
initializer=tf.contrib.layers.xavier_initializer())
self.H_list.append(H)
self._nil_vars = set([self.K.name, self.V.name, self.B.name, self.R.name])
    def _inference(self, KBcandidates, queries):
        """Multi-hop key-value memory reasoning over the candidate triples.

        Args:
            KBcandidates: int Tensor (batch, memory_size, 3) of (e1, r, e2) id triples.
            queries: int Tensor (batch, sentence_size) of word ids.
        Returns:
            Tuple ``(u_final, logits, al, inner_loss)`` where ``u_final`` is the
            last hop state (feature_size, batch), ``logits`` is (batch, ent_size),
            ``al`` stacks per-hop argmax answer ids (with zero placeholders in
            the relation slots), and ``inner_loss`` is 0 here (the per-hop
            supervision below is commented out).
        """
        with tf.variable_scope(self._name):
            q_emb = tf.reduce_sum(tf.nn.embedding_lookup(self.B, queries), 1) #\sum Bx_ij shape is (batch, (sentence_size ),embedding_size)
            u_0 = tf.matmul(self.A, q_emb, transpose_b = True) #shape is (feature * embed) * (batch,embed)^T = (feature, batch)
            u = [u_0] #feature * batch
            # Hop-0 prediction from the raw question state (before reading memory).
            out = tf.transpose(u[-1]) #(b,f)
            y_tmp = tf.matmul(self.A, self.V, transpose_b=True) # (feature ,embedding/hidden) * (embedding ,ent_size) -> (f,v)
            logits = tf.matmul(out, y_tmp) # (b,f)*(f,ent) -> (b,ent)
            a_index = tf.argmax(logits,1)
            al = tf.reshape(tf.cast(a_index,tf.int32),[-1,1])
            #d1 = KBcandidates.get_shape().as_list()[0] #b = None
            d2 = KBcandidates.get_shape().as_list()[1] # memory_key/value_size
            d3 = KBcandidates.get_shape().as_list()[2] #triple_size = 3
            # Split each (e1, r, e2) triple into head / relation / tail id tensors.
            e1 = tf.reshape(KBcandidates[:,:,0],[-1,d2,1]) #(batch,memory,1)
            r = tf.reshape(KBcandidates[:,:,1],[-1,d2,1])
            e2 = tf.reshape(KBcandidates[:,:,2],[-1,d2])
            m_1 = tf.nn.embedding_lookup(self.K, e1) #shape is (batch,memory,1,embedding)
            m_2 = tf.nn.embedding_lookup(self.R, r) #shape is (batch,memory,1,embedding)
            mvalues = tf.nn.embedding_lookup(self.V, e2) + self.TV #shape is (batch,memory,embedding)
            # Memory key = head-entity embedding + relation embedding (+ TK encoding).
            key_emb = tf.concat(axis=2,values=[m_1,m_2]) #shape is (batch,memory,3,embedding)
            mkeys = tf.reduce_sum(key_emb, 2) + self.TK #(batch,memory,embed)
            inner_loss = 0
            for h in range(self._hops):
                H = self.H_list[h] #(f,f)
                # Project keys and values into feature space via A.
                k_tmp = tf.reshape(tf.transpose(mkeys, [2, 0, 1]), [self._embedding_size, -1]) # [embedding_size, batch_size x memory_key_size]
                a_k_tmp = tf.matmul(self.A, k_tmp) # [feature_size, batch_size x memory_key_size]
                a_k = tf.reshape(tf.transpose(a_k_tmp), [-1, self._memory_key_size, self._feature_size]) # [batch_size, memory_key_size, feature_size]
                v_tmp = tf.reshape(tf.transpose(mvalues, [2, 0, 1]), [self._embedding_size, -1]) # [embedding_size, batch_size x memory_value_size]
                a_v_tmp = tf.matmul(self.A, v_tmp) # [feature_size, batch_size x memory_key_size]
                a_v = tf.reshape(tf.transpose(a_v_tmp), [-1, self._memory_value_size, self._feature_size]) # [batch_size, memory_value_size, feature_size]
                # hack to get around no reduce_dot
                u_expanded = tf.expand_dims(tf.transpose(u[-1]), [1]) #(b,f)->(b,1,f)
                dotted = tf.reduce_sum(a_k * u_expanded, 2) # (b,mk,f) * (b,1,f) -> (b,mk)
                # Calculate probabilities/ weights
                probs = tf.nn.softmax(dotted)
                probs_temp = tf.expand_dims(probs, -1) #(b,m) -> (batch,m,1)
                o_k = tf.transpose( tf.reduce_sum(probs_temp * a_v, 1) ) #(b,m,f)->(b,f)->(f,b) sum_over_memoryslots
                u_k = tf.matmul(H, u[-1]+o_k) #(f,f)*(f,b) -> (f,b)
                # nonlinearity
                #if self._nonlin:
                #    u_k = nonlin(u_k)
                #out = self._inference(self._KBcandidates, self._queries) # (f,b)
                out = tf.transpose(u_k) #(b,f)
                y_tmp = tf.matmul(self.A, self.V, transpose_b=True) # (feature ,embedding/hidden) * (embedding ,ent_size) -> (f,v)
                logits = tf.matmul(out, y_tmp) # (b,f)*(f,ent) -> (b,ent)
                a_index = tf.argmax(logits,1)
                # The zeros column is a placeholder for the (unpredicted) relation
                # slot, so al alternates [ans, 0, ans, 0, ...] like a path layout.
                al = tf.concat(axis=1,values=[al,tf.reshape(tf.cast(tf.zeros_like(a_index),tf.int32),[-1,1])])
                al = tf.concat(axis=1,values=[al,tf.reshape(tf.cast(a_index,tf.int32),[-1,1])])
                #additional supervision, wc-c is not applicable
                #real_ans_onehot = tf.one_hot(self._paths[:,2 * h+2], self._ent_size, on_value=1.0, off_value=0.0, axis=-1) #(b,rel_size)
                #inner_loss = inner_loss + tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=real_ans_onehot)) #(b,1)
                u.append(u_k)
            return u[-1], logits, al, inner_loss
#def batch_fit(self, KBcandidates, queries, answers):
def batch_fit(self, KBcandidates, queries, answers, answers_id, paths):
"""Runs the training algorithm over the passed batch
Args:
KBcandidates: Tensor (None, memory_size, 3)
queries: Tensor (None, sentence_size)
answers: Tensor (None, ent_size)
Returns:
loss: floating-point number, the loss computed for the batch
"""
nexample = queries.shape[0]
pad = np.arange(nexample)
ones = np.ones(nexample)
zeros = np.zeros(nexample)
feed_dict = {self._KBcandidates: KBcandidates, self._queries: queries, self._answers: answers, self._answers_id: answers_id, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
#feed_dict = {self._KBcandidates: KBcandidates, self._queries: queries, self._answers: answers}
loss, _ = self._sess.run([self.loss_op, self.train_op], feed_dict=feed_dict)
return loss
#def predict(self, KBcandidates, queries):
def predict(self, KBcandidates, queries, paths):
"""Predicts answers as one-hot encoding.
Args:
stories: Tensor (None, memory_size, 3)
queries: Tensor (None, sentence_size)
Returns:
answers: id (None, 1) ,predict_op = max(1, [None,ent_size])
"""
nexample = queries.shape[0]
pad = np.arange(nexample)
ones = np.ones(nexample)
zeros = np.zeros(nexample)
feed_dict = {self._KBcandidates: KBcandidates, self._queries: queries, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
#feed_dict = {self._KBcandidates: KBcandidates, self._queries: queries}
return self._sess.run([self.predict_op,self.predict_list_op], feed_dict=feed_dict)
#def predict_proba(self, KBcandidates, queries):
def predict_proba(self, KBcandidates, queries):
"""Predicts probabilities of answers.
Args:
stories: Tensor (None, memory_size, 3)
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, ent_size)
"""
nexample = queries.shape[0]
pad = np.arange(nexample)
ones = np.ones(nexample)
zeros = np.zeros(nexample)
feed_dict = {self._KBcandidates: KBcandidates, self._queries: queries, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
#feed_dict = {self._KBcandidates: KBcandidates, self._queries: queries}
return self._sess.run(self.predict_proba_op, feed_dict=feed_dict)
def store(self):
file = os.path.join(self._checkpoint_dir, self._name)
print(" [*] save current parameters to %s." % file )
self._saver.save(self._sess, file)
def load(self):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(self._checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
self._saver.restore(self._sess, ckpt.model_checkpoint_path)
else:
print(" [!] Test mode but no checkpoint found")
class Embed(object):
    """Margin-ranking embedding baseline (Bordes-style question/answer embedding).

    Scores an answer entity by the dot product between the bag-of-words
    question embedding (QE) and the entity embedding (EE), trained with a
    margin loss against a randomly sampled wrong entity.  Python 2 / TF1 code.
    """
    def __init__(self, config, sess):
        """Build the whole TF1 graph (inputs, variables, loss, train op) on ``sess``."""
        self._data_file = config.data_file
        self._batch_size = config.batch_size
        self._vocab_size = config.nwords #also entity_size
        self._rel_size = config.nrels
        self._ent_size = config.nents
        self._tail_size = config.tails_size #3rd-dim of KB-matrix
        self._sentence_size = config.query_size
        self._embedding_size = config.edim
        self._path_size = config.path_size
        self._memory_key_size = config.mem_size
        self._encoder = config.encoder
        self._margin = 1  # ranking-loss margin
        self._hops = config.nhop
        self._max_grad_norm = config.max_grad_norm
        self._init = tf.contrib.layers.xavier_initializer()
        #self._init = tf.random_normal_initializer(stddev=config.init_std)
        #self._opt = tf.train.GradientDescentOptimizer(learning_rate=config.init_lr)
        #self._opt = tf.train.AdadeltaOptimizer(learning_rate=config.init_lr)
        self._opt = tf.train.AdamOptimizer()
        self._name = "Embed"
        self._checkpoint_dir = config.checkpoint_dir+'/'+self._name
        if not os.path.exists(self._checkpoint_dir):
            os.makedirs(self._checkpoint_dir)
        self._build_inputs()
        self._build_vars()
        self._saver = tf.train.Saver(max_to_keep=10)
        self._encoding = tf.constant(position_encoding(self._sentence_size, self._embedding_size), name="encoding")
        batch_loss, p, self.score_op = self._inference() # (b,1), (batch_size, 5)
        loss_op = tf.reduce_sum(batch_loss, name="loss_op")# + 0.000005 * 1.7 * tf.reduce_sum(tf.square(tf.abs(self.M)))
        # Ops that re-normalize the embedding rows; run alongside each train step.
        E_norm_op = tf.nn.l2_normalize(self.EE,1)
        Q_norm_op = tf.nn.l2_normalize(self.QE,1)
        # gradient pipeline, seem not affect much
        grads_and_vars = self._opt.compute_gradients(loss_op)
        grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars if g is not None]
        grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
        # Zero the gradient of row 0 (the nil/padding row) of listed embeddings.
        nil_grads_and_vars = []
        for g, v in grads_and_vars:
            if v.name in self._nil_vars:
                nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                nil_grads_and_vars.append((g, v))
        print "nil_grads_and_vars"
        for g,v in nil_grads_and_vars:
            print g, v.name
        #grads_and_vars = [(tf.Print(g, [v.name,str(g.get_shape()),g], summarize=1e1/2), v) for g, v in grads_and_vars]
        train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
        '''
        predict_op = tf.argmax(logits, 1, name="predict_op") #(b,)
        predict_proba_op = tf.nn.softmax(logits, name="predict_proba_op")
        '''
        predict_op = p
        # assign ops
        self.loss_op = loss_op
        self.predict_op = predict_op
        #self.predict_proba_op = predict_proba_op
        self.train_op = train_op
        self.Q_norm_op = Q_norm_op
        self.E_norm_op = E_norm_op
        init_op = tf.global_variables_initializer()
        self._sess = sess
        self._sess.run(init_op)
    def _build_inputs(self):
        """Declare the feed placeholders.

        NOTE(review): _paddings/_ones/_zeros all carry the graph name
        "paddings"; TF will uniquify them. They are fed positionally via
        feed_dict, so this is harmless but confusing.
        """
        self._KBs = tf.placeholder(tf.int32, [self._ent_size * self._rel_size, self._tail_size], name="KBs") #_KBs[i*14+j]=[k1,k2,k3] stand for (i,j,k1)(i,j,k2)
        self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
        if self._data_file == "WC-C":
            # Conjunctive questions carry two paths per example.
            self._paths = tf.placeholder(tf.int32, [None, 2, self._path_size], name="paths") #id for e1,r1,e2,r2,a
        else:
            self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
        #self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
        self._answers = tf.placeholder(tf.int32, [None, self._ent_size], name="answers") #id-hot for answer
        self._answers_id = tf.placeholder(tf.int32, [None], name="answers_id") #id for answer
        self._paddings = tf.placeholder(tf.int64, [None], name="paddings") #for id_padding
        self._ones = tf.placeholder(tf.float32, [None], name="paddings") #for multiple
        self._zeros = tf.placeholder(tf.float32, [None], name="paddings") #for add
    def _build_vars(self):
        """Create the entity (EE) and question-word (QE) embedding tables.

        Row 0 of each table is the nil/padding row and is kept at zero via
        ``self._nil_vars`` + ``zero_nil_slot`` in __init__.
        """
        with tf.variable_scope(self._name):
            nil_word_slot = tf.zeros([1, self._embedding_size])
            nil_rel_slot = tf.zeros([1, self._embedding_size])
            E = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._ent_size-1, self._embedding_size]) ])
            Q = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
            R = tf.concat(axis=0, values=[ nil_rel_slot, self._init([self._rel_size-1, self._embedding_size]) ])
            self.EE = tf.Variable(E, name="EE") # encode entity to vector to calculate weight
            self.QE = tf.Variable(Q, name="QE") # encode question-words to vector
            #self.M = tf.Variable(self._init([self._embedding_size, self._embedding_size]), name="M") #eembed->nwords
            self._nil_vars = set([self.EE.name, self.QE.name]) #need to keep first line 0
    def _inference(self):
        """Build the scoring graph.

        Returns:
            (loss, p, logits): per-example margin loss, the predicted path
            tensor ``p`` (gold path slots with argmax answers filled in), and
            (batch, ent_size) similarity logits.
        """
        with tf.variable_scope(self._name):
            #initial
            loss = tf.reshape(self._zeros,[-1,1],name='loss') #(none,1)
            if not self._data_file == 'WC-C':
                s_index = tf.reshape(self._paths[:,0],[-1,1]) #(none,1)
            q_emb = tf.nn.embedding_lookup(self.QE, self._queries) #Ax_ij shape is (batch, sentence_size ,embedding_size)
            '''
            if self._encoder:
                q = tf.transpose(q_emb,[1,0,2]) #(s,b,e)
                q = tf.reshape(q,[-1,self._embedding_size]) #(s*b,e)
                q = tf.split(0,self._sentence_size,q) #a list of sentence_size tensors of shape [batch,embed]
                # Define a lstm cell with tensorflow
                fw_lstm_cell = rnn_cell.BasicLSTMCell(self._embedding_size/2, forget_bias=1.0)
                bw_lstm_cell = rnn_cell.BasicLSTMCell(self._embedding_size/2, forget_bias=1.0)
                # Get lstm cell output
                #outputs, states = rnn.rnn(lstm_cell, q, dtype=tf.float32) #s * (b,e) list
                outputs,_,_ = rnn.bidirectional_rnn(fw_lstm_cell,bw_lstm_cell,q,dtype=tf.float32) # s * (b,2e) list
                q_emb = tf.transpose(tf.pack(outputs,0),[1,0,2]) #(s,b,2e)->(b,s,2e)
            '''
            #q = outputs[-1] #(b,e)
            q = tf.reduce_sum(q_emb, 1) #shape is (batch,embed) V^T*bag_of_words(q)
            # Gold answer embedding vs. a randomly-fed wrong answer (via _paddings).
            t = tf.nn.embedding_lookup(self.EE, self._answers_id) #(batch,embed)
            tt = tf.nn.embedding_lookup(self.EE, self._paddings)
            s = tf.reduce_sum(q*t, 1) #gold score
            ss = tf.reduce_sum(q*tt, 1) #wrong score
            '''
            tmp = tf.matmul(q,self.M)
            s = tf.reduce_sum(tmp * t, 1)
            ss = tf.reduce_sum(tmp * tt, 1)
            '''
            # Hinge loss: max(0, margin + wrong - gold).
            loss = self._margin + ss - s
            loss = tf.maximum(self._zeros,loss) #(b,1)
            logits = tf.matmul(q, self.EE, transpose_b = True) #(b,e)*(v,e) =(b,v)
            #logits = tf.matmul(tmp, self.EE, transpose_b = True) #(b,e)*(v,e) =(b,v)
            if self._data_file == 'WC-C':
                p = tf.reshape(self._paths[:,0,0:2],[-1,2])
                p = tf.concat(axis=1,values=[p,tf.reshape(tf.cast(tf.argmax(logits,1),tf.int32),[-1,1])])
                return loss, p, logits
            p = s_index
            #p = tf.concat(1,[p,tf.reshape(self._paths[:,1],[-1,1])])
            #p = tf.concat(1,[p,tf.reshape(tf.cast(tf.argmax(logits,1),tf.int32),[-1,1])])
            #p = tf.concat(1,[p,tf.reshape(self._paths[:,3],[-1,1])])
            #p = tf.concat(1,[p,tf.reshape(tf.cast(tf.argmax(logits,1),tf.int32),[-1,1])])
            # Interleave gold relations with the (same) predicted final answer.
            for i in range(0,self._hops*2,2):
                p = tf.concat(axis=1,values=[p,tf.reshape(self._paths[:,i+1],[-1,1])])
                p = tf.concat(axis=1,values=[p,tf.reshape(tf.cast(tf.argmax(logits,1),tf.int32),[-1,1])])
            return loss, p, logits
    def batch_fit(self, KBs, queries, answers, answers_id, paths):
        """Runs the training algorithm over the passed batch
        Args:
            stories: Tensor (None, memory_size, 3)
            queries: Tensor (None, sentence_size)
            answers: Tensor (None, ent_size)
            paths: Tensor
        Returns:
            loss: floating-point number, the loss computed for the batch
        """
        nexample = queries.shape[0]
        # Negative sampling: one random wrong entity id per example.
        pad = np.random.randint(self._ent_size,size=nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._KBs: KBs, self._queries: queries, self._answers: answers, self._answers_id: answers_id, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
        self._istrain = True
        loss, _ , _, _= self._sess.run([self.loss_op, self.train_op, self.Q_norm_op, self.E_norm_op], feed_dict=feed_dict)
        return loss
    def predict(self, KBs, queries, paths):
        """Predicts answers as one-hot encoding.
        Args:
            stories: Tensor (None, memory_size, 3)
            queries: Tensor (None, sentence_size)
        Returns:
            answers: id (None, 1) ,predict_op = max(1, [None,ent_size])
        """
        nexample = queries.shape[0]
        pad = np.arange(nexample)
        ones = np.ones(nexample)
        zeros = np.zeros(nexample)
        feed_dict = {self._KBs: KBs, self._queries: queries, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
        self._istrain = False
        return self._sess.run([self.predict_op,self.score_op], feed_dict=feed_dict)
    def store(self):
        """Save the current parameters under the checkpoint directory."""
        file = os.path.join(self._checkpoint_dir, self._name)
        #print(" [*] save current parameters to %s." % file )
        self._saver.save(self._sess, file)
    def load(self):
        """Restore the latest checkpoint if one exists; warn otherwise."""
        print(" [*] Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(self._checkpoint_dir,latest_filename = 'checkpoint')
        if ckpt and ckpt.model_checkpoint_path:
            print ("[*] Read from %s" % ckpt.model_checkpoint_path)
            self._saver.restore(self._sess, ckpt.model_checkpoint_path)
        else:
            print (" [!] Test mode but no checkpoint found")
            #raise Exception(" [!] Trest mode but no checkpoint found")
class SubgraphEmbed(object):
def __init__(self, config, sess):
self._data_file = config.data_file
self._batch_size = config.batch_size
self._vocab_size = config.nwords #also entity_size
self._rel_size = config.nrels
self._ent_size = config.nents
self._tail_size = config.tails_size #3rd-dim of KB-matrix
self._sentence_size = config.query_size
self._embedding_size = config.edim
self._path_size = config.path_size
self._memory_key_size = config.mem_size
self._encoder = config.encoder
self._margin = 1
self._hops = config.nhop
self._max_grad_norm = config.max_grad_norm
self._init = tf.contrib.layers.xavier_initializer()
#self._init = tf.random_normal_initializer(stddev=config.init_std)
#self._opt = tf.train.GradientDescentOptimizer(learning_rate=config.init_lr)
#self._opt = tf.train.AdadeltaOptimizer(learning_rate=config.init_lr)
self._opt = tf.train.AdamOptimizer()
self._name = "SubgraphEmbed"
self._checkpoint_dir = config.checkpoint_dir+'/'+self._name
if not os.path.exists(self._checkpoint_dir):
os.makedirs(self._checkpoint_dir)
self._build_inputs()
self._build_vars()
self._saver = tf.train.Saver(max_to_keep=10)
self._encoding = tf.constant(position_encoding(self._sentence_size, self._embedding_size), name="encoding")
batch_loss, p, self.score_op = self._inference() # (b,1), (batch_size, 5)
loss_op = tf.reduce_sum(batch_loss, name="loss_op")# + 0.000005 * 1.7 * tf.reduce_sum(tf.square(tf.abs(self.M)))
E_norm_op = tf.nn.l2_normalize(self.W,1)
Q_norm_op = tf.nn.l2_normalize(self.V,1)
# gradient pipeline, seem not affect much
grads_and_vars = self._opt.compute_gradients(loss_op)
grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars if g is not None]
grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
nil_grads_and_vars = []
for g, v in grads_and_vars:
if v.name in self._nil_vars:
nil_grads_and_vars.append((zero_nil_slot(g), v))
else:
nil_grads_and_vars.append((g, v))
print "nil_grads_and_vars"
for g,v in nil_grads_and_vars:
print g, v.name
#grads_and_vars = [(tf.Print(g, [v.name,str(g.get_shape()),g], summarize=1e1/2), v) for g, v in grads_and_vars]
train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
predict_op = p
# assign ops
self.loss_op = loss_op
self.predict_op = predict_op
self.train_op = train_op
self.E_norm_op = E_norm_op
self.Q_norm_op = Q_norm_op
init_op = tf.global_variables_initializer()
self._sess = sess
self._sess.run(init_op)
def _build_inputs(self):
self._KBs = tf.placeholder(tf.int32, [self._ent_size * self._rel_size, self._tail_size], name="KBs") #_KBs[i*14+j]=[k1,k2,k3] stand for (i,j,k1)(i,j,k2)
self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
if self._data_file == "WC-C":
self._paths = tf.placeholder(tf.int32, [None, 2, self._path_size], name="paths") #id for e1,r1,e2,r2,a
else:
self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
#self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
self._answers = tf.placeholder(tf.int32, [None, self._ent_size], name="answers") #id-hot for answer
self._answers_id = tf.placeholder(tf.int32, [None], name="answers_id") #id for answer
self._paddings = tf.placeholder(tf.int64, [None], name="paddings") #for id_padding
self._ones = tf.placeholder(tf.float32, [None], name="paddings") #for multiple
self._zeros = tf.placeholder(tf.float32, [None], name="paddings") #for add
def _build_vars(self):
with tf.variable_scope(self._name):
nil_word_slot = tf.zeros([1, self._embedding_size])
E = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._ent_size-1, self._embedding_size]) ])
Q = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
self.Q = tf.Variable(Q, name="Q") # encode question
self.W = tf.Variable(E, name="W") # encode entity and path
self.V = tf.Variable(E, name="v") # encode subgraph
self._nil_vars = set([self.W.name, self.V.name, self.Q.name]) #need to keep first line 0
def _inference(self):
with tf.variable_scope(self._name):
#initial
loss = tf.reshape(self._zeros,[-1,1],name='loss') #(none,1)
if not self._data_file == 'WC-C':
s_index = tf.reshape(self._paths[:,0],[-1,1]) #(none,1)
q_emb = tf.nn.embedding_lookup(self.Q, self._queries) #Ax_ij shape is (batch, sentence_size ,embedding_size)
q = tf.reduce_sum(q_emb, 1) #shape is (batch,embed) V^T*bag_of_words(q)
#single entity
t = tf.nn.embedding_lookup(self.W, self._answers_id) #(batch,embed)
tt = tf.nn.embedding_lookup(self.W, self._paddings)
ent_embedding = self.W #(ent,embed)
#path representation
wrong_path = self._paths
tf.random_shuffle(wrong_path)
if not self._data_file == 'WC-C':
t += tf.reduce_sum(tf.nn.embedding_lookup(self.W, self._paths),1) - tf.nn.embedding_lookup(self.W, self._paths[:,2])
tt += tf.reduce_sum(tf.nn.embedding_lookup(self.W, wrong_path),1) - tf.nn.embedding_lookup(self.W, wrong_path[:,2])
else:
t += tf.reduce_sum(tf.nn.embedding_lookup(self.W, self._paths[:,0,:]),1) - tf.nn.embedding_lookup(self.W, self._paths[:,0,2])
tt += tf.reduce_sum(tf.nn.embedding_lookup(self.W, wrong_path[:,0,:]),1) - tf.nn.embedding_lookup(self.W, wrong_path[:,0,2])
t += tf.reduce_sum(tf.nn.embedding_lookup(self.W, self._paths[:,1,:]),1) - tf.nn.embedding_lookup(self.W, self._paths[:,1,2])
tt += tf.reduce_sum(tf.nn.embedding_lookup(self.W, wrong_path[:,1,:]),1) - tf.nn.embedding_lookup(self.W, wrong_path[:,1,2])
#subgraph representation
'''
KBs = tf.reshape(self._KBs,[self._ent_size,-1]) #(ent, rel * tail)
subgraph = tf.nn.embedding_lookup(KBs,self._answers_id) #(batch, rel*tail)
wrong_graph = tf.nn.embedding_lookup(KBs, self._paddings) #(batch, rel*tail)
t += tf.reduce_sum(tf.nn.embedding_lookup(self.V, subgraph),1) #(b,r*t,e)->(b,e)
tt += tf.reduce_sum(tf.nn.embedding_lookup(self.V, wrong_graph),1)
ent_embedding += tf.reduce_sum(tf.nn.embedding_lookup(self.V, KBs),1) #(v,r*t,e)_>(v,e)
'''
s = tf.reduce_sum(q * t, 1) #gold score, dot product
ss = tf.reduce_sum(q * tt, 1) #wrong score
loss = self._margin + ss - s
loss = tf.maximum(self._zeros,loss) #(b,1)
logits = tf.matmul(q, ent_embedding, transpose_b = True) #(b,e)*(v,e) =(b,v)
#logits = tf.matmul(q, self.EE, transpose_b = True) #(b,e)*(v,e) =(b,v)
if self._data_file == 'WC-C':
p = tf.reshape(self._paths[:,0,0:2],[-1,2])
p = tf.concat(axis=1,values=[p,tf.reshape(tf.cast(tf.argmax(logits,1),tf.int32),[-1,1])])
return loss, p, logits
p = s_index
for i in range(0,self._hops*2,2):
p = tf.concat(axis=1,values=[p,tf.reshape(self._paths[:,i+1],[-1,1])])
p = tf.concat(axis=1,values=[p,tf.reshape(tf.cast(tf.argmax(logits,1),tf.int32),[-1,1])])
return loss, p, logits
def batch_fit(self, KBs, queries, answers, answers_id, paths):
"""Runs the training algorithm over the passed batch
Args:
stories: Tensor (None, memory_size, 3)
queries: Tensor (None, sentence_size)
answers: Tensor (None, ent_size)
paths: Tensor
Returns:
loss: floating-point number, the loss computed for the batch
"""
nexample = queries.shape[0]
pad = np.random.randint(self._ent_size,size=nexample)
ones = np.ones(nexample)
zeros = np.zeros(nexample)
feed_dict = {self._KBs: KBs, self._queries: queries, self._answers: answers, self._answers_id: answers_id, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
self._istrain = True
loss, _ , _, _= self._sess.run([self.loss_op, self.train_op, self.Q_norm_op, self.E_norm_op], feed_dict=feed_dict)
return loss
def predict(self, KBs, queries, paths):
"""Predicts answers as one-hot encoding.
Args:
stories: Tensor (None, memory_size, 3)
queries: Tensor (None, sentence_size)
Returns:
answers: id (None, 1) ,predict_op = max(1, [None,ent_size])
"""
nexample = queries.shape[0]
pad = np.arange(nexample)
ones = np.ones(nexample)
zeros = np.zeros(nexample)
feed_dict = {self._KBs: KBs, self._queries: queries, self._paths: paths, self._paddings: pad, self._ones: ones, self._zeros: zeros}
self._istrain = False
return self._sess.run([self.predict_op,self.score_op], feed_dict=feed_dict)
def store(self):
file = os.path.join(self._checkpoint_dir, self._name)
#print(" [*] save current parameters to %s." % file )
self._saver.save(self._sess, file)
def load(self):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(self._checkpoint_dir,latest_filename = 'checkpoint')
if ckpt and ckpt.model_checkpoint_path:
print ("[*] Read from %s" % ckpt.model_checkpoint_path)
self._saver.restore(self._sess, ckpt.model_checkpoint_path)
else:
print (" [!] Test mode but no checkpoint found")
#raise Exception(" [!] Trest mode but no checkpoint found")
class Seq2Seq(object):
    """LSTM encoder-decoder baseline that decodes the reasoning path as a sequence.

    The encoder reads the question word embeddings; the decoder emits
    alternating relation/entity ids over a joint vocabulary of size
    nrels + nents.  Trained with per-step softmax cross entropy.
    Python 2 / TF1 code (tf.contrib.rnn, tf.contrib.layers).
    """
    def __init__(self, config, sess):
        """Build the whole TF1 graph (inputs, variables, loss, train op) on ``sess``."""
        self._data_file = config.data_file
        self._path_size = config.path_size
        self._batch_size = config.batch_size
        self._vocab_size = config.nwords
        self._rel_size = config.nrels
        self._ent_size = config.nents
        self._kb_size = config.nrels + config.nents  # joint output vocabulary
        self._sentence_size = config.query_size
        self._embedding_size = config.edim
        self._hops = config.nhop
        self._max_grad_norm = config.max_grad_norm
        self._init = tf.contrib.layers.xavier_initializer()
        # self._nonlin = nonlin
        # self._init = tf.random_normal_initializer(stddev=config.init_std)
        self._opt = tf.train.AdamOptimizer(learning_rate=config.init_lr)
        #self._opt = tf.train.GradientDescentOptimizer(learning_rate=config.init_lr)
        self._name = "Seq2Seq"
        self._checkpoint_dir = config.checkpoint_dir+'/'+self._name
        if not os.path.exists(self._checkpoint_dir):
            os.makedirs(self._checkpoint_dir)
        self._encoder = config.encoder
        self._build_inputs()
        self._build_vars()
        self._saver = tf.train.Saver(max_to_keep=10)
        #encoding_shape = _sentence_size * _embedding_size
        #self._encoding = tf.constant(position_encoding(self._sentence_size, self._embedding_size), name="encoding")
        # cross entropy as loss
        stepwise_cross_entropy, self._decoder_prediction = self._inference() # (batch_size, ent_size)
        #print "predict",self._decoder_prediction #(b,7)
        loss_op = tf.reduce_mean(stepwise_cross_entropy,name="loss_op")
        # gradient pipeline, seem not affect much
        #grads_and_vars = self._opt.compute_gradients(loss_op,[self.A,self.B,self.C,self.R,self.TA,self.TC])
        grads_and_vars = self._opt.compute_gradients(loss_op)
        grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars if g is not None]
        grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
        # Zero the gradient of row 0 (the nil/padding row) of listed embeddings.
        nil_grads_and_vars = []
        for g, v in grads_and_vars:
            if v.name in self._nil_vars:
                nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                nil_grads_and_vars.append((g, v))
        '''
        for g,v in nil_grads_and_vars:
            print g, v.name
        '''
        #grads_and_vars = [(tf.Print(g, [v.name,str(g.get_shape()),g], summarize=1e1/2), v) for g, v in nil_grads_and_vars]
        #train_op = self._opt.apply_gradients(grads_and_vars, name="train_op")
        train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
        # assign ops
        self.loss_op = loss_op
        self.train_op = train_op
        init_op = tf.global_variables_initializer()
        self._sess = sess
        self._sess.run(init_op)
    def _build_inputs(self):
        """Declare the feed placeholders (queries, gold paths, zero ids)."""
        self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
        if self._data_file == "WC-C":
            # Conjunctive questions carry two paths per example.
            self._paths = tf.placeholder(tf.int32, [None, 2, self._path_size], name="paths")
        else:
            self._paths = tf.placeholder(tf.int32, [None, self._path_size], name="paths") #id for e1,r1,e2,r2,a
        self._zeros = tf.placeholder(tf.int32, [None], name="paddings") #for add
    def _build_vars(self):
        """Create the entity (EE) and question-word (QE) embedding tables.

        Row 0 of each table is the nil/padding row and is kept at zero via
        ``self._nil_vars`` + ``zero_nil_slot`` in __init__.
        """
        with tf.variable_scope(self._name):
            nil_word_slot = tf.zeros([1, self._embedding_size])
            E = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._ent_size-1, self._embedding_size]) ])
            Q = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
            self.EE = tf.Variable(E, name="EE") # encode entity to vector to calculate weight
            self.QE = tf.Variable(Q, name="QE")# encode question-words to vector
            self._nil_vars = set([self.EE.name, self.QE.name]) #need to keep first line 0
    def _inference(self):
        """Build the encoder-decoder graph.

        Returns:
            (stepwise_cross_entropy, decoder_prediction): per-step softmax
            cross entropy over the joint kb vocabulary, and per-step argmax
            predictions (batch, decode_len).

        NOTE(review): relation ids from _paths are used as decoder targets
        without the ``+ self._ent_size`` offset into the joint (nents+nrels)
        output space — the dead comment below suggests the offset was once
        intended; confirm against the id scheme in data_process.
        """
        if not self._data_file == 'WC-C':
            _paths = self._paths
        else:
            # Flatten the two WC-C paths into one row.
            # assumes path_size == 5 here (2 * 5 == 10) — TODO confirm
            _paths = tf.reshape(self._paths,[-1,10])
        encoder_inputs_embedded = tf.nn.embedding_lookup(self.QE, self._queries) #(b,s,e)
        eos = tf.nn.embedding_lookup(self.EE, self._zeros) #(b,e)
        state = tf.nn.embedding_lookup(self.EE, _paths[:,0]) #(b,e)
        # Decoder input starts with the <eos>/nil embedding; first target is the
        # path's start entity (teacher forcing throughout).
        decoder_inputs_embedded = tf.expand_dims(eos,1)
        decoder_targets_embedded = tf.expand_dims(state,1)
        decoder_targets = tf.reshape(_paths[:,0],[-1,1])
        for hop in range(self._hops):
            if not self._data_file == 'WC-C':
                e_p = tf.nn.embedding_lookup(self.EE, self._paths[:,2*hop]) #(b,e)
                r = tf.nn.embedding_lookup(self.EE, self._paths[:,2*hop+1]) #(b,e)
                e = tf.nn.embedding_lookup(self.EE, self._paths[:,2*hop+2]) #(b,e)
                decoder_targets = tf.concat(axis=1,values=[decoder_targets, tf.reshape(self._paths[:,2*hop+1],[-1,1]),tf.reshape(self._paths[:,2*hop+2],[-1,1])])
            else:
                e_p = tf.nn.embedding_lookup(self.EE, self._paths[:,hop,0]) #(b,e)
                r = tf.nn.embedding_lookup(self.EE, self._paths[:,hop,1]) #(b,e)
                e = tf.nn.embedding_lookup(self.EE, self._paths[:,hop,2]) #(b,e)
                decoder_targets = tf.concat(axis=1,values=[decoder_targets, tf.reshape(self._paths[:,hop,1],[-1,1]),tf.reshape(self._paths[:,hop,2],[-1,1])])
            decoder_inputs_embedded = tf.concat(axis=1,values=[decoder_inputs_embedded, tf.expand_dims(e_p,1), tf.expand_dims(r,1)])
            decoder_targets_embedded = tf.concat(axis=1,values=[decoder_targets_embedded, tf.expand_dims(r,1), tf.expand_dims(e,1)])
            #print "decoder_targets",decoder_targets,self._paths[:,2*hop+1]+self._ent_size,self._paths[:,2*hop+2]
        encoder_cell = tf.contrib.rnn.LSTMCell(32)
        encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(encoder_cell, encoder_inputs_embedded, dtype=tf.float32, time_major=False)
        decoder_cell = tf.contrib.rnn.LSTMCell(32)
        # Decoder is seeded with the encoder's final state.
        decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(decoder_cell, decoder_inputs_embedded,initial_state=encoder_final_state,dtype=tf.float32, time_major=False, scope="plain_decoder")
        decoder_logits = tf.contrib.layers.linear(decoder_outputs,self._kb_size)
        decoder_prediction = tf.argmax(decoder_logits, 2)
        stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(decoder_targets, depth=self._kb_size, dtype=tf.float32),logits=decoder_logits)
        return stepwise_cross_entropy, decoder_prediction
    def batch_fit(self, queries, paths):
        """Run one training step; returns the mean cross-entropy loss."""
        nexample = queries.shape[0]
        zeros = np.zeros(nexample)
        feed_dict = {self._queries: queries, self._paths: paths, self._zeros: zeros}
        loss, _ = self._sess.run([self.loss_op, self.train_op], feed_dict=feed_dict)
        return loss
    def predict(self, queries, paths):
        """Return the decoder's per-step argmax predictions for the batch."""
        nexample = queries.shape[0]
        zeros = np.zeros(nexample)
        feed_dict = {self._queries: queries, self._paths: paths, self._zeros: zeros}
        predict_ = self._sess.run(self._decoder_prediction, feed_dict)
        return predict_
| 55,852 | 48.166373 | 211 | py |
IRN | IRN-master/train.py | import os
import tensorflow as tf
import numpy as np
import time
from data_process import process_data, process_data_c
from utils import MultiAcc, MultiAcc_C, RealAnswer, ScoreRank, InSet, InnerRight
from sklearn import cross_validation, metrics
from model import IRN, IRN_C
# Hyper-parameter and run-configuration flags (tf.app.flags singleton).
flags = tf.app.flags
flags.DEFINE_integer("edim", 50, "words vector dimension [50]")
flags.DEFINE_integer("nhop", 3, "number of hops [2/3+1]")  # recomputed in main() from path_size
flags.DEFINE_integer("batch_size", 50, "batch size to use during training [50]")
flags.DEFINE_integer("nepoch", 5000, "number of epoch to use during training [1000]")
flags.DEFINE_integer("inner_nepoch",3, "PRN inner loop [5]")
flags.DEFINE_float("init_lr", 0.001, "initial learning rate")
flags.DEFINE_float("epsilon", 1e-8, "Epsilon value for Adam Optimizer.")
#flags.DEFINE_float("init_hid", 0.1, "initial internal state value [0.1]")
#flags.DEFINE_float("init_std", 0.05, "weight initialization std [0.05]")
flags.DEFINE_float("max_grad_norm", 20, "clip gradients to this norm [20]")
flags.DEFINE_string("dataset", "pq2h", "pq/pql/wc/")  # selects the data/KB files below
flags.DEFINE_string("checkpoint_dir", "checkpoint", "checkpoint directory")
flags.DEFINE_boolean("unseen",False,"True to hide 3 relations when training [False]")
FLAGS = flags.FLAGS
# Resolve --dataset into (data_dir, data_file, KB_file).  WorldCup2014
# variants keep the default "WC2014" data_dir / KB_file; PathQuestion
# variants override all three.  An unrecognized dataset leaves
# FLAGS.data_file unset, exactly like the original if/elif chain.
FLAGS.data_dir = "WC2014"
FLAGS.KB_file = "WC2014"
_DATASET_FILES = {
    'wc1h': ("WC2014", "WC-P1", "WC2014"),
    'wc2h': ("WC2014", "WC-P2", "WC2014"),
    'wcm': ("WC2014", "WC-P", "WC2014"),
    'wcc': ("WC2014", "WC-C", "WC2014"),
    'pql2h': ("PathQuestion", "PQL-2H", "PQL2-KB"),
    'pql3h': ("PathQuestion", "PQL-3H", "PQL3-KB"),
    'pq2h': ("PathQuestion", "PQ-2H", "2H-kb"),
    'pq3h': ("PathQuestion", "PQ-3H", "3H-kb"),
}
if FLAGS.dataset in _DATASET_FILES:
    FLAGS.data_dir, FLAGS.data_file, FLAGS.KB_file = _DATASET_FILES[FLAGS.dataset]
def main(_):
    """Entry point: load the KB/QA data selected by FLAGS, then train and
    validate an IRN model (IRN_C for the "WC-C" conjunctive dataset),
    checkpointing whenever validation accuracy improves.

    Args:
        _: unused positional argument supplied by tf.app.run().
    """
    word2id = {}
    ent2id = {}
    rel2id = {}
    words = set()
    relations = set()
    entities = set()

    # Checkpoints are nested as checkpoint_dir/<data_file>/<KB_file>.
    FLAGS.checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.data_file)
    FLAGS.checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.KB_file)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    KB_file = '%s/%s.txt' % (FLAGS.data_dir, FLAGS.KB_file)
    data_file = '%s/%s.txt' % (FLAGS.data_dir, FLAGS.data_file)

    start = time.time()
    if FLAGS.data_file == "WC-C":
        Q, A, P, S, Triples, FLAGS.query_size = process_data_c(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities)
        FLAGS.path_size = len(P[0][0])  # 5
    else:
        Q, A, P, S, Triples, FLAGS.query_size = process_data(KB_file, data_file, word2id, rel2id, ent2id, words, relations, entities)
        FLAGS.path_size = len(P[0])  # 5 or 7
    # FIX: floor division -- plain `/` yields a float on Python 3, and nhop
    # must be an integer hop count.
    FLAGS.nhop = FLAGS.path_size // 2
    print("read data cost %f seconds" % (time.time() - start))

    FLAGS.nwords = len(word2id)
    FLAGS.nrels = len(rel2id)
    FLAGS.nents = len(ent2id)

    # NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
    # modern installs need sklearn.model_selection.train_test_split instead.
    trainQ, testQ, trainA, testA, trainP, testP, trainS, testS = cross_validation.train_test_split(Q, A, P, S, test_size=.1, random_state=123)
    trainQ, validQ, trainA, validA, trainP, validP, trainS, validS = cross_validation.train_test_split(trainQ, trainA, trainP, trainS, test_size=.11, random_state=0)

    # For UNSEEN relations (incomplete-kb setting, change data_utils.py):
    # drop training questions whose gold path uses relation id 1, 2 or 3.
    if FLAGS.unseen:
        id_c = []
        for idx in range(trainQ.shape[0]):
            if trainP[idx][-4] in (1, 2, 3):
                id_c.append(idx)
        trainQ = np.delete(trainQ, id_c, axis=0)
        trainA = np.delete(trainA, id_c, axis=0)
        trainP = np.delete(trainP, id_c, axis=0)
        trainS = np.delete(trainS, id_c, axis=0)

    n_train = trainQ.shape[0]
    n_test = testQ.shape[0]
    n_val = validQ.shape[0]
    print("Training Size", n_train)
    print("Validation Size", n_val)
    print("Testing Size", n_test)

    #
    # other data and some flags
    #
    id2word = dict(zip(word2id.values(), word2id.keys()))
    id2rel = dict(zip(rel2id.values(), rel2id.keys()))  # e.g. {0: '<end>', 1: 'cause_of_death', ...}

    train_labels = np.argmax(trainA, axis=1)
    test_labels = np.argmax(testA, axis=1)
    valid_labels = np.argmax(validA, axis=1)
    print(flags.FLAGS.__flags)

    # Batch (start, end) index pairs; trailing examples that do not fill a
    # batch are dropped.
    # FIX: materialize as a list -- zip() is a one-shot iterator on Python 3,
    # so it could neither be shuffled in place nor re-iterated every epoch.
    batches = list(zip(range(0, n_train - FLAGS.batch_size, FLAGS.batch_size), range(FLAGS.batch_size, n_train, FLAGS.batch_size)))
    r = np.arange(n_train)  # instance idx to be shuffled
    l = n_train // FLAGS.batch_size * FLAGS.batch_size  # total instances used in training (FIX: floor division)

    with tf.Session() as sess:
        if not FLAGS.data_file == "WC-C":
            model = IRN(FLAGS, sess)
            print("KB Size", Triples.shape[0])  # 144
            # FIX: list(zip(...)) for the same Python 3 reason as `batches`.
            pre_batches = list(zip(range(0, Triples.shape[0] - FLAGS.batch_size, FLAGS.batch_size), range(FLAGS.batch_size, Triples.shape[0], FLAGS.batch_size)))
            pre_val_preds = model.predict(Triples, validQ, validP)
            pre_test_preds = model.predict(Triples, testQ, testP)
            best_val_epoch = -1
            best_val_acc = MultiAcc(validP, pre_val_preds, FLAGS.path_size)
            best_val_true_acc = InSet(validP, validS, pre_val_preds)
            for t in range(1, FLAGS.nepoch + 1):
                start = time.time()
                np.random.shuffle(batches)
                # Inner loop: pretrain on KB triples before each epoch.
                for i in range(FLAGS.inner_nepoch):
                    np.random.shuffle(pre_batches)
                    pre_total_cost = 0.0
                    for s, e in pre_batches:
                        pre_total_cost += model.batch_pretrain(Triples[s:e], trainQ[0:FLAGS.batch_size], trainA[0:FLAGS.batch_size], np.argmax(trainA[0:FLAGS.batch_size], axis=1), trainP[0:FLAGS.batch_size])
                total_cost = 0.0
                for s, e in batches:
                    total_cost += model.batch_fit(Triples[s:e], trainQ[s:e], trainA[s:e], np.argmax(trainA[s:e], axis=1), trainP[s:e])
                if t % 1 == 0:  # evaluate every epoch
                    train_preds = model.predict(Triples, trainQ, trainP)
                    train_acc = MultiAcc(trainP, train_preds, FLAGS.path_size)
                    train_true_acc = InSet(trainP, trainS, train_preds)
                    val_preds = model.predict(Triples, validQ, validP)  # (n_val,1) each is answer id
                    val_acc = MultiAcc(validP, val_preds, FLAGS.path_size)
                    val_true_acc = InSet(validP, validS, val_preds)
                    if val_true_acc > best_val_true_acc:
                        best_val_epoch = t
                        best_val_true_acc = val_true_acc
                        model.store()
                    print('-----------------------')
                    print('Epoch', t)
                    print('timing', (time.time() - start))
                    print('Total Cost:', total_cost)
                    print('Train Accuracy:', train_true_acc)
                    print('Validation Accuracy:', val_true_acc)
                    print('Best Validation epoch & Acc:', best_val_epoch, best_val_true_acc)
                    print('-----------------------')
                '''
                if not t % 100 == 0:
                    continue
                idx = model.match()
                for i in range(1,14):
                    print "relation: ",id2word[i]
                    print "similar words are: "
                    for iid in idx[i]:
                        print id2word[iid]
                    print('-----------------------')
                print('-----------------------')
                '''
        elif FLAGS.data_file == "WC-C":
            model = IRN_C(FLAGS, sess)
            print("KB Size", Triples.shape[0])  # 144
            # FIX: list(zip(...)) for the same Python 3 reason as `batches`.
            pre_batches = list(zip(range(0, Triples.shape[0] - FLAGS.batch_size, FLAGS.batch_size), range(FLAGS.batch_size, Triples.shape[0], FLAGS.batch_size)))
            pre_val_preds = model.predict(Triples, validQ, validP)
            pre_test_preds = model.predict(Triples, testQ, testP)
            best_val_epoch = -1
            best_val_acc = MultiAcc_C(validP, pre_val_preds)
            best_val_true_acc = InSet(validP, validS, pre_val_preds)
            for t in range(1, FLAGS.nepoch + 1):
                start = time.time()
                np.random.shuffle(batches)
                for i in range(FLAGS.inner_nepoch):
                    np.random.shuffle(pre_batches)
                    pre_total_cost = 0.0
                    for s, e in pre_batches:
                        pre_total_cost += model.batch_pretrain(Triples[s:e], trainQ[0:FLAGS.batch_size], trainA[0:FLAGS.batch_size], np.argmax(trainA[0:FLAGS.batch_size], axis=1), trainP[0:FLAGS.batch_size])
                total_cost = 0.0
                for s, e in batches:
                    total_cost += model.batch_fit(Triples[s:e], trainQ[s:e], trainA[s:e], np.argmax(trainA[s:e], axis=1), trainP[s:e])
                if t % 1 == 0:  # evaluate every epoch
                    train_preds = model.predict(Triples, trainQ, trainP)
                    train_acc = MultiAcc_C(trainP, train_preds)
                    train_true_acc = InSet(trainP, trainS, train_preds)
                    val_preds = model.predict(Triples, validQ, validP)  # (n_val,1) each is answer id
                    val_acc = MultiAcc_C(validP, val_preds)
                    val_true_acc = InSet(validP, validS, val_preds)
                    if val_true_acc > best_val_true_acc:
                        best_val_epoch = t
                        best_val_true_acc = val_true_acc
                        model.store()
                    print('-----------------------')
                    print('Epoch', t)
                    print('timing', (time.time() - start))
                    print('Total Cost:', total_cost)
                    print('Train Accuracy:', train_true_acc)
                    print('Validation Accuracy:', val_true_acc)
                    print('Best Validation epoch & Acc:', best_val_epoch, best_val_true_acc)
                    print('-----------------------')
# Standard TF-1.x entry point; tf.app.run() parses flags and calls main().
# FIX: removed dataset-extraction junk fused onto the final line.
if __name__ == '__main__':
    tf.app.run()
spring | spring-main/setup.py | from setuptools import setup
# Package metadata: installs the `spring_amr` package (SPRING AMR parser/generator).
setup(
    name='spring_amr',
    version='1.0',
    packages=['spring_amr'],
    url='https://github.com/SapienzaNLP/spring',
    license='CC BY-NC-SA 4.0',
    author='Michele Bevilacqua, Rexhina Blloshmi and Roberto Navigli',
    author_email='{bevilacqua,blloshmi,navigli}@di.uniroma1.it',
    description='Parse sentences into AMR graphs and generate sentences from AMR graphs without breaking a sweat!'
)
| 441 | 33 | 114 | py |
spring | spring-main/spring_amr/entities.py | from collections import defaultdict
def read_entities(sentences, graphs, just_tagged=True):
    """Yield, per (sentence, graph) pair, the wikified entities of the graph.

    For each pair yields a tuple
    ``(index, with_wikis, name_to_entity, name_to_ops)`` where
    ``with_wikis`` maps an entity var to ``(triple position, wiki value)``,
    ``name_to_entity`` maps a name node to its owning entity var, and
    ``name_to_ops`` maps a name node to its ``:opN`` triples.
    When ``just_tagged`` is True, ``:wiki`` values equal to ``'-'`` (untagged)
    are ignored.
    """
    for index, (_sentence, graph) in enumerate(zip(sentences, graphs)):
        wiki_nodes = {}
        name_owner = {}
        ops_by_name = defaultdict(list)

        # Collect :wiki edges (optionally skipping untagged '-' values).
        for pos, (src, role, tgt) in enumerate(graph.triples):
            if tgt == '-' and just_tagged:
                continue
            if role == ':wiki':
                wiki_nodes[src] = (pos, tgt)

        # :name edges hanging off wikified nodes.
        for src, role, tgt in graph.triples:
            if role == ':name' and src in wiki_nodes:
                name_owner[tgt] = src

        # :opN triples belonging to those name nodes.
        for triple in graph.triples:
            src, role, _ = triple
            if src in name_owner and role.startswith(':op'):
                ops_by_name[src].append(triple)

        yield (index, wiki_nodes, name_owner, ops_by_name)
spring | spring-main/spring_amr/optim.py | # taken from
import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
self.degenerated_to_sgd = degenerated_to_sgd
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
buffer=[[None, None, None] for _ in range(10)])
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt(
(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
N_sma_max - 2)) / (1 - beta1 ** state['step'])
elif self.degenerated_to_sgd:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
# more conservative since it's an approximated value
if N_sma >= 5:
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
p.data.copy_(p_data_fp32)
elif step_size > 0:
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
return loss | 4,345 | 42.46 | 111 | py |
spring | spring-main/spring_amr/penman.py | from penman import load as load_, Graph, Triple
from penman import loads as loads_
from penman import encode as encode_
from penman.model import Model
from penman.models.noop import NoOpModel
from penman.models import amr
# Penman graph models used throughout this module:
op_model = Model()        # standard penman model (performs (de)reification)
noop_model = NoOpModel()  # keeps triples verbatim, no dereification
amr_model = amr.model     # AMR-specific model, used for encoding
DEFAULT = op_model        # model chosen when the caller passes dereify=None
def _get_model(dereify):
    """Map the tri-state ``dereify`` flag (None/True/False) to a penman model."""
    if dereify is None:
        return DEFAULT
    return op_model if dereify else noop_model
def _remove_wiki(graph):
    """Return a copy of ``graph`` whose every ``:wiki`` value is replaced by '+'.

    Metadata is carried over unchanged.
    """
    new_triples = []
    for triple in graph.triples:
        src, role, _tgt = triple
        if role == ':wiki':
            triple = Triple(src, role, '+')
        new_triples.append(triple)
    stripped = Graph(new_triples)
    stripped.metadata = graph.metadata
    return stripped
def load(source, dereify=None, remove_wiki=False):
    """Read AMR graphs from ``source`` (file/path), optionally stripping wikis."""
    graphs = load_(source=source, model=_get_model(dereify))
    if remove_wiki:
        graphs = [_remove_wiki(g) for g in graphs]
    return graphs
def loads(string, dereify=None, remove_wiki=False):
    """Read AMR graphs from a string, optionally stripping wikis."""
    graphs = loads_(string=string, model=_get_model(dereify))
    if remove_wiki:
        graphs = [_remove_wiki(g) for g in graphs]
    return graphs
def encode(g, top=None, indent=-1, compact=False):
    """Serialize graph ``g`` to a PENMAN string using the AMR model."""
    return encode_(g=g, top=top, indent=indent, compact=compact, model=amr_model)
spring | spring-main/spring_amr/utils.py | from glob import glob
from pathlib import Path
import torch
from transformers import AutoConfig
from spring_amr.dataset import AMRDataset, AMRDatasetTokenBatcherAndLoader
from spring_amr.modeling_bart import AMRBartForConditionalGeneration
from spring_amr.tokenization_bart import AMRBartTokenizer, PENMANBartTokenizer
def instantiate_model_and_tokenizer(
    name=None,
    checkpoint=None,
    additional_tokens_smart_init=True,
    dropout=0.15,
    attention_dropout=0.15,
    from_pretrained=True,
    init_reverse=False,
    collapse_name_ops=False,
    penman_linearization=False,
    use_pointer_tokens=False,
    raw_graph=False,
):
    """Build the (BART model, AMR tokenizer) pair used by SPRING.

    Args:
        name: HuggingFace model name; defaults to 'facebook/bart-large'.
        checkpoint: optional path to a saved state dict (loaded onto CPU).
        additional_tokens_smart_init: initialize embeddings of the added AMR
            tokens from the mean of their subword pieces plus small noise.
        dropout, attention_dropout: values written into the model config.
        from_pretrained: load pretrained weights vs. random initialization.
        init_reverse: also set up the graph-to-text reverse model.
        collapse_name_ops / penman_linearization / use_pointer_tokens /
            raw_graph: tokenizer linearization options (raw_graph requires
            penman_linearization).

    Returns:
        (model, tokenizer) tuple.
    """
    if raw_graph:
        assert penman_linearization

    skip_relations = False

    if name is None:
        name = 'facebook/bart-large'

    # bart-base reuses the bart-large vocabulary/tokenizer.
    if name == 'facebook/bart-base':
        tokenizer_name = 'facebook/bart-large'
    else:
        tokenizer_name = name

    config = AutoConfig.from_pretrained(name)
    config.output_past = False
    config.no_repeat_ngram_size = 0
    config.prefix = " "
    config.output_attentions = True
    config.dropout = dropout
    config.attention_dropout = attention_dropout

    if penman_linearization:
        tokenizer = PENMANBartTokenizer.from_pretrained(
            tokenizer_name,
            collapse_name_ops=collapse_name_ops,
            use_pointer_tokens=use_pointer_tokens,
            raw_graph=raw_graph,
            config=config,
        )
    else:
        tokenizer = AMRBartTokenizer.from_pretrained(
            tokenizer_name,
            collapse_name_ops=collapse_name_ops,
            use_pointer_tokens=use_pointer_tokens,
            config=config,
        )

    if from_pretrained:
        model = AMRBartForConditionalGeneration.from_pretrained(name, config=config)
    else:
        model = AMRBartForConditionalGeneration(config)
    # Grow the embedding matrix to cover the tokens added for AMR.
    model.resize_token_embeddings(len(tokenizer.encoder))

    if additional_tokens_smart_init:
        modified = 0
        for tok, idx in tokenizer.encoder.items():
            tok = tok.lstrip(tokenizer.INIT)

            # Only newly-added tokens (beyond the original vocab) are re-initialized.
            if idx < tokenizer.old_enc_size:
                continue
            # Decompose each special token into meaningful word pieces.
            elif tok.startswith('<pointer:') and tok.endswith('>'):
                tok_split = ['pointer', str(tok.split(':')[1].strip('>'))]
            elif tok.startswith('<'):
                continue
            elif tok.startswith(':'):
                if skip_relations:
                    continue
                elif tok.startswith(':op'):
                    tok_split = ['relation', 'operator', str(int(tok[3:]))]
                elif tok.startswith(':snt'):
                    tok_split = ['relation', 'sentence', str(int(tok[4:]))]
                elif tok.startswith(':ARG'):
                    tok_split = ['relation', 'argument', str(int(tok[4:]))]
                else:
                    tok_split = ['relation'] + tok.lstrip(':').split('-')
            else:
                tok_split = tok.split('-')

            # Map each piece to an existing vocab entry, or BPE-split it.
            tok_split_ = tok_split
            tok_split = []
            for s in tok_split_:
                s_ = s + tokenizer.INIT
                if s_ in tokenizer.encoder:
                    tok_split.append(s_)
                else:
                    tok_split.extend(tokenizer._tok_bpe(s))

            vecs = []
            for s in tok_split:
                idx_split = tokenizer.encoder.get(s, -1)
                if idx_split > -1:
                    vec_split = model.model.shared.weight.data[idx_split].clone()
                    vecs.append(vec_split)

            # New embedding = mean of piece embeddings + uniform noise.
            if vecs:
                vec = torch.stack(vecs, 0).mean(0)
                noise = torch.empty_like(vec)
                noise.uniform_(-0.1, +0.1)
                model.model.shared.weight.data[idx] = vec + noise
                modified += 1

    if init_reverse:
        model.init_reverse_model()

    if checkpoint is not None:
        model.load_state_dict(torch.load(checkpoint, map_location='cpu')['model'])

    return model, tokenizer
def instantiate_loader(
    glob_pattn,
    tokenizer,
    batch_size=500,
    evaluation=True,
    out=None,
    use_recategorization=False,
    remove_longer_than=None,
    remove_wiki=False,
    dereify=True,
):
    """Build a token-budget loader over every file matched by ``glob_pattn``.

    In evaluation mode the gold files are concatenated into ``out`` (required)
    so predictions can later be scored against a single reference file; batches
    are then produced in order (no shuffling).
    """
    if isinstance(glob_pattn, (str, Path)):
        glob_pattn = [glob_pattn]
    paths = [Path(p) for pattern in glob_pattn for p in glob(pattern)]

    if evaluation:
        assert out is not None
        Path(out).write_text('\n\n'.join(p.read_text() for p in paths))

    dataset = AMRDataset(
        paths,
        tokenizer,
        use_recategorization=use_recategorization,
        remove_longer_than=remove_longer_than,
        remove_wiki=remove_wiki,
        dereify=dereify,
    )
    return AMRDatasetTokenBatcherAndLoader(
        dataset,
        batch_size=batch_size,
        shuffle=not evaluation,
    )
| 5,027 | 28.751479 | 84 | py |
spring | spring-main/spring_amr/dataset.py | import logging
import random
import torch
from cached_property import cached_property
from torch.utils.data import Dataset
from spring_amr.IO import read_raw_amr_data
def reverse_direction(x, y, pad_token_id=1):
    """Swap encoder/decoder roles: the graph batch becomes the source and the
    sentence batch becomes the target.

    ``x`` holds ``input_ids``; ``y`` holds ``decoder_input_ids`` and
    ``lm_labels``. Positions equal to ``pad_token_id`` are masked out of the
    new attention mask.
    """
    graph_ids = torch.cat([y['decoder_input_ids'], y['lm_labels'][:, -1:]], 1)
    graph_mask = torch.ones_like(graph_ids)
    graph_mask[graph_ids == pad_token_id] = 0
    new_x = {'input_ids': graph_ids, 'attention_mask': graph_mask}
    new_y = {
        'decoder_input_ids': x['input_ids'][:, :-1],
        'lm_labels': x['input_ids'][:, 1:],
    }
    return new_x, new_y
class AMRDataset(Dataset):
    """Torch Dataset pairing AMR graphs with their source sentences.

    Graphs are linearized by the tokenizer at construction time; samples whose
    sentence fails to encode or whose linearization exceeds
    ``remove_longer_than`` are dropped.
    """

    def __init__(
        self,
        paths,
        tokenizer,
        device=torch.device('cpu'),
        use_recategorization=False,
        remove_longer_than=None,
        remove_wiki=False,
        dereify=True,
    ):
        self.paths = paths
        self.tokenizer = tokenizer
        self.device = device
        graphs = read_raw_amr_data(paths, use_recategorization, remove_wiki=remove_wiki, dereify=dereify)
        self.graphs = []
        self.sentences = []
        self.linearized = []
        self.linearized_extra = []
        self.remove_longer_than = remove_longer_than
        for g in graphs:
            l, e = self.tokenizer.linearize(g)

            # Drop graphs whose sentence the tokenizer cannot encode.
            try:
                self.tokenizer.batch_encode_sentences([g.metadata['snt']])
            except:
                logging.warning('Invalid sentence!')
                continue

            if remove_longer_than and len(l) > remove_longer_than:
                continue
            if len(l) > 1024:
                logging.warning('Sequence longer than 1024 included. BART does not support it!')

            self.sentences.append(g.metadata['snt'])
            self.graphs.append(g)
            self.linearized.append(l)
            self.linearized_extra.append(e)

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        # Sample dict: id, raw sentence, and (if present) the linearized graph
        # token ids plus the tokenizer's extra linearization info.
        sample = {}
        sample['id'] = idx
        sample['sentences'] = self.sentences[idx]
        if self.linearized is not None:
            sample['linearized_graphs_ids'] = self.linearized[idx]
            sample.update(self.linearized_extra[idx])
        return sample

    def size(self, sample):
        # Token-length of a sample, used by the batcher's token budget.
        return len(sample['linearized_graphs_ids'])

    def collate_fn(self, samples, device=torch.device('cpu')):
        """Encode a list of samples into (x, y, extra) model batches."""
        x = [s['sentences'] for s in samples]
        x, extra = self.tokenizer.batch_encode_sentences(x, device=device)
        if 'linearized_graphs_ids' in samples[0]:
            y = [s['linearized_graphs_ids'] for s in samples]
            y, extra_y = self.tokenizer.batch_encode_graphs_from_linearized(y, samples, device=device)
            extra.update(extra_y)
        else:
            y = None
        extra['ids'] = [s['id'] for s in samples]
        return x, y, extra
class AMRDatasetTokenBatcherAndLoader:
    """Iterable loader that groups AMRDataset samples into batches bounded by a
    token budget: a batch costs (longest sample) * (number of samples), and a
    batch is emitted before that cost would exceed ``batch_size``.
    """

    def __init__(self, dataset, batch_size=800, device=torch.device('cpu'), shuffle=False, sort=False):
        assert not (shuffle and sort)
        self.batch_size = batch_size
        self.tokenizer = dataset.tokenizer
        self.dataset = dataset
        self.device = device
        self.shuffle = shuffle
        self.sort = sort

    def __iter__(self):
        # index batches -> sample batches -> collated tensor batches
        it = self.sampler()
        it = ([[self.dataset[s] for s in b] for b in it])
        it = (self.dataset.collate_fn(b, device=self.device) for b in it)
        return it

    @cached_property
    def sort_ids(self):
        # NOTE(review): sorted() here compares (index, length) tuples, so this
        # orders by dataset index descending, not by sentence length — confirm
        # this is the intended "sorted" ordering.
        lengths = [len(s.split()) for s in self.dataset.sentences]
        ids, _ = zip(*sorted(enumerate(lengths), reverse=True))
        ids = list(ids)
        return ids

    def sampler(self):
        """Yield lists of dataset indices, each within the token budget."""
        ids = list(range(len(self.dataset)))[::-1]
        if self.shuffle:
            random.shuffle(ids)
        if self.sort:
            ids = self.sort_ids.copy()

        batch_longest = 0
        batch_nexamps = 0
        batch_ntokens = 0
        batch_ids = []

        def discharge():
            # Emit the accumulated batch and reset all accumulators.
            nonlocal batch_longest
            nonlocal batch_nexamps
            nonlocal batch_ntokens
            ret = batch_ids.copy()
            batch_longest *= 0
            batch_nexamps *= 0
            batch_ntokens *= 0
            batch_ids[:] = []
            return ret

        while ids:
            idx = ids.pop()
            size = self.dataset.size(self.dataset[idx])
            cand_batch_ntokens = max(size, batch_longest) * (batch_nexamps + 1)
            # Flush before adding the sample would blow the budget.
            if cand_batch_ntokens > self.batch_size and batch_ids:
                yield discharge()
            batch_longest = max(batch_longest, size)
            batch_nexamps += 1
            batch_ntokens = batch_longest * batch_nexamps
            batch_ids.append(idx)

            # A single over-budget sample still forms its own batch.
            if len(batch_ids) == 1 and batch_ntokens > self.batch_size:
                yield discharge()

        if batch_ids:
            yield discharge()
| 4,991 | 32.503356 | 105 | py |
spring | spring-main/spring_amr/IO.py | import glob
from typing import List, Union, Iterable
from pathlib import Path
from spring_amr.penman import load as pm_load
def read_raw_amr_data(
    paths: List[Union[str, Path]],
    use_recategorization=False,
    dereify=True,
    remove_wiki=False,
):
    """Load AMR graphs from every path/glob pattern in ``paths``.

    With ``use_recategorization`` the original sentence is preserved in
    ``snt_orig`` and ``snt`` is rebuilt from the ``tokens`` metadata with
    bracket tokens (-LRB-, -RRB-, ...) removed.
    """
    assert paths
    if not isinstance(paths, Iterable):
        paths = [paths]

    graphs = []
    for pattern in paths:
        for match in glob.glob(str(pattern)):
            graphs.extend(pm_load(Path(match), dereify=dereify, remove_wiki=remove_wiki))
    assert graphs

    if use_recategorization:
        for graph in graphs:
            meta = graph.metadata
            meta['snt_orig'] = meta['snt']
            # NOTE(review): eval() on metadata — assumes trusted corpus files.
            tokens = eval(meta['tokens'])
            meta['snt'] = ' '.join(
                t for t in tokens
                if not (t.startswith(('-L', '-R')) and t.endswith('-'))
            )
    return graphs
spring | spring-main/spring_amr/linearization.py | import abc
import itertools
from collections import deque, defaultdict
import re
from typing import List, Optional, Dict, Any, Set, TypeVar
from cached_property import cached_property
from dataclasses import dataclass
import networkx as nx
import penman
@dataclass
class SemanticGraph:
    """A linearized semantic graph: node/edge token sequences plus
    backreferences for reentrancies and a var-to-instance mapping."""

    nodes_var: List[str]
    """
    List of linearized nodes, with special tokens.
    """
    edges: Optional[List[str]]
    """
    List of linearized edges, with special tokens.
    """
    backreferences: List[int]
    """
    List of backpointers to handle rentrancies and cycles.
    """
    var2instance: Dict[str, str]
    """
    Dict from var ids to 'lemmatized' readable strings qualifying the node (collapsing the :instance edge for AMR).
    """
    extra: Dict[str, Any]
    """
    Holds extra stuff that might be useful, e.g. alignments, NER, EL.
    """

    @cached_property
    def variables(self) -> Set[str]:
        """Set of variables in this semantic graph"""
        # Special tokens all start with '<'; everything else is a variable.
        variables = {v for v in self.nodes_var if not v.startswith('<')}
        return variables

    @property
    def resolved_nodes_var(self) -> List[str]:
        """Node var-ids with each backreference replaced by its source token."""
        return [self.nodes_var[b] for b in self.backreferences]

    @cached_property
    def nodes(self) -> List[str]:
        """Linearized nodes with varids replaced by instances"""
        return [self.var2instance.get(node, node) for node in self.nodes_var]

    @property
    def resolved_nodes(self) -> List[str]:
        """Instance-level nodes with each backreference resolved."""
        return [self.nodes[b] for b in self.backreferences]

    def src_occurrence(self, var: str) -> int:
        # NOTE(review): unimplemented stub — returns None despite the int annotation.
        pass
class BaseLinearizer(metaclass=abc.ABCMeta):
    """Abstract interface for graph linearizers."""

    @abc.abstractmethod
    def linearize(self, *args, **kwargs) -> SemanticGraph:
        """Convert a graph into a SemanticGraph linearization."""
        pass
class AMRTokens:
    """Special tokens used in the linearized AMR node/edge vocabularies."""

    START, END = '<', '>'
    _TEMPL = START + '{}' + END  # wraps a label in angle brackets

    # Node-sequence specials (BOS/EOS, start/stop of a node's children, pointers, literals).
    BOS_N = _TEMPL.format('s')
    EOS_N = _TEMPL.format('/s')
    START_N = _TEMPL.format('start')
    STOP_N = _TEMPL.format('stop')
    PNTR_N = _TEMPL.format('pointer')

    LIT_START = _TEMPL.format('lit')
    LIT_END = _TEMPL.format('/lit')

    # Backreference templates; XXX is replaced by the referenced position.
    BACKR_SRC_N = _TEMPL.format('backr:src:XXX')
    BACKR_TRG_N = _TEMPL.format('backr:trg:XXX')

    # Edge-sequence specials (same surface forms as the node ones).
    BOS_E = _TEMPL.format('s')
    EOS_E = _TEMPL.format('/s')
    START_E = _TEMPL.format('start')
    STOP_E = _TEMPL.format('stop')

    _FIXED_SPECIAL_TOKENS_N = {
        BOS_N, EOS_N, START_N, STOP_N}
    _FIXED_SPECIAL_TOKENS_E = {
        BOS_E, EOS_E, START_E, STOP_E}
    _FIXED_SPECIAL_TOKENS = _FIXED_SPECIAL_TOKENS_N | _FIXED_SPECIAL_TOKENS_E

    # match and read backreferences
    _re_BACKR_SRC_N = re.compile(BACKR_SRC_N.replace('XXX', r'([0-9]+)'))
    _re_BACKR_TRG_N = re.compile(BACKR_TRG_N.replace('XXX', r'([0-9]+)'))

    @classmethod
    def is_node(cls, string: str) -> bool:
        """True unless ``string`` is an edge label (starts with ':') or a fixed edge special."""
        if isinstance(string, str) and string.startswith(':'):
            return False
        elif string in cls._FIXED_SPECIAL_TOKENS_E:
            return False
        return True

    @classmethod
    def read_backr(cls, string: str) -> Optional:
        """Return the regex match of a src/trg backreference in ``string``, or None."""
        m_src = cls._re_BACKR_SRC_N.search(string)
        if m_src is not None:
            return m_src
        m_trg = cls._re_BACKR_TRG_N.search(string)
        if m_trg is not None:
            return m_trg
        return None
T = TypeVar('T')


def index_default(
    item: T, list_: List[T],
    start: Optional[int] = None,
    stop: Optional[int] = None,
    default: Optional[int] = None
):
    """Return the index of the first ``item`` in ``list_[start:stop]``.

    Indices are absolute (offset by ``start``); ``default`` is returned when
    the item does not occur in the slice.
    """
    if start is None:
        start = 0
    if stop is None:
        stop = len(list_)
    for position, candidate in enumerate(list_[start:stop], start=start):
        if candidate == item:
            return position
    return default
class AMRLinearizer(BaseLinearizer):
    """Linearizes a penman AMR graph into a SemanticGraph token sequence.

    Pipeline: optional :opN collapsing -> BFS linearization into parallel
    node/edge sequences -> interleaving edges into the node sequence ->
    optional replacement of backreferences with <pointer:N> tokens.
    """

    def __init__(
        self,
        use_pointer_tokens: bool = True,
        collapse_name_ops: bool = False,
    ):
        self.collapse_name_ops = collapse_name_ops
        self.interleave_edges = False
        self.use_pointer_tokens = use_pointer_tokens

    def _collapse_name_ops(self, amr):
        """Merge the :opN string literals of each name node into one :op1 literal
        joined with underscores."""
        # identify name triples
        name_vars = {}
        for i, (v1, rel, v2) in enumerate(amr.triples):
            if rel == ':instance' and v2 == 'name':
                name_vars[v1] = 1
        # check if they have ops
        name_vars_to_ops = defaultdict(list)
        for i, (v1, rel, v2) in enumerate(amr.triples):
            if v1 in name_vars and rel.startswith(':op'):
                name_vars_to_ops[v1].append((i, rel, v2.strip('"')))
        triples = amr.triples.copy()
        for nv, ops in name_vars_to_ops.items():
            # Sort by the numeric suffix of :opN, join the literals, and place
            # the merged :op1 at the position of the first original op.
            ops = sorted(ops, key=lambda x: int(x[1][3:]))
            idx, _, lits = zip(*ops)
            for i in idx:
                triples[i] = None
            lit = '"' + '_'.join(lits) + '"'
            triples[min(idx)] = penman.Triple(nv, ':op1', lit)
        triples = [t for t in triples if t is not None]
        amr_ = penman.Graph(triples)
        amr_.metadata = amr.metadata
        return amr_

    def linearize(self, amr: penman.Graph) -> SemanticGraph:
        """Run the full linearization pipeline on ``amr``."""
        if self.collapse_name_ops:
            amr = self._collapse_name_ops(amr)
        linearized = self._linearize(amr)
        linearized = self._interleave(linearized)
        if self.use_pointer_tokens:
            linearized = self._add_pointer_tokens(linearized)
        return linearized

    def _linearize(self, amr: penman.Graph) -> SemanticGraph:
        """BFS over the AMR (as a networkx MultiDiGraph) producing parallel
        node/edge token lists and backreferences for reentrant variables."""
        variables = set(amr.variables())
        variables = {'var:' + v for v in variables}
        var2instance = {}
        graph = nx.MultiDiGraph()
        triples2order = {k: i for i, k in enumerate(amr.triples)}
        for triple in amr.triples:
            var, rel, instance = triple
            order = triples2order[triple]
            if rel != ':instance':
                continue
            # `expansion` records the triple position after which this node may
            # be expanded (i.e. where it first occurs as a target).
            # NOTE(review): range(order - 1, -1) has no negative step, so it is
            # always empty — only the forward scan ever finds a candidate;
            # confirm this is intended.
            for expansion_candidate in itertools.chain(range(order - 1, -1), range(order + 1, len(amr.triples))):
                if var == amr.triples[expansion_candidate][2]:
                    expansion = expansion_candidate
                    break
            else:
                expansion = 0
            var = 'var:' + var
            var2instance[var] = instance
            graph.add_node(var, instance=instance, order=order, expansion=expansion)
        for triple in amr.edges():
            var1, rel, var2 = triple
            order = triples2order[triple]
            if rel == ':instance':
                continue
            var1 = 'var:' + var1
            var2 = 'var:' + var2
            graph.add_edge(var1, var2, rel=rel, order=order)
        for triple in amr.attributes():
            var, rel, attr = triple
            order = triples2order[triple]
            if rel == ':instance':
                continue
            var = 'var:' + var
            graph.add_edge(var, attr, rel=rel, order=order)

        # nodes that are not reachable from the root (e.g. because of reification)
        # will be present in the not_explored queue
        # undirected_graph = graph.to_undirected()
        # print(amr.variables())
        not_explored = deque(sorted(variables, key=lambda x: nx.get_node_attributes(graph, 'order')[x]))
        # (
        #     len(nx.shortest_path(undirected_graph, 'var:' + amr.top, x)),
        #     -graph.out_degree(x),
        # )

        first_index = {}   # variable -> position of its first mention
        explored = set()
        added_to_queue = set()
        nodes_visit = [AMRTokens.BOS_N]
        edges_visit = [AMRTokens.BOS_E]
        backreferences = [0]

        queue = deque()
        queue.append('var:' + amr.top)
        while queue or not_explored:
            if queue:
                node1 = queue.popleft()
            else:
                # Fall back to unreachable nodes once the BFS queue is drained.
                node1 = not_explored.popleft()
                if node1 in added_to_queue:
                    continue
                if not list(graph.successors(node1)):
                    continue

            if node1 in variables:
                if node1 in explored:
                    continue
                if node1 in first_index:
                    # Reentrancy: emit a backreference to the first mention.
                    nodes_visit.append(AMRTokens.BACKR_TRG_N)
                    backreferences.append(first_index[node1])
                else:
                    backreferences.append(len(nodes_visit))
                    first_index[node1] = len(nodes_visit)
                    nodes_visit.append(node1)
                edges_visit.append(AMRTokens.START_E)

                # Expand children in original triple order.
                successors = []
                for node2 in graph.successors(node1):
                    for edge_data in graph.get_edge_data(node1, node2).values():
                        rel = edge_data['rel']
                        order = edge_data['order']
                        successors.append((order, rel, node2))
                successors = sorted(successors)

                for order, rel, node2 in successors:
                    edges_visit.append(rel)
                    # node2 is a variable
                    if node2 in variables:
                        # ... which was mentioned before
                        if node2 in first_index:
                            nodes_visit.append(AMRTokens.BACKR_TRG_N)
                            backreferences.append(first_index[node2])
                        # .. which is mentioned for the first time
                        else:
                            backreferences.append(len(nodes_visit))
                            first_index[node2] = len(nodes_visit)
                            nodes_visit.append(node2)
                        # 1) not already in Q
                        # 2) has children
                        # 3) the edge right before its expansion has been encountered
                        if (node2 not in added_to_queue) and list(graph.successors(node2)) and (nx.get_node_attributes(graph, 'expansion')[node2] <= order):
                            queue.append(node2)
                            added_to_queue.add(node2)
                    # node2 is a constant
                    else:
                        backreferences.append(len(nodes_visit))
                        nodes_visit.append(node2)

                backreferences.append(len(nodes_visit))
                nodes_visit.append(AMRTokens.STOP_N)
                edges_visit.append(AMRTokens.STOP_E)
                explored.add(node1)
            else:
                backreferences.append(len(nodes_visit))
                nodes_visit.append(node1)
                explored.add(node1)

        backreferences.append(len(nodes_visit))
        nodes_visit.append(AMRTokens.EOS_N)
        edges_visit.append(AMRTokens.EOS_E)
        assert len(nodes_visit) == len(edges_visit) == len(backreferences)
        return SemanticGraph(
            nodes_visit,
            edges_visit,
            backreferences,
            var2instance,
            extra={'graph': graph, 'amr': amr}
        )

    def _interleave(self, graph: SemanticGraph) -> SemanticGraph:
        """Merge the parallel node/edge lists into a single sequence
        (node, edge, node, edge, ...), remapping backreferences to the new
        positions."""
        new_backreferences_map = []  # old node position -> new position
        new_nodes = []
        new_edges = None
        new_backreferences = []

        # to isolate sublist to the stop token
        start_i = 1
        end_i = index_default(AMRTokens.STOP_N, graph.nodes_var, start_i, -1, -1)

        def add_node(node, backr=None):
            # Append a node, translating its old backreference (if any).
            old_n_node = len(new_backreferences_map)
            new_n_node = len(new_nodes)
            if backr is None:
                backr = old_n_node
            new_backreferences_map.append(new_n_node)
            new_nodes.append(node)
            if old_n_node == backr:
                new_backreferences.append(new_n_node)
            else:
                new_backreferences.append(new_backreferences_map[backr])

        def add_edge(edge):
            # Edges never carry backreferences; they point to themselves.
            new_nodes.append(edge)
            new_backreferences.append(len(new_backreferences))

        add_node(AMRTokens.BOS_N)
        while end_i > -1:
            # src node
            add_node(graph.nodes_var[start_i], graph.backreferences[start_i])
            # edges and trg nodes, interleaved
            nodes = graph.nodes_var[start_i + 1:end_i]
            edges = graph.edges[start_i + 1:end_i]
            backr = graph.backreferences[start_i + 1:end_i]
            for n, e, b in zip(nodes, edges, backr):
                add_edge(e)
                add_node(n, b)
            # stop
            add_node(graph.nodes_var[end_i], graph.backreferences[end_i])
            start_i = end_i + 1
            end_i = index_default(AMRTokens.STOP_N, graph.nodes_var, start_i, -1, -1)
        add_node(AMRTokens.EOS_N)

        new_graph = SemanticGraph(
            new_nodes,
            None,
            new_backreferences,
            graph.var2instance,
            extra=graph.extra,
        )
        return new_graph

    def _add_pointer_tokens(self, graph: SemanticGraph) -> SemanticGraph:
        """Replace variable mentions with <pointer:N> tokens: each variable's
        first mention gets pointer + var token, later mentions just the
        pointer; backreferences become trivial (identity)."""
        new_nodes = []
        var2pointer = {}
        for node, backr in zip(graph.nodes_var, graph.backreferences):
            if node == AMRTokens.BACKR_TRG_N:
                node = graph.nodes_var[backr]
                pointer = var2pointer[node]
                new_nodes.append(pointer)
            elif node in graph.var2instance:
                pointer = var2pointer.setdefault(node, f"<pointer:{len(var2pointer)}>")
                new_nodes.append(pointer)
                new_nodes.append(node)
            else:
                new_nodes.append(node)
        new_backreferences = list(range(len(new_nodes)))
        new_graph = SemanticGraph(
            new_nodes,
            None,
            new_backreferences,
            graph.var2instance,
            extra=graph.extra,
        )
        return new_graph
spring | spring-main/spring_amr/modeling_bart.py | import copy
import math
import random
from typing import *
import torch
from torch import Tensor
from torch import nn
from torch.nn import functional as F
from transformers import modeling_bart as bart
from transformers.modeling_utils import BeamHypotheses, calc_banned_ngram_tokens, calc_banned_bad_words_ids, \
top_k_top_p_filtering
def extract_backreferences(ids, num_embeddings, backpointer_idx):
    """Split token ids into (vocab ids, backreference positions).

    Ids >= ``num_embeddings`` encode pointers: position ``p`` stores
    ``num_embeddings + target``. Those entries are replaced by
    ``backpointer_idx`` in the returned ids, and the returned backreferences
    hold the pointed-to position; every other position simply points to itself.
    """
    pointer_mask = ids >= num_embeddings
    backreferences = ids.clone() - num_embeddings
    backreferences[~pointer_mask] = 0
    positions = torch.arange(ids.size(1), dtype=ids.dtype, device=ids.device)
    backreferences += (~pointer_mask).long() * positions
    clean_ids = ids.clone()
    clean_ids[pointer_mask] = backpointer_idx
    return clean_ids, backreferences
class AMRBartEncoder(nn.Module):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer
    is a :class:`EncoderLayer`.

    Args:
        config: BartConfig
        embed_tokens: shared token embedding (also used by the decoder)
        backpointer_idx: token id substituted for back-reference ids
            (see :func:`extract_backreferences`)
    """

    def __init__(self, config: bart.BartConfig, embed_tokens, backpointer_idx):
        super().__init__()

        self.backpointer_idx = backpointer_idx

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        embed_dim = embed_tokens.embedding_dim
        # fairseq-style sqrt(d) embedding scaling, only if enabled in the config
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = config.max_position_embeddings

        self.embed_tokens = embed_tokens
        if config.static_position_embeddings:
            self.embed_positions = bart.SinusoidalPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx
            )
        else:
            self.embed_positions = bart.LearnedPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx, #config.extra_pos_embeddings,
            )
        self.layers = nn.ModuleList([bart.EncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = bart.LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity()
        # mbart has one extra layer_norm
        self.layer_norm = bart.LayerNorm(config.d_model) if config.normalize_before else None

    def forward(
        self, input_ids, embedded=None, attention_mask=None,
    ):
        """
        Args:
            input_ids (LongTensor): tokens in the source language of shape
                `(batch, src_len)`; may contain back-reference ids >= vocab size
            embedded (Tensor, optional): extra embeddings added on top of the
                token + position embeddings before layer norm (presumably the
                same shape as the token embeddings — confirm at call sites)
            attention_mask (torch.LongTensor): indicating which indices are padding tokens.
        Returns:
            Tuple comprised of:
                - **x** (Tensor): the last encoder layer's output of
                  shape `(batch, src_len, embed_dim)` (transposed back to
                  batch-first before returning)
                - **encoder_states** (List[Tensor]): all intermediate
                  hidden states of shape `(batch, src_len, embed_dim)`.
                  Only populated if *self.output_hidden_states:* is True.
                - **all_attentions** (List[Tensor]): Attention weights for each layer.
                  During training might not be of length n_layers because of layer dropout.
        """
        # check attention mask and invert
        if attention_mask is not None:
            attention_mask = bart.invert_mask(attention_mask)

        # Map back-reference ids (>= vocab size) onto the backpointer token id;
        # the backreferences themselves are not used by the encoder.
        input_ids, backreferences = extract_backreferences(
            input_ids, self.embed_tokens.num_embeddings, self.backpointer_idx)
        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_ids)
        x = inputs_embeds + embed_pos
        if embedded is not None:
            x += embedded
        x = self.layernorm_embedding(x)
        x = F.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        encoder_states, all_attentions = [], []
        for encoder_layer in self.layers:
            if self.output_hidden_states:
                encoder_states.append(x)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                attn = None
            else:
                x, attn = encoder_layer(x, attention_mask)

            if self.output_attentions:
                all_attentions.append(attn)

        if self.layer_norm:
            x = self.layer_norm(x)
        if self.output_hidden_states:
            encoder_states.append(x)

        # T x B x C -> B x T x C
        encoder_states = [hidden_state.transpose(0, 1) for hidden_state in encoder_states]
        x = x.transpose(0, 1)
        return x, encoder_states, all_attentions
class AMRBartDecoder(nn.Module):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer
    is a :class:`DecoderLayer`.

    On top of the standard BART decoder this adds a pointer network
    (``pointer_q`` / ``pointer_k``) whose scores over previous decoder
    positions are returned next to the hidden states.

    Args:
        config: BartConfig
        embed_tokens (torch.nn.Embedding): output embedding
        backpointer_idx: token id substituted for back-reference ids
        amr_mode: when True, pointer scores are computed; otherwise they are
            all -inf (plain text generation)
    """

    def __init__(self, config: bart.BartConfig, embed_tokens: nn.Embedding, backpointer_idx, amr_mode=True):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.max_target_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        self.backpointer_idx = backpointer_idx

        embed_dim = embed_tokens.embedding_dim
        self.embed_tokens = embed_tokens
        if config.static_position_embeddings:
            self.embed_positions = bart.SinusoidalPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx
            )
        else:
            self.embed_positions = bart.LearnedPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx, #config.extra_pos_embeddings,
            )
        self.layers = nn.ModuleList(
            [bart.DecoderLayer(config) for _ in range(config.decoder_layers)]
        )  # type: List[DecoderLayer]
        self.layernorm_embedding = bart.LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity()
        self.layer_norm = bart.LayerNorm(config.d_model) if config.add_final_layer_norm else None

        # Pointer-network projections: queries/keys over decoder hidden states.
        self.pointer_k = nn.Linear(config.d_model, config.d_model)
        # self.pointer_k.weight.data = self.layers[-1].self_attn.k_proj.weight.data.clone()

        self.pointer_q = nn.Linear(config.d_model, config.d_model)
        # self.pointer_q.weight.data = self.layers[-1].self_attn.q_proj.weight.data.clone()

        # self.pointer_k = nn.Sequential(
        #     nn.Linear(config.d_model, config.decoder_ffn_dim),
        #     nn.GELU(),
        #     nn.Linear(config.decoder_ffn_dim, config.d_model),
        # )

        # self.pointer_q = nn.Sequential(
        #     nn.Linear(config.d_model, config.decoder_ffn_dim),
        #     nn.GELU(),
        #     nn.Linear(config.decoder_ffn_dim, config.d_model),
        # )

        self.amr_mode = amr_mode

    def forward(
        self,
        input_ids,
        encoder_hidden_states,
        encoder_padding_mask,
        decoder_padding_mask,
        decoder_causal_mask,
        decoder_cached_states=None,
        use_cache=False,
        **unused
    ):
        """
        Includes several features from "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).

        Args:
            input_ids (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_hidden_states: output from the encoder, used for
                encoder-side attention
            encoder_padding_mask: for ignoring pad tokens
            decoder_cached_states (dict or None): dictionary used for storing state during generation

        Returns:
            tuple:
                - ``(x, scores)``: decoder features of shape
                  `(batch, tgt_len, embed_dim)` and pointer scores of shape
                  `(batch, tgt_len, key_len)`
                - next cache (or None when ``use_cache`` is False)
                - hidden states
                - attentions
        """
        # check attention mask and invert
        if encoder_padding_mask is not None:
            encoder_padding_mask = bart.invert_mask(encoder_padding_mask)

        input_ids, backreferences = extract_backreferences(
            input_ids,
            self.embed_tokens.num_embeddings,
            self.backpointer_idx)
        # embed positions
        embed_pos = self.embed_positions(input_ids, use_cache=use_cache)
        positions = embed_pos

        # to do this during prediction the old positions should be removed
        if use_cache:
            # Incremental decoding: only the newest token is fed through.
            input_ids = input_ids[:, -1:]
            positions = positions[:, -1:]  # happens after we embed them

        # assert input_ids.ne(self.padding_idx).any()

        x = self.embed_tokens(input_ids) * self.embed_scale
        x += positions
        x = self.layernorm_embedding(x)
        x = F.dropout(x, p=self.dropout, training=self.training)

        # (BS, seq_len, model_dim) -> (seq_len, BS, model_dim), as expected by the layers
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        # decoder layers
        all_hidden_states = ()
        all_self_attns = ()
        next_decoder_cache = []
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if self.output_hidden_states:
                all_hidden_states += (x,)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue

            layer_state = decoder_cached_states[idx] if decoder_cached_states is not None else None

            x, layer_self_attn, layer_past = decoder_layer(
                x,
                encoder_hidden_states,
                encoder_attn_mask=encoder_padding_mask,
                decoder_padding_mask=decoder_padding_mask,
                layer_state=layer_state,
                causal_mask=decoder_causal_mask,
            )

            if use_cache:
                next_decoder_cache.append(layer_past.copy())

            if self.layer_norm and (idx == len(self.layers) - 1):  # last layer of mbart
                x = self.layer_norm(x)
            if self.output_attentions:
                all_self_attns += (layer_self_attn,)

        # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
        all_hidden_states = [hidden_state.transpose(0, 1) for hidden_state in all_hidden_states]
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        # Pointer network: score each decoder position (query) against all
        # previously seen decoder positions (keys).
        xq = self.pointer_q(x)
        xk = self.pointer_k(x)

        if decoder_cached_states is not None:
            # Append the new key(s) to the cached pointer keys; the pointer
            # cache is stored as the extra last element of the layer cache list.
            if 'prev_key' in decoder_cached_states[-1].get('pointer', {}):
                last_state = decoder_cached_states[-1]['pointer']
                xk = torch.cat([last_state['prev_key'], xk], dim=1)
        next_state = {'pointer': {'prev_key': xk}}

        if use_cache:
            next_decoder_cache.append(next_state)

        if self.amr_mode:
            scores = torch.einsum('bqh,bkh->bqk', xq, xk)

            # Causal masking of pointer scores: a position may only point at
            # strictly earlier positions.
            if decoder_cached_states:
                # Single-step decoding: mask out only the current (last) key.
                mask = torch.full_like(scores[0], float('-inf'))
                mask = mask.triu(diagonal=xk.size(1) - 1)
            else:
                mask = torch.full_like(scores[0], float('-inf'))
                mask = mask.triu()
            scores += mask.unsqueeze(0)
        else:
            # Text mode: no pointer targets, all scores are -inf.
            scores = torch.full((xq.size(0), xq.size(1), xk.size(1)), float('-inf'), device=xq.device)

        if use_cache:
            next_cache = ((encoder_hidden_states, encoder_padding_mask), next_decoder_cache)
        else:
            next_cache = None
        return (x, scores), next_cache, all_hidden_states, list(all_self_attns)
class AMRBartModel(bart.PretrainedBartModel):
    """BART encoder-decoder with shared embeddings and a pointer-aware decoder.

    ``backpointer_idx`` is the embedding slot used for back-reference tokens;
    it defaults to the last embedding index when not given.
    """

    def __init__(self, config: bart.BartConfig, backpointer_idx=None):
        super().__init__(config)
        # NOTE: attentions are always returned, regardless of config.output_attentions.
        self.output_attentions = True
        self.output_hidden_states = config.output_hidden_states

        self.padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        self.shared = nn.Embedding(vocab_size, config.d_model, self.padding_idx)

        if backpointer_idx is not None:
            self.backpointer_idx = backpointer_idx
        else:
            # Default: reuse the last embedding slot as the backpointer token.
            self.backpointer_idx = self.shared.num_embeddings - 1

        self.encoder = AMRBartEncoder(config, self.shared, backpointer_idx=self.backpointer_idx)
        self.decoder = AMRBartDecoder(config, self.shared, backpointer_idx=self.backpointer_idx)

        self.init_weights()

    @property
    def sentence_mode(self):
        # Mirrors the decoder's amr_mode flag (True -> pointer scores computed).
        return self.decoder.amr_mode

    @sentence_mode.setter
    def sentence_mode(self, value):
        assert isinstance(value, bool)
        self.decoder.amr_mode = value

    def forward(
        self,
        input_ids,
        attention_mask=None,
        decoder_input_ids=None,
        encoder_outputs: Optional[Tuple] = None,
        decoder_attention_mask=None,
        decoder_cached_states=None,
        use_cache=False,
    ):
        """Run encoder (unless ``encoder_outputs`` is given) and decoder.

        Returns the decoder outputs (``(features, pointer_scores)``, cache,
        hidden states, attentions) followed by the filtered encoder outputs.
        """
        # make masks if user doesn't supply
        if not use_cache:
            decoder_input_ids, decoder_padding_mask, causal_mask = bart._prepare_bart_decoder_inputs(
                self.config,
                input_ids,
                decoder_input_ids=decoder_input_ids,
                decoder_padding_mask=decoder_attention_mask,
                causal_mask_dtype=self.shared.weight.dtype,
            )
        else:
            # Incremental decoding: causal/padding masks are not needed for a single step.
            decoder_padding_mask, causal_mask = None, None

        assert decoder_input_ids is not None

        if encoder_outputs is None:
            encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        assert isinstance(encoder_outputs, tuple)
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            decoder_input_ids,
            encoder_outputs[0],
            attention_mask,
            decoder_padding_mask,
            decoder_causal_mask=causal_mask,
            decoder_cached_states=decoder_cached_states,
            use_cache=use_cache,
        )
        # Attention and hidden_states will be [] or None if they aren't needed
        # decoder_outputs: Tuple = bart._filter_out_falsey_values(decoder_outputs)
        assert isinstance(decoder_outputs[0][0], torch.Tensor)
        assert isinstance(decoder_outputs[0][1], torch.Tensor)
        encoder_outputs: Tuple = bart._filter_out_falsey_values(encoder_outputs)
        return decoder_outputs + encoder_outputs

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        # Keep encoder/decoder embeddings tied to the shared matrix.
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def get_output_embeddings(self):
        return bart._make_linear_from_emb(self.shared)  # make it on the fly
class AMRBartForConditionalGeneration(bart.PretrainedBartModel):
base_model_prefix = "model"
def __init__(self, config: bart.BartConfig, backpointer_idx=None):
super().__init__(config)
base_model = AMRBartModel(config, backpointer_idx)
self.model = base_model
self.pad_index = base_model.shared.padding_idx
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.backpointer_idx = backpointer_idx
self._rev = None
def init_reverse_model(self):
rev = AMRBartForConditionalGeneration(self.model.config, self.backpointer_idx)
rev.model.shared = self.model.shared
rev.model.encoder = self.model.encoder
rev.model.decoder.embed_tokens = self.model.decoder.embed_tokens
rev.model.decoder.embed_positions = self.model.decoder.embed_positions
self.amr_mode = True
rev.amr_mode = False
self._rev = rev
@property
def rev(self):
if self._rev is None:
return self
else:
return self._rev
    @property
    def amr_mode(self):
        # True -> decoder computes pointer logits (text-to-AMR direction).
        return self.model.decoder.amr_mode

    @amr_mode.setter
    def amr_mode(self, value):
        assert isinstance(value, bool)
        self.model.decoder.amr_mode = value
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
lm_labels=None,
use_cache=False,
**unused
):
r"""
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens
with labels
in ``[0, ..., config.vocab_size]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# Mask filling only works for bart-large
from transformers import BartTokenizer, BartForConditionalGeneration
tokenizer = BartTokenizer.from_pretrained('bart-large')
TXT = "My friends are <mask> but they eat too many carbs."
model = BartForConditionalGeneration.from_pretrained('bart-large')
input_ids = tokenizer.batch_encode_plus([TXT], return_tensors='pt')['input_ids']
logits = model(input_ids)[0]
masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
probs = logits[0, masked_index].softmax(dim=0)
values, predictions = probs.topk(5)
tokenizer.decode(predictions).split()
# ['good', 'great', 'all', 'really', 'very']
"""
# outputs = self.model(
# input_ids,
# attention_mask=attention_mask,
# decoder_input_ids=decoder_input_ids,
# encoder_outputs=encoder_outputs,
# decoder_attention_mask=decoder_attention_mask,
# decoder_cached_states=decoder_cached_states,
# use_cache=use_cache,
# )
# lm_logits = F.linear(outputs[0][0], self.model.shared.weight, bias=self.final_logits_bias)
# po_logits = outputs[0][1]
# po_padding = torch.full_like(po_logits[:, :, 0:1], float('-inf'))
# po_padding = po_padding.repeat(1, 1, 1024 - po_logits.size(-1))
# po_logits = torch.cat([po_logits, po_padding], -1)
# uni_logits = torch.cat([lm_logits, po_logits], -1)
#
# outputs = (uni_logits,) + outputs[1:] # Add cache, hidden states and attention if they are here
outputs = self.compute_logits(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
if lm_labels is not None:
uni_logits = outputs[0]
masked_lm_loss = F.nll_loss(
uni_logits.log_softmax(-1).contiguous().view(-1, uni_logits.size(-1)),
lm_labels.contiguous().view(-1),
ignore_index=self.pad_index)
outputs = (masked_lm_loss,) + outputs
return outputs
    def compute_logits(
        self,
        input_ids,
        attention_mask=None,
        encoder_outputs=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        decoder_cached_states=None,
        use_cache=False,
    ):
        """Return logits over the joint (vocabulary + pointer) space.

        The decoder's pointer scores have a variable key length, so they are
        right-padded with ``-inf`` up to a fixed width of 1024 slots. The
        result is always ``vocab_size + 1024`` wide, with pointer positions
        occupying the ids above ``vocab_size`` (mirrored by the
        ``vocab_size += 1024`` adjustment in :meth:`generate`).
        """
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            decoder_cached_states=decoder_cached_states,
            use_cache=use_cache,
        )
        # Vocabulary logits: weight tied to the shared embedding matrix, plus bias.
        lm_logits = F.linear(outputs[0][0], self.model.shared.weight, bias=self.final_logits_bias)
        # Pointer logits, padded with -inf to a fixed width of 1024.
        po_logits = outputs[0][1]
        po_padding = torch.full_like(po_logits[:, :, 0:1], float('-inf'))
        po_padding = po_padding.repeat(1, 1, 1024 - po_logits.size(-1))
        po_logits = torch.cat([po_logits, po_padding], -1)
        uni_logits = torch.cat([lm_logits, po_logits], -1)

        outputs = (uni_logits,) + outputs[1:]  # Add cache, hidden states and attention if they are here
        return outputs
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
bad_words_ids: Optional[Iterable[int]] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
**model_specific_kwargs
) -> torch.LongTensor:
r""" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code`_.
.. _`Facebook's XLM beam search code`:
https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
Parameters:
input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
The sequence used as a prompt for the generation. If `None` the method initializes
it as an empty `torch.LongTensor` of shape `(1,)`.
max_length: (`optional`) int
The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.
min_length: (`optional`) int
The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
do_sample: (`optional`) bool
If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
early_stopping: (`optional`) bool
if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
num_beams: (`optional`) int
Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
temperature: (`optional`) float
The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
top_k: (`optional`) int
The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
top_p: (`optional`) float
The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
repetition_penalty: (`optional`) float
The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
pad_token_id: (`optional`) int
Padding token. Default to specicic model pad_token_id or None if it does not exist.
bos_token_id: (`optional`) int
BOS token. Defaults to `bos_token_id` as defined in the models config.
eos_token_id: (`optional`) int
EOS token. Defaults to `eos_token_id` as defined in the models config.
length_penalty: (`optional`) float
Exponential penalty to the length. Default to 1.
no_repeat_ngram_size: (`optional`) int
If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
bad_words_ids: (`optional`) list of lists of int
`bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences: (`optional`) int
The number of independently computed returned sequences for each element in the batch. Default to 1.
attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
Defaults to `None`.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id=None: (`optional`) int
If an encoder-decoder model starts decoding with a different token than BOS.
Defaults to `None` and is changed to `BOS` later.
use_cache: (`optional`) bool
If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.
model_specific_kwargs: (`optional`) dict
Additional model specific kwargs will be forwarded to the `forward` function of the model.
Return:
output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3) # 3 generate sequences using by sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog' # "Legal" is one of the control codes for ctrl
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
batch_size = input_ids.shape[0] # overriden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
# not allow to duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
logger.warning(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
)
pad_token_id = eos_token_id
# current position and vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
vocab_size += 1024
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
decoder_start_token_id = bos_token_id
assert (
decoder_start_token_id is not None
), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
return output
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
next_token_logits, batch_size, num_beams, input_ids, repetition_penalty,
)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.prepare_logits_for_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -float("inf")
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together (we are keeping top hypothesis accross beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence or last iteration
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token if it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == num_beams:
break
# Check if were done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len=cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1)
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# finalize all open beam hypotheses and end to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() is not eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are filled with pad_token
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`Pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
assert (len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
@staticmethod
def _reorder_cache(past: Tuple, beam_idx: Tensor) -> Tuple[Tensor]:
return tuple(layer_past.index_select(1, beam_idx) for layer_past in past)
    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        """Resize the shared embedding matrix to `new_num_tokens` entries.

        Delegates the actual resize to the parent class, rebinds the shared
        encoder/decoder embedding, and keeps `final_logits_bias` in sync.
        Returns the new embedding module.
        """
        # capture the old size before the parent mutates the model
        old_num_tokens = self.model.shared.num_embeddings
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        self.model.shared = new_embeddings
        self._resize_final_logits_bias(new_num_tokens, old_num_tokens)
        return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, use_cache, **kwargs):
assert past is not None, "past has to be defined for encoder_outputs"
# first step, decoder_cached_states are empty
if not past[1]:
encoder_outputs, decoder_cached_states = past, None
else:
encoder_outputs, decoder_cached_states = past
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"decoder_cached_states": decoder_cached_states,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_logits_for_generation(self, logits, cur_len, max_length):
#if cur_len == 1:
# self._force_token_ids_generation(logits, self.config.bos_token_id)
if cur_len == max_length - 1 and self.config.eos_token_id is not None:
self._force_token_ids_generation(logits, self.config.eos_token_id)
return logits
def _force_token_ids_generation(self, scores, token_ids) -> None:
"""force one of token_ids to be generated by setting prob of all other tokens to 0"""
if isinstance(token_ids, int):
token_ids = [token_ids]
all_but_token_ids_mask = torch.tensor(
[x for x in range(self.config.vocab_size) if x not in token_ids],
dtype=torch.long,
device=next(self.parameters()).device,
)
assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]"
scores[:, all_but_token_ids_mask] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
((enc_out, enc_mask), decoder_cached_states) = past
reordered_past = []
for layer_past in decoder_cached_states:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
layer_past_new = {
attn_key: bart._reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
new_enc_out = enc_out if enc_out is None else enc_out.index_select(0, beam_idx)
new_enc_mask = enc_mask if enc_mask is None else enc_mask.index_select(0, beam_idx)
past = ((new_enc_out, new_enc_mask), reordered_past)
return past
    def get_encoder(self):
        """Return the underlying seq2seq encoder module."""
        return self.model.encoder
    def get_output_embeddings(self):
        """Return a linear output head tied to the shared embedding weights."""
        return bart._make_linear_from_emb(self.model.shared)  # make it on the fly
| 60,795 | 46.055728 | 236 | py |
spring | spring-main/spring_amr/__init__.py | __version__ = "0.0.1"
from pathlib import Path
ROOT = Path(__file__).parent.parent
| 85 | 13.333333 | 35 | py |
spring | spring-main/spring_amr/evaluation.py | import datetime
from pathlib import Path
import penman
from sacrebleu import corpus_bleu
import torch
from tqdm import tqdm
import smatch
from spring_amr.dataset import reverse_direction
def predict_amrs(
        loader, model, tokenizer, beam_size=1, tokens=None, restore_name_ops=False, return_all=False):
    """Parse every sentence in `loader` into AMR graphs with beam search.

    Temporarily forces the loader into deterministic (no-shuffle, sorted)
    order, generates `beam_size` candidate token sequences per sentence,
    decodes each into a graph, and copies the gold graphs' metadata onto the
    predictions. Returns one graph per sentence, or all beam candidates per
    sentence when `return_all` is True. If `tokens` is given, generation is
    skipped and those token sequences are decoded instead.
    """
    # remember loader state so it can be restored at the end
    shuffle_orig = loader.shuffle
    sort_orig = loader.sort
    loader.shuffle = False
    loader.sort = True
    total = len(loader.dataset)
    model.eval()
    model.amr_mode = True
    if tokens is None:
        ids = []
        tokens = []
        with tqdm(total=total) as bar:
            for x, y, extra in loader:
                ii = extra['ids']
                ids.extend(ii)
                with torch.no_grad():
                    out = model.generate(
                        **x,
                        max_length=1024,
                        decoder_start_token_id=0,
                        num_beams=beam_size,
                        num_return_sequences=beam_size)
                nseq = len(ii)
                # group the flat (nseq * beam_size) output by source sentence
                for i1 in range(0, out.size(0), beam_size):
                    tokens_same_source = []
                    tokens.append(tokens_same_source)
                    for i2 in range(i1, i1+beam_size):
                        tokk = out[i2].tolist()
                        tokens_same_source.append(tokk)
                bar.update(nseq)
        # reorder: undo the loader's sorted order back to dataset order
        tokens = [tokens[i] for i in ids]
        tokens = [t for tt in tokens for t in tt]
    graphs = []
    for i1 in range(0, len(tokens), beam_size):
        graphs_same_source = []
        graphs.append(graphs_same_source)
        for i2 in range(i1, i1+beam_size):
            tokk = tokens[i2]
            graph, status, (lin, backr) = tokenizer.decode_amr(tokk, restore_name_ops=restore_name_ops)
            # attach decoding artifacts for downstream inspection
            graph.status = status
            graph.nodes = lin
            graph.backreferences = backr
            graph.tokens = tokk
            graphs_same_source.append(graph)
        # stable-sort candidates by parse status (OK before BACKOFF), keeping beam order as tiebreak
        graphs_same_source[:] = tuple(zip(*sorted(enumerate(graphs_same_source), key=lambda x: (x[1].status.value, x[0]))))[1]
    for gps, gg in zip(graphs, loader.dataset.graphs):
        for gp in gps:
            # copy gold metadata, stamping this run as the annotator
            metadata = gg.metadata.copy()
            metadata['annotator'] = 'bart-amr'
            metadata['date'] = str(datetime.datetime.now())
            if 'save-date' in metadata:
                del metadata['save-date']
            gp.metadata = metadata
    loader.shuffle = shuffle_orig
    loader.sort = sort_orig
    if not return_all:
        graphs = [gg[0] for gg in graphs]
    return graphs
def predict_sentences(loader, model, tokenizer, beam_size=1, tokens=None, return_all=False):
    """Generate sentences from AMR graphs (generation direction) with beam search.

    Mirrors `predict_amrs` but swaps the batch direction (graph -> text),
    truncates inputs to 1024 positions, and decodes the generated token ids
    back into strings. Returns one sentence per example, or all beam
    candidates when `return_all` is True.
    """
    # remember loader state so it can be restored at the end
    shuffle_orig = loader.shuffle
    sort_orig = loader.sort
    loader.shuffle = False
    loader.sort = True
    total = len(loader.dataset)
    model.eval()
    model.amr_mode = False
    if tokens is None:
        ids = []
        tokens = []
        with tqdm(total=total) as bar:
            for x, y, extra in loader:
                ids.extend(extra['ids'])
                # swap source/target: feed linearized graphs, predict text
                x, y = reverse_direction(x, y)
                x['input_ids'] = x['input_ids'][:, :1024]
                x['attention_mask'] = x['attention_mask'][:, :1024]
                with torch.no_grad():
                    out = model.generate(
                        **x,
                        max_length=350,
                        decoder_start_token_id=0,
                        num_beams=beam_size,
                        num_return_sequences=beam_size)
                for i1 in range(0, len(out), beam_size):
                    tokens_same_source = []
                    tokens.append(tokens_same_source)
                    for i2 in range(i1, i1+beam_size):
                        tokk = out[i2]
                        # drop special tokens (ids 0-2: pad/bos/eos)
                        tokk = [t for t in tokk.tolist() if t > 2]
                        tokens_same_source.append(tokk)
                bar.update(out.size(0) // beam_size)
        # reorder: undo the loader's sorted order back to dataset order
        tokens = [tokens[i] for i in ids]
    sentences = []
    for tokens_same_source in tokens:
        if return_all:
            sentences.append([tokenizer.decode(tokk).strip() for tokk in tokens_same_source])
        else:
            sentences.append(tokenizer.decode(tokens_same_source[0]).strip())
    loader.shuffle = shuffle_orig
    loader.sort = sort_orig
    return sentences
def write_predictions(predictions_path, tokenizer, graphs):
    """Serialize `graphs` as PENMAN to `predictions_path`.

    Graphs are separated by a blank line; the tokenizer's word-initial BPE
    marker is stripped from the output. Returns the path for chaining.
    """
    encoded = [penman.encode(g) for g in graphs]
    text = '\n\n'.join(encoded).replace(tokenizer.INIT, '')
    Path(predictions_path).write_text(text)
    return predictions_path
def compute_smatch(test_path, predictions_path):
    """Return the corpus Smatch F1 between predicted and gold AMR files."""
    with Path(predictions_path).open() as pred_file, Path(test_path).open() as gold_file:
        # score_amr_pairs yields a single (precision, recall, f1) tuple for the corpus
        score = next(smatch.score_amr_pairs(pred_file, gold_file))
    return score[2]
def compute_bleu(gold_sentences, pred_sentences):
    """Corpus-level BLEU of predictions against a single reference stream."""
    references = [gold_sentences]
    return corpus_bleu(pred_sentences, references)
| 4,920 | 32.937931 | 126 | py |
spring | spring-main/spring_amr/tokenization_bart.py | import copy
import sys
from pathlib import Path
import penman
import regex as re
import torch
from transformers import BartTokenizer
from spring_amr import ROOT, postprocessing
from spring_amr.linearization import AMRTokens, AMRLinearizer
from spring_amr.penman import encode
class AMRBartTokenizer(BartTokenizer):
    """BART tokenizer extended with an AMR vocabulary.

    Adds AMR frames, relations, recategorization symbols and (optionally)
    pointer tokens to the BPE vocabulary, and provides graph <-> token-id
    (de)serialization via an `AMRLinearizer` and the `postprocessing` module.
    """
    # BPE word-initial marker used by BART's byte-level BPE
    INIT = 'Ġ'
    # special AMR control tokens appended to the vocabulary
    ADDITIONAL = [
        AMRTokens.PNTR_N,
        AMRTokens.STOP_N,
        AMRTokens.LIT_START,
        AMRTokens.LIT_END,
        AMRTokens.BACKR_SRC_N,
        AMRTokens.BACKR_TRG_N,]
    def __init__(self, *args, use_pointer_tokens=False, collapse_name_ops=False, **kwargs):
        super().__init__(*args, **kwargs)
        # custom word-splitting pattern: special tokens, :relations, clitics, letters, digits, punctuation
        self.patterns = re.compile(
            r""" ?<[a-z]+:?\d*>| ?:[^\s]+|'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
        self.linearizer = AMRLinearizer(use_pointer_tokens=use_pointer_tokens, collapse_name_ops=collapse_name_ops)
        self.use_pointer_tokens = use_pointer_tokens
        self.collapse_name_ops = collapse_name_ops
        self.recategorizations = set()
        self.modified = 0
    @classmethod
    def from_pretrained(cls, pretrained_model_path, pred_min=5, *args, **kwargs):
        """Load a pretrained BART tokenizer and graft the AMR vocabulary onto it."""
        inst = super().from_pretrained(pretrained_model_path, *args, **kwargs)
        inst.init_amr_vocabulary(pred_min=pred_min)
        return inst
    def init_amr_vocabulary(self, pred_min=5):
        """Extend the vocabulary with AMR symbols.

        Prefixes the special tokens with INIT, loads predicates (frequency >=
        `pred_min`), additions and recategorization symbols from data files,
        optionally adds 512 pointer tokens, then re-indexes encoder/decoder.
        """
        # re-key the core special tokens under their INIT-prefixed form
        for tok in [self.bos_token, self.eos_token, self.pad_token, '<mask>', '<unk>']:
            ntok = self.INIT + tok
            i = self.encoder[tok]
            self.decoder[i] = ntok
            del self.encoder[tok]
            self.encoder[ntok] = i
        tokens = []
        for line in Path(ROOT/'data/vocab/predicates.txt').read_text().strip().splitlines():
            tok, count = line.split()
            if int(count) >= pred_min:
                tokens.append(tok)
        for tok in Path(ROOT/'data/vocab/additions.txt').read_text().strip().splitlines():
            tokens.append(tok)
        for tok in Path(ROOT/'data/vocab/recategorizations.txt').read_text().strip().splitlines():
            if not tok.startswith('_'):
                self.recategorizations.add(tok)
            tokens.append(tok)
        if self.use_pointer_tokens:
            for cnt in range(512):
                tokens.append(f"<pointer:{cnt}>")
        tokens += self.ADDITIONAL
        # tokens starting with '_' or '-' are sub-word continuations: no INIT prefix
        tokens = [self.INIT + t if t[0] not in ('_', '-') else t for t in tokens]
        tokens = [t for t in tokens if t not in self.encoder]
        self.old_enc_size = old_enc_size = len(self.encoder)
        for i, t in enumerate(tokens, start= old_enc_size):
            self.encoder[t] = i
        # compact the id space and rebuild the reverse map
        self.encoder = {k: i for i, (k,v) in enumerate(sorted(self.encoder.items(), key=lambda x: x[1]))}
        self.decoder = {v: k for k, v in sorted(self.encoder.items(), key=lambda x: x[1])}
        self.modified = len(tokens)
        self.bos_token = self.INIT + '<s>'
        self.pad_token = self.INIT + '<pad>'
        self.eos_token = self.INIT + '</s>'
        self.unk_token = self.INIT + '<unk>'
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one (or two) sequences with BOS/EOS ids, BART style."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def _tokenize(self, text):
        """ Tokenize a string. Modified in order to handle sentences with recategorization pointers"""
        bpe_tokens = []
        for tok_span in text.lstrip().split(' '):
            tok_span = tok_span.strip()
            recats = tok_span.rsplit('_', 1)
            # keep recategorization symbols like "PERSON_1" as two known tokens
            if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:
                bpe_tokens.extend([self.INIT + recats[0], '_' + recats[1]])
            else:
                for token in re.findall(self.pat, ' ' + tok_span):
                    token = "".join(
                        self.byte_encoder[b] for b in token.encode("utf-8")
                    )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
                    bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _tok_bpe(self, token, add_space=True):
        """BPE-encode a single linearized-graph token (recat-aware)."""
        # if add_space:
        #     token = ' ' + token.lstrip()
        tokk = []
        tok = token.strip()
        recats = tok.rsplit('_', 1)
        if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:
            tokk.extend([self.INIT + recats[0], '_' + recats[1]])
        else:
            for tok in self.patterns.findall(' ' + token):
                tok = "".join(
                    self.byte_encoder[b] for b in tok.encode("utf-8"))
                toks = self.bpe(tok).split(' ')
                tokk.extend(toks)
        return tokk
    def _get_nodes_and_backreferences(self, graph):
        """Linearize a graph into node tokens plus backreference indices."""
        lin = self.linearizer.linearize(graph)
        linearized_nodes, backreferences = lin.nodes, lin.backreferences
        return linearized_nodes, backreferences
    def tokenize_amr(self, graph):
        """BPE-tokenize a linearized AMR graph.

        Returns (bpe_tokens, bpe_token_ids, bpe_backreferences), where
        backreferences map re-entrant nodes back to their first occurrence.
        """
        linearized_nodes, backreferences = self._get_nodes_and_backreferences(graph)
        bpe_tokens = []
        bpe_backreferences = []
        counter = 0
        for i, (backr, tokk) in enumerate(zip(backreferences, linearized_nodes)):
            is_in_enc = self.INIT + tokk in self.encoder
            is_rel = tokk.startswith(':') and len(tokk) > 1
            is_spc = tokk.startswith('<') and tokk.endswith('>')
            is_of = tokk.startswith(':') and tokk.endswith('-of')
            is_frame = re.match(r'.+-\d\d', tokk) is not None
            if tokk.startswith('"') and tokk.endswith('"'):
                # string literal: wrap in LIT_START/LIT_END and BPE the content
                tokk = tokk[1:-1].replace('_', ' ')
                bpe_toks = [self.INIT + AMRTokens.LIT_START]
                bpe_toks += self._tok_bpe(tokk, add_space=True)
                bpe_toks.append(self.INIT + AMRTokens.LIT_END)
            elif (is_rel or is_spc or is_frame or is_of):
                if is_in_enc:
                    bpe_toks = [self.INIT + tokk]
                elif is_frame:
                    # split "predicate-01" into stem + sense suffix
                    bpe_toks = self._tok_bpe(tokk[:-3], add_space=True) + [tokk[-3:]]
                elif is_of:
                    rel = tokk[:-3]
                    if self.INIT + rel in self.encoder:
                        bpe_toks = [self.INIT + rel, '-of']
                    else:
                        bpe_toks = [self.INIT + ':'] + self._tok_bpe(rel[1:], add_space=True) + ['-of']
                elif is_rel:
                    bpe_toks = [self.INIT + ':'] + self._tok_bpe(tokk[1:], add_space=True)
                else:
                    raise
            else:
                if is_in_enc:
                    bpe_toks = [self.INIT + tokk]
                else:
                    bpe_toks = self._tok_bpe(tokk, add_space=True)
            bpe_tokens.append(bpe_toks)
            if i == backr:
                # fresh node: its sub-tokens reference themselves
                bpe_backr = list(range(counter, counter + len(bpe_toks)))
                counter += len(bpe_toks)
                bpe_backreferences.append(bpe_backr)
            else:
                # re-entrancy: point at the first sub-token of the antecedent
                bpe_backreferences.append(bpe_backreferences[backr][0:1])
                counter += 1
        bpe_tokens = [b for bb in bpe_tokens for b in bb]
        bpe_token_ids = [self.encoder.get(b, self.unk_token_id) for b in bpe_tokens]
        bpe_backreferences = [b for bb in bpe_backreferences for b in bb]
        return bpe_tokens, bpe_token_ids, bpe_backreferences
    def batch_encode_sentences(self, sentences, device=torch.device('cpu')):
        """Batch-encode raw sentences into padded tensors on `device`."""
        sentences = [s for s in sentences]
        extra = {'sentences': sentences}
        batch = super().batch_encode_plus(sentences, return_tensors='pt', pad_to_max_length=True)
        batch = {k: v.to(device) for k, v in batch.items()}
        return batch, extra
    def linearize(self, graph):
        """Tokenize one graph into unified ids (backreferences offset by vocab size)."""
        shift = len(self.encoder)
        tokens, token_ids, backreferences = self.tokenize_amr(graph)
        extra = {'linearized_graphs': tokens, 'graphs': graph}
        token_uni_ids = \
            [idx if i == b else b + shift for i, (idx, b) in enumerate(zip(token_ids, backreferences))]
        # NOTE(review): token_uni_ids holds ints while the RHS is a string, so this
        # comparison always succeeds and EOS is always appended — likely meant
        # to test tokens[-1]; confirm against training data expectations.
        if token_uni_ids[-1] != (self.INIT + AMRTokens.EOS_N):
            tokens.append(self.INIT + AMRTokens.EOS_N)
            token_ids.append(self.eos_token_id)
            token_uni_ids.append(self.eos_token_id)
            backreferences.append(len(backreferences))
        return token_uni_ids, extra
    def batch_encode_graphs(self, graphs, device=torch.device('cpu')):
        """Linearize and batch a list of graphs (see batch_encode_graphs_from_linearized)."""
        linearized, extras = zip(*[self.linearize(g) for g in graphs])
        return self.batch_encode_graphs_from_linearized(linearized, extras, device=device)
    def batch_encode_graphs_from_linearized(self, linearized, extras=None, device=torch.device('cpu')):
        """Pad pre-linearized graphs and split into decoder inputs / LM labels."""
        if extras is not None:
            batch_extra = {'linearized_graphs': [], 'graphs': []}
            for extra in extras:
                batch_extra['graphs'].append(extra['graphs'])
                batch_extra['linearized_graphs'].append(extra['linearized_graphs'])
        else:
            batch_extra = {}
        maxlen = 0
        batch = []
        for token_uni_ids in linearized:
            maxlen = max(len(token_uni_ids), maxlen)
            batch.append(token_uni_ids)
        batch = [x + [self.pad_token_id] * (maxlen - len(x)) for x in batch]
        batch = torch.tensor(batch).to(device)
        # teacher forcing: inputs are shifted one step left of the labels
        batch = {'decoder_input_ids': batch[:, :-1], 'lm_labels': batch[:, 1:]}
        return batch, batch_extra
    def decode_amr(self, tokens, restore_name_ops=False):
        """Decode generated token ids back into a penman Graph.

        Returns (graph, status, (nodes, backreferences)); falls back to the
        BACKOFF graph when decoding, building or reconnection fails.
        """
        try:
            nodes, backreferences = postprocessing.decode_into_node_and_backreferences(tokens, self)
        except Exception as e:
            print('Decoding failure:', file=sys.stderr)
            print(e, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
        if self.use_pointer_tokens:
            nodes, backreferences = postprocessing.restore_backreferences_from_pointers(nodes)
        try:
            graph_ = graph = postprocessing.build_graph(nodes, backreferences, restore_name_ops=restore_name_ops)
        except Exception as e:
            print('Building failure:', file=sys.stderr)
            print(nodes, file=sys.stderr)
            print(backreferences, file=sys.stderr)
            print(e, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
        try:
            graph, status = postprocessing.connect_graph_if_not_connected(graph)
            if status == postprocessing.ParsedStatus.BACKOFF:
                # NOTE(review): this print goes to stdout unlike the others — confirm intent
                print('Reconnection 1 failure:')
                print(nodes, file=sys.stderr)
                print(backreferences, file=sys.stderr)
                print(graph_, file=sys.stderr)
            return graph, status, (nodes, backreferences)
        except Exception as e:
            print('Reconnction 2 failure:', file=sys.stderr)
            print(e, file=sys.stderr)
            print(nodes, file=sys.stderr)
            print(backreferences, file=sys.stderr)
            print(graph_, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (nodes, backreferences)
class PENMANBartTokenizer(AMRBartTokenizer):
    def __init__(self, *args, raw_graph=False, **kwargs):
        """PENMAN-string variant of the AMR tokenizer.

        With `raw_graph=True`, graphs are tokenized as (truncated) raw PENMAN
        text; otherwise they go through the pointer-aware linearization.
        """
        super().__init__(*args, **kwargs)
        # no AMRLinearizer: linearization is done directly from PENMAN text
        self.linearizer = None
        self.remove_pars = False
        self.raw_graph = raw_graph
def _tokenize_encoded_graph(self, encoded):
linearized = re.sub(r"(\".+?\")", r' \1 ', encoded)
pieces = []
for piece in linearized.split():
if piece.startswith('"') and piece.endswith('"'):
pieces.append(piece)
else:
piece = piece.replace('(', ' ( ')
piece = piece.replace(')', ' ) ')
piece = piece.replace(':', ' :')
piece = piece.replace('/', ' / ')
piece = piece.strip()
pieces.append(piece)
linearized = re.sub(r'\s+', ' ', ' '.join(pieces)).strip()
linearized_nodes = [AMRTokens.BOS_N] + linearized.split(' ')
return linearized_nodes
def tokenize_amr(self, graph):
if self.raw_graph:
graph_ = copy.deepcopy(graph)
graph_.metadata = {}
linearized = penman.encode(graph_)
linearized = re.sub(r"\s+", ' ', linearized)
bpe_tokens = [self.bos_token] + self._tokenize(linearized)[:1022]
bpe_token_ids = [self.encoder.get(b, self.unk_token_id) for b in bpe_tokens]
bpe_backreferences = list(range(len(bpe_token_ids)))
return bpe_tokens, bpe_token_ids, bpe_backreferences
else:
return super().tokenize_amr(graph)
    def _get_nodes_and_backreferences(self, graph):
        """Linearize a graph from its PENMAN encoding.

        Optionally replaces variable names with `<pointer:k>` tokens; since
        PENMAN text has no re-entrancy indices, backreferences are trivial
        (each position points to itself).
        """
        graph_ = copy.deepcopy(graph)
        graph_.metadata = {}
        linearized = penman.encode(graph_)
        linearized_nodes = self._tokenize_encoded_graph(linearized)
        if self.use_pointer_tokens:
            # first pass: map each variable (token before '/') to a pointer token
            remap = {}
            for i in range(1, len(linearized_nodes)):
                nxt = linearized_nodes[i]
                lst = linearized_nodes[i-1]
                if nxt == '/':
                    remap[lst] = f'<pointer:{len(remap)}>'
            # second pass: substitute pointers at definitions (drop the '/')
            # and at re-entrant uses (after a relation)
            i = 1
            linearized_nodes_ = [linearized_nodes[0]]
            while i < (len(linearized_nodes)):
                nxt = linearized_nodes[i]
                lst = linearized_nodes_[-1]
                if nxt in remap:
                    if lst == '(' and linearized_nodes[i+1] == '/':
                        nxt = remap[nxt]
                        i += 1
                    elif lst.startswith(':'):
                        nxt = remap[nxt]
                linearized_nodes_.append(nxt)
                i += 1
            linearized_nodes = linearized_nodes_
        if self.remove_pars:
            linearized_nodes = [n for n in linearized_nodes if n != '(']
        backreferences = list(range(len(linearized_nodes)))
        return linearized_nodes, backreferences
def _classify(self, node):
if not isinstance(node, str):
return "CONST"
elif node == 'i':
return "I"
elif re.match(r'^[a-z]\d*$', node) is not None:
return "VAR"
elif node[0].isdigit():
return "CONST"
elif node.startswith('"') and node.endswith('"'):
return "CONST"
elif node in ('+', '-'):
return "CONST"
elif node == ':mode':
return 'MODE'
elif node.startswith(':'):
return "EDGE"
elif node in ['/', '(', ')']:
return node
elif node[0].isalpha():
for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\'):
if char in node:
return "CONST"
return "INST"
else:
return 'CONST'
    def _fix_and_make_graph(self, nodes):
        """Repair a (possibly malformed) generated token sequence into a penman Graph.

        Runs a series of cleanup passes — pointer-token expansion, relation
        merging, duplicate-'/' removal, variable de-duplication, parenthesis
        balancing, and a prev/next token-category state machine that drops
        tokens illegal in their position — then parses with penman and patches
        missing concepts/targets with placeholder 'thing' nodes.
        """
        # drop stray special tokens (anything like <...> except pointers)
        nodes_ = []
        for n in nodes:
            if isinstance(n, str):
                if n.startswith('<') and n.endswith('>') and (not n.startswith('<pointer:')):
                    pass
                else:
                    nodes_.append(n)
            else:
                nodes_.append(n)
        nodes = nodes_
        if self.use_pointer_tokens:
            # split text glued onto a pointer token, e.g. "<pointer:3>foo" -> two tokens
            i = 0
            nodes_ = []
            while i < len(nodes):
                nxt = nodes[i]
                pst = None
                if isinstance(nxt, str) and nxt.startswith('<pointer:'):
                    e = nxt.find('>')
                    if e != len(nxt) -1:
                        pst = nxt[e+1:]
                        nxt = nxt[:e+1]
                    nodes_.append(nxt)
                    if pst is not None:
                        nodes_.append(pst)
                else:
                    nodes_.append(nxt)
                i += 1
            nodes = nodes_
            # turn pointer tokens back into variables; re-insert '(' and '/' at definitions
            i = 1
            nodes_ = [nodes[0]]
            while i < len(nodes):
                nxt = nodes[i]
                if isinstance(nxt, str) and nxt.startswith('<pointer:'):
                    nxt = 'z' + nxt[9:-1]
                    fol = nodes[i+1]
                    # is not expansion
                    if isinstance(fol, str) and (fol.startswith(':') or (fol == ')')):
                        nodes_.append(nxt)
                    else:
                        if self.remove_pars:
                            nodes_.append('(')
                        else:
                            if nodes_[-1] != '(':
                                nodes_.append('(')
                                #pass
                        nodes_.append(nxt)
                        nodes_.append('/')
                else:
                    nodes_.append(nxt)
                i += 1
            nodes = nodes_
        # merge a dangling ':' with the following token into one relation
        # NOTE(review): `last` is unbound when the loop body never runs (len(nodes) <= 1)
        i = 0
        nodes_ = []
        while i < (len(nodes) - 1):
            if nodes[i] == ':':
                nodes_.append(nodes[i] + nodes[i+1])
                i += 2
                last = False
            else:
                nodes_.append(nodes[i])
                i += 1
                last = True
        if last:
            nodes_.append(nodes[-1])
        nodes = nodes_
        # collapse "x / y / z" into "x / y" (drop duplicated concept slots)
        i = 0
        nodes_ = []
        while i < (len(nodes)):
            if i < 2:
                nodes_.append(nodes[i])
                i += 1
            elif nodes_[-2] == '/' and nodes[i] == '/':
                i += 2
            else:
                nodes_.append(nodes[i])
                i += 1
        nodes = nodes_
        # rename variables that are (re)defined twice, remapping later uses
        i = 0
        newvars = 0
        variables = set()
        remap = {}
        nodes_ = []
        while i < (len(nodes)):
            next = nodes[i]
            if next == '/':
                last = nodes_[-1]
                if last in variables:
                    last_remap = f"z{newvars+1000}"
                    newvars += 1
                    nodes_[-1] = last_remap
                    remap[last] = last_remap
                variables.add(last)
                nodes_.append(next)
            elif self._classify(next) == 'VAR' and next in remap and (i < len(nodes) - 1) and nodes[i+1] != '/':
                next = remap[next]
                nodes_.append(next)
            else:
                nodes_.append(next)
            i += 1
        nodes = nodes_
        # truncate at the first balanced point and pad missing ')'
        pieces_ = []
        open_cnt = 0
        closed_cnt = 0
        if nodes[0] != '(':
            pieces_.append('(')
            open_cnt += 1
        for p in nodes:
            if p == '(':
                open_cnt += 1
            elif p == ')':
                closed_cnt += 1
            pieces_.append(p)
            if open_cnt == closed_cnt:
                break
        nodes = pieces_ + [')'] * (open_cnt - closed_cnt)
        # state machine: keep only tokens legal after the previous token's category
        pieces = []
        for piece in nodes:
            if not pieces:
                pieces.append('(')
            else:
                piece = str(piece)
                # NOTE(review): the second disjunct duplicates the first
                # (`startswith` twice) — probably meant `endswith('"')`; confirm.
                if piece.startswith('"') or piece.startswith('"') or '"' in piece.strip('"'):
                    piece = '"' + piece.replace('"', '') + '"'
                prev = self._classify(pieces[-1])
                next = self._classify(piece)
                if next == 'CONST':
                    quote = False
                    for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\', '_', '='):
                        if char in piece:
                            quote = True
                            break
                    if quote:
                        piece = '"' + piece.strip('"') + '"'
                if prev == '(':
                    if next in ('VAR', 'I'):
                        pieces.append(piece)
                elif prev == ')':
                    if next in (')', 'EDGE', 'MODE'):
                        pieces.append(piece)
                elif prev == 'VAR':
                    if next in ('/', 'EDGE', 'MODE', ')'):
                        pieces.append(piece)
                elif prev == '/':
                    if next in ('INST', 'I'):
                        pieces.append(piece)
                elif prev == 'INST':
                    if next in (')', 'EDGE', 'MODE'):
                        pieces.append(piece)
                elif prev == 'I':
                    if next in ('/', ')', 'EDGE', 'MODE'):
                        pieces.append(piece)
                elif prev == 'EDGE':
                    if next in ('(', 'VAR', 'CONST', 'I'):
                        pieces.append(piece)
                    elif next == ')':
                        pieces[-1] = piece
                    elif next in ('EDGE', 'MODE'):
                        pieces[-1] = piece
                elif prev == 'MODE':
                    if next == 'INST':
                        pieces.append(piece)
                elif prev == 'CONST':
                    if next in (')', 'EDGE', 'MODE'):
                        pieces.append(piece)
        # re-balance parentheses once more after the state machine
        pieces_ = []
        open_cnt = 0
        closed_cnt = 0
        if pieces[0] != '(':
            pieces_.append('(')
            open_cnt += 1
        for p in pieces:
            if p == '(':
                open_cnt += 1
            elif p == ')':
                closed_cnt += 1
            pieces_.append(p)
            if open_cnt == closed_cnt:
                break
        pieces = pieces_ + [')'] * (open_cnt - closed_cnt)
        linearized = re.sub(r'\s+', ' ', ' '.join(pieces)).strip()
        """
        line = linearized
        # make sure parentheses match
        # copied from https://github.com/RikVN/AMR/blob/master/restoreAMR/restore_amr.py
        open_count = 0
        close_count = 0
        for i, c in enumerate(line):
            if c == '(':
                open_count += 1
            elif c == ')':
                close_count += 1
            if open_count == close_count and open_count > 0:
                line = line[:i].strip()
                break
        old_line = line
        while True:
            open_count = len(re.findall(r'\(', line))
            close_count = len(re.findall(r'\)', line))
            if open_count > close_count:
                line += ')' * (open_count - close_count)
            elif close_count > open_count:
                for i in range(close_count - open_count):
                    line = line.rstrip(')')
                line = line.rstrip(' ')
            if old_line == line:
                break
            old_line = line
        """
        graph = penman.decode(linearized + ' ')
        # patch incomplete triples: missing concepts/targets become 'thing' nodes
        triples = []
        newvars = 2000
        for triple in graph.triples:
            x, rel, y = triple
            if x is None:
                pass
            elif rel == ':instance' and y is None:
                triples.append(penman.Triple(x, rel, 'thing'))
            elif y is None:
                var = f'z{newvars}'
                newvars += 1
                triples.append(penman.Triple(x, rel, var))
                triples.append(penman.Triple(var, ':instance', 'thing'))
            else:
                triples.append(triple)
        graph = penman.Graph(triples)
        linearized = encode(graph)
        def fix_text(linearized=linearized):
            """Regex-level repairs on the re-encoded PENMAN text."""
            n = 0
            def _repl1(match):
                nonlocal n
                out = match.group(1) + match.group(2) + str(3000 + n) + ' / ' + match.group(2) + match.group(3)
                n += 1
                return out
            linearized = re.sub(r'(\(\s?)([a-z])([^\/:\)]+[:\)])', _repl1, linearized,
                                flags=re.IGNORECASE | re.MULTILINE)
            def _repl2(match):
                return match.group(1)
            linearized = re.sub(r'(\(\s*[a-z][\d+]\s*\/\s*[^\s\)\(:\/]+\s*)((?:/\s*[^\s\)\(:\/]+\s*)+)', _repl2,
                                linearized,
                                flags=re.IGNORECASE | re.MULTILINE)
            # adds a ':' to args w/o it
            linearized = re.sub(r'([^:])(ARG)', r'\1 :\2', linearized)
            # removes edges with no node
            # linearized = re.sub(r':[^\s\)\(:\/]+?\s*\)', ')', linearized, flags=re.MULTILINE)
            return linearized
        linearized = fix_text(linearized)
        g = penman.decode(linearized)
        return g
def decode_amr(self, tokens, restore_name_ops=None):
try:
if self.raw_graph:
nodes = self._tokenize_encoded_graph(self.decode(tokens))
backreferences = list(range(len(nodes)))
else:
nodes, backreferences = postprocessing.decode_into_node_and_backreferences(tokens, self)
nodes_ = nodes
except Exception as e:
print('Decoding failure:', file=sys.stderr)
print(e, file=sys.stderr)
return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
try:
graph_ = graph = self._fix_and_make_graph(nodes)
if self.collapse_name_ops:
graph_ = graph = postprocessing._split_name_ops(graph)
except Exception as e:
print('Building failure:', file=sys.stderr)
print(nodes, file=sys.stderr)
print(backreferences, file=sys.stderr)
print(e, file=sys.stderr)
return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
try:
graph, status = postprocessing.connect_graph_if_not_connected(graph)
if status == postprocessing.ParsedStatus.BACKOFF:
print('Reconnection 1 failure:')
print(nodes, file=sys.stderr)
print(backreferences, file=sys.stderr)
print(graph_, file=sys.stderr)
return graph, status, (nodes_, backreferences)
except Exception as e:
print('Reconnction 2 failure:', file=sys.stderr)
print(e, file=sys.stderr)
print(nodes, file=sys.stderr)
print(backreferences, file=sys.stderr)
print(graph_, file=sys.stderr)
return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (nodes_, backreferences)
| 26,484 | 38.412202 | 120 | py |
spring | spring-main/spring_amr/postprocessing.py | from collections import defaultdict, Counter
import enum
import re
import networkx as nx
import penman
from spring_amr.penman import encode
from spring_amr.linearization import AMRTokens
# Placeholder graph ("the dog barks") returned whenever decoding a predicted
# graph fails beyond repair (see ParsedStatus.BACKOFF).
BACKOFF = penman.Graph([
    penman.Triple('d2', ':instance', 'dog'),
    penman.Triple('b1', ':instance', 'bark-01'),
    penman.Triple('b1', ':ARG0', 'd2'),])
def token_processing(tok):
    """Normalize a single decoded node token.

    Digit-only tokens are converted to ints, tokens with an unbalanced
    leading/trailing double quote get the missing quote added, and anything
    else (including None) is returned unchanged.
    """
    if tok is None:
        return None
    elif tok.isdigit():
        try:
            # int() instead of eval(): never executes token content, and also
            # accepts digit strings with leading zeros (e.g. '007'), which
            # eval() rejects in Python 3
            return int(tok)
        except ValueError:
            # isdigit() is true for some characters int() cannot parse
            # (e.g. superscripts); keep those as-is
            return tok
    elif tok.startswith('"') and (not tok.endswith('"')):
        return tok + '"'
    elif tok.endswith('"') and (not tok.startswith('"')):
        return '"' + tok
    else:
        return tok
def decode_into_node_and_backreferences(subtoken_ids, tokenizer):
    """Turn BART subtoken ids into a flat list of graph tokens plus, for each
    token, a backreference index (-1 for a fresh token, otherwise the index
    of the earlier token it points back to).

    Three passes: (1) collapse subwords into whole tokens, (2) merge
    <lit> ... </lit> spans into single quoted literals, (3) normalize each
    token via token_processing and strip the leading <s>/trailing </s>.
    """
    rex_arg = re.compile(f"^{tokenizer.INIT}(op|snt|conj|prep)")
    rex_spc = re.compile(r"<(s|/s|lit|/lit|stop|unk|pad|mask)>")
    # get strings
    subtokens = [tokenizer.decoder.get(t) for t in subtoken_ids]
    # fix backreferences
    subtoken_backreferences = [max(t - len(tokenizer.encoder), -1) for t in subtoken_ids]
    # strip padding
    subtokens, subtoken_backreferences = zip(
        *[(s, b) for s, b in zip(subtokens, subtoken_backreferences) if s != (tokenizer.INIT + '<pad>')])
    # subword collapse
    tokens = []
    backreferences = []
    subword_to_token_map = {}
    current_token_i = 0
    for subw_i, (subw_backr, subtok) in enumerate(zip(subtoken_backreferences, subtokens)):
        subword_to_token_map[subw_i] = current_token_i
        # if empty you cannot do anything but add a new word
        if not tokens:
            tokens.append(subtok.lstrip(tokenizer.INIT))
            backreferences.append(-1)
            current_token_i += 1
        # backref can't be splitted
        elif subw_backr > -1:
            tokens.append(None)
            backreferences.append(subword_to_token_map[subw_backr])
            current_token_i += 1
        # after a special token release
        elif isinstance(tokens[-1], str) and rex_spc.match(tokens[-1]):
            tokens.append(subtok.lstrip(tokenizer.INIT))
            backreferences.append(-1)
            current_token_i += 1
        # after a subtoken ':' (which should be followed by the rest of the edge) ignore tokenizer.INIT
        # TODO: this is an ugly patch due to the fact that BART tokenizer splits after ':'
        elif (tokens[-1] == ':') and rex_arg.match(subtok):
            tokens[-1] = tokens[-1] + subtok[1:]
        # leading tokenizer.INIT
        elif subtok.startswith(tokenizer.INIT):
            tokens.append(subtok.lstrip(tokenizer.INIT))
            backreferences.append(-1)
            current_token_i += 1
        # very ugly patch for some cases in which tokenizer.INIT is not in the following token to the edge
        elif isinstance(tokens[-1], str) and tokens[-1].startswith(':') and tokens[-1][-1].isdigit() and (subtok != '-of'):
            tokens.append(subtok.lstrip(tokenizer.INIT))
            backreferences.append(-1)
            current_token_i += 1
        # in any other case attach to the previous
        else:
            tokens[-1] = tokens[-1] + subtok
    # strip INIT and fix byte-level
    tokens = [tokenizer.convert_tokens_to_string(list(t)).lstrip() if isinstance(t, str) else t for t in tokens]
    # tokens = [t.replace(tokenizer.INIT, '') if isinstance(t, str) else t for t in tokens]
    # unks are substituted with thing
    tokens = [t if t != '<unk>' else 'thing' for t in tokens]
    old_tokens = tokens
    old_backreferences = backreferences
    # <lit> Barack Obama </lit> -> "Barack Obama"
    tokens = []
    backreferences = []
    token_to_token_map = {}
    start_search = 0
    removed = 0
    while True:
        try:
            lit_start = old_tokens.index('<lit>', start_search)
            token_addition = old_tokens[start_search:lit_start]
            for i, t in enumerate(token_addition, start=start_search):
                token_to_token_map[i] = i - removed
            tokens += token_addition
            backreferences_addition = [token_to_token_map[b] if b > -1 else -1 for b in
                                       old_backreferences[start_search:lit_start]]
            backreferences += backreferences_addition
            lit_end = min(lit_start + 2, len(old_tokens) - 1)
            while lit_end < len(old_tokens):
                old_tok = old_tokens[lit_end]
                # a new edge or <stop> before </lit> also terminates the literal
                if isinstance(old_tok, str) and (
                        (old_tok.startswith(':') and len(old_tok) > 3) or (old_tok == '<stop>')):
                    res_tok = old_tokens[lit_start + 1:lit_end]
                    for i in range(lit_start, lit_end):
                        token_to_token_map[i] = len(tokens)
                    # Remove possible wrong None
                    res = old_tokens[lit_start+1:lit_end]
                    res = [str(r) for r in res if r is not None]
                    res = '"' + '_'.join(res) + '"'
                    removed += len(res_tok)
                    start_search = lit_end
                    tokens += [res, old_tok]
                    backreferences += [-1, -1]
                    break
                elif old_tok == '</lit>':
                    res_tok = old_tokens[lit_start + 1:lit_end]
                    for i in range(lit_start, lit_end + 1):
                        token_to_token_map[i] = len(tokens)
                    # Remove possible wrong None
                    res = old_tokens[lit_start+1:lit_end]
                    res = [str(r) for r in res if r is not None]
                    res = '"' + '_'.join(res) + '"'
                    removed += len(res_tok) + 1
                    start_search = lit_end + 1
                    tokens.append(res)
                    backreferences.append(-1)
                    break
                else:
                    lit_end += 1
                    start_search = lit_end
        except ValueError:
            # no more <lit> markers: copy the remaining tokens and stop
            token_addition = old_tokens[start_search:]
            for i, t in enumerate(token_addition, start=start_search):
                token_to_token_map[i] = i - removed
            backreferences_addition = [token_to_token_map[b] if b > -1 else b for b in
                                       old_backreferences[start_search:]]
            tokens += token_addition
            backreferences += backreferences_addition
            break
    tokens = [token_processing(t) for t in tokens]
    # drop the leading special token(s) and shift backreferences accordingly
    shift = 1
    if tokens[1] == '<s>':
        shift = 2
    tokens = tokens[shift:]
    backreferences = [b if b == -1 else b - shift for b in backreferences[shift:]]
    if tokens[-1] == '</s>':
        tokens.pop()
        backreferences.pop()
    return tokens, backreferences
def index_of(element, iterable, default=None, start=None, end=None):
    """Return the index of the first position in ``iterable[start:end]``
    matching ``element``, or ``default`` if there is none.

    ``element`` may be a plain value (compared with ``==``) or a predicate
    callable applied to each item.
    """
    matches = element if callable(element) else (lambda item: item == element)
    lo = 0 if start is None else start
    hi = len(iterable) if end is None else end
    for pos in range(lo, hi):
        if matches(iterable[pos]):
            return pos
    return default
def separate_edges_nodes(edges_nodes_slice, *other):
    """Find (edge, node) index pairs in a flat token slice and project those
    positions onto every extra sequence.

    An edge is a string starting with ':' that is immediately followed by a
    non-edge token; consecutive edges skip forward. Returns one
    ``(edges, nodes)`` pair of lists per sequence in ``other``.
    """
    def _is_edge(tok):
        return isinstance(tok, str) and tok.startswith(':')

    edge_positions = []
    node_positions = []
    cursor = 0
    size = len(edges_nodes_slice)
    while cursor < size:
        found = index_of(_is_edge, edges_nodes_slice, start=cursor)
        if found is None or found == (size - 1):
            break
        if _is_edge(edges_nodes_slice[found + 1]):
            # two edges in a row: the first has no node, move past it
            cursor = found + 1
            continue
        edge_positions.append(found)
        node_positions.append(found + 1)
        cursor = found + 2
    projected = []
    for seq in other:
        projected.append(([seq[i] for i in edge_positions],
                          [seq[i] for i in node_positions]))
    return projected
def _split_name_ops(graph):
    """Split multi-word :opN literals under (x :instance name) nodes into one
    quoted :op per underscore-separated word, renumbering the ops
    (e.g. :op1 "Barack_Obama" -> :op1 "Barack" :op2 "Obama")."""
    # identify name triples
    name_vars = {}
    for i, (v1, rel, v2) in enumerate(graph.triples):
        if rel == ':instance' and v2 == 'name':
            name_vars[v1] = 1
    # check if they have ops
    name_vars_to_ops = defaultdict(list)
    for i, (v1, rel, v2) in enumerate(graph.triples):
        if v1 in name_vars and rel.startswith(':op'):
            name_vars_to_ops[v1].append((i, rel, v2.strip('"')))
    triples = graph.triples.copy()
    for nv, ops in name_vars_to_ops.items():
        # sort by the numeric suffix of :opN
        ops = sorted(ops, key=lambda x: int(x[1][3:]))
        idx, _, lits = zip(*ops)
        for i in idx:
            triples[i] = None
        # split each literal on '_' and re-quote every word
        lits = ['"' + l + '"' for lit in lits for l in lit.split('_')]
        tt = []
        for i, l in enumerate(lits, start=1):
            rel = ':op' + str(i)
            tt.append(penman.Triple(nv, rel, l))
        # place all new ops where the first original op was
        triples[min(idx)] = tt
    triples = [t if isinstance(t, list) else [t] for t in triples if t is not None]
    triples = [t for tt in triples for t in tt]
    graph_ = penman.Graph(triples)
    graph_.metadata = graph.metadata
    return graph_
def _reconstruct_graph_from_nodes(nodes, backreferences):
    """Rebuild a penman Graph from a flat node/edge token list plus
    backreference indices, skipping malformed tokens and capping duplicate
    edges per source variable."""
    triples = []
    triples_added = set()
    variable2index = {}
    index2variable = {}
    start_index = 0
    cnt = defaultdict(Counter)
    while start_index < len(nodes):
        stop_index = index_of('<stop>', nodes, default=len(nodes) + 1, start=start_index)
        old_start_index = start_index
        start_index = stop_index + 1
        src_node, src_backr = nodes[old_start_index], backreferences[old_start_index]
        if src_node == '<stop>':
            continue
        trg_nodes_edges = nodes[old_start_index:stop_index]
        trg_nodes_edges_backr = backreferences[old_start_index:stop_index]
        trg_nodes_edges_indices = list(range(old_start_index, stop_index))
        if isinstance(src_node, str):
            if src_node in ('<s>', '</s>', '<stop>'):
                continue
            elif ('/' in src_node) or (':' in src_node) or ('(' in src_node) or (')' in src_node):
                src_node = 'thing'
        if src_node is not None:
            src_node = str(src_node)
            src_var = src_node[0].lower()
            # fix: original read `if not src_var not in '...'` (i.e.
            # `if src_var in '...'`), which replaced *valid* letter-initial
            # variables with 'x'; mirror the correct target-variable logic below
            if src_var not in 'abcdefghijklmnopqrstuvwxyz':
                src_var = 'x'
            #src_var = f'{src_var}_{len(variable2index)}'
            src_var = f'{src_var}{len(variable2index)}'
            src_var_i = old_start_index
            variable2index[src_var] = src_var_i
            index2variable[src_var_i] = src_var
            triple = penman.Triple(src_var, ':instance', src_node)
            if triple not in triples_added:
                triples.append(triple)
                triples_added.add(triple)
        else:
            # a None source is a backreference to an earlier variable
            if src_backr in index2variable:
                src_var = index2variable[src_backr]
        # more resilient logic here
        (trg_edges, trg_nodes), (_, trg_nodes_backr), (_, trg_nodes_indices) = \
            separate_edges_nodes(
                trg_nodes_edges,
                trg_nodes_edges,
                trg_nodes_edges_backr,
                trg_nodes_edges_indices)
        for n, e, nb, ni in zip(trg_nodes, trg_edges, trg_nodes_backr, trg_nodes_indices):
            if isinstance(n, str) and n.startswith(':'):
                continue
            if isinstance(n, str) and n.startswith('<') and n.endswith('>'):
                continue
            if e == ':li':
                pass
            elif len(e) < 4 or (not e.startswith(':')):
                continue
            # same edge more than once
            num = cnt[src_var][e]
            # num = 0
            if num:
                if e.startswith(':op') or e.startswith(':snt'):
                    continue
                #elif e.startswith(':ARG'):
                #    continue
                elif num > 3:
                    continue
            if n is None:
                # backreference target: reuse the variable it points to
                if nb not in index2variable:
                    continue
                trg_var = index2variable[nb]
                trg = trg_var
            elif e == ':mode':
                trg = n
            elif (not isinstance(n, str)) or re.match(r"^[+-]?\d+\.?\d*$", n) or (n == '-') or (n == '+'):
                trg = str(n)
            elif (n.startswith('"') and n.endswith('"') and len(n) > 2):
                trg = '"' + n.replace('"', '') + '"'
            elif ('/' in n) or (':' in n) or ('(' in n) or (')' in n) or ('=' in n):
                trg = f'"{n}"'
            elif n == '"':
                continue
            elif (n.startswith('"') and (not n.endswith('"'))) or (not n.startswith('"') and (n.endswith('"'))) or ('"' in n):
                trg = '"' + n.replace('"', '') + '"'
            else:
                # plain concept: mint a new variable for it
                trg_var = n[0].lower()
                if trg_var not in 'abcdefghijklmnopqrstuvwxyz':
                    trg_var = 'x'
                #trg_var = f'{trg_var}_{len(variable2index)}'
                trg_var = f'{trg_var}{len(variable2index)}'
                trg_var_i = ni
                variable2index[trg_var] = trg_var_i
                index2variable[trg_var_i] = trg_var
                triple = penman.Triple(trg_var, ':instance', n)
                if triple not in triples_added:
                    triples.append(triple)
                    triples_added.add(triple)
                trg = trg_var
            triple = penman.Triple(src_var, e, trg)
            if triple not in triples_added:
                triples.append(triple)
                triples_added.add(triple)
            cnt[src_var][e] += 1
    return penman.Graph(triples)
def build_graph(nodes, backreferences, restore_name_ops=False):
    """Assemble a penman Graph from decoded nodes/backreferences, optionally
    re-splitting collapsed :name ops into per-word literals."""
    reconstructed = _reconstruct_graph_from_nodes(nodes, backreferences)
    return _split_name_ops(reconstructed) if restore_name_ops else reconstructed
class ParsedStatus(enum.Enum):
    """Outcome of decoding a predicted graph (see connect_graph_if_not_connected)."""
    OK = 0       # graph was valid as-is
    FIXED = 1    # graph needed an automatic repair (e.g. reconnection)
    BACKOFF = 2  # decoding failed; callers fall back to the BACKOFF graph
def connect_graph_if_not_connected(graph):
    """Ensure ``graph`` is a single connected AMR.

    If the graph already encodes cleanly it is returned unchanged with
    status OK. Otherwise every connected component is attached as an :opN
    child of a fresh 'and' root and the result is returned with status
    FIXED (the final encode() acts as a sanity check and may raise).
    """
    try:
        # a graph penman can encode is already well-formed/connected
        encode(graph)
        return graph, ParsedStatus.OK
    except Exception:  # fix: narrowed from bare `except:` so Ctrl-C still works
        pass
    nxgraph = nx.MultiGraph()
    variables = graph.variables()
    for v1, _, v2 in graph.triples:
        if v1 in variables and v2 in variables:
            nxgraph.add_edge(v1, v2)
        elif v1 in variables:
            # self-loop keeps otherwise isolated variables visible as components
            nxgraph.add_edge(v1, v1)
    triples = graph.triples.copy()
    new_triples = []
    addition = f'a{len(variables) + 1}'
    triples.append(penman.Triple(addition, ':instance', 'and'))
    for i, conn_set in enumerate(nx.connected_components(nxgraph), start=1):
        edge = f':op{i}'
        # variables are '<letter><number>'; sort by the numeric suffix
        conn_set = sorted(conn_set, key=lambda x: int(x[1:]))
        conn_set = [c for c in conn_set if c in variables]
        node = conn_set[0]
        new_triples.append(penman.Triple(addition, edge, node))
    triples = new_triples + triples
    metadata = graph.metadata
    graph = penman.Graph(triples)
    graph.metadata.update(metadata)
    encode(graph)
    return graph, ParsedStatus.FIXED
def restore_backreferences_from_pointers(nodes):
    """Replace '<pointer:N>' markers with explicit backreferences.

    The first time a pointer is seen, the token that follows it becomes the
    pointed-to node; every later occurrence of the same pointer emits a
    ``None`` node whose backreference is that node's index, followed by the
    current token. Pointer markers themselves never appear in the output.
    """
    out_nodes = []
    out_backrefs = []
    pointer_index = {}
    pending = None  # pointer token waiting for its following node

    def _emit(node, backref=-1):
        out_nodes.append(node)
        out_backrefs.append(backref)

    for tok in nodes:
        if isinstance(tok, str) and tok.startswith('<pointer:') and tok.endswith('>'):
            pending = tok
            continue
        if pending is None:
            _emit(tok)
        elif pending in pointer_index:
            # repeated pointer: reference the original node, then keep tok
            _emit(None, pointer_index[pending])
            _emit(tok)
            pending = None
        else:
            # first occurrence: the node right after the pointer defines it
            pointer_index[pending] = len(out_nodes)
            _emit(tok)
            pending = None
    return out_nodes, out_backrefs
spring | spring-main/bin/predict_sentences.py | from pathlib import Path
import penman
import torch
from spring_amr import ROOT
from spring_amr.evaluation import predict_amrs, compute_smatch, predict_sentences, compute_bleu
from spring_amr.penman import encode
from spring_amr.utils import instantiate_loader, instantiate_model_and_tokenizer
if __name__ == '__main__':
    # Graph-to-text: run the reverse (AMR -> sentence) decoder over the given
    # AMR files, then either attach predictions to the graphs or write gold /
    # predicted sentence files and report BLEU.
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description="Script to predict AMR graphs given sentences. LDC format as input.",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--datasets', type=str, required=True, nargs='+',
                        help="Required. One or more glob patterns to use to load amr files.")
    parser.add_argument('--checkpoint', type=str, required=True,
                        help="Required. Checkpoint to restore.")
    parser.add_argument('--model', type=str, default='facebook/bart-large',
                        help="Model config to use to load the model class.")
    parser.add_argument('--beam-size', type=int, default=1,
                        help="Beam size.")
    parser.add_argument('--batch-size', type=int, default=1000,
                        help="Batch size (as number of linearized graph tokens per batch).")
    parser.add_argument('--device', type=str, default='cuda',
                        help="Device. 'cpu', 'cuda', 'cuda:<n>'.")
    parser.add_argument('--pred-path', type=Path, default=ROOT / 'data/tmp/inf-pred-sentences.txt',
                        help="Where to write predictions.")
    parser.add_argument('--gold-path', type=Path, default=ROOT / 'data/tmp/inf-gold-sentences.txt',
                        help="Where to write the gold file.")
    parser.add_argument('--add-to-graph-file', action='store_true')
    parser.add_argument('--use-reverse-decoder', action='store_true')
    parser.add_argument('--deinvert', action='store_true')
    parser.add_argument('--penman-linearization', action='store_true',
                        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--collapse-name-ops', action='store_true')
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--raw-graph', action='store_true')
    parser.add_argument('--return-all', action='store_true')
    args = parser.parse_args()
    device = torch.device(args.device)
    model, tokenizer = instantiate_model_and_tokenizer(
        args.model,
        dropout=0.,
        attention_dropout=0.,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
        collapse_name_ops=args.collapse_name_ops,
        init_reverse=args.use_reverse_decoder,
        raw_graph=args.raw_graph,
    )
    model.load_state_dict(torch.load(args.checkpoint, map_location='cpu')['model'])
    model.to(device)
    # generating text, not graphs, with the reverse decoder
    model.rev.amr_mode = False
    loader = instantiate_loader(
        args.datasets,
        tokenizer,
        batch_size=args.batch_size,
        evaluation=True, out='/tmp/a.txt',
        dereify=args.deinvert)
    loader.device = device
    pred_sentences = predict_sentences(loader, model.rev, tokenizer, beam_size=args.beam_size, return_all=args.return_all)
    if args.add_to_graph_file:
        # store predictions as 'snt-pred' metadata next to each graph
        graphs = loader.dataset.graphs
        for ss, g in zip(pred_sentences, graphs):
            if args.return_all:
                g.metadata['snt-pred'] = '\t\t'.join(ss)
            else:
                g.metadata['snt-pred'] = ss
        args.pred_path.write_text('\n\n'.join([encode(g) for g in graphs]))
    else:
        if args.return_all:
            # flatten the per-sentence beam lists
            pred_sentences = [s for ss in pred_sentences for s in ss]
        args.gold_path.write_text('\n'.join(loader.dataset.sentences))
        args.pred_path.write_text('\n'.join(pred_sentences))
        if not args.return_all:
            score = compute_bleu(loader.dataset.sentences, pred_sentences)
            print(f'BLEU: {score.score:.2f}')
| 3,988 | 45.383721 | 122 | py |
if __name__ == '__main__':
    # Strip obsolete 'backreferences' parameters from an old checkpoint and
    # save the cleaned state dict to a new file.
    from argparse import ArgumentParser
    import torch
    parser = ArgumentParser()
    parser.add_argument('legacy_checkpoint')
    parser.add_argument('patched_checkpoint')
    # fix: parse_args() was called twice (first result discarded)
    args = parser.parse_args()
    to_remove = []
    fixed = False
    w = torch.load(args.legacy_checkpoint, map_location='cpu')
    for name in w['model']:
        if 'backreferences' in name:
            fixed = True
            to_remove.append(name)
            print('Deleting parameters:', name)
    if not fixed:
        print('The checkpoint was fine as it was!')
    else:
        for name in to_remove:
            del w['model'][name]
        torch.save(w, args.patched_checkpoint)
| 730 | 24.206897 | 62 | py |
spring | spring-main/bin/predict_amrs_from_plaintext.py | from pathlib import Path
import penman
import torch
from tqdm import tqdm
from spring_amr.penman import encode
from spring_amr.utils import instantiate_model_and_tokenizer
def read_file_in_batches(path, batch_size=1000, max_length=100):
    """Load plaintext sentences from *path* and group them into batches.

    Empty lines and lines longer than *max_length* whitespace tokens are
    dropped. Batching is by token budget: a batch of k sentences costs
    k * (longest sentence length) and may not exceed *batch_size*; a single
    over-budget sentence is emitted alone. Returns
    ``(batch_iterator, number_of_kept_sentences)``; each batch element is an
    ``(index, sentence, n_tokens)`` triple, batches are longest-first.
    """
    samples = []
    for raw in Path(path).read_text().strip().splitlines():
        sentence = raw.strip()
        if not sentence:
            continue
        length = len(sentence.split())
        if length > max_length:
            continue
        samples.append((len(samples), sentence, length))

    def _batches(items):
        # longest-first so each batch's padding cost is tight
        items = sorted(items, key=lambda t: t[2], reverse=True)
        longest = 0
        current = []
        for sample in items:
            _, _, length = sample
            if length > batch_size:
                # over-budget sentence: flush the open batch, emit it alone
                if current:
                    yield current
                    longest = 0
                    current = []
                yield [sample]
            else:
                cost_now = longest * len(current)
                cost_if_added = max(longest, length) * (len(current) + 1)
                if 0 < cost_now <= batch_size and cost_if_added > batch_size:
                    yield current
                    longest = 0
                    current = []
                longest = max(longest, length)
                current.append(sample)
        if current:
            yield current

    return _batches(samples), len(samples)
if __name__ == '__main__':
    # Parse plaintext sentences into AMR graphs and print the encoded graphs
    # (with status/source/nsent/snt metadata) to stdout, one file at a time.
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description="Script to predict AMR graphs given sentences. LDC format as input.",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--texts', type=str, required=True, nargs='+',
                        help="Required. One or more files containing \\n-separated sentences.")
    parser.add_argument('--checkpoint', type=str, required=True,
                        help="Required. Checkpoint to restore.")
    parser.add_argument('--model', type=str, default='facebook/bart-large',
                        help="Model config to use to load the model class.")
    parser.add_argument('--beam-size', type=int, default=1,
                        help="Beam size.")
    parser.add_argument('--batch-size', type=int, default=1000,
                        help="Batch size (as number of linearized graph tokens per batch).")
    parser.add_argument('--penman-linearization', action='store_true',
                        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--restore-name-ops', action='store_true')
    parser.add_argument('--device', type=str, default='cuda',
                        help="Device. 'cpu', 'cuda', 'cuda:<n>'.")
    parser.add_argument('--only-ok', action='store_true')
    args = parser.parse_args()

    device = torch.device(args.device)
    model, tokenizer = instantiate_model_and_tokenizer(
        args.model,
        dropout=0.,
        attention_dropout=0.,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
    )
    model.load_state_dict(torch.load(args.checkpoint, map_location='cpu')['model'])
    model.to(device)
    model.eval()

    for path in tqdm(args.texts, desc='Files:'):
        iterator, nsent = read_file_in_batches(path, args.batch_size)
        with tqdm(desc=path, total=nsent) as bar:
            for batch in iterator:
                if not batch:
                    continue
                ids, sentences, _ = zip(*batch)
                x, _ = tokenizer.batch_encode_sentences(sentences, device=device)
                with torch.no_grad():
                    # parsing direction: text -> graph
                    model.amr_mode = True
                    out = model.generate(**x, max_length=512, decoder_start_token_id=0, num_beams=args.beam_size)
                bgraphs = []
                for idx, sent, tokk in zip(ids, sentences, out):
                    graph, status, (lin, backr) = tokenizer.decode_amr(tokk.tolist(), restore_name_ops=args.restore_name_ops)
                    if args.only_ok and ('OK' not in str(status)):
                        continue
                    graph.metadata['status'] = str(status)
                    graph.metadata['source'] = path
                    graph.metadata['nsent'] = str(idx)
                    graph.metadata['snt'] = sent
                    bgraphs.append((idx, graph))
                for i, g in bgraphs:
                    print(encode(g))
                    print()
                bar.update(len(sentences))
    # fix: removed the unreachable tail that followed an `exit(0)` call and
    # referenced an undefined `results` variable, along with a stale
    # commented-out reverse-scoring experiment
| 5,603 | 36.610738 | 125 | py |
spring | spring-main/bin/eval_bleu.py | import sys
import argparse
from typing import Iterable, Optional
import sacrebleu
import re
def argument_parser():
    """Parse command-line arguments for the BLEU/chrF++ evaluation script.

    Returns the parsed Namespace with ``in_tokens`` (required hypothesis
    file) and ``in_reference_tokens`` (reference file).
    """
    parser = argparse.ArgumentParser(description='Preprocess AMR data')
    # Multiple input parameters
    parser.add_argument(
        "--in-tokens",
        help="input tokens",
        required=True,
        type=str
    )
    parser.add_argument(
        "--in-reference-tokens",
        # fix: typo 'refrence' -> 'reference' in user-facing help text
        help="reference tokens to compute metric",
        type=str
    )
    return parser.parse_args()
def tokenize_sentence(text, debug=False):
    """Split common English contractions off their host word and collapse
    whitespace runs to single spaces (``debug`` is accepted but unused)."""
    with_splits = re.sub(r"('ll|n't|'m|'s|'d|'re)", r" \1", text)
    return re.sub(r"(\s+)", r" ", with_splits)
def raw_corpus_bleu(hypothesis: Iterable[str], reference: Iterable[str],
                    offset: Optional[float] = 0.01) -> float:
    """Corpus BLEU via sacrebleu, lowercased and smoothed by ``offset``.

    ``reference`` must already be wrapped as a list of reference streams
    (the caller passes ``[ref]``). Returns the numeric BLEU score.
    """
    bleu = sacrebleu.corpus_bleu(hypothesis, reference, smooth_value=offset,
                                 force=True, use_effective_order=False,
                                 lowercase=True)
    return bleu.score
def raw_corpus_chrf(hypotheses: Iterable[str],
                    references: Iterable[str]) -> float:
    """Corpus chrF via sacrebleu with the package's default order/beta.

    NOTE(review): despite the ``-> float`` annotation this returns the
    sacrebleu result object — the caller reads ``.score`` from it; the
    annotation should be corrected.
    """
    return sacrebleu.corpus_chrf(hypotheses, references,
                                 order=sacrebleu.CHRF_ORDER,
                                 beta=sacrebleu.CHRF_BETA,
                                 remove_whitespace=True)
def read_tokens(in_tokens_file):
    """Return the lines of *in_tokens_file* as a list (newlines preserved)."""
    with open(in_tokens_file) as handle:
        return handle.readlines()
if __name__ == '__main__':
    # Score hypothesis sentences against references with BLEU and chrF++.
    # Argument handling (fix: comment typo 'handlig')
    args = argument_parser()
    # read files
    ref = read_tokens(args.in_reference_tokens)
    hyp = read_tokens(args.in_tokens)
    # Evaluation is case-insensitive on both sides
    ref = [line.lower() for line in ref]
    # Hypotheses may carry a '<generate>' marker; score only the generated
    # tail (str.split leaves the line untouched when the marker is absent)
    hyp = [tokenize_sentence(line.split('<generate>')[-1].lower()) for line in hyp]
    # results
    bleu = raw_corpus_bleu(hyp, [ref])
    print('BLEU {:.2f}'.format(bleu))
    chrFpp = raw_corpus_chrf(hyp, ref).score * 100
    print('chrF++ {:.2f}'.format(chrFpp))
spring | spring-main/bin/blinkify.py | import blink.main_dense as main_dense
from logging import getLogger
from penman import Triple, Graph
from spring_amr.evaluation import write_predictions
from spring_amr.tokenization_bart import AMRBartTokenizer
import json
from pathlib import Path
from spring_amr.IO import read_raw_amr_data
from spring_amr.entities import read_entities
if __name__ == '__main__':
    # Entity linking: locate name mentions inside their sentences, query the
    # BLINK linker for Wikipedia titles, and rewrite the matched :wiki triples
    # before saving the graphs.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--datasets', nargs='+', required=True)
    parser.add_argument('--blink-models-dir', type=str, required=True)
    parser.add_argument('--out', type=str, required=True)
    parser.add_argument('--device', type=str, default='cuda',
                        help="Device. 'cpu', 'cuda', 'cuda:<n>'.")
    parser.add_argument('--all', action='store_true')
    parser.add_argument('--fast', action='store_true')
    args = parser.parse_args()
    graphs = read_raw_amr_data(args.datasets)
    sentences = [g.metadata['snt'] for g in graphs]
    # Build one BLINK query per name mention that can be found verbatim
    # (case-insensitively) in its sentence.
    for_blink = []
    sample_id = 0
    for sent, (i, with_wikis, name_to_entity, name_to_ops) in zip(sentences, read_entities(sentences, graphs, just_tagged=not args.all)):
        for name, parent in name_to_entity.items():
            nt, wiki = with_wikis[parent]
            ops_triples = name_to_ops[name]
            ops_triples = sorted(ops_triples, key=lambda t: t[1])
            ops_triples = [t[2].strip('"') for t in ops_triples]
            string = ' '.join(ops_triples)
            found = string.lower() in sent.lower()
            if found:
                left = sent.lower().find(string.lower())
                right = left + len(string)
                sample = {
                    "id": sample_id,
                    "label": "unknown",
                    "label_id": -1,
                    "context_left": sent[:left].strip().lower(),
                    "mention": string.lower(),
                    "context_right": sent[right:].strip().lower(),
                    "graph_n": i,
                    "triple_n": nt,
                }
                sample_id += 1
                for_blink.append(sample)
    main_dense.logger = logger = getLogger('BLINK')
    models_path = args.blink_models_dir # the path where you stored the BLINK models
    config = {
        "test_entities": None,
        "test_mentions": None,
        "interactive": False,
        "biencoder_model": models_path+"biencoder_wiki_large.bin",
        "biencoder_config": models_path+"biencoder_wiki_large.json",
        "entity_catalogue": models_path+"entity.jsonl",
        "entity_encoding": models_path+"all_entities_large.t7",
        "crossencoder_model": models_path+"crossencoder_wiki_large.bin",
        "crossencoder_config": models_path+"crossencoder_wiki_large.json",
        "top_k": 10,
        "show_url": False,
        "fast": args.fast, # set this to be true if speed is a concern
        "output_path": models_path+"logs/", # logging directory
        "faiss_index": None,#"flat",
        "index_path": models_path+"faiss_flat_index.pkl",
    }
    args_blink = argparse.Namespace(**config)
    models = main_dense.load_models(args_blink, logger=logger)
    _, _, _, _, _, predictions, scores, = main_dense.run(args_blink, logger, *models, test_data=for_blink, device=args.device)
    # Write the top non-'List of' prediction (or '-') into each mention's triple
    for s, pp in zip(for_blink, predictions):
        pp = [p for p in pp if not p.startswith('List of')]
        p = f'"{pp[0]}"' if pp else '-'
        p = p.replace(' ', '_')
        graph_n = s['graph_n']
        triple_n = s['triple_n']
        triples = [g for g in graphs[graph_n].triples]
        n, rel, w = triples[triple_n]
        triples[triple_n] = Triple(n, rel, p)
        g = Graph(triples)
        g.metadata = graphs[graph_n].metadata
        graphs[graph_n] = g
    write_predictions(args.out, AMRBartTokenizer, graphs)
| 3,827 | 39.294737 | 137 | py |
spring | spring-main/bin/predict_amrs.py | from pathlib import Path
import penman
import torch
from spring_amr import ROOT
from spring_amr.evaluation import predict_amrs, compute_smatch
from spring_amr.penman import encode
from spring_amr.utils import instantiate_loader, instantiate_model_and_tokenizer
if __name__ == '__main__':
    # Text-to-graph: parse sentences from AMR files into graphs, write gold
    # and predicted files, and report Smatch.
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description="Script to predict AMR graphs given sentences. LDC format as input.",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--datasets', type=str, required=True, nargs='+',
                        help="Required. One or more glob patterns to use to load amr files.")
    parser.add_argument('--checkpoint', type=str, required=True,
                        help="Required. Checkpoint to restore.")
    parser.add_argument('--model', type=str, default='facebook/bart-large',
                        help="Model config to use to load the model class.")
    parser.add_argument('--beam-size', type=int, default=1,
                        help="Beam size.")
    parser.add_argument('--batch-size', type=int, default=1000,
                        help="Batch size (as number of linearized graph tokens per batch).")
    parser.add_argument('--device', type=str, default='cuda',
                        help="Device. 'cpu', 'cuda', 'cuda:<n>'.")
    parser.add_argument('--pred-path', type=Path, default=ROOT / 'data/tmp/inf-pred.txt',
                        help="Where to write predictions.")
    parser.add_argument('--gold-path', type=Path, default=ROOT / 'data/tmp/inf-gold.txt',
                        help="Where to write the gold file.")
    parser.add_argument('--use-recategorization', action='store_true',
                        help="Predict using Zhang recategorization on top of our linearization (requires recategorized sentences in input).")
    parser.add_argument('--penman-linearization', action='store_true',
                        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--raw-graph', action='store_true')
    parser.add_argument('--restore-name-ops', action='store_true')
    parser.add_argument('--return-all', action='store_true')
    args = parser.parse_args()
    device = torch.device(args.device)
    model, tokenizer = instantiate_model_and_tokenizer(
        args.model,
        dropout=0.,
        attention_dropout=0.,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
        raw_graph=args.raw_graph,
    )
    # parsing direction: text -> graph
    model.amr_mode = True
    model.load_state_dict(torch.load(args.checkpoint, map_location='cpu')['model'])
    model.to(device)
    gold_path = args.gold_path
    pred_path = args.pred_path
    loader = instantiate_loader(
        args.datasets,
        tokenizer,
        batch_size=args.batch_size,
        evaluation=True, out=gold_path,
        use_recategorization=args.use_recategorization,
    )
    loader.device = device
    graphs = predict_amrs(
        loader,
        model,
        tokenizer,
        beam_size=args.beam_size,
        restore_name_ops=args.restore_name_ops,
        return_all=args.return_all,
    )
    if args.return_all:
        # flatten the per-sentence beam lists
        graphs = [g for gg in graphs for g in gg]
    pieces = [encode(g) for g in graphs]
    pred_path.write_text('\n\n'.join(pieces))
    if not args.return_all:
        score = compute_smatch(gold_path, pred_path)
        print(f'Smatch: {score:.3f}')
| 3,415 | 38.264368 | 125 | py |
spring | spring-main/bin/inspect_.py | import torch
import penman
from spring_amr.utils import instantiate_model_and_tokenizer
if __name__ == '__main__':
    # Interactive REPL: read one sentence from stdin, parse it to AMR, and
    # print the decode status, the graph, and its linearization. Loops until
    # interrupted.
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--checkpoint', type=str, required=True)
    parser.add_argument('--beam-size', type=int, default=1)
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--penman-linearization', action='store_true',
                        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--restore-name-ops', action='store_true')
    args = parser.parse_args()
    device = torch.device(args.device)
    model, tokenizer = instantiate_model_and_tokenizer(
        name='facebook/bart-large',
        checkpoint=args.checkpoint,
        dropout=0., attention_dropout=0.,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
    )
    model.eval().to(device)
    while True:
        sentence = [input('Sentence to parse:\n')]
        x, extra = tokenizer.batch_encode_sentences(sentence, device)
        with torch.no_grad():
            out = model.generate(**x, max_length=1024, decoder_start_token_id=0, num_beams=args.beam_size)
        out = out[0].tolist()
        graph, status, (lin, backr) = tokenizer.decode_amr(out, restore_name_ops=args.restore_name_ops)
        print('-' * 5)
        print('Status:', status)
        print('-' * 5)
        print('Graph:')
        print(penman.encode(graph))
        print('-' * 5)
        print('Linearization:')
        print(lin)
        print('\n')
| 1,673 | 37.045455 | 106 | py |
spring | spring-main/bin/train.py | from pathlib import Path
import torch
# torch.cuda.amp.autocast only exists in newer PyTorch releases. Fall back to
# a no-op context manager so the rest of the script can use `autocast(...)`
# unconditionally; `autocast_available` records whether real mixed precision
# is supported on this installation.
try:
    from torch.cuda.amp import autocast
    autocast_available = True
except ImportError:
    class autocast:
        # Same signature as the real autocast, but does nothing.
        def __init__(self, enabled=True): pass
        def __enter__(self): return self
        def __exit__(self, exc_type, exc_value, exc_traceback): pass
    autocast_available = False
from torch.cuda.amp.grad_scaler import GradScaler
import transformers
from spring_amr import ROOT
from spring_amr.dataset import reverse_direction
from spring_amr.optim import RAdam
from spring_amr.evaluation import write_predictions, compute_smatch, predict_amrs, predict_sentences, compute_bleu
from spring_amr.utils import instantiate_model_and_tokenizer, instantiate_loader
from ignite.engine import Engine, Events
from ignite.metrics import RunningAverage
from ignite.handlers import ModelCheckpoint, global_step_from_engine
def do_train(checkpoint=None, direction='amr', split_both_decoder=False, fp16=False):
    """Train the SPRING seq2seq model.

    Relies on the module-level ``config`` dict (loaded from YAML in __main__).

    Args:
        checkpoint: optional path to a fine-tuned checkpoint; restores both
            model and optimizer state.
        direction: 'amr' (text->AMR), 'text' (AMR->text) or 'both'.
        split_both_decoder: in 'both' mode, give the two decoders their own
            parameters (trained at 2x the base learning rate).
        fp16: enable mixed-precision training via torch.cuda.amp.
    """
    assert direction in ('amr', 'text', 'both')
    model, tokenizer = instantiate_model_and_tokenizer(
        config['model'],
        checkpoint=checkpoint,
        additional_tokens_smart_init=config['smart_init'],
        dropout=config['dropout'],
        attention_dropout=config['attention_dropout'],
        from_pretrained=config['warm_start'],
        init_reverse=split_both_decoder,
        penman_linearization=config['penman_linearization'],
        collapse_name_ops=config['collapse_name_ops'],
        use_pointer_tokens=config['use_pointer_tokens'],
        raw_graph=config.get('raw_graph', False)
    )
    print(model)
    print(model.config)

    if checkpoint is not None:
        print(f'Checkpoint restored ({checkpoint})!')

    if direction == 'both' and split_both_decoder:
        # Shared encoder parameters train at the base LR; the parameters that
        # belong only to the two decoders train at twice the base LR.
        params_dir_enc = list(model.model.encoder.parameters())
        params_dir_enc_check = {id(p) for p in params_dir_enc}
        params_dir_dec = set()
        params_dir_dec |= {p for p in model.model.decoder.parameters() if id(p) not in params_dir_enc_check}
        params_dir_dec |= {p for p in model.rev.model.decoder.parameters() if id(p) not in params_dir_enc_check}
        params_dir_dec = list(params_dir_dec)
        optimizer = RAdam(
            [{'params': params_dir_enc, 'lr': config['learning_rate']},
             {'params': params_dir_dec, 'lr': config['learning_rate'] * 2},],
            weight_decay=config['weight_decay'])
    else:
        optimizer = RAdam(
            model.parameters(),
            lr=config['learning_rate'],
            weight_decay=config['weight_decay'])

    if checkpoint is not None:
        optimizer.load_state_dict(torch.load(checkpoint)['optimizer'])

    if config['scheduler'] == 'cosine':
        scheduler = transformers.get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=config['warmup_steps'],
            num_training_steps=config['training_steps'])
    elif config['scheduler'] == 'constant':
        scheduler = transformers.get_constant_schedule_with_warmup(
            optimizer,
            num_warmup_steps=config['warmup_steps'])
    else:
        raise ValueError("config['scheduler'] must be 'cosine' or 'constant'")

    scaler = GradScaler(enabled=fp16)

    train_loader = instantiate_loader(
        config['train'],
        tokenizer,
        batch_size=config['batch_size'],
        evaluation=False,
        use_recategorization=config['use_recategorization'],
        remove_longer_than=config['remove_longer_than'],
        remove_wiki=config['remove_wiki'],
        dereify=config['dereify'],
    )

    dev_gold_path = ROOT / 'data/tmp/dev-gold.txt'
    dev_pred_path = ROOT / 'data/tmp/dev-pred.txt'
    dev_loader = instantiate_loader(
        config['dev'],
        tokenizer,
        batch_size=config['batch_size'],
        evaluation=True, out=dev_gold_path,
        use_recategorization=config['use_recategorization'],
        remove_wiki=config['remove_wiki'],
        dereify=config['dereify'],
    )

    if direction == 'amr':
        def train_step(engine, batch):
            model.train()
            x, y, extra = batch
            model.amr_mode = True
            with autocast(enabled=fp16):
                loss, *_ = model(**x, **y)
            # Divide by accum_steps so the gradients accumulated before each
            # optimizer step average to one batch worth of gradient.
            scaler.scale((loss / config['accum_steps'])).backward()
            return loss.item()

        @torch.no_grad()
        def eval_step(engine, batch):
            model.eval()
            x, y, extra = batch
            model.amr_mode = True
            loss, *_ = model(**x, **y)
            return loss.item()

    elif direction == 'text':
        def train_step(engine, batch):
            model.train()
            x, y, extra = batch
            x, y = reverse_direction(x, y)
            model.rev.amr_mode = False
            with autocast(enabled=fp16):
                loss, *_ = model.rev(**x, **y)
            scaler.scale((loss / config['accum_steps'])).backward()
            return loss.item()

        @torch.no_grad()
        def eval_step(engine, batch):
            model.eval()
            x, y, extra = batch
            x, y = reverse_direction(x, y)
            model.rev.amr_mode = False
            # BUG FIX: evaluation must go through the reverse (AMR->text)
            # model exactly like train_step above; the old code called the
            # forward model here.
            loss, *_ = model.rev(**x, **y)
            return loss.item()

    elif direction == 'both':
        def train_step(engine, batch):
            model.train()
            x, y, extra = batch
            # Forward (text->AMR) half-step.
            model.amr_mode = True
            with autocast(enabled=fp16):
                loss1, *_ = model(**x, **y)
            scaler.scale((loss1 / config['accum_steps'] * 0.5)).backward()
            loss1 = loss1.item()
            # Reverse (AMR->text) half-step.
            x, y = reverse_direction(x, y)
            model.rev.amr_mode = False
            with autocast(enabled=fp16):
                loss2, *_ = model.rev(**x, **y)
            scaler.scale((loss2 / config['accum_steps'] * 0.5)).backward()
            return loss1, loss2.item()

        @torch.no_grad()
        def eval_step(engine, batch):
            model.eval()
            x, y, extra = batch
            model.amr_mode = True
            loss1, *_ = model(**x, **y)
            x, y = reverse_direction(x, y)
            model.rev.amr_mode = False
            loss2, *_ = model.rev(**x, **y)
            return loss1.item(), loss2.item()

    else:
        raise ValueError

    trainer = Engine(train_step)
    evaluator = Engine(eval_step)

    @trainer.on(Events.STARTED)
    def update(engine):
        print('training started!')

    @trainer.on(Events.EPOCH_COMPLETED)
    @trainer.on(Events.ITERATION_COMPLETED(every=config['accum_steps']))
    def update(engine):
        # One optimizer step every accum_steps iterations (and at epoch end):
        # unscale for clipping, clip, step, then reset grads and advance LR.
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), config['grad_norm'])
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()
        scheduler.step()

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_trn_loss(engine):
        log_msg = f"training epoch: {engine.state.epoch}"
        if direction in ('amr', 'both'):
            log_msg += f" | loss_amr: {engine.state.metrics['trn_amr_loss']:.3f}"
        if direction in ('text', 'both'):
            log_msg += f" | loss_text: {engine.state.metrics['trn_text_loss']:.3f}"
        print(log_msg)

    @trainer.on(Events.EPOCH_COMPLETED)
    def run_dev_eval(engine):
        dev_loader.batch_size = config['batch_size']
        dev_loader.device = next(model.parameters()).device
        evaluator.run(dev_loader)

    if not config['best_loss']:
        # Expensive task metrics (Smatch / BLEU) are only computed when model
        # selection is not done on dev loss.
        if direction in ('amr', 'both'):
            @evaluator.on(Events.EPOCH_COMPLETED)
            def smatch_eval(engine):
                device = next(model.parameters()).device
                dev_loader.device = device
                graphs = predict_amrs(dev_loader, model, tokenizer, restore_name_ops=config['collapse_name_ops'])
                write_predictions(dev_pred_path, tokenizer, graphs)
                try:
                    smatch = compute_smatch(dev_gold_path, dev_pred_path)
                except Exception:
                    # Malformed predictions early in training can crash the
                    # scorer; treat that as Smatch 0 instead of dying.
                    smatch = 0.
                engine.state.metrics['dev_smatch'] = smatch

        if direction in ('text', 'both'):
            @evaluator.on(Events.EPOCH_COMPLETED)
            def smatch_eval(engine):
                device = next(model.parameters()).device
                dev_loader.device = device
                pred_sentences = predict_sentences(dev_loader, model.rev, tokenizer, beam_size=config['beam_size'])
                bleu = compute_bleu(dev_loader.dataset.sentences, pred_sentences)
                engine.state.metrics['dev_bleu'] = bleu.score

    @evaluator.on(Events.EPOCH_COMPLETED)
    def log_dev_loss(engine):
        log_msg = f"dev epoch: {trainer.state.epoch}"
        if direction in ('amr', 'both'):
            log_msg += f" | loss_amr: {engine.state.metrics['dev_amr_loss']:.3f}"
            if not config['best_loss']:
                log_msg += f" | smatch: {engine.state.metrics['dev_smatch']:.3f}"
        if direction in ('text', 'both'):
            log_msg += f" | loss_text: {engine.state.metrics['dev_text_loss']:.3f}"
            if not config['best_loss']:
                log_msg += f" | bleu: {engine.state.metrics['dev_bleu']:.3f}"
        print(log_msg)

    # Running averages of the per-iteration losses, exposed as engine metrics.
    if direction == 'amr':
        RunningAverage(output_transform=lambda out: out).attach(trainer, 'trn_amr_loss')
        RunningAverage(output_transform=lambda out: out).attach(evaluator, 'dev_amr_loss')
    elif direction == 'text':
        RunningAverage(output_transform=lambda out: out).attach(trainer, 'trn_text_loss')
        RunningAverage(output_transform=lambda out: out).attach(evaluator, 'dev_text_loss')
    elif direction == 'both':
        RunningAverage(output_transform=lambda out: out[0]).attach(trainer, 'trn_amr_loss')
        RunningAverage(output_transform=lambda out: out[1]).attach(trainer, 'trn_text_loss')
        RunningAverage(output_transform=lambda out: out[0]).attach(evaluator, 'dev_amr_loss')
        RunningAverage(output_transform=lambda out: out[1]).attach(evaluator, 'dev_text_loss')

    if config['log_wandb']:
        from ignite.contrib.handlers.wandb_logger import WandBLogger
        wandb_logger = WandBLogger(init=False)

        if direction == 'amr':
            wandb_logger.attach_output_handler(
                trainer,
                event_name=Events.ITERATION_COMPLETED,
                tag="iterations/trn_amr_loss",
                output_transform=lambda loss: loss
            )
        elif direction == 'text':
            wandb_logger.attach_output_handler(
                trainer,
                event_name=Events.ITERATION_COMPLETED,
                tag="iterations/trn_text_loss",
                output_transform=lambda loss: loss
            )
        if direction == 'both':
            wandb_logger.attach_output_handler(
                trainer,
                event_name=Events.ITERATION_COMPLETED,
                tag="iterations/trn_amr_loss",
                output_transform=lambda loss: loss[0]
            )
            wandb_logger.attach_output_handler(
                trainer,
                event_name=Events.ITERATION_COMPLETED,
                tag="iterations/trn_text_loss",
                output_transform=lambda loss: loss[1]
            )

        if direction == 'amr':
            metric_names_trn = ['trn_amr_loss']
            metric_names_dev = ['dev_amr_loss']
            if not config['best_loss']:
                metric_names_dev.append('dev_smatch')
        elif direction == 'text':
            metric_names_trn = ['trn_text_loss']
            metric_names_dev = ['dev_text_loss']
            if not config['best_loss']:
                metric_names_dev.append('dev_bleu')
        elif direction == 'both':
            metric_names_trn = ['trn_amr_loss', 'trn_text_loss']
            # BUG FIX: both dev losses are always attached, while dev_smatch /
            # dev_bleu only exist when best_loss is False; the old code had
            # dev_smatch unconditional and dev_text_loss conditional.
            metric_names_dev = ['dev_amr_loss', 'dev_text_loss']
            if not config['best_loss']:
                metric_names_dev.extend(['dev_smatch', 'dev_bleu'])

        wandb_logger.attach_output_handler(
            trainer,
            event_name=Events.EPOCH_COMPLETED,
            tag="epochs",
            metric_names=metric_names_trn,
            global_step_transform=lambda *_: trainer.state.iteration,
        )
        wandb_logger.attach_output_handler(
            evaluator,
            event_name=Events.EPOCH_COMPLETED,
            tag="epochs",
            metric_names=metric_names_dev,
            global_step_transform=lambda *_: trainer.state.iteration,
        )

        @trainer.on(Events.ITERATION_COMPLETED)
        def wandb_log_lr(engine):
            wandb.log({'lr': scheduler.get_last_lr()[0]}, step=engine.state.iteration)

    if config['save_checkpoints']:
        # Keep the single best checkpoint according to the selection metric.
        if direction in ('amr', 'both'):
            if config['best_loss']:
                prefix = 'best-loss-amr'
                score_function = lambda x: 1 / evaluator.state.metrics['dev_amr_loss']
            else:
                prefix = 'best-smatch'
                score_function = lambda x: evaluator.state.metrics['dev_smatch']
        else:
            if config['best_loss']:
                prefix = 'best-loss-text'
                # BUG FIX: in text-only mode only dev_text_loss is attached;
                # the old code read dev_amr_loss and raised KeyError.
                score_function = lambda x: 1 / evaluator.state.metrics['dev_text_loss']
            else:
                prefix = 'best-bleu'
                score_function = lambda x: evaluator.state.metrics['dev_bleu']

        to_save = {'model': model, 'optimizer': optimizer}
        if config['log_wandb']:
            where_checkpoints = str(wandb_logger.run.dir)
        else:
            root = ROOT/'runs'
            try:
                root.mkdir()
            except Exception:
                pass
            where_checkpoints = root/str(len(list(root.iterdir())))
            try:
                where_checkpoints.mkdir()
            except Exception:
                pass
            where_checkpoints = str(where_checkpoints)

        print(where_checkpoints)
        handler = ModelCheckpoint(
            where_checkpoints,
            prefix,
            n_saved=1,
            create_dir=True,
            score_function=score_function,
            global_step_transform=global_step_from_engine(trainer),
        )
        evaluator.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)

    model.cuda()
    device = next(model.parameters()).device
    train_loader.device = device
    trainer.run(train_loader, max_epochs=config['max_epochs'])
if __name__ == '__main__':
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    import yaml

    import wandb

    parser = ArgumentParser(
        description="Trainer script",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--direction', type=str, default='amr', choices=['amr', 'text', 'both'],
                        help='Train a uni- (amr, text) or bidirectional (both).')
    parser.add_argument('--split-both-decoder', action='store_true')
    parser.add_argument('--config', type=Path, default=ROOT/'configs/sweeped.yaml',
                        help='Use the following config for hparams.')
    parser.add_argument('--checkpoint', type=str,
                        help='Warm-start from a previous fine-tuned checkpoint.')
    parser.add_argument('--fp16', action='store_true')
    args, unknown = parser.parse_known_args()

    # BUG FIX: the guard was inverted — it raised exactly when autocast WAS
    # available. fp16 is only impossible when autocast could not be imported.
    if args.fp16 and not autocast_available:
        raise ValueError('You\'ll need a newer PyTorch version to enable fp16 training.')

    with args.config.open() as y:
        config = yaml.load(y, Loader=yaml.FullLoader)

    if config['log_wandb']:
        # Hand hyper-parameter tracking over to wandb; `config` becomes the
        # wandb config proxy from here on.
        wandb.init(
            entity="SOME-RUNS",
            project="SOME-PROJECT",
            config=config,
            dir=str(ROOT / 'runs/'))
        config = wandb.config

    print(config)

    if args.checkpoint:
        checkpoint = args.checkpoint
    else:
        checkpoint = None

    do_train(
        checkpoint=checkpoint,
        direction=args.direction,
        split_both_decoder=args.split_both_decoder,
        fp16=args.fp16,
    )
) | 15,893 | 36.574468 | 115 | py |
AOE-Net | AOE-Net-main/eval_det_thumos.py | import numpy as np
import json
import pickle
from argparse import ArgumentParser
# The 20 THUMOS'14 temporal-detection action classes, keyed by their 1-based
# class index in the video-level classification score matrix (gen_detection
# selects column index-1 for each entry).
thumos_class = {
    7: 'BaseballPitch',
    9: 'BasketballDunk',
    12: 'Billiards',
    21: 'CleanAndJerk',
    22: 'CliffDiving',
    23: 'CricketBowling',
    24: 'CricketShot',
    26: 'Diving',
    31: 'FrisbeeCatch',
    33: 'GolfSwing',
    36: 'HammerThrow',
    40: 'HighJump',
    45: 'JavelinThrow',
    51: 'LongJump',
    68: 'PoleVault',
    79: 'Shotput',
    85: 'SoccerPenalty',
    92: 'TennisSwing',
    93: 'ThrowDiscus',
    97: 'VolleyballSpiking',
}
def load_pkl(filename):
    """Deserialize and return the object stored in pickle file *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def add_topk_detection(proposals, class_scores, class_names, k=2, max_proposals=50000):
    """Cross-multiply class-agnostic proposals with the top-k video classes.

    Each proposal row is (start, end, proposal_score); every proposal is
    paired with each of the k highest-scoring classes, its score multiplied
    by that class score. Returns at most *max_proposals* detection dicts
    ({'segment', 'score', 'label'}) sorted by descending score.
    """
    ranked_classes = class_scores.argsort()[-k:][::-1]
    dets = []
    for rank in range(k):
        cls_idx = ranked_classes[rank]
        cls_score = class_scores[cls_idx]
        label = class_names[cls_idx]
        for prop in proposals:
            dets.append({
                'segment': prop[:2].tolist(),
                'score': prop[2] * cls_score,
                'label': label,
            })
    dets.sort(key=lambda d: d['score'], reverse=True)
    return dets[:max_proposals]
def gen_detection(prop_file, cls_file, out_file):
    """Fuse proposals with video-level classification into a detection file.

    Loads pickled per-video proposals from *prop_file* and the classification
    score matrix (rows indexed by video number) from *cls_file*, keeps only
    the 20 THUMOS columns, and writes an ActivityNet-style detection JSON to
    *out_file*.
    """
    proposals = load_pkl(prop_file)
    labels = list(thumos_class.values())
    keep_cols = np.array([idx - 1 for idx in thumos_class])
    video_scores = np.load(cls_file)[:, keep_cols]

    results = {}
    for video_name, props in proposals.items():
        # Video names end in a 1-based counter, e.g. video_test_0000001.
        row = int(video_name.split('_')[-1]) - 1
        results[video_name] = add_topk_detection(props, video_scores[row], labels)

    payload = {
        'version': 'THUMOS14',
        'external_data': 'used anet evaluation code',
        'results': results,
    }
    with open(out_file, 'w') as out:
        json.dump(payload, out)
'''
detections = {}
for video_name, results in proposals.items():
video_id = int(video_name.split('_')[-1]) - 1
class_scores = classifications[video_id]
detections[video_name] = add_topk_detection(results, class_scores, class_names)
with open(out_file, 'w') as out:
lines = []
for video_name, dets in detections.items():
for det in dets:
line = [video_name] + det['segment'] + [det['label'], det['score']]
lines.append(' '.join([str(x) for x in line]))
out.write('\n'.join(lines))
'''
def evaluate_detections(cfg, out_file='results/thumos_det.json', verbose=True, check_status=False):
    """Build THUMOS14 detections from the paths in *cfg* and score them.

    Runs gen_detection, then the ActivityNet-style detection evaluator on the
    cfg's split; prints per-tIoU mAP and returns the average mAP over the
    tIoU thresholds 0.3-0.7.
    """
    prop_path = cfg.DATA.RESULT_PATH
    cls_path = cfg.DATA.CLASSIFICATION_PATH
    if cfg.DATA.DETECTION_GT_FILE is None:
        gt_path = cfg.DATA.ANNOTATION_FILE
    else:
        gt_path = cfg.DATA.DETECTION_GT_FILE
    subset = cfg.VAL.SPLIT
    out_path = prop_path if out_file is None else out_file

    print("Detection processing start")
    gen_detection(prop_path, cls_path, out_path)
    print("Detection processing finished")

    from evaluation_anet.eval_detection import ANETdetection
    evaluator = ANETdetection(
        ground_truth_filename=gt_path,
        prediction_filename=out_path,
        subset=subset, tiou_thresholds=[0.3, 0.4, 0.5, 0.6, 0.7],
        verbose=verbose, check_status=check_status)
    evaluator.evaluate()

    per_tiou = [f'mAP@{t:.2f}: {mAP*100:.3f}' for t, mAP in zip(evaluator.tiou_thresholds, evaluator.mAP)]
    print('Detection: average-mAP {:.3f}.\n'.format(evaluator.average_mAP * 100) + '\n'.join(per_tiou))
    return evaluator.average_mAP
def get_det_scores(prop_file, cls_file, gt_file, out_file=None, verbose=False, check_status=False):
    """Same pipeline as evaluate_detections, but driven by explicit file
    paths and fixed to the 'testing' subset. Returns the average mAP."""
    out_path = prop_file if out_file is None else out_file

    print("Detection processing start")
    gen_detection(prop_file, cls_file, out_path)
    print("Detection processing finished")

    from evaluation_anet.eval_detection import ANETdetection
    evaluator = ANETdetection(
        ground_truth_filename=gt_file,
        prediction_filename=out_path,
        subset='testing', tiou_thresholds=[0.3, 0.4, 0.5, 0.6, 0.7],
        verbose=verbose, check_status=check_status)
    evaluator.evaluate()

    per_tiou = [f'mAP@{t:.2f}: {mAP*100:.3f}' for t, mAP in zip(evaluator.tiou_thresholds, evaluator.mAP)]
    print('Detection: average-mAP {:.3f}.\n'.format(evaluator.average_mAP * 100) + '\n'.join(per_tiou))
    return evaluator.average_mAP
if __name__ == "__main__":
    # CLI entry point: score pre-computed proposals against THUMOS14 ground
    # truth using the 'testing' subset.
    parser = ArgumentParser()
    parser.add_argument('-p', '--proposal-file', type=str, default='results/results.pkl')
    parser.add_argument('-c', '--classification-file', type=str, default='results/uNet_test.npy')
    parser.add_argument('-o', '--output-file', type=str, default='evaluation_thumos/detection_eval/detection_results.txt')
    parser.add_argument('-g', '--groundtruth-file', type=str, default='../datasets/thumos14/thumos_annotations/thumos_det_gt.json')
    args = parser.parse_args()
    get_det_scores(
        args.proposal_file,
        args.classification_file,
        args.groundtruth_file,
        args.output_file,
        verbose=True,
        check_status=True)
| 5,425 | 34.464052 | 131 | py |
AOE-Net | AOE-Net-main/post_processing.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import multiprocessing as mp
from tqdm import tqdm
from collections import defaultdict
import pickle as pkl
from utils import iou_with_anchors
def load_json(file):
    """Read *file* as JSON and return the parsed object."""
    with open(file) as source:
        return json.load(source)
def save_json(file, obj):
    """Serialize *obj* as JSON into *file*, overwriting any existing content."""
    with open(file, 'w') as sink:
        sink.write(json.dumps(obj))
def save_pickle(file, obj):
    """Pickle *obj* into *file*, overwriting any existing content."""
    with open(file, 'wb') as sink:
        pkl.dump(obj, sink)
def get_durations(annotations):
    """Collect the master duration of every video group.

    Snippet ids look like ``<video>-<chunk>``; all chunks of one video must
    agree on ``master_duration``.

    Args:
        annotations: mapping of snippet id to annotation dict containing a
            'master_duration' entry.

    Returns:
        dict mapping video name (snippet id without the chunk suffix) to its
        duration.

    Raises:
        AssertionError: if two chunks of the same video report different
            durations.
    """
    durations = {}
    for video_id, annot in annotations.items():
        video_name = video_id.split('-')[0]
        # BUG FIX: the membership test must use video_name — the key actually
        # stored in `durations`. The old code tested video_id (which is never
        # a key), so every chunk overwrote the duration and the consistency
        # assert below was dead code.
        if video_name not in durations:
            durations[video_name] = annot['master_duration']
        else:
            assert annot['master_duration'] == durations[video_name]
    return durations
def getDatasetDict(annot_file, split):
    """Return the 'database' entries of *annot_file* whose subset is *split*."""
    with open(annot_file) as fp:
        database = json.load(fp)['database']
    return {video_id: anno for video_id, anno in database.items()
            if anno['subset'] == split}
def standardize_results_anet(video_dict):
    """Wrap per-video proposal lists in the official ActivityNet-1.3 result
    schema (version / external_data header plus a 'results' mapping)."""
    return {
        'version': 'ACTIVITY_NET_1.3',
        'external_data': {
            'used': 'true',
            'details': 'Backbone 3D Network are trained on Kinetics training set.'
        },
        'results': dict(video_dict),
    }
def standardize_results_thumos(video_dict):
    """Convert per-video proposal dicts into (start, end, score) ndarrays,
    one row per proposal."""
    return {
        video_id: np.array([[p['segment'][0], p['segment'][1], p['score']]
                            for p in proposals])
        for video_id, proposals in video_dict.items()
    }
class PostProcessor(object):
    """Turn raw per-video BMN outputs (feather files under ./results/outputs/)
    into final NMS-filtered proposal lists and save them to disk.

    Works for both ActivityNet ('anet') and THUMOS ('thumos'); THUMOS videos
    are split into '<video>-<chunk>' snippets that are re-joined before NMS.
    """

    def __init__(self, cfg, split):
        self.result_path = cfg.DATA.RESULT_PATH
        # Soft-NMS hyper-parameters (Gaussian decay alpha plus the low/high
        # suppression thresholds interpolated by proposal width).
        self.snms_alpha = cfg.BMN.POST_PROCESS.SOFT_NMS_ALPHA
        self.snms_t1 = cfg.BMN.POST_PROCESS.SOFT_NMS_LOW_THRESHOLD
        self.snms_t2 = cfg.BMN.POST_PROCESS.SOFT_NMS_HIGH_THRESHOLD
        self.nms_thresh = cfg.BMN.POST_PROCESS.HARD_NMS_THRESHOLD
        self.max_proposals = cfg.BMN.POST_PROCESS.MAX_PROPOSALS
        self.n_threads = cfg.BMN.POST_PROCESS.NUM_THREADS
        self.video_dict = getDatasetDict(cfg.DATA.ANNOTATION_FILE, split)
        self.video_list = list(self.video_dict.keys())  # [:100]
        # Manager dict so worker processes can write results back to the parent.
        self.result_dict = mp.Manager().dict()
        self.dataset = cfg.DATASET
        self.nms_func = self.hard_nms if cfg.BMN.POST_PROCESS.USE_HARD_NMS else self.soft_nms
        if self.dataset == 'anet':
            self.standardize_results = standardize_results_anet
            self.get_duration = self.get_duration_anet
            self.save_result = save_json
        elif self.dataset == 'thumos':
            self.standardize_results = standardize_results_thumos
            self.durations = get_durations(self.video_dict)
            self.get_duration = self.get_duration_thumos
            self.save_result = save_pickle
            # Group the '<video>-<chunk>' snippets by video and order each
            # group by its integer chunk index.
            self.video_groups = defaultdict(list)
            for video_name in self.video_list:
                self.video_groups[video_name.split('-')[0]].append(video_name)
            for group_name in self.video_groups:
                self.video_groups[group_name] = sorted(self.video_groups[group_name], key=lambda x: int(x.split('-')[-1]))

    def get_duration_anet(self, video_name):
        # ActivityNet stores the duration directly on the annotation entry.
        return self.video_dict[video_name]['duration']

    def get_duration_thumos(self, video_name):
        # THUMOS durations were pre-collected per video group in __init__.
        return self.durations[video_name]

    def hard_nms(self, df):
        '''
        Classic hard NMS: greedily keep the highest-scoring proposal and drop
        every remaining proposal whose IoU with it exceeds self.nms_thresh.

        df: proposals generated by network (columns xmin, xmax, score);
        returns a new DataFrame with columns score/xmin/xmax.
        '''
        tstart = list(df.xmin.values[:])
        tend = list(df.xmax.values[:])
        tscore = list(df.score.values[:])
        t1 = np.array(tstart)
        t2 = np.array(tend)
        scores = np.array(tscore)
        durations = t2 - t1
        order = scores.argsort()[::-1]
        keep = []
        while order.size > 0:  # and len(keep) < self.max_proposals:
            i = order[0]
            keep.append(i)
            # IoU of the kept proposal against all remaining candidates.
            tt1 = np.maximum(t1[i], t1[order[1:]])
            tt2 = np.minimum(t2[i], t2[order[1:]])
            intersection = tt2 - tt1
            IoU = intersection / (durations[i] + durations[order[1:]] - intersection).astype(float)
            inds = np.where(IoU <= self.nms_thresh)[0]
            order = order[inds + 1]
        rscore = [tscore[i] for i in keep]
        rstart = [tstart[i] for i in keep]
        rend = [tend[i] for i in keep]
        newDf = pd.DataFrame()
        newDf['score'] = rscore
        newDf['xmin'] = rstart
        newDf['xmax'] = rend
        return newDf

    def soft_nms(self, df):
        '''
        Soft NMS: instead of discarding overlapping proposals, decay their
        scores with a Gaussian of the IoU. The suppression threshold is
        interpolated between snms_t1 and snms_t2 by the proposal width.

        df: proposals generated by network;
        alpha: alpha value of Gaussian decaying function;
        t1, t2: threshold for soft nms.
        '''
        df = df.sort_values(by="score", ascending=False)
        tstart = list(df.xmin.values[:])
        tend = list(df.xmax.values[:])
        tscore = list(df.score.values[:])
        rstart, rend, rscore = [], [], []
        while len(tscore) > 1 and len(rscore) <= self.max_proposals:
            max_index = tscore.index(max(tscore))
            tmp_iou_list = iou_with_anchors(
                np.array(tstart),
                np.array(tend), tstart[max_index], tend[max_index])
            for idx in range(0, len(tscore)):
                if idx != max_index:
                    tmp_iou = tmp_iou_list[idx]
                    tmp_width = tend[max_index] - tstart[max_index]
                    if tmp_iou > self.snms_t1 + (self.snms_t2 - self.snms_t1) * tmp_width:
                        tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) / self.snms_alpha)
            # Move the current best proposal to the result lists.
            rstart.append(tstart[max_index])
            rend.append(tend[max_index])
            rscore.append(tscore[max_index])
            tstart.pop(max_index)
            tend.pop(max_index)
            tscore.pop(max_index)
        newDf = pd.DataFrame()
        newDf['score'] = rscore
        newDf['xmin'] = rstart
        newDf['xmax'] = rend
        return newDf

    def video_post_process(self, video_list):
        """Worker body: NMS each video's proposals, rescale the normalized
        [0,1] boundaries to seconds, and store the top proposals in the
        shared result dict."""
        for video_name in video_list:
            df = pd.read_feather("./results/outputs/" + video_name + ".feather")
            if len(df) > 1:
                df = self.nms_func(df)
            df = df.sort_values(by="score", ascending=False)
            video_duration = self.get_duration(video_name)
            proposal_list = []
            for j in range(min(self.max_proposals, len(df))):
                tmp_proposal = {}
                tmp_proposal["score"] = df.score.values[j]
                # Clamp to [0, 1] before converting to absolute seconds.
                tmp_proposal["segment"] = [
                    max(0, df.xmin.values[j]) * video_duration,
                    min(1, df.xmax.values[j]) * video_duration
                ]
                proposal_list.append(tmp_proposal)
            self.result_dict[video_name] = proposal_list

    def __call__(self):
        if self.dataset == 'thumos':
            # Re-join each video's snippet outputs into one feather file, then
            # process videos with a hand-rolled pool of n_threads processes,
            # longest videos first.
            video_lengths = {}
            for group_name, video_sequence in self.video_groups.items():
                video_df = [
                    pd.read_feather('./results/outputs/' + video_name + '.feather')
                    for video_name in video_sequence]
                video_df = pd.concat(video_df, ignore_index=True)
                video_df.to_feather('./results/outputs/' + group_name + '.feather')
                video_lengths[group_name] = len(video_df)
            video_list = sorted(self.video_groups.keys(), key=lambda name: video_lengths[name], reverse=True)
            processes = []
            for video_name in tqdm(video_list):
                if len(processes) < self.n_threads:
                    processes.append(mp.Process(target=self.video_post_process, args=([video_name],)))
                    processes[-1].start()
                else:
                    # Busy-wait for a free slot, then reuse it.
                    process_done = False
                    while not process_done:
                        for j in range(self.n_threads):
                            if not processes[j].is_alive():
                                processes[j].join()
                                processes[j] = mp.Process(target=self.video_post_process, args=([video_name],))
                                processes[j].start()
                                process_done = True
                                break
            for p in processes:
                p.join()
        elif self.dataset == 'anet':
            # Split the video list into n_threads contiguous segments, one
            # worker process per segment.
            video_list = self.video_list
            linspace = np.linspace(0, len(video_list), self.n_threads + 1)
            thrd_segms = [(int(linspace[i]), int(linspace[i + 1])) for i in range(self.n_threads)]
            processes = []
            for s_thrd, e_thrd in thrd_segms:
                tmp_video_list = video_list[s_thrd:e_thrd]
                processes.append(mp.Process(target=self.video_post_process, args=(tmp_video_list,)))
                processes[-1].start()
            for p in processes:
                p.join()
        # Convert to the dataset-specific output schema and write it out.
        self.result_dict = self.standardize_results(dict(self.result_dict))
        self.save_result(self.result_path, self.result_dict)
| 9,246 | 35.405512 | 122 | py |
AOE-Net | AOE-Net-main/eval_anet.py | # -*- coding: utf-8 -*-
import sys
sys.path.append('./evaluation_anet')
from eval_proposal import ANETproposal
import matplotlib.pyplot as plt
import numpy as np
def run_evaluation(ground_truth_filename, proposal_filename,
                   max_avg_nr_proposals=100,
                   tiou_thresholds=np.linspace(0.5, 0.95, 10),
                   subset='validation'):
    """Run the ActivityNet proposal evaluator.

    Returns a tuple (average_nr_proposals, average_recall, recall, auc_score)
    taken from the evaluator after running it on *subset*.
    """
    evaluator = ANETproposal(ground_truth_filename, proposal_filename,
                             tiou_thresholds=tiou_thresholds,
                             max_avg_nr_proposals=max_avg_nr_proposals,
                             subset=subset, verbose=True, check_status=True)
    auc_score = evaluator.evaluate()
    return (evaluator.proposals_per_video, evaluator.avg_recall,
            evaluator.recall, auc_score)
def plot_metric(cfg, average_nr_proposals, average_recall, recall, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """Plot recall curves vs. average number of proposals per video.

    Draws one dashed curve per every other tIoU threshold plus the solid
    averaged curve, annotates each with its area under the curve, and saves
    the figure to cfg.DATA.FIGURE_PATH.
    """
    fn_size = 14
    plt.figure(num=None, figsize=(12, 8))
    ax = plt.subplot(1, 1, 1)
    colors = ['k', 'r', 'yellow', 'b', 'c', 'm', 'b', 'pink', 'lawngreen', 'indigo']

    # Area under each per-threshold recall curve (used in the legend).
    area_under_curve = np.zeros_like(tiou_thresholds)
    for i in range(recall.shape[0]):
        area_under_curve[i] = np.trapz(recall[i], average_nr_proposals)

    for idx, tiou in enumerate(tiou_thresholds[::2]):
        ax.plot(average_nr_proposals, recall[2 * idx, :], color=colors[idx + 1],
                label="tiou=[" + str(tiou) + "], area=" + str(int(area_under_curve[2 * idx] * 100) / 100.),
                linewidth=4, linestyle='--', marker=None)

    # Plots Average Recall vs Average number of proposals.
    ax.plot(average_nr_proposals, average_recall, color=colors[0],
            label="tiou = 0.5:0.05:0.95," + " area=" + str(int(np.trapz(average_recall, average_nr_proposals) * 100) / 100.),
            linewidth=4, linestyle='-', marker=None)

    handles, labels = ax.get_legend_handles_labels()
    ax.legend([handles[-1]] + handles[:-1], [labels[-1]] + labels[:-1], loc='best')

    plt.ylabel('Average Recall', fontsize=fn_size)
    plt.xlabel('Average Number of Proposals per Video', fontsize=fn_size)
    # BUG FIX: the `b=` keyword of grid() was deprecated in Matplotlib 3.5 and
    # removed in 3.6 (renamed to `visible`); pass it positionally, which is
    # valid on every Matplotlib version.
    plt.grid(True, which="both")
    plt.ylim([0, 1.0])
    plt.setp(ax.get_xticklabels(), fontsize=fn_size)
    plt.setp(ax.get_yticklabels(), fontsize=fn_size)
    # plt.show()
    plt.savefig(cfg.DATA.FIGURE_PATH)
def evaluate_proposals(cfg):
    """Evaluate the proposal results referenced by *cfg* on the validation
    split, save the recall figure, print AR@{1,5,10,100}, and return the AUC
    score."""
    avg_nr_proposals, avg_recall, recall, auc_score = run_evaluation(
        cfg.DATA.ANNOTATION_FILE,
        cfg.DATA.RESULT_PATH,
        max_avg_nr_proposals=100,
        tiou_thresholds=np.linspace(0.5, 0.95, 10),
        subset='validation')
    plot_metric(cfg, avg_nr_proposals, avg_recall, recall)
    # Average recall at fixed proposal counts (columns of the recall matrix).
    print("AR@1 is \t", np.mean(recall[:, 0]))
    print("AR@5 is \t", np.mean(recall[:, 4]))
    print("AR@10 is \t", np.mean(recall[:, 9]))
    print("AR@100 is \t", np.mean(recall[:, -1]))
    return auc_score
if __name__ == '__main__':
    # Standalone usage: evaluate proposals with the default configuration.
    from config.defaults import get_cfg
    cfg = get_cfg()
    evaluate_proposals(cfg)
| 3,295 | 40.2 | 125 | py |
AOE-Net | AOE-Net-main/main.py | import sys
import os
import argparse
from tqdm import tqdm
import pandas as pd
import torch
import torch.nn.parallel
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from models.model import EventDetection
from dataset import VideoDataSet, Collator
from loss_function import bmn_loss_func, get_mask
from post_processing import PostProcessor, getDatasetDict
from utils import ProposalGenerator
from eval_anet import evaluate_proposals as anet_evaluate_prop
from eval_thumos import evaluate_proposals as thumos_evaluate_prop
from eval_det_anet import evaluate_detections as anet_evaluate_det
from eval_det_thumos import evaluate_detections as thumos_evaluate_det
from config.defaults import get_cfg
sys.dont_write_bytecode = True
class Solver:
def __init__(self, cfg):
    """Build the detection model, optimizer, collators and evaluation hook.

    Args:
        cfg: project config node; reads MODE, GPU_IDS, DATASET, EVAL_TYPE,
            TRAIN.*, TEST.CHECKPOINT_PATH and DATA.* entries.
    """
    self.cfg = cfg
    self.model = EventDetection(cfg).cuda()
    self.model = torch.nn.DataParallel(self.model, device_ids=cfg.GPU_IDS)
    if cfg.MODE not in ['train', 'training']:  # TODO: add condition for resume feature.
        # Inference/eval mode: restore weights from the test checkpoint.
        checkpoint = torch.load(cfg.TEST.CHECKPOINT_PATH)
        print('Loaded model at epoch %d.' % checkpoint['epoch'])
        self.model.load_state_dict(checkpoint['state_dict'])
    if cfg.MODE in ['train', 'training']:
        # Optimize only trainable parameters.
        self.optimizer = optim.AdamW(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY)
        #self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.1)
    self.train_collator = Collator(cfg, 'train')
    self.test_collator = Collator(cfg, 'test')
    self.temporal_dim = cfg.DATA.TEMPORAL_DIM
    self.max_duration = cfg.DATA.MAX_DURATION
    # Pick the dataset/task-specific evaluation routine; stays None (with a
    # warning) for unsupported combinations.
    self.evaluate_func = None
    if cfg.DATASET == 'anet':
        if cfg.EVAL_TYPE == 'proposal':
            self.evaluate_func = anet_evaluate_prop
        elif cfg.EVAL_TYPE == 'detection':
            self.evaluate_func = anet_evaluate_det
    elif cfg.DATASET == 'thumos':
        if cfg.EVAL_TYPE == 'proposal':
            self.evaluate_func = thumos_evaluate_prop
        elif cfg.EVAL_TYPE == 'detection':
            self.evaluate_func = thumos_evaluate_det
    if self.evaluate_func is None:
        print('Evaluation function [{}] of dataset [{}] is not implemented'.format(cfg.EVAL_TYPE, cfg.DATASET))
def train_epoch(self, data_loader, bm_mask, epoch, writer):
cfg = self.cfg
self.model.train()
self.optimizer.zero_grad()
loss_names = ['Loss', 'TemLoss', 'PemLoss Regression', 'PemLoss Classification']
epoch_losses = [0] * 4
period_losses = [0] * 4
last_period_size = len(data_loader) % cfg.TRAIN.STEP_PERIOD
last_period_start = cfg.TRAIN.STEP_PERIOD * (len(data_loader) // cfg.TRAIN.STEP_PERIOD)
for n_iter, (env_features, agent_features, agent_masks, obj_features, obj_masks, label_confidence, label_start, label_end) in enumerate(tqdm(data_loader)):
env_features = env_features.cuda() if cfg.USE_ENV else None
agent_features = agent_features.cuda() if cfg.USE_AGENT else None
agent_masks = agent_masks.cuda() if cfg.USE_AGENT else None
obj_features = obj_features.cuda() if cfg.USE_OBJ else None
obj_masks = obj_masks.cuda() if cfg.USE_OBJ else None
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = self.model(env_features, agent_features, agent_masks, obj_features, obj_masks)
losses = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask)
period_size = cfg.TRAIN.STEP_PERIOD if n_iter < last_period_start else last_period_size
total_loss = losses[0] / period_size
total_loss.backward()
losses = [l.cpu().detach().numpy() / cfg.TRAIN.STEP_PERIOD for l in losses]
period_losses = [l + pl for l, pl in zip(losses, period_losses)]
if (n_iter + 1) % cfg.TRAIN.STEP_PERIOD != 0 and n_iter != (len(data_loader) - 1):
continue
self.optimizer.step()
self.optimizer.zero_grad()
epoch_losses = [el + pl for el, pl in zip(epoch_losses, period_losses)]
write_step = epoch * len(data_loader) + n_iter
for i, loss_name in enumerate(loss_names):
writer.add_scalar(loss_name, period_losses[i], write_step)
period_losses = [0] * 4
print(
"BMN training loss(epoch %d): tem_loss: %.03f, pem reg_loss: %.03f, pem cls_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_losses[1] / (n_iter + 1),
epoch_losses[2] / (n_iter + 1),
epoch_losses[3] / (n_iter + 1),
epoch_losses[0] / (n_iter + 1)))
def train(self, n_epochs):
exp_id = max([0] + [int(run.split('_')[-1]) for run in os.listdir(self.cfg.TRAIN.LOG_DIR)]) + 1
log_dir = os.path.join(self.cfg.TRAIN.LOG_DIR, 'run_' + str(exp_id))
if not os.path.isdir(os.path.dirname(log_dir)):
os.makedirs(os.path.dirname(log_dir))
writer = SummaryWriter(log_dir)
checkpoint_dir = os.path.join(self.cfg.MODEL.CHECKPOINT_DIR, 'checkpoint_' + str(exp_id))
assert not os.path.isdir(checkpoint_dir), 'Checkpoint directory %s has already been created.' % checkpoint_dir
os.makedirs(checkpoint_dir)
train_loader = torch.utils.data.DataLoader(
VideoDataSet(self.cfg, split=self.cfg.TRAIN.SPLIT),
batch_size=self.cfg.TRAIN.BATCH_SIZE, shuffle=True,
num_workers=12, pin_memory=True, collate_fn=self.train_collator)
eval_loader = torch.utils.data.DataLoader(
VideoDataSet(self.cfg, split=self.cfg.VAL.SPLIT),
batch_size=self.cfg.VAL.BATCH_SIZE, shuffle=False,
num_workers=12, pin_memory=True, drop_last=False, collate_fn=self.test_collator)
bm_mask = get_mask(self.temporal_dim, self.max_duration).cuda()
scores = []
for epoch in range(n_epochs):
#print('Current LR: {}'.format(self.scheduler.get_last_lr()[0]))
self.train_epoch(train_loader, bm_mask, epoch, writer)
#self.scheduler.step()
score = self.evaluate(eval_loader, self.cfg.VAL.SPLIT)
state = {
'epoch': epoch + 1,
'score': score,
'state_dict': self.model.state_dict()
}
if len(scores) == 0 or score > max(scores):
torch.save(state, os.path.join(checkpoint_dir, "best_{}.pth".format(self.cfg.EVAL_SCORE)))
torch.save(state, os.path.join(checkpoint_dir, "model_{}.pth".format(epoch + 1)))
writer.add_scalar(self.cfg.EVAL_SCORE, score, epoch)
scores.append(score)
def evaluate(self, data_loader=None, split=None):
self.inference(data_loader, split, self.cfg.VAL.BATCH_SIZE)
score = self.evaluate_func(self.cfg) # AUC if dataset=anet, AR@100 if dataset=thumos
return score
def inference(self, data_loader=None, split=None, batch_size=None):
if not os.path.isdir('results/outputs/'):
os.makedirs('results/outputs/')
annotations = getDatasetDict(self.cfg.DATA.ANNOTATION_FILE, split) if self.cfg.DATASET == 'thumos' else None
self.prop_gen = ProposalGenerator(self.temporal_dim, self.max_duration, annotations)
self.post_processing = PostProcessor(self.cfg, split)
if data_loader is None:
data_loader = torch.utils.data.DataLoader(
VideoDataSet(self.cfg, split=split),
batch_size=batch_size, shuffle=False,
num_workers=12, pin_memory=True, drop_last=False, collate_fn=self.test_collator)
col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_score", "score"]
self.model.eval()
with torch.no_grad():
for video_names, env_features, agent_features, agent_masks, obj_features, obj_masks in tqdm(data_loader):
env_features = env_features.cuda() if self.cfg.USE_ENV else None
agent_features = agent_features.cuda() if self.cfg.USE_AGENT else None
agent_masks = agent_masks.cuda() if self.cfg.USE_AGENT else None
obj_features = obj_features.cuda() if self.cfg.USE_OBJ else None
obj_masks = obj_masks.cuda() if self.cfg.USE_OBJ else None
confidence_map, start_map, end_map = self.model(env_features, agent_features, agent_masks, obj_features, obj_masks)
confidence_map = confidence_map.cpu().numpy()
start_map = start_map.cpu().numpy()
end_map = end_map.cpu().numpy()
batch_props = self.prop_gen(start_map, end_map, confidence_map, video_names)
for video_name, new_props in zip(video_names, batch_props):
new_df = pd.DataFrame(new_props, columns=col_name)
new_df.to_feather("./results/outputs/" + video_name + ".feather")
self.post_processing()
def get_args():
    """Parse the CLI: an optional YAML config path plus free-form config overrides."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg-file', type=str, default=None,
                        help='Path to YAML config file.')
    # Every remaining token is collected verbatim as KEY VALUE override pairs.
    parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,
                        help='See slowfast/config/defaults.py for all options')
    return parser.parse_args()
def main(args):
    """Build the config, apply CLI overrides, and dispatch by cfg.MODE."""
    cfg = get_cfg()
    if args.cfg_file:
        cfg.merge_from_file(args.cfg_file)
    if args.opts is not None:
        cfg.merge_from_list(args.opts)
    cfg.freeze()
    solver = Solver(cfg)
    mode = cfg.MODE
    if mode in ('train', 'training'):
        solver.train(cfg.TRAIN.NUM_EPOCHS)
    elif mode in ('validate', 'validation'):
        solver.evaluate(split=cfg.VAL.SPLIT)
    elif mode in ('test', 'testing'):
        solver.inference(split=cfg.TEST.SPLIT, batch_size=cfg.TEST.BATCH_SIZE)
# Script entry point: parse CLI arguments, then run the mode selected in the config.
if __name__ == '__main__':
    args = get_args()
    main(args)
| 10,325 | 43.317597 | 163 | py |
AOE-Net | AOE-Net-main/eval_det_anet.py | import numpy as np
import json
from argparse import ArgumentParser
def load_json(file):
    """Deserialize and return the JSON content of *file*."""
    with open(file) as fp:
        return json.load(fp)
def add_topk_detection(proposals, class_scores, class_names, k=1):
    """Turn class-agnostic proposals into detections for the top-k classes.

    For each of the k highest-scoring classes (best first) every proposal is
    duplicated, its score multiplied by the class score and labelled with the
    class name. Returns the flat list of detection dicts.
    """
    order = class_scores.argsort()[-k:][::-1]  # indices of the top-k classes, best first
    detections = []
    for cls_idx in order:
        label = class_names[cls_idx]
        cls_score = class_scores[cls_idx]
        for prop in proposals:
            detections.append({
                'segment': prop['segment'],
                'score': prop['score'] * cls_score,
                'label': label,
            })
    return detections
def gen_detection(prop_file, cls_file, out_file):
    """Fuse per-video proposals with video-level class scores into detections.

    Reads the proposal and classification JSON files, builds top-1 detections
    for every video, and writes the combined result to *out_file*.
    """
    proposals = load_json(prop_file)
    classifications = load_json(cls_file)
    class_names = classifications['class']
    per_video = {
        video_name: add_topk_detection(
            results, np.array(classifications['results'][video_name]), class_names)
        for video_name, results in proposals['results'].items()
    }
    detections = {
        'version': proposals['version'],
        'external_data': proposals['external_data'],
        'results': per_video,
    }
    with open(out_file, 'w') as out:
        json.dump(detections, out)
def evaluate_detections(cfg, out_file=None, verbose=False, check_status=True):
    """Config-driven wrapper around :func:`get_det_scores`.

    Pulls the proposal/classification/ground-truth paths out of *cfg* and
    delegates; returns the average mAP over the tIoU thresholds.

    The original body duplicated ``get_det_scores`` line for line; delegating
    keeps the two evaluation paths from drifting apart.
    """
    return get_det_scores(
        cfg.DATA.RESULT_PATH,
        cfg.DATA.CLASSIFICATION_PATH,
        cfg.DATA.ANNOTATION_FILE,
        out_file=out_file,
        verbose=verbose,
        check_status=check_status)
def get_det_scores(prop_file, cls_file, gt_file, out_file=None, verbose=False, check_status=False):
    """Fuse proposals with classifications, run ActivityNet detection
    evaluation, and return the average mAP.

    When *out_file* is None the fused detections overwrite *prop_file*.
    """
    if out_file is None:
        out_file = prop_file
    print("Detection processing start")
    gen_detection(prop_file, cls_file, out_file)
    print("Detection processing finished")
    # Imported lazily: the evaluation package is only needed on this path.
    from evaluation_anet.eval_detection import ANETdetection
    evaluator = ANETdetection(
        ground_truth_filename=gt_file,
        prediction_filename=out_file,
        subset='validation', verbose=verbose, check_status=check_status)
    evaluator.evaluate()
    per_tiou = [f'mAP@{t:.2f}: {mAP*100:.3f}'
                for t, mAP in zip(evaluator.tiou_thresholds, evaluator.mAP)]
    summary = 'Detection: average-mAP {:.3f}.\n'.format(evaluator.average_mAP * 100) + '\n'.join(per_tiou)
    print(summary)
    return evaluator.average_mAP
# Stand-alone usage: evaluate an existing proposal/classification result pair from the CLI.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('-p', '--proposal-file', type=str, default='results/results.json')
    parser.add_argument('-c', '--classification-file', type=str, default='results/classification_results.json')
    parser.add_argument('-o', '--output-file', type=str, default='results/detection_results.json')
    parser.add_argument('-g', '--groundtruth-file', type=str, default='../datasets/activitynet/annotations/activity_net.v1-3.min.json')
    args = parser.parse_args()
    get_det_scores(
        args.proposal_file,
        args.classification_file,
        args.groundtruth_file,
        args.output_file,
        verbose=True,
        check_status=True)
| 3,831 | 36.568627 | 135 | py |
AOE-Net | AOE-Net-main/eval_thumos.py | # -*- coding: utf-8 -*-
import os
import requests
import pickle
import io
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
from evaluation_thumos import prop_eval
def run_evaluation(proposal_filename, groundtruth_filename='/home/ngan_uark/tqsang/AEN_BERT/thumos14_test_groundtruth.csv'):
    """Score a pickled THUMOS-14 proposal file against the test ground truth.

    Loads the proposal pickle into a dataframe, downloads the ground-truth CSV
    on first use (cached at *groundtruth_filename*), and returns the
    ``(average_recall, average_nr_proposals)`` curve arrays.

    NOTE(review): the default ground-truth path is a hard-coded user home
    directory — callers on other machines must pass their own path.
    """
    frm_nums = pickle.load(open("evaluation_thumos/frm_num.pkl", 'rb'))
    rows = prop_eval.pkl2dataframe(frm_nums, 'evaluation_thumos/movie_fps.pkl', proposal_filename)
    aen_results = pd.DataFrame(rows, columns=['f-end', 'f-init', 'score', 'video-frames', 'video-name'])
    # Retrieves and loads Thumos14 test set ground-truth.
    if not os.path.isfile(groundtruth_filename):
        ground_truth_url = ('https://gist.githubusercontent.com/cabaf/'
                            'ed34a35ee4443b435c36de42c4547bd7/raw/'
                            '952f17b9cdc6aa4e6d696315ba75091224f5de97/'
                            'thumos14_test_groundtruth.csv')
        s = requests.get(ground_truth_url).content
        groundtruth = pd.read_csv(io.StringIO(s.decode('utf-8')), sep=' ')
        groundtruth.to_csv(groundtruth_filename)
    else:
        groundtruth = pd.read_csv(groundtruth_filename)
    # Computes recall for different tiou thresholds at a fixed average number of proposals.
    '''
    recall, tiou_thresholds = prop_eval.recall_vs_tiou_thresholds(aen_results, ground_truth,
                                                                  nr_proposals=nr_proposals,
                                                                  tiou_thresholds=np.linspace(0.5, 1.0, 11))
    recall = np.mean(recall)
    '''
    average_recall, average_nr_proposals = prop_eval.average_recall_vs_nr_proposals(aen_results, groundtruth)
    return average_recall, average_nr_proposals
def evaluate_proposals(cfg, nr_proposals_list=(50, 100, 200, 500, 1000)):
    """Evaluate THUMOS proposal recall: print AR@N for each requested N and return AR@100."""
    avg_recall, avg_nr_props = run_evaluation(cfg.DATA.RESULT_PATH)
    # Interpolate the recall curve so it can be read at arbitrary proposal counts.
    ar_curve = interp1d(avg_nr_props, avg_recall, axis=0,
                        bounds_error=False, fill_value='extrapolate')
    ar_results = {}
    for nr_prop in nr_proposals_list:
        value = float(ar_curve(nr_prop))
        ar_results[nr_prop] = value
        print("AR@{} is {}\n".format(nr_prop, value))
    return ar_results[100]
def plot_metric(average_nr_proposals, recalls, labels, colors, linestyles, figure_file):
    """Plot average-recall curves in two side-by-side panels and save to *figure_file*.

    Curves 0-2 go to the left panel; curves 3+ go to the right panel, where
    the first color is reused for the shared baseline curve.

    Args:
        average_nr_proposals: x values (proposal counts) shared by all curves.
        recalls: list of y-value sequences, one per curve.
        labels / colors / linestyles: per-curve legend labels, colors and styles.
        figure_file: output image path.
    """
    fn_size = 25
    plt.figure(num=None, figsize=(30, 10))

    def plotting(sub_ax, recs, lbs, lnstls, clrs):
        # Draw every curve on the axis that was passed in.
        # (Bug fix: this helper previously ignored its sub_ax parameter and
        # closed over the outer `ax` variable — it only worked by call order.)
        for idx, rec in enumerate(recs):
            sub_ax.plot(average_nr_proposals, rec, color=clrs[idx],
                        label=lbs[idx],
                        linewidth=6, linestyle=lnstls[idx], marker=None)
        handles, legend_labels = sub_ax.get_legend_handles_labels()
        sub_ax.legend(handles, legend_labels, loc='lower right', fontsize=fn_size)
        plt.ylabel('Average Recall', fontsize=fn_size)
        plt.xlabel('Average Number of Proposals per Video', fontsize=fn_size)
        # Pass the on/off flag positionally: the keyword was `b=` before
        # matplotlib 3.4 and is `visible=` from 3.4 on (b removed in 3.6).
        plt.grid(True, which="both")
        plt.setp(sub_ax.get_xticklabels(), fontsize=fn_size)
        plt.setp(sub_ax.get_yticklabels(), fontsize=fn_size)

    ax = plt.subplot(1, 2, 1)
    plotting(ax, recalls[:3], labels[:3], linestyles[:3], colors[:3])
    ax = plt.subplot(1, 2, 2)
    # The right panel reuses the first color for the shared baseline curve.
    plotting(ax, recalls[3:], labels[3:], linestyles[3:], [colors[0]] + colors[3:])
    # plt.show()
    plt.savefig(figure_file, dpi=300)
def main_evaluate_proposals(result_file, nr_proposals_list):
    """Return AR@N for each N in *nr_proposals_list*, interpolated from the recall curve of *result_file*."""
    avg_recall, avg_nr_props = run_evaluation(result_file)
    ar_curve = interp1d(avg_nr_props, avg_recall, axis=0,
                        bounds_error=False, fill_value='extrapolate')
    return [float(ar_curve(n)) for n in nr_proposals_list]
def main():
    """Ablation-study driver: compute AR curves for several result files and plot them side by side."""
    result_dir = 'results/ablation_study/'
    result_files = [
        'full_arch.pkl',
        'act_only.pkl',
        'env_only.pkl',
        'full_arch.pkl',
        'env+hard_attn_only.pkl',
        'env+self_attn_only.pkl',
    ]
    labels = [
        'AEI (actor and environment)',
        'Actor only',
        'Environment only',
        'AEI (main actor selection and feature fusion)',
        'w/o feature fusion',
        'w/o main actor selection',
    ]
    colors = ['tab:red', 'tab:purple', 'tab:green', 'tab:blue', 'tab:orange']
    linestyles = ['-'] * 6
    nr_props = list(range(50, 1000))
    ar_results = [
        main_evaluate_proposals(os.path.join(result_dir, name), nr_props)
        for name in result_files
    ]
    plot_metric(nr_props, ar_results, labels, colors, linestyles, 'ablation_study.png')
# Script entry point for the ablation-study plot.
if __name__ == '__main__':
    main()
| 5,009 | 34.785714 | 124 | py |
AOE-Net | AOE-Net-main/utils.py | import numpy as np
import subprocess
import os
def ioa_with_anchors(anchors_min, anchors_max, box_min, box_max):
    """Intersection-over-anchor: overlap length divided by the anchor length.

    Used as the supervision signal for boundary matching. Arguments may be
    scalars or numpy arrays; normal broadcasting rules apply.
    """
    overlap_lo = np.maximum(anchors_min, box_min)
    overlap_hi = np.minimum(anchors_max, box_max)
    overlap = np.maximum(overlap_hi - overlap_lo, 0.)
    return np.divide(overlap, anchors_max - anchors_min)
def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
    """Jaccard (IoU) overlap between one box and a set of anchors.

    Arguments may be scalars or numpy arrays; broadcasting applies.
    """
    inter_lo = np.maximum(anchors_min, box_min)
    inter_hi = np.minimum(anchors_max, box_max)
    inter_len = np.maximum(inter_hi - inter_lo, 0.)
    union_len = (anchors_max - anchors_min) - inter_len + (box_max - box_min)
    return np.divide(inter_len, union_len)
class ProposalGenerator(object):
    """Turns start/end probability sequences plus a BMN confidence map into
    scored temporal proposals.

    ANet proposals are rescaled to [0, 1] by the temporal scale; THUMOS
    proposals are rescaled via the per-video snippet annotations.
    """
    def __init__(self, temporal_dim=None, max_duration=None, annotations=None):
        self.tscale = temporal_dim
        self.max_duration = max_duration
        self.annots = annotations # For THUMOS only
        # Dispatch on dataset: ANet when no annotations are given, THUMOS otherwise.
        self.rescale_segment = self.rescale_segment_anet if self.annots is None else self.rescale_segment_thumos
    def rescale_segment_anet(self, start_index, end_index, video_name=None):
        # Normalize snippet indices to a [0, 1] fraction of the video.
        return start_index / self.tscale, end_index / self.tscale
    def rescale_segment_thumos(self, start_index, end_index, video_name=None):
        # Shift by the clip's start snippet and normalize by the master duration.
        b = self.annots[video_name]['start_snippet']
        d = self.annots[video_name]['master_snippet_duration']
        return (start_index + b) / d, (end_index + b) / d
    def __call__(self, start, end, confidence_map, video_names):
        """Generate proposals for a batch.

        Returns one (num_proposals, 7) array per video with columns
        [xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score].
        """
        batch_props = []
        for i, video_name in enumerate(video_names):
            start_scores = start[i]
            end_scores = end[i]
            # Confidence map channel 1 = classification, channel 0 = regression.
            clr_confidence = (confidence_map[i][1])
            reg_confidence = (confidence_map[i][0])
            max_start = max(start_scores)
            max_end = max(end_scores)
            # generate the set of start points and end points:
            # a snippet is a candidate boundary if it is a local peak or
            # exceeds half of the global maximum.
            start_bins = np.zeros(self.tscale)
            start_bins[0] = 1  # the first snippet is always a candidate start
            for idx in range(1, self.tscale - 1):
                if start_scores[idx] > start_scores[idx + 1] and start_scores[idx] > start_scores[idx - 1]:
                    start_bins[idx] = 1
                elif start_scores[idx] > (0.5 * max_start):
                    start_bins[idx] = 1
            end_bins = np.zeros(len(end_scores))
            end_bins[-1] = 1  # the last snippet is always a candidate end
            for idx in range(1, self.tscale - 1):
                if end_scores[idx] > end_scores[idx + 1] and end_scores[idx] > end_scores[idx - 1]:
                    end_bins[idx] = 1
                elif end_scores[idx] > (0.5 * max_end):
                    end_bins[idx] = 1
            # generate proposals: every (duration, start) cell whose endpoints
            # are both candidate boundaries becomes a proposal.
            new_props = []
            for idx in range(self.max_duration):
                for jdx in range(self.tscale):
                    start_index = jdx
                    end_index = start_index + idx + 1
                    if end_index < self.tscale and start_bins[start_index] == 1 and end_bins[end_index] == 1:
                        xmin, xmax = self.rescale_segment(start_index, end_index, video_name)
                        xmin_score = start_scores[start_index]
                        xmax_score = end_scores[end_index]
                        clr_score = clr_confidence[idx, jdx]
                        reg_score = reg_confidence[idx, jdx]
                        # Final score fuses boundary and confidence-map evidence.
                        score = xmin_score * xmax_score * clr_score * reg_score
                        new_props.append([xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score])
            # NOTE(review): np.stack raises on an empty list — a video that
            # yields no candidate pair would crash here; confirm this cannot
            # happen given start_bins[0] and end_bins[-1] are always set.
            new_props = np.stack(new_props)
            batch_props.append(new_props)
        return batch_props
| 3,957 | 42.494505 | 112 | py |
AOE-Net | AOE-Net-main/dataset.py | # -*- coding: utf-8 -*-
import os
import json
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from utils import ioa_with_anchors, iou_with_anchors
def load_json(file):
    """Read *file* and return its parsed JSON payload."""
    with open(file) as fp:
        payload = json.load(fp)
    return payload
class Collator(object):
    """Batch collate function for :class:`VideoDataSet`.

    Pads the variable-length agent/object box dimension up to the batch
    maximum and builds the corresponding boolean attention masks. Training
    batches additionally carry the confidence/start/end labels; test batches
    carry the video ids instead.
    """
    def __init__(self, cfg, mode):
        self.is_train = mode in ['train', 'training']
        if self.is_train:
            self.batch_names = ['env_feats', 'agent_feats', 'box_lens', 'obj_feats', 'obj_box_lens', 'conf_labels', 'start_labels', 'end_labels']
            self.label_names = ['conf_labels', 'start_labels', 'end_labels']
        else:
            self.batch_names = ['video_ids', 'env_feats', 'agent_feats', 'box_lens', 'obj_feats', 'obj_box_lens']
            self.label_names = []
        self.feat_names = ['env_feats', 'agent_feats', 'box_lens', 'obj_feats', 'obj_box_lens']
        self.tmp_dim = cfg.DATA.TEMPORAL_DIM
        self.feat_dim = cfg.MODEL.AGENT_DIM
        self.obj_feat_dim = cfg.MODEL.OBJ_DIM ####
    def process_features(self, bsz, env_feats, agent_feats, box_lens, obj_feats, obj_box_lens):
        """Stack env features and zero-pad agent/object features to the batch max box count.

        Disabled streams arrive as tuples of None and come back as None.
        Returns (env, padded_agents, agent_mask, padded_objs, obj_mask).
        """
        if env_feats[0] is not None:
            env_feats = torch.stack(env_feats)
        else:
            env_feats = None
        # Make new order to inputs by their lengths (long-to-short)
        if agent_feats[0] is not None:
            box_lens = torch.stack(box_lens, dim=0)
            max_box_dim = torch.max(box_lens).item()
            # Make padding mask for self-attention: True where a real box exists.
            agent_mask = torch.arange(max_box_dim)[None, None, :] < box_lens[:, :, None]
            # Pad agent features at temporal and box dimension
            pad_agent_feats = torch.zeros(bsz, self.tmp_dim, max_box_dim, self.feat_dim)
            for i, temporal_features in enumerate(agent_feats):
                for j, box_features in enumerate(temporal_features):
                    if len(box_features) > 0:
                        pad_agent_feats[i, j, :len(box_features)] = torch.tensor(box_features)
        else:
            pad_agent_feats = None
            agent_mask = None
        # Object stream: same padding/masking scheme as the agent stream.
        if obj_feats[0] is not None:
            obj_box_lens = torch.stack(obj_box_lens, dim=0)
            max_box_dim = torch.max(obj_box_lens).item()
            # Make padding mask for self-attention
            obj_mask = torch.arange(max_box_dim)[None, None, :] < obj_box_lens[:, :, None]
            # Pad object features at temporal and box dimension
            pad_obj_feats = torch.zeros(bsz, self.tmp_dim, max_box_dim, self.obj_feat_dim)
            for i, temporal_features in enumerate(obj_feats):
                for j, box_features in enumerate(temporal_features):
                    if len(box_features) > 0:
                        pad_obj_feats[i, j, :len(box_features)] = torch.tensor(box_features)
        else:
            pad_obj_feats = None
            obj_mask = None
        return env_feats, pad_agent_feats, agent_mask, pad_obj_feats, obj_mask
    def __call__(self, batch):
        """Collate a list of dataset items into one batch.

        Transposes the per-item tuples into named columns, pads the feature
        streams, and appends stacked label tensors when in training mode.
        """
        input_batch = dict(zip(self.batch_names, zip(*batch)))
        bsz = len(input_batch['env_feats'])
        output_batch = [] if self.is_train else [input_batch['video_ids']]
        # Process environment and agent features
        input_feats = [input_batch[feat_name] for feat_name in self.feat_names]
        output_batch.extend(self.process_features(bsz, *input_feats))
        for label_name in self.label_names:
            output_batch.append(torch.stack(input_batch[label_name]))
        return output_batch
class VideoDataSet(Dataset):
    """Dataset of per-video environment / agent / object features plus BMN labels.

    Items are the raw (un-padded) per-snippet feature lists; batching and
    padding happen in :class:`Collator`. Training items additionally carry
    the confidence map and start/end match scores built in _get_train_label.
    """
    def __init__(self, cfg, split='training'):
        self.split = split
        self.dataset_name = cfg.DATASET
        self.video_anno_path = cfg.DATA.ANNOTATION_FILE
        self.temporal_dim = cfg.DATA.TEMPORAL_DIM
        self.max_duration = cfg.DATA.MAX_DURATION
        self.temporal_gap = 1. / self.temporal_dim
        self.env_feature_dir = cfg.DATA.ENV_FEATURE_DIR
        self.agent_feature_dir = cfg.DATA.AGENT_FEATURE_DIR
        self.obj_feature_dir = cfg.DATA.OBJ_FEATURE_DIR
        self.use_env = cfg.USE_ENV
        self.use_agent = cfg.USE_AGENT
        self.use_obj = cfg.USE_OBJ
        # Label anchors are only needed when training labels will be generated.
        if split in ['train', 'training']:
            self._get_match_map()
        # ActivityNet feature files are prefixed with 'v_'.
        self.video_prefix = 'v_' if cfg.DATASET == 'anet' else ''
        self._get_dataset()
    def _get_match_map(self):
        """Precompute the (duration x start) anchor segment table and the
        per-snippet boundary anchor windows used for label generation."""
        match_map = []
        for idx in range(self.temporal_dim):
            tmp_match_window = []
            xmin = self.temporal_gap * idx
            for jdx in range(1, self.max_duration + 1):
                xmax = xmin + self.temporal_gap * jdx
                tmp_match_window.append([xmin, xmax])
            match_map.append(tmp_match_window)
        match_map = np.array(match_map)  # 100x100x2
        match_map = np.transpose(match_map, [1, 0, 2])  # [0,1] [1,2] [2,3].....[99,100]
        match_map = np.reshape(match_map, [-1, 2])  # [0,2] [1,3] [2,4].....[99,101] # duration x start
        self.match_map = match_map
        # Half-snippet windows centred on each boundary position.
        self.anchor_xmin = [self.temporal_gap * (i - 0.5) for i in range(self.temporal_dim)]
        self.anchor_xmax = [self.temporal_gap * (i + 0.5) for i in range(1, self.temporal_dim + 1)]
        # self.anchor_xmin = [self.temporal_gap * i for i in range(self.temporal_dim)]
        # self.anchor_xmax = [self.temporal_gap * i for i in range(1, self.temporal_dim + 1)]
    def get_filter_video_names(self, json_data, upper_thresh=.98, lower_thresh=.3):
        """
        Select video according to length of ground truth.

        Training videos with a missing env-feature file or with mean GT length
        >= upper_thresh (fraction of the video) are filtered out; those with
        mean GT length < lower_thresh are flagged for augmentation.
        :param json_data: the annotation database dict
        :return: (filter_video_names, augment_video_names)
        """
        filter_video_names, augment_video_names = [], []
        video_lists = list(json_data)
        for video_name in video_lists:
            # for video_name in video_lists[::-1]:
            video_info = json_data[video_name]
            if not os.path.isfile(os.path.join(self.env_feature_dir, 'v_' + video_name + '.json')):
                filter_video_names.append(video_name)
                continue
            if video_info['subset'] != "training":
                continue
            video_second = video_info["duration"]
            gt_lens = []
            video_labels = video_info["annotations"]
            for j in range(len(video_labels)):
                tmp_info = video_labels[j]
                tmp_start = tmp_info["segment"][0]
                tmp_end = tmp_info["segment"][1]
                # Clamp segment endpoints to the [0, 1] normalized range.
                tmp_start = max(min(1, tmp_start / video_second), 0)
                tmp_end = max(min(1, tmp_end / video_second), 0)
                gt_lens.append(tmp_end - tmp_start)
            if len(gt_lens):
                mean_len = np.mean(gt_lens)
                if mean_len >= upper_thresh:
                    filter_video_names.append(video_name)
                if mean_len < lower_thresh:
                    augment_video_names.append(video_name)
        return filter_video_names, augment_video_names
    def _get_dataset(self):
        """Build self.video_ids and self.event_dict for the requested split."""
        annotations = load_json(self.video_anno_path)['database']
        # Length-based filtering/augmentation applies to ActivityNet only.
        if self.dataset_name == 'anet':
            filter_video_names, augment_video_names = self.get_filter_video_names(annotations)
        else:
            filter_video_names, augment_video_names = [], []
        # Read event segments
        self.event_dict = {}
        self.video_ids = []
        for video_id, annotation in annotations.items():
            if annotation['subset'] != self.split or video_id in filter_video_names:
                continue
            self.event_dict[video_id] = {
                'duration': annotation['duration'],
                'events': annotation['annotations']
                # 'events': annotation['timestamps']
            }
            self.video_ids.append(video_id)
        # Short-event videos are listed twice to oversample them during training.
        if self.split in ['train', 'training']:
            self.video_ids.extend(augment_video_names)
        print("Split: %s. Dataset size: %d" % (self.split, len(self.video_ids)))
    def __getitem__(self, index):
        # NOTE(review): only the exact split name 'training' gets labels here,
        # while other methods accept both 'train' and 'training' — confirm the
        # split strings used by callers.
        env_features, agent_features, box_lengths, obj_features, obj_box_lengths = self._load_item(index)
        if self.split == 'training':
            match_score_start, match_score_end, confidence_score = self._get_train_label(index)
            return env_features, agent_features, box_lengths, obj_features, obj_box_lengths, confidence_score, match_score_start, match_score_end
        else:
            return self.video_ids[index], env_features, agent_features, box_lengths, obj_features, obj_box_lengths
    def _load_item(self, index):
        """Load the raw per-snippet features for one video from the JSON feature files."""
        video_name = self.video_prefix + self.video_ids[index]
        '''
        Read environment features at every timestamp
        Feature size: TxF
        T: number of timestamps
        F: feature size
        '''
        if self.use_env is True:
            env_features = load_json(os.path.join(self.env_feature_dir, video_name + '.json'))['video_features']
            # env_segments = [env['segment'] for env in env_features]
            env_features = torch.tensor([feature['features'] for feature in env_features]).float().squeeze(1)
        else:
            env_features = None
        '''
        Read agents features at every timestamp
        Feature size: TxBxF
        T: number of timestamps
        B: max number of bounding boxes
        F: feature size
        '''
        if self.use_agent is True:
            agent_features = load_json(os.path.join(self.agent_feature_dir, video_name + '.json'))['video_features']
            # agent_segments = [feature['segment'] for feature in agent_features]
            agent_features = [feature['features'] for feature in agent_features]
            # Create and pad agent_box_lengths if train
            box_lengths = torch.tensor([len(x) for x in agent_features])
        else:
            agent_features = None
            box_lengths = None
        '''
        Read agents features at every timestamp
        Feature size: TxBxF
        T: number of timestamps
        B: max number of bounding boxes
        F: feature size
        '''
        if self.use_obj is True:
            # NOTE(review): bare except — if the object-feature file fails to
            # load, obj_features stays unbound and the next line raises
            # NameError instead of a clear error; consider re-raising.
            try:
                obj_features = load_json(os.path.join(self.obj_feature_dir, video_name + '.json'))['video_features']
            except:
                print('error', video_name)
                pass
            # agent_segments = [feature['segment'] for feature in agent_features]
            obj_features = [feature['features'] for feature in obj_features]
            # Create and pad agent_box_lengths if train
            obj_box_lengths = torch.tensor([len(x) for x in obj_features])
        else:
            obj_features = None
            obj_box_lengths = None
        # assert env_segments == agent_segments and len(env_segments) == 100, 'Two streams must have 100 segments.'
        return env_features, agent_features, box_lengths, obj_features, obj_box_lengths
    def _get_train_label(self, index):
        """Build the BMN training labels for one video.

        Returns (match_score_start, match_score_end, gt_iou_map): per-snippet
        start/end overlap scores and the (max_duration x temporal_dim) map of
        best IoU between each anchor segment and any ground-truth event.
        """
        video_id = self.video_ids[index]
        video_info = self.event_dict[video_id]
        video_labels = video_info['events']  # the measurement is second, not frame
        duration = video_info['duration']
        ##############################################################################################
        # change the measurement from second to percentage
        gt_bbox = []
        gt_iou_map = []
        for j in range(len(video_labels)):
            tmp_info = video_labels[j]
            tmp_start = max(min(1, tmp_info['segment'][0] / duration), 0)
            tmp_end = max(min(1, tmp_info['segment'][1] / duration), 0)
            gt_bbox.append([tmp_start, tmp_end])
            tmp_gt_iou_map = iou_with_anchors(
                self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end)
            tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
                                        [self.max_duration, self.temporal_dim])
            gt_iou_map.append(tmp_gt_iou_map)
        # Each anchor keeps its best IoU over all ground-truth events.
        gt_iou_map = np.array(gt_iou_map)
        gt_iou_map = np.max(gt_iou_map, axis=0)
        gt_iou_map = torch.Tensor(gt_iou_map)
        ##############################################################################################
        ##############################################################################################
        # generate R_s and R_e: small windows (3 snippets wide) around each
        # ground-truth start and end boundary.
        gt_bbox = np.array(gt_bbox)
        gt_xmins = gt_bbox[:, 0]
        gt_xmaxs = gt_bbox[:, 1]
        # gt_lens = gt_xmaxs - gt_xmins
        gt_len_small = 3 * self.temporal_gap  # np.maximum(self.temporal_gap, self.boundary_ratio * gt_lens)
        gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
        gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
        ##############################################################################################
        ##############################################################################################
        # calculate the ioa for all timestamp: each snippet anchor scores its
        # best overlap with any boundary window.
        match_score_start = []
        for jdx in range(len(self.anchor_xmin)):
            match_score_start.append(np.max(
                ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
        match_score_end = []
        for jdx in range(len(self.anchor_xmin)):
            match_score_end.append(np.max(
                ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
        match_score_start = torch.tensor(match_score_start)
        match_score_end = torch.tensor(match_score_end)
        ##############################################################################################
        return match_score_start, match_score_end, gt_iou_map
    def __len__(self):
        return len(self.video_ids)
| 14,047 | 44.170418 | 145 | py |
AOE-Net | AOE-Net-main/loss_function.py | # -*- coding: utf-8 -*-
import torch
import numpy as np
import torch.nn.functional as F
def get_mask(tscale, duration):
    """Return a (duration, tscale) float mask for the BM confidence map.

    Row d holds (tscale - d) ones followed by d zeros, masking out proposals
    whose end index would fall beyond the temporal range.
    """
    rows = [[1] * (tscale - d) + [0] * d for d in range(duration)]
    return torch.Tensor(np.array(rows, dtype=np.float32))
def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask):
    """Combined BMN loss: TEM boundary loss + PEM regression/classification.

    Returns (total, tem, pem_reg, pem_cls); the total weights the regression
    term by 10.
    """
    # Channel 0 of the confidence map is the regression head, channel 1 the
    # classification head.
    reg_map = pred_bm[:, 0].contiguous()
    cls_map = pred_bm[:, 1].contiguous()
    masked_gt = gt_iou_map * bm_mask
    pem_reg_loss = pem_reg_loss_func(reg_map, masked_gt, bm_mask)
    pem_cls_loss = pem_cls_loss_func(cls_map, masked_gt, bm_mask)
    tem_loss = tem_loss_func(pred_start, pred_end, gt_start, gt_end)
    total = tem_loss + 10 * pem_reg_loss + pem_cls_loss
    return total, tem_loss, pem_reg_loss, pem_cls_loss
def tem_loss_func(pred_start, pred_end, gt_start, gt_end):
    """Temporal Evaluation Module loss.

    Class-balanced binary log-loss applied independently to the start and end
    probability sequences; positives are snippets with label > 0.5.
    """
    def bi_loss(pred_score, gt_label):
        # Flatten, then reweight positives/negatives so each class contributes
        # roughly half of the loss regardless of its frequency.
        scores = pred_score.view(-1)
        targets = gt_label.view(-1)
        pmask = (targets > 0.5).float()
        num_entries = len(pmask)
        num_positive = torch.sum(pmask)
        ratio = num_entries / num_positive
        coef_0 = 0.5 * ratio / (ratio - 1)
        coef_1 = 0.5 * ratio
        epsilon = 0.000001
        loss_pos = coef_1 * torch.log(scores + epsilon) * pmask
        loss_neg = coef_0 * torch.log(1.0 - scores + epsilon) * (1.0 - pmask)
        return -1 * torch.mean(loss_pos + loss_neg)
    return bi_loss(pred_start, gt_start) + bi_loss(pred_end, gt_end)
def pem_reg_loss_func(pred_score, gt_iou_map, mask):
    """Balanced MSE loss for the PEM regression head.

    GT cells are split into high (>0.7), medium (0.3..0.7] and low (0..0.3]
    IoU bands; medium and low cells are randomly subsampled so each band
    contributes roughly as many cells as the high band before the MSE.

    NOTE(review): requires CUDA (hard-coded .cuda()) and draws from the NumPy
    RNG, so results depend on np.random state rather than torch seeding.
    """
    u_hmask = (gt_iou_map > 0.7).float()
    u_mmask = ((gt_iou_map <= 0.7) & (gt_iou_map > 0.3)).float()
    u_lmask = ((gt_iou_map <= 0.3) & (gt_iou_map > 0.)).float()
    u_lmask = u_lmask * mask
    num_h = torch.sum(u_hmask)
    num_m = torch.sum(u_mmask)
    num_l = torch.sum(u_lmask)
    # Keep each medium cell with probability num_h / num_m.
    r_m = num_h / num_m
    u_smmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
    u_smmask = u_mmask * u_smmask
    u_smmask = (u_smmask > (1. - r_m)).float()
    # Keep each low cell with probability num_h / num_l.
    r_l = num_h / num_l
    u_slmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
    u_slmask = u_lmask * u_slmask
    u_slmask = (u_slmask > (1. - r_l)).float()
    weights = u_hmask + u_smmask + u_slmask
    loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
    # Renormalize the mean MSE by the number of selected cells.
    loss = 0.5 * torch.sum(loss * torch.ones(*weights.shape).cuda()) / torch.sum(weights)
    return loss
def pem_cls_loss_func(pred_score, gt_iou_map, mask):
    """Proposal classification loss over the BM confidence map: a
    class-balanced binary logistic loss where IoU > 0.9 cells are positives
    and all other valid (masked-in) cells are negatives."""
    pos = (gt_iou_map > 0.9).float()
    neg = (gt_iou_map <= 0.9).float() * mask
    n_pos = torch.sum(pos)
    n_total = n_pos + torch.sum(neg)
    # Balancing coefficients: each class contributes ~half the loss mass.
    ratio = n_total / n_pos
    w_neg = 0.5 * ratio / (ratio - 1)
    w_pos = 0.5 * ratio
    eps = 0.000001
    pos_term = w_pos * torch.log(pred_score + eps) * pos
    neg_term = w_neg * torch.log(1.0 - pred_score + eps) * neg
    return -1 * torch.sum(pos_term + neg_term) / n_total
| 3,233 | 32 | 89 | py |
AOE-Net | AOE-Net-main/config/defaults.py | from fvcore.common.config import CfgNode
# Root config node; get_cfg() below hands out validated clones of this template.
_C = CfgNode()
# ---------------------------------------------------------------------------
# Global options
# ---------------------------------------------------------------------------
_C.GPU_IDS = [0]
_C.MODE = 'training'
_C.EVAL_TYPE = 'proposal'
_C.DATASET = 'anet'
# Which feature modalities to use (environment / agents / objects).
_C.USE_ENV = True
_C.USE_AGENT = True
_C.USE_OBJ = True
_C.EVAL_SCORE = 'AUC'
# ---------------------------------------------------------------------------
# Training options
# ---------------------------------------------------------------------------
_C.TRAIN = CfgNode()
_C.TRAIN.SPLIT = 'training'
_C.TRAIN.NUM_EPOCHS = 10
_C.TRAIN.BATCH_SIZE = 16
_C.TRAIN.STEP_PERIOD = 1
_C.TRAIN.ATTENTION_STEPS = 1
_C.TRAIN.LR = 0.001
_C.TRAIN.WEIGHT_DECAY = 0.0001
# Path to a checkpoint to resume from ('' = train from scratch).
_C.TRAIN.CHECKPOINT_FILE_PATH = ''
_C.TRAIN.LOG_DIR = 'runs/c3d_runs/'
# ---------------------------------------------------------------------------
# Validation options
# ---------------------------------------------------------------------------
_C.VAL = CfgNode()
_C.VAL.SPLIT = 'validation'
_C.VAL.BATCH_SIZE = 32
# ---------------------------------------------------------------------------
# Test options
# ---------------------------------------------------------------------------
_C.TEST = CfgNode()
_C.TEST.SPLIT = 'testing'
_C.TEST.BATCH_SIZE = 32
_C.TEST.CHECKPOINT_PATH = 'checkpoints/c3d_checkpoints/checkpoint_6/best_auc.pth'
# ---------------------------------------------------------------------------
# Data paths and temporal resolution
# ---------------------------------------------------------------------------
_C.DATA = CfgNode()
_C.DATA.ANNOTATION_FILE = '../datasets/activitynet/annotations/activity_net.v1-3.min.json'
_C.DATA.DETECTION_GT_FILE = None
_C.DATA.ENV_FEATURE_DIR = '../datasets/activitynet/c3d_env_features/'
_C.DATA.AGENT_FEATURE_DIR = '../datasets/activitynet/c3d_agent_features/'
_C.DATA.OBJ_FEATURE_DIR = '../c3d_obj_features/'
_C.DATA.CLASSIFICATION_PATH = 'results/classification_results.json'
_C.DATA.RESULT_PATH = 'results/results.json'
_C.DATA.FIGURE_PATH = 'results/result_figure.jpg'
# Number of temporal positions each video is rescaled to, and the maximum
# proposal duration (in temporal positions) the BM map covers.
_C.DATA.TEMPORAL_DIM = 100
_C.DATA.MAX_DURATION = 100
# ---------------------------------------------------------------------------
# Model architecture
# ---------------------------------------------------------------------------
_C.MODEL = CfgNode()
_C.MODEL.BOUNDARY_MATCHING_MODULE = 'bmn'
_C.MODEL.SCORE_PATH = 'checkpoints/c3d_checkpoints/scores.json'
_C.MODEL.CHECKPOINT_DIR = 'checkpoints/c3d_checkpoints/'
_C.MODEL.ATTENTION_HEADS = 4
_C.MODEL.ATTENTION_LAYERS = 1
# Raw feature dims per modality; *_HIDDEN_DIM = None disables the optional
# linear projection for that modality.
_C.MODEL.AGENT_DIM = 2048
_C.MODEL.OBJ_DIM = 2048
_C.MODEL.ENV_DIM = 2048
_C.MODEL.FEAT_DIM = 512
_C.MODEL.TRANSFORMER_DIM = 1024
_C.MODEL.ENV_HIDDEN_DIM = None
_C.MODEL.AGENT_HIDDEN_DIM = None
_C.MODEL.OBJ_HIDDEN_DIM = None
_C.MODEL.HIDDEN_DIM_1D = 256 # 256
_C.MODEL.HIDDEN_DIM_2D = 128 # 128
_C.MODEL.HIDDEN_DIM_3D = 512 # 512
_C.MODEL.TOPK_AGENTS = 4
# ---------------------------------------------------------------------------
# Boundary-Matching Network options
# ---------------------------------------------------------------------------
_C.BMN = CfgNode()
_C.BMN.NUM_SAMPLES = 32
_C.BMN.NUM_SAMPLES_PER_BIN = 3
_C.BMN.PROP_BOUNDARY_RATIO = 0.5
# Post-processing (NMS) of generated proposals.
_C.BMN.POST_PROCESS = CfgNode()
_C.BMN.POST_PROCESS.USE_HARD_NMS = False
_C.BMN.POST_PROCESS.SOFT_NMS_ALPHA = 0.4
_C.BMN.POST_PROCESS.SOFT_NMS_LOW_THRESHOLD = 0.5
_C.BMN.POST_PROCESS.SOFT_NMS_HIGH_THRESHOLD = 0.9
_C.BMN.POST_PROCESS.HARD_NMS_THRESHOLD = 0.65
_C.BMN.POST_PROCESS.NUM_THREADS = 12
_C.BMN.POST_PROCESS.MAX_PROPOSALS = 100
def _assert_and_infer_cfg(cfg):
assert cfg.TRAIN.BATCH_SIZE % len(cfg.GPU_IDS) == 0
return cfg
def get_cfg():
    """Return a validated deep copy of the default configuration.

    Callers receive their own clone, so mutating it never touches the
    module-level defaults.
    """
    cfg_copy = _C.clone()
    return _assert_and_infer_cfg(cfg_copy)
| 2,544 | 26.967033 | 90 | py |
AOE-Net | AOE-Net-main/config/__init__.py | 0 | 0 | 0 | py | |
AOE-Net | AOE-Net-main/models/utils.py | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
def masked_softmax(vector, mask, dim=-1, memory_efficient=False, mask_fill_value=-1e32):
    """A masked softmax module to correctly implement attention in Pytorch.
    Implementation adapted from: https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py
    ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
    masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
    ``None`` in for the mask is also acceptable; you'll just get a regular softmax.
    ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
    broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
    unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
    do it yourself before passing the mask into this function.
    If ``memory_efficient`` is set to true, we will simply use a very large negative number for those
    masked positions so that the probabilities of those positions would be approximately 0.
    This is not accurate in math, but works for most cases and consumes less memory.
    In the case that the input vector is completely masked and ``memory_efficient`` is false, this function
    returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of
    a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function
    will treat every element as equal, and do softmax over equal numbers.
    Args:
        vector (torch.tensor): The tensor to softmax.
        mask (torch.tensor): The tensor to indicate which indices are to be masked and not included in the softmax operation.
        dim (int, optional): The dimension to softmax over.
            Defaults to -1.
        memory_efficient (bool, optional): Whether to use a less precise, but more memory efficient implementation of masked softmax.
            Defaults to False.
        mask_fill_value ([type], optional): The value to fill masked values with if `memory_efficient` is `True`.
            Defaults to -1e32.
    Returns:
        torch.tensor: The masked softmaxed output
    """
    if mask is None:
        result = F.softmax(vector, dim=dim)
    else:
        mask = mask.float()
        while mask.dim() < vector.dim():
            mask = mask.unsqueeze(1)
        if not memory_efficient:
            # To limit numerical errors from large vector elements outside the mask, we zero these out.
            result = F.softmax(vector * mask, dim=dim)
            result = result * mask
            result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
        else:
            # `masked_fill` requires a boolean mask in modern PyTorch; the
            # previous `(1 - mask).byte()` uint8 mask is deprecated and
            # rejected by recent releases. Semantics are unchanged.
            masked_vector = vector.masked_fill((1 - mask).bool(), mask_fill_value)
            result = F.softmax(masked_vector, dim=dim)
    return result
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
else:
raise RuntimeError("activation should be relu/gelu, not %s." % activation)
class TransformerEncoder(nn.Module):
    """A stack of ``TransformerEncoderLayer`` modules configured from ``cfg``.

    Layer width, feed-forward width, head count and depth are read from
    cfg.MODEL (FEAT_DIM, TRANSFORMER_DIM, ATTENTION_HEADS, ATTENTION_LAYERS).

    Args:
        cfg: project config node providing the MODEL options above.
        drop_out: dropout probability inside each layer.
        activation: 'relu' or 'gelu' feed-forward activation.
        norm: optional normalization module applied to the final output.
    """
    def __init__(self, cfg, drop_out=0.1, activation='relu', norm=None):
        super(TransformerEncoder, self).__init__()
        d_model = cfg.MODEL.FEAT_DIM
        ff_dim = cfg.MODEL.TRANSFORMER_DIM
        n_heads = cfg.MODEL.ATTENTION_HEADS
        n_layers = cfg.MODEL.ATTENTION_LAYERS
        prototype = TransformerEncoderLayer(d_model, n_heads, ff_dim, drop_out, activation)
        self.layers = _get_clones(prototype, n_layers)
        self.num_layers = n_layers
        self.norm = norm
    def forward(self, src, mask=None, key_padding_mask=None):
        """Pass ``src`` (seq_len x batch x d_model) through every layer in
        order, then through ``self.norm`` if one was supplied.

        Args:
            src: input sequence.
            mask: optional attention mask for the src sequence.
            key_padding_mask: optional per-batch key padding mask.
        """
        output = src
        for layer in self.layers:
            output = layer(output, src_mask=mask, key_padding_mask=key_padding_mask)
        if self.norm:
            output = self.norm(output)
        return output
class TransformerEncoderLayer(nn.Module):
    """One post-norm transformer encoder layer (self-attention + position-wise
    feed-forward, as in "Attention Is All You Need"), with one addition: when a
    ``key_padding_mask`` is given, padded positions are zeroed both after the
    attention output and at the end of the layer.

    Args:
        d_model: number of expected features in the input.
        nhead: number of attention heads.
        dim_feedforward: hidden width of the feed-forward network.
        dropout: dropout probability.
        activation: 'relu' or 'gelu'.
    """
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
        super(TransformerEncoderLayer, self).__init__()
        # Submodule creation order and attribute names are kept stable so
        # parameter initialization and checkpoint keys match prior versions.
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
    def forward(self, src, src_mask=None, key_padding_mask=None):
        """Run one encoder layer over ``src`` (seq_len x batch x d_model).

        Args:
            src: input sequence.
            src_mask: optional attention mask.
            key_padding_mask: optional (batch x seq_len) boolean padding mask.
        """
        attn_out = self.self_attn(src, src, src, attn_mask=src_mask,
                                  key_padding_mask=key_padding_mask)[0]
        if key_padding_mask is not None:
            # Zero out attention output at padded positions.
            attn_out = attn_out.masked_fill(key_padding_mask.permute(1, 0).unsqueeze(-1), 0)
        src = self.norm1(src + self.dropout1(attn_out))
        if hasattr(self, "activation"):
            ff = self.linear2(self.dropout(self.activation(self.linear1(src))))
        else:  # for backward compatibility with old serialized layers
            ff = self.linear2(self.dropout(F.relu(self.linear1(src))))
        src = self.norm2(src + self.dropout2(ff))
        if key_padding_mask is not None:
            src = src.masked_fill(key_padding_mask.permute(1, 0).unsqueeze(-1), 0)
        return src
| 8,205 | 44.588889 | 133 | py |
AOE-Net | AOE-Net-main/models/model.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import *
from .bmn import BoundaryMatchingNetwork
class EventDetection(nn.Module):
    """AOE-Net top-level model: fuses environment, agent and object features
    with transformer encoders and feeds the fused temporal sequence to a
    boundary-matching event detector (BMN)."""
    def __init__(self, cfg):
        super(EventDetection, self).__init__()
        # Optional per-modality linear projections; only built when a hidden
        # dim is configured (None disables the projection).
        self.use_env_linear = cfg.MODEL.ENV_HIDDEN_DIM is not None
        self.use_agent_linear = cfg.MODEL.AGENT_HIDDEN_DIM is not None
        self.use_obj_linear = cfg.MODEL.OBJ_HIDDEN_DIM is not None
        if self.use_env_linear:
            self.env_linear = nn.Linear(cfg.MODEL.ENV_DIM, cfg.MODEL.ENV_HIDDEN_DIM)
        if self.use_agent_linear:
            self.agent_linear = nn.Linear(cfg.MODEL.AGENT_DIM, cfg.MODEL.AGENT_HIDDEN_DIM)
        if self.use_obj_linear:
            self.obj_linear = nn.Linear(cfg.MODEL.OBJ_DIM, cfg.MODEL.OBJ_HIDDEN_DIM)
        # Transformer fusers: one per box-level modality (agents, objects) and
        # one each for mixing those with the environment stream.
        self.agents_fuser = TransformerEncoder(cfg)
        self.agents_environment_fuser = TransformerEncoder(cfg)
        self.objs_fuser = TransformerEncoder(cfg) #
        self.objs_environment_fuser = TransformerEncoder(cfg) #
        self.bmm_name = cfg.MODEL.BOUNDARY_MATCHING_MODULE
        # NOTE(review): self.event_detector is only created for 'bmn'; any
        # other value leaves the attribute unset and forward() will fail.
        if self.bmm_name == 'bmn':
            self.event_detector = BoundaryMatchingNetwork(cfg)
        self.attention_steps = cfg.TRAIN.ATTENTION_STEPS
        self.topk_hard_attention = cfg.MODEL.TOPK_AGENTS
    def fuse_agent(self, agent_feats, agent_masks, env_feats):
        """Fuse per-box agent features into one feature per temporal position.

        Boxes are filtered by a hard-attention mask (L2-norm softmax against
        an adaptive threshold of 1 / #valid boxes), then the surviving boxes
        are fused with a transformer encoder and mean-pooled.
        Returns a (bsz, tmprl_sz, ft_sz) tensor.
        """
        bsz, tmprl_sz, n_boxes, ft_sz = agent_feats.size()
        step = self.attention_steps
        # Broadcast-add the environment feature to every box for scoring.
        agent_env_feats = torch.unsqueeze(env_feats, 2) + agent_feats
        # Fuse all agents together at every temporal point
        smpl_bgn = 0
        agent_fused_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
        if n_boxes == 0:
            return agent_fused_features
        # Process `step` temporal positions per transformer call.
        for smpl_bgn in range(0, tmprl_sz, step):
            smpl_end = smpl_bgn + step
            ae_feats = agent_env_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz) # bsz x n_boxes x feat_dim
            masks = agent_masks[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes) # bsz x n_boxes
            #hard_attn_masks = masks
            l2_norm = torch.norm(ae_feats, dim=-1) # bsz x n_boxes
            l2_norm_softmax = masked_softmax(l2_norm, masks) # bsz x n_boxes
            # Adaptive threshold is 1 / number of bounding boxes:
            ada_thresh = torch.clamp(1. / torch.sum(masks, dim=-1, keepdim=True), 0., 1.)
            # Generate hard attention masks
            hard_attn_masks = l2_norm_softmax >= ada_thresh # bsz x n_boxes
            keep_mask = (torch.sum(hard_attn_masks, dim=-1) > 0) # bsz
            keep_indices = torch.masked_select(torch.arange(hard_attn_masks.size(0)).cuda(), keep_mask) # keep_mask
            fuser_input = agent_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz).permute(1, 0, 2) # n_boxes x bsz x feat_dim
            if len(keep_indices) > 0:
                # Only rows with at least one surviving box go through the fuser;
                # the rest stay zero in padded_output.
                fuser_input = fuser_input[:, keep_indices] # n_boxes x keep_mask x feat_dim
                hard_attn_masks = hard_attn_masks[keep_indices] # keep_mask x n_boxes
                padded_output = torch.zeros(bsz * (smpl_end - smpl_bgn), ft_sz).cuda() # bsz x feat_dim
                fuser_output = self.agents_fuser(fuser_input, key_padding_mask=~hard_attn_masks) # n_boxes x keep_mask x feat_dim
                #fuser_output = fuser_input * hard_attn_masks.permute(1, 0).contiguous().unsqueeze(-1)
                # Mean over the boxes that survived hard attention.
                fuser_output = torch.sum(fuser_output, dim=0) / torch.sum(hard_attn_masks, dim=-1, keepdim=True) # keep_mask x feat_dim
                padded_output[keep_indices] = fuser_output
                agent_fused_features[:, smpl_bgn:smpl_end] = padded_output.view(bsz, -1, ft_sz)
        return agent_fused_features
    def fuse_obj(self, obj_feats, obj_masks, env_feats):
        """Fuse per-box object features into one feature per temporal position.

        Mirrors fuse_agent() but uses the object fuser.
        Returns a (bsz, tmprl_sz, ft_sz) tensor.
        """
        bsz, tmprl_sz, n_boxes, ft_sz = obj_feats.size()
        step = self.attention_steps
        obj_env_feats = torch.unsqueeze(env_feats, 2) + obj_feats
        # Fuse all agents together at every temporal point
        smpl_bgn = 0
        obj_fused_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
        if n_boxes == 0:
            return obj_fused_features
        for smpl_bgn in range(0, tmprl_sz, step):
            smpl_end = smpl_bgn + step
            ae_feats = obj_env_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz) # bsz x n_boxes x feat_dim
            masks = obj_masks[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes) # bsz x n_boxes
            #hard_attn_masks = masks
            l2_norm = torch.norm(ae_feats, dim=-1) # bsz x n_boxes
            l2_norm_softmax = masked_softmax(l2_norm, masks) # bsz x n_boxes
            # Adaptive threshold is 1 / number of bounding boxes:
            ada_thresh = torch.clamp(1. / torch.sum(masks, dim=-1, keepdim=True), 0., 1.)
            # Generate hard attention masks
            hard_attn_masks = l2_norm_softmax >= ada_thresh # bsz x n_boxes
            keep_mask = (torch.sum(hard_attn_masks, dim=-1) > 0) # bsz
            keep_indices = torch.masked_select(torch.arange(hard_attn_masks.size(0)).cuda(), keep_mask) # keep_mask
            fuser_input = obj_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz).permute(1, 0, 2) # n_boxes x bsz x feat_dim
            if len(keep_indices) > 0:
                fuser_input = fuser_input[:, keep_indices] # n_boxes x keep_mask x feat_dim
                hard_attn_masks = hard_attn_masks[keep_indices] # keep_mask x n_boxes
                padded_output = torch.zeros(bsz * (smpl_end - smpl_bgn), ft_sz).cuda() # bsz x feat_dim
                fuser_output = self.objs_fuser(fuser_input, key_padding_mask=~hard_attn_masks) # n_boxes x keep_mask x feat_dim
                #fuser_output = fuser_input * hard_attn_masks.permute(1, 0).contiguous().unsqueeze(-1)
                fuser_output = torch.sum(fuser_output, dim=0) / torch.sum(hard_attn_masks, dim=-1, keepdim=True) # keep_mask x feat_dim
                padded_output[keep_indices] = fuser_output
                obj_fused_features[:, smpl_bgn:smpl_end] = padded_output.view(bsz, -1, ft_sz)
        return obj_fused_features
    def forward(self, env_features=None, agent_features=None, agent_masks=None, obj_features=None, obj_masks=None):
        """Run detection from any subset of the three feature modalities.

        Modalities that are present are fused pairwise (or all three) with a
        transformer, and the resulting (bsz, tmprl_sz, ft_sz) sequence is
        passed to the boundary-matching detector as (bsz, ft_sz, tmprl_sz).
        """
        if self.use_env_linear and env_features is not None:
            env_features = self.env_linear(env_features)
        if self.use_agent_linear and agent_features is not None:
            agent_features = self.agent_linear(agent_features)
        if self.use_obj_linear and obj_features is not None:
            obj_features = self.obj_linear(obj_features)
        if agent_features is None and obj_features is None:
            return self.event_detector(env_features.permute(0, 2, 1))
        # NOTE(review): fuse_agent()/fuse_obj() as written return a single
        # tensor, but the two-value unpacking below expects
        # (features, selected_indices). As-is this would unpack the returned
        # tensor along its first dim — confirm the intended fuser signature.
        agent_fused_features, selected_agents = self.fuse_agent(agent_features, agent_masks, env_features)
        obj_fused_features, selected_objs = self.fuse_obj(obj_features, obj_masks, env_features)
        if env_features is None and obj_features is None:
            return self.event_detector(agent_fused_features.permute(0, 2, 1))
        if env_features is None and agent_features is None:
            return self.event_detector(obj_fused_features.permute(0, 2, 1))
        ### Stack 2 fts 3 case
        if obj_features is None:
            # Environment + agents only.
            env_agent_obj_cat_features = torch.stack([env_features, agent_fused_features], dim=2)
            bsz, tmprl_sz, ft_sz = env_features.shape
            step = self.attention_steps
            smpl_bgn = 0
            context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
            for smpl_bgn in range(0, tmprl_sz, step):
                smpl_end = smpl_bgn + step
                fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
                fuser_input = fuser_input.view(-1, 2, ft_sz).permute(1, 0, 2)
                fuser_output = self.agents_environment_fuser(fuser_input)
                fuser_output = torch.mean(fuser_output, dim=0)
                context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
            return self.event_detector(context_features.permute(0, 2, 1))
        if agent_features is None:
            # Environment + objects only.
            env_agent_obj_cat_features = torch.stack([env_features, obj_fused_features], dim=2)
            bsz, tmprl_sz, ft_sz = env_features.shape
            step = self.attention_steps
            smpl_bgn = 0
            context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
            for smpl_bgn in range(0, tmprl_sz, step):
                smpl_end = smpl_bgn + step
                fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
                fuser_input = fuser_input.view(-1, 2, ft_sz).permute(1, 0, 2)
                fuser_output = self.agents_environment_fuser(fuser_input)
                fuser_output = torch.mean(fuser_output, dim=0)
                context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
            return self.event_detector(context_features.permute(0, 2, 1))
        if env_features is None:
            # Agents + objects only.
            env_agent_obj_cat_features = torch.stack([agent_fused_features, obj_fused_features], dim=2)
            bsz, tmprl_sz, ft_sz = agent_fused_features.shape
            step = self.attention_steps
            smpl_bgn = 0
            context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
            for smpl_bgn in range(0, tmprl_sz, step):
                smpl_end = smpl_bgn + step
                fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
                fuser_input = fuser_input.view(-1, 2, ft_sz).permute(1, 0, 2)
                fuser_output = self.agents_environment_fuser(fuser_input)
                fuser_output = torch.mean(fuser_output, dim=0)
                context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
            return self.event_detector(context_features.permute(0, 2, 1))
        ### stack all 3 e a o
        env_agent_obj_cat_features = torch.stack([env_features, agent_fused_features,obj_fused_features], dim=2)
        bsz, tmprl_sz, ft_sz = env_features.shape
        step = self.attention_steps
        smpl_bgn = 0
        context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
        for smpl_bgn in range(0, tmprl_sz, step):
            smpl_end = smpl_bgn + step
            fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
            fuser_input = fuser_input.view(-1, 3, ft_sz).permute(1, 0, 2)
            fuser_output = self.agents_environment_fuser(fuser_input)
            fuser_output = torch.mean(fuser_output, dim=0)
            context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
        selected_agents = torch.tensor(selected_agents).cuda()
        selected_objs = torch.tensor(selected_objs).cuda()
        conf_map, start_map, end_map = self.event_detector(context_features.permute(0, 2, 1))
        # NOTE(review): selected_agents/selected_objs were already wrapped and
        # moved to GPU two lines above; the extra torch.tensor(...).cuda()
        # here is redundant.
        return conf_map, start_map, end_map, torch.tensor(selected_agents).cuda(), torch.tensor(selected_objs).cuda()
| 11,169 | 47.146552 | 142 | py |
AOE-Net | AOE-Net-main/models/bmn.py | # -*- coding: utf-8 -*-
import math
import numpy as np
import torch
import torch.nn as nn
class BoundaryMatchingNetwork(nn.Module):
    """Boundary-Matching Network head: produces start/end boundary probability
    curves plus a 2-channel confidence map over (duration, start) proposal
    candidates from a (batch, feat_dim, temporal_dim) feature sequence."""
    def __init__(self, cfg):
        super(BoundaryMatchingNetwork, self).__init__()
        self.prop_boundary_ratio = cfg.BMN.PROP_BOUNDARY_RATIO
        self.num_sample = cfg.BMN.NUM_SAMPLES
        self.num_sample_perbin = cfg.BMN.NUM_SAMPLES_PER_BIN
        self.temporal_dim = cfg.DATA.TEMPORAL_DIM
        self.max_duration = cfg.DATA.MAX_DURATION
        self.feat_dim = cfg.MODEL.FEAT_DIM
        self.hidden_dim_1d = cfg.MODEL.HIDDEN_DIM_1D
        self.hidden_dim_2d = cfg.MODEL.HIDDEN_DIM_2D
        self.hidden_dim_3d = cfg.MODEL.HIDDEN_DIM_3D
        # Precomputed, non-trainable interpolation mask for the BM layer.
        self.sample_mask = self._get_interp1d_mask()
        # Base Module
        self.x_1d_b = nn.Sequential(
            nn.Conv1d(self.feat_dim, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True)
        )
        # Temporal Evaluation Module
        self.x_1d_s = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
            nn.Sigmoid()
        )
        self.x_1d_e = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
            nn.Sigmoid()
        )
        # Proposal Evaluation Module
        self.x_1d_p = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True)
        )
        self.x_3d_p = nn.Sequential(
            nn.Conv3d(self.hidden_dim_1d, self.hidden_dim_3d, kernel_size=(self.num_sample, 1, 1)),
            nn.ReLU(inplace=True)
        )
        self.x_2d_p = nn.Sequential(
            nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1),
            nn.Sigmoid()
        )
    def forward(self, x):
        """Run the BMN head.

        Args:
            x: (batch, feat_dim, temporal_dim) feature sequence.

        Returns:
            confidence_map: (batch, 2, max_duration, temporal_dim) proposal map.
            start: (batch, temporal_dim) start-boundary probabilities.
            end: (batch, temporal_dim) end-boundary probabilities.
        """
        base_feature = self.x_1d_b(x)
        start = self.x_1d_s(base_feature).squeeze(1)
        end = self.x_1d_e(base_feature).squeeze(1)
        confidence_map = self.x_1d_p(base_feature)
        confidence_map = self._boundary_matching_layer(confidence_map)
        confidence_map = self.x_3d_p(confidence_map).squeeze(2)
        confidence_map = self.x_2d_p(confidence_map)
        return confidence_map, start, end
    def _boundary_matching_layer(self, x):
        """Expand 1-D features to per-proposal sampled features via the
        precomputed interpolation mask (one matmul), yielding a
        (batch, channels, num_sample, max_duration, temporal_dim) tensor."""
        input_size = x.size()
        out = torch.matmul(x, self.sample_mask).reshape(
            input_size[0],
            input_size[1],
            self.num_sample,
            self.max_duration,
            self.temporal_dim
        )
        return out
    def _get_interp1d_bin_mask(self, seg_xmin, seg_xmax, tscale, num_sample, num_sample_perbin):
        # generate sample mask for a boundary-matching pair
        plen = float(seg_xmax - seg_xmin)
        plen_sample = plen / (num_sample * num_sample_perbin - 1.0)
        # Evenly spaced (possibly fractional) sample positions over the segment.
        total_samples = [
            seg_xmin + plen_sample * ii
            for ii in range(num_sample * num_sample_perbin)
        ]
        p_mask = []
        for idx in range(num_sample):
            bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) * num_sample_perbin]
            bin_vector = np.zeros([tscale])
            for sample in bin_samples:
                # Linear interpolation: split each fractional position between
                # its two neighboring integer temporal indices (if in range).
                sample_upper = math.ceil(sample)
                sample_decimal, sample_down = math.modf(sample)
                if int(sample_down) <= (tscale - 1) and int(sample_down) >= 0:
                    bin_vector[int(sample_down)] += 1 - sample_decimal
                if int(sample_upper) <= (tscale - 1) and int(sample_upper) >= 0:
                    bin_vector[int(sample_upper)] += sample_decimal
            bin_vector = 1.0 / num_sample_perbin * bin_vector
            p_mask.append(bin_vector)
        # Shape: (tscale, num_sample).
        p_mask = np.stack(p_mask, axis=1)
        return p_mask
    def _get_interp1d_mask(self):
        # generate sample mask for each point in Boundary-Matching Map
        mask_mat = []
        for start_index in range(self.temporal_dim):
            mask_mat_vector = []
            for duration_index in range(self.max_duration):
                if start_index + duration_index < self.temporal_dim:
                    p_xmin = start_index
                    p_xmax = start_index + duration_index
                    center_len = float(p_xmax - p_xmin) + 1
                    # Extend the sampling window past both boundaries by
                    # prop_boundary_ratio of the proposal length.
                    sample_xmin = p_xmin - center_len * self.prop_boundary_ratio
                    sample_xmax = p_xmax + center_len * self.prop_boundary_ratio
                    p_mask = self._get_interp1d_bin_mask(
                        sample_xmin, sample_xmax, self.temporal_dim, self.num_sample,
                        self.num_sample_perbin)
                else:
                    # Out-of-range (start + duration) cells get an all-zero mask.
                    p_mask = np.zeros([self.temporal_dim, self.num_sample])
                mask_mat_vector.append(p_mask)
            mask_mat_vector = np.stack(mask_mat_vector, axis=2)
            mask_mat.append(mask_mat_vector)
        mask_mat = np.stack(mask_mat, axis=3)
        mask_mat = mask_mat.astype(np.float32)
        # Frozen buffer-like parameter: (temporal_dim, num_sample * max_duration * temporal_dim).
        return nn.Parameter(torch.Tensor(mask_mat).view(self.temporal_dim, -1), requires_grad=False)
| 5,810 | 41.416058 | 100 | py |
AOE-Net | AOE-Net-main/evaluation_anet/eval_proposal.py | import json
import numpy as np
import pandas as pd
def get_blocked_videos(api=None):
    """Return the blocked-video list parsed from the local
    ``evaluation_anet/api.json`` snapshot.

    The ``api`` argument is accepted for interface compatibility but unused.
    """
    with open('evaluation_anet/api.json', 'r') as blocked_file:
        return json.loads(blocked_file.read())
def interpolated_prec_rec(prec, rec):
    """Interpolated average precision (VOCdevkit, VOC 2011 style)."""
    # Pad the curves so the envelope and integration limits are well defined.
    mprec = np.concatenate(([0], prec, [0]))
    mrec = np.concatenate(([0], rec, [1]))
    # Make precision monotonically non-increasing (right-to-left envelope).
    for i in reversed(range(len(mprec) - 1)):
        mprec[i] = max(mprec[i], mprec[i + 1])
    # Integrate the precision envelope over the recall steps.
    idx = np.where(mrec[1::] != mrec[0:-1])[0] + 1
    return np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
def segment_iou(target_segment, candidate_segments):
    """Compute the temporal IoU between one target segment and N candidates.

    Parameters
    ----------
    target_segment : 1d array
        [start, end] times of the target segment.
    candidate_segments : 2d array
        N x [start, end] candidate segments.

    Returns
    -------
    1d array
        IoU of each candidate against the target.
    """
    inter_start = np.maximum(target_segment[0], candidate_segments[:, 0])
    inter_end = np.minimum(target_segment[1], candidate_segments[:, 1])
    # Clip to zero so disjoint segments contribute no overlap.
    intersection = (inter_end - inter_start).clip(0)
    union = ((candidate_segments[:, 1] - candidate_segments[:, 0])
             + (target_segment[1] - target_segment[0]) - intersection)
    return intersection.astype(float) / union
def wrapper_segment_iou(target_segments, candidate_segments):
    """Compute pairwise temporal IoU between two sets of segments.

    Parameters
    ----------
    target_segments : ndarray, shape (m, 2)
    candidate_segments : ndarray, shape (n, 2)

    Returns
    -------
    ndarray, shape (n, m)
        IoU of every candidate (rows) against every target (columns).

    Raises
    ------
    ValueError
        If either input is not 2-dimensional.
    """
    if candidate_segments.ndim != 2 or target_segments.ndim != 2:
        raise ValueError('Dimension of arguments is incorrect')
    n_candidates = candidate_segments.shape[0]
    n_targets = target_segments.shape[0]
    tiou = np.empty((n_candidates, n_targets))
    # One column per target; segment_iou vectorizes over all candidates.
    for col in range(n_targets):
        tiou[:, col] = segment_iou(target_segments[col, :], candidate_segments)
    return tiou
class ANETproposal(object):
    """ActivityNet proposal-task evaluator: loads ground truth and proposal
    JSON files into DataFrames and computes the normalized area under the
    average-recall vs average-number-of-proposals curve."""
    # Required top-level keys in the respective JSON files.
    GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
    PROPOSAL_FIELDS = ['results', 'version', 'external_data']
    def __init__(self, ground_truth_filename=None, proposal_filename=None,
                 ground_truth_fields=GROUND_TRUTH_FIELDS,
                 proposal_fields=PROPOSAL_FIELDS,
                 tiou_thresholds=np.linspace(0.5, 0.95, 10),
                 max_avg_nr_proposals=None,
                 subset='validation', verbose=False,
                 check_status=False):
        if not ground_truth_filename:
            raise IOError('Please input a valid ground truth file.')
        if not proposal_filename:
            raise IOError('Please input a valid proposal file.')
        self.subset = subset
        self.tiou_thresholds = tiou_thresholds
        self.max_avg_nr_proposals = max_avg_nr_proposals
        self.verbose = verbose
        self.gt_fields = ground_truth_fields
        self.pred_fields = proposal_fields
        # Populated by evaluate().
        self.recall = None
        self.avg_recall = None
        self.proposals_per_video = None
        self.check_status = check_status
        # Retrieve blocked videos from server.
        if self.check_status:
            self.blocked_videos = get_blocked_videos()
        else:
            self.blocked_videos = list()
        # Import ground truth and proposals.
        self.ground_truth, self.activity_index = self._import_ground_truth(
            ground_truth_filename)
        self.proposal = self._import_proposal(proposal_filename)
        if self.verbose:
            print('[INIT] Loaded annotations from {} subset.'.format(subset))
            nr_gt = len(self.ground_truth)
            print('\tNumber of ground truth instances: {}'.format(nr_gt))
            nr_pred = len(self.proposal)
            print('\tNumber of proposals: {}'.format(nr_pred))
            print('\tFixed threshold for tiou score: {}'.format(self.tiou_thresholds))
    def _import_ground_truth(self, ground_truth_filename):
        """Reads ground truth file, checks if it is well formatted, and returns
        the ground truth instances and the activity classes.
        Parameters
        ----------
        ground_truth_filename : str
            Full path to the ground truth json file.
        Outputs
        -------
        ground_truth : df
            Data frame containing the ground truth instances.
        activity_index : dict
            Dictionary containing class index.
        """
        with open(ground_truth_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format
        if not all([field in data.keys() for field in self.gt_fields]):
            raise IOError('Please input a valid ground truth file.')
        # Read ground truth data.
        activity_index, cidx = {}, 0
        video_lst, t_start_lst, t_end_lst, label_lst = [], [], [], []
        for videoid, v in data['database'].items():
            # Keep only the requested subset and unblocked videos.
            if self.subset != v['subset']:
                continue
            if videoid in self.blocked_videos:
                continue
            for ann in v['annotations']:
                # Assign a dense integer index to each label on first sight.
                if ann['label'] not in activity_index:
                    activity_index[ann['label']] = cidx
                    cidx += 1
                video_lst.append(videoid)
                t_start_lst.append(ann['segment'][0])
                t_end_lst.append(ann['segment'][1])
                label_lst.append(activity_index[ann['label']])
        ground_truth = pd.DataFrame({'video-id': video_lst,
                                     't-start': t_start_lst,
                                     't-end': t_end_lst,
                                     'label': label_lst})
        return ground_truth, activity_index
    def _import_proposal(self, proposal_filename):
        """Reads proposal file, checks if it is well formatted, and returns
        the proposal instances.
        Parameters
        ----------
        proposal_filename : str
            Full path to the proposal json file.
        Outputs
        -------
        proposal : df
            Data frame containing the proposal instances.
        """
        with open(proposal_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format...
        if not all([field in data.keys() for field in self.pred_fields]):
            raise IOError('Please input a valid proposal file.')
        # Read predictions.
        video_lst, t_start_lst, t_end_lst = [], [], []
        score_lst = []
        for videoid, v in data['results'].items():
            if videoid in self.blocked_videos:
                continue
            for result in v:
                video_lst.append(videoid)
                t_start_lst.append(result['segment'][0])
                t_end_lst.append(result['segment'][1])
                score_lst.append(result['score'])
        proposal = pd.DataFrame({
            'video-id': video_lst,
            't-start': t_start_lst,
            't-end': t_end_lst,
            'score': score_lst
        })
        return proposal
    def evaluate(self):
        """Evaluates a proposal file. To measure the performance of a
        method for the proposal task, we computes the area under the
        average recall vs average number of proposals per video curve.
        """
        recall, avg_recall, proposals_per_video = average_recall_vs_avg_nr_proposals(
            self.ground_truth, self.proposal,
            max_avg_nr_proposals=self.max_avg_nr_proposals,
            tiou_thresholds=self.tiou_thresholds
        )
        area_under_curve = np.trapz(avg_recall, proposals_per_video)
        if self.verbose:
            print('[RESULTS] Performance on ActivityNet proposal task.')
            print('\tArea Under the AR vs AN curve: {}%'.format(100. * float(area_under_curve) / proposals_per_video[-1]))
        # Cache curves for later inspection/plotting.
        self.recall = recall
        self.avg_recall = avg_recall
        self.proposals_per_video = proposals_per_video
        # AUC normalized by the x-axis extent, expressed as a percentage.
        return 100. * float(area_under_curve) / proposals_per_video[-1]
def average_recall_vs_avg_nr_proposals(ground_truth, proposals,
                                       max_avg_nr_proposals=None,
                                       tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """ Computes the average recall given an average number
    of proposals per video.

    Parameters
    ----------
    ground_truth : df
        Data frame containing the ground truth instances.
        Required fields: ['video-id', 't-start', 't-end']
    proposals : df
        Data frame containing the proposal instances.
        Required fields: ['video-id, 't-start', 't-end', 'score']
    max_avg_nr_proposals : float, optional
        Cap on the average number of proposals per video that are
        considered. Defaults to the dataset-wide proposals/videos ratio.
    tiou_thresholds : 1darray, optional
        array with tiou thresholds.

    Outputs
    -------
    recall : 2darray
        recall[i,j] is recall at ith tiou threshold at the jth average number of average number of proposals per video.
    average_recall : 1darray
        recall averaged over a list of tiou threshold. This is equivalent to recall.mean(axis=0).
    proposals_per_video : 1darray
        average number of proposals per video.
    """
    # Get list of videos.
    video_lst = ground_truth['video-id'].unique()
    if not max_avg_nr_proposals:
        max_avg_nr_proposals = float(proposals.shape[0]) / video_lst.shape[0]
    ratio = max_avg_nr_proposals * float(video_lst.shape[0]) / proposals.shape[0]
    # Adaptation to query faster
    ground_truth_gbvn = ground_truth.groupby('video-id')
    proposals_gbvn = proposals.groupby('video-id')
    # For each video, computes tiou scores among the retrieved proposals.
    score_lst = []
    total_nr_proposals = 0
    for videoid in video_lst:
        # Get proposals for this video.
        proposals_videoid = proposals_gbvn.get_group(videoid)
        this_video_proposals = proposals_videoid.loc[:, ['t-start', 't-end']].values
        # Sort proposals by score.
        sort_idx = proposals_videoid['score'].argsort()[::-1]
        this_video_proposals = this_video_proposals[sort_idx, :]
        # Get ground-truth instances associated to this video.
        ground_truth_videoid = ground_truth_gbvn.get_group(videoid)
        this_video_ground_truth = ground_truth_videoid.loc[:, ['t-start', 't-end']].values
        if this_video_proposals.shape[0] == 0:
            # No proposals for this video: record zero tiou against every
            # ground-truth instance so recall still counts the misses.
            n = this_video_ground_truth.shape[0]
            score_lst.append(np.zeros((n, 1)))
            continue
        if this_video_proposals.ndim != 2:
            this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
        if this_video_ground_truth.ndim != 2:
            this_video_ground_truth = np.expand_dims(this_video_ground_truth, axis=0)
        # Keep at most `ratio` of this video's proposals (top-scored first).
        nr_proposals = np.minimum(int(this_video_proposals.shape[0] * ratio), this_video_proposals.shape[0])
        total_nr_proposals += nr_proposals
        this_video_proposals = this_video_proposals[:nr_proposals, :]
        # Compute tiou scores.
        tiou = wrapper_segment_iou(this_video_proposals, this_video_ground_truth)
        score_lst.append(tiou)
    # Given that the length of the videos is really varied, we
    # compute the number of proposals in terms of a ratio of the total
    # proposals retrieved, i.e. average recall at a percentage of proposals
    # retrieved per video.
    # Computes average recall.
    pcn_lst = np.arange(1, 101) / 100.0 * (max_avg_nr_proposals * float(video_lst.shape[0]) / total_nr_proposals)
    matches = np.empty((video_lst.shape[0], pcn_lst.shape[0]))
    positives = np.empty(video_lst.shape[0])
    recall = np.empty((tiou_thresholds.shape[0], pcn_lst.shape[0]))
    # Iterates over each tiou threshold.
    for ridx, tiou in enumerate(tiou_thresholds):
        # Inspect positives retrieved per video at different
        # number of proposals (percentage of the total retrieved).
        for i, score in enumerate(score_lst):
            # Total positives per video.
            positives[i] = score.shape[0]
            # Find proposals that satisfies minimum tiou threshold.
            true_positives_tiou = score >= tiou
            # Get number of proposals as a percentage of total retrieved.
            # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int is the documented drop-in replacement.
            pcn_proposals = np.minimum((score.shape[1] * pcn_lst).astype(int), score.shape[1])
            for j, nr_proposals in enumerate(pcn_proposals):
                # Compute the number of matches for each percentage of the proposals
                matches[i, j] = np.count_nonzero((true_positives_tiou[:, :nr_proposals]).sum(axis=1))
        # Computes recall given the set of matches per video.
        recall[ridx, :] = matches.sum(axis=0) / positives.sum()
    # Recall is averaged.
    avg_recall = recall.mean(axis=0)
    # Get the average number of proposals per video.
    proposals_per_video = pcn_lst * (float(total_nr_proposals) / video_lst.shape[0])
    return recall, avg_recall, proposals_per_video
| 13,403 | 38.307918 | 122 | py |
AOE-Net | AOE-Net-main/evaluation_anet/eval_detection.py | import json
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
def get_blocked_videos(api=None):
    """Return the blocked-video list stored in the local API file.

    The ``api`` argument is accepted for interface compatibility but is
    not used; the data is always read from ``evaluation_anet/api.json``.
    """
    with open('evaluation_anet/api.json', 'r') as handle:
        blocked = json.load(handle)
    return blocked
def interpolated_prec_rec(prec, rec):
    """Interpolated AP - VOCdevkit from VOC 2011.
    """
    # Pad both curves with sentinel endpoints.
    mprec = np.hstack([[0], prec, [0]])
    mrec = np.hstack([[0], rec, [1]])
    # Sweep right-to-left so precision becomes monotonically non-increasing.
    for i in reversed(range(len(mprec) - 1)):
        if mprec[i + 1] > mprec[i]:
            mprec[i] = mprec[i + 1]
    # Sum rectangle areas at the points where recall changes.
    idx = np.where(mrec[1:] != mrec[:-1])[0] + 1
    return np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
def segment_iou(target_segment, candidate_segments):
    """Compute the temporal intersection over union between a
    target segment and all the test segments.

    Parameters
    ----------
    target_segment : 1d array
        Temporal target segment containing [starting, ending] times.
    candidate_segments : 2d array
        Temporal candidate segments containing N x [starting, ending] times.

    Outputs
    -------
    tiou : 1d array
        Temporal intersection over union score of the N's candidate segments.
    """
    starts = np.maximum(target_segment[0], candidate_segments[:, 0])
    ends = np.minimum(target_segment[1], candidate_segments[:, 1])
    # Disjoint segments would give a negative span, so clip at zero.
    intersection = (ends - starts).clip(0)
    candidate_lengths = candidate_segments[:, 1] - candidate_segments[:, 0]
    target_length = target_segment[1] - target_segment[0]
    union = candidate_lengths + target_length - intersection
    return intersection.astype(float) / union
def wrapper_segment_iou(target_segments, candidate_segments):
    """Compute intersection over union btw segments

    Parameters
    ----------
    target_segments : ndarray
        2-dim array in format [m x 2:=[init, end]]
    candidate_segments : ndarray
        2-dim array in format [n x 2:=[init, end]]

    Outputs
    -------
    tiou : ndarray
        2-dim array [n x m] with IOU ratio.

    Note: It assumes that candidate-segments are more scarce that target-segments
    """
    if candidate_segments.ndim != 2 or target_segments.ndim != 2:
        raise ValueError('Dimension of arguments is incorrect')
    n_candidates = candidate_segments.shape[0]
    n_targets = target_segments.shape[0]
    tiou = np.empty((n_candidates, n_targets))
    # One column per target segment, scored against every candidate.
    for col in range(n_targets):
        tiou[:, col] = segment_iou(target_segments[col, :], candidate_segments)
    return tiou
class ANETdetection(object):
    """Evaluator for the ActivityNet temporal action detection task.

    Loads ground-truth annotations and predictions from JSON files and
    computes interpolated mean average precision (mAP) over a range of
    temporal-IoU thresholds.
    """

    #GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
    GROUND_TRUTH_FIELDS = ['database']
    PREDICTION_FIELDS = ['results', 'version', 'external_data']

    def __init__(self, ground_truth_filename=None, prediction_filename=None,
                 ground_truth_fields=GROUND_TRUTH_FIELDS,
                 prediction_fields=PREDICTION_FIELDS,
                 tiou_thresholds=np.linspace(0.5, 0.95, 10),
                 subset='validation', verbose=False,
                 check_status=True):
        """Parse both JSON files and cache them as data frames.

        Raises
        ------
        IOError
            If either filename is missing or a file is badly formatted.
        """
        if not ground_truth_filename:
            raise IOError('Please input a valid ground truth file.')
        if not prediction_filename:
            raise IOError('Please input a valid prediction file.')
        self.subset = subset
        self.tiou_thresholds = tiou_thresholds
        self.verbose = verbose
        self.gt_fields = ground_truth_fields
        self.pred_fields = prediction_fields
        self.ap = None
        self.check_status = check_status
        # Retrieve blocked videos from server.
        if self.check_status:
            self.blocked_videos = get_blocked_videos()
        else:
            self.blocked_videos = list()
        # Import ground truth and predictions.
        self.ground_truth, self.activity_index = self._import_ground_truth(
            ground_truth_filename)
        self.prediction = self._import_prediction(prediction_filename)
        if self.verbose:
            print('[INIT] Loaded annotations from {} subset.'.format(subset))
            nr_gt = len(self.ground_truth)
            print('\tNumber of ground truth instances: {}'.format(nr_gt))
            nr_pred = len(self.prediction)
            print('\tNumber of predictions: {}'.format(nr_pred))
            print('\tFixed threshold for tiou score: {}'.format(self.tiou_thresholds))

    def _import_ground_truth(self, ground_truth_filename):
        """Reads ground truth file, checks if it is well formatted, and returns
        the ground truth instances and the activity classes.

        Parameters
        ----------
        ground_truth_filename : str
            Full path to the ground truth json file.

        Outputs
        -------
        ground_truth : df
            Data frame containing the ground truth instances.
        activity_index : dict
            Dictionary containing class index.
        """
        with open(ground_truth_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format
        if not all([field in data.keys() for field in self.gt_fields]):
            raise IOError('Please input a valid ground truth file.')
        # Read ground truth data. Class indices are assigned in order of
        # first appearance of each label in the selected subset.
        activity_index, cidx = {}, 0
        video_lst, t_start_lst, t_end_lst, label_lst = [], [], [], []
        for videoid, v in data['database'].items():
            if self.subset != v['subset']:
                continue
            if videoid in self.blocked_videos:
                continue
            for ann in v['annotations']:
                if ann['label'] not in activity_index:
                    activity_index[ann['label']] = cidx
                    cidx += 1
                video_lst.append(videoid)
                t_start_lst.append(float(ann['segment'][0]))
                t_end_lst.append(float(ann['segment'][1]))
                label_lst.append(activity_index[ann['label']])
        ground_truth = pd.DataFrame({'video-id': video_lst,
                                     't-start': t_start_lst,
                                     't-end': t_end_lst,
                                     'label': label_lst})
        return ground_truth, activity_index

    def _import_prediction(self, prediction_filename):
        """Reads prediction file, checks if it is well formatted, and returns
        the prediction instances.

        Parameters
        ----------
        prediction_filename : str
            Full path to the prediction json file.

        Outputs
        -------
        prediction : df
            Data frame containing the prediction instances.
        """
        with open(prediction_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format...
        if not all([field in data.keys() for field in self.pred_fields]):
            raise IOError('Please input a valid prediction file.')
        # Read predictions; labels are mapped through the ground-truth index.
        video_lst, t_start_lst, t_end_lst = [], [], []
        label_lst, score_lst = [], []
        for videoid, v in data['results'].items():
            if videoid in self.blocked_videos:
                continue
            for result in v:
                label = self.activity_index[result['label']]
                video_lst.append(videoid)
                t_start_lst.append(float(result['segment'][0]))
                t_end_lst.append(float(result['segment'][1]))
                label_lst.append(label)
                score_lst.append(result['score'])
        prediction = pd.DataFrame({'video-id': video_lst,
                                   't-start': t_start_lst,
                                   't-end': t_end_lst,
                                   'label': label_lst,
                                   'score': score_lst})
        return prediction

    def _get_predictions_with_label(self, prediction_by_label, label_name, cidx):
        """Get all predicitons of the given label. Return empty DataFrame if there
        is no predcitions with the given label.
        """
        try:
            return prediction_by_label.get_group(cidx).reset_index(drop=True)
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. GroupBy.get_group raises KeyError for an
        # absent group, so catch exactly that.
        except KeyError:
            print('Warning: No predictions of label \'%s\' were provdied.' % label_name)
            return pd.DataFrame()

    def wrapper_compute_average_precision(self):
        """Computes average precision for each class in the subset.
        """
        ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
        # Adaptation to query faster
        ground_truth_by_label = self.ground_truth.groupby('label')
        prediction_by_label = self.prediction.groupby('label')
        # One parallel job per activity class.
        results = Parallel(n_jobs=len(self.activity_index))(
            delayed(compute_average_precision_detection)(
                ground_truth=ground_truth_by_label.get_group(cidx).reset_index(drop=True),
                prediction=self._get_predictions_with_label(prediction_by_label, label_name, cidx),
                tiou_thresholds=self.tiou_thresholds,
            ) for label_name, cidx in self.activity_index.items())
        for i, cidx in enumerate(self.activity_index.values()):
            ap[:, cidx] = results[i]
        return ap

    def evaluate(self):
        """Evaluates a prediction file. For the detection task we measure the
        interpolated mean average precision to measure the performance of a
        method.
        """
        self.ap = self.wrapper_compute_average_precision()
        # mAP per tIoU threshold, then the average over all thresholds.
        self.mAP = self.ap.mean(axis=1)
        self.average_mAP = self.mAP.mean()
        if self.verbose:
            print('[RESULTS] Performance on ActivityNet detection task.')
            print('\tAverage-mAP: {}'.format(self.average_mAP))
def compute_average_precision_detection(ground_truth, prediction, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """Compute average precision (detection task) between ground truth and
    predictions data frames. If multiple predictions occurs for the same
    predicted segment, only the one with highest score is matches as
    true positive. This code is greatly inspired by Pascal VOC devkit.

    Parameters
    ----------
    ground_truth : df
        Data frame containing the ground truth instances.
        Required fields: ['video-id', 't-start', 't-end']
    prediction : df
        Data frame containing the prediction instances.
        Required fields: ['video-id, 't-start', 't-end', 'score']
    tiou_thresholds : 1darray, optional
        Temporal intersection over union threshold.

    Outputs
    -------
    ap : 1darray
        Average precision score per tiou threshold.
    """
    ap = np.zeros(len(tiou_thresholds))
    if prediction.empty:
        return ap
    npos = float(len(ground_truth))
    # lock_gt[tidx, gt] holds the index of the prediction that claimed
    # ground-truth instance `gt` at threshold `tidx` (-1 means unclaimed).
    lock_gt = np.ones((len(tiou_thresholds), len(ground_truth))) * -1
    # Sort predictions by decreasing score order.
    sort_idx = prediction['score'].values.argsort()[::-1]
    prediction = prediction.loc[sort_idx].reset_index(drop=True)
    # Initialize true positive and false positive vectors.
    tp = np.zeros((len(tiou_thresholds), len(prediction)))
    fp = np.zeros((len(tiou_thresholds), len(prediction)))
    # Adaptation to query faster
    ground_truth_gbvn = ground_truth.groupby('video-id')
    # Assigning true positive to truly grount truth instances.
    for idx, this_pred in prediction.iterrows():
        try:
            # Check if there is at least one ground truth in the video associated.
            ground_truth_videoid = ground_truth_gbvn.get_group(this_pred['video-id'])
        except Exception:
            # No ground truth in this video: the prediction is a false
            # positive at every threshold.
            fp[:, idx] = 1
            continue
        this_gt = ground_truth_videoid.reset_index()
        tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values,
                               this_gt[['t-start', 't-end']].values)
        # We would like to retrieve the predictions with highest tiou score.
        tiou_sorted_idx = tiou_arr.argsort()[::-1]
        for tidx, tiou_thr in enumerate(tiou_thresholds):
            for jdx in tiou_sorted_idx:
                if tiou_arr[jdx] < tiou_thr:
                    fp[tidx, idx] = 1
                    break
                if lock_gt[tidx, this_gt.loc[jdx]['index']] >= 0:
                    # Ground-truth instance already matched by a
                    # higher-scored prediction; try the next candidate.
                    continue
                # Assign as true positive after the filters above.
                tp[tidx, idx] = 1
                lock_gt[tidx, this_gt.loc[jdx]['index']] = idx
                break
            if fp[tidx, idx] == 0 and tp[tidx, idx] == 0:
                fp[tidx, idx] = 1
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented drop-in replacement.
    tp_cumsum = np.cumsum(tp, axis=1).astype(float)
    fp_cumsum = np.cumsum(fp, axis=1).astype(float)
    recall_cumsum = tp_cumsum / npos
    precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
    for tidx in range(len(tiou_thresholds)):
        ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx, :], recall_cumsum[tidx, :])
    return ap
| 12,868 | 38.719136 | 110 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.