repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
    """Load weights from the original OpenAI GPT checkpoint (NumPy shards) into a PyTorch model.

    The checkpoint folder is expected to contain:
      - ``parameters_names.json``: ordered TF variable names,
      - ``params_shapes.json``: the shape of each variable,
      - ``params_0.npy`` .. ``params_9.npy``: ten flat shards whose concatenation
        holds all parameter values back to back.

    Args:
        model: the PyTorch ``OpenAIGPTModel`` (or subclass) to fill in place.
        config: model configuration (unused here, kept for interface parity with
            the other ``load_tf_weights_in_*`` loaders).
        openai_checkpoint_folder_path: checkpoint folder, or a path to a ``.ckpt``
            file inside it.

    Returns:
        The same ``model`` with weights initialized from the checkpoint.
    """
    import re
    import numpy as np

    # Accept a path to a .ckpt file as well as the containing folder.
    if '.ckpt' in openai_checkpoint_folder_path:
        openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
    logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))

    # Close the JSON files deterministically (the original code leaked the handles).
    with open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8') as names_file:
        names = json.load(names_file)
    with open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8') as shapes_file:
        shapes = json.load(shapes_file)

    # All parameters are flattened into 10 shards; rebuild each parameter by
    # splitting the concatenation at the cumulative offsets, then reshaping.
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]

    # This was used when we had a single embedding matrix for positions and tokens
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
    # del init_params[1]
    init_params = [arr.squeeze() for arr in init_params]

    # Embeddings come first: init_params[1] is the token table, [0] the positions.
    try:
        assert model.tokens_embed.weight.shape == init_params[1].shape
        assert model.positions_embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        # Attach the mismatching shapes to the exception for easier debugging.
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
        raise

    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])

    # Pop position and token embedding arrays (already loaded above).
    names.pop(0)
    init_params.pop(0)
    init_params.pop(0)

    for name, array in zip(names, init_params):
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        # Walk the attribute path to the matching PyTorch parameter.
        pointer = model
        for m_name in name:
            # TF scope names like "h0" carry a layer index; split it off.
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                scope_names = re.split(r'(\d+)', m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == 'g' or scope_names[0] == 'w':
                pointer = getattr(pointer, 'weight')
            elif scope_names[0] == 'b':
                pointer = getattr(pointer, 'bias')
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                pointer = pointer[int(scope_names[1])]
        # Single shape check (the original duplicated this try/except verbatim).
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation used by GPT).

    Computes 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))


def swish(x):
    """Swish (a.k.a. SiLU) activation: x * sigmoid(x)."""
    return x * torch.sigmoid(x)


# Map config.afn names to activation *functions*. Note: use the functional
# relu, not the nn.ReLU class — the original stored the class, so
# MLP's `self.act(hidden)` would have constructed a module with the tensor
# as its `inplace` argument instead of applying relu to the tensor.
ACT_FNS = {"relu": nn.functional.relu, "swish": swish, "gelu": gelu}
class Attention(nn.Module):
    """Multi-head causal self-attention (GPT style).

    A single fused Conv1D produces query/key/value; heads are split, attended
    with a fixed lower-triangular causal mask, merged, and projected back.
    """

    def __init__(self, nx, n_ctx, config, scale=False):
        """
        Args:
            nx: hidden size (n_embd).
            n_ctx: maximum context length; fixes the causal mask size.
            config: model configuration (n_head, attn_pdrop, resid_pdrop, ...).
            scale: if True, divide attention scores by sqrt(head_dim).
        """
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Causal mask buffer: lower-triangular ones, shaped to broadcast over
        # the (batch, head) dimensions.
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.output_attentions = config.output_attentions
        self.c_attn = Conv1D(n_state * 3, nx)  # fused q/k/v projection
        self.c_proj = Conv1D(n_state, nx)      # output projection
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from c_attn/c_proj in place."""
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # ignore already-pruned heads
        for head in heads:
            # Shift the index to account for heads pruned in earlier calls.
            head -= sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # c_attn packs q, k and v side by side, so prune the same columns in
        # all three slices.
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
        """Core dot-product attention with causal masking.

        Returns [context] or [context, attention_weights] when
        output_attentions is set.
        """
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        # w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
        # XD: self.b may be larger than w, so we need to crop it
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
        # Additive causal mask: -1e4 on future positions (small enough for fp16).
        w = w * b + - 1e4 * (1 - b)
        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask
        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        # (batch, head, seq, head_dim) -> (batch, seq, head * head_dim)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        # (batch, seq, n_state) -> (batch, head, seq, head_dim); keys get an
        # extra transpose so q @ k works without another permute.
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x, attention_mask=None, head_mask=None):
        """Returns [hidden_states] (+ [attention_weights] if enabled)."""
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        outputs = [a] + attn_outputs[1:]
        return outputs  # a, (attentions)
class MLP(nn.Module):
    """Position-wise feed-forward block of a GPT layer.

    Expands the hidden size to ``n_state`` (4 * n_embd in Block), applies the
    configured activation, projects back down, and applies residual dropout.
    """

    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        hidden = self.act(self.c_fc(x))
        projected = self.c_proj(hidden)
        return self.dropout(projected)
class Block(nn.Module):
    """One GPT transformer layer: self-attention then MLP, each wrapped in a
    residual connection followed by post-layer-norm (the original GPT order)."""

    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        embed_dim = config.n_embd
        self.attn = Attention(embed_dim, n_ctx, config, scale)
        self.ln_1 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * embed_dim, config)
        self.ln_2 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)

    def forward(self, x, attention_mask=None, head_mask=None):
        attn_outputs = self.attn(x, attention_mask=attention_mask, head_mask=head_mask)
        # Residual + norm around attention, then residual + norm around the MLP.
        normed = self.ln_1(x + attn_outputs[0])
        hidden = self.ln_2(normed + self.mlp(normed))
        return [hidden] + attn_outputs[1:]  # hidden state, (attentions)
class OpenAIGPTPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and a simple
    interface for downloading and loading pretrained models."""
    config_class = OpenAIGPTConfig
    pretrained_model_archive_map = OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_openai_gpt
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """Initialize weights: N(0, initializer_range) for linear/embedding
        weights, zeros for biases, identity affine for LayerNorm."""
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
            return
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version, which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
# Fixed typo in the original text: "corpus will long range" -> "corpus with long range".
OPENAI_GPT_START_DOCSTRING = r"""    OpenAI GPT model was proposed in
    `Improving Language Understanding by Generative Pre-Training`_
    by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
    It's a causal (unidirectional) transformer pre-trained using language modeling on a large
    corpus with long range dependencies, the Toronto Book Corpus.
    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matter related to general usage and behavior.
    .. _`Improving Language Understanding by Generative Pre-Training`:
        https://openai.com/blog/language-unsupervised/
    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module
    Parameters:
        config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
# Fixed the tokenizer reference: `transformers.BPT2Tokenizer` does not exist;
# the tokenizer used throughout this file's examples is OpenAIGPTTokenizer.
OPENAI_GPT_INPUTS_DOCSTRING = r"""    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on
            the right rather than the left.
            Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices)
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
                      OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
        model = OpenAIGPTModel.from_pretrained('openai-gpt')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    def __init__(self, config):
        super(OpenAIGPTModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        # Token and learned absolute-position embedding tables.
        self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        # Stack of n_layer transformer blocks (scale=True: scaled dot-product).
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        # Swap in a resized copy of the token embedding table (hook used by the
        # base class's resize_token_embeddings).
        self.tokens_embed = self._get_resized_embeddings(self.tokens_embed, new_num_tokens)
        return self.tokens_embed

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
        if position_ids is None:
            # This was used when we had a single embedding matrice from position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            # Default positions: 0..seq_len-1, broadcast over the batch.
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Attention mask.
        if attention_mask is not None:
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                # A single 1D mask is broadcast over all layers and batches.
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        # Flatten any extra leading dims to (batch, seq) for the embeddings;
        # the output is reshaped back via output_shape below.
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            # Token-type embeddings share the token table (no separate vocab).
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        all_attentions = ()
        all_hidden_states = ()
        for i, block in enumerate(self.h):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            outputs = block(hidden_states, attention_mask, head_mask[i])
            hidden_states = outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (outputs[1],)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

        outputs = (hidden_states.view(*output_shape),)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (all hidden states), (all attentions)
@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]
    """
    def __init__(self, config):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        # Projects hidden states to vocabulary logits; weight is tied to the
        # input token embedding by tie_weights below, hence bias=False.
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.tokens_embed)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens; positions labeled -1 are excluded from the loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, (all hidden states), (all attentions)
@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""", OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    r"""
        **mc_token_ids**: (`optional`, default to index of the last token of the input) ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1[``.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)
            `multiple_choice_labels`: optional multiple choice labels: ``torch.LongTensor`` of shape [batch_size]
            with indices selected in [0, ..., num_choices].
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Multiple choice classification loss.
        **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
        model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})  # Add a [CLS] to the vocabulary (we should train it also!)
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0)  # index of the last token
        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]
    """
    def __init__(self, config):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        # LM head is weight-tied to the input token embedding (see tie_weights).
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Classification head summarized from the hidden state at mc_token_ids.
        self.multiple_choice_head = SequenceSummary(config)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.tokens_embed)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                mc_token_ids=None, lm_labels=None, mc_labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            # Shift so that tokens < n predict n; -1 labels are ignored.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, (all hidden_states), (attentions)
| 30,836 | 48.57717 | 148 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/configuration_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from .configuration_bert import BertConfig
from .configuration_openai import OpenAIGPTConfig
from .configuration_gpt2 import GPT2Config
from .configuration_transfo_xl import TransfoXLConfig
from .configuration_xlnet import XLNetConfig
from .configuration_xlm import XLMConfig
from .configuration_roberta import RobertaConfig
from .configuration_distilbert import DistilBertConfig
from .configuration_ctrl import CTRLConfig
logger = logging.getLogger(__name__)
class AutoConfig(object):
r""":class:`~transformers.AutoConfig` is a generic configuration class
that will be instantiated as one of the configuration classes of the library
when created with the `AutoConfig.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method take care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The base model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertConfig (DistilBERT model)
- contains `bert`: BertConfig (Bert model)
- contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)
- contains `gpt2`: GPT2Config (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)
- contains `xlnet`: XLNetConfig (XLNet model)
- contains `xlm`: XLMConfig (XLM model)
- contains `roberta`: RobertaConfig (RoBERTa model)
- contains `ctrl` : CTRLConfig (CTRL model)
This class cannot be instantiated using `__init__()` (throw an error).
"""
def __init__(self):
    # AutoConfig is a pure factory class: direct instantiation is always an
    # error; configurations must be created through the from_pretrained()
    # classmethod instead.
    raise EnvironmentError("AutoConfig is designed to be instantiated "
                           "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r""" Instantiate a one of the configuration classes of the library
from a pre-trained model configuration.
The configuration class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertConfig (DistilBERT model)
- contains `bert`: BertConfig (Bert model)
- contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)
- contains `gpt2`: GPT2Config (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)
- contains `xlnet`: XLNetConfig (XLNet model)
- contains `xlm`: XLMConfig (XLM model)
- contains `roberta`: RobertaConfig (RoBERTa model)
- contains `ctrl` : CTRLConfig (CTRL model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
kwargs: (`optional`) dict: key/value pairs with which to update the configuration object after loading.
- The values in kwargs of any keys which are configuration attributes will be used to override the loaded values.
- Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
return_unused_kwargs: (`optional`) bool:
- If False, then this function returns just the final configuration object.
- If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part of kwargs which has not been used to update `config` and is otherwise ignored.
Examples::
config = AutoConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
config = AutoConfig.from_pretrained('./test/bert_saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = AutoConfig.from_pretrained('./test/bert_saved_model/my_configuration.json')
config = AutoConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
assert config.output_attention == True
config, unused_kwargs = AutoConfig.from_pretrained('bert-base-uncased', output_attention=True,
foo=False, return_unused_kwargs=True)
assert config.output_attention == True
assert unused_kwargs == {'foo': False}
"""
if 'distilbert' in pretrained_model_name_or_path:
return DistilBertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'roberta' in pretrained_model_name_or_path:
return RobertaConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'bert' in pretrained_model_name_or_path:
return BertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'openai-gpt' in pretrained_model_name_or_path:
return OpenAIGPTConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'gpt2' in pretrained_model_name_or_path:
return GPT2Config.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'transfo-xl' in pretrained_model_name_or_path:
return TransfoXLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'xlnet' in pretrained_model_name_or_path:
return XLNetConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'xlm' in pretrained_model_name_or_path:
return XLMConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'ctrl' in pretrained_model_name_or_path:
return CTRLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contains one of "
"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
"'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path))
| 8,111 | 57.782609 | 296 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
# File name(s) a BERT tokenizer expects inside a saved-tokenizer directory.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# Map from pretrained shortcut name to the download URL of its vocabulary file.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
        'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
        'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
        'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
        'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
        'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
        'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
        'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
        'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
        'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
        'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
        'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
        'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
        'bert-base-german-dbmdz-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt",
        'bert-base-german-dbmdz-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-vocab.txt",
    }
}

# Maximum input length (in tokens) each pretrained model was trained with;
# used to cap sequence length when the tokenizer is loaded by shortcut name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
    'bert-base-german-cased': 512,
    'bert-large-uncased-whole-word-masking': 512,
    'bert-large-cased-whole-word-masking': 512,
    'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
    'bert-large-cased-whole-word-masking-finetuned-squad': 512,
    'bert-base-cased-finetuned-mrpc': 512,
    'bert-base-german-dbmdz-cased': 512,
    'bert-base-german-dbmdz-uncased': 512,
}

# Tokenizer constructor defaults (lower-casing behaviour) per pretrained model.
PRETRAINED_INIT_CONFIGURATION = {
    'bert-base-uncased': {'do_lower_case': True},
    'bert-large-uncased': {'do_lower_case': True},
    'bert-base-cased': {'do_lower_case': False},
    'bert-large-cased': {'do_lower_case': False},
    'bert-base-multilingual-uncased': {'do_lower_case': True},
    'bert-base-multilingual-cased': {'do_lower_case': False},
    'bert-base-chinese': {'do_lower_case': False},
    'bert-base-german-cased': {'do_lower_case': False},
    'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
    'bert-large-cased-whole-word-masking': {'do_lower_case': False},
    'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
    'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
    'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
    'bert-base-german-dbmdz-cased': {'do_lower_case': False},
    'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
}
def load_vocab(vocab_file):
    """Load a one-token-per-line vocabulary file into an ordered token->index dict."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # Line number (0-based) is the token's id.
        for index, line in enumerate(reader):
            vocab[line.rstrip('\n')] = index
    return vocab
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no separator already discards leading/trailing
    # whitespace and collapses internal runs, and returns [] for empty or
    # all-whitespace input, so no explicit strip/empty check is needed.
    return text.split()
class BertTokenizer(PreTrainedTokenizer):
    r"""
    Constructs a BertTokenizer.
    :class:`~transformers.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece
    Args:
        vocab_file: Path to a one-wordpiece-per-line vocabulary file
        do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
        do_basic_tokenize: Whether to do basic tokenization before wordpiece.
        max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
            minimum of this value (if specified) and the underlying BERT model's sequence length.
        never_split: List of tokens which will never be split during tokenization. Only has an effect when
            do_wordpiece_only=False
    """
    # Class-level tables consumed by PreTrainedTokenizer.from_pretrained to
    # resolve shortcut names to vocab files and per-model defaults.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,
                 unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs):
        """Constructs a BertTokenizer.
        Args:
            **vocab_file**: Path to a one-wordpiece-per-line vocabulary file
            **do_lower_case**: (`optional`) boolean (default True)
                Whether to lower case the input
                Only has an effect when do_basic_tokenize=True
            **do_basic_tokenize**: (`optional`) boolean (default True)
                Whether to do basic tokenization before wordpiece.
            **never_split**: (`optional`) list of string
                List of tokens which will never be split during tokenization.
                Only has an effect when do_basic_tokenize=True
            **tokenize_chinese_chars**: (`optional`) boolean (default True)
                Whether to tokenize Chinese characters.
                This should likely be deactivated for Japanese:
                see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
        """
        super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token,
                                            pad_token=pad_token, cls_token=cls_token,
                                            mask_token=mask_token, **kwargs)
        # Reserve room for the special tokens added by
        # build_inputs_with_special_tokens: [CLS] X [SEP] / [CLS] A [SEP] B [SEP].
        self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
        self.max_len_sentences_pair = self.max_len - 3  # take into account special tokens
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping (id -> token) used for decoding.
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                                  never_split=never_split,
                                                  tokenize_chinese_chars=tokenize_chinese_chars)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    @property
    def vocab_size(self):
        # Number of entries in the wordpiece vocabulary.
        return len(self.vocab)

    def _tokenize(self, text):
        """Split `text` into wordpiece tokens (basic tokenization first, if enabled)."""
        split_tokens = []
        if self.do_basic_tokenize:
            # Special tokens (e.g. [CLS]) are protected from basic splitting.
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        # Unknown tokens fall back to the [UNK] id.
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        # Re-attach wordpiece continuations by dropping the ' ##' separator.
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks
        by concatenating and adding special tokens.
        A BERT sequence has the following format:
            single sequence: [CLS] X [SEP]
            pair of sequences: [CLS] A [SEP] B [SEP]
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formated with
                special tokens for the model
        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError("You should not supply a second sequence if the provided sequence of "
                                 "ids is already formated with special tokens for the model.")
            # Mark existing [SEP]/[CLS] positions with 1, everything else 0.
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
        # Mask mirrors build_inputs_with_special_tokens: [CLS] A [SEP] (B [SEP]).
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        A BERT sequence pair mask has the following format:
        0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence
        if token_ids_1 is None, only returns the first portion of the mask (0's).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary to a directory or file."""
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = vocab_path
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Write tokens in id order, one per line; warn if ids have gaps,
            # since the line number is what load_vocab uses as the id.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
                                   " Please check that the vocabulary is not corrupted!".format(vocab_file))
                    index = token_index
                writer.write(token + u'\n')
                index += 1
        return (vocab_file,)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
            never_split: (`optional`) list of str, kept for backward
                compatibility; never-split handling now mostly lives in the
                base class (see :func:`PreTrainedTokenizer.tokenize`).
            tokenize_chinese_chars: (`optional`) boolean (default True).
                Whether to surround CJK characters with spaces. Should likely
                be deactivated for Japanese, see:
                https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
        """
        self.do_lower_case = do_lower_case
        self.never_split = [] if never_split is None else never_split
        self.tokenize_chinese_chars = tokenize_chinese_chars

    def tokenize(self, text, never_split=None):
        """Basic tokenization of a piece of text.

        Splits on whitespace and punctuation only; sub-word splitting is the
        job of WordpieceTokenizer.

        Args:
            never_split: (`optional`) list of str — extra tokens to protect
                from lower-casing, merged with the instance-level list.
        """
        protected = self.never_split + (never_split if never_split is not None else [])
        text = self._clean_text(text)
        # Added November 1st, 2018 for the multilingual and Chinese models and
        # applied to the English models as well; harmless there because the
        # English training data contains essentially no Chinese text.
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        pieces = []
        for token in whitespace_tokenize(text):
            if self.do_lower_case and token not in protected:
                token = self._run_strip_accents(token.lower())
            pieces.extend(self._run_split_on_punc(token))
        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Strips accents (Unicode combining marks) from a piece of text."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if never_split is not None and text in never_split:
            return [text]
        pieces = []
        in_word = False  # True while the last piece is a growing non-punct run
        for char in text:
            if _is_punctuation(char):
                # Every punctuation character becomes its own piece.
                pieces.append(char)
                in_word = False
            else:
                if not in_word:
                    pieces.append("")
                    in_word = True
                pieces[-1] += char
        return pieces

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        padded = []
        for char in text:
            if self._is_chinese_char(ord(char)):
                padded.extend((" ", char, " "))
            else:
                padded.append(char)
        return "".join(padded)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # "CJK character" here means anything in the CJK Unified Ideographs
        # Unicode blocks:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        # Despite the name these do NOT cover all Japanese/Korean text:
        # Hangul, Hiragana and Katakana live in other blocks and are written
        # with space-separated words, so they are handled like other languages.
        cjk_ranges = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        cleaned = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the replacement character and control characters.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            cleaned.append(" " if _is_whitespace(char) else char)
        return "".join(cleaned)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Uses a greedy longest-match-first algorithm against the given
        vocabulary, e.g. input = "unaffable", output = ["un", "##aff", "##able"].

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.
        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            # Overly long tokens map to the unknown token wholesale.
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = []
            start = 0
            while start < len(chars):
                # Find the longest vocabulary entry matching at `start`;
                # non-initial pieces carry the '##' continuation prefix.
                end = len(chars)
                match = None
                while end > start:
                    candidate = "".join(chars[start:end])
                    if start > 0:
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        match = candidate
                        break
                    end -= 1
                if match is None:
                    # No prefix matched: the whole token becomes unk_token.
                    pieces = None
                    break
                pieces.append(match)
                start = end
            if pieces is None:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 22,451 | 43.636183 | 183 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/configuration_ctrl.py | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Salesforce CTRL configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://storage.googleapis.com/sf-ctrl/pytorch/ctrl-config.json"}
class CTRLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `CTRLModel`.
    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `CTRLModel` or a configuration json file.
        n_positions: Number of positional embeddings.
        n_ctx: Size of the causal mask (usually same as n_positions).
        dff: Size of the inner dimension of the FFN.
        n_embd: Dimensionality of the embeddings and hidden states.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        layer_norm_epsilon: epsilon to use in the layer norm layers
        resid_pdrop: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attn_pdrop: The dropout ratio for the attention
            probabilities.
        embd_pdrop: The dropout ratio for the embeddings.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
    """
    pretrained_config_archive_map = CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=246534,
        n_positions=256,
        n_ctx=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        num_labels=1,
        summary_type='cls_index',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs
    ):
        """Constructs CTRLConfig.

        Accepts either an integer vocabulary size (all other hyperparameters
        taken from the keyword arguments) or the path to a JSON configuration
        file whose key/value pairs override the defaults assigned below.
        See the class docstring for the meaning of each hyperparameter.
        """
        super(CTRLConfig, self).__init__(**kwargs)
        # If a JSON path was given instead of an int, vocab_size is set to a
        # -1 placeholder here and overwritten from the JSON file below.
        self.vocab_size = vocab_size_or_config_json_file if isinstance(vocab_size_or_config_json_file, int) else -1
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        # JSON-file path: load it and let its key/value pairs override the
        # defaults assigned above. The `unicode` branch only runs on Python 2
        # (short-circuited by the version check on Python 3).
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif not isinstance(vocab_size_or_config_json_file, int):
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    # Aliases mapping the GPT-style short names onto the generic attribute
    # names expected by the shared PretrainedConfig interface.
    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
| 5,775 | 39.111111 | 120 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from .modeling_bert import BertModel, BertForMaskedLM, BertForSequenceClassification, BertForQuestionAnswering
from .modeling_openai import OpenAIGPTModel, OpenAIGPTLMHeadModel
from .modeling_gpt2 import GPT2Model, GPT2LMHeadModel
from .modeling_ctrl import CTRLModel, CTRLLMHeadModel
from .modeling_transfo_xl import TransfoXLModel, TransfoXLLMHeadModel
from .modeling_xlnet import XLNetModel, XLNetLMHeadModel, XLNetForSequenceClassification, XLNetForQuestionAnswering
from .modeling_xlm import XLMModel, XLMWithLMHeadModel, XLMForSequenceClassification, XLMForQuestionAnswering
from .modeling_roberta import RobertaModel, RobertaForMaskedLM, RobertaForSequenceClassification
from .modeling_distilbert import DistilBertModel, DistilBertForQuestionAnswering, DistilBertForMaskedLM, DistilBertForSequenceClassification
from .modeling_utils import PreTrainedModel, SequenceSummary
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
class AutoModel(object):
    r""":class:`~transformers.AutoModel` is a generic model class
        that will be instantiated as one of the base model classes of the library
        when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
        class method.

        The `from_pretrained()` method takes care of returning the correct model class instance
        using pattern matching on the `pretrained_model_name_or_path` string.
        The base model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `distilbert`: DistilBertModel (DistilBERT model)
            - contains `roberta`: RobertaModel (RoBERTa model)
            - contains `bert`: BertModel (Bert model)
            - contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model)
            - contains `gpt2`: GPT2Model (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLModel (Transformer-XL model)
            - contains `xlnet`: XLNetModel (XLNet model)
            - contains `xlm`: XLMModel (XLM model)
            - contains `ctrl`: CTRLModel (Salesforce CTRL model)

        This class cannot be instantiated using `__init__()` (throws an error).
    """
    def __init__(self):
        raise EnvironmentError("AutoModel is designed to be instantiated "
            "using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiates one of the base model classes of the library
        from a pre-trained model configuration, selected by the first pattern
        found in `pretrained_model_name_or_path` (same order as documented on
        the class).

        The model is set in evaluation mode by default using `model.eval()`
        (Dropout modules are deactivated). To train the model, you should
        first set it back in training mode with `model.train()`.

        Params:
            pretrained_model_name_or_path: either a model shortcut name
                (e.g. ``bert-base-uncased``), a path to a directory containing
                weights saved with :func:`~transformers.PreTrainedModel.save_pretrained`,
                or a path/url to a TensorFlow index checkpoint (requires
                ``from_tf=True`` and a ``config`` argument).
            model_args: (`optional`) positional arguments forwarded to the
                underlying model's ``__init__`` method.
            config: (`optional`) a :class:`~transformers.PretrainedConfig`
                instance to use instead of an automatically loaded configuration.
            state_dict: (`optional`) dict: a state dictionary to use instead of
                the one loaded from the saved weights file.
            cache_dir: (`optional`) string: directory in which downloaded files
                should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False: force
                (re-)downloading and override the cached versions if they exist.
            proxies: (`optional`) dict: proxy servers to use by protocol or
                endpoint, used on each request.
            output_loading_info: (`optional`) boolean: set to ``True`` to also
                return a dict of missing keys, unexpected keys and error messages.
            kwargs: (`optional`) remaining keyword arguments, forwarded to the
                configuration and/or the model ``__init__`` depending on whether
                a ``config`` was provided.

        Raises:
            ValueError: if no known model pattern is found in
                `pretrained_model_name_or_path`.

        Examples::
            model = AutoModel.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModel.from_pretrained('./test/bert_model/')   # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        """
        # NOTE: order matters -- 'distilbert' and 'roberta' both contain the
        # substring 'bert', so they must be tested before the plain 'bert' case.
        if 'distilbert' in pretrained_model_name_or_path:
            return DistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'roberta' in pretrained_model_name_or_path:
            return RobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'bert' in pretrained_model_name_or_path:
            return BertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'openai-gpt' in pretrained_model_name_or_path:
            return OpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'gpt2' in pretrained_model_name_or_path:
            return GPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'transfo-xl' in pretrained_model_name_or_path:
            return TransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlnet' in pretrained_model_name_or_path:
            return XLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlm' in pretrained_model_name_or_path:
            return XLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'ctrl' in pretrained_model_name_or_path:
            return CTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        # Fixed message: the original was missing the closing quote after 'roberta'.
        raise ValueError("Unrecognized model identifier in {}. Should contain one of "
                         "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
                         "'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path))
class AutoModelWithLMHead(object):
    r""":class:`~transformers.AutoModelWithLMHead` is a generic model class
        that will be instantiated as one of the language modeling model classes of the library
        when created with the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)`
        class method.

        The `from_pretrained()` method takes care of returning the correct model class instance
        using pattern matching on the `pretrained_model_name_or_path` string.
        The model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `distilbert`: DistilBertForMaskedLM (DistilBERT model)
            - contains `roberta`: RobertaForMaskedLM (RoBERTa model)
            - contains `bert`: BertForMaskedLM (Bert model)
            - contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model)
            - contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model)
            - contains `xlnet`: XLNetLMHeadModel (XLNet model)
            - contains `xlm`: XLMWithLMHeadModel (XLM model)
            - contains `ctrl`: CTRLLMHeadModel (Salesforce CTRL model)

        This class cannot be instantiated using `__init__()` (throws an error).
    """
    def __init__(self):
        raise EnvironmentError("AutoModelWithLMHead is designed to be instantiated "
            "using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiates one of the language modeling model classes of the
        library from a pre-trained model configuration, selected by the first
        pattern found in `pretrained_model_name_or_path` (same order as
        documented on the class).

        The model is set in evaluation mode by default using `model.eval()`
        (Dropout modules are deactivated). To train the model, you should
        first set it back in training mode with `model.train()`.

        Params:
            pretrained_model_name_or_path: either a model shortcut name
                (e.g. ``bert-base-uncased``), a path to a directory containing
                weights saved with :func:`~transformers.PreTrainedModel.save_pretrained`,
                or a path/url to a TensorFlow index checkpoint (requires
                ``from_tf=True`` and a ``config`` argument).
            model_args: (`optional`) positional arguments forwarded to the
                underlying model's ``__init__`` method.
            config: (`optional`) a :class:`~transformers.PretrainedConfig`
                instance to use instead of an automatically loaded configuration.
            state_dict: (`optional`) dict: a state dictionary to use instead of
                the one loaded from the saved weights file.
            cache_dir: (`optional`) string: directory in which downloaded files
                should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False: force
                (re-)downloading and override the cached versions if they exist.
            proxies: (`optional`) dict: proxy servers to use by protocol or
                endpoint, used on each request.
            output_loading_info: (`optional`) boolean: set to ``True`` to also
                return a dict of missing keys, unexpected keys and error messages.
            kwargs: (`optional`) remaining keyword arguments, forwarded to the
                configuration and/or the model ``__init__`` depending on whether
                a ``config`` was provided.

        Raises:
            ValueError: if no known model pattern is found in
                `pretrained_model_name_or_path`.

        Examples::
            model = AutoModelWithLMHead.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModelWithLMHead.from_pretrained('./test/bert_model/')   # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        """
        # NOTE: order matters -- 'distilbert' and 'roberta' both contain the
        # substring 'bert', so they must be tested before the plain 'bert' case.
        if 'distilbert' in pretrained_model_name_or_path:
            return DistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'roberta' in pretrained_model_name_or_path:
            return RobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'bert' in pretrained_model_name_or_path:
            return BertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'openai-gpt' in pretrained_model_name_or_path:
            return OpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'gpt2' in pretrained_model_name_or_path:
            return GPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'transfo-xl' in pretrained_model_name_or_path:
            return TransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlnet' in pretrained_model_name_or_path:
            return XLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlm' in pretrained_model_name_or_path:
            return XLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'ctrl' in pretrained_model_name_or_path:
            return CTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contain one of "
                         "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
                         "'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path))
class AutoModelForSequenceClassification(object):
    r""":class:`~transformers.AutoModelForSequenceClassification` is a generic model class
        that will be instantiated as one of the sequence classification model classes of the library
        when created with the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)`
        class method.

        The `from_pretrained()` method takes care of returning the correct model class instance
        using pattern matching on the `pretrained_model_name_or_path` string.
        The model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model)
            - contains `roberta`: RobertaForSequenceClassification (RoBERTa model)
            - contains `bert`: BertForSequenceClassification (Bert model)
            - contains `xlnet`: XLNetForSequenceClassification (XLNet model)
            - contains `xlm`: XLMForSequenceClassification (XLM model)

        This class cannot be instantiated using `__init__()` (throws an error).
    """
    def __init__(self):
        # Fixed copy-paste bug: this message previously named AutoModelWithLMHead.
        raise EnvironmentError("AutoModelForSequenceClassification is designed to be instantiated "
            "using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiates one of the sequence classification model classes of
        the library from a pre-trained model configuration, selected by the
        first pattern found in `pretrained_model_name_or_path` (same order as
        documented on the class).

        The model is set in evaluation mode by default using `model.eval()`
        (Dropout modules are deactivated). To train the model, you should
        first set it back in training mode with `model.train()`.

        Params:
            pretrained_model_name_or_path: either a model shortcut name
                (e.g. ``bert-base-uncased``), a path to a directory containing
                weights saved with :func:`~transformers.PreTrainedModel.save_pretrained`,
                or a path/url to a TensorFlow index checkpoint (requires
                ``from_tf=True`` and a ``config`` argument).
            model_args: (`optional`) positional arguments forwarded to the
                underlying model's ``__init__`` method.
            config: (`optional`) a :class:`~transformers.PretrainedConfig`
                instance to use instead of an automatically loaded configuration.
            state_dict: (`optional`) dict: a state dictionary to use instead of
                the one loaded from the saved weights file.
            cache_dir: (`optional`) string: directory in which downloaded files
                should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False: force
                (re-)downloading and override the cached versions if they exist.
            proxies: (`optional`) dict: proxy servers to use by protocol or
                endpoint, used on each request.
            output_loading_info: (`optional`) boolean: set to ``True`` to also
                return a dict of missing keys, unexpected keys and error messages.
            kwargs: (`optional`) remaining keyword arguments, forwarded to the
                configuration and/or the model ``__init__`` depending on whether
                a ``config`` was provided.

        Raises:
            ValueError: if no known model pattern is found in
                `pretrained_model_name_or_path`.

        Examples::
            model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/')   # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        """
        # NOTE: order matters -- 'distilbert' and 'roberta' both contain the
        # substring 'bert', so they must be tested before the plain 'bert' case.
        if 'distilbert' in pretrained_model_name_or_path:
            return DistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'roberta' in pretrained_model_name_or_path:
            return RobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'bert' in pretrained_model_name_or_path:
            return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlnet' in pretrained_model_name_or_path:
            return XLNetForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlm' in pretrained_model_name_or_path:
            return XLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contain one of "
                         "'distilbert', 'roberta', 'bert', 'xlnet', 'xlm'".format(pretrained_model_name_or_path))
class AutoModelForQuestionAnswering(object):
    r""":class:`~transformers.AutoModelForQuestionAnswering` is a generic model class
        that will be instantiated as one of the question answering model classes of the library
        when created with the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)`
        class method.

        The `from_pretrained()` method takes care of returning the correct model class instance
        using pattern matching on the `pretrained_model_name_or_path` string.
        The model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model)
            - contains `bert`: BertForQuestionAnswering (Bert model)
            - contains `xlnet`: XLNetForQuestionAnswering (XLNet model)
            - contains `xlm`: XLMForQuestionAnswering (XLM model)

        This class cannot be instantiated using `__init__()` (throws an error).
    """
    def __init__(self):
        # Fixed copy-paste bug: this message previously named AutoModelWithLMHead.
        raise EnvironmentError("AutoModelForQuestionAnswering is designed to be instantiated "
            "using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiates one of the question answering model classes of the
        library from a pre-trained model configuration, selected by the first
        pattern found in `pretrained_model_name_or_path` (same order as
        documented on the class).

        The model is set in evaluation mode by default using `model.eval()`
        (Dropout modules are deactivated). To train the model, you should
        first set it back in training mode with `model.train()`.

        Params:
            pretrained_model_name_or_path: either a model shortcut name
                (e.g. ``bert-base-uncased``), a path to a directory containing
                weights saved with :func:`~transformers.PreTrainedModel.save_pretrained`,
                or a path/url to a TensorFlow index checkpoint (requires
                ``from_tf=True`` and a ``config`` argument).
            model_args: (`optional`) positional arguments forwarded to the
                underlying model's ``__init__`` method.
            config: (`optional`) a :class:`~transformers.PretrainedConfig`
                instance to use instead of an automatically loaded configuration.
            state_dict: (`optional`) dict: a state dictionary to use instead of
                the one loaded from the saved weights file.
            cache_dir: (`optional`) string: directory in which downloaded files
                should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False: force
                (re-)downloading and override the cached versions if they exist.
            proxies: (`optional`) dict: proxy servers to use by protocol or
                endpoint, used on each request.
            output_loading_info: (`optional`) boolean: set to ``True`` to also
                return a dict of missing keys, unexpected keys and error messages.
            kwargs: (`optional`) remaining keyword arguments, forwarded to the
                configuration and/or the model ``__init__`` depending on whether
                a ``config`` was provided.

        Raises:
            ValueError: if no known model pattern is found in
                `pretrained_model_name_or_path`.

        Examples::
            model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModelForQuestionAnswering.from_pretrained('./test/bert_model/')   # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        """
        # NOTE: order matters -- 'distilbert' contains the substring 'bert',
        # so it must be tested before the plain 'bert' case.
        if 'distilbert' in pretrained_model_name_or_path:
            return DistilBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'bert' in pretrained_model_name_or_path:
            return BertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlnet' in pretrained_model_name_or_path:
            return XLNetForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlm' in pretrained_model_name_or_path:
            return XLMForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contain one of "
                         "'distilbert', 'bert', 'xlnet', 'xlm'".format(pretrained_model_name_or_path))
| 36,985 | 72.384921 | 472 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_xlnet.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for XLNet model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import os
from shutil import copyfile
import unicodedata
import six
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
# Filename used when saving/loading the tokenizer vocabulary locally.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
# Download locations of the pretrained SentencePiece models, keyed by shortcut name.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-spiece.model",
        'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-spiece.model",
    }
}
# Used as `max_model_input_sizes` on the tokenizer class below; None means
# no fixed model-imposed input length for these checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
# Marker SentencePiece prepends to word-initial pieces (U+2581 LOWER ONE EIGHTH BLOCK).
SPIECE_UNDERLINE = u'▁'
# Segment ids from the original XLNet implementation (not really needed here)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLNet.

    Peculiarities:
        - requires `SentencePiece <https://github.com/google/sentencepiece>`_
    """
    # Lookup tables consumed by ``PreTrainedTokenizer.from_pretrained``.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file,
                 do_lower_case=False, remove_space=True, keep_accents=False,
                 bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
                 pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
                 additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # NOTE(review): the mutable list default for `additional_special_tokens`
        # is shared across calls; safe only while it is never mutated in place.
        super(XLNetTokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token,
                                             unk_token=unk_token, sep_token=sep_token,
                                             pad_token=pad_token, cls_token=cls_token,
                                             mask_token=mask_token, additional_special_tokens=
                                             additional_special_tokens, **kwargs)
        self.max_len_single_sentence = self.max_len - 2  # take into account special tokens (<sep> <cls>)
        self.max_len_sentences_pair = self.max_len - 3  # take into account special tokens (<sep> <sep> <cls>)
        try:
            import sentencepiece as spm
        except ImportError:
            # NOTE(review): the two adjacent literals concatenate with no space,
            # so the logged message runs the URL into "pip install".
            logger.warning("You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece"
                           "pip install sentencepiece")
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Load the SentencePiece model; if the import above failed this raises
        # NameError rather than a friendly error.
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the underlying SentencePiece vocabulary.
        return len(self.sp_model)

    def __getstate__(self):
        # The SentencePiece processor is not picklable: drop it here and
        # re-load it from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning("You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece"
                           "pip install sentencepiece")
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text before SentencePiece encoding: collapse whitespace,
        standardize quotes, and optionally strip accents and lower-case."""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if six.PY2 and isinstance(outputs, str):
            outputs = outputs.decode('utf-8')
        if not self.keep_accents:
            # NFKD-decompose, then drop combining marks to remove accents.
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text, return_unicode=True, sample=False):
        """ Tokenize a string.
            return_unicode is used only for py2
        """
        text = self.preprocess_text(text)
        # note(zhiliny): in some systems, sentencepiece only accepts str for py2
        if six.PY2 and isinstance(text, unicode):
            text = text.encode('utf-8')
        if not sample:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            # Non-deterministic subword regularization sampling.
            pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
        new_pieces = []
        for piece in pieces:
            # Re-split pieces like "9," so the digits and the trailing comma
            # become separate sub-tokens (matches original XLNet preprocessing).
            if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(
                    piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Drop the spurious word-start marker re-encoding introduced.
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        # note(zhiliny): convert back to unicode for py2
        if six.PY2 and return_unicode:
            ret_pieces = []
            for piece in new_pieces:
                if isinstance(piece, str):
                    piece = piece.decode('utf-8')
                ret_pieces.append(piece)
            new_pieces = ret_pieces
        return new_pieces

    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index, return_unicode=True):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        if six.PY2 and return_unicode and isinstance(token, str):
            token = token.decode('utf-8')
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks
        by concatenating and adding special tokens.
        An XLNet sequence has the following format (special tokens come last):
            single sequence: X <sep> <cls>
            pair of sequences: A <sep> B <sep> <cls>
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formated with
                special tokens for the model
        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError("You should not supply a second sequence if the provided sequence of "
                                 "ids is already formated with special tokens for the model.")
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        An XLNet sequence pair mask has the following format:
        0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 2
        | first sequence    | second sequence     | CLS segment ID
        if token_ids_1 is None, only returns the first portion of the mask (0's).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        cls_segment_id = [2]  # XLNet puts <cls> in its own segment
        if token_ids_1 is None:
            return len(token_ids_0 + sep + cls) * [0]
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory):
        """ Save the sentencepiece vocabulary (copy original file) and special tokens file
            to a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        # Only copy when source and destination differ, to avoid self-copy errors.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 10,317 | 39.622047 | 125 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_distilbert.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for DistilBERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_bert import BertTokenizer
logger = logging.getLogger(__name__)
# DistilBERT reuses BERT's WordPiece vocabulary file layout.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# DistilBERT checkpoints reuse the original BERT vocabularies verbatim.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'distilbert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
        'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
    }
}

# Maximum input length (in tokens) supported by each pretrained model.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'distilbert-base-uncased': 512,
    'distilbert-base-uncased-distilled-squad': 512,
}
class DistilBertTokenizer(BertTokenizer):
    r"""
    Tokenizer for DistilBERT.

    :class:`~transformers.DistilBertTokenizer` is identical to BertTokenizer and runs
    end-to-end tokenization: punctuation splitting + wordpiece. It only re-points the
    pretrained-file lookup tables at the DistilBERT checkpoints.

    Args:
        vocab_file: Path to a one-wordpiece-per-line vocabulary file
        do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
        do_basic_tokenize: Whether to do basic tokenization before wordpiece.
        max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
            minimum of this value (if specified) and the underlying BERT model's sequence length.
        never_split: List of tokens which will never be split during tokenization. Only has an effect when
            do_wordpiece_only=False
    """

    # Class-level tables consumed by ``from_pretrained``.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
| 2,438 | 37.714286 | 144 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/configuration_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Transformer XL configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
# Download URLs for the pretrained configuration files, keyed by shortcut name.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `TransfoXLModel`.
    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `TransfoXLModel` or a configuration json file.
        cutoffs: cutoffs for the adaptive softmax
        d_model: Dimensionality of the model's hidden states.
        d_embed: Dimensionality of the embeddings
        d_head: Dimensionality of the model's heads.
        div_val: divisor value for adaptive input and softmax
        pre_lnorm: apply LayerNorm to the input instead of the output
        d_inner: Inner dimension in FF
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        tgt_len: number of tokens to predict
        ext_len: length of the extended context
        mem_len: length of the retained previous heads
        same_length: use the same attn length for all tokens
        proj_share_all_but_first: True to share all but first projs, False not to share.
        attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
        clamp_len: use the same pos embeddings after clamp_len
        sample_softmax: number of samples in sampled softmax
        adaptive: use adaptive softmax
        tie_weight: tie the word embedding and softmax weights
        dropout: The dropout probabilitiy for all fully connected
            layers in the embeddings, encoder, and pooler.
        dropatt: The dropout ratio for the attention probabilities.
        untie_r: untie relative position biases
        embd_pdrop: The dropout ratio for the embeddings.
        init: parameter initializer to use
        init_range: parameters initialized by U(-init_range, init_range).
        proj_init_std: parameters initialized by N(0, init_std)
        init_std: parameters initialized by N(0, init_std)
    """
    pretrained_config_archive_map = TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=267735,
                 cutoffs=[20000, 40000, 200000],
                 d_model=1024,
                 d_embed=1024,
                 n_head=16,
                 d_head=64,
                 d_inner=4096,
                 div_val=4,
                 pre_lnorm=False,
                 n_layer=18,
                 tgt_len=128,
                 ext_len=0,
                 mem_len=1600,
                 clamp_len=1000,
                 same_length=True,
                 proj_share_all_but_first=True,
                 attn_type=0,
                 sample_softmax=-1,
                 adaptive=True,
                 tie_weight=True,
                 dropout=0.1,
                 dropatt=0.0,
                 untie_r=True,
                 init="normal",
                 init_range=0.01,
                 proj_init_std=0.01,
                 init_std=0.02,
                 layer_norm_epsilon=1e-5,
                 **kwargs):
        """Constructs TransfoXLConfig.
        """
        super(TransfoXLConfig, self).__init__(**kwargs)
        # -1 is a placeholder; when a JSON file path is given, n_token is
        # overwritten by the value loaded from the file below.
        self.n_token = vocab_size_or_config_json_file if isinstance(vocab_size_or_config_json_file, int) else -1
        # Copy the (mutable default) cutoffs list so instances do not share it.
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        self.tie_weight = tie_weight
        # The first (head) projection is never tied; the rest follow the flag.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.tgt_len = tgt_len
        self.ext_len = ext_len
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        # A str (py3) / unicode (py2) first argument is treated as a path to a
        # JSON config whose keys override everything set above.
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif not isinstance(vocab_size_or_config_json_file, int):
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")

    @property
    def max_position_embeddings(self):
        # Effective context: target length plus extended and memory context.
        return self.tgt_len + self.ext_len + self.mem_len

    @property
    def vocab_size(self):
        # Alias for the Transformer-XL-specific ``n_token`` attribute.
        return self.n_token

    @vocab_size.setter
    def vocab_size(self, value):
        self.n_token = value

    @property
    def hidden_size(self):
        # Generic-name alias used by model-agnostic code.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
| 6,819 | 39.35503 | 125 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import six
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
import requests
from tqdm import tqdm
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Detect optional backends at import time; the flags are read via
# is_tf_available() / is_torch_available() below.
try:
    import tensorflow as tf
    # Only TensorFlow 2.x is supported; a 1.x install counts as unavailable.
    assert hasattr(tf, '__version__') and int(tf.__version__[0]) >= 2
    _tf_available = True  # pylint: disable=invalid-name
    logger.info("TensorFlow version {} available.".format(tf.__version__))
except (ImportError, AssertionError):
    _tf_available = False  # pylint: disable=invalid-name
try:
    import torch
    _torch_available = True  # pylint: disable=invalid-name
    logger.info("PyTorch version {} available.".format(torch.__version__))
except ImportError:
    _torch_available = False  # pylint: disable=invalid-name

# Resolve the torch cache root; fall back to the TORCH_HOME / XDG convention
# when torch (or its private helper) is unavailable.
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(
            os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'transformers')

# py2/py3 compatible urlparse import.
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

# Cache directory, overridable via environment variables (new name first).
try:
    from pathlib import Path
    PYTORCH_PRETRAINED_BERT_CACHE = Path(
        os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)))
except (AttributeError, ImportError):
    PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE',
                                              os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
                                                        default_cache_path))
PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE  # Kept for backward compatibility
TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE  # Kept for backward compatibility

# Canonical filenames used when saving / loading checkpoints and configs.
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF_WEIGHTS_NAME = 'model.ckpt'
CONFIG_NAME = "config.json"
def is_torch_available():
    """Return True if PyTorch was importable when this module was loaded."""
    return _torch_available
def is_tf_available():
    """Return True if TensorFlow >= 2.0 was importable when this module was loaded."""
    return _tf_available
# Docstrings are immutable attributes on Python 2, so the decorators degrade to
# no-ops there; on Python 3 they splice the given fragments into fn.__doc__.
if six.PY2:
    def add_start_docstrings(*docstr):
        """No-op decorator factory (docstrings cannot be updated on py2)."""
        def docstring_decorator(fn):
            return fn
        return docstring_decorator

    def add_end_docstrings(*docstr):
        """No-op decorator factory (docstrings cannot be updated on py2)."""
        def docstring_decorator(fn):
            return fn
        return docstring_decorator
else:
    def add_start_docstrings(*docstr):
        """Decorator factory: prepend the given strings to the function's docstring."""
        def docstring_decorator(fn):
            fn.__doc__ = ''.join(docstr) + fn.__doc__
            return fn
        return docstring_decorator

    def add_end_docstrings(*docstr):
        """Decorator factory: append the given strings to the function's docstring."""
        def docstring_decorator(fn):
            fn.__doc__ = fn.__doc__ + ''.join(docstr)
            return fn
        return docstring_decorator
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.

    The filename is the sha256 hex digest of the url; if `etag` is specified,
    its digest is appended, delimited by a period. Urls ending with .h5 (Keras
    HDF5 weights) keep an '.h5' suffix so that TF 2.0 can identify the cached
    file as HDF5
    (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name += '.' + sha256(etag.encode('utf-8')).hexdigest()
    if url.endswith('.h5'):
        name += '.h5'
    return name
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.

    The metadata lives in a ``<filename>.json`` sidecar next to the cached file.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cached = os.path.join(cache_dir, filename)
    if not os.path.exists(cached):
        raise EnvironmentError("file {} not found".format(cached))

    sidecar = cached + '.json'
    if not os.path.exists(sidecar):
        raise EnvironmentError("file {} not found".format(sidecar))

    with open(sidecar, encoding="utf-8") as fp:
        metadata = json.load(fp)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):
    """
    Resolve something that might be a URL (or might be a local path) to a local
    file path. URLs are downloaded into the cache (if not already there) and the
    cached path is returned; existing local paths are returned unchanged.

    Args:
        cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-dowload the file even if it's already cached in the cache dir.

    Raises:
        EnvironmentError: if the argument looks like a plain path but does not exist.
        ValueError: if the argument is neither a supported URL nor a plain path.
    """
    cache_dir = TRANSFORMERS_CACHE if cache_dir is None else cache_dir
    if sys.version_info[0] == 3:
        # Normalize pathlib.Path arguments to plain strings.
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir=cache_dir,
                              force_download=force_download, proxies=proxies)
    if os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    if scheme == '':
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Something unknown
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path into (bucket_name, s3_path); raise ValueError on bad input."""
    pieces = urlparse(url)
    if not pieces.netloc or not pieces.path:
        raise ValueError("bad s3 path {}".format(url))
    key = pieces.path
    # urlparse keeps a leading '/' on the path; S3 object keys do not use it.
    # Strip exactly one, preserving any further slashes in the key itself.
    if key.startswith("/"):
        key = key[1:]
    return pieces.netloc, key
def s3_request(func):
    """
    Decorator for S3 helpers: translates a 404 ``ClientError`` into an
    ``EnvironmentError`` with a friendlier "file not found" message; any other
    S3 error is re-raised unchanged.
    """
    @wraps(func)
    def inner(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as err:
            if int(err.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            raise
    return inner
@s3_request
def s3_etag(url, proxies=None):
    """Check ETag on S3 object."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    # e_tag triggers a lazy HEAD request; 404s surface via the s3_request wrapper.
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file, proxies=None):
    """Pull a file directly from S3 into the (already open) *temp_file* object."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None):
    """Stream *url* into *temp_file* over HTTP, showing a tqdm progress bar."""
    req = requests.get(url, stream=True, proxies=proxies)
    content_length = req.headers.get('Content-Length')
    # Content-Length may be absent (e.g. chunked responses); tqdm accepts total=None.
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    # Normalize Path / non-str cache dirs to plain strings for os.path use.
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url, proxies=proxies)
    else:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies)
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except EnvironmentError:
            # Offline / unreachable host: fall back to whatever is cached.
            etag = None
    if sys.version_info[0] == 2 and etag is not None:
        etag = etag.decode('utf-8')
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
        # Drop the .json metadata sidecars; keep only cached payload files.
        matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[-1])
    if not os.path.exists(cache_path) or force_download:
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file, proxies=proxies)
            else:
                http_get(url, temp_file, proxies=proxies)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                output_string = json.dumps(meta)
                if sys.version_info[0] == 2 and isinstance(output_string, str):
                    output_string = unicode(output_string, 'utf-8')  # The beauty of python 2
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
| 11,622 | 34.763077 | 144 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/__init__.py | __version__ = "2.1.1"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
# Best-effort tweak of absl's logging thresholds; absl may be absent or its
# private/legacy API may have changed, and neither must block package import.
try:
    import absl.logging
    absl.logging.set_verbosity('info')
    absl.logging.set_stderrthreshold('info')
    absl.logging._warn_preinit_stderr = False
except Exception:
    # Was a bare ``except:``, which would also swallow SystemExit and
    # KeyboardInterrupt; catch only real errors.
    pass
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Files and general utilities
from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE,
cached_path, add_start_docstrings, add_end_docstrings,
WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME,
is_tf_available, is_torch_available)
# Tokenizers
from .tokenization_utils import (PreTrainedTokenizer)
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_auto import AutoConfig
from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
# Modeling
if is_torch_available():
from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D)
from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelWithLMHead)
from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
XLNetForSequenceClassification, XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple, XLNetForQuestionAnswering,
load_tf_weights_in_xlnet, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlm import (XLMPreTrainedModel , XLMModel,
XLMWithLMHeadModel, XLMForSequenceClassification,
XLMForQuestionAnswering, XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_roberta import (RobertaForMaskedLM, RobertaModel,
RobertaForSequenceClassification, RobertaForMultipleChoice,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_distilbert import (DistilBertForMaskedLM, DistilBertModel,
DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_albert import AlbertForSequenceClassification
# Optimization
from .optimization import (AdamW, ConstantLRSchedule, WarmupConstantSchedule, WarmupCosineSchedule,
WarmupCosineWithHardRestartsSchedule, WarmupLinearSchedule)
# Warn once at import time when no deep-learning backend is installed.
if not is_tf_available() and not is_torch_available():
    # The original adjacent literals concatenated with no separating spaces,
    # producing a garbled message ("...found.Models won't...configurationand...").
    logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found. "
                   "Models won't be available and only tokenizers, configuration "
                   "and file/data utilities can be used.")
| 5,761 | 58.402062 | 109 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/configuration_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT-2 configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
# Download URLs for the pretrained GPT-2 configuration files, keyed by shortcut name.
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json",
                                      "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json",
                                      "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-config.json",
                                      "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-config.json",}
class GPT2Config(PretrainedConfig):
    """Hyper-parameter container for `GPT2Model`.

    The first constructor argument is either an ``int`` (the vocabulary
    size, with every other hyper-parameter taken from the remaining
    arguments) or the path to a JSON configuration file whose key/value
    pairs are copied verbatim onto the instance.

    Stored hyper-parameters: position/context sizes (``n_positions``,
    ``n_ctx``), model width/depth (``n_embd``, ``n_layer``, ``n_head``),
    dropout rates (``resid_pdrop``, ``embd_pdrop``, ``attn_pdrop``),
    ``layer_norm_epsilon``, ``initializer_range`` and the summary-head
    options (``num_labels``, ``summary_*``).
    """
    pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=50257,
        n_positions=1024,
        n_ctx=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        num_labels=1,
        summary_type='cls_index',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs
    ):
        """Build a GPT2Config from explicit values or from a JSON file."""
        super(GPT2Config, self).__init__(**kwargs)
        from_json_file = isinstance(vocab_size_or_config_json_file, str)
        if not from_json_file and sys.version_info[0] == 2:
            # Python 2 only: unicode paths are accepted as well.
            from_json_file = isinstance(vocab_size_or_config_json_file, unicode)
        if from_json_file:
            # Copy every key/value pair of the JSON file onto the instance.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                for key, value in json.loads(reader.read()).items():
                    self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_positions = n_positions
            self.n_ctx = n_ctx
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_proj_to_labels = summary_proj_to_labels
            self.summary_first_dropout = summary_first_dropout
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    @property
    def max_position_embeddings(self):
        """Alias for ``n_positions``."""
        return self.n_positions

    @property
    def hidden_size(self):
        """Alias for ``n_embd``."""
        return self.n_embd

    @property
    def num_attention_heads(self):
        """Alias for ``n_head``."""
        return self.n_head

    @property
    def num_hidden_layers(self):
        """Alias for ``n_layer``."""
        return self.n_layer
| 6,053 | 40.751724 | 131 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_gpt2.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
# functools.lru_cache exists only on Python 3; fall back to a no-op
# decorator on interpreters that do not provide it.
try:
    from functools import lru_cache
except ImportError:
    # Just a dummy decorator to get the checks to run on python2
    # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
    def lru_cache():
        # Returns the wrapped function unchanged - no caching is performed.
        return lambda func: func
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
# File names expected inside a saved/pretrained tokenizer directory.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}
# Shortcut model names -> S3 URLs of each vocabulary / BPE-merges file.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
        'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json",
        'gpt2-large': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json",
        'distilgpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-vocab.json",
    },
    'merges_file':
    {
        'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
        'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt",
        'gpt2-large': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt",
        'distilgpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-merges.txt",
    },
}
# Maximum input length (in positions) supported by each pretrained model.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt2': 1024,
    'gpt2-medium': 1024,
    'gpt2-large': 1024,
    'distilgpt2': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Build the reversible byte -> unicode-character table for byte-level BPE.

    Every one of the 256 byte values is mapped to a printable unicode
    character: bytes already in the printable ASCII / latin-1 ranges map to
    themselves, while the remaining (whitespace/control) bytes are shifted
    into code points starting at U+0100, so no BPE symbol ever contains a
    character the merge code would choke on. The result is cached.
    """
    _chr = unichr if sys.version_info[0] == 2 else chr
    # Bytes that map to themselves: '!'..'~' plus two latin-1 runs.
    byte_values = list(range(ord("!"), ord("~") + 1))
    byte_values += list(range(ord("¡"), ord("¬") + 1))
    byte_values += list(range(ord("®"), ord("ÿ") + 1))
    char_codes = list(byte_values)
    shift = 0
    for byte in range(2 ** 8):
        if byte not in byte_values:
            # Remap this byte to the next free code point above 255.
            byte_values.append(byte)
            char_codes.append(2 ** 8 + shift)
            shift += 1
    return dict(zip(byte_values, [_chr(code) for code in char_codes]))
def get_pairs(word):
    """Return the set of symbol bigrams occurring in *word*.

    *word* is a tuple of symbols (each symbol being a variable-length
    string); the result holds every consecutive (previous, current) pair.
    """
    previous = word[0]
    bigrams = set()
    for current in word[1:]:
        bigrams.add((previous, current))
        previous = current
    return bigrams
class GPT2Tokenizer(PreTrainedTokenizer):
    """
    GPT-2 BPE tokenizer. Peculiarities:
    - Byte-level Byte-Pair-Encoding
    - Requires a space to start the input string => the encoding methods should be called with the
      ``add_prefix_space`` flag set to ``True``.
      Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve
      the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello"`
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, errors='replace', unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", **kwargs):
        """Load the JSON vocabulary and the BPE merge ranks from disk.

        Args:
            vocab_file: path to the token -> id JSON vocabulary.
            merges_file: path to the BPE merges file (one merge per line;
                the first line is a "#version" header).
            errors: error-handling scheme passed to ``bytes.decode`` in
                ``convert_tokens_to_string``.
        """
        super(GPT2Tokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
        self.max_len_single_sentence = self.max_len  # no default special tokens - you can update this value if you add special tokens
        self.max_len_sentences_pair = self.max_len  # no default special tokens - you can update this value if you add special tokens
        self.encoder = json.load(open(vocab_file, encoding="utf-8"))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # Skip the "#version" header line and the empty trailing line.
        bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_data]
        # Lower rank == higher merge priority.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def bpe(self, token):
        """Apply the learned byte-pair merges to ``token``.

        Returns the merged symbols joined by single spaces; results are
        memoized in ``self.cache``.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # ``first`` does not occur again; keep the tail as-is.
                    # (str.index raises ValueError only - the original bare
                    # ``except:`` also swallowed unrelated errors.)
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text, add_prefix_space=False):
        """ Tokenize a string.
            Args:
                - add_prefix_space (boolean, default False):
                    Begin the sentence with at least one space to get invariance to word order in GPT-2 (and RoBERTa) tokenizers.
        """
        if add_prefix_space:
            text = ' ' + text
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            if sys.version_info[0] == 2:
                token = ''.join(self.byte_encoder[ord(b)] for b in token)  # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)
            else:
                token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        text = ''.join(tokens)
        # Undo the byte->unicode mapping, then decode the raw bytes as UTF-8.
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory):
        """Save the tokenizer vocabulary and merge files to a directory."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write(u'#version: 0.2\n')
            # Write merges ordered by rank so loading restores the same priorities.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
                                   " Please check that the tokenizer is not corrupted!".format(merge_file))
                    index = token_index
                writer.write(' '.join(bpe_tokens) + u'\n')
                index += 1
        return vocab_file, merge_file
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/configuration_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RoBERTa configuration """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
from .configuration_bert import BertConfig
logger = logging.getLogger(__name__)
# Shortcut model names -> S3 URLs of the pretrained RoBERTa configuration files.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json",
    'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json",
    'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json",
}
class RobertaConfig(BertConfig):
    """Configuration for RoBERTa models.

    Identical to :class:`BertConfig`; only the archive map of pretrained
    configuration files is overridden.
    """
    pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
| 1,415 | 38.333333 | 111 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/configuration_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
# Shortcut model names -> S3 URLs of the pretrained OpenAI GPT configuration files.
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"
}
class OpenAIGPTConfig(PretrainedConfig):
    """Hyper-parameter container for `OpenAIGPTModel`.

    The first constructor argument is either an ``int`` (the vocabulary
    size, with every other hyper-parameter taken from the remaining
    arguments) or the path to a JSON configuration file whose key/value
    pairs are copied verbatim onto the instance.

    Stored hyper-parameters: position/context sizes (``n_positions``,
    ``n_ctx``), model width/depth (``n_embd``, ``n_layer``, ``n_head``),
    activation name ``afn``, dropout rates (``resid_pdrop``,
    ``embd_pdrop``, ``attn_pdrop``), ``layer_norm_epsilon``,
    ``initializer_range``, ``predict_special_tokens`` and the
    summary-head options (``num_labels``, ``summary_*``).
    """
    pretrained_config_archive_map = OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=40478,
        n_positions=512,
        n_ctx=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        predict_special_tokens=True,
        num_labels=1,
        summary_type='cls_index',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs
    ):
        """Build an OpenAIGPTConfig from explicit values or a JSON file."""
        super(OpenAIGPTConfig, self).__init__(**kwargs)
        from_json_file = isinstance(vocab_size_or_config_json_file, str)
        if not from_json_file and sys.version_info[0] == 2:
            # Python 2 only: unicode paths are accepted as well.
            from_json_file = isinstance(vocab_size_or_config_json_file, unicode)
        if from_json_file:
            # Copy every key/value pair of the JSON file onto the instance.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                for key, value in json.loads(reader.read()).items():
                    self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_positions = n_positions
            self.n_ctx = n_ctx
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.predict_special_tokens = predict_special_tokens
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_proj_to_labels = summary_proj_to_labels
            self.summary_first_dropout = summary_first_dropout
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    @property
    def max_position_embeddings(self):
        """Alias for ``n_positions``."""
        return self.n_positions

    @property
    def hidden_size(self):
        """Alias for ``n_embd``."""
        return self.n_embd

    @property
    def num_attention_heads(self):
        """Alias for ``n_head``."""
        return self.n_head

    @property
    def num_hidden_layers(self):
        """Alias for ``n_layer``."""
        return self.n_layer
| 5,100 | 36.785185 | 121 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import json
import math
import logging
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_transfo_xl import TransfoXLConfig
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
# Shortcut model names -> S3 URLs of the pretrained Transformer-XL weights.
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
def build_tf_to_pytorch_map(model, config):
    """ A map of modules from TF to PyTorch.
        This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.

        Maps TF variable names (strings) to the parameters/modules of
        ``model``. The ``r_r_bias``/``r_w_bias`` entries map to *lists* of
        parameters when ``config.untie_r`` makes the biases per-layer.
    """
    tf_to_pt_map = {}
    if hasattr(model, 'transformer'):
        # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
        tf_to_pt_map.update({
            "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
            "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
        for i, (out_l, proj_l, tie_proj) in enumerate(zip(
                model.crit.out_layers,
                model.crit.out_projs,
                config.tie_projs)):
            layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
            if config.tie_weight:
                # Output embeddings tied to input ones: only the bias is loaded.
                tf_to_pt_map.update({
                    layer_str + 'b': out_l.bias})
            else:
                raise NotImplementedError
                # I don't think this is implemented in the TF code
                tf_to_pt_map.update({
                    layer_str + 'lookup_table': out_l.weight,
                    layer_str + 'b': out_l.bias})
            if not tie_proj:
                tf_to_pt_map.update({
                    layer_str + 'proj': proj_l
                    })
        # Now load the rest of the transformer
        model = model.transformer
    # Embeddings
    for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
        layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
        tf_to_pt_map.update({
            layer_str + 'lookup_table': embed_l.weight,
            layer_str + 'proj_W': proj_l
            })
    # Transformer blocks
    for i, b in enumerate(model.layers):
        layer_str = "transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
            layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
            layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
            layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
            layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
            layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
            layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
        })
    # Relative positioning biases
    if config.untie_r:
        # Per-layer biases: collect one entry per decoder layer.
        r_r_list = []
        r_w_list = []
        for b in model.layers:
            r_r_list.append(b.dec_attn.r_r_bias)
            r_w_list.append(b.dec_attn.r_w_bias)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
    tf_to_pt_map.update({
        'transformer/r_r_bias': r_r_list,
        'transformer/r_w_bias': r_w_list})
    return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model

        Variables are matched by name via ``build_tf_to_pytorch_map``:
        kernels/projections are transposed to PyTorch layout and the
        ``r_r_bias``/``r_w_bias`` arrays are split along their first axis
        when they map to a list of per-layer parameters.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
                     "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array
    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if 'kernel' in name or 'proj' in name:
            # TF stores (in, out); PyTorch Linear expects (out, in).
            array = np.transpose(array)
        if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weigths
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Drop the variable and its optimizer slots so we can report leftovers.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)
    logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding used for relative positions.

    For every entry of ``pos_seq`` a vector of size ``demb`` is produced
    whose first half holds sines and second half cosines of the position
    multiplied by a geometric series of inverse frequencies.
    """

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        frequencies = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        # Registered as a buffer so it moves with the module but is not trained.
        self.register_buffer('inv_freq', frequencies)

    def forward(self, pos_seq, bsz=None):
        # Outer product: one row per position, one column per frequency.
        angles = torch.ger(pos_seq, self.inv_freq)
        embedding = torch.cat([angles.sin(), angles.cos()], dim=-1)[:, None, :]
        if bsz is None:
            return embedding
        return embedding.expand(-1, bsz, -1)
class PositionwiseFF(nn.Module):
    """Position-wise two-layer feed-forward block with residual + LayerNorm.

    ``pre_lnorm`` chooses where layer normalization happens: before the
    feed-forward sub-network (pre-LN) or after the residual sum (post-LN).
    """

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
        super(PositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # Linear -> ReLU -> Dropout -> Linear -> Dropout.
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        self.pre_lnorm = pre_lnorm

    def forward(self, inp):
        if not self.pre_lnorm:
            # Post-LN: feed-forward, then normalize the residual sum.
            return self.layer_norm(inp + self.CoreNet(inp))
        # Pre-LN: normalize first, feed-forward, then add the residual.
        return self.CoreNet(self.layer_norm(inp)) + inp
class RelPartialLearnableMultiHeadAttn(nn.Module):
    """Multi-head attention with Transformer-XL relative positional terms.

    Queries/keys/values come from one fused ``qkv_net`` linear layer;
    ``r_net`` projects the relative position embeddings. The global biases
    ``r_w_bias``/``r_r_bias`` may be passed in (shared across layers) or
    created per layer when not provided.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
                 r_r_bias=None, r_w_bias=None, output_attentions=False,
                 layer_norm_epsilon=1e-5):
        super(RelPartialLearnableMultiHeadAttn, self).__init__()
        self.output_attentions = output_attentions
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Single fused projection producing q, k and v in one matmul.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm
        if r_r_bias is None or r_w_bias is None: # Biases are not shared
            self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias
        # Projection applied to the relative position embeddings.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)

    def _rel_shift(self, x):
        """Shift trick converting absolute- to relative-position scores:
        zero-pad along dim 1, reshape, drop the first row, restore shape."""
        zero_pad_shape = (x.size(0), 1) + x.size()[2:]
        zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        x_padded = x_padded.view(*x_padded_shape)
        x = x_padded[1:].view_as(x)
        return x

    def forward(self, w, r, attn_mask=None, mems=None, head_mask=None):
        """Relative multi-head attention over ``w`` (qlen x bsz x d_model).

        ``r`` holds relative position embeddings (first dim = rlen);
        ``mems`` are cached states prepended to the keys/values;
        ``attn_mask`` is 2-d (qlen x klen) or 3-d (qlen x klen x bsz),
        with 1s marking blocked positions. Returns ``[output]`` plus the
        attention probabilities when ``self.output_attentions`` is set.
        """
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            # Keys/values cover memory + current segment; queries only the
            # current segment (last qlen rows of the fused projection).
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head
        #### compute attention score
        rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
        rr_head_q = w_head_q + self.r_r_bias
        BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and torch.sum(attn_mask).item():
            attn_mask = (attn_mask == 1) # Switch to bool
            if attn_mask.dim() == 2:
                if next(self.parameters()).dtype == torch.float16:
                    # -1e30 is not representable in fp16; use a large finite value.
                    attn_score = attn_score.float().masked_fill(
                        attn_mask[None,:,:,None], -65000).type_as(attn_score)
                else:
                    attn_score = attn_score.float().masked_fill(
                        attn_mask[None,:,:,None], -1e30).type_as(attn_score)
            elif attn_mask.dim() == 3:
                if next(self.parameters()).dtype == torch.float16:
                    attn_score = attn_score.float().masked_fill(
                        attn_mask[:,:,:,None], -65000).type_as(attn_score)
                else:
                    attn_score = attn_score.float().masked_fill(
                        attn_mask[:,:,:,None], -1e30).type_as(attn_score)
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask
        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            outputs = [w + attn_out]
        else:
            ##### residual connection + layer normalization
            outputs = [self.layer_norm(w + attn_out)]
        if self.output_attentions:
            outputs.append(attn_prob)
        return outputs
class RelPartialLearnableDecoderLayer(nn.Module):
    """One Transformer-XL decoder block: relative multi-head attention
    followed by a position-wise feed-forward sub-layer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5,
                 **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelPartialLearnableMultiHeadAttn(
            n_head, d_model, d_head, dropout,
            layer_norm_epsilon=layer_norm_epsilon, **kwargs)
        self.pos_ff = PositionwiseFF(
            d_model, d_inner, dropout,
            pre_lnorm=kwargs.get('pre_lnorm'),
            layer_norm_epsilon=layer_norm_epsilon)

    def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None):
        # Attention returns [output, (optional) attention probabilities].
        attn_outputs = self.dec_attn(dec_inp, r, attn_mask=dec_attn_mask,
                                     mems=mems, head_mask=head_mask)
        # Feed-forward on the attention output; pass any extras through.
        return [self.pos_ff(attn_outputs[0])] + attn_outputs[1:]
class AdaptiveEmbedding(nn.Module):
    """Embedding layer whose width shrinks for higher vocabulary clusters.

    The vocabulary is partitioned at ``cutoffs``; cluster ``i`` uses an
    embedding of width ``d_embed // (div_val ** i)`` and is projected up to
    ``d_proj``. With ``div_val == 1`` a single full-width table is used
    (plus a projection only when ``d_proj != d_embed``). Outputs are
    rescaled by ``sqrt(d_proj)``.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 sample_softmax=False):
        super(AdaptiveEmbedding, self).__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        self.emb_scale = d_proj ** 0.5
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            self.emb_layers.append(
                nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)
            )
            if d_proj != d_embed:
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
        else:
            for cluster in range(len(self.cutoffs)):
                lo, hi = self.cutoff_ends[cluster], self.cutoff_ends[cluster + 1]
                width = d_embed // (div_val ** cluster)
                self.emb_layers.append(nn.Embedding(hi - lo, width))
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, width)))

    def forward(self, inp):
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            flat_ids = inp.view(-1)
            emb_flat = torch.zeros([flat_ids.size(0), self.d_proj],
                                   dtype=param.dtype, device=param.device)
            for cluster in range(len(self.cutoffs)):
                lo, hi = self.cutoff_ends[cluster], self.cutoff_ends[cluster + 1]
                # Positions of the flattened input falling in this cluster.
                in_cluster = (flat_ids >= lo) & (flat_ids < hi)
                positions = in_cluster.nonzero().squeeze()
                if positions.numel() == 0:
                    continue
                cluster_ids = flat_ids.index_select(0, positions) - lo
                # Look up in the narrow table, then project up to d_proj.
                cluster_emb = F.linear(self.emb_layers[cluster](cluster_ids),
                                       self.emb_projs[cluster])
                emb_flat.index_copy_(0, positions, cluster_emb)
            embed = emb_flat.view(inp.size() + (self.d_proj,))
        embed.mul_(self.emb_scale)
        return embed
class TransfoXLPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    config_class = TransfoXLConfig
    pretrained_model_archive_map = TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_transfo_xl
    base_model_prefix = "transformer"

    def _init_weight(self, weight):
        # Scheme selected by config.init: 'uniform' or 'normal'
        # (any other value leaves the tensor untouched).
        if self.config.init == 'uniform':
            nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
        elif self.config.init == 'normal':
            nn.init.normal_(weight, 0.0, self.config.init_std)

    def _init_bias(self, bias):
        # Biases are always zero-initialized.
        nn.init.constant_(bias, 0.0)

    def _init_weights(self, m):
        """ Initialize the weights.
        """
        # Dispatch on the class name. NOTE: 'AdaptiveEmbedding' must be
        # tested before the generic 'Embedding' branch, which would also
        # match it by substring.
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            if hasattr(m, 'weight') and m.weight is not None:
                self._init_weight(m.weight)
            if hasattr(m, 'bias') and m.bias is not None:
                self._init_bias(m.bias)
        elif classname.find('AdaptiveEmbedding') != -1:
            if hasattr(m, 'emb_projs'):
                for i in range(len(m.emb_projs)):
                    if m.emb_projs[i] is not None:
                        nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find('Embedding') != -1:
            if hasattr(m, 'weight'):
                self._init_weight(m.weight)
        elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
            if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
                self._init_weight(m.cluster_weight)
            if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
                self._init_bias(m.cluster_bias)
            if hasattr(m, 'out_projs'):
                for i in range(len(m.out_projs)):
                    if m.out_projs[i] is not None:
                        nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find('LayerNorm') != -1:
            # LayerNorm weights are initialized around 1, not 0.
            if hasattr(m, 'weight'):
                nn.init.normal_(m.weight, 1.0, self.config.init_std)
            if hasattr(m, 'bias') and m.bias is not None:
                self._init_bias(m.bias)
        else:
            # Fallback for modules carrying relative-attention parameters.
            if hasattr(m, 'r_emb'):
                self._init_weight(m.r_emb)
            if hasattr(m, 'r_w_bias'):
                self._init_weight(m.r_w_bias)
            if hasattr(m, 'r_r_bias'):
                self._init_weight(m.r_r_bias)
            if hasattr(m, 'r_bias'):
                self._init_bias(m.r_bias)
TRANSFO_XL_START_DOCSTRING = r""" The Transformer-XL model was proposed in
`Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context`_
by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
It's a causal (uni-directional) transformer with relative positioning (sinusoïdal) embeddings which can reuse
previously computed hidden-states to attend to longer context (memory).
This model also uses adaptive softmax inputs and outputs (tied).
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context`:
https://arxiv.org/abs/1901.02860
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.TransfoXLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
TRANSFO_XL_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
Transformer-XL is a model with relative position embeddings so you can either pad the inputs on
the right or on the left.
Indices can be obtained using :class:`transformers.TransfoXLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**mems**: (`optional`)
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding and attend to longer context.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare Transformer-XL Model transformer outputting raw hidden-states without any specific head on top.",
                      TRANSFO_XL_START_DOCSTRING, TRANSFO_XL_INPUTS_DOCSTRING)
class TransfoXLModel(TransfoXLPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
        model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states, mems = outputs[:2]
    """
    def __init__(self, config):
        super(TransfoXLModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.n_token = config.n_token
        self.d_embed = config.d_embed
        self.d_model = config.d_model
        self.n_head = config.n_head
        self.d_head = config.d_head
        # Adaptive input embeddings (clustered vocabulary, tied to the softmax).
        self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
                                          div_val=config.div_val)
        self.drop = nn.Dropout(config.dropout)
        self.n_layer = config.n_layer
        self.tgt_len = config.tgt_len
        self.mem_len = config.mem_len
        self.ext_len = config.ext_len
        # Maximum key length the attention can see: current segment + extension + memory.
        self.max_klen = config.tgt_len + config.ext_len + config.mem_len
        self.attn_type = config.attn_type
        if not config.untie_r:
            # Relative-attention biases shared across all layers.
            self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
            self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.layers = nn.ModuleList()
        if config.attn_type == 0:  # the default attention
            for i in range(config.n_layer):
                self.layers.append(
                    RelPartialLearnableDecoderLayer(
                        config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                        tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
                        dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias,
                        output_attentions=self.output_attentions,
                        layer_norm_epsilon=config.layer_norm_epsilon)
                )
        else:  # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
            raise NotImplementedError  # Removed them to avoid maintaining dead code
        self.same_length = config.same_length
        self.clamp_len = config.clamp_len
        if self.attn_type == 0:  # default attention
            self.pos_emb = PositionalEmbedding(self.d_model)
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        self.init_weights()
    def _resize_token_embeddings(self, new_num_tokens):
        # Resizing is not supported for the adaptive embedding; return it unchanged.
        return self.word_emb
    def backward_compatible(self):
        self.sample_softmax = -1
    def reset_length(self, tgt_len, ext_len, mem_len):
        """Reset the segment, extension and memory lengths (e.g. for evaluation)."""
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
    def _prune_heads(self, heads):
        logger.info("Head pruning is not implemented for Transformer-XL model")
        pass
    def init_mems(self, data):
        """Create zero-filled memory tensors, one per layer.

        `data` is the (already transposed) [len, bsz] input; only its batch
        dimension ``data.size(1)`` is used.
        """
        if self.mem_len > 0:
            mems = []
            param = next(self.parameters())
            for i in range(self.n_layer):
                empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
                                    dtype=param.dtype, device=param.device)
                mems.append(empty)
            return mems
        else:
            return None
    def _update_mems(self, hids, mems, qlen, mlen):
        # does not deal with None
        if mems is None: return None
        # mems is not None
        assert len(hids) == len(mems), 'len(hids) != len(mems)'
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            end_idx = mlen + max(0, qlen - 0 - self.ext_len)
            beg_idx = max(0, end_idx - self.mem_len)
            for i in range(len(hids)):
                cat = torch.cat([mems[i], hids[i]], dim=0)
                new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems
    def forward(self, input_ids, mems=None, head_mask=None):
        # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
        # so we transpose here from shape [bsz, len] to shape [len, bsz]
        input_ids = input_ids.transpose(0, 1).contiguous()
        if mems is None:
            mems = self.init_mems(input_ids)
        qlen, bsz = input_ids.size()
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer
        word_emb = self.word_emb(input_ids)
        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + qlen
        if self.same_length:
            # Same attention span for every token: mask out positions that are
            # too far in the past in addition to the causal (upper) triangle.
            all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1+mlen)
                             + torch.tril(all_ones, -mask_shift_len))[:, :, None]  # -1
        else:
            # Plain causal mask: each token attends to itself, all memory and
            # all previous tokens of the current segment.
            dec_attn_mask = torch.triu(
                word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1+mlen)[:,:,None]
        hids = []
        attentions = []
        if self.attn_type == 0:  # default
            # Relative positions run backwards from klen-1 to 0.
            pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
                                   dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb)
            pos_emb = self.drop(pos_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                layer_outputs = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask,
                                      mems=mems_i, head_mask=head_mask[i])
                core_out = layer_outputs[0]
                if self.output_attentions:
                    attentions.append(layer_outputs[1])
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        core_out = self.drop(core_out)
        # FIX: _update_mems is declared as (hids, mems, qlen, mlen); the call
        # previously passed (mlen, qlen) swapped, which shifted the cached
        # memory window whenever ext_len > 0 (harmless only when ext_len == 0).
        new_mems = self._update_mems(hids, mems, qlen, mlen)
        # We transpose back here to shape [bsz, len, hidden_dim]
        outputs = [core_out.transpose(0, 1).contiguous(), new_mems]
        if self.output_hidden_states:
            # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
            hids.append(core_out)
            hids = list(t.transpose(0, 1).contiguous() for t in hids)
            outputs.append(hids)
        if self.output_attentions:
            # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
            attentions = list(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs.append(attentions)
        return outputs  # last hidden state, new_mems, (all hidden states), (all attentions)
@add_start_docstrings("""The Transformer-XL Model with a language modeling head on top
    (adaptive softmax with weights tied to the adaptive input embeddings)""",
    TRANSFO_XL_START_DOCSTRING, TRANSFO_XL_INPUTS_DOCSTRING)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
    r"""
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``None`` if ``lm_labels`` is provided else ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
            We don't output them when the loss is computed to speedup adaptive softmax decoding.
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
        model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, mems = outputs[:2]
    """
    def __init__(self, config):
        super(TransfoXLLMHeadModel, self).__init__(config)
        self.transformer = TransfoXLModel(config)
        self.sample_softmax = config.sample_softmax
        # use sampled softmax
        if config.sample_softmax > 0:
            self.out_layer = nn.Linear(config.d_model, config.n_token)
            self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
        # use adaptive softmax (including standard softmax)
        else:
            self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
                                                    config.cutoffs, div_val=config.div_val)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """
        Run this to be sure output and input (adaptive) softmax weights are tied
        """
        # sampled softmax
        if self.sample_softmax > 0:
            if self.config.tie_weight:
                # NOTE(review): self.transformer.word_emb is an AdaptiveEmbedding,
                # which has no `.weight` attribute — this branch looks like it
                # would raise; verify before enabling sample_softmax with tying.
                self.out_layer.weight = self.transformer.word_emb.weight
        # adaptive softmax (including standard softmax)
        else:
            if self.config.tie_weight:
                # Tie each output-softmax cluster to the matching input table.
                for i in range(len(self.crit.out_layers)):
                    self._tie_or_clone_weights(self.crit.out_layers[i],
                                               self.transformer.word_emb.emb_layers[i])
            if self.config.tie_projs:
                for i, tie_proj in enumerate(self.config.tie_projs):
                    if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                        # div_val == 1: a single shared projection (index 0).
                        # Cloned under torchscript — presumably because TorchScript
                        # does not support shared parameters; confirm.
                        if self.config.torchscript:
                            self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
                        else:
                            self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                    elif tie_proj and self.config.div_val != 1:
                        # div_val != 1: one projection per cluster.
                        if self.config.torchscript:
                            self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
                        else:
                            self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
    def reset_length(self, tgt_len, ext_len, mem_len):
        self.transformer.reset_length(tgt_len, ext_len, mem_len)
    def init_mems(self, data):
        self.transformer.init_mems(data)
    def forward(self, input_ids, mems=None, head_mask=None, labels=None):
        bsz = input_ids.size(0)
        tgt_len = input_ids.size(1)
        transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask)
        last_hidden = transformer_outputs[0]
        # Only the last tgt_len positions are scored (memory positions excluded).
        pred_hid = last_hidden[:, -tgt_len:]
        outputs = transformer_outputs[1:]
        if self.sample_softmax > 0 and self.training:
            assert self.config.tie_weight
            logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, labels, pred_hid, self.sampler)
            softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
            outputs = [softmax_output] + outputs
            if labels is not None:
                # TODO: This is not implemented
                raise NotImplementedError
        else:
            # crit returns log-probs when labels is None, per-token NLL otherwise.
            softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), labels)
            if labels is None:
                softmax_output = softmax_output.view(bsz, tgt_len, -1)
                outputs = [softmax_output] + outputs
            else:
                softmax_output = softmax_output.view(bsz, tgt_len)
                # Logits slot is None to speed up adaptive softmax decoding.
                outputs = [softmax_output, None] + outputs
        return outputs  # (loss), logits or None if labels is not None (speed up adaptive softmax), new_mems, (all hidden states), (all attentions)
# ===== end of modeling_transfo_xl.py (dataset metadata: 39,657 chars, avg line 43.5, max line 157, .py) =====
# ===== begin CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_albert.py =====
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings
from .modeling_bert import (ACT2FN, BertSelfAttention, BertIntermediate,
BertPooler,BertPredictionHeadTransform)
logger = logging.getLogger(__name__)
# Shortcut-name -> pretrained-weight-URL mapping used by from_pretrained().
# NOTE(review): all URLs are empty, so loading by shortcut name cannot
# download anything — only loading from a local checkpoint path will work.
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'albert-base': "",
    'albert-large': "",
    'albert-xlarge': "",
    'albert-xxlarge': "",
}
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model.

    Walks every variable of the TF checkpoint, maps its slash-separated scope
    name onto the PyTorch module tree attribute by attribute, and copies the
    array (transposing kernels, which TF stores transposed).
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        # Walk the scope components, descending the module tree.
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                # e.g. "layer_3" -> attribute "layer", index 3.
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # TF->PyTorch attribute-name translations.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    logger.info("Skipping {}".format("/".join(name)))
                    # NOTE(review): this `continue` only skips the current scope
                    # component (inner loop), not the whole variable — the
                    # remaining components are still applied to the stale
                    # pointer. Verify this is the intended behavior.
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name[-13:] == '_embeddings_2':
            # Factorized embedding projection: stored transposed in TF.
            pointer = getattr(pointer, 'weight')
            array = np.transpose(array)
        elif m_name == 'kernel':
            # TF dense kernels are [in, out]; PyTorch Linear weights are [out, in].
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
BertLayerNorm = torch.nn.LayerNorm
class AlbertEmbeddings(nn.Module):
    """ALBERT input embeddings.

    Factorized word embeddings (vocab -> embedding_size, then a bias-free
    projection up to hidden_size), plus learned position and token-type
    embeddings, followed by LayerNorm and dropout.
    """
    def __init__(self, config):
        super(AlbertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
        # Factorization: project the small word embedding up to the hidden size.
        self.word_embeddings_2 = nn.Linear(config.embedding_size, config.hidden_size, bias=False)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # Name kept as "LayerNorm" (not snake_case) so TF checkpoint variables
        # map onto it without renaming.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        if position_ids is None:
            # Default positions: 0..seq_len-1 broadcast over the batch.
            seq_len = input_ids.size(1)
            pos = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)
            position_ids = pos.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            # Default: everything belongs to segment 0.
            token_type_ids = torch.zeros_like(input_ids)
        projected_words = self.word_embeddings_2(self.word_embeddings(input_ids))
        combined = projected_words + self.position_embeddings(position_ids)
        combined = combined + self.token_type_embeddings(token_type_ids)
        return self.dropout(self.LayerNorm(combined))
class BertSelfOutput(nn.Module):
    """Post-attention projection with residual connection.

    With ``config.ln_type == 'preln'`` the residual sum is returned as-is
    (normalization happens before the sub-layer, elsewhere); otherwise the
    classic post-LN residual is applied.
    """
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.ln_type = config.ln_type
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        residual = projected + input_tensor
        if self.ln_type == 'preln':
            # pre-LN: the input was normalized before the sub-layer already.
            return residual
        # post-LN: normalize the residual sum.
        return self.LayerNorm(residual)
class BertAttention(nn.Module):
    """Self-attention sub-layer (attention + output projection/residual) with
    support for pre-LN / post-LN placement and head pruning.
    """
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()
        self.ln_type = config.ln_type
    def prune_heads(self, heads):
        """Prune the given attention heads; `heads` uses the ORIGINAL
        (un-pruned) head numbering."""
        if len(heads) == 0:
            return
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # Output projection is pruned along its input dimension (dim=1).
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(self, input_tensor, attention_mask=None, head_mask=None):
        if self.ln_type == 'preln':
            # pre_ln: normalize before attention; the residual inside
            # self.output then adds the *un-normalized* input_tensor.
            hidden_state = self.output.LayerNorm(input_tensor)
            self_outputs = self.self(hidden_state, attention_mask, head_mask)
        else:
            # postln: attention on the raw input, LayerNorm inside self.output.
            self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class BertOutput(nn.Module):
    """Feed-forward output projection (intermediate_size -> hidden_size) with a
    residual connection; LayerNorm is applied only in the post-LN arrangement.
    """
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.ln_type = config.ln_type
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        residual = projected + input_tensor
        if self.ln_type == 'preln':
            # pre-LN: normalization happened before the sub-layer.
            return residual
        # post-LN: normalize the residual sum.
        return self.LayerNorm(residual)
class BertLayer(nn.Module):
    """One transformer block: self-attention followed by the feed-forward
    sub-layer, supporting both pre-LN and post-LN arrangements.
    """
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
        self.ln_type = config.ln_type
    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        attn_results = self.attention(hidden_states, attention_mask, head_mask)
        attn_out = attn_results[0]
        if self.ln_type == 'preln':
            # pre-LN: normalize before the feed-forward sub-layer; the residual
            # in self.output still uses the un-normalized attention output.
            ffn_input = self.output.LayerNorm(attn_out)
        else:
            # post-LN: feed the attention output straight in.
            ffn_input = attn_out
        ffn_hidden = self.intermediate(ffn_input)
        block_out = self.output(ffn_hidden, attn_out)
        # Append attention weights when the attention module returned them.
        return (block_out,) + attn_results[1:]
class AlbertEncoder(nn.Module):
    """Encoder applying ONE parameter-shared BertLayer ``num_hidden_layers``
    times — ALBERT's cross-layer parameter sharing.
    """
    def __init__(self, config):
        super(AlbertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.num_hidden_layers = config.num_hidden_layers
        # A single layer instance reused for every depth step.
        self.layer_shared = BertLayer(config)
    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        for depth in range(self.num_hidden_layers):
            if self.output_hidden_states:
                # Record the layer INPUT (so the embedding output is included).
                all_hidden_states = all_hidden_states + (hidden_states,)
            step_outputs = self.layer_shared(hidden_states, attention_mask, head_mask[depth])
            hidden_states = step_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (step_outputs[1],)
        if self.output_hidden_states:
            # Add the final layer's output as well.
            all_hidden_states = all_hidden_states + (hidden_states,)
        result = (hidden_states,)
        if self.output_hidden_states:
            result = result + (all_hidden_states,)
        if self.output_attentions:
            result = result + (all_attentions,)
        return result  # last-layer hidden state, (all hidden states), (all attentions)
class AlbertLMPredictionHead(nn.Module):
    """LM prediction head with ALBERT's factorized output:
    hidden -> transform -> project down to embedding_size -> vocab logits,
    plus an output-only per-token bias.
    """
    def __init__(self, config):
        super(AlbertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # Mirror of the factorized input embedding, so decoder weights can be
        # tied to the input embedding table; bias stays output-only.
        self.project_layer = nn.Linear(config.hidden_size, config.embedding_size, bias=False)
        self.decoder = nn.Linear(config.embedding_size,
                                 config.vocab_size,
                                 bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        factored = self.project_layer(transformed)
        return self.decoder(factored) + self.bias
class AlbertOnlyMLMHead(nn.Module):
    """Masked-LM head: thin wrapper exposing only the LM prediction scores."""
    def __init__(self, config):
        super(AlbertOnlyMLMHead, self).__init__()
        self.predictions = AlbertLMPredictionHead(config)
    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class AlbertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""
    def __init__(self, config):
        super(AlbertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class AlbertPreTrainingHeads(nn.Module):
    """Bundle of both pre-training heads: masked-LM scores over the sequence
    output and the next-sentence classifier over the pooled output."""
    def __init__(self, config):
        super(AlbertPreTrainingHeads, self).__init__()
        self.predictions = AlbertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, sequence_output, pooled_output):
        lm_scores = self.predictions(sequence_output)
        nsp_scores = self.seq_relationship(pooled_output)
        return lm_scores, nsp_scores
class AlbertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = BertConfig
    pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_albert
    # Prefix kept as "bert" so BERT-style checkpoint keys load unchanged.
    base_model_prefix = "bert"
    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            # LayerNorm: zero bias, unit gain.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
BERT_START_DOCSTRING = r""" The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertModel(AlbertPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    def __init__(self, config):
        super(AlbertModel, self).__init__(config)
        # ALBERT-specific embeddings and encoder; the pooler is reused from
        # the BERT implementation (a Linear + Tanh over the first token).
        self.embeddings = AlbertEmbeddings(config)
        self.encoder = AlbertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        # Swap in a resized copy of the word-embedding matrix and return it
        # so the base class can re-tie any output embeddings.
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
        # Default to "attend everywhere" / "all tokens are segment A".
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                # Same mask for every layer: broadcast over layers and batch.
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(
                    -1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype)  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        # Pooled output is derived from the first ([CLS]) token's hidden state.
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
    a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertForPreTraining(AlbertPreTrainedModel):
    r"""
    ALBERT trunk with both pre-training heads attached.

    Extra inputs:
        **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Token-level labels for the masked-LM loss. Indices should be in
            ``[-1, 0, ..., config.vocab_size]``; positions holding ``-1`` are
            ignored, the loss is only computed on labels in ``[0, ..., config.vocab_size]``.
        **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.

    Outputs (tuple):
        **loss**: (`optional`, only when both label tensors are provided) scalar
            sum of the masked-LM loss and the next-sentence loss.
        **prediction_scores**: ``(batch_size, sequence_length, config.vocab_size)``
            vocabulary logits before SoftMax.
        **seq_relationship_scores**: next-sentence logits before SoftMax.
        **hidden_states** / **attentions**: (`optional`) as configured.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForPreTraining.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, seq_relationship_scores = outputs[:2]
    """
    def __init__(self, config):
        super(AlbertForPreTraining, self).__init__(config)
        self.bert = AlbertModel(config)
        self.cls = AlbertPreTrainingHeads(config)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """Share (or clone, for TorchScript export) the input embeddings with
        the MLM decoder, and the factorized projection with the second
        embedding matrix."""
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
        self._tie_or_clone_data(self.cls.predictions.project_layer,
                                self.bert.embeddings.word_embeddings_2)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                masked_lm_labels=None, next_sentence_label=None):
        encoder_outputs = self.bert(input_ids,
                                    attention_mask=attention_mask,
                                    token_type_ids=token_type_ids,
                                    position_ids=position_ids,
                                    head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = encoder_outputs[1]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        # Scores first, then any hidden states / attentions the trunk returned.
        result = (prediction_scores, seq_relationship_score) + encoder_outputs[2:]

        if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size),
                                      masked_lm_labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2),
                                          next_sentence_label.view(-1))
            result = (masked_lm_loss + next_sentence_loss,) + result

        return result  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertForMaskedLM(AlbertPreTrainedModel):
    r"""
    ALBERT trunk with only the masked-language-modeling head.

    Extra inputs:
        **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels in ``[-1, 0, ..., config.vocab_size]``; ``-1`` positions are
            ignored, the loss is only computed on labels in ``[0, ..., config.vocab_size]``.

    Outputs (tuple):
        **loss**: (`optional`, only when ``masked_lm_labels`` is provided) the MLM loss.
        **prediction_scores**: ``(batch_size, sequence_length, config.vocab_size)``
            vocabulary logits before SoftMax.
        **hidden_states** / **attentions**: (`optional`) as configured.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMaskedLM.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]
    """
    def __init__(self, config):
        super(AlbertForMaskedLM, self).__init__(config)
        self.bert = AlbertModel(config)
        self.cls = AlbertOnlyMLMHead(config)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """Share (or clone, for TorchScript export) the input embeddings with
        the MLM decoder, and the factorized projection with the second
        embedding matrix."""
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
        self._tie_or_clone_data(self.cls.predictions.project_layer,
                                self.bert.embeddings.word_embeddings_2)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                masked_lm_labels=None):
        encoder_outputs = self.bert(input_ids,
                                    attention_mask=attention_mask,
                                    token_type_ids=token_type_ids,
                                    position_ids=position_ids,
                                    head_mask=head_mask)
        prediction_scores = self.cls(encoder_outputs[0])

        # Drop the pooled output; keep hidden states / attentions if present.
        result = (prediction_scores,) + encoder_outputs[2:]

        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            mlm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size),
                                masked_lm_labels.view(-1))
            result = (mlm_loss,) + result

        return result  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertForNextSentencePrediction(AlbertPreTrainedModel):
    r"""
    ALBERT trunk with only the next-sentence-prediction head.

    Extra inputs:
        **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.

    Outputs (tuple):
        **loss**: (`optional`, only when ``next_sentence_label`` is provided)
            the next-sentence classification loss.
        **seq_relationship_scores**: True/False continuation logits before SoftMax.
        **hidden_states** / **attentions**: (`optional`) as configured.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        seq_relationship_scores = outputs[0]
    """
    def __init__(self, config):
        super(AlbertForNextSentencePrediction, self).__init__(config)
        self.bert = AlbertModel(config)
        self.cls = AlbertOnlyNSPHead(config)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                next_sentence_label=None):
        encoder_outputs = self.bert(input_ids,
                                    attention_mask=attention_mask,
                                    token_type_ids=token_type_ids,
                                    position_ids=position_ids,
                                    head_mask=head_mask)
        # The NSP head only consumes the pooled ([CLS]) representation.
        seq_relationship_score = self.cls(encoder_outputs[1])

        result = (seq_relationship_score,) + encoder_outputs[2:]

        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            nsp_loss = loss_fct(seq_relationship_score.view(-1, 2),
                                next_sentence_label.view(-1))
            result = (nsp_loss,) + result

        return result  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the sequence classification/regression loss.
        Indices should be in ``[0, ..., config.num_labels - 1]``.
        If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
        If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings).
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer).

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """
    def __init__(self, config):
        super(AlbertForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = AlbertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Consistency fix: read num_labels from the local `config` like every
        # sibling head in this file (was `self.config.num_labels`; same value,
        # since the base class stores `config` on `self` before this line).
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None,
                position_ids=None, head_mask=None, labels=None):
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask)
        # Classify from the pooled ([CLS]) representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            if self.num_labels == 1:
                # Single output unit => regression task (e.g. STS-B).
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertForMultipleChoice(AlbertPreTrainedModel):
    r"""
    ALBERT trunk with a single-logit choice scorer on the pooled output.
    Inputs carry an extra `num_choices` dimension:
    ``input_ids`` is ``(batch_size, num_choices, sequence_length)``.

    Extra inputs:
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Index of the correct choice, in ``[0, ..., num_choices]``.

    Outputs (tuple):
        **loss**: (`optional`, only when ``labels`` is provided) classification loss.
        **classification_scores**: ``(batch_size, num_choices)`` scores before SoftMax.
        **hidden_states** / **attentions**: (`optional`) as configured.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
    """
    def __init__(self, config):
        super(AlbertForMultipleChoice, self).__init__(config)
        self.bert = AlbertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None,
                position_ids=None, head_mask=None, labels=None):
        num_choices = input_ids.shape[1]

        def _flatten(tensor):
            # Collapse (batch, num_choices, seq_len) -> (batch * num_choices, seq_len).
            return None if tensor is None else tensor.view(-1, tensor.size(-1))

        encoder_outputs = self.bert(_flatten(input_ids),
                                    attention_mask=_flatten(attention_mask),
                                    token_type_ids=_flatten(token_type_ids),
                                    position_ids=_flatten(position_ids),
                                    head_mask=head_mask)
        choice_logits = self.classifier(self.dropout(encoder_outputs[1]))
        # One logit per (example, choice) pair, regrouped per example.
        reshaped_logits = choice_logits.view(-1, num_choices)

        result = (reshaped_logits,) + encoder_outputs[2:]

        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)
            result = (loss,) + result

        return result  # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertForTokenClassification(AlbertPreTrainedModel):
    r"""
    ALBERT trunk with a per-token linear classifier on the sequence output.

    Extra inputs:
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Per-token labels in ``[0, ..., config.num_labels - 1]``.

    Outputs (tuple):
        **loss**: (`optional`, only when ``labels`` is provided) classification loss.
        **scores**: ``(batch_size, sequence_length, config.num_labels)`` logits before SoftMax.
        **hidden_states** / **attentions**: (`optional`) as configured.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForTokenClassification.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]
    """
    def __init__(self, config):
        super(AlbertForTokenClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = AlbertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None,
                position_ids=None, head_mask=None, labels=None):
        encoder_outputs = self.bert(input_ids,
                                    attention_mask=attention_mask,
                                    token_type_ids=token_type_ids,
                                    position_ids=position_ids,
                                    head_mask=head_mask)
        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)

        result = (logits,) + encoder_outputs[2:]

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is not None:
                # Restrict the loss to non-padding positions (mask == 1).
                keep = attention_mask.view(-1) == 1
                loss = loss_fct(flat_logits[keep], flat_labels[keep])
            else:
                loss = loss_fct(flat_logits, flat_labels)
            result = (loss,) + result

        return result  # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
    r"""
    **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the start of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the end of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]
    """
    def __init__(self, config):
        super(AlbertForQuestionAnswering, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = AlbertModel(config)
        # Projects each token's hidden state to (start_logit, end_logit);
        # config.num_labels is expected to be 2 here.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                start_positions=None, end_positions=None):
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the last dimension into separate start/end logit tensors of
        # shape (batch_size, sequence_length).
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (clamping to seq_len maps them onto ignore_index below).
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span losses into a single scalar.
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
# ===== CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_xlnet.py =====
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer, SequenceSummary, PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits
from .configuration_xlnet import XLNetConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-pytorch_model.bin",
'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """Build a dict mapping TF checkpoint variable names to PyTorch parameters.

    An explicit name map is used (rather than name mangling) to keep the
    PyTorch model as identical to the original TF model layout as possible.

    Args:
        model: an XLNet PyTorch model (with or without a ``transformer`` attribute,
            i.e. either a head model or the bare transformer).
        config: the model's ``XLNetConfig``.
        tf_weights: dict of name -> numpy array loaded from the TF checkpoint;
            used to only map optional heads that are actually present.

    Returns:
        dict mapping TF variable name -> PyTorch parameter (or list of
        parameters for stacked per-layer biases).
    """
    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        # `model` is a model with a head on top of the base transformer.
        if hasattr(model, 'lm_loss'):
            # We will load also the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
        # NOTE: 'sequnece_summary' (sic) must match the variable name as it is
        # spelled inside the TF checkpoints — do not correct the spelling here.
        if hasattr(model, 'sequence_summary') and 'model/sequnece_summary/summary/kernel' in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map['model/sequnece_summary/summary/kernel'] = model.sequence_summary.summary.weight
            tf_to_pt_map['model/sequnece_summary/summary/bias'] = model.sequence_summary.summary.bias
        # Optional task-specific classification/regression projection head.
        if hasattr(model, 'logits_proj') and config.finetuning_task is not None \
                and 'model/regression_{}/logit/kernel'.format(config.finetuning_task) in tf_weights:
            tf_to_pt_map['model/regression_{}/logit/kernel'.format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map['model/regression_{}/logit/bias'.format(config.finetuning_task)] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update({'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
                         'model/transformer/mask_emb/mask_emb': model.mask_emb})

    # Transformer blocks: one attention + feed-forward sub-map per layer.
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.rel_attn.o,
            layer_str + "rel_attn/q/kernel": b.rel_attn.q,
            layer_str + "rel_attn/k/kernel": b.rel_attn.k,
            layer_str + "rel_attn/r/kernel": b.rel_attn.r,
            layer_str + "rel_attn/v/kernel": b.rel_attn.v,
            layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
            layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
            layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
            layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
        })

    # Relative positioning biases: either one set per layer (untie_r) which the
    # TF checkpoint stores stacked along dim 0, or a single shared set.
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update({
        'model/transformer/r_r_bias': r_r_list,
        'model/transformer/r_w_bias': r_w_list,
        'model/transformer/r_s_bias': r_s_list,
        'model/transformer/seg_embed': seg_embed_list})
    return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
    """Load TF checkpoint weights into a PyTorch XLNet model.

    Requires TensorFlow to read the checkpoint at ``tf_path``. Kernel weights
    of dense layers are transposed (TF stores [in, out], PyTorch [out, in]),
    and per-layer stacked biases are split across the layer list.

    Returns the model with its parameters overwritten in place.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info("Importing {}".format(name))
        if name not in tf_weights:
            # e.g. optional heads mapped but absent from this checkpoint
            logger.info("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if 'kernel' in name and ('ff' in name or 'summary' in name or 'logit' in name):
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weigths: checkpoint stores the
            # per-layer biases stacked along the first axis.
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Drop consumed weights (and their Adam optimizer slots) so we can
        # report what was left over at the end.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation.

    This is the OpenAI GPT flavour of GELU used by XLNet (not exactly the
    same as BERT's erf-based version). See https://arxiv.org/abs/1606.08415
    """
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return x * (0.5 * (1.0 + torch.tanh(inner)))
def swish(x):
    """Swish activation (a.k.a. SiLU): ``x * sigmoid(x)``."""
    return torch.sigmoid(x) * x
# Lookup table used by the feed-forward blocks to resolve the activation
# name from the config (config.ff_activation) to a callable.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
# Prefer apex's fused LayerNorm kernel when available; fall back to the
# stock torch.nn.LayerNorm otherwise (same interface either way).
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm
except (ImportError, AttributeError) as e:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    from torch.nn import LayerNorm as XLNetLayerNorm
class XLNetRelativeAttention(nn.Module):
    """Two-stream multi-head attention with relative positional encoding.

    Weights are stored as raw ``nn.Parameter`` tensors (not ``nn.Linear``)
    so their layout matches the original TF checkpoint variables.
    Inside this module activations are time-major: from the einsum
    equations, axes are i/j = sequence positions, b = batch, n = heads,
    d = head dim, h = model dim.
    """
    def __init__(self, config):
        super(XLNetRelativeAttention, self).__init__()
        self.output_attentions = config.output_attentions
        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        # 1/sqrt(d_head) scaling applied to attention scores
        self.scale = 1 / (config.d_head ** 0.5)
        # Projection weights: [d_model, n_head, d_head]
        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        # Projection for the relative position encodings
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        # Relative attention biases (content, segment, position) and the
        # same/different-segment embedding table (2 entries).
        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)
    def prune_heads(self, heads):
        # Head pruning is not supported for XLNet's parameter layout.
        raise NotImplementedError
    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape
        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
        return x
    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        # Same relative shift as `rel_shift` but for scores laid out as
        # [batch, n_head, i, j].
        x_size = x.shape
        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3]-1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        # However, tracing doesn't like the nature of the slice, and if klen changes
        # during the run then it'll fail, whereas index_select will be fine.
        x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
        # x = x[:, :, :, :klen]
        return x
    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""
        # content based attention score
        ac = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_w_bias, k_head_h)
        # position based attention score
        bd = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum('ijbs,ibns->bnij', seg_mat, ef)
        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            # 65500 is used for fp16 to stay below the float16 max (~65504)
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum('ijbn->bnij', attn_mask)
            else:
                attn_score = attn_score - 1e30 * torch.einsum('ijbn->bnij', attn_mask)
        # attention probability
        attn_prob = F.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum('ijbn->bnij', head_mask)
        # attention output
        attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, v_head_h)
        if self.output_attentions:
            return attn_vec, torch.einsum('bnij->ijbn', attn_prob)
        return attn_vec
    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)
        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)
        return output
    def forward(self, h, g,
                attn_mask_h, attn_mask_g,
                r, seg_mat,
                mems=None, target_mapping=None, head_mask=None):
        """Run attention.

        With ``g`` given, runs the two-stream (content h / query g) attention
        used during permutation-LM pretraining; otherwise, standard single
        stream attention. Returns ``(output_h, output_g)`` plus attention
        probabilities when ``output_attentions`` is set.
        """
        if g is not None:
            ###### Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)
            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)
            ##### h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)
            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h
            # post processing
            output_h = self.post_attention(h, attn_vec_h)
            ##### g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)
            # core attention ops
            if target_mapping is not None:
                # project query positions onto the prediction targets
                q_head_g = torch.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
                attn_vec_g = torch.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
            # post processing
            output_g = self.post_attention(g, attn_vec_g)
            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g
        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)
            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)
            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)
            if self.output_attentions:
                attn_vec, attn_prob = attn_vec
            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None
        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
class XLNetFeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> dropout ->
    Linear -> dropout, followed by a residual connection and LayerNorm.
    """

    def __init__(self, config):
        super(XLNetFeedForward, self).__init__()
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        # The activation may be given either by name (looked up in ACT2FN)
        # or directly as a callable; the `unicode` check keeps Python 2 happy.
        ff_activation = config.ff_activation
        given_as_name = isinstance(ff_activation, str) or \
            (sys.version_info[0] == 2 and isinstance(ff_activation, unicode))
        self.activation_function = ACT2FN[ff_activation] if given_as_name else ff_activation

    def forward(self, inp):
        hidden = self.layer_1(inp)
        hidden = self.dropout(self.activation_function(hidden))
        hidden = self.dropout(self.layer_2(hidden))
        # residual connection + layer norm
        return self.layer_norm(hidden + inp)
class XLNetLayer(nn.Module):
    """A single XLNet transformer layer: two-stream relative attention
    followed by the position-wise feed-forward block (applied to both the
    content stream and, when present, the query stream).
    """

    def __init__(self, config):
        super(XLNetLayer, self).__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, output_h, output_g,
                attn_mask_h, attn_mask_g,
                r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        attn_outputs = self.rel_attn(output_h, output_g, attn_mask_h, attn_mask_g,
                                     r, seg_mat, mems=mems,
                                     target_mapping=target_mapping,
                                     head_mask=head_mask)
        output_h, output_g = attn_outputs[0], attn_outputs[1]
        # Feed-forward on the query stream only when two-stream attention ran.
        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)
        # Re-append the attention probabilities if they were returned.
        return (output_h, output_g) + attn_outputs[2:]
class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = XLNetConfig
    pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"
    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            # Raw nn.Parameter attention weights are not reached by the
            # nn.Linear/nn.Embedding branch above, so initialize them here.
            for param in [module.q, module.k, module.v, module.o, module.r,
                          module.r_r_bias, module.r_s_bias, module.r_w_bias,
                          module.seg_embed]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
# Shared docstring prepended (via @add_start_docstrings) to every XLNet model
# class below. Typos fixed: "Do to" -> "Due to", "XLnet" -> "XLNet".
XLNET_START_DOCSTRING = r"""    The XLNet model was proposed in
    `XLNet: Generalized Autoregressive Pretraining for Language Understanding`_
    by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
    XLNet is an extension of the Transformer-XL model pre-trained using an autoregressive method
    to learn bidirectional contexts by maximizing the expected likelihood over all permutations
    of the input sequence factorization order.

    The specific attention pattern can be controlled at training and test time using the `perm_mask` input.

    Due to the difficulty of training a fully auto-regressive model over various factorization order,
    XLNet is pretrained using only a sub-set of the output tokens as target which are selected
    with the `target_mapping` input.

    To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the `perm_mask` and
    `target_mapping` inputs to control the attention span and outputs (see examples in `examples/run_generation.py`)

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matter related to general usage and behavior.

    .. _`XLNet: Generalized Autoregressive Pretraining for Language Understanding`:
        http://arxiv.org/abs/1906.08237

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
# Shared inputs docstring for XLNet models. Fix: the **token_type_ids** entry
# was duplicated verbatim; the second copy is removed.
XLNET_INPUTS_DOCSTRING = r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            XLNet is a model with relative position embeddings so you can either pad the inputs on
            the right or on the left.
            Indices can be obtained using :class:`transformers.XLNetTokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The type indices in XLNet are NOT selected in the vocabulary, they can be arbitrary numbers and
            the important thing is that they should be different for tokens which belong to different segments.
            The model will compute relative segment differences from the given type indices:
            0 if the segment id of two tokens are the same, 1 if not.
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **mems**: (`optional`)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as output by the model
            (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context.
            To activate mems you need to set up config.mem_len to a positive value which will be the max number of tokens in
            the memory output by the model. E.g. `model = XLNetModel.from_pretrained('xlnet-base-case, mem_len=1024)` will
            instantiate a model which can use up to 1024 tokens of memory (in addition to the input it self).
        **perm_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, sequence_length)``:
            Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attend to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
            If None, each token attends to all the others (full bidirectional attention).
            Only used during pretraining (to define factorization order) or for sequential decoding (generation).
        **target_mapping**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_predict, sequence_length)``:
            Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token.
            Only used during pretraining for partial prediction or for sequential decoding (generation).
        **input_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
            Kept for compatibility with the original code base.
            You can only uses one of `input_mask` and `attention_mask`
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
                      XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetModel(XLNetPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetModel.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(XLNetModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.output_past = config.output_past

        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer

        self.word_embedding = nn.Embedding(config.n_token, config.d_model)
        # Learned embedding used as the query-stream input at target positions.
        self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
        self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        self.word_embedding = self._get_resized_embeddings(self.word_embedding, new_num_tokens)
        return self.word_embedding

    def _prune_heads(self, heads_to_prune):
        # Head pruning is not supported for XLNet's parameter layout.
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

        Args:
            qlen: length of the current query segment.
            mlen: length of the memory (cached) segment prepended to the keys.

        ::

                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]

        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            # Restrict each position to an attention span of constant length.
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)

        ret = ret.to(next(self.parameters()))
        return ret

    def cache_mem(self, curr_out, prev_mem):
        """cache hidden states into memory."""
        if self.reuse_len is not None and self.reuse_len > 0:
            curr_out = curr_out[:self.reuse_len]

        if prev_mem is None:
            new_mem = curr_out[-self.mem_len:]
        else:
            new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len:]

        # Memories are never back-propagated through.
        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        # Sinusoidal embedding: [len, 1, d_model], optionally expanded to bsz.
        sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        """create relative positional encoding."""
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))

        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
            # Forward and backward position sequences (used when the data is
            # fed in both directions during pretraining).
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        pos_emb = pos_emb.to(next(self.parameters()))
        return pos_emb

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None):
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move here the first dimension (batch) to the end
        input_ids = input_ids.transpose(0, 1).contiguous()
        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None

        qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
        klen = mlen + qlen

        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(self.attn_type))

        # data mask: input mask & perm mask
        # FIX: the assertion message was previously split over two statements,
        # leaving its second half as a dead string expression; the parentheses
        # make the whole message part of the assert.
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one.")
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            if mlen > 0:
                mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
                data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            # The content stream must not attend to the target position itself.
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            if mlen > 0:
                non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
        else:
            non_tgt_mask = None

        ##### Word embeddings and prepare h & g hidden states
        word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
            # else:  # We removed the inp_q input which was same as target mapping
            #     inp_q_ext = inp_q[:, :, None]
            #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        ##### Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            if mlen > 0:
                mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
                cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
            else:
                cat_ids = token_type_ids

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None

        ##### Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            if self.mem_len is not None and self.mem_len > 0 and self.output_past:
                # cache new mems
                new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask,
                                   r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping,
                                   head_mask=head_mask[i])
            output_h, output_g = outputs[:2]
            if self.output_attentions:
                attentions.append(outputs[2])

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(),)

        if self.mem_len is not None and self.mem_len > 0 and self.output_past:
            outputs = outputs + (new_mems,)

        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs = outputs + (attentions,)

        return outputs  # outputs, (new_mems), (hidden_states), (attentions)
@add_start_docstrings("""XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetLMHeadModel(XLNetPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Targets for language modeling. Indices are selected in
        ``[-1, 0, ..., config.vocab_size]``; positions labelled ``-1`` are
        ignored (masked), the loss is only computed for labels in
        ``[0, ..., config.vocab_size]``. The loss compares each position's
        logits with the label at the same position.

    Outputs: `Tuple` comprising, in order:
        **loss**: (returned only when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``,
            the language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``,
            vocabulary scores before SoftMax.
        **mems**: (when ``config.mem_len > 0``) list of per-layer cached hidden states
            (key/values of the attention blocks); can be fed back to attend to longer context.
        **hidden_states**: (when ``config.output_hidden_states=True``) list of per-layer hidden
            states plus the embedding output, each of shape ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (when ``config.output_attentions=True``) list of per-layer attention
            weights of shape ``(batch_size, num_heads, sequence_length, sequence_length)``.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
        # Predict the last (masked) token using a bi-directional context.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>")).unsqueeze(0)
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # previous tokens don't see the last token
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
        target_mapping[0, 0, -1] = 1.0  # our single prediction target is the last token
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        next_token_logits = outputs[0]  # shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
    """
    def __init__(self, config):
        super(XLNetLMHeadModel, self).__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length
        self.transformer = XLNetModel(config)
        # Hidden-states -> vocabulary projection; tied to the input embeddings below.
        self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """Share the output projection weights with the input embedding matrix."""
        self._tie_or_clone_weights(self.lm_loss, self.transformer.word_embedding)

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None, labels=None):
        # Run the base XLNet encoder.
        encoder_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
        )
        lm_logits = self.lm_loss(encoder_outputs[0])
        # Keep mems, hidden states and attentions (when present) after the logits.
        outputs = (lm_logits,) + encoder_outputs[1:]
        if labels is not None:
            # Flatten tokens; -1 labels are ignored by the loss.
            criterion = CrossEntropyLoss(ignore_index=-1)
            lm_loss = criterion(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
            outputs = (lm_loss,) + outputs
        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings("""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Targets for the classification/regression head, in
        ``[0, ..., config.num_labels - 1]``. With ``config.num_labels == 1`` a
        mean-square regression loss is computed, otherwise cross-entropy.

    Outputs: `Tuple` comprising, in order:
        **loss**: (returned only when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``,
            classification (or regression if ``config.num_labels == 1``) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``,
            scores before SoftMax.
        **mems**: (when ``config.mem_len > 0``) list of per-layer cached hidden states,
            reusable to attend to longer context.
        **hidden_states**: (when ``config.output_hidden_states=True``) list of per-layer hidden
            states plus the embedding output.
        **attentions**: (when ``config.output_attentions=True``) list of per-layer attention weights.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """
    def __init__(self, config):
        super(XLNetForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None, labels=None):
        # Encode with the base XLNet model.
        encoder_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
        )
        # Pool the sequence representation, then project to label space.
        summary = self.sequence_summary(encoder_outputs[0])
        logits = self.logits_proj(summary)
        # Keep mems / hidden states / attentions (when present) after the logits.
        outputs = (logits,) + encoder_outputs[1:]
        if labels is not None:
            if self.num_labels == 1:
                # Single output unit -> regression with mean-square loss.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings("""XLNet Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
    r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary; the second
            dimension enumerates the choices to score.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment indices in ``[0, 1]`` (``0`` = sentence A, ``1`` = sentence B).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Padding mask: ``1`` for real tokens, ``0`` for padding.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            ``1`` keeps a self-attention head, ``0`` masks it.
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Index of the correct choice, in ``[0, ..., num_choices]``.

    Outputs: `Tuple` comprising, in order:
        **loss**: (returned only when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``,
            the classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``,
            scores before SoftMax.
        **mems**: (when ``config.mem_len > 0``) list of per-layer cached hidden states.
        **hidden_states**: (when ``config.output_hidden_states=True``) list of per-layer hidden states.
        **attentions**: (when ``config.output_attentions=True``) list of per-layer attention weights.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        model = XLNetForMultipleChoice.from_pretrained('xlnet-base-cased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
    """
    def __init__(self, config):
        super(XLNetForMultipleChoice, self).__init__(config)
        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, 1)
        self.init_weights()

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None,
                labels=None, head_mask=None):
        num_choices = input_ids.shape[1]

        def _flatten(tensor):
            # Merge batch and choice dims: (bs, num_choices, slen) -> (bs * num_choices, slen).
            return tensor.view(-1, tensor.size(-1)) if tensor is not None else None

        encoder_outputs = self.transformer(
            _flatten(input_ids),
            token_type_ids=_flatten(token_type_ids),
            input_mask=_flatten(input_mask),
            attention_mask=_flatten(attention_mask),
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            head_mask=head_mask,
        )
        # Pool, score each choice with a single output unit, then regroup per batch item.
        summary = self.sequence_summary(encoder_outputs[0])
        reshaped_logits = self.logits_proj(summary).view(-1, num_choices)
        # Keep mems / hidden states / attentions (when present) after the logits.
        outputs = (reshaped_logits,) + encoder_outputs[1:]
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
    r"""
    **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the start of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the end of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token classification losses.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer) of pre-computed hidden states,
            usable to speed up sequential decoding and attend to longer context.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``.
    Examples::
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForQuestionAnsweringSimple.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]
    """
    def __init__(self, config):
        super(XLNetForQuestionAnsweringSimple, self).__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        # Projects each token's hidden state to 2 logits: span start and span end.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None,
                start_positions=None, end_positions=None):
        # Encode with the base XLNet model.
        outputs = self.transformer(input_ids,
                                   attention_mask=attention_mask,
                                   mems=mems,
                                   perm_mask=perm_mask,
                                   target_mapping=target_mapping,
                                   token_type_ids=token_type_ids,
                                   input_mask=input_mask,
                                   head_mask=head_mask)
        sequence_output = outputs[0]
        # Split the 2-unit projection into per-token start and end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the model inputs are mapped to `ignored_index`,
            # which CrossEntropyLoss skips. Use out-of-place `clamp` so the
            # caller's label tensors are not mutated (the previous in-place
            # `clamp_` silently modified them).
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
@add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    r"""
    **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the start of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the end of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels whether a question has an answer or no answer (SQuAD 2.0)
    **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
    **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
        Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
        1.0 means token should be masked. 0.0 mean token is not masked.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
            Indices for the top config.start_n_top start token possibilities (beam-search).
        **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size,)``
            Log probabilities for the ``is_impossible`` label of the answers.
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForQuestionAnswering.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss = outputs[0]  # with both positions provided, the tuple starts with the total loss
    """
    def __init__(self, config):
        super(XLNetForQuestionAnswering, self).__init__(config)
        # Beam widths for the start/end position beam search at inference time.
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top
        self.transformer = XLNetModel(config)
        # Pooler heads: start logits, end logits (conditioned on the start),
        # and the answerability classifier.
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)
        self.init_weights()
    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None,
                start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None,):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               mems=mems,
                                               perm_mask=perm_mask,
                                               target_mapping=target_mapping,
                                               token_type_ids=token_type_ids,
                                               input_mask=input_mask,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            # NOTE(review): squeeze_ mutates the caller's tensors in place.
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)
            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)
                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
            outputs = (total_loss,) + outputs
        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
            # Keep the top-k start candidates, then gather their hidden states
            # so the end head can be evaluated for every candidate at once.
            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) # Shape (batch size,): one single `cls_logits` for each sample
            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
| 72,560 | 52.002922 | 169 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_xlm.py | # coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLM model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import itertools
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer, SequenceSummary, SQuADHead
from .configuration_xlm import XLMConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
# Map of shortcut model names to the S3 URLs of their pre-trained PyTorch weights.
XLM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-pytorch_model.bin",
'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-pytorch_model.bin",
'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-pytorch_model.bin",
'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-pytorch_model.bin",
'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-pytorch_model.bin",
'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-pytorch_model.bin",
'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-pytorch_model.bin",
'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-pytorch_model.bin",
'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-pytorch_model.bin",
'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-pytorch_model.bin",
}
def create_sinusoidal_embeddings(n_pos, dim, out):
    """Fill ``out`` in place with non-trainable sinusoidal position encodings.

    Row ``pos`` receives ``sin(pos / 10000^(2*(j//2)/dim))`` at even columns
    ``j`` and the matching cosine at odd columns. The tensor is detached and
    flagged as requiring no gradient so it stays fixed during training.
    """
    # One timescale per column; columns 2k and 2k+1 share the same frequency.
    inv_timescales = np.power(10000.0, 2 * (np.arange(dim) // 2) / dim)
    angles = np.arange(n_pos)[:, None] / inv_timescales[None, :]
    out[:, 0::2] = torch.FloatTensor(np.sin(angles[:, 0::2]))
    out[:, 1::2] = torch.FloatTensor(np.cos(angles[:, 1::2]))
    out.detach_()
    out.requires_grad = False
def gelu(x):
    """
    Exact (erf-based) GELU activation: ``x * Phi(x)`` where ``Phi`` is the
    standard normal CDF.

    https://arxiv.org/abs/1606.08415
    (The tanh approximation used by some implementations is intentionally not
    used here.)
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def get_masks(slen, lengths, causal, padding_mask=None):
    """
    Generate hidden states mask, and optionally an attention mask.

    Args:
        slen: int, padded sequence length.
        lengths: ``torch.LongTensor`` of shape ``(bs,)`` with each sequence's true length.
        causal: bool; if True, build a lower-triangular (autoregressive) attention mask.
        padding_mask: optional precomputed ``(bs, slen)`` mask used directly as the
            hidden-states mask instead of deriving it from ``lengths``.

    Returns:
        mask: ``(bs, slen)`` bool tensor, True for real (non-padding) tokens.
        attn_mask: same as ``mask`` when not causal; ``(bs, slen, slen)`` bool
            tensor with triangular-inferior attention when causal.
    """
    bs = lengths.size(0)
    # `alen` is needed for the causal mask even when `padding_mask` is supplied;
    # compute it unconditionally (it was previously defined only in the
    # `padding_mask is None` branch, raising NameError for causal + padding_mask).
    alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
    if padding_mask is not None:
        mask = padding_mask
    else:
        assert lengths.max().item() <= slen
        mask = alen < lengths[:, None]
    # attention mask is the same as mask, or triangular inferior attention (causal)
    if causal:
        attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
    else:
        attn_mask = mask
    # sanity check
    assert mask.size() == (bs, slen)
    assert causal is False or attn_mask.size() == (bs, slen, slen)
    return mask, attn_mask
class MultiHeadAttention(nn.Module):
NEW_ID = itertools.count()
    def __init__(self, n_heads, dim, config):
        """Multi-head attention layer with separate q/k/v/output projections.

        Args:
            n_heads: number of attention heads; must divide ``dim`` evenly.
            dim: total hidden dimension, split evenly across the heads.
            config: model config providing ``output_attentions`` and
                ``attention_dropout``.
        """
        super(MultiHeadAttention, self).__init__()
        # Unique per-instance id; used as this layer's key in the (k, v) cache.
        self.layer_id = next(MultiHeadAttention.NEW_ID)
        self.output_attentions = config.output_attentions
        self.dim = dim
        self.n_heads = n_heads
        self.dropout = config.attention_dropout
        assert self.dim % self.n_heads == 0
        self.q_lin = nn.Linear(dim, dim)
        self.k_lin = nn.Linear(dim, dim)
        self.v_lin = nn.Linear(dim, dim)
        self.out_lin = nn.Linear(dim, dim)
        # Original indices of heads pruned so far (see `prune_heads`).
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads from this layer.

        The rows/columns of the q/k/v/output projections belonging to ``heads``
        are pruned and ``n_heads``/``dim`` are updated accordingly. Head indices
        refer to the original layout; heads pruned by earlier calls are taken
        into account when translating them to current positions.
        """
        attention_head_size = self.dim // self.n_heads
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_heads, attention_head_size)
        heads = set(heads) - self.pruned_heads  # skip heads that are already gone
        for head in heads:
            # Shift the index left by the number of already-pruned heads before it.
            head -= sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input, mask, kv=None, cache=None, head_mask=None):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = input.size()
if kv is None:
klen = qlen if cache is None else cache['slen'] + qlen
else:
klen = kv.size(1)
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
n_heads = self.n_heads
dim_per_head = self.dim // n_heads
mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
def shape(x):
""" projection """
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
elif cache is None or self.layer_id not in cache:
k = v = kv
k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
if cache is not None:
if self.layer_id in cache:
if kv is None:
k_, v_ = cache[self.layer_id]
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = cache[self.layer_id]
cache[self.layer_id] = (k, v)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)
weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
outputs = (self.out_lin(context),)
if self.output_attentions:
outputs = outputs + (weights,)
return outputs
class TransformerFFN(nn.Module):
    """Position-wise feed-forward sub-layer: lin1 -> activation -> lin2 -> dropout."""

    def __init__(self, in_dim, dim_hidden, out_dim, config):
        super(TransformerFFN, self).__init__()
        self.dropout = config.dropout
        self.lin1 = nn.Linear(in_dim, dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, out_dim)
        # GELU or ReLU depending on the configuration flag.
        self.act = gelu if config.gelu_activation else F.relu

    def forward(self, input):
        hidden = self.act(self.lin1(input))
        projected = self.lin2(hidden)
        return F.dropout(projected, p=self.dropout, training=self.training)
class XLMPreTrainedModel(PreTrainedModel):
    """Abstract base class handling weight initialization and providing a simple
    interface for downloading and loading pretrained XLM models.
    """
    config_class = XLMConfig
    pretrained_model_archive_map = XLM_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = None
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(XLMPreTrainedModel, self).__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize weights: normal init for embeddings and linear layers
        (std taken from the config when set), zero biases, and identity affine
        parameters for LayerNorm."""
        config = self.config
        if isinstance(module, nn.Embedding):
            if config is not None and config.embed_init_std is not None:
                nn.init.normal_(module.weight, mean=0, std=config.embed_init_std)
        if isinstance(module, nn.Linear):
            if config is not None and config.init_std is not None:
                nn.init.normal_(module.weight, mean=0, std=config.init_std)
            if getattr(module, 'bias', None) is not None:
                nn.init.constant_(module.bias, 0.)
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
XLM_START_DOCSTRING = r""" The XLM model was proposed in
`Cross-lingual Language Model Pretraining`_
by Guillaume Lample*, Alexis Conneau*. It's a transformer pre-trained using one of the following objectives:
- a causal language modeling (CLM) objective (next token prediction),
- a masked language modeling (MLM) objective (Bert-like), or
- a Translation Language Modeling (TLM) object (extension of Bert's MLM to multiple language inputs)
Original code can be found `here`_.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Cross-lingual Language Model Pretraining`:
https://arxiv.org/abs/1901.07291
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
.. _`here`:
https://github.com/facebookresearch/XLM
Parameters:
config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLM_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
XLM is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.XLMTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**langs**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens to be used to indicate the language of each token in the input.
Indices are languages ids which can be obtained from the language names by using two conversion mappings
provided in the configuration of the model (only provided for multilingual models).
More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and
the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**lengths**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Length of each sentence that can be used to avoid performing attention on padding token indices.
You can also use `attention_mask` for the same result (see above), kept here for compatbility.
Indices selected in ``[0, ..., input_ids.size(-1)]``:
**cache**:
dictionary with ``torch.FloatTensor`` that contains pre-computed
hidden-states (key and values in the attention blocks) as computed by the model
(see `cache` output below). Can be used to speed up sequential decoding.
The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare XLM Model transformer outputting raw hidden-states without any specific head on top.",
                      XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)
class XLMModel(XLMPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        model = XLMModel.from_pretrained('xlm-mlm-en-2048')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    def __init__(self, config):  # , dico, is_encoder, with_output):
        super(XLMModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        # encoder / decoder, output layer (decoder mode is not implemented)
        self.is_encoder = config.is_encoder
        self.is_decoder = not config.is_encoder
        if self.is_decoder:
            raise NotImplementedError("Currently XLM can only be used as an encoder")
        # self.with_output = with_output
        self.causal = config.causal

        # dictionary / languages
        self.n_langs = config.n_langs
        self.use_lang_emb = config.use_lang_emb
        self.n_words = config.n_words
        self.eos_index = config.eos_index
        self.pad_index = config.pad_index
        # self.dico = dico
        # self.id2lang = config.id2lang
        # self.lang2id = config.lang2id
        # assert len(self.dico) == self.n_words
        # assert len(self.id2lang) == len(self.lang2id) == self.n_langs

        # model parameters
        self.dim = config.emb_dim       # 512 by default
        self.hidden_dim = self.dim * 4  # 2048 by default (FFN is always 4x the model dim)
        self.n_heads = config.n_heads   # 8 by default
        self.n_layers = config.n_layers
        self.dropout = config.dropout
        self.attention_dropout = config.attention_dropout
        assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads'

        # embeddings (position, optional language, word)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
        if config.sinusoidal_embeddings:
            create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)
        if config.n_langs > 1 and config.use_lang_emb:
            self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
        self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
        self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)

        # transformer layers (post-norm: attn -> add -> LN, ffn -> add -> LN)
        self.attentions = nn.ModuleList()
        self.layer_norm1 = nn.ModuleList()
        self.ffns = nn.ModuleList()
        self.layer_norm2 = nn.ModuleList()
        # if self.is_decoder:
        #     self.layer_norm15 = nn.ModuleList()
        #     self.encoder_attn = nn.ModuleList()
        for _ in range(self.n_layers):
            self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
            self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
            # if self.is_decoder:
            #     self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
            #     self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
            self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
            self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))

        if hasattr(config, "pruned_heads"):
            # Re-apply head pruning recorded in the config, but only for layers
            # that still have the full head count (i.e. not pruned yet).
            pruned_heads = config.pruned_heads.copy().items()
            config.pruned_heads = {}
            for layer, heads in pruned_heads:
                if self.attentions[int(layer)].n_heads == config.n_heads:
                    self.prune_heads({int(layer): list(map(int, heads))})

        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        """Resize the word embedding matrix to ``new_num_tokens`` entries."""
        self.embeddings = self._get_resized_embeddings(self.embeddings, new_num_tokens)
        return self.embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.attentions[layer].prune_heads(heads)

    def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None,
                lengths=None, cache=None, head_mask=None):  # removed: src_enc=None, src_len=None
        if lengths is None:
            # Infer real sentence lengths by counting non-padding tokens.
            lengths = (input_ids != self.pad_index).sum(dim=1).long()
        # mask = input_ids != self.pad_index

        # check inputs
        bs, slen = input_ids.size()
        assert lengths.size(0) == bs
        assert lengths.max().item() <= slen
        # input_ids = input_ids.transpose(0, 1)  # batch size as dimension 0
        # assert (src_enc is None) == (src_len is None)
        # if src_enc is not None:
        #     assert self.is_decoder
        #     assert src_enc.size(0) == bs

        # generate masks
        mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
        # if self.is_decoder and src_enc is not None:
        #     src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]

        # position_ids
        if position_ids is None:
            position_ids = input_ids.new((slen,)).long()
            position_ids = torch.arange(slen, out=position_ids).unsqueeze(0)
        else:
            assert position_ids.size() == (bs, slen)  # (slen, bs)
            # position_ids = position_ids.transpose(0, 1)

        # langs
        if langs is not None:
            assert langs.size() == (bs, slen)  # (slen, bs)
            # langs = langs.transpose(0, 1)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.n_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layers

        # do not recompute cached elements: keep only the new (uncached) tail
        if cache is not None:
            _slen = slen - cache['slen']
            input_ids = input_ids[:, -_slen:]
            position_ids = position_ids[:, -_slen:]
            if langs is not None:
                langs = langs[:, -_slen:]
            mask = mask[:, -_slen:]
            attn_mask = attn_mask[:, -_slen:]

        # embeddings (word + position [+ language] [+ token type])
        tensor = self.embeddings(input_ids)
        tensor = tensor + self.position_embeddings(position_ids).expand_as(tensor)
        if langs is not None and self.use_lang_emb:
            tensor = tensor + self.lang_embeddings(langs)
        if token_type_ids is not None:
            tensor = tensor + self.embeddings(token_type_ids)
        tensor = self.layer_norm_emb(tensor)
        tensor = F.dropout(tensor, p=self.dropout, training=self.training)
        # zero out padding positions
        tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # transformer layers
        hidden_states = ()
        attentions = ()
        for i in range(self.n_layers):
            if self.output_hidden_states:
                hidden_states = hidden_states + (tensor,)

            # self attention
            attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i])
            attn = attn_outputs[0]
            if self.output_attentions:
                attentions = attentions + (attn_outputs[1],)
            attn = F.dropout(attn, p=self.dropout, training=self.training)
            tensor = tensor + attn
            tensor = self.layer_norm1[i](tensor)

            # encoder attention (for decoder only)
            # if self.is_decoder and src_enc is not None:
            #     attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
            #     attn = F.dropout(attn, p=self.dropout, training=self.training)
            #     tensor = tensor + attn
            #     tensor = self.layer_norm15[i](tensor)

            # FFN
            tensor = tensor + self.ffns[i](tensor)
            tensor = self.layer_norm2[i](tensor)
            tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states = hidden_states + (tensor,)

        # update cache length
        if cache is not None:
            cache['slen'] += tensor.size(1)

        # move back sequence length to dimension 0
        # tensor = tensor.transpose(0, 1)

        outputs = (tensor,)
        if self.output_hidden_states:
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            outputs = outputs + (attentions,)
        return outputs  # outputs, (hidden_states), (attentions)
class XLMPredLayer(nn.Module):
    """
    Prediction layer (cross_entropy or adaptive_softmax).

    When ``config.asm`` is False, a plain linear projection over the vocabulary
    is used with a cross-entropy loss; otherwise an adaptive softmax
    (``nn.AdaptiveLogSoftmaxWithLoss``) handles both scoring and loss.
    """
    def __init__(self, config):
        super(XLMPredLayer, self).__init__()
        self.asm = config.asm
        self.n_words = config.n_words
        self.pad_index = config.pad_index
        dim = config.emb_dim

        if config.asm is False:
            self.proj = nn.Linear(dim, config.n_words, bias=True)
        else:
            self.proj = nn.AdaptiveLogSoftmaxWithLoss(
                in_features=dim,
                n_classes=config.n_words,
                cutoffs=config.asm_cutoffs,
                div_value=config.asm_div_value,
                head_bias=True,  # default is False
            )

    def forward(self, x, y=None):
        """ Compute the loss, and optionally the scores.

        Args:
            x: hidden states of shape ``(..., emb_dim)``.
            y: optional target token ids; when given, the loss is prepended
               to the returned tuple.

        Returns:
            ``(scores,)`` or ``(loss, scores)`` (adaptive softmax returns log-probs).
        """
        outputs = ()
        if self.asm is False:
            scores = self.proj(x)
            outputs = (scores,) + outputs
            if y is not None:
                # Fix: 'elementwise_mean' was deprecated and later removed from
                # PyTorch; 'mean' is the equivalent supported reduction.
                loss = F.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction='mean')
                outputs = (loss,) + outputs
        else:
            scores = self.proj.log_prob(x)
            outputs = (scores,) + outputs
            if y is not None:
                _, loss = self.proj(x, y)
                outputs = (loss,) + outputs

        return outputs
@add_start_docstrings("""The XLM Model transformer with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
                      XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)
class XLMWithLMHeadModel(XLMPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores = outputs[0]  # Token scores are the first element of the output tuple
    """
    def __init__(self, config):
        super(XLMWithLMHeadModel, self).__init__(config)
        self.transformer = XLMModel(config)
        self.pred_layer = XLMPredLayer(config)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """Share the prediction-layer projection weights with the input embeddings."""
        self._tie_or_clone_weights(self.pred_layer.proj, self.transformer.embeddings)

    def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None,
                lengths=None, cache=None, head_mask=None, labels=None):
        # Encode with the base transformer, then score tokens (and compute the
        # loss when labels are given) with the tied prediction layer.
        base_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
        )
        pred_outputs = self.pred_layer(base_outputs[0], labels)
        # Keep new_mems and attention/hidden states if they are here
        return pred_outputs + base_outputs[1:]
@add_start_docstrings("""XLM Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
                      XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)
class XLMForSequenceClassification(XLMPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """
    def __init__(self, config):
        super(XLMForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLMModel(config)
        self.sequence_summary = SequenceSummary(config)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None,
                lengths=None, cache=None, head_mask=None, labels=None):
        # Run the base transformer and pool the final hidden states into one
        # vector per sequence.
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
        )
        logits = self.sequence_summary(transformer_outputs[0])

        # Keep new_mems and attention/hidden states if they are here
        outputs = (logits,) + transformer_outputs[1:]

        if labels is not None:
            if self.num_labels == 1:
                # Single label -> regression with mean-squared error.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                # Multiple labels -> classification with cross-entropy.
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs
@add_start_docstrings("""XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
                      XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)
class XLMForQuestionAnsweringSimple(XLMPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        model = XLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]
    """
    def __init__(self, config):
        super(XLMForQuestionAnsweringSimple, self).__init__(config)
        self.transformer = XLMModel(config)
        # One score per token for span start and span end; the split/unpack in
        # forward() requires config.num_labels == 2.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None,
                lengths=None, cache=None, head_mask=None, start_positions=None, end_positions=None):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               langs=langs,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               lengths=lengths,
                                               cache=cache,
                                               head_mask=head_mask)

        sequence_output = transformer_outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Split per-token scores into separate start / end logit tensors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = (start_logits, end_logits,)
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs

        outputs = outputs + transformer_outputs[1:]  # Keep new_mems and attention/hidden states if they are here

        return outputs
@add_start_docstrings("""XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
                      XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)
class XLMForQuestionAnswering(XLMPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
        **p_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        **start_scores**: (`optional`, returned during inference) span-start scores computed by the beam-search head.
        **end_scores**: (`optional`, returned during inference) span-end scores computed by the beam-search head.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss = outputs[0]
    """
    def __init__(self, config):
        super(XLMForQuestionAnswering, self).__init__(config)
        self.transformer = XLMModel(config)
        # Beam-search answer-span head; NOTE(review): its output layout
        # (loss during training, top-k scores at inference) is defined by
        # SQuADHead in modeling_utils — confirm there.
        self.qa_outputs = SQuADHead(config)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None,
                lengths=None, cache=None, head_mask=None, start_positions=None, end_positions=None,
                is_impossible=None, cls_index=None, p_mask=None):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               langs=langs,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               lengths=lengths,
                                               cache=cache,
                                               head_mask=head_mask)

        output = transformer_outputs[0]

        outputs = self.qa_outputs(output, start_positions=start_positions, end_positions=end_positions,
                                  cls_index=cls_index, is_impossible=is_impossible, p_mask=p_mask)

        outputs = outputs + transformer_outputs[1:]  # Keep new_mems and attention/hidden states if they are here

        return outputs
| 45,543 | 50.34611 | 163 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_ctrl.py | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch CTRL model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {"ctrl": "https://storage.googleapis.com/sf-ctrl/pytorch/seqlen256_v1.bin"}
def angle_defn(pos, i, d_model_size):
    """Sinusoidal-position angle: ``pos`` scaled by 10000^(-2*(i//2)/d_model_size).

    ``pos`` and ``i`` broadcast against each other (typically a column of
    positions against a row of dimension indices).
    """
    exponent = (2 * (i // 2)) / d_model_size
    scale = 1 / torch.pow(10000, exponent)
    return pos * scale
def positional_encoding(position, d_model_size, dtype):
    """Return the ``(position, d_model_size)`` sinusoidal positional-encoding table.

    Even angle columns are passed through sin and odd columns through cos; the
    two halves are concatenated along the feature axis (all sines first, then
    all cosines), matching the original CTRL implementation.
    """
    pos = torch.arange(position, dtype=dtype).unsqueeze(1)
    idx = torch.arange(d_model_size, dtype=dtype).unsqueeze(0)
    # Inlined angle computation: pos * 10000^(-2*(idx//2)/d_model_size).
    rates = 1 / torch.pow(10000, (2 * (idx // 2)) / d_model_size)
    angles = pos * rates
    return torch.cat([torch.sin(angles[:, 0::2]), torch.cos(angles[:, 1::2])], dim=-1)
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    """Standard attention: softmax(q @ k^T / sqrt(dk) + masks) @ v.

    ``mask`` is the causal mask (1 marks blocked positions; scaled by -1e4),
    ``attention_mask`` is an additive padding mask, and ``head_mask``
    multiplies the attention weights. Returns ``(context, attention_weights)``.
    Tensors are laid out (batch, heads, seq, depth).
    """
    depth = k.shape[-1]
    logits = torch.matmul(q, k.permute(0, 1, 3, 2)) / np.sqrt(depth)
    if mask is not None:
        # Push future positions to a large negative value before softmax.
        logits += (mask * -1e4)
    if attention_mask is not None:
        logits = logits + attention_mask
    weights = torch.softmax(logits, dim=-1)
    if head_mask is not None:
        weights = weights * head_mask
    context = torch.matmul(weights, v)
    return context, weights
class MultiHeadAttention(torch.nn.Module):
    """Multi-head scaled dot-product attention with optional cached key/values."""
    def __init__(self, d_model_size, num_heads, output_attentions=False):
        super(MultiHeadAttention, self).__init__()
        self.output_attentions = output_attentions
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        # Per-head feature size; assumes d_model_size is divisible by num_heads
        # (any remainder is silently truncated) -- TODO confirm with config.
        self.depth = int(d_model_size / self.num_heads)
        self.Wq = torch.nn.Linear(d_model_size, d_model_size)
        self.Wk = torch.nn.Linear(d_model_size, d_model_size)
        self.Wv = torch.nn.Linear(d_model_size, d_model_size)
        self.dense = torch.nn.Linear(d_model_size, d_model_size)
    def split_into_heads(self, x, batch_size):
        # (batch, seq, d_model) -> (batch, heads, seq, depth)
        return x.reshape(batch_size, -1, self.num_heads, self.depth).permute([0, 2, 1, 3])
    def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None):
        """Note the argument order (v, k, q) inherited from the original CTRL code."""
        bsz = q.shape[0]
        query = self.split_into_heads(self.Wq(q), bsz)
        key = self.split_into_heads(self.Wk(k), bsz)
        value = self.split_into_heads(self.Wv(v), bsz)
        if layer_past is not None:
            # Prepend cached keys/values along the sequence axis.
            key = torch.cat((layer_past[0], key), dim=-2)
            value = torch.cat((layer_past[1], value), dim=-2)
        # Cache (key, value) for incremental decoding.
        present = torch.stack((key, value))
        context, attn_weights = scaled_dot_product_attention(
            query, key, value, mask, attention_mask, head_mask)
        # (batch, heads, seq, depth) -> (batch, seq, d_model)
        merged = context.permute([0, 2, 1, 3]).reshape(bsz, -1, self.d_model_size)
        outputs = (self.dense(merged), present)
        if self.output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
def point_wise_feed_forward_network(d_model_size, dff):
    """Position-wise MLP: Linear(d_model -> dff), ReLU, Linear(dff -> d_model)."""
    layers = [
        torch.nn.Linear(d_model_size, dff),
        torch.nn.ReLU(),
        torch.nn.Linear(dff, d_model_size),
    ]
    return torch.nn.Sequential(*layers)
class EncoderLayer(torch.nn.Module):
    """Pre-norm transformer block: LN -> self-attention -> residual,
    then LN -> feed-forward -> residual."""
    def __init__(self, d_model_size, num_heads, dff, rate=0.1, output_attentions=False):
        super(EncoderLayer, self).__init__()
        self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, output_attentions)
        self.ffn = point_wise_feed_forward_network(d_model_size, dff)
        self.layernorm1 = torch.nn.LayerNorm(d_model_size, eps=1e-6)
        self.layernorm2 = torch.nn.LayerNorm(d_model_size, eps=1e-6)
        self.dropout1 = torch.nn.Dropout(rate)
        self.dropout2 = torch.nn.Dropout(rate)
    def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None):
        # Attention sub-block (pre-norm + residual).
        pre_attn = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(pre_attn, pre_attn, pre_attn, mask,
                                                 layer_past=layer_past,
                                                 attention_mask=attention_mask,
                                                 head_mask=head_mask)
        hidden = x + self.dropout1(attn_outputs[0])
        # Feed-forward sub-block (pre-norm + residual).
        ffn_out = self.dropout2(self.ffn(self.layernorm2(hidden)))
        # Pass the attention module's extras (present, attentions) through unchanged.
        return (hidden + ffn_out,) + attn_outputs[1:]
class CTRLPreTrainedModel(PreTrainedModel):
    """Abstract base class wiring the CTRL config class, pretrained archive map
    and weight-initialization scheme into the shared pretrained-model machinery.
    """
    config_class = CTRLConfig
    pretrained_model_archive_map = CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "transformer"
    def _init_weights(self, module):
        """Initialize one sub-module: normal(0, initializer_range) weights for
        linear/embedding layers (zero bias), ones/zeros for LayerNorm."""
        linear_like = (nn.Linear, Conv1D)
        if isinstance(module, linear_like + (nn.Embedding,)):
            # Slightly different from the TF version, which uses truncated_normal
            # (cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, linear_like) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
CTRL_START_DOCSTRING = r""" CTRL model was proposed in
`CTRL: A Conditional Transformer Language Model for Controllable Generation`_
by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`CTRL: A Conditional Transformer Language Model for Controllable Generation`:
https://www.github.com/salesforce/ctrl
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
CTRL_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
CTRL is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.CTRLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
                      CTRL_START_DOCSTRING, CTRL_INPUTS_DOCSTRING)
class CTRLModel(CTRLPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = CTRLTokenizer.from_pretrained('ctrl')
        model = CTRLModel.from_pretrained('ctrl')
        input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    def __init__(self, config):
        super(CTRLModel, self).__init__(config)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.output_past = config.output_past
        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer
        # Precomputed sinusoidal table, indexed by position ids in forward().
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)
        # Token embedding matrix, also used for token-type embeddings in forward().
        self.w = nn.Embedding(config.vocab_size, config.n_embd)
        self.dropout = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([EncoderLayer(config.n_embd,
                                             config.n_head,
                                             config.dff,
                                             config.resid_pdrop,
                                             config.output_attentions) for _ in range(config.n_layer)])
        self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.init_weights()
    def _resize_token_embeddings(self, new_num_tokens):
        # Swap in a resized copy of the embedding matrix and return it.
        self.w = self._get_resized_embeddings(self.w, new_num_tokens)
        return self.w
    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        # NOTE(review): EncoderLayer exposes `multi_head_attention` (which has no
        # prune_heads method), not `attn` -- this looks copied from GPT-2 and would
        # raise AttributeError if called; confirm before relying on head pruning.
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)
    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            # Length of the cached prefix, taken from the first layer's cached keys.
            past_length = past[0][0].size(-2)
        if position_ids is None:
            # Positions continue from the end of the cached prefix.
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        # Attention mask.
        if attention_mask is not None:
            attention_mask = attention_mask.view(-1, input_shape[-1])
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
            token_type_embeds = self.w(token_type_ids)
            # Token-type embeddings are scaled like token embeddings (by sqrt(d_model)).
            token_type_embeds *= np.sqrt(self.d_model_size)
        else:
            token_type_embeds = 0
        position_ids = position_ids.view(-1, input_shape[-1])
        inputs_embeds = self.w(input_ids)
        # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded
        seq_len = input_ids.shape[-1]
        # Upper-triangular causal mask: 1 marks future positions the layers must block.
        mask = torch.triu(torch.ones(seq_len, seq_len), 1).to(inputs_embeds.device)
        inputs_embeds *= np.sqrt(self.d_model_size)
        pos_embeds = self.pos_encoding[position_ids, :].to(inputs_embeds.device)
        hidden_states = inputs_embeds + pos_embeds + token_type_embeds
        hidden_states = self.dropout(hidden_states)
        output_shape = input_shape + (inputs_embeds.size(-1),)
        presents = ()
        all_hidden_states = ()
        all_attentions = []
        for i, (h, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            outputs = h(hidden_states,
                        mask,
                        layer_past=layer_past,
                        attention_mask=attention_mask,
                        head_mask=head_mask[i])
            hidden_states, present = outputs[:2]
            if self.output_past:
                presents = presents + (present,)
            if self.output_attentions:
                all_attentions.append(outputs[2])
        hidden_states = self.layernorm(hidden_states)
        hidden_states = hidden_states.view(*output_shape)
        if self.output_hidden_states:
            # Include the final (post-LayerNorm) hidden state as well.
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_past:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (presents), (all hidden_states), (attentions)
@add_start_docstrings("""The CTRL Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, CTRL_START_DOCSTRING, CTRL_INPUTS_DOCSTRING)
class CTRLLMHeadModel(CTRLPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for language modeling. The labels **are shifted** inside the model,
        so ``labels = input_ids`` is valid. Indices are in ``[-1, 0, ..., config.vocab_size]``;
        positions set to ``-1`` are ignored by the loss.
    Outputs: `Tuple`:
        **loss**: (`optional`, when ``labels`` is given) ``torch.FloatTensor`` of shape ``(1,)``
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
        **past** / **hidden_states** / **attentions**: forwarded from :class:`CTRLModel`.
    Examples::
        tokenizer = CTRLTokenizer.from_pretrained('ctrl')
        model = CTRLLMHeadModel.from_pretrained('ctrl')
        input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]
    """
    def __init__(self, config):
        super(CTRLLMHeadModel, self).__init__(config)
        self.transformer = CTRLModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """Share the LM head weights with the input embeddings (cloned instead of
        shared when exporting to TorchScript, which cannot handle tied parameters)."""
        self._tie_or_clone_weights(self.lm_head, self.transformer.w)
    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                labels=None):
        base_outputs = self.transformer(input_ids,
                                        past=past,
                                        attention_mask=attention_mask,
                                        token_type_ids=token_type_ids,
                                        position_ids=position_ids,
                                        head_mask=head_mask)
        logits = self.lm_head(base_outputs[0])
        result = (logits,) + base_outputs[1:]
        if labels is not None:
            # Next-token objective: logits at position t are scored against labels at t+1.
            shifted_logits = logits[..., :-1, :].contiguous()
            shifted_labels = labels[..., 1:].contiguous()
            criterion = CrossEntropyLoss(ignore_index=-1)
            loss = criterion(shifted_logits.view(-1, shifted_logits.size(-1)),
                             shifted_labels.view(-1))
            result = (loss,) + result
        return result  # (loss), lm_logits, presents, (all hidden_states), (attentions)
| 23,436 | 47.22428 | 134 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import logging
import os
import sys
from collections import Counter, OrderedDict
from io import open
import numpy as np
from .file_utils import cached_path
from .tokenization_utils import PreTrainedTokenizer
try:
import torch
except ImportError:
pass
# if sys.version_info[0] == 2:
# import cPickle as pickle
# else:
# import pickle
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'pretrained_vocab_file': 'vocab.bin', 'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'pretrained_vocab_file':
{
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'transfo-xl-wt103': None,
}
PRETRAINED_CORPUS_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
}
CORPUS_NAME = 'corpus.bin'
class TransfoXLTokenizer(PreTrainedTokenizer):
    """
    Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl

    The vocabulary is either read from a plain-text ``vocab.txt`` (one symbol per
    line) or restored wholesale from a torch-serialized ``vocab.bin`` produced by
    :meth:`save_vocabulary`.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, special=None, min_freq=0, max_size=None, lower_case=False,
                 delimiter=None, vocab_file=None, pretrained_vocab_file=None,
                 never_split=None, unk_token="<unk>", eos_token="<eos>",
                 additional_special_tokens=["<formula>"], **kwargs):
        super(TransfoXLTokenizer, self).__init__(unk_token=unk_token, eos_token=eos_token,
                                                 additional_special_tokens=additional_special_tokens,
                                                 **kwargs)
        self.max_len_single_sentence = self.max_len  # no default special tokens - you can update this value if you add special tokens
        self.max_len_sentences_pair = self.max_len  # no default special tokens - you can update this value if you add special tokens
        if never_split is None:
            never_split = self.all_special_tokens
        if special is None:
            special = []
        self.counter = Counter()
        self.special = special
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file
        self.never_split = never_split
        if pretrained_vocab_file is not None:
            # Hack because, honestly this tokenizer was not made to be used
            # in a library like ours, at all. Restore every attribute the saved
            # tokenizer had, without clobbering attributes already set above.
            vocab_dict = torch.load(pretrained_vocab_file)
            for key, value in vocab_dict.items():
                if key not in self.__dict__:
                    self.__dict__[key] = value
        if vocab_file is not None:
            self.build_vocab()
    def count_file(self, path, verbose=False, add_eos=False):
        """Tokenize ``path`` line by line, updating token frequency counts.

        Returns the list of tokenized sentences so callers can reuse them.
        """
        if verbose: logger.info('counting file {} ...'.format(path))
        assert os.path.exists(path)
        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    logger.info('    line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)
        return sents
    def count_sents(self, sents, verbose=False):
        """
        sents : a list of sentences, each a list of tokenized symbols
        """
        if verbose: logger.info('counting {} sents ...'.format(len(sents)))
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                logger.info('    line {}'.format(idx))
            self.counter.update(symbols)
    def _build_from_file(self, vocab_file):
        """Load the vocabulary from a text file (first whitespace-separated field
        of each line is a symbol); requires an <unk>/<UNK> entry."""
        self.idx2sym = []
        self.sym2idx = OrderedDict()
        with open(vocab_file, 'r', encoding='utf-8') as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        if '<UNK>' in self.sym2idx:
            self.unk_idx = self.sym2idx['<UNK>']
        elif '<unk>' in self.sym2idx:
            self.unk_idx = self.sym2idx['<unk>']
        else:
            # Fixed typo in the error message ('<unkown>' -> '<unknown>').
            raise ValueError('No <unknown> token in vocabulary')
    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary to a directory or file.

        Returns a 1-tuple with the path actually written.
        """
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['pretrained_vocab_file'])
        else:
            # Bug fix: a plain file path previously left `vocab_file` unbound,
            # causing a NameError on the torch.save call below.
            vocab_file = vocab_path
        torch.save(self.__dict__, vocab_file)
        return (vocab_file,)
    def build_vocab(self):
        """Build idx<->sym tables, either from `self.vocab_file` or from the
        frequency counts accumulated via count_file/count_sents."""
        if self.vocab_file:
            logger.info('building vocab from {}'.format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            logger.info('final vocab size {}'.format(len(self)))
        else:
            logger.info('building vocab with min_freq={}, max_size={}'.format(
                self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()
            for sym in self.special:
                self.add_special(sym)
            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq: break
                self.add_symbol(sym)
            logger.info('final vocab size {} from {} unique tokens'.format(
                len(self), len(self.counter)))
    def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
                    add_double_eos=False):
        """Tokenize and id-encode a file; with ``ordered=True`` the per-line
        tensors are concatenated into one flat LongTensor."""
        if verbose: logger.info('encoding file {} ...'.format(path))
        assert os.path.exists(path)
        encoded = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    logger.info('    line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos,
                                        add_double_eos=add_double_eos)
                encoded.append(self.convert_to_tensor(symbols))
        if ordered:
            encoded = torch.cat(encoded)
        return encoded
    def encode_sents(self, sents, ordered=False, verbose=False):
        """Id-encode already-tokenized sentences (see encode_file for `ordered`)."""
        if verbose: logger.info('encoding {} sents ...'.format(len(sents)))
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                logger.info('    line {}'.format(idx))
            encoded.append(self.convert_to_tensor(symbols))
        if ordered:
            encoded = torch.cat(encoded)
        return encoded
    def add_special(self, sym):
        """Register a special symbol and expose its id as `self.<name>_idx`."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
    def add_symbol(self, sym):
        """Register a regular symbol (no-op if already present)."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
    def _convert_id_to_token(self, idx):
        """Converts an id in a token (BPE) using the vocab."""
        assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx)
        return self.idx2sym[idx]
    def _convert_token_to_id(self, sym):
        """ Converts a token (str/unicode) in an id using the vocab. """
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        else:
            # logger.info('encounter unk {}'.format(sym))
            # assert '<eos>' not in sym
            if hasattr(self, 'unk_idx'):
                return self.sym2idx.get(sym, self.unk_idx)
            # Backward compatibility with pre-trained models
            elif '<unk>' in self.sym2idx:
                return self.sym2idx['<unk>']
            elif '<UNK>' in self.sym2idx:
                return self.sym2idx['<UNK>']
            else:
                raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement')
    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        out_string = ' '.join(tokens).strip()
        return out_string
    def convert_to_tensor(self, symbols):
        # Map tokens to ids and wrap them in a LongTensor.
        return torch.LongTensor(self.convert_tokens_to_ids(symbols))
    @property
    def vocab_size(self):
        return len(self.idx2sym)
    def _tokenize(self, line, add_eos=False, add_double_eos=False):
        """Whitespace (or `self.delimiter`) tokenization with optional lowercasing
        and <eos>/<S> sentence markers."""
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()
        # empty delimiter '' will evaluate False
        if self.delimiter == '':
            symbols = line
        else:
            symbols = line.split(self.delimiter)
        if add_double_eos: # lm1b
            return ['<S>'] + symbols + ['<S>']
        elif add_eos:
            return symbols + ['<eos>']
        else:
            return symbols
class LMOrderedIterator(object):
    """Iterate over one strictly ordered token stream in (bsz, bptt) batches."""
    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
        """
        data -- LongTensor -- the LongTensor is strictly ordered
        """
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len
        self.device = device
        # Columns per batch row once the stream is split evenly across bsz rows.
        self.n_step = data.size(0) // bsz
        # Drop the ragged tail, then lay the stream out as (n_step, bsz).
        trimmed = data.narrow(0, 0, self.n_step * bsz)
        self.data = trimmed.view(bsz, -1).t().contiguous().to(device)
        # Number of mini-batches (ceiling division).
        self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
    def get_batch(self, i, bptt=None):
        """Return (data, target, seq_len) for the window starting at row i;
        `data` may include up to ext_len rows of extra left context."""
        if bptt is None:
            bptt = self.bptt
        seq_len = min(bptt, self.data.size(0) - 1 - i)
        start = max(0, i - self.ext_len)
        stop = i + seq_len
        data_out = self.data[start:stop].transpose(0, 1).contiguous().to(self.device)
        target_out = self.data[i + 1:i + 1 + seq_len].transpose(0, 1).contiguous().to(self.device)
        return data_out, target_out, seq_len
    def get_fixlen_iter(self, start=0):
        for offset in range(start, self.data.size(0) - 1, self.bptt):
            yield self.get_batch(offset)
    def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
        """Yield batches with randomly jittered window lengths (for training)."""
        longest = self.bptt + max_deviation * std
        i = start
        while True:
            # Mostly full-length windows; occasionally half-length, then jittered
            # by a normal draw and clamped to [min_len, longest].
            window = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
            window = min(longest, max(min_len, int(np.random.normal(window, std))))
            data, target, seq_len = self.get_batch(i, window)
            i += seq_len
            yield data, target, seq_len
            if i >= self.data.size(0) - 2:
                break
    def __iter__(self):
        return self.get_fixlen_iter()
class LMShuffledIterator(object):
    # Packs a collection of independent sentences densely into fixed-size
    # (bsz, bptt) batches, optionally shuffling the sentence order per epoch.
    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
        """
        data -- list[LongTensor] -- there is no order among the LongTensors
        """
        self.data = data
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0
        self.device = device
        self.shuffle = shuffle
    def get_sent_stream(self):
        # index iterator (fresh permutation per call when shuffling)
        epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
            else np.array(range(len(self.data)))
        # sentence iterator
        for idx in epoch_indices:
            yield self.data[idx]
    def stream_iterator(self, sent_stream):
        """Yield (data, target, bptt) batches, pulling new sentences from
        ``sent_stream`` as each batch column runs out of tokens."""
        # streams for each data in the batch; streams[i] is the unread tail of
        # the sentence currently feeding column i
        streams = [None] * self.bsz
        data = torch.LongTensor(self.bptt, self.bsz)
        target = torch.LongTensor(self.bptt, self.bsz)
        n_retain = 0
        while True:
            # data   : [n_retain+bptt x bsz]
            # target : [bptt x bsz]
            # -1 marks unfilled slots (only the non-retained region is reset)
            data[n_retain:].fill_(-1)
            target.fill_(-1)
            valid_batch = True
            for i in range(self.bsz):
                n_filled = 0
                try:
                    while n_filled < self.bptt:
                        if streams[i] is None or len(streams[i]) <= 1:
                            streams[i] = next(sent_stream)
                        # number of new tokens to fill in
                        n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
                        # first n_retain tokens are retained from last batch
                        data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
                            streams[i][:n_new]
                        target[n_filled:n_filled+n_new, i] = \
                            streams[i][1:n_new+1]
                        streams[i] = streams[i][n_new:]
                        n_filled += n_new
                except StopIteration:
                    # Ran out of sentences: the partially-filled batch is dropped.
                    valid_batch = False
                    break
            if not valid_batch:
                return
            data_out = data.transpose(0, 1).contiguous().to(self.device)
            target_out = target.transpose(0, 1).contiguous().to(self.device)
            yield data_out, target_out, self.bptt
            # Carry up to ext_len trailing rows forward as extra left context.
            n_retain = min(data.size(0), self.ext_len)
            if n_retain > 0:
                data[:n_retain] = data[-n_retain:]
            data.resize_(n_retain + self.bptt, data.size(1))
    def __iter__(self):
        # sent_stream is an iterator
        sent_stream = self.get_sent_stream()
        for batch in self.stream_iterator(sent_stream):
            yield batch
class LMMultiFileIterator(LMShuffledIterator):
    """Shuffled LM iterator that reads its sentences lazily from a list of
    files instead of holding the whole corpus in memory."""

    def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
                 shuffle=False):
        self.paths = paths
        self.vocab = vocab
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len
        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self, path):
        # Encode one file into sentence tensors, optionally shuffling the
        # sentence order before streaming them out.
        sents = self.vocab.encode_file(path, add_double_eos=True)
        if self.shuffle:
            np.random.shuffle(sents)
        return iter(sents)

    def __iter__(self):
        if self.shuffle:
            np.random.shuffle(self.paths)

        for path in self.paths:
            # One sentence stream per file, re-batched by the parent class.
            for batch in self.stream_iterator(self.get_sent_stream(path)):
                yield batch
class TransfoXLCorpus(object):
    """Container for a tokenized language-modeling corpus: a vocabulary plus
    train/valid/test splits, with helpers to build it from raw files or load
    a pre-processed version from a remote archive / local cache."""

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a pre-processed corpus.
        """
        vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
            corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
        except EnvironmentError:
            # NOTE(review): returns None on failure instead of raising —
            # callers must check for a None corpus.
            logger.error(
                "Corpus '{}' was not found in corpus list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    corpus_file))
            return None
        if resolved_corpus_file == corpus_file:
            logger.info("loading corpus file {}".format(corpus_file))
        else:
            logger.info("loading corpus file {} from cache at {}".format(
                corpus_file, resolved_corpus_file))

        # Instantiate tokenizer.
        corpus = cls(*inputs, **kwargs)
        # The serialized corpus is a plain dict of attributes; copy them over.
        corpus_dict = torch.load(resolved_corpus_file)
        for key, value in corpus_dict.items():
            corpus.__dict__[key] = value
        corpus.vocab = vocab
        # Splits may be stored as plain lists; normalize to LongTensors.
        if corpus.train is not None:
            corpus.train = torch.tensor(corpus.train, dtype=torch.long)
        if corpus.valid is not None:
            corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
        if corpus.test is not None:
            corpus.test = torch.tensor(corpus.test, dtype=torch.long)
        return corpus

    def __init__(self, *args, **kwargs):
        # Splits stay None until build_corpus() or from_pretrained() fills them.
        self.vocab = TransfoXLTokenizer(*args, **kwargs)
        self.dataset = None
        self.train = None
        self.valid = None
        self.test = None

    def build_corpus(self, path, dataset):
        """Count, build the vocabulary and encode the raw `dataset` at `path`."""
        self.dataset = dataset

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103':
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            # character-level datasets: no end-of-sentence symbol
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
        elif self.dataset == 'lm1b':
            # lm1b keeps the training split as a list of file paths, read
            # lazily by LMMultiFileIterator.
            self.train = train_paths
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        """Return the batch iterator appropriate for `split` and this dataset."""
        if split == 'train':
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == 'lm1b':
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)
        # NOTE(review): an unknown split/dataset combination leaves data_iter
        # unbound and raises UnboundLocalError here.
        return data_iter
def get_lm_corpus(datadir, dataset):
    """Load (or build) a TransfoXLCorpus for `dataset` rooted at `datadir`.

    Cache lookup order: a torch cache (`cache.pt`) first, then a pickle cache
    (`cache.pkl`); if neither exists the corpus is built from the raw files
    and saved to `cache.pt` for the next run.
    """
    fn = os.path.join(datadir, 'cache.pt')
    fn_pickle = os.path.join(datadir, 'cache.pkl')
    if os.path.exists(fn):
        logger.info('Loading cached dataset...')
        # BUGFIX: this branch tested `fn` but loaded `fn_pickle`.
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        # BUGFIX: was `elif os.path.exists(fn)`, duplicating the first
        # condition and making the pickle branch unreachable.
        logger.info('Loading cached dataset from pickle...')
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
    else:
        logger.info('Producing dataset {}...'.format(dataset))
        # Per-dataset tokenizer settings (special symbols, lower-casing, vocab).
        kwargs = {}
        if dataset in ['wt103', 'wt2']:
            kwargs['special'] = ['<eos>']
            kwargs['lower_case'] = False
        elif dataset == 'ptb':
            kwargs['special'] = ['<eos>']
            kwargs['lower_case'] = True
        elif dataset == 'lm1b':
            kwargs['special'] = []
            kwargs['lower_case'] = False
            kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
        elif dataset in ['enwik8', 'text8']:
            pass

        corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
        torch.save(corpus, fn)

    return corpus
| 21,824 | 36.62931 | 133 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
from io import open
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
logger = logging.getLogger(__name__)
# File names of the vocabulary artifacts bundled with an OpenAI GPT checkpoint.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

# Remote locations of the vocab/merges files, keyed by shortcut model name.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json",
    },
    'merges_file':
    {
        'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt",
    },
}

# Maximum input length (in tokens) supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openai-gpt': 512,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.
    word is represented as tuple of symbols (symbols being variable-length strings)

    Robustness fix: words with fewer than two symbols now yield an empty set
    (the previous implementation raised IndexError on an empty word).
    """
    # zip(word, word[1:]) enumerates every adjacent symbol pair exactly once.
    return set(zip(word, word[1:]))
def text_standardize(text):
    """
    fixes some issues the spacy tokenizer had on books corpus
    also does some whitespace standardization
    """
    # Normalize typographic dashes, ellipsis and acute accent to ASCII forms.
    for fancy, plain in (('—', '-'), ('–', '-'), ('―', '-'),
                         ('…', '...'), ('´', "'")):
        text = text.replace(fancy, plain)
    # Pad runs of punctuation with spaces so they split into separate tokens.
    text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
    # Collapse whitespace around newlines, then squeeze remaining runs of
    # non-newline whitespace into single spaces.
    text = re.sub(r'\s*\n\s*', ' \n ', text)
    text = re.sub(r'[^\S\n]+', ' ', text)
    return text.strip()
class OpenAIGPTTokenizer(PreTrainedTokenizer):
    """
    BPE tokenizer. Peculiarities:
        - lower case all inputs
        - uses SpaCy tokenizer and ftfy for pre-BPE tokenization if they are installed, fallback to BERT's BasicTokenizer if not.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """Load the BPE vocabulary (`vocab_file`, JSON) and the ranked merge
        list (`merges_file`), and pick the pre-tokenization backend."""
        super(OpenAIGPTTokenizer, self).__init__(unk_token=unk_token, **kwargs)

        self.max_len_single_sentence = self.max_len  # no default special tokens - you can update this value if you add special tokens
        self.max_len_sentences_pair = self.max_len  # no default special tokens - you can update this value if you add special tokens

        try:
            import ftfy
            from spacy.lang.en import English
            _nlp = English()
            self.nlp = _nlp.Defaults.create_tokenizer(_nlp)
            self.fix_text = ftfy.fix_text
        except ImportError:
            logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
            self.nlp = BasicTokenizer(do_lower_case=True)
            self.fix_text = None

        # BUGFIX: the vocab/merges file handles were previously never closed.
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v:k for k,v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # first line is a version header, last entry is the trailing ''
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Number of entries in the BPE vocabulary."""
        return len(self.encoder)

    def bpe(self, token):
        """Byte-pair encode one (lower-cased) token, returning its sub-tokens
        joined by spaces; the last sub-token carries the `</w>` marker."""
        # Check the cache before doing any work (the original built `word` first).
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            # Greedily merge the lowest-ranked (earliest-learned) pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # BUGFIX: was a bare `except:` — only a missing symbol
                    # (ValueError from tuple.index) should end this scan.
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == '\n </w>':
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """ Tokenize a string. """
        split_tokens = []
        if self.fix_text is None:
            # Using BERT's BasicTokenizer
            text = self.nlp.tokenize(text)
            for token in text:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        else:
            # Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
            text = self.nlp(text_standardize(self.fix_text(text)))
            for token in text:
                split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an id in a token (BPE) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory):
        """Save the tokenizer vocabulary and merge files to a directory."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write(u'#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # bpe_ranks should enumerate merges consecutively; a gap
                    # indicates a corrupted tokenizer state.
                    logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
                                   " Please check that the tokenizer is not corrupted!".format(merge_file))
                    index = token_index
                writer.write(' '.join(bpe_tokens) + u'\n')
                index += 1

        return vocab_file, merge_file
| 7,784 | 36.248804 | 133 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utilities for PyTorch Transformer XL model.
Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional per-cluster projections, as used by
    Transformer-XL. Tokens are split into a head shortlist plus tail clusters
    given by `cutoffs`; each tail cluster's probability mass is routed through
    one extra logit appended to the head."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False):
        super(ProjectedAdaptiveLogSoftmax, self).__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # head output = shortlist tokens + one routing logit per tail cluster
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            # single shared output layer; projections only if d_proj != d_embed
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(
                        nn.Parameter(torch.FloatTensor(d_proj, d_embed))
                    )
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            # one output layer per cluster, with embedding size shrunk by
            # div_val**i for the rarer clusters
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = d_embed // (div_val ** i)

                self.out_projs.append(
                    nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))
                )

                self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Linear logits, optionally through a low-rank projection `proj`."""
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = F.linear(hidden, proj.t().contiguous())
            logit = F.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        '''
            Params:
                hidden :: [len*bsz x d_proj]
                labels :: [len*bsz]
            Return:
                if labels is not None:
                    out :: [len*bsz] negative log-likelihood of the labels
                else:
                    out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
            We could replace this implementation by the native PyTorch one
            if theirs had an option to set bias on all clusters in the native one.
            here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
        '''

        if labels is not None:
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size '
                                   'in the batch dimension.')

        if self.n_clusters == 0:
            # plain (non-adaptive) softmax
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                        self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                out = -F.log_softmax(logit, dim=-1) \
                        .gather(1, labels.unsqueeze(1)).squeeze(1)
            else:
                out = F.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    # append the cluster routing rows to the head
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] \
                                  + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        r""" Computes log probabilities for all :math:`n\_classes`
        From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
        Args:
            hidden (Tensor): a minibatch of examples
        Returns:
            log-probabilities of for each class :math:`c`
            in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
            parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
        Shape:
            - Input: :math:`(N, in\_features)`
            - Output: :math:`(N, n\_classes)`
        """
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                        self.out_layers[0].bias, self.out_projs[0])
            return F.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = F.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                    # BUGFIX: the routing logit for tail cluster i lives at head
                    # column cutoffs[0]+i-1 (matching `forward`); the previous
                    # `head_logprob[:, -i]` picked the wrong column and could
                    # not broadcast against tail_logprob_i.
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    # BUGFIX: was `out[:, start_idx, stop_idx]`, a 3-D index on
                    # a 2-D tensor (IndexError); the intent is the column slice.
                    out[:, start_idx:stop_idx] = logprob_i

            return out
class LogUniformSampler(object):
    def __init__(self, range_max, n_sample):
        """
        Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
            `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`

        expected count can be approximated by 1 - (1 - p)^n
        and we use a numerically stable version -expm1(num_tries * log1p(-p))

        Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
        """
        with torch.no_grad():
            self.range_max = range_max
            # P(class k) = (log(k+2) - log(k+1)) / log(range_max + 1)
            boundaries = torch.arange(1., range_max + 2., 1.).log_()
            self.dist = (boundaries[1:] - boundaries[:-1]) / boundaries[-1]

            # log expected count, evaluated in double precision via
            # log1p/expm1 for numerical stability.
            num_tries = 2 * n_sample
            log1p_p = torch.log1p(self.dist.double())
            self.log_q = torch.log(-torch.expm1(-log1p_p * num_tries)).float()

        self.n_sample = n_sample

    def sample(self, labels):
        """Draw a set of unique negative samples and return their log
        expected counts alongside those of the true `labels`.

            labels: [b1, b2]
        Return
            true_log_probs: [b1, b2]
            samp_log_probs: [n_sample]
            neg_samples: [n_sample]
        """
        draws = 2 * self.n_sample

        with torch.no_grad():
            device = labels.device
            candidates = torch.multinomial(self.dist, draws, replacement=True).unique()
            neg_samples = candidates.to(device)
            true_log_probs = self.log_q[labels].to(device)
            samp_log_probs = self.log_q[neg_samples].to(device)
            return true_log_probs, samp_log_probs, neg_samples
def sample_logits(embedding, bias, labels, inputs, sampler):
    """Compute sampled-softmax logits for the true tokens plus one shared set
    of negative samples, correcting each logit by its log expected count.

        embedding: an nn.Embedding layer
        bias: [n_vocab]
        labels: [b1, b2]
        inputs: [b1, b2, n_emb]
        sampler: you may use a LogUniformSampler
    Return
        logits: [b1, b2, 1 + n_sample]
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)

    n_sample = neg_samples.size(0)
    b1, b2 = labels.size(0), labels.size(1)

    # Embed true and negative ids in a single lookup, then split the result.
    all_ids = torch.cat([labels.view(-1), neg_samples])
    all_w = embedding(all_ids)
    all_b = bias[all_ids]

    true_w = all_w[:-n_sample].view(b1, b2, -1)
    true_b = all_b[:-n_sample].view(b1, b2)
    sample_w = all_w[-n_sample:].view(n_sample, -1)
    sample_b = all_b[-n_sample:]

    # Positions where a negative sample collides with the true label — those
    # logits are masked out so the true class is never counted twice.
    hit = (labels[:, :, None] == neg_samples).detach()

    true_logits = torch.einsum('ijk,ijk->ij',
        [true_w, inputs]) + true_b - true_log_probs
    sample_logits = torch.einsum('lk,ijk->ijl',
        [sample_w, inputs]) + sample_b - samp_log_probs
    sample_logits.masked_fill_(hit, -1e30)

    return torch.cat([true_logits[:, :, None], sample_logits], -1)
| 13,568 | 39.747748 | 132 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu
from .configuration_roberta import RobertaConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
# Download URLs for the official pretrained RoBERTa checkpoints, keyed by
# shortcut model name.
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin",
    'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin",
    'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin",
}
class RobertaEmbeddings(BertEmbeddings):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    """
    def __init__(self, config):
        super(RobertaEmbeddings, self).__init__(config)
        # RoBERTa reserves index 1 for padding; position numbering starts
        # right after it.
        self.padding_idx = 1
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size,
            padding_idx=self.padding_idx)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        if position_ids is None:
            # Position numbers begin at padding_idx+1. Padding symbols are ignored.
            # cf. fairseq's `utils.make_positions`
            seq_length = input_ids.size(1)
            start = self.padding_idx + 1
            position_ids = torch.arange(
                start, start + seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        return super(RobertaEmbeddings, self).forward(
            input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in
`RoBERTa: A Robustly Optimized BERT Pretraining Approach`_
by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
Veselin Stoyanov. It is based on Google's BERT model released in 2018.
It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
objective and training with much larger mini-batches and learning rates.
This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained
models.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:
https://arxiv.org/abs/1907.11692
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, RoBERTa input sequence should be formatted with <s> and </s> tokens as follows:
(a) For sequence pairs:
``tokens: <s> Is this Jacksonville ? </s> </s> No it is not . </s>``
(b) For single sequences:
``tokens: <s> the dog is hairy . </s>``
Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with
the ``add_special_tokens`` parameter set to ``True``.
RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional` need to be trained) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Optional segment token indices to indicate first and second portions of the inputs.
This embedding matrice is not trained (not pretrained during RoBERTa pretraining), you will have to train it
during finetuning.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1[``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
                      ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaModel(BertModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaModel.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaModel, self).__init__(config)
        # Swap in RoBERTa-style embeddings (padding_idx=1, shifted positions).
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
        # If the first-column ids are not all 0, the input probably lacks the
        # leading special token: warn, but still run the model.
        starts_with_specials = input_ids[:, 0].sum().item() == 0
        if not starts_with_specials:
            logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. "
                           "This model requires special tokens in order to work. "
                           "Please specify add_special_tokens=True in your tokenize.encode()"
                           "or tokenizer.convert_tokens_to_ids().")
        return super(RobertaModel, self).forward(input_ids,
                                                 attention_mask=attention_mask,
                                                 token_type_ids=token_type_ids,
                                                 position_ids=position_ids,
                                                 head_mask=head_mask)
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """,
    ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaForMaskedLM(BertPreTrainedModel):
    r"""
    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring).
        Tokens with index ``-1`` are ignored (masked); the loss is only computed for
        tokens with labels in ``[0, ..., config.vocab_size]``.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape
            ``(batch_size, num_heads, sequence_length, sequence_length)``.
    Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForMaskedLM.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]
    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForMaskedLM, self).__init__(config)
        self.roberta = RobertaModel(config)
        self.lm_head = RobertaLMHead(config)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Share the LM-head decoder weights with the input word embeddings.
            (Cloned rather than shared when exporting to TorchScript, which
            cannot handle parameter sharing.)
        """
        self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                masked_lm_labels=None):
        encoder_outputs = self.roberta(input_ids,
                                       attention_mask=attention_mask,
                                       token_type_ids=token_type_ids,
                                       position_ids=position_ids,
                                       head_mask=head_mask)
        prediction_scores = self.lm_head(encoder_outputs[0])
        # Keep hidden states / attentions (if any) after the scores.
        outputs = (prediction_scores,) + encoder_outputs[2:]
        if masked_lm_labels is not None:
            # -1 labels are padding/unmasked positions and are excluded from the loss.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size),
                                      masked_lm_labels.view(-1))
            outputs = (masked_lm_loss,) + outputs
        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class RobertaLMHead(nn.Module):
    """Roberta Head for masked language modeling: dense -> gelu -> LayerNorm,
    then a projection back to vocabulary size with a standalone bias."""

    def __init__(self, config):
        super(RobertaLMHead, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, features, **kwargs):
        hidden = self.layer_norm(gelu(self.dense(features)))
        # Project back to vocabulary size. The bias is a separate parameter
        # because the decoder weight may be tied to the input embeddings.
        return self.decoder(hidden) + self.bias
@add_start_docstrings("""RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaForSequenceClassification(BertPreTrainedModel):
    r"""
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the sequence classification/regression loss.
        Indices should be in ``[0, ..., config.num_labels]``.
        If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
        If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForSequenceClassification.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels
        self.roberta = RobertaModel(config)
        self.classifier = RobertaClassificationHead(config)
        # Fix: initialize all weights the same way the other Roberta* model
        # classes in this file do (see RobertaForMaskedLM / RobertaForMultipleChoice).
        # Without this call the classification head keeps PyTorch's default
        # initialization instead of the config-driven one.
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                labels=None):
        outputs = self.roberta(input_ids,
                               attention_mask=attention_mask,
                               token_type_ids=token_type_ids,
                               position_ids=position_ids,
                               head_mask=head_mask)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        # Keep hidden states / attentions (if any) after the logits.
        outputs = (logits,) + outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                # Single label -> regression with mean-square loss.
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings("""Roberta Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaForMultipleChoice(BertPreTrainedModel):
    r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary; the second
            dimension (`num_choices`) is the number of choices to score.
            Indices can be obtained using :class:`transformers.BertTokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment token indices (``0`` for sentence A, ``1`` for sentence B).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Mask to avoid attending to padding: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected attention heads (``1`` = keep, ``0`` = mask).
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Index of the correct choice in ``[0, ..., num_choices]``.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``:
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape
            ``(batch_size, num_heads, sequence_length, sequence_length)``.
    Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForMultipleChoice.from_pretrained('roberta-base')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForMultipleChoice, self).__init__(config)
        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        num_choices = input_ids.shape[1]

        def _flatten(tensor):
            # Fold the choice dimension into the batch dimension.
            return tensor.view(-1, tensor.size(-1)) if tensor is not None else None

        outputs = self.roberta(_flatten(input_ids),
                               position_ids=_flatten(position_ids),
                               token_type_ids=_flatten(token_type_ids),
                               attention_mask=_flatten(attention_mask),
                               head_mask=head_mask)
        pooled = self.dropout(outputs[1])
        # One score per (example, choice) pair, reshaped back to (batch, choices).
        reshaped_logits = self.classifier(pooled).view(-1, num_choices)
        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = (loss,) + outputs
        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super(RobertaClassificationHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
| 25,678 | 53.52017 | 151 | py |
CLUE | CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_utils.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import os
import json
import six
import copy
from io import open
from .file_utils import cached_path, is_tf_available, is_torch_available
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json'
ADDED_TOKENS_FILE = 'added_tokens.json'
TOKENIZER_CONFIG_FILE = 'tokenizer_config.json'
class PreTrainedTokenizer(object):
""" Base class for all tokenizers.
Handle all the shared methods for tokenization and special tokens as well as methods dowloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size.
- ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, a dictionnary of specific arguments to pass to the ``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the ``from_pretrained()`` method.
Parameters:
- ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id``
- ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id``
- ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id``
- ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
"""
vocab_files_names = {}
pretrained_vocab_files_map = {}
pretrained_init_configuration = {}
max_model_input_sizes = {}
SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token",
"pad_token", "cls_token", "mask_token",
"additional_special_tokens"]
@property
def bos_token(self):
""" Beginning of sentence token (string). Log an error if used while not having been set. """
if self._bos_token is None:
logger.error("Using bos_token, but it is not set yet.")
return self._bos_token
@property
def eos_token(self):
""" End of sentence token (string). Log an error if used while not having been set. """
if self._eos_token is None:
logger.error("Using eos_token, but it is not set yet.")
return self._eos_token
@property
def unk_token(self):
""" Unknown token (string). Log an error if used while not having been set. """
if self._unk_token is None:
logger.error("Using unk_token, but it is not set yet.")
return self._unk_token
@property
def sep_token(self):
""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
if self._sep_token is None:
logger.error("Using sep_token, but it is not set yet.")
return self._sep_token
@property
def pad_token(self):
""" Padding token (string). Log an error if used while not having been set. """
if self._pad_token is None:
logger.error("Using pad_token, but it is not set yet.")
return self._pad_token
@property
def cls_token(self):
""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
if self._cls_token is None:
logger.error("Using cls_token, but it is not set yet.")
return self._cls_token
@property
def mask_token(self):
""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
if self._mask_token is None:
logger.error("Using mask_token, but it is not set yet.")
return self._mask_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
if self._additional_special_tokens is None:
logger.error("Using additional_special_tokens, but it is not set yet.")
return self._additional_special_tokens
    # Plain pass-through setters for the special-token attributes; validation
    # (string / list-of-strings) happens in ``__init__`` for constructor kwargs,
    # direct assignment here is unchecked.
    @bos_token.setter
    def bos_token(self, value):
        """ Set the beginning of sentence token (string). """
        self._bos_token = value
    @eos_token.setter
    def eos_token(self, value):
        """ Set the end of sentence token (string). """
        self._eos_token = value
    @unk_token.setter
    def unk_token(self, value):
        """ Set the unknown token (string). """
        self._unk_token = value
    @sep_token.setter
    def sep_token(self, value):
        """ Set the separation token (string). """
        self._sep_token = value
    @pad_token.setter
    def pad_token(self, value):
        """ Set the padding token (string). """
        self._pad_token = value
    @cls_token.setter
    def cls_token(self, value):
        """ Set the classification token (string). """
        self._cls_token = value
    @mask_token.setter
    def mask_token(self, value):
        """ Set the mask token (string). """
        self._mask_token = value
    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        """ Set the list of additional special tokens (list of strings). """
        self._additional_special_tokens = value
    # Id counterparts of the special-token string properties above. Each one
    # delegates to ``convert_tokens_to_ids``, which subclasses implement, so
    # accessing an unset token logs an error (via the string property) and the
    # resulting id depends on how the subclass handles a None token.
    @property
    def bos_token_id(self):
        """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.bos_token)
    @property
    def eos_token_id(self):
        """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.eos_token)
    @property
    def unk_token_id(self):
        """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.unk_token)
    @property
    def sep_token_id(self):
        """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.sep_token)
    @property
    def pad_token_id(self):
        """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.pad_token)
    @property
    def cls_token_id(self):
        """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.cls_token)
    @property
    def mask_token_id(self):
        """ Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.mask_token)
    @property
    def additional_special_tokens_ids(self):
        """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
        # Note: passing a list here returns a list of ids, one per token.
        return self.convert_tokens_to_ids(self.additional_special_tokens)
def __init__(self, max_len=None, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._additional_special_tokens = []
self.max_len = max_len if max_len is not None else int(1e12)
# Added tokens
self.added_tokens_encoder = {}
self.added_tokens_decoder = {}
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = {}
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == 'additional_special_tokens':
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
else:
assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
setattr(self, key, value)
    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        r"""
        Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
        Args:
            pretrained_model_name_or_path: either:
                - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
                - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force to (re-)download the vocabulary files and override the cached versions if they exists.
            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.
            inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
            kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
        Examples::
            # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
            # Download vocabulary from S3 and cache.
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
            # If the tokenizer uses a single vocabulary file, you can point directly to this file
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
            # You can link tokens to special vocabulary when instantiating
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
            # You should be sure '<unk>' is in the vocabulary when doing that.
            # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
            assert tokenizer.unk_token == '<unk>'
        """
        # Thin public wrapper: all resolution, downloading, caching and
        # instantiation logic lives in ``_from_pretrained``.
        return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
cache_dir = kwargs.pop('cache_dir', None)
force_download = kwargs.pop('force_download', False)
proxies = kwargs.pop('proxies', None)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
init_configuration = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
if cls.pretrained_init_configuration and pretrained_model_name_or_path in cls.pretrained_init_configuration:
init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path]
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path))
# Look for the tokenizer main vocabulary files
for file_id, file_name in cls.vocab_files_names.items():
if os.path.isdir(pretrained_model_name_or_path):
# If a directory is provided we look for the standard filenames
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
else:
# If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file)
full_file_name = pretrained_model_name_or_path
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
vocab_files[file_id] = full_file_name
# Look for the additional tokens files
additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE,
'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE,
'tokenizer_config_file': TOKENIZER_CONFIG_FILE,
}
# If a path to a file was provided, get the parent directory
saved_directory = pretrained_model_name_or_path
if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
saved_directory = os.path.dirname(saved_directory)
for file_id, file_name in additional_files_names.items():
full_file_name = os.path.join(saved_directory, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
vocab_files[file_id] = full_file_name
if all(full_file_name is None for full_file_name in vocab_files.values()):
raise EnvironmentError(
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path or url to a directory containing vocabulary files "
"named {} but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values())))
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
msg = "Couldn't reach server at '{}' to download vocabulary files."
else:
msg = "Model name '{}' was not found in tokenizers model name list ({}). " \
"We assumed '{}' was a path or url to a directory containing vocabulary files " \
"named {}, but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()))
raise EnvironmentError(msg)
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(
file_path, resolved_vocab_files[file_id]))
# Prepare tokenizer initialization kwargs
# Did we saved some inputs and kwargs to reload ?
tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)
if tokenizer_config_file is not None:
init_kwargs = json.load(open(tokenizer_config_file, encoding="utf-8"))
saved_init_inputs = init_kwargs.pop('init_inputs', ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
init_kwargs = init_configuration
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
# wont index sequences longer than the number of positional embeddings
max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
if max_len is not None and isinstance(max_len, (int, float)):
init_kwargs['max_len'] = min(init_kwargs.get('max_len', int(1e12)), max_len)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
if special_tokens_map_file is not None:
special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8"))
for key, value in special_tokens_map.items():
if key not in init_kwargs:
init_kwargs[key] = value
# Instantiate tokenizer.
tokenizer = cls(*init_inputs, **init_kwargs)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
# Add supplementary tokens.
if added_tokens_file is not None:
added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8"))
added_tok_decoder = {v:k for k, v in added_tok_encoder.items()}
tokenizer.added_tokens_encoder.update(added_tok_encoder)
tokenizer.added_tokens_decoder.update(added_tok_decoder)
return tokenizer
def save_pretrained(self, save_directory):
""" Save the tokenizer vocabulary files together with:
- added tokens,
- special-tokens-to-class-attributes-mapping,
- tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).
This won't save modifications other than (added tokens and special token mapping) you may have
applied to the tokenizer after the instantiation (e.g. modifying tokenizer.do_lower_case after creation).
This method make sure the full tokenizer can then be re-loaded using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
if not os.path.isdir(save_directory):
logger.error("Saving directory ({}) should be a directory".format(save_directory))
return
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
with open(added_tokens_file, 'w', encoding='utf-8') as f:
if self.added_tokens_encoder:
out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
else:
out_str = u"{}"
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
def save_vocabulary(self, save_directory):
""" Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
raise NotImplementedError
def vocab_size(self):
""" Size of the base vocabulary (without the added tokens) """
raise NotImplementedError
def __len__(self):
""" Size of the full vocabulary with the added tokens """
return self.vocab_size + len(self.added_tokens_encoder)
def add_tokens(self, new_tokens):
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if not new_tokens:
return 0
to_add_tokens = []
for token in new_tokens:
assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode))
if token != self.unk_token and \
self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token) and \
token not in to_add_tokens:
to_add_tokens.append(token)
logger.info("Adding %s to the vocabulary", token)
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
added_tok_decoder = {v:k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
return len(to_add_tokens)
def num_added_tokens(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Note:
This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
inside your training loop.
Args:
pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
number of added tokens in the case of a single sequence if set to False.
Returns:
Number of tokens added to sequences
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
def add_special_tokens(self, special_tokens_dict):
"""
Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
to class attributes. If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- special tokens are carefully handled by the tokenizer (they are never split)
- you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>')
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES
if key == 'additional_special_tokens':
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
added_tokens += self.add_tokens(value)
else:
assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
added_tokens += self.add_tokens([value])
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
return added_tokens
    def tokenize(self, text, **kwargs):
        """ Converts a string in a sequence of tokens (string), using the tokenizer.
            Split in words for word-based vocabulary or sub-words for sub-word-based
            vocabularies (BPE/SentencePieces/WordPieces).
            Take care of added tokens.
        """
        def split_on_token(tok, text):
            # Split ``text`` around every occurrence of the single token
            # ``tok``, emitting ``tok`` itself as a standalone element and
            # stripping whitespace from the surrounding fragments.
            result = []
            split_text = text.split(tok)
            for i, sub_text in enumerate(split_text):
                sub_text = sub_text.strip()
                if i == 0 and not sub_text:
                    # ``text`` starts with ``tok``: keep only the token.
                    result += [tok]
                elif i == len(split_text) - 1:
                    # Last fragment: no ``tok`` follows it.
                    if sub_text:
                        result += [sub_text]
                    else:
                        pass
                else:
                    # Middle fragment: the text (if non-empty), then ``tok``.
                    if sub_text:
                        result += [sub_text]
                    result += [tok]
            return result
        def split_on_tokens(tok_list, text):
            # Split ``text`` on each token of ``tok_list`` in turn, then run
            # the model-specific ``_tokenize`` on the remaining fragments.
            # Added/special tokens are kept verbatim and never sub-tokenized.
            if not text:
                return []
            if not tok_list:
                return self._tokenize(text, **kwargs)
            tokenized_text = []
            text_list = [text]
            for tok in tok_list:
                tokenized_text = []
                for sub_text in text_list:
                    if sub_text not in self.added_tokens_encoder \
                            and sub_text not in self.all_special_tokens:
                        tokenized_text += split_on_token(tok, sub_text)
                    else:
                        tokenized_text += [sub_text]
                text_list = tokenized_text
            return sum((self._tokenize(token, **kwargs) if token not \
                in self.added_tokens_encoder and token not in self.all_special_tokens \
                else [token] for token in tokenized_text), [])
        # Split off added and special tokens first so the model-specific
        # tokenizer never sees (and never splits) them.
        added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
        tokenized_text = split_on_tokens(added_tokens, text)
        return tokenized_text
def _tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
""" Converts a single token, or a sequence of tokens, (str/unicode) in a single integer id
(resp. a sequence of ids), using the vocabulary.
"""
if tokens is None:
return None
if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
if len(ids) > self.max_len:
logger.warning("Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.max_len))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
    def _convert_token_to_id(self, token):
        """Map a token (str) to its id in the base vocabulary. Subclasses must override."""
        raise NotImplementedError
def encode(self,
text,
text_pair=None,
add_special_tokens=False,
max_length=None,
stride=0,
truncation_strategy='longest_first',
return_tensors=None,
**kwargs):
"""
Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text: The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair: Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length: if set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride: if set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
**kwargs: passed to the `self.tokenize()` method
"""
encoded_inputs = self.encode_plus(text,
text_pair=text_pair,
max_length=max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors,
**kwargs)
return encoded_inputs["input_ids"]
def encode_plus(self,
text,
text_pair=None,
add_special_tokens=False,
max_length=None,
stride=0,
truncation_strategy='longest_first',
return_tensors=None,
**kwargs):
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional informations:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
text: The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair: Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length: if set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride: if set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
**kwargs: passed to the `self.tokenize()` method
"""
def get_input_ids(text):
if isinstance(text, six.string_types):
return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], six.string_types):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError("Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.")
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(first_ids,
pair_ids=second_ids,
max_length=max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors)
    def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=False, stride=0,
                          truncation_strategy='longest_first', return_tensors=None):
        """
        Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
        It adds special tokens, truncates
        sequences if overflowing while taking into account the special tokens and manages a window stride for
        overflowing tokens
        Args:
            ids: list of tokenized input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
            add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            stride: window stride for overflowing tokens. Can be useful for edge effect removal when using sequential
                list of inputs.
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
                    starting from the longest one at each token (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
                or PyTorch torch.Tensor instead of a list of python integers.
        Return:
            A Dictionary of shape::
                {
                    input_ids: list[int],
                    overflowing_tokens: list[int] if a ``max_length`` is specified, else None
                    special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True``
                }
            With the fields:
                ``input_ids``: list of tokens to be fed to a model
                ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
                ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
                tokens and 1 specifying sequence tokens.
        """
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0
        encoded_inputs = {}
        # Budget for the special tokens that will be added below when
        # estimating how much must be truncated.
        total_len = len_ids + len_pair_ids + (self.num_added_tokens(pair=pair) if add_special_tokens else 0)
        if max_length and total_len > max_length:
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(ids, pair_ids=pair_ids,
                                                                        num_tokens_to_remove=total_len-max_length,
                                                                        truncation_strategy=truncation_strategy,
                                                                        stride=stride)
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["num_truncated_tokens"] = total_len - max_length
        if add_special_tokens:
            # Delegate special-token layout and segment ids to the subclass.
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
        # Optionally wrap outputs in framework tensors (batch dimension of 1).
        if return_tensors == 'tf' and is_tf_available():
            sequence = tf.constant([sequence])
            token_type_ids = tf.constant([token_type_ids])
        elif return_tensors == 'pt' and is_torch_available():
            sequence = torch.tensor([sequence])
            token_type_ids = torch.tensor([token_type_ids])
        elif return_tensors is not None:
            logger.warning("Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(return_tensors))
        encoded_inputs["input_ids"] = sequence
        encoded_inputs["token_type_ids"] = token_type_ids
        # Final safety truncation.
        # NOTE(review): if this branch fired with add_special_tokens=False the
        # "special_tokens_mask" key would be absent and raise KeyError; in
        # practice the truncate_sequences call above keeps the length within
        # max_length — confirm num_added_tokens cannot under-estimate.
        if max_length and len(encoded_inputs["input_ids"]) > max_length:
            encoded_inputs["input_ids"] = encoded_inputs["input_ids"][:max_length]
            encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"][:max_length]
            encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"][:max_length]
        return encoded_inputs
def truncate_sequences(self, ids, pair_ids=None, num_tokens_to_remove=0, truncation_strategy='longest_first', stride=0):
"""Truncates a sequence pair in place to the maximum length.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences).
Overflowing tokens only contains overflow from the first sequence.
- 'only_first': Only truncate the first sequence. raise an error if the first sequence is shorter or equal to than num_tokens_to_remove.
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if truncation_strategy == 'longest_first':
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == 'only_first':
assert len(ids) > num_tokens_to_remove
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
elif truncation_strategy == 'only_second':
assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif truncation_strategy == 'do_not_truncate':
raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.")
else:
raise ValueError("Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']")
return (ids, pair_ids, overflowing_tokens)
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
logger.warning("This tokenizer does not make use of special tokens.")
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
A RoBERTa sequence has the following format:
single sequence: <s> X </s>
pair of sequences: <s> A </s></s> B </s>
"""
logger.warning("This tokenizer does not make use of special tokens. Input is returned with no modification.")
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
already_has_special_tokens: (default False) Set to True if the token list is already formated with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.
"""
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
""" Converts a single index or a sequence of indices (integers) in a token "
(resp.) a sequence of tokens (str/unicode), using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
    def _convert_id_to_token(self, index):
        """Map an id (int) to its token (str) in the base vocabulary. Subclasses must override."""
        raise NotImplementedError
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string.
The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
but we often want to remove sub-word tokenization artifacts at the same time.
"""
return ' '.join(self.convert_ids_to_tokens(tokens))
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
"""
Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods.
skip_special_tokens: if set to True, will replace special tokens.
clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separatly for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(" " + token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = ''.join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
@property
def special_tokens_map(self):
""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self):
""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""
all_toks = self.all_special_tokens
all_ids = list(self._convert_token_to_id(t) for t in all_toks)
return all_ids
@staticmethod
def clean_up_tokenization(out_string):
""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""
out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
return out_string
| 54,979 | 50.431244 | 372 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/google_albert_pytorch_modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import print_function
import copy
import json
import math
import logging
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def fast_gelu(x):
return x * torch.sigmoid(1.702 * x)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": fast_gelu, "relu": torch.relu, "swish": swish}
class AlbertConfig(object):
"""Configuration for `AlbertModel`.
The default settings match the configuration of model `albert_xxlarge`.
"""
def __init__(self,
vocab_size,
embedding_size=128,
hidden_size=4096,
num_hidden_layers=12,
num_hidden_groups=1,
num_attention_heads=64,
intermediate_size=16384,
inner_group_num=1,
down_scale_factor=1,
hidden_act="gelu",
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs AlbertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.
embedding_size: size of voc embeddings.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_hidden_groups: Number of group for the hidden layers, parameters in
the same group are shared.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
inner_group_num: int, number of inner repetition of attention and ffn.
down_scale_factor: float, the scale to apply
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`AlbertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.inner_group_num = inner_group_num
self.down_scale_factor = down_scale_factor
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `AlbertConfig` from a Python dictionary of parameters."""
config = AlbertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `AlbertConfig` from a json file of parameters."""
with open(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-5):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class AlbertEmbeddings(nn.Module):
    """ALBERT input embeddings: word + position + token-type, then LayerNorm and dropout."""

    def __init__(self, config):
        super(AlbertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
        # self.LayerNorm keeps the TF variable name so TensorFlow checkpoints load cleanly.
        self.LayerNorm = BertLayerNorm(config.embedding_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        if position_ids is None:
            # Default positions are 0..seq_len-1, broadcast over the batch.
            position_ids = torch.arange(input_ids.size(1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (no output projection)."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head) -> (batch, heads, seq, head_size)."""
        batch_shape = x.size()[:-1]
        x = x.view(*batch_shape, self.num_attention_heads, self.attention_head_size)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        # Raw scores, scaled by sqrt(head_size) and shifted by the additive mask
        # (mask is 0 for kept positions, -10000 for masked ones).
        scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = nn.Softmax(dim=-1)(scores)
        context = torch.matmul(probs, value_layer)
        # Back to (batch, seq, all_head_size).
        context = context.permute(0, 2, 1, 3).contiguous()
        return context.view(*context.size()[:-2], self.all_head_size)
class BertSelfOutput(nn.Module):
    """Projects attention output and adds the residual connection (LayerNorm is applied by the caller)."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return projected + input_tensor
class BertAttention(nn.Module):
    """Self-attention followed by its residual output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertOutput(nn.Module):
    """Contracting half of the feed-forward block, with dropout and residual add."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return contracted + input_tensor
class BertIntermediate(nn.Module):
    """Feed-forward expansion with activation, then the contracting BertOutput."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # NOTE(review): config.hidden_act is assumed to be a string key of ACT2FN here
        # (the isinstance branch was commented out upstream) — confirm configs never
        # pass a callable.
        self.intermediate_act_fn = ACT2FN[config.hidden_act]
        self.output = BertOutput(config)

    def forward(self, input_tensor):
        expanded = self.intermediate_act_fn(self.dense(input_tensor))
        return self.output(expanded, input_tensor)
class BertFF(nn.Module):
    """Thin wrapper exposing BertIntermediate as the full position-wise feed-forward block."""

    def __init__(self, config):
        super(BertFF, self).__init__()
        self.intermediate = BertIntermediate(config)

    def forward(self, hidden_states):
        return self.intermediate(hidden_states)
class AlbertLayer(nn.Module):
    """One (shared) transformer layer: attention + LayerNorm, feed-forward + LayerNorm."""

    def __init__(self, config):
        super(AlbertLayer, self).__init__()
        self.attention_1 = BertAttention(config)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.ffn_1 = BertFF(config)
        self.LayerNorm_1 = BertLayerNorm(config.hidden_size, eps=1e-5)

    def forward(self, hidden_states, attention_mask):
        # Post-norm arrangement: normalize after each sub-block.
        normed = self.LayerNorm(self.attention_1(hidden_states, attention_mask))
        return self.LayerNorm_1(self.ffn_1(normed))
class AlbertEncoder(nn.Module):
    """Maps embeddings to hidden size, then applies ONE AlbertLayer repeatedly
    (ALBERT's cross-layer parameter sharing)."""

    def __init__(self, config):
        super(AlbertEncoder, self).__init__()
        self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
        self.num_hidden_layers = config.num_hidden_layers
        # A single layer instance is reused for every pass.
        self.transformer = AlbertLayer(config)

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        hidden_states = self.embedding_hidden_mapping_in(hidden_states)
        all_encoder_layers = []
        for _ in range(self.num_hidden_layers):
            hidden_states = self.transformer(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            # Only the final hidden state is kept.
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
    """Pools the sequence by passing the first ([CLS]) token through a tanh dense layer."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" is simply taking the hidden state of the first token.
        cls_token = hidden_states[:, 0]
        return self.activation(self.dense(cls_token))
class AlbertModel(nn.Module):
    """Bare ALBERT encoder: embeddings -> shared-layer encoder -> [CLS] pooler."""
    def __init__(self, config):
        super(AlbertModel, self).__init__()
        self.embeddings = AlbertEmbeddings(config)
        self.encoder = AlbertEncoder(config)
        self.pooler = BertPooler(config)
        self.config = config
        # Recursively initialize the weights of every submodule.
        self.apply(self.init_bert_weights)
    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            # NOTE(review): LayerNorm weight/bias are drawn from a normal here instead of
            # the conventional ones/zeros init — presumably intentional, but worth confirming.
            module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        """Run the encoder.

        Args:
            input_ids: LongTensor of token ids, shape (batch, seq_len).
            token_type_ids: Optional segment ids; defaults to all zeros.
            attention_mask: Optional mask (1 = attend, 0 = masked); defaults to all ones.
            output_all_encoded_layers: If True, return every layer's hidden states.

        Returns:
            (encoded_layers, pooled_output) — encoded_layers is a list of tensors when
            output_all_encoded_layers is True, otherwise just the last layer's tensor.
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the single-element list to the tensor itself.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class AlbertForPreTraining(nn.Module):
    """Thin wrapper exposing the bare AlbertModel under the `bert` attribute."""

    def __init__(self, config):
        super(AlbertForPreTraining, self).__init__()
        self.bert = AlbertModel(config)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        # Pure pass-through; no pre-training heads are attached here.
        return self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers)
class MRC_finetune(nn.Module):
    """Span-prediction head: two independent dense layers scoring start/end per token."""

    def __init__(self, config):
        super(MRC_finetune, self).__init__()
        self.start_dense = nn.Linear(config.hidden_size, 1)
        self.end_dense = nn.Linear(config.hidden_size, 1)

    def forward(self, input_tensor):
        start_scores = self.start_dense(input_tensor)
        end_scores = self.end_dense(input_tensor)
        return start_scores, end_scores
class AlbertForMRC(nn.Module):
    """ALBERT with a span-extraction (machine reading comprehension) head."""
    def __init__(self, config):
        super(AlbertForMRC, self).__init__()
        self.bert = AlbertModel(config)
        self.finetune_mrc = MRC_finetune(config)
        self.config = config
        # NOTE(review): this re-applies init to the whole model, including the just-built
        # AlbertModel (same init as AlbertModel.init_bert_weights) — redundant but harmless.
        self.apply(self.init_bert_weights)
    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                start_positions=None, end_positions=None):
        """Return the averaged start/end cross-entropy loss when gold positions are
        given (training), otherwise the (start_logits, end_logits) pair (inference).
        """
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        start_logits, end_logits = self.finetune_mrc(sequence_output)
        # (batch, seq, 1) -> (batch, seq)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        else:
            return start_logits, end_logits
class AlbertForMultipleChoice(nn.Module):
    """ALBERT with a multiple-choice head: each choice is scored independently and
    the scores are re-grouped into (batch, num_choices) logits."""
    def __init__(self, config, num_choices=2):
        super(AlbertForMultipleChoice, self).__init__()
        self.config = config
        self.num_choices = num_choices
        self.bert = AlbertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One scalar score per (example, choice) pair.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)
    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, return_logits=False):
        """Inputs are shaped (batch, num_choices, seq_len); choices are flattened into
        the batch dimension before encoding. Returns the loss when `labels` is given
        (plus logits if `return_logits`), otherwise the reshaped logits.
        """
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
                                     output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # (batch * num_choices, 1) -> (batch, num_choices)
        reshaped_logits = logits.view(-1, self.num_choices)
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            if return_logits:
                return loss, reshaped_logits
            else:
                return loss
        else:
            return reshaped_logits
| 22,556 | 42.885214 | 119 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/run_c3.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import json
import logging
import os
import pickle
import random
import numpy as np
import torch
from google_albert_pytorch_modeling import AlbertConfig, AlbertForMultipleChoice
from pytorch_modeling import BertConfig, BertForMultipleChoice, ALBertConfig, ALBertForMultipleChoice
from tools import official_tokenization as tokenization
from tools import utils
from tools.pytorch_optimization import get_optimization, warmup_linear
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
# Number of answer choices per C3 question (short options are padded up to 4).
n_class = 4
# NOTE(review): these two flags are defined but not referenced in the visible code —
# presumably leftovers from an earlier experiment; confirm before removing.
reverse_order = False
sa_step = False
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None, text_c=None):
        """Build an example.

        Args:
            guid: Unique id for the example.
            text_a: Untokenized text of the first sequence; for single-sequence
                tasks only this one is required.
            text_b: Optional untokenized text of the second sequence (pair tasks).
            label: Optional label string; set for train/dev, absent for test.
            text_c: Optional untokenized text of a third sequence.
        """
        self.guid, self.label = guid, label
        self.text_a, self.text_b, self.text_c = text_a, text_b, text_c
class InputFeatures(object):
    """A single set of numeric model inputs for one (context, choice) pair."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file.

        Args:
            input_file: Path to the TSV file.
            quotechar: Optional quote character forwarded to csv.reader.

        Returns:
            A list of rows, each a list of column strings.
        """
        # Fix: read as UTF-8 explicitly. The CLUE/C3 data is Chinese UTF-8, and
        # relying on the platform default encoding breaks on e.g. Windows (cp936).
        with open(input_file, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            return [line for line in reader]
class c3Processor(DataProcessor):
    """Loads the C3 multiple-choice reading-comprehension data (both the dialogue
    "d" and mixed-genre "m" subtasks) and flattens it into per-question rows."""
    def __init__(self, data_dir):
        # self.D[0] / self.D[1] / self.D[2] hold the flattened train/dev/test rows.
        self.D = [[], [], []]
        self.data_dir = data_dir
        for sid in range(3):
            data = []
            for subtask in ["d", "m"]:
                with open(self.data_dir + "/c3-" + subtask + "-" + ["train.json", "dev.json", "test.json"][sid],
                          "r", encoding="utf8") as f:
                    data += json.load(f)
            # Only the training split is shuffled.
            if sid == 0:
                random.shuffle(data)
            for i in range(len(data)):
                for j in range(len(data[i][1])):
                    # Row layout: [context, question, choice0..choice3, answer].
                    d = ['\n'.join(data[i][0]).lower(), data[i][1][j]["question"].lower()]
                    for k in range(len(data[i][1][j]["choice"])):
                        d += [data[i][1][j]["choice"][k].lower()]
                    for k in range(len(data[i][1][j]["choice"]), 4):
                        d += ['无效答案']  # Some C3 questions have fewer than 4 options; padding with a dummy choice helps training converge stably.
                    d += [data[i][1][j]["answer"].lower()]
                    self.D[sid] += [d]
    def get_train_examples(self):
        """See base class."""
        return self._create_examples(self.D[0], "train")
    def get_test_examples(self):
        """See base class."""
        return self._create_examples(self.D[2], "test")
    def get_dev_examples(self):
        """See base class."""
        return self._create_examples(self.D[1], "dev")
    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]
    def _create_examples(self, data, set_type):
        """Creates examples for the training and dev sets."""
        # NOTE(review): the pickle cache is never invalidated — delete the .pkl files
        # after changing preprocessing, or stale examples will be loaded.
        cache_dir = os.path.join(self.data_dir, set_type + '_examples.pkl')
        if os.path.exists(cache_dir):
            examples = pickle.load(open(cache_dir, 'rb'))
        else:
            examples = []
            for (i, d) in enumerate(data):
                answer = -1
                # data[i] has 7 fields: [0] context, [1] question, [2..5] choices, [6] answer.
                for k in range(4):
                    if data[i][2 + k] == data[i][6]:
                        answer = str(k)
                label = tokenization.convert_to_unicode(answer)
                # Emit one example per choice; all four share the same gold label.
                for k in range(4):
                    guid = "%s-%s-%s" % (set_type, i, k)
                    text_a = tokenization.convert_to_unicode(data[i][0])
                    text_b = tokenization.convert_to_unicode(data[i][k + 2])
                    text_c = tokenization.convert_to_unicode(data[i][1])
                    examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, text_c=text_c))
            with open(cache_dir, 'wb') as w:
                pickle.dump(examples, w)
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Examples are tokenized as [CLS] context [SEP] question [SEP] choice [SEP] and
    the resulting features are grouped into lists of n_class (one per choice),
    relying on the examples arriving in choice order.
    """
    print("#examples", len(examples))
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    features = [[]]
    for (ex_index, example) in enumerate(tqdm(examples)):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = tokenizer.tokenize(example.text_b)
        tokens_c = tokenizer.tokenize(example.text_c)
        # "- 4" reserves room for [CLS] and the three [SEP] tokens added below.
        _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
        # The question (text_c) is prepended to the choice (text_b) as one segment.
        tokens_b = tokens_c + ["[SEP]"] + tokens_b
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        # Log the first few examples for manual inspection.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))
        features[-1].append(
            InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_id=label_id))
        # Start a new group once all n_class choices of a question are collected.
        if len(features[-1]) == n_class:
            features.append([])
    # Drop the trailing empty group, if any.
    if len(features[-1]) == 0:
        features = features[:-1]
    print('#features', len(features))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_length):
"""Truncates a sequence tuple in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)
if total_length <= max_length:
break
if len(tokens_a) >= len(tokens_b) and len(tokens_a) >= len(tokens_c):
tokens_a.pop()
elif len(tokens_b) >= len(tokens_a) and len(tokens_b) >= len(tokens_c):
tokens_b.pop()
else:
tokens_c.pop()
def accuracy(out, labels):
    """Count how many rows of `out` argmax to the matching entry of `labels`."""
    predictions = np.argmax(out, axis=1)
    return np.sum(predictions == labels)
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--gpu_ids",
default='0',
type=str,
required=True)
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--task_name",
default='c3',
type=str,
required=True)
parser.add_argument("--bert_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--vocab_file",
default=None,
type=str,
required=True,
help="The vocabulary file that the BERT model was trained on.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--init_checkpoint",
default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/pytorch_model.pth',
type=str,
help="Initial checkpoint (usually from a pre-trained BERT model).")
parser.add_argument("--do_lower_case",
default=True,
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size",
default=16,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=16,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=2e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--schedule",
default='warmup_linear',
type=str,
help='schedule')
parser.add_argument("--weight_decay_rate",
default=0.01,
type=float,
help='weight_decay_rate')
parser.add_argument('--clip_norm',
type=float,
default=1.0)
parser.add_argument("--num_train_epochs",
default=8.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--float16',
action='store_true',
default=False)
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=422,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument('--setting_file', type=str, default='setting.txt')
parser.add_argument('--log_file', type=str, default='log.txt')
args = parser.parse_args()
args.setting_file = os.path.join(args.output_dir, args.setting_file)
args.log_file = os.path.join(args.output_dir, args.log_file)
os.makedirs(args.output_dir, exist_ok=True)
with open(args.setting_file, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
print('------------ Options -------------')
for k in args.__dict__:
v = args.__dict__[k]
opt_file.write('%s: %s\n' % (str(k), str(v)))
print('%s: %s' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
print('------------ End -------------')
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
if os.path.exists(args.log_file):
os.remove(args.log_file)
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
processor = c3Processor(args.data_dir)
label_list = processor.get_labels()
tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
train_examples = None
num_train_steps = None
if args.do_train:
train_examples = processor.get_train_examples()
num_train_steps = int(len(train_examples) / n_class / args.train_batch_size /
args.gradient_accumulation_steps * args.num_train_epochs)
if 'albert' in args.bert_config_file:
if 'google' in args.bert_config_file:
bert_config = AlbertConfig.from_json_file(args.bert_config_file)
model = AlbertForMultipleChoice(bert_config, num_choices=n_class)
else:
bert_config = ALBertConfig.from_json_file(args.bert_config_file)
model = ALBertForMultipleChoice(bert_config, num_choices=n_class)
else:
bert_config = BertConfig.from_json_file(args.bert_config_file)
model = BertForMultipleChoice(bert_config, num_choices=n_class)
if args.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length {} because the BERT model was only trained up to sequence length {}".format(
args.max_seq_length, bert_config.max_position_embeddings))
if args.init_checkpoint is not None:
utils.torch_show_all_params(model)
utils.torch_init_model(model, args.init_checkpoint)
if args.float16:
model.half()
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
optimizer = get_optimization(model=model,
float16=args.float16,
learning_rate=args.learning_rate,
total_steps=num_train_steps,
schedule=args.schedule,
warmup_rate=args.warmup_proportion,
max_grad_norm=args.clip_norm,
weight_decay_rate=args.weight_decay_rate,
opt_pooler=True) # multi_choice must update pooler
global_step = 0
eval_dataloader = None
if args.do_eval:
eval_examples = processor.get_dev_examples()
feature_dir = os.path.join(args.data_dir, 'dev_features{}.pkl'.format(args.max_seq_length))
if os.path.exists(feature_dir):
eval_features = pickle.load(open(feature_dir, 'rb'))
else:
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer)
with open(feature_dir, 'wb') as w:
pickle.dump(eval_features, w)
input_ids = []
input_mask = []
segment_ids = []
label_id = []
for f in eval_features:
input_ids.append([])
input_mask.append([])
segment_ids.append([])
for i in range(n_class):
input_ids[-1].append(f[i].input_ids)
input_mask[-1].append(f[i].input_mask)
segment_ids[-1].append(f[i].segment_ids)
label_id.append(f[0].label_id)
all_input_ids = torch.tensor(input_ids, dtype=torch.long)
all_input_mask = torch.tensor(input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
all_label_ids = torch.tensor(label_id, dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
if args.do_train:
best_accuracy = 0
feature_dir = os.path.join(args.data_dir, 'train_features{}.pkl'.format(args.max_seq_length))
if os.path.exists(feature_dir):
train_features = pickle.load(open(feature_dir, 'rb'))
else:
train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer)
with open(feature_dir, 'wb') as w:
pickle.dump(train_features, w)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
input_ids = []
input_mask = []
segment_ids = []
label_id = []
for f in train_features:
input_ids.append([])
input_mask.append([])
segment_ids.append([])
for i in range(n_class):
input_ids[-1].append(f[i].input_ids)
input_mask[-1].append(f[i].input_mask)
segment_ids[-1].append(f[i].segment_ids)
label_id.append(f[0].label_id)
all_input_ids = torch.tensor(input_ids, dtype=torch.long)
all_input_mask = torch.tensor(input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
all_label_ids = torch.tensor(label_id, dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size,
drop_last=True)
steps_per_epoch = int(num_train_steps / args.num_train_epochs)
for ie in range(int(args.num_train_epochs)):
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
with tqdm(total=int(steps_per_epoch), desc='Epoch %d' % (ie + 1)) as pbar:
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss = model(input_ids, segment_ids, input_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
if args.float16:
optimizer.backward(loss)
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used and handles this automatically
lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_steps,
args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
else:
loss.backward()
nb_tr_examples += input_ids.size(0)
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step() # We have accumulated enought gradients
model.zero_grad()
global_step += 1
nb_tr_steps += 1
pbar.set_postfix({'loss': '{0:1.5f}'.format(tr_loss / (nb_tr_steps + 1e-5))})
pbar.update(1)
if args.do_eval:
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
logits_all = []
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids, return_logits=True)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.cpu().numpy()
for i in range(len(logits)):
logits_all += [logits[i]]
tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
if args.do_train:
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss / nb_tr_steps}
else:
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy}
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
with open(args.log_file, 'a') as aw:
aw.write("-------------------global steps:{}-------------------\n".format(global_step))
aw.write(str(json.dumps(result, indent=2)) + '\n')
if eval_accuracy >= best_accuracy:
torch.save(model.state_dict(), os.path.join(args.output_dir, "model_best.pt"))
best_accuracy = eval_accuracy
model.load_state_dict(torch.load(os.path.join(args.output_dir, "model_best.pt")))
torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))
model.load_state_dict(torch.load(os.path.join(args.output_dir, "model.pt")))
if args.do_eval:
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
logits_all = []
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids, return_logits=True)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.cpu().numpy()
for i in range(len(logits)):
logits_all += [logits[i]]
tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy}
output_eval_file = os.path.join(args.output_dir, "results_dev.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
output_eval_file = os.path.join(args.output_dir, "logits_dev.txt")
with open(output_eval_file, "w") as f:
for i in range(len(logits_all)):
for j in range(len(logits_all[i])):
f.write(str(logits_all[i][j]))
if j == len(logits_all[i]) - 1:
f.write("\n")
else:
f.write(" ")
test_examples = processor.get_test_examples()
feature_dir = os.path.join(args.data_dir, 'test_features{}.pkl'.format(args.max_seq_length))
if os.path.exists(feature_dir):
test_features = pickle.load(open(feature_dir, 'rb'))
else:
test_features = convert_examples_to_features(test_examples, label_list, args.max_seq_length, tokenizer)
with open(feature_dir, 'wb') as w:
pickle.dump(test_features, w)
logger.info("***** Running testing *****")
logger.info(" Num examples = %d", len(test_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
input_ids = []
input_mask = []
segment_ids = []
label_id = []
for f in test_features:
input_ids.append([])
input_mask.append([])
segment_ids.append([])
for i in range(n_class):
input_ids[-1].append(f[i].input_ids)
input_mask[-1].append(f[i].input_mask)
segment_ids[-1].append(f[i].segment_ids)
label_id.append(f[0].label_id)
all_input_ids = torch.tensor(input_ids, dtype=torch.long)
all_input_mask = torch.tensor(input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
all_label_ids = torch.tensor(label_id, dtype=torch.long)
test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
test_sampler = SequentialSampler(test_data)
else:
test_sampler = DistributedSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
model.eval()
test_loss, test_accuracy = 0, 0
nb_test_steps, nb_test_examples = 0, 0
logits_all = []
for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_test_loss, logits = model(input_ids, segment_ids, input_mask, label_ids, return_logits=True)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
for i in range(len(logits)):
logits_all += [logits[i]]
tmp_test_accuracy = accuracy(logits, label_ids.reshape(-1))
test_loss += tmp_test_loss.mean().item()
test_accuracy += tmp_test_accuracy
nb_test_examples += input_ids.size(0)
nb_test_steps += 1
test_loss = test_loss / nb_test_steps
test_accuracy = test_accuracy / nb_test_examples
result = {'test_loss': test_loss,
'test_accuracy': test_accuracy}
output_test_file = os.path.join(args.output_dir, "results_test.txt")
with open(output_test_file, "w") as writer:
logger.info("***** Test results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
output_test_file = os.path.join(args.output_dir, "logits_test.txt")
with open(output_test_file, "w") as f:
for i in range(len(logits_all)):
for j in range(len(logits_all[i])):
f.write(str(logits_all[i][j]))
if j == len(logits_all[i]) - 1:
f.write("\n")
else:
f.write(" ")
# the test submission order can't be changed
submission_test = os.path.join(args.output_dir, "submission_test.json")
test_preds = [int(np.argmax(logits_)) for logits_ in logits_all]
with open(submission_test, "w") as f:
json.dump(test_preds, f)
# Script entry point: only run main() when executed directly, so the module
# can be imported without side effects.
if __name__ == "__main__":
    main()
| 34,700 | 41.061818 | 120 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/test_mrc.py | import argparse
import collections
import json
import os
from glob import glob
import torch
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
from pytorch_modeling import BertConfig, BertForQuestionAnswering, ALBertConfig, ALBertForQA
from google_albert_pytorch_modeling import AlbertConfig, AlbertForMRC
from tools import official_tokenization as tokenization
from tools import utils
def test(model, args, eval_examples, eval_features, device):
    """Run inference over the cached test features and dump predictions.

    Batches every feature through ``model`` with gradients disabled, collects
    the per-example start/end logits, and hands them to ``write_predictions``,
    which writes both the final answers and the n-best candidates.
    """
    print("***** Eval *****")
    RawResult = collections.namedtuple("RawResult",
                                       ["unique_id", "start_logits", "end_logits"])
    output_prediction_file = os.path.join(args.output_dir, args.output_file)
    output_nbest_file = output_prediction_file.replace('predictions', 'nbest')

    # Pack the cached features into tensors; the running index lets us map a
    # batch row back to its source feature after the forward pass.
    ids = torch.tensor([feat['input_ids'] for feat in eval_features], dtype=torch.long)
    mask = torch.tensor([feat['input_mask'] for feat in eval_features], dtype=torch.long)
    seg = torch.tensor([feat['segment_ids'] for feat in eval_features], dtype=torch.long)
    feature_index = torch.arange(ids.size(0), dtype=torch.long)

    loader = DataLoader(TensorDataset(ids, mask, seg, feature_index),
                        batch_size=args.n_batch, shuffle=False)

    model.eval()
    all_results = []
    print("Start evaluating")
    for batch_ids, batch_mask, batch_seg, batch_index in tqdm(loader, desc="Evaluating"):
        batch_ids = batch_ids.to(device)
        batch_mask = batch_mask.to(device)
        batch_seg = batch_seg.to(device)
        with torch.no_grad():
            start_out, end_out = model(batch_ids, batch_seg, batch_mask)
        for row, feat_idx in enumerate(batch_index):
            feature = eval_features[feat_idx.item()]
            all_results.append(RawResult(
                unique_id=int(feature['unique_id']),
                start_logits=start_out[row].detach().cpu().tolist(),
                end_logits=end_out[row].detach().cpu().tolist()))

    write_predictions(eval_examples, eval_features, all_results,
                      n_best_size=args.n_best, max_answer_length=args.max_ans_length,
                      do_lower_case=True, output_prediction_file=output_prediction_file,
                      output_nbest_file=output_nbest_file)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_ids', type=str, default='0')
    parser.add_argument('--task_name', type=str, required=True, default='cmrc2018')

    # inference parameters
    parser.add_argument('--n_batch', type=int, default=32)
    parser.add_argument('--float16', action='store_true', default=False)  # only sm >= 7.0 (tensorcores)
    parser.add_argument('--max_ans_length', type=int, default=50)
    parser.add_argument('--n_best', type=int, default=20)
    parser.add_argument('--vocab_size', type=int, default=21128)
    parser.add_argument('--max_seq_length', type=int, default=256)
    # Dropout for the ALBertForQA head. This flag was missing from the parser,
    # so loading a non-google ALBERT checkpoint crashed with an AttributeError
    # on args.dropout below. Default matches run_mrc.py.
    parser.add_argument('--dropout', type=float, default=0.1)

    # data dir
    parser.add_argument('--test_dir1', type=str, required=True)
    parser.add_argument('--test_dir2', type=str, required=True)
    parser.add_argument('--test_file', type=str, default='cmrc2018_test_2k.json')
    parser.add_argument('--bert_config_file', type=str, required=True)
    parser.add_argument('--vocab_file', type=str, required=True)
    parser.add_argument('--init_restore_dir', type=str, required=True)
    parser.add_argument('--output_dir', type=str, required=True)
    parser.add_argument('--output_file', type=str, default='predictions_test.json')

    # use some global vars for convenience
    args = parser.parse_args()

    # Task-specific preprocessing and prediction-writing implementations.
    if args.task_name.lower() == 'drcd':
        from preprocess.DRCD_output import write_predictions
        from preprocess.DRCD_preprocess import json2features
    elif args.task_name.lower() == 'cmrc2018':
        from preprocess.cmrc2018_output import write_predictions
        from preprocess.cmrc2018_preprocess import json2features
    else:
        raise NotImplementedError

    # Cache file names embed the sequence length so different settings don't collide.
    args.test_dir1 = args.test_dir1.replace('examples.json', 'examples_' + str(args.max_seq_length) + '.json')
    args.test_dir2 = args.test_dir2.replace('features.json', 'features_' + str(args.max_seq_length) + '.json')

    # Resolve the checkpoint: either an explicit weight file, or a directory
    # prefix globbed for exactly one *.pth file.
    if args.init_restore_dir.endswith('.pth') or \
            args.init_restore_dir.endswith('.pt') or \
            args.init_restore_dir.endswith('.bin'):
        pass
    else:
        args.init_restore_dir = glob(args.init_restore_dir + '*.pth')
        assert len(args.init_restore_dir) == 1
        args.init_restore_dir = args.init_restore_dir[0]

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    device = torch.device("cuda")
    n_gpu = torch.cuda.device_count()
    print("device %s n_gpu %d" % (device, n_gpu))
    print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))

    # load the bert setting
    if 'albert' not in args.bert_config_file:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
    else:
        if 'google' in args.bert_config_file:
            bert_config = AlbertConfig.from_json_file(args.bert_config_file)
        else:
            bert_config = ALBertConfig.from_json_file(args.bert_config_file)

    # load data
    print('loading data...')
    tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
    assert args.vocab_size == len(tokenizer.vocab)

    # Build the feature cache on first use; later runs read the cached json.
    # (A second, redundant json2features call with inconsistent arguments used
    # to follow this guard; it could never fire once this one ran, so it was
    # removed.)
    if not os.path.exists(args.test_dir1) or not os.path.exists(args.test_dir2):
        json2features(args.test_file, [args.test_dir1, args.test_dir2], tokenizer, is_training=False,
                      max_seq_length=args.max_seq_length)
    test_examples = json.load(open(args.test_dir1, 'r'))
    test_features = json.load(open(args.test_dir2, 'r'))

    # Number of eval batches (kept for parity with run_mrc.py; not used below).
    dev_steps_per_epoch = len(test_features) // args.n_batch
    if len(test_features) % args.n_batch != 0:
        dev_steps_per_epoch += 1

    # init model
    print('init model...')
    if 'albert' not in args.init_restore_dir:
        model = BertForQuestionAnswering(bert_config)
    else:
        if 'google' in args.init_restore_dir:
            model = AlbertForMRC(bert_config)
        else:
            model = ALBertForQA(bert_config, dropout_rate=args.dropout)
    utils.torch_show_all_params(model)
    utils.torch_init_model(model, args.init_restore_dir)
    if args.float16:
        model.half()
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    test(model, args, test_examples, test_features, device)
| 7,116 | 44.33121 | 110 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/run_mrc.py | import argparse
import collections
import json
import os
import random
import numpy as np
import torch
from google_albert_pytorch_modeling import AlbertConfig, AlbertForMRC
from preprocess.cmrc2018_evaluate import get_eval
from pytorch_modeling import BertConfig, BertForQuestionAnswering, ALBertConfig, ALBertForQA
from tools import official_tokenization as tokenization, utils
from tools.pytorch_optimization import get_optimization, warmup_linear
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
def evaluate(model, args, eval_examples, eval_features, device, global_steps, best_f1, best_em, best_f1_em):
    """Evaluate `model` on the dev set, log metrics and checkpoint on improvement.

    Runs a full no-grad pass over `eval_features`, writes prediction / n-best
    files tagged with `global_steps`, scores them against `args.dev_file`, and
    updates the running bests. The checkpoint is saved only when the combined
    F1+EM improves (so `best_f1` and `best_em` may come from different steps).
    Leaves the model back in train() mode and returns the updated
    (best_f1, best_em, best_f1_em) triple.
    """
    print("***** Eval *****")
    RawResult = collections.namedtuple("RawResult",
                                       ["unique_id", "start_logits", "end_logits"])
    # Prediction files are tagged with the training step that produced them.
    output_prediction_file = os.path.join(args.checkpoint_dir,
                                          "predictions_steps" + str(global_steps) + ".json")
    output_nbest_file = output_prediction_file.replace('predictions', 'nbest')
    all_input_ids = torch.tensor([f['input_ids'] for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f['input_mask'] for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f['segment_ids'] for f in eval_features], dtype=torch.long)
    # Index tensor maps each batch row back to its source feature.
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
    eval_dataloader = DataLoader(eval_data, batch_size=args.n_batch, shuffle=False)
    model.eval()
    all_results = []
    print("Start evaluating")
    for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature['unique_id'])
            all_results.append(RawResult(unique_id=unique_id,
                                         start_logits=start_logits,
                                         end_logits=end_logits))
    write_predictions(eval_examples, eval_features, all_results,
                      n_best_size=args.n_best, max_answer_length=args.max_ans_length,
                      do_lower_case=True, output_prediction_file=output_prediction_file,
                      output_nbest_file=output_nbest_file)
    # Score the written predictions against the gold dev file and append to the log.
    tmp_result = get_eval(args.dev_file, output_prediction_file)
    tmp_result['STEP'] = global_steps
    with open(args.log_file, 'a') as aw:
        aw.write(json.dumps(tmp_result) + '\n')
    print(tmp_result)
    if float(tmp_result['F1']) > best_f1:
        best_f1 = float(tmp_result['F1'])
    if float(tmp_result['EM']) > best_em:
        best_em = float(tmp_result['EM'])
    # Checkpoint only when the combined F1+EM improves; max_save_num=1 keeps a
    # single best model on disk.
    if float(tmp_result['F1']) + float(tmp_result['EM']) > best_f1_em:
        best_f1_em = float(tmp_result['F1']) + float(tmp_result['EM'])
        utils.torch_save_model(model, args.checkpoint_dir,
                               {'f1': float(tmp_result['F1']), 'em': float(tmp_result['EM'])}, max_save_num=1)
    # Restore training mode for the caller's loop.
    model.train()
    return best_f1, best_em, best_f1_em
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_ids', type=str, default='0,1,2,3')

    # training parameter
    parser.add_argument('--train_epochs', type=int, default=2)
    parser.add_argument('--n_batch', type=int, default=32)
    parser.add_argument('--lr', type=float, default=3e-5)
    parser.add_argument('--dropout', type=float, default=0.1)
    parser.add_argument('--clip_norm', type=float, default=1.0)
    parser.add_argument('--warmup_rate', type=float, default=0.05)
    parser.add_argument("--schedule", default='warmup_linear', type=str, help='schedule')
    parser.add_argument("--weight_decay_rate", default=0.01, type=float, help='weight_decay_rate')
    # One full training run is executed per seed. The previous spelling
    # `type=list` split a CLI value such as "123" into the characters
    # ['1', '2', '3'] (and later crashed torch.manual_seed); `type=int` with
    # `nargs='+'` parses `--seed 123 456` correctly and keeps the same default.
    parser.add_argument('--seed', type=int, nargs='+', default=[123])
    parser.add_argument('--float16', action='store_true', default=False)  # only sm >= 7.0 (tensorcores)
    parser.add_argument('--max_ans_length', type=int, default=50)
    parser.add_argument('--n_best', type=int, default=20)
    parser.add_argument('--eval_epochs', type=float, default=0.5)
    parser.add_argument('--save_best', type=bool, default=True)
    parser.add_argument('--vocab_size', type=int, default=21128)
    parser.add_argument('--max_seq_length', type=int, default=256)

    # data dir
    parser.add_argument('--train_dir', type=str, required=True)
    parser.add_argument('--dev_dir1', type=str, required=True)
    parser.add_argument('--dev_dir2', type=str, required=True)
    parser.add_argument('--train_file', type=str, required=True)
    parser.add_argument('--dev_file', type=str, required=True)
    parser.add_argument('--bert_config_file', type=str, required=True)
    parser.add_argument('--vocab_file', type=str, required=True)
    parser.add_argument('--init_restore_dir', type=str, required=True)
    parser.add_argument('--checkpoint_dir', type=str, required=True)
    parser.add_argument('--task_name', type=str, required=True)
    parser.add_argument('--setting_file', type=str, default='setting.txt')
    parser.add_argument('--log_file', type=str, default='log.txt')

    # use some global vars for convenience
    args = parser.parse_args()

    # Task-specific preprocessing and prediction-writing implementations.
    if args.task_name.lower() == 'drcd':
        from preprocess.DRCD_output import write_predictions
        from preprocess.DRCD_preprocess import json2features
    elif args.task_name.lower() == 'cmrc2018':
        from preprocess.cmrc2018_output import write_predictions
        from preprocess.cmrc2018_preprocess import json2features
    else:
        raise NotImplementedError

    # Cache file names embed max_seq_length so different settings don't collide.
    args.train_dir = args.train_dir.replace('features.json', 'features_' + str(args.max_seq_length) + '.json')
    args.dev_dir1 = args.dev_dir1.replace('examples.json', 'examples_' + str(args.max_seq_length) + '.json')
    args.dev_dir2 = args.dev_dir2.replace('features.json', 'features_' + str(args.max_seq_length) + '.json')
    args = utils.check_args(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    device = torch.device("cuda")
    n_gpu = torch.cuda.device_count()
    print("device %s n_gpu %d" % (device, n_gpu))
    print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))

    # load the bert setting
    if 'albert' not in args.bert_config_file:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
    else:
        if 'google' in args.bert_config_file:
            bert_config = AlbertConfig.from_json_file(args.bert_config_file)
        else:
            bert_config = ALBertConfig.from_json_file(args.bert_config_file)

    # load data
    print('loading data...')
    tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
    assert args.vocab_size == len(tokenizer.vocab)

    # Build feature caches on first use; later runs read the cached json files.
    if not os.path.exists(args.train_dir):
        json2features(args.train_file, [args.train_dir.replace('_features_', '_examples_'), args.train_dir],
                      tokenizer, is_training=True,
                      max_seq_length=args.max_seq_length)
    if not os.path.exists(args.dev_dir1) or not os.path.exists(args.dev_dir2):
        json2features(args.dev_file, [args.dev_dir1, args.dev_dir2], tokenizer, is_training=False,
                      max_seq_length=args.max_seq_length)
    train_features = json.load(open(args.train_dir, 'r'))
    dev_examples = json.load(open(args.dev_dir1, 'r'))
    dev_features = json.load(open(args.dev_dir2, 'r'))

    # Start every run with a fresh evaluation log.
    if os.path.exists(args.log_file):
        os.remove(args.log_file)

    steps_per_epoch = len(train_features) // args.n_batch
    eval_steps = int(steps_per_epoch * args.eval_epochs)
    dev_steps_per_epoch = len(dev_features) // args.n_batch
    if len(train_features) % args.n_batch != 0:
        steps_per_epoch += 1
    if len(dev_features) % args.n_batch != 0:
        dev_steps_per_epoch += 1
    total_steps = steps_per_epoch * args.train_epochs

    print('steps per epoch:', steps_per_epoch)
    print('total steps:', total_steps)
    print('warmup steps:', int(args.warmup_rate * total_steps))

    F1s = []
    EMs = []
    # keep a single globally best model across all seeds
    best_f1_em = 0
    # One complete train/eval cycle per seed; per-seed bests are collected into
    # F1s / EMs for the cross-seed summary below.
    for seed_ in args.seed:
        best_f1, best_em = 0, 0
        with open(args.log_file, 'a') as aw:
            aw.write('===================================' +
                     'SEED:' + str(seed_)
                     + '===================================' + '\n')
        print('SEED:', seed_)
        # Seed every RNG source for reproducibility of this run.
        random.seed(seed_)
        np.random.seed(seed_)
        torch.manual_seed(seed_)
        if n_gpu > 0:
            torch.cuda.manual_seed_all(seed_)
        # init model: the checkpoint path chooses between plain BERT and the
        # two ALBERT variants, mirroring the config selection above.
        print('init model...')
        if 'albert' not in args.init_restore_dir:
            model = BertForQuestionAnswering(bert_config)
        else:
            if 'google' in args.init_restore_dir:
                model = AlbertForMRC(bert_config)
            else:
                model = ALBertForQA(bert_config, dropout_rate=args.dropout)
        utils.torch_show_all_params(model)
        utils.torch_init_model(model, args.init_restore_dir)
        if args.float16:
            model.half()
        model.to(device)
        if n_gpu > 1:
            model = torch.nn.DataParallel(model)
        optimizer = get_optimization(model=model,
                                     float16=args.float16,
                                     learning_rate=args.lr,
                                     total_steps=total_steps,
                                     schedule=args.schedule,
                                     warmup_rate=args.warmup_rate,
                                     max_grad_norm=args.clip_norm,
                                     weight_decay_rate=args.weight_decay_rate)
        all_input_ids = torch.tensor([f['input_ids'] for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f['input_mask'] for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f['segment_ids'] for f in train_features], dtype=torch.long)
        seq_len = all_input_ids.shape[1]
        # sample length must not exceed BERT's position-embedding limit
        assert seq_len <= bert_config.max_position_embeddings
        # true label
        all_start_positions = torch.tensor([f['start_position'] for f in train_features], dtype=torch.long)
        all_end_positions = torch.tensor([f['end_position'] for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                   all_start_positions, all_end_positions)
        train_dataloader = DataLoader(train_data, batch_size=args.n_batch, shuffle=True)
        print('***** Training *****')
        model.train()
        global_steps = 1
        # NOTE(review): best_em/best_f1 were already initialized at the top of
        # this loop; these resets are redundant but harmless.
        best_em = 0
        best_f1 = 0
        for i in range(int(args.train_epochs)):
            print('Starting epoch %d' % (i + 1))
            total_loss = 0
            iteration = 1
            with tqdm(total=steps_per_epoch, desc='Epoch %d' % (i + 1)) as pbar:
                for step, batch in enumerate(train_dataloader):
                    batch = tuple(t.to(device) for t in batch)
                    input_ids, input_mask, segment_ids, start_positions, end_positions = batch
                    loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
                    if n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.
                    total_loss += loss.item()
                    pbar.set_postfix({'loss': '{0:1.5f}'.format(total_loss / (iteration + 1e-5))})
                    pbar.update(1)
                    if args.float16:
                        optimizer.backward(loss)
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used and handles this automatically
                        lr_this_step = args.lr * warmup_linear(global_steps / total_steps, args.warmup_rate)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    else:
                        loss.backward()
                    optimizer.step()
                    model.zero_grad()
                    global_steps += 1
                    iteration += 1
                    # Periodic dev evaluation; evaluate() also checkpoints when
                    # the combined F1+EM improves.
                    if global_steps % eval_steps == 0:
                        best_f1, best_em, best_f1_em = evaluate(model, args, dev_examples, dev_features, device,
                                                                global_steps, best_f1, best_em, best_f1_em)
        F1s.append(best_f1)
        EMs.append(best_em)
        # release the memory
        del model
        del optimizer
        torch.cuda.empty_cache()
print('Mean F1:', np.mean(F1s), 'Mean EM:', np.mean(EMs))
print('Best F1:', np.max(F1s), 'Best EM:', np.max(EMs))
with open(args.log_file, 'a') as aw:
aw.write('Mean(Best) F1:{}({})\n'.format(np.mean(F1s), np.max(F1s)))
aw.write('Mean(Best) EM:{}({})\n'.format(np.mean(EMs), np.max(EMs)))
| 13,603 | 45.749141 | 112 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/pytorch_modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from tools.file_utils import cached_path
logger = logging.getLogger(__name__)
# Shorthand model name -> S3 archive holding the matching pretrained weights.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
# File names expected inside a pretrained archive.
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
    """Gaussian Error Linear Unit activation, exact erf formulation.

    For information: OpenAI GPT's gelu is slightly different (and gives slightly
    different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    # x * Phi(x), where Phi is the standard normal CDF expressed via erf.
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# String-to-callable registry so configs can select the hidden activation by name.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel`, or a path to a JSON config file whose keys are
                copied verbatim onto the instance.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Path to a JSON config file: adopt its keys wholesale.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Start from defaults (vocab_size=-1 placeholder) and overwrite with the dict.
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class ALBertConfig(object):
    """Configuration class for the (non-google) ALBERT models in this file.

    Mirrors `BertConfig` and additionally stores the factorized embedding size
    and the layer-norm placement flag used by ALBERT.
    """

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 embedding_size=128,
                 ln_type="postln",
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs ALBertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` (int),
                or a path to a JSON config file whose keys are copied verbatim
                onto the instance.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer.
            intermediate_size: The size of the feed-forward layer in the encoder.
            embedding_size: Size of the (factorized) token embeddings.
            ln_type: Layer-norm placement flag ("postln" by default).
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model
                might ever be used with (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Path to a JSON config file: adopt its keys wholesale.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.embedding_size = embedding_size
            self.ln_type = ln_type
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs an `ALBertConfig` from a Python dictionary of parameters."""
        # Bug fix: this used to instantiate `BertConfig` (copy-paste), so configs
        # loaded via `ALBertConfig.from_json_file` came back as the wrong class
        # and lost the ALBERT-specific defaults (embedding_size, ln_type).
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs an `ALBertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
    class BertLayerNorm(nn.Module):
        """TF-style layer normalization: epsilon is added inside the square root."""
        def __init__(self, hidden_size, eps=1e-5):
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            # Normalize over the last dimension, then apply the learned affine.
            mean = x.mean(-1, keepdim=True)
            centered = x - mean
            variance = centered.pow(2).mean(-1, keepdim=True)
            normalized = centered / torch.sqrt(variance + self.variance_epsilon)
            return self.weight * normalized + self.bias
class BertEmbeddings(nn.Module):
    """Sum of word, position and token-type embeddings, then LayerNorm and dropout."""

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        # TODO: some RoBERTa checkpoints only load with max_position_embeddings == 512,
        # even though not every model was trained at length 512 -- be careful here.
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm keeps the TensorFlow variable name so TF checkpoints load cleanly.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        combined = (self.word_embeddings(input_ids)
                    + self.position_embeddings(position_ids)
                    + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(combined))
class ALBertEmbeddings(nn.Module):
    """ALBERT embeddings: factorized word embeddings plus position/token-type embeddings.

    When ``embedding_size != hidden_size``, a bias-free linear projection maps word
    embeddings up to the hidden size (ALBERT's embedding factorization); otherwise
    no projection layer is created.
    """

    def __init__(self, config):
        super(ALBertEmbeddings, self).__init__()
        # The Embedding construction was duplicated in both branches; build it once.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
        # word_embeddings_2 projects the (smaller) embedding space to the hidden space;
        # it is None when no projection is needed.
        if config.embedding_size == config.hidden_size:
            self.word_embeddings_2 = None
        else:
            self.word_embeddings_2 = nn.Linear(config.embedding_size, config.hidden_size, bias=False)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm keeps the TensorFlow variable name so TF checkpoints load cleanly.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        # Explicit None check: truth-testing an nn.Module is discouraged and was
        # the original behavior only by accident.
        if self.word_embeddings_2 is not None:
            words_embeddings = self.word_embeddings_2(words_embeddings)
        embeddings = (words_embeddings
                      + self.position_embeddings(position_ids)
                      + self.token_type_embeddings(token_type_ids))
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (without the output projection)."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))

        # Raw attention scores: scaled dot product of queries and keys.
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        # attention_mask is additive (0.0 to keep, large negative to mask); it is
        # precomputed in the model's forward().
        scores = scores + attention_mask

        probs = nn.Softmax(dim=-1)(scores)
        # This drops entire tokens to attend to, which may look unusual but follows
        # the original Transformer paper.
        probs = self.dropout(probs)

        context = torch.matmul(probs, v)
        context = context.permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class BertSelfOutput(nn.Module):
    """Projects attention output and applies the residual connection.

    With 'postln' (the default) the residual sum is layer-normalized here; with
    'preln' normalization happens before the sub-layer, so only the sum is returned.
    """

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Fall back to post-LN when the config does not carry an ln_type.
        self.ln_type = config.__dict__.get('ln_type', 'postln')

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        if self.ln_type == 'preln':
            return projected + input_tensor
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention sub-layer: self-attention followed by the residual output block."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.ln_type = config.__dict__.get('ln_type', 'postln')

    def forward(self, input_tensor, attention_mask):
        if self.ln_type == 'preln':
            # Pre-LN: normalize before entering the attention sub-layer.
            normed = self.output.LayerNorm(input_tensor)
            self_output = self.self(normed, attention_mask)
        else:
            self_output = self.self(input_tensor, attention_mask)
        return self.output(self_output, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a non-linear activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward output projection with the residual connection (post-LN if configured)."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.ln_type = config.__dict__.get('ln_type', 'postln')

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        if self.ln_type == 'preln':
            return projected + input_tensor
        return self.LayerNorm(projected + input_tensor)
class BertLayer(nn.Module):
    """One Transformer encoder layer: attention sub-layer plus feed-forward sub-layer."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.ln_type = config.__dict__.get('ln_type', 'postln')
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        if self.ln_type == 'preln':
            # Pre-LN: normalize before the feed-forward sub-layer; the residual
            # still uses the un-normalized attention output.
            ffn_input = self.output.LayerNorm(attention_output)
        else:
            ffn_input = attention_output
        intermediate_output = self.intermediate(ffn_input)
        return self.output(intermediate_output, attention_output)
class BertEncoder(nn.Module):
    """Stack of independent Transformer layers (weights are NOT shared)."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        # Build one template layer and deep-copy it per level.
        template = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(template) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        if output_all_encoded_layers:
            outputs = []
            for layer_module in self.layer:
                hidden_states = layer_module(hidden_states, attention_mask)
                outputs.append(hidden_states)
            return outputs
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
        # Keep the list return type: callers index with [-1].
        return [hidden_states]
class ALBertEncoder(nn.Module):
    """ALBERT encoder: a single Transformer layer applied repeatedly (weights shared)."""

    def __init__(self, config):
        super(ALBertEncoder, self).__init__()
        self.num_hidden_layers = config.num_hidden_layers
        self.layer_shared = BertLayer(config)

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        outputs = []
        for _ in range(self.num_hidden_layers):
            hidden_states = self.layer_shared(hidden_states, attention_mask)
            if output_all_encoded_layers:
                outputs.append(hidden_states)
        if not output_all_encoded_layers:
            outputs.append(hidden_states)
        return outputs
class BertPooler(nn.Module):
    """Pools the sequence by transforming the first ([CLS]) token's hidden state."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Only the first token's hidden state contributes to the pooled output.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied before the LM decoder."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # hidden_act may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)

    def forward(self, hidden_states):
        activated = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(activated)
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform, then decode to vocabulary logits.

    The decoder weight is tied to the input embedding matrix; only an
    output-only bias is trained separately.
    """

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights  # weight tying
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Pre-training head exposing only the masked-LM predictions."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Pre-training head exposing only the next-sentence-prediction logits."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: masked-LM scores plus next-sentence logits."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        lm_scores = self.predictions(sequence_output)
        nsp_scores = self.seq_relationship(pooled_output)
        return lm_scores, nsp_scores
class PreTrainedBertModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        # `config` must be a BertConfig; extra *inputs/**kwargs are for subclasses.
        super(PreTrainedBertModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_bert_weights(self, module):
        """ Initialize the weights of a single sub-module (used via ``self.apply``).
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            # NOTE(review): LayerNorm weight/bias are drawn from a normal distribution
            # here; the reference BERT init uses weight=1.0 / bias=0.0 -- confirm this
            # deviation is intentional before training from scratch.
            module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-base-multilingual`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        Returns the loaded model, or None when the archive cannot be resolved.
        """
        # Map a registered model name to its archive URL; otherwise treat the
        # argument as a local path or URL.
        if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
        else:
            archive_file = pretrained_model_name
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            # NOTE(review): returns None instead of raising -- callers must check.
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            # NOTE(review): extractall on an untrusted archive is vulnerable to path
            # traversal ("tar slip") -- only load archives from trusted sources.
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            # NOTE(review): no map_location -- loading a GPU-saved checkpoint on a
            # CPU-only machine will fail; consider map_location='cpu'.
            state_dict = torch.load(weights_path)
        # Rename TF-style LayerNorm parameters (gamma/beta) to PyTorch names (weight/bias).
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load parameters module-by-module so per-module metadata is honored.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # Wrapper models expose `.bert` and their checkpoints prefix keys with 'bert.';
        # when loading into a bare BertModel (no `.bert`), prepend that prefix.
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        return model
class BertModel(PreTrainedBertModel):
    """Plain BERT backbone: embeddings -> Transformer stack -> pooler."""

    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Broadcast the 2D padding mask to [batch, 1, 1, seq] so it can be added to
        # the attention scores for every head and query position: positions to keep
        # contribute 0.0, masked positions -10000.0 (removed by the softmax).
        ext_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        ext_mask = ext_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        ext_mask = (1.0 - ext_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      ext_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the single-layer list for convenience.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class ALBertModel(PreTrainedBertModel):
    """ALBERT backbone: factorized embeddings -> weight-shared Transformer stack -> pooler."""

    def __init__(self, config):
        super(ALBertModel, self).__init__(config)
        self.embeddings = ALBertEmbeddings(config)
        self.encoder = ALBertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Broadcast the 2D padding mask to [batch, 1, 1, seq]; kept positions add 0.0,
        # masked positions add -10000.0 so the softmax effectively ignores them.
        ext_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        ext_mask = ext_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        ext_mask = (1.0 - ext_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      ext_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the single-layer list for convenience.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
    """BERT with both pre-training heads: masked LM and next-sentence prediction.

    Inputs:
        input_ids: [batch, seq] WordPiece token ids.
        token_type_ids: optional [batch, seq] segment ids (0 = sentence A, 1 = sentence B).
        attention_mask: optional [batch, seq] padding mask (1 = attend, 0 = masked).
        masked_lm_labels: optional [batch, seq] LM labels in [-1, 0, ..., vocab_size];
            entries equal to -1 are ignored by the loss.
        next_sentence_label: optional [batch] labels (0 = continuation, 1 = random sentence).

    Returns:
        When both label tensors are given, the summed masked-LM and next-sentence
        loss; otherwise the tuple (prediction_scores, seq_relationship_score)
        of shapes [batch, seq, vocab_size] and [batch, 2].

    Example:
        ```python
        input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
        input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
        token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
        model = BertForPreTraining(config)
        lm_logits, nsp_logits = model(input_ids, token_type_ids, input_mask)
        ```
    """

    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        # The LM decoder is tied to the input embedding matrix.
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None):
        sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                                   output_all_encoded_layers=False)
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        if masked_lm_labels is None or next_sentence_label is None:
            return prediction_scores, seq_relationship_score
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
        return masked_lm_loss + next_sentence_loss
class ALBertForPreTraining(PreTrainedBertModel):
    """ALBERT backbone wrapper that returns the encoder/pooler outputs directly.

    The pre-training heads are instantiated (so checkpoints load), but loss
    computation is intentionally skipped; `masked_lm_labels` and
    `next_sentence_label` are accepted for interface compatibility only.
    """

    def __init__(self, config):
        super(ALBertForPreTraining, self).__init__(config)
        self.bert = ALBertModel(config)
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
                next_sentence_label=None):
        sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                                   output_all_encoded_layers=False)
        # No pre-training losses here: downstream code consumes the raw outputs.
        return sequence_output, pooled_output
class BertForMaskedLM(PreTrainedBertModel):
    """BERT with only the masked-LM head (decoder tied to the input embeddings)."""

    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                       output_all_encoded_layers=False)
        prediction_scores = self.cls(sequence_output)
        if masked_lm_labels is None:
            return prediction_scores
        # Labels of -1 (non-masked positions) are ignored by the loss.
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        return loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
class BertForNextSentencePrediction(PreTrainedBertModel):
    """BERT with only the next-sentence-prediction head on the pooled output."""

    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                     output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
        if next_sentence_label is None:
            return seq_relationship_score
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        return loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
class BertForSequenceClassification(PreTrainedBertModel):
    """BERT with a linear classification head on the pooled [CLS] output."""

    def __init__(self, config, num_labels=2):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(pooled_output))
        if labels is None:
            return logits
        loss_fct = CrossEntropyLoss()
        return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
class BertForMultipleChoice(PreTrainedBertModel):
    """BERT for multiple choice: one scalar score per choice, compared across choices."""

    def __init__(self, config, num_choices=2):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, return_logits=False):
        # Flatten [batch, num_choices, seq] -> [batch * num_choices, seq] for the encoder.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
                                     output_all_encoded_layers=False)
        choice_scores = self.classifier(self.dropout(pooled_output))
        reshaped_logits = choice_scores.view(-1, self.num_choices)
        if labels is None:
            return reshaped_logits
        loss = CrossEntropyLoss()(reshaped_logits, labels)
        if return_logits:
            return loss, reshaped_logits
        return loss
class BertForTokenClassification(PreTrainedBertModel):
    """BERT with a per-token linear classification head (e.g. for NER/tagging)."""

    def __init__(self, config, num_labels=2):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(sequence_output))
        if labels is None:
            return logits
        loss_fct = CrossEntropyLoss()
        return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
class BertForQuestionAnswering(PreTrainedBertModel):
    """BERT for extractive QA: per-token start/end logits (SQuAD-style)."""

    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        if start_positions is None or end_positions is None:
            return start_logits, end_logits
        # Multi-GPU gathering may add an extra dimension; squeeze it away.
        if len(start_positions.size()) > 1:
            start_positions = start_positions.squeeze(-1)
        if len(end_positions.size()) > 1:
            end_positions = end_positions.squeeze(-1)
        # Positions outside the model inputs are clamped to an index that the
        # loss is then told to ignore.
        ignored_index = start_logits.size(1)
        start_positions.clamp_(0, ignored_index)
        end_positions.clamp_(0, ignored_index)
        loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        start_loss = loss_fct(start_logits, start_positions)
        end_loss = loss_fct(end_logits, end_positions)
        return (start_loss + end_loss) / 2
class BertForQA_CLS(PreTrainedBertModel):
    """BERT with a span-extraction head plus a 3-way classifier on the pooled output."""

    def __init__(self, config):
        super(BertForQA_CLS, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.cls_outputs = nn.Linear(config.hidden_size, 3)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                start_positions=None, end_positions=None, target_labels=None):
        encoded, pooled = self.bert(input_ids, token_type_ids, attention_mask,
                                    output_all_encoded_layers=False)
        start_logits, end_logits = [t.squeeze(-1) for t in self.qa_outputs(encoded).split(1, dim=-1)]
        target_logits = self.cls_outputs(pooled)
        if start_positions is None or end_positions is None:
            return start_logits, end_logits, target_logits
        # A multi-GPU gather can leave a trailing dimension on the targets; drop it.
        if start_positions.dim() > 1:
            start_positions = start_positions.squeeze(-1)
        if end_positions.dim() > 1:
            end_positions = end_positions.squeeze(-1)
        # Positions outside the sequence are clamped to a sentinel the loss ignores.
        ignored_index = start_logits.size(1)
        start_positions.clamp_(0, ignored_index)
        end_positions.clamp_(0, ignored_index)
        span_loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        span_loss = (span_loss_fct(start_logits, start_positions) +
                     span_loss_fct(end_logits, end_positions)) / 2
        # Classification targets of -1 contribute no loss.
        cls_loss = CrossEntropyLoss(ignore_index=-1)(target_logits, target_labels)
        return span_loss + cls_loss
class ALBertForQA(PreTrainedBertModel):
    """ALBERT with a span-extraction head for extractive QA.

    When ``config.ln_type == 'ln_pre'`` an extra LayerNorm is applied to the
    encoder output before dropout and the span classifier.
    """

    def __init__(self, config, dropout_rate):
        super(ALBertForQA, self).__init__(config)
        self.bert = ALBertModel(config)
        self.ln_type = config.ln_type
        if self.ln_type == 'ln_pre':
            self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        else:
            self.LayerNorm = None
        self.dropout = nn.Dropout(dropout_rate)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
        encoded, _ = self.bert(input_ids, token_type_ids, attention_mask,
                               output_all_encoded_layers=False)
        if self.ln_type == 'ln_pre':
            encoded = self.LayerNorm(encoded)
        encoded = self.dropout(encoded)
        start_logits, end_logits = [t.squeeze(-1) for t in self.qa_outputs(encoded).split(1, dim=-1)]
        if start_positions is None or end_positions is None:
            return start_logits, end_logits
        # A multi-GPU gather can leave a trailing dimension on the targets; drop it.
        if start_positions.dim() > 1:
            start_positions = start_positions.squeeze(-1)
        if end_positions.dim() > 1:
            end_positions = end_positions.squeeze(-1)
        # Positions outside the sequence are clamped to a sentinel the loss ignores.
        ignored_index = start_logits.size(1)
        start_positions.clamp_(0, ignored_index)
        end_positions.clamp_(0, ignored_index)
        loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        return (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
class ALBertForQA_CLS(PreTrainedBertModel):
    """ALBERT with a span head plus a 3-way classifier on the pooled output.

    When ``config.ln_type == 'ln_pre'`` separate LayerNorms are applied to the
    sequence output (span head) and pooled output (classifier head).
    """

    def __init__(self, config, dropout_rate):
        super(ALBertForQA_CLS, self).__init__(config)
        self.bert = ALBertModel(config)
        self.ln_type = config.ln_type
        if self.ln_type == 'ln_pre':
            self.LayerNorm_qa = BertLayerNorm(config.hidden_size, eps=1e-5)
            self.LayerNorm_cls = BertLayerNorm(config.hidden_size, eps=1e-5)
        else:
            self.LayerNorm_qa = None
            self.LayerNorm_cls = None
        self.dropout_qa = nn.Dropout(dropout_rate)
        self.dropout_cls = nn.Dropout(dropout_rate)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.cls_outputs = nn.Linear(config.hidden_size, 3)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                start_positions=None, end_positions=None, target_labels=None):
        encoded, pooled = self.bert(input_ids, token_type_ids, attention_mask,
                                    output_all_encoded_layers=False)
        if self.ln_type == 'ln_pre':
            encoded = self.LayerNorm_qa(encoded)
            pooled = self.LayerNorm_cls(pooled)
        encoded = self.dropout_qa(encoded)
        pooled = self.dropout_cls(pooled)
        start_logits, end_logits = [t.squeeze(-1) for t in self.qa_outputs(encoded).split(1, dim=-1)]
        target_logits = self.cls_outputs(pooled)
        if start_positions is None or end_positions is None:
            return start_logits, end_logits, target_logits
        # A multi-GPU gather can leave a trailing dimension on the targets; drop it.
        if start_positions.dim() > 1:
            start_positions = start_positions.squeeze(-1)
        if end_positions.dim() > 1:
            end_positions = end_positions.squeeze(-1)
        # Positions outside the sequence are clamped to a sentinel the loss ignores.
        ignored_index = start_logits.size(1)
        start_positions.clamp_(0, ignored_index)
        end_positions.clamp_(0, ignored_index)
        span_loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        span_loss = (span_loss_fct(start_logits, start_positions) +
                     span_loss_fct(end_logits, end_positions)) / 2
        # Classification targets of -1 contribute no loss.
        cls_loss = CrossEntropyLoss(ignore_index=-1)(target_logits, target_labels)
        return span_loss + cls_loss
class ALBertForMultipleChoice(PreTrainedBertModel):
    """ALBERT multiple-choice head: one score per choice from the pooled output."""

    def __init__(self, config, num_choices=2):
        super(ALBertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = ALBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, return_logits=False):
        # Fold the choice dimension into the batch so the encoder sees one
        # sequence per candidate answer.
        _, pooled = self.bert(input_ids.view(-1, input_ids.size(-1)),
                              token_type_ids.view(-1, token_type_ids.size(-1)),
                              attention_mask.view(-1, attention_mask.size(-1)),
                              output_all_encoded_layers=False)
        scores = self.classifier(self.dropout(pooled)).view(-1, self.num_choices)
        if labels is None:
            return scores
        loss = CrossEntropyLoss()(scores, labels)
        return (loss, scores) if return_logits else loss
| 57,982 | 45.798224 | 130 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/convert_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import print_function
import argparse
import os
import re
import numpy as np
import tensorflow as tf
import torch
from pytorch_modeling import BertConfig, BertForPreTraining, ALBertConfig, ALBertForPreTraining
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path, is_albert):
    """Convert a TensorFlow BERT/ALBERT checkpoint into a PyTorch state dict.

    Each TF variable is mapped onto the corresponding PyTorch module attribute
    by walking the slash-separated variable name, then the full model state
    dict is saved to ``pytorch_dump_path``.

    Args:
        tf_checkpoint_path: path to the TF checkpoint (``.ckpt`` prefix).
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: output path for the converted PyTorch weights.
        is_albert: build an ``ALBertForPreTraining`` instead of ``BertForPreTraining``.
    """
    config_path = os.path.abspath(bert_config_file)
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    # Initialise PyTorch model
    if is_albert:
        config = ALBertConfig.from_json_file(bert_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = ALBertForPreTraining(config)
    else:
        config = BertConfig.from_json_file(bert_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = BertForPreTraining(config)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # Optimizer bookkeeping variables are not part of the model.
        if name[0] == 'global_step':
            continue
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Names like "layer_0" index into a module list: split off the digits.
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # TF naming -> PyTorch attribute mapping (kernel/gamma -> weight, etc.).
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name[-13:] == '_embeddings_2':
            pointer = getattr(pointer, 'weight')
            # TF stores this projection transposed relative to the torch layout.
            array = np.transpose(array)
        elif m_name == 'kernel':
            # TF dense kernels are transposed relative to torch.nn.Linear.weight.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error for easier debugging.
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point: parse paths and run the conversion.
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--tf_checkpoint_path",
                        default='check_points/pretrain_models/albert_large_zh/albert_model.ckpt',
                        type=str,
                        help="Path the TensorFlow checkpoint path.")
    parser.add_argument("--bert_config_file",
                        default='check_points/pretrain_models/albert_large_zh/albert_config_large.json',
                        type=str,
                        help="The config json file corresponding to the pre-trained BERT model. \n"
                             "This specifies the model architecture.")
    parser.add_argument("--pytorch_dump_path",
                        default='check_points/pretrain_models/albert_large_zh/pytorch_albert_model.pth',
                        type=str,
                        help="Path to the output PyTorch model.")
    # Bug fix: argparse's store_true action does not accept a `type` kwarg --
    # the original passed type=bool alongside action='store_true', which raises
    # TypeError at add_argument() time, before any arguments are parsed.
    parser.add_argument("--is_albert",
                        default=False,
                        action='store_true',
                        help="whether is albert?")
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                     args.bert_config_file,
                                     args.pytorch_dump_path,
                                     args.is_albert)
| 5,188 | 40.18254 | 105 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/test_multichoice_mrc.py | from __future__ import print_function
import argparse
import os
from glob import glob
import torch
from google_albert_pytorch_modeling import AlbertConfig, AlbertForMultipleChoice
from preprocess.CHID_preprocess import RawResult, get_final_predictions, write_predictions, \
generate_input
from pytorch_modeling import ALBertConfig, ALBertForMultipleChoice
from pytorch_modeling import BertConfig, BertForMultipleChoice
from tools.official_tokenization import BertTokenizer
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from tqdm import tqdm
def torch_init_model(model, init_restore_dir):
    """Load a checkpoint into ``model`` non-strictly, reporting any mismatches.

    If ``model`` has no ``bert`` attribute, the checkpoint keys are assumed to
    target the encoder and are loaded under the ``bert.`` prefix. Missing,
    unexpected and erroneous keys are printed instead of raising.
    """
    state_dict = torch.load(init_restore_dir, map_location='cpu')
    missing_keys, unexpected_keys, error_msgs = [], [], []
    # _load_from_state_dict may mutate the dict, so work on a shallow copy.
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def _load(module, prefix=''):
        # Recursively load this module's own parameters, then its children.
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     missing_keys, unexpected_keys, error_msgs)
        for child_name, child in module._modules.items():
            if child is not None:
                _load(child, prefix + child_name + '.')

    _load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
    print("missing keys:{}".format(missing_keys))
    print('unexpected keys:{}'.format(unexpected_keys))
    print('error msgs:{}'.format(error_msgs))
def main():
    """Run CHID multiple-choice inference with a trained checkpoint and write predictions."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu_ids", default='0', type=str)
    parser.add_argument("--bert_config_file",
                        default='check_points/pretrain_models/bert_wwm_ext_base/bert_config.json',
                        type=str,
                        help="The config json file corresponding to the pre-trained BERT model. "
                             "This specifies the model architecture.")
    parser.add_argument("--vocab_file", default='check_points/pretrain_models/bert_wwm_ext_base/vocab.txt',
                        type=str,
                        help="The vocabulary file that the BERT model was trained on.")
    parser.add_argument("--init_restore_dir",
                        required=True,
                        type=str,
                        help="Initial checkpoint (usually from a pre-trained BERT model).")
    parser.add_argument("--input_dir", required=True, default='dataset/CHID')
    parser.add_argument("--output_dir", required=True, type=str,
                        help="The output directory where the model checkpoints and predictions will be written.")
    parser.add_argument("--predict_file",
                        required=True,
                        type=str,
                        help="Initial checkpoint (usually from a pre-trained BERT model).")
    parser.add_argument('--output_file', type=str, default='test_predictions.json')
    ## Other parameters
    parser.add_argument("--max_seq_length", default=64, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--max_num_choices", default=10, type=int,
                        help="The maximum number of cadicate answer, shorter than this will be padded.")
    parser.add_argument("--predict_batch_size", default=16, type=int, help="Total batch size for predictions.")
    parser.add_argument("--do_lower_case",
                        default=True,
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    args = parser.parse_args()
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: {}, 16-bits training: {}".format(device, args.fp16))
    tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
    # Cached example/feature pickles are keyed by max_seq_length.
    test_example_file = os.path.join(args.input_dir, 'test_examples_{}.pkl'.format(str(args.max_seq_length)))
    test_feature_file = os.path.join(args.input_dir, 'test_features_{}.pkl'.format(str(args.max_seq_length)))
    eval_features = generate_input(args.predict_file, None, test_example_file, test_feature_file, tokenizer,
                                   max_seq_length=args.max_seq_length, max_num_choices=args.max_num_choices,
                                   is_training=False)
    # Prepare model: architecture is inferred from the config file name.
    if 'albert' in args.bert_config_file:
        if 'google' in args.bert_config_file:
            bert_config = AlbertConfig.from_json_file(args.bert_config_file)
            model = AlbertForMultipleChoice(bert_config, num_choices=args.max_num_choices)
        else:
            bert_config = ALBertConfig.from_json_file(args.bert_config_file)
            model = ALBertForMultipleChoice(bert_config, num_choices=args.max_num_choices)
    else:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
        model = BertForMultipleChoice(bert_config, num_choices=args.max_num_choices)
    model = model.to(device)
    # Accept either a direct checkpoint path or a prefix to glob for one.
    if args.init_restore_dir.endswith('.pth') or \
            args.init_restore_dir.endswith('.pt') or \
            args.init_restore_dir.endswith('.bin'):
        pass
    else:
        args.init_restore_dir = glob(args.init_restore_dir + '*.pth') + \
                                glob(args.init_restore_dir + '*.pt') + \
                                glob(args.init_restore_dir + '*.bin')
        assert len(args.init_restore_dir) == 1
        args.init_restore_dir = args.init_restore_dir[0]
    torch_init_model(model, args.init_restore_dir)
    if args.fp16:
        model = model.half()
    print("***** Running predictions *****")
    # NOTE(review): print() does not %-format its arguments; these two lines
    # print the literal "%d" followed by the value.
    print("Num split examples = %d", len(eval_features))
    print("Batch size = %d", args.predict_batch_size)
    all_example_ids = [f.example_id for f in eval_features]
    all_tags = [f.tag for f in eval_features]
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_masks = torch.tensor([f.input_masks for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_choice_masks = torch.tensor([f.choice_masks for f in eval_features], dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids, all_choice_masks,
                              all_example_index)
    # Run prediction for full data
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
    model.eval()
    all_results = []
    print("Start evaluating")
    for input_ids, input_masks, segment_ids, choice_masks, example_indices in tqdm(eval_dataloader,
                                                                                   desc="Evaluating",
                                                                                   disable=None):
        if len(all_results) == 0:
            print('shape of input_ids: {}'.format(input_ids.shape))
        input_ids = input_ids.to(device)
        input_masks = input_masks.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_logits = model(input_ids=input_ids,
                                 token_type_ids=segment_ids,
                                 attention_mask=input_masks,
                                 labels=None)
        for i, example_index in enumerate(example_indices):
            logits = batch_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            all_results.append(RawResult(unique_id=unique_id,
                                         example_id=all_example_ids[unique_id],
                                         tag=all_tags[unique_id],
                                         logit=logits))
    # for/else: runs once the loop completes without a break.
    else:
        print("prediction is over")
    print('decoder raw results')
    tmp_predict_file = os.path.join(args.output_dir, "test_raw_predictions.pkl")
    output_prediction_file = os.path.join(args.output_dir, args.output_file)
    results = get_final_predictions(all_results, tmp_predict_file, g=True)
    write_predictions(results, output_prediction_file)
    print('predictions saved to {}'.format(output_prediction_file))
# Script entry point.
if __name__ == "__main__":
    main()
| 9,007 | 49.044444 | 118 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/run_multichoice_mrc.py | """
@name = 'roberta_wwm_ext_large'
@author = 'zhangxinrui'
@time = '2019/11/15'
roberta_wwm_ext_large 的baseline版本
coding=utf-8
Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
import os
import random
import numpy as np
import torch
from google_albert_pytorch_modeling import AlbertConfig, AlbertForMultipleChoice
from preprocess.CHID_preprocess import RawResult, get_final_predictions, write_predictions, generate_input, evaluate
from pytorch_modeling import ALBertConfig, ALBertForMultipleChoice
from pytorch_modeling import BertConfig, BertForMultipleChoice
from tools.official_tokenization import BertTokenizer
from tools.pytorch_optimization import get_optimization, warmup_linear
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm
def reset_model(args, bert_config, model_cls):
    """Build a fresh multiple-choice model, optionally restoring pretrained weights.

    Weights are loaded non-strictly: mismatched keys are printed, not fatal.
    When the model lacks a ``bert`` attribute, the checkpoint is assumed to map
    onto the encoder and is loaded under the ``bert.`` prefix. If ``args.fp16``
    is set, the model is converted to half precision before being returned.
    """
    model = model_cls(bert_config, num_choices=args.max_num_choices)
    if args.init_restore_dir is not None:
        print('load bert weight')
        state_dict = torch.load(args.init_restore_dir, map_location='cpu')
        missing_keys, unexpected_keys, error_msgs = [], [], []
        # _load_from_state_dict may mutate the dict, so work on a shallow copy.
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def _load(module, prefix=''):
            # Recursively load this module's own parameters, then its children.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                         missing_keys, unexpected_keys, error_msgs)
            for child_name, child in module._modules.items():
                if child is not None:
                    _load(child, prefix + child_name + '.')

        _load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        print("missing keys:{}".format(missing_keys))
        print('unexpected keys:{}'.format(unexpected_keys))
        print('error msgs:{}'.format(error_msgs))
    if args.fp16:
        model.half()
    return model
def main():
    """Train a multiple-choice MRC model on CHID, evaluating each epoch and
    keeping the checkpoint with the best dev accuracy."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu_ids", default='', required=True, type=str)
    parser.add_argument("--bert_config_file", required=True,
                        default='check_points/pretrain_models/roberta_wwm_ext_large/bert_config.json')
    parser.add_argument("--vocab_file", required=True,
                        default='check_points/pretrain_models/roberta_wwm_ext_large/vocab.txt')
    parser.add_argument("--init_restore_dir", required=True,
                        default='check_points/pretrain_models/roberta_wwm_ext_large/pytorch_model.pth')
    parser.add_argument("--input_dir", required=True, default='dataset/CHID')
    parser.add_argument("--output_dir", required=True, default='check_points/CHID')
    ## Other parameters
    parser.add_argument("--train_file", default='./origin_data/CHID/train.json', type=str,
                        help="SQuAD json for training. E.g., train-v1.1.json")
    parser.add_argument("--train_ans_file", default='./origin_data/CHID/train_answer.json', type=str,
                        help="SQuAD answer for training. E.g., train-v1.1.json")
    parser.add_argument("--predict_file", default='./origin_data/CHID/dev.json', type=str,
                        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
    parser.add_argument("--predict_ans_file", default='origin_data/CHID/dev_answer.json', type=str,
                        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
    parser.add_argument("--max_seq_length", default=64, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--max_num_choices", default=10, type=int,
                        help="The maximum number of cadicate answer, shorter than this will be padded.")
    parser.add_argument("--train_batch_size", default=20, type=int, help="Total batch size for training.")
    parser.add_argument("--predict_batch_size", default=16, type=int, help="Total batch size for predictions.")
    parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.06, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% "
                             "of training.")
    parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--do_lower_case", default=True,
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument('--fp16', default=False, action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    args = parser.parse_args()
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    print("device: {} n_gpu: {}, 16-bits training: {}".format(device, n_gpu, args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    # The effective per-step batch is the requested batch divided by the
    # number of accumulation steps.
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if os.path.exists(args.input_dir) == False:
        os.makedirs(args.input_dir, exist_ok=True)
    if os.path.exists(args.output_dir) == False:
        os.makedirs(args.output_dir, exist_ok=True)
    tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
    print('ready for train dataset')
    # Example/feature pickles are cached on disk, keyed by max_seq_length.
    train_example_file = os.path.join(args.input_dir, 'train_examples_{}.pkl'.format(str(args.max_seq_length)))
    train_feature_file = os.path.join(args.input_dir, 'train_features_{}.pkl'.format(str(args.max_seq_length)))
    train_features = generate_input(args.train_file, args.train_ans_file, train_example_file, train_feature_file,
                                    tokenizer, max_seq_length=args.max_seq_length,
                                    max_num_choices=args.max_num_choices,
                                    is_training=True)
    dev_example_file = os.path.join(args.input_dir, 'dev_examples_{}.pkl'.format(str(args.max_seq_length)))
    dev_feature_file = os.path.join(args.input_dir, 'dev_features_{}.pkl'.format(str(args.max_seq_length)))
    eval_features = generate_input(args.predict_file, None, dev_example_file, dev_feature_file, tokenizer,
                                   max_seq_length=args.max_seq_length, max_num_choices=args.max_num_choices,
                                   is_training=False)
    print("train features {}".format(len(train_features)))
    num_train_steps = int(
        len(train_features) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
    print("loaded train dataset")
    print("Num generate examples = {}".format(len(train_features)))
    print("Batch size = {}".format(args.train_batch_size))
    print("Num steps for a epoch = {}".format(num_train_steps))
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_input_masks = torch.tensor([f.input_masks for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_choice_masks = torch.tensor([f.choice_masks for f in train_features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids, all_choice_masks, all_labels)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size,
                                  drop_last=True)
    all_example_ids = [f.example_id for f in eval_features]
    all_tags = [f.tag for f in eval_features]
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_masks = torch.tensor([f.input_masks for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_choice_masks = torch.tensor([f.choice_masks for f in eval_features], dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids, all_choice_masks,
                              all_example_index)
    # Run prediction for full data
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
    # Prepare model: architecture is inferred from the config file name.
    if 'albert' in args.bert_config_file:
        if 'google' in args.bert_config_file:
            bert_config = AlbertConfig.from_json_file(args.bert_config_file)
            model = reset_model(args, bert_config, AlbertForMultipleChoice)
        else:
            bert_config = ALBertConfig.from_json_file(args.bert_config_file)
            model = reset_model(args, bert_config, ALBertForMultipleChoice)
    else:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
        model = reset_model(args, bert_config, BertForMultipleChoice)
    model = model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    optimizer = get_optimization(model,
                                 float16=args.fp16,
                                 learning_rate=args.learning_rate,
                                 total_steps=num_train_steps,
                                 schedule='warmup_linear',
                                 warmup_rate=args.warmup_proportion,
                                 weight_decay_rate=0.01,
                                 max_grad_norm=1.0,
                                 opt_pooler=True)
    global_step = 0
    best_acc = 0
    acc = 0
    for i in range(int(args.num_train_epochs)):
        num_step = 0
        average_loss = 0
        model.train()
        model.zero_grad()  # equivalent to optimizer.zero_grad()
        steps_per_epoch = num_train_steps // args.num_train_epochs
        with tqdm(total=int(steps_per_epoch), desc='Epoch %d' % (i + 1)) as pbar:
            for step, batch in enumerate(train_dataloader):
                if n_gpu == 1:
                    batch = tuple(t.to(device) for t in batch)  # multi-gpu does scattering it-self
                input_ids, input_masks, segment_ids, choice_masks, labels = batch
                if step == 0 and i == 0:
                    print('shape of input_ids: {}'.format(input_ids.shape))
                    print('shape of labels: {}'.format(labels.shape))
                loss = model(input_ids=input_ids,
                             token_type_ids=segment_ids,
                             attention_mask=input_masks,
                             labels=labels)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                    # modify learning rate with special warm up BERT uses
                    # if args.fp16 is False, BertAdam is used and handles this automatically
                    lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_steps,
                                                                      args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                else:
                    loss.backward()
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                average_loss += loss.item()
                num_step += 1
                pbar.set_postfix({'loss': '{0:1.5f}'.format(average_loss / (num_step + 1e-5))})
                pbar.update(1)
        # Evaluate on the dev set at the end of each epoch.
        print("***** Running predictions *****")
        print("Num split examples = {}".format(len(eval_features)))
        print("Batch size = {}".format(args.predict_batch_size))
        model.eval()
        all_results = []
        print("Start evaluating")
        for input_ids, input_masks, segment_ids, choice_masks, example_indices in tqdm(eval_dataloader,
                                                                                       desc="Evaluating",
                                                                                       disable=None):
            if len(all_results) == 0:
                print('shape of input_ids: {}'.format(input_ids.shape))
            input_ids = input_ids.to(device)
            input_masks = input_masks.to(device)
            segment_ids = segment_ids.to(device)
            with torch.no_grad():
                batch_logits = model(input_ids=input_ids,
                                     token_type_ids=segment_ids,
                                     attention_mask=input_masks,
                                     labels=None)
            # NOTE(review): this inner `i` shadows the epoch counter `i` from
            # the outer loop; harmless here because the epoch value is only
            # used again at the top of the next iteration.
            for i, example_index in enumerate(example_indices):
                logits = batch_logits[i].detach().cpu().tolist()
                eval_feature = eval_features[example_index.item()]
                unique_id = int(eval_feature.unique_id)
                all_results.append(RawResult(unique_id=unique_id,
                                             example_id=all_example_ids[unique_id],
                                             tag=all_tags[unique_id],
                                             logit=logits))
        predict_file = 'dev_predictions.json'
        print('decoder raw results')
        tmp_predict_file = os.path.join(args.output_dir, "raw_predictions.pkl")
        output_prediction_file = os.path.join(args.output_dir, predict_file)
        results = get_final_predictions(all_results, tmp_predict_file, g=True)
        write_predictions(results, output_prediction_file)
        print('predictions saved to {}'.format(output_prediction_file))
        if args.predict_ans_file:
            acc = evaluate(args.predict_ans_file, output_prediction_file)
            print(f'{args.predict_file} 预测精度:{acc}')
        # Save a epoch trained model
        if acc > best_acc:
            best_acc = acc
            output_model_file = os.path.join(args.output_dir, "best_checkpoint.bin")
            print('save trained model from {}'.format(output_model_file))
            model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
            torch.save(model_to_save.state_dict(), output_model_file)
# Script entry point.
if __name__ == "__main__":
    main()
| 16,489 | 50.69279 | 118 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/pytorch_optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.nn.utils import clip_grad_norm_
from torch.optim.optimizer import Optimizer
def warmup_cosine(x, warmup=0.002):
    """Cosine learning-rate schedule with a linear warmup phase.

    Args:
        x: training progress in [0, 1] (fraction of total steps completed).
        warmup: fraction of training spent in linear warmup.

    Returns:
        Multiplier for the base learning rate.
    """
    if x < warmup:
        return x / warmup
    # BUG FIX: the original called torch.cos on a plain Python float, which
    # raises TypeError (torch.cos expects a Tensor). math.cos is the intended
    # scalar cosine here.
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """Constant schedule after a linear warmup: ramps x/warmup, then 1.0."""
    return x / warmup if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Linear warmup followed by a linear decay reaching zero at x == 1."""
    return x / warmup if x < warmup else (1.0 - x) / (1.0 - warmup)
def warmup_fix(step, warmup_step):
    """Linear ramp from 0 to 1 over warmup_step steps, then flat at 1."""
    ratio = step / warmup_step
    return ratio if ratio < 1.0 else 1.0
# Registry mapping schedule names (as accepted by BERTAdam / get_optimization)
# to the learning-rate multiplier functions defined above.
SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
    'warmup_fix': warmup_fix
}
class BERTAdam(Optimizer):
    """Implements the BERT variant of the Adam algorithm: decoupled weight
    decay and no bias correction of the moment estimates.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see SCHEDULES above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay_rate: Weight decay. Default: 0.01
        cycle_step: if set, once state['step'] exceeds it the lr decays
            cyclically with this period. Default: None
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay_rate=0.01, cycle_step=None,
                 max_grad_norm=1.0):
        # NOTE(review): lr=None silently passes this check — presumably callers
        # always supply a numeric lr; confirm before relying on it.
        if lr is not None and not lr >= 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay_rate=weight_decay_rate,
                        max_grad_norm=max_grad_norm, cycle_step=cycle_step)
        super(BERTAdam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # Add grad clipping — note it is applied per-parameter here,
                # not globally over the whole parameter list.
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time.
                # NOTE(review): add_(scalar, tensor) / addcmul_(scalar, t, t) are
                # deprecated overloads in newer torch (use alpha=/value= kwargs);
                # kept as-is to avoid behavior drift on the pinned version.
                next_m.mul_(beta1).add_(1 - beta1, grad)
                next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                # No bias correction, matching the original BERT TF optimizer.
                update = next_m / (next_v.sqrt() + group['e'])

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay_rate'] > 0.0:
                    update += group['weight_decay_rate'] * p.data

                # Pick the lr multiplier. state['step'] still holds the count of
                # *previous* steps at this point (it is incremented below).
                schedule_fct = SCHEDULES[group['schedule']]
                if group['cycle_step'] is not None and state['step'] > group['cycle_step']:
                    lr_scheduled = group['lr'] * (1 - ((state['step'] % group['cycle_step']) / group['cycle_step']))
                elif group['t_total'] != -1 and group['schedule'] != 'warmup_fix':
                    lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
                elif group['schedule'] == 'warmup_fix':
                    lr_scheduled = group['lr'] * schedule_fct(state['step'], group['warmup'] * group['t_total'])
                else:
                    lr_scheduled = group['lr']

                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)

                state['step'] += 1
        return loss
def get_optimization(model, float16, learning_rate, total_steps, schedule,
                     warmup_rate, weight_decay_rate, max_grad_norm, opt_pooler=False):
    """Build the training optimizer for `model`.

    Returns an apex FP16_Optimizer-wrapped FusedAdam when float16 is set,
    otherwise a BERTAdam configured with the given schedule. Bias and
    LayerNorm parameters are excluded from weight decay.
    """
    # Prepare optimizer
    assert 0.0 <= warmup_rate <= 1.0
    named_params = list(model.named_parameters())
    # Drop the (unused) pooler weights unless explicitly requested: their
    # grads stay None, which breaks apex.
    if opt_pooler is False:
        named_params = [item for item in named_params if 'pooler' not in item[0]]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    def _needs_decay(name):
        # Anything not matching a no-decay pattern gets weight decay.
        return not any(nd in name for nd in no_decay)

    grouped_params = [
        {'params': [p for n, p in named_params if _needs_decay(n)],
         'weight_decay_rate': weight_decay_rate},
        {'params': [p for n, p in named_params if not _needs_decay(n)],
         'weight_decay_rate': 0.0},
    ]
    if float16:
        try:
            from apex.contrib.optimizers import FP16_Optimizer
            from apex.contrib.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        fused = FusedAdam(grouped_params,
                          lr=learning_rate,
                          bias_correction=False,
                          max_grad_norm=max_grad_norm)
        return FP16_Optimizer(fused, dynamic_loss_scale=True)
    return BERTAdam(params=grouped_params,
                    lr=learning_rate,
                    warmup=warmup_rate,
                    max_grad_norm=max_grad_norm,
                    t_total=total_steps,
                    schedule=schedule,
                    weight_decay_rate=weight_decay_rate)
| 8,435 | 41.606061 | 116 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/utils.py | import collections
import os
import re
from glob import glob
import tensorflow as tf
import tensorflow.contrib.slim as slim
import torch
def check_args(args):
    """Resolve setting/log file paths inside args.checkpoint_dir, create the
    directory, and dump every option to the setting file (echoed to stdout).

    Returns the same (mutated) args namespace.
    """
    args.setting_file = os.path.join(args.checkpoint_dir, args.setting_file)
    args.log_file = os.path.join(args.checkpoint_dir, args.log_file)
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    with open(args.setting_file, 'wt') as handle:
        handle.write('------------ Options -------------\n')
        print('------------ Options -------------')
        for key, value in args.__dict__.items():
            handle.write('%s: %s\n' % (str(key), str(value)))
            print('%s: %s' % (str(key), str(value)))
        handle.write('-------------- End ----------------\n')
        print('------------ End -------------')
    return args
def show_all_variables(rank=0):
    """Print a summary of all TF trainable variables (verbose only on rank 0)."""
    trainable = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(trainable, print_info=(rank == 0))
def torch_show_all_params(model, rank=0):
    """Print the total number of parameters in a PyTorch model.

    Args:
        model: torch.nn.Module whose parameters are counted.
        rank: distributed rank; the count is printed only when rank == 0.
    """
    # Idiomatic replacement for the original nested size()-product loop.
    total = sum(p.numel() for p in model.parameters())
    if rank == 0:
        print("Total param num:" + str(total))
# import ipdb
def get_assigment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables.

    Returns:
        assignment_map: checkpoint name -> live tf.Variable to initialize.
        initialized_variable_names: names found in the checkpoint (both with
            and without the ':0' suffix), mapped to 1.
        new_variable_names: live variable names absent from the checkpoint.
        unused_variable_names: non-Adam checkpoint entries with no live variable.
    """
    initialized_variable_names = {}
    new_variable_names = set()
    unused_variable_names = set()

    # Index live variables by name with the trailing ':<device>' stripped.
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        match = re.match("^(.*):\\d+$", var.name)
        key = match.group(1) if match is not None else var.name
        name_to_variable[key] = var

    assignment_map = collections.OrderedDict()
    for ckpt_name, _ in tf.train.list_variables(init_checkpoint):
        if ckpt_name not in name_to_variable:
            # Optimizer slots (adam) are expected to be missing; anything
            # else in the checkpoint is reported as unused.
            if 'adam' not in ckpt_name:
                unused_variable_names.add(ckpt_name)
            continue
        assignment_map[ckpt_name] = name_to_variable[ckpt_name]
        initialized_variable_names[ckpt_name] = 1
        initialized_variable_names[ckpt_name + ":0"] = 1

    for name in name_to_variable:
        if name not in initialized_variable_names:
            new_variable_names.add(name)
    return assignment_map, initialized_variable_names, new_variable_names, unused_variable_names
# loading weights
def init_from_checkpoint(init_checkpoint, tvars=None, rank=0):
    """Initialize TF trainable variables from a checkpoint.

    On rank 0, reports which weights were loaded, which live parameters are
    new, and which checkpoint entries went unused.
    """
    if not tvars:
        tvars = tf.trainable_variables()
    (assignment_map, initialized_variable_names,
     new_variable_names, unused_variable_names) = get_assigment_map_from_checkpoint(tvars, init_checkpoint)
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    if rank != 0:
        return
    # Show the weights that were successfully loaded.
    for name in initialized_variable_names:
        if ":0" not in name:
            print("Loading weights success: " + name)
    # Show live parameters not present in the checkpoint.
    print('New parameters:', new_variable_names)
    # Show checkpoint entries that were never used.
    print('Unused parameters', unused_variable_names)
def torch_init_model(model, init_checkpoint):
    """Load a PyTorch state dict into `model`, printing (not raising) any
    missing/unexpected keys and load errors.

    Checkpoints are assumed to use a 'bert.' key prefix unless the model
    itself exposes a `bert` attribute.
    """
    state_dict = torch.load(init_checkpoint, map_location='cpu')
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def _recurse(module, prefix=''):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for child_name, child in module._modules.items():
            if child is not None:
                _recurse(child, prefix + child_name + '.')

    _recurse(model, prefix='' if hasattr(model, 'bert') else 'bert.')
    print("missing keys:{}".format(missing_keys))
    print('unexpected keys:{}'.format(unexpected_keys))
    print('error msgs:{}'.format(error_msgs))
def torch_save_model(model, output_dir, scores, max_save_num=1):
    """Save `model`'s state dict into output_dir, encoding `scores` in the
    file name, keeping at most max_save_num checkpoints (oldest pruned first)."""
    # Save model checkpoint
    os.makedirs(output_dir, exist_ok=True)
    # Take care of distributed/parallel training: unwrap the container.
    to_save = model.module if hasattr(model, 'module') else model
    existing = sorted(glob(os.path.join(output_dir, '*.pth')))
    while len(existing) >= max_save_num:
        oldest = existing.pop(0).replace('//', '/')
        if os.path.exists(oldest):
            os.remove(oldest)
    file_name = "checkpoint_score"
    for key in scores:
        file_name += ('_' + key + '-' + str(scores[key])[:6])
    file_name += '.pth'
    torch.save(to_save.state_dict(), os.path.join(output_dir, file_name))
    print("Saving model checkpoint to %s", output_dir)
| 4,999 | 33.013605 | 117 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from urllib.parse import urlparse
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Default directory for cached downloads; overridable via the
# PYTORCH_PRETRAINED_BERT_CACHE environment variable.
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
                                               Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    pieces = [sha256(url.encode('utf-8')).hexdigest()]
    if etag:
        pieces.append(sha256(etag.encode('utf-8')).hexdigest())
    return '.'.join(pieces)
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if cache_dir is None else cache_dir
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Both the cached file and its sidecar metadata must be present.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise FileNotFoundError("file {} not found".format(required))

    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if cache_dir is None else cache_dir
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: serve it from the cache (downloading if necessary).
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Existing local file: return it unchanged.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but it does not exist.
        raise FileNotFoundError("file {} not found".format(url_or_filename))
    # Something unknown
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError("bad s3 path {}".format(url))
    # urlparse keeps a single leading '/' on the key; drop exactly one.
    key = parsed.path[1:] if parsed.path.startswith("/") else parsed.path
    return parsed.netloc, key
def s3_request(func: Callable):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """

    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate S3's 404 into the standard FileNotFoundError;
            # everything else propagates untouched.
            if int(exc.response["Error"]["Code"]) == 404:
                raise FileNotFoundError("file {} not found".format(url))
            raise

    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object."""
    bucket_name, s3_path = split_s3_path(url)
    s3_object = boto3.resource("s3").Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3 into `temp_file`."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` into `temp_file`, showing a byte-level progress bar."""
    response = requests.get(url, stream=True)
    content_length = response.headers.get('Content-Length')
    progress = tqdm(unit="B", total=int(content_length) if content_length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        # Keep-alive chunks arrive as empty bytes; skip them.
        if chunk:
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache key is a hash of the URL plus the resource's ETag (when the
    server provides one), so updated remote files get re-downloaded.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            logger.info("creating metadata file for %s", cache_path)
            # Record url/etag in a sidecar .json so filename_to_url can
            # recover them later.
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r', encoding='utf-8') as handle:
        return {line.rstrip() for line in handle}
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return path's extension, optionally without the leading dot and/or
    lower-cased (both on by default)."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 8,020 | 32.560669 | 98 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/official_tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import os
import logging
import six
from tools.file_utils import cached_path
logger = logging.getLogger(__name__)
# Download URLs for the vocabularies of Google's released BERT models,
# keyed by model name; consumed by BertTokenizer.from_pretrained.
PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
# Expected file name of the vocabulary inside a local model directory.
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
    """Load a one-token-per-line vocabulary file into an OrderedDict
    mapping token -> line index."""
    with open(vocab_file, "r", encoding="utf-8") as reader:
        lines = reader.readlines()
    vocab = collections.OrderedDict()
    for index, token in enumerate(lines):
        vocab[token.strip()] = index
    return vocab
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no arguments already collapses runs of whitespace and
    # drops leading/trailing whitespace, returning [] for blank input.
    return text.split()
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`.

    Accepts `str` (returned unchanged) or `bytes` (decoded as UTF-8 with
    invalid sequences ignored); any other type raises ValueError.
    """
    # Python 3 only: the original six.PY2 branch referenced the removed
    # `unicode` builtin and is dead code in this (Python 3) repository,
    # so the six dependency is no longer needed here.
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Accepts `str` (returned unchanged) or `bytes` (decoded as UTF-8 with
    invalid sequences ignored); any other type raises ValueError.
    """
    # Python 3 only: the original six.PY2 branch referenced the removed
    # `unicode` builtin and is dead code in this (Python 3) repository.
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
class BertTokenizer(object):
    """Runs end-to-end tokenization: punctuation splitting + wordpiece"""

    def __init__(self, vocab_file, do_lower_case=True):
        """Load the vocabulary and build the basic/wordpiece sub-tokenizers.

        Args:
            vocab_file: path to a one-token-per-line vocabulary file.
            do_lower_case: whether BasicTokenizer lower-cases the input.
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # Inverse mapping id -> token, used by convert_ids_to_tokens.
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Split `text` into wordpiece sub-tokens (basic tokenization first)."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab."""
        ids = []
        for token in tokens:
            ids.append(self.vocab[token])
        return ids

    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids in wordpiece tokens using the vocab."""
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens

    @classmethod
    def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file.
        Download and cache the pre-trained model file if needed.

        Returns None (after logging an error) when the vocabulary cannot be
        resolved to a local file or downloadable URL.
        """
        if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
        else:
            # Not a known model name: treat it as a path (dir or vocab file).
            vocab_file = pretrained_model_name
        if os.path.isdir(vocab_file):
            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
        except FileNotFoundError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    vocab_file))
            return None
        if resolved_vocab_file == vocab_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        # Instantiate tokenizer.
        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
        return tokenizer
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.
        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text.

        Returns a list of tokens split on whitespace and punctuation, with
        each CJK character emitted as its own token.
        """
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks
        # (category 'Mn'), which are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        # Each punctuation char becomes its own token; runs of other chars
        # are accumulated into a single token.
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NULs, the Unicode replacement char, and control characters.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        # vocab: mapping of token -> id; only membership tests are used here.
        self.vocab = vocab
        # Token substituted for words that cannot be decomposed into pieces.
        self.unk_token = unk_token
        # Words longer than this are mapped straight to unk_token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]
        Args:
          text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
        Returns:
          A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedily find the longest vocab entry starting at `start`;
                # non-initial pieces carry the '##' continuation prefix.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No piece matched at this position: the whole word
                    # collapses to unk_token.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | 14,426 | 36.18299 | 133 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/zh_wiki.py | # -*- coding: utf-8 -*-
# copy fom wikipedia
zh2Hant = {
'呆': '獃',
"打印机": "印表機",
'帮助文件': '說明檔案',
"画": "畫",
"龙": "竜",
"板": "板",
"表": "表",
"才": "才",
"丑": "醜",
"出": "出",
"淀": "澱",
"冬": "冬",
"范": "範",
"丰": "豐",
"刮": "刮",
"后": "後",
"胡": "胡",
"回": "回",
"伙": "夥",
"姜": "薑",
"借": "借",
"克": "克",
"困": "困",
"漓": "漓",
"里": "里",
"帘": "簾",
"霉": "霉",
"面": "面",
"蔑": "蔑",
"千": "千",
"秋": "秋",
"松": "松",
"咸": "咸",
"向": "向",
"余": "餘",
"郁": "鬱",
"御": "御",
"愿": "願",
"云": "雲",
"芸": "芸",
"沄": "沄",
"致": "致",
"制": "制",
"朱": "朱",
"筑": "築",
"准": "準",
"厂": "廠",
"广": "廣",
"辟": "闢",
"别": "別",
"卜": "卜",
"沈": "沈",
"冲": "沖",
"种": "種",
"虫": "蟲",
"担": "擔",
"党": "黨",
"斗": "鬥",
"儿": "兒",
"干": "乾",
"谷": "谷",
"柜": "櫃",
"合": "合",
"划": "劃",
"坏": "壞",
"几": "幾",
"系": "系",
"家": "家",
"价": "價",
"据": "據",
"卷": "捲",
"适": "適",
"蜡": "蠟",
"腊": "臘",
"了": "了",
"累": "累",
"么": "麽",
"蒙": "蒙",
"万": "萬",
"宁": "寧",
"朴": "樸",
"苹": "蘋",
"仆": "僕",
"曲": "曲",
"确": "確",
"舍": "舍",
"胜": "勝",
"术": "術",
"台": "台",
"体": "體",
"涂": "塗",
"叶": "葉",
"吁": "吁",
"旋": "旋",
"佣": "傭",
"与": "與",
"折": "折",
"征": "徵",
"症": "症",
"恶": "惡",
"发": "發",
"复": "復",
"汇": "匯",
"获": "獲",
"饥": "飢",
"尽": "盡",
"历": "歷",
"卤": "滷",
"弥": "彌",
"签": "簽",
"纤": "纖",
"苏": "蘇",
"坛": "壇",
"团": "團",
"须": "須",
"脏": "臟",
"只": "只",
"钟": "鐘",
"药": "藥",
"同": "同",
"志": "志",
"杯": "杯",
"岳": "岳",
"布": "布",
"当": "當",
"吊": "弔",
"仇": "仇",
"蕴": "蘊",
"线": "線",
"为": "為",
"产": "產",
"众": "眾",
"伪": "偽",
"凫": "鳧",
"厕": "廁",
"启": "啟",
"墙": "牆",
"壳": "殼",
"奖": "獎",
"妫": "媯",
"并": "並",
"录": "錄",
"悫": "愨",
"极": "極",
"沩": "溈",
"瘘": "瘺",
"硷": "鹼",
"竖": "豎",
"绝": "絕",
"绣": "繡",
"绦": "絛",
"绱": "緔",
"绷": "綳",
"绿": "綠",
"缰": "韁",
"苧": "苎",
"莼": "蒓",
"说": "說",
"谣": "謠",
"谫": "譾",
"赃": "贓",
"赍": "齎",
"赝": "贗",
"酝": "醞",
"采": "採",
"钩": "鉤",
"钵": "缽",
"锈": "銹",
"锐": "銳",
"锨": "杴",
"镌": "鐫",
"镢": "钁",
"阅": "閱",
"颓": "頹",
"颜": "顏",
"骂": "罵",
"鲇": "鯰",
"鲞": "鯗",
"鳄": "鱷",
"鸡": "雞",
"鹚": "鶿",
"荡": "盪",
"锤": "錘",
"㟆": "㠏",
"㛟": "𡞵",
"专": "專",
"业": "業",
"丛": "叢",
"东": "東",
"丝": "絲",
"丢": "丟",
"两": "兩",
"严": "嚴",
"丧": "喪",
"个": "個",
"临": "臨",
"丽": "麗",
"举": "舉",
"义": "義",
"乌": "烏",
"乐": "樂",
"乔": "喬",
"习": "習",
"乡": "鄉",
"书": "書",
"买": "買",
"乱": "亂",
"争": "爭",
"于": "於",
"亏": "虧",
"亚": "亞",
"亩": "畝",
"亲": "親",
"亵": "褻",
"亸": "嚲",
"亿": "億",
"仅": "僅",
"从": "從",
"仑": "侖",
"仓": "倉",
"仪": "儀",
"们": "們",
"优": "優",
"会": "會",
"伛": "傴",
"伞": "傘",
"伟": "偉",
"传": "傳",
"伣": "俔",
"伤": "傷",
"伥": "倀",
"伦": "倫",
"伧": "傖",
"伫": "佇",
"佥": "僉",
"侠": "俠",
"侣": "侶",
"侥": "僥",
"侦": "偵",
"侧": "側",
"侨": "僑",
"侩": "儈",
"侪": "儕",
"侬": "儂",
"俣": "俁",
"俦": "儔",
"俨": "儼",
"俩": "倆",
"俪": "儷",
"俫": "倈",
"俭": "儉",
"债": "債",
"倾": "傾",
"偬": "傯",
"偻": "僂",
"偾": "僨",
"偿": "償",
"傥": "儻",
"傧": "儐",
"储": "儲",
"傩": "儺",
"㑩": "儸",
"兑": "兌",
"兖": "兗",
"兰": "蘭",
"关": "關",
"兴": "興",
"兹": "茲",
"养": "養",
"兽": "獸",
"冁": "囅",
"内": "內",
"冈": "岡",
"册": "冊",
"写": "寫",
"军": "軍",
"农": "農",
"冯": "馮",
"决": "決",
"况": "況",
"冻": "凍",
"净": "凈",
"凉": "涼",
"减": "減",
"凑": "湊",
"凛": "凜",
"凤": "鳳",
"凭": "憑",
"凯": "凱",
"击": "擊",
"凿": "鑿",
"刍": "芻",
"刘": "劉",
"则": "則",
"刚": "剛",
"创": "創",
"删": "刪",
"刬": "剗",
"刭": "剄",
"刹": "剎",
"刽": "劊",
"刿": "劌",
"剀": "剴",
"剂": "劑",
"剐": "剮",
"剑": "劍",
"剥": "剝",
"剧": "劇",
"㓥": "劏",
"㔉": "劚",
"劝": "勸",
"办": "辦",
"务": "務",
"劢": "勱",
"动": "動",
"励": "勵",
"劲": "勁",
"劳": "勞",
"势": "勢",
"勋": "勛",
"勚": "勩",
"匀": "勻",
"匦": "匭",
"匮": "匱",
"区": "區",
"医": "醫",
"华": "華",
"协": "協",
"单": "單",
"卖": "賣",
"卢": "盧",
"卫": "衛",
"却": "卻",
"厅": "廳",
"厉": "厲",
"压": "壓",
"厌": "厭",
"厍": "厙",
"厐": "龎",
"厘": "釐",
"厢": "廂",
"厣": "厴",
"厦": "廈",
"厨": "廚",
"厩": "廄",
"厮": "廝",
"县": "縣",
"叁": "叄",
"参": "參",
"双": "雙",
"变": "變",
"叙": "敘",
"叠": "疊",
"号": "號",
"叹": "嘆",
"叽": "嘰",
"吓": "嚇",
"吕": "呂",
"吗": "嗎",
"吣": "唚",
"吨": "噸",
"听": "聽",
"吴": "吳",
"呐": "吶",
"呒": "嘸",
"呓": "囈",
"呕": "嘔",
"呖": "嚦",
"呗": "唄",
"员": "員",
"呙": "咼",
"呛": "嗆",
"呜": "嗚",
"咏": "詠",
"咙": "嚨",
"咛": "嚀",
"咝": "噝",
"咤": "吒",
"响": "響",
"哑": "啞",
"哒": "噠",
"哓": "嘵",
"哔": "嗶",
"哕": "噦",
"哗": "嘩",
"哙": "噲",
"哜": "嚌",
"哝": "噥",
"哟": "喲",
"唛": "嘜",
"唝": "嗊",
"唠": "嘮",
"唡": "啢",
"唢": "嗩",
"唤": "喚",
"啧": "嘖",
"啬": "嗇",
"啭": "囀",
"啮": "嚙",
"啴": "嘽",
"啸": "嘯",
"㖞": "喎",
"喷": "噴",
"喽": "嘍",
"喾": "嚳",
"嗫": "囁",
"嗳": "噯",
"嘘": "噓",
"嘤": "嚶",
"嘱": "囑",
"㖊": "噚",
"噜": "嚕",
"嚣": "囂",
"园": "園",
"囱": "囪",
"围": "圍",
"囵": "圇",
"国": "國",
"图": "圖",
"圆": "圓",
"圣": "聖",
"圹": "壙",
"场": "場",
"坂": "阪",
"块": "塊",
"坚": "堅",
"坜": "壢",
"坝": "壩",
"坞": "塢",
"坟": "墳",
"坠": "墜",
"垄": "壟",
"垅": "壠",
"垆": "壚",
"垒": "壘",
"垦": "墾",
"垩": "堊",
"垫": "墊",
"垭": "埡",
"垱": "壋",
"垲": "塏",
"垴": "堖",
"埘": "塒",
"埙": "塤",
"埚": "堝",
"埯": "垵",
"堑": "塹",
"堕": "墮",
"𡒄": "壈",
"壮": "壯",
"声": "聲",
"壶": "壺",
"壸": "壼",
"处": "處",
"备": "備",
"够": "夠",
"头": "頭",
"夸": "誇",
"夹": "夾",
"夺": "奪",
"奁": "奩",
"奂": "奐",
"奋": "奮",
"奥": "奧",
"奸": "姦",
"妆": "妝",
"妇": "婦",
"妈": "媽",
"妩": "嫵",
"妪": "嫗",
"姗": "姍",
"姹": "奼",
"娄": "婁",
"娅": "婭",
"娆": "嬈",
"娇": "嬌",
"娈": "孌",
"娱": "娛",
"娲": "媧",
"娴": "嫻",
"婳": "嫿",
"婴": "嬰",
"婵": "嬋",
"婶": "嬸",
"媪": "媼",
"嫒": "嬡",
"嫔": "嬪",
"嫱": "嬙",
"嬷": "嬤",
"孙": "孫",
"学": "學",
"孪": "孿",
"宝": "寶",
"实": "實",
"宠": "寵",
"审": "審",
"宪": "憲",
"宫": "宮",
"宽": "寬",
"宾": "賓",
"寝": "寢",
"对": "對",
"寻": "尋",
"导": "導",
"寿": "壽",
"将": "將",
"尔": "爾",
"尘": "塵",
"尝": "嘗",
"尧": "堯",
"尴": "尷",
"尸": "屍",
"层": "層",
"屃": "屓",
"屉": "屜",
"届": "屆",
"属": "屬",
"屡": "屢",
"屦": "屨",
"屿": "嶼",
"岁": "歲",
"岂": "豈",
"岖": "嶇",
"岗": "崗",
"岘": "峴",
"岙": "嶴",
"岚": "嵐",
"岛": "島",
"岭": "嶺",
"岽": "崬",
"岿": "巋",
"峄": "嶧",
"峡": "峽",
"峣": "嶢",
"峤": "嶠",
"峥": "崢",
"峦": "巒",
"崂": "嶗",
"崃": "崍",
"崄": "嶮",
"崭": "嶄",
"嵘": "嶸",
"嵚": "嶔",
"嵝": "嶁",
"巅": "巔",
"巩": "鞏",
"巯": "巰",
"币": "幣",
"帅": "帥",
"师": "師",
"帏": "幃",
"帐": "帳",
"帜": "幟",
"带": "帶",
"帧": "幀",
"帮": "幫",
"帱": "幬",
"帻": "幘",
"帼": "幗",
"幂": "冪",
"庄": "莊",
"庆": "慶",
"庐": "廬",
"庑": "廡",
"库": "庫",
"应": "應",
"庙": "廟",
"庞": "龐",
"废": "廢",
"廪": "廩",
"开": "開",
"异": "異",
"弃": "棄",
"弑": "弒",
"张": "張",
"弪": "弳",
"弯": "彎",
"弹": "彈",
"强": "強",
"归": "歸",
"彝": "彞",
"彦": "彥",
"彻": "徹",
"径": "徑",
"徕": "徠",
"忆": "憶",
"忏": "懺",
"忧": "憂",
"忾": "愾",
"怀": "懷",
"态": "態",
"怂": "慫",
"怃": "憮",
"怄": "慪",
"怅": "悵",
"怆": "愴",
"怜": "憐",
"总": "總",
"怼": "懟",
"怿": "懌",
"恋": "戀",
"恒": "恆",
"恳": "懇",
"恸": "慟",
"恹": "懨",
"恺": "愷",
"恻": "惻",
"恼": "惱",
"恽": "惲",
"悦": "悅",
"悬": "懸",
"悭": "慳",
"悮": "悞",
"悯": "憫",
"惊": "驚",
"惧": "懼",
"惨": "慘",
"惩": "懲",
"惫": "憊",
"惬": "愜",
"惭": "慚",
"惮": "憚",
"惯": "慣",
"愠": "慍",
"愤": "憤",
"愦": "憒",
"慑": "懾",
"懑": "懣",
"懒": "懶",
"懔": "懍",
"戆": "戇",
"戋": "戔",
"戏": "戲",
"戗": "戧",
"战": "戰",
"戬": "戩",
"戯": "戱",
"户": "戶",
"扑": "撲",
"执": "執",
"扩": "擴",
"扪": "捫",
"扫": "掃",
"扬": "揚",
"扰": "擾",
"抚": "撫",
"抛": "拋",
"抟": "摶",
"抠": "摳",
"抡": "掄",
"抢": "搶",
"护": "護",
"报": "報",
"拟": "擬",
"拢": "攏",
"拣": "揀",
"拥": "擁",
"拦": "攔",
"拧": "擰",
"拨": "撥",
"择": "擇",
"挂": "掛",
"挚": "摯",
"挛": "攣",
"挜": "掗",
"挝": "撾",
"挞": "撻",
"挟": "挾",
"挠": "撓",
"挡": "擋",
"挢": "撟",
"挣": "掙",
"挤": "擠",
"挥": "揮",
"挦": "撏",
"挽": "輓",
"捝": "挩",
"捞": "撈",
"损": "損",
"捡": "撿",
"换": "換",
"捣": "搗",
"掳": "擄",
"掴": "摑",
"掷": "擲",
"掸": "撣",
"掺": "摻",
"掼": "摜",
"揽": "攬",
"揾": "搵",
"揿": "撳",
"搀": "攙",
"搁": "擱",
"搂": "摟",
"搅": "攪",
"携": "攜",
"摄": "攝",
"摅": "攄",
"摆": "擺",
"摇": "搖",
"摈": "擯",
"摊": "攤",
"撄": "攖",
"撑": "撐",
"㧑": "撝",
"撵": "攆",
"撷": "擷",
"撸": "擼",
"撺": "攛",
"㧟": "擓",
"擞": "擻",
"攒": "攢",
"敌": "敵",
"敛": "斂",
"数": "數",
"斋": "齋",
"斓": "斕",
"斩": "斬",
"断": "斷",
"无": "無",
"旧": "舊",
"时": "時",
"旷": "曠",
"旸": "暘",
"昙": "曇",
"昼": "晝",
"昽": "曨",
"显": "顯",
"晋": "晉",
"晒": "曬",
"晓": "曉",
"晔": "曄",
"晕": "暈",
"晖": "暉",
"暂": "暫",
"暧": "曖",
"机": "機",
"杀": "殺",
"杂": "雜",
"权": "權",
"杆": "桿",
"条": "條",
"来": "來",
"杨": "楊",
"杩": "榪",
"杰": "傑",
"构": "構",
"枞": "樅",
"枢": "樞",
"枣": "棗",
"枥": "櫪",
"枧": "梘",
"枨": "棖",
"枪": "槍",
"枫": "楓",
"枭": "梟",
"柠": "檸",
"柽": "檉",
"栀": "梔",
"栅": "柵",
"标": "標",
"栈": "棧",
"栉": "櫛",
"栊": "櫳",
"栋": "棟",
"栌": "櫨",
"栎": "櫟",
"栏": "欄",
"树": "樹",
"栖": "棲",
"栗": "慄",
"样": "樣",
"栾": "欒",
"桠": "椏",
"桡": "橈",
"桢": "楨",
"档": "檔",
"桤": "榿",
"桥": "橋",
"桦": "樺",
"桧": "檜",
"桨": "槳",
"桩": "樁",
"梦": "夢",
"梼": "檮",
"梾": "棶",
"梿": "槤",
"检": "檢",
"棁": "梲",
"棂": "欞",
"椁": "槨",
"椟": "櫝",
"椠": "槧",
"椤": "欏",
"椭": "橢",
"楼": "樓",
"榄": "欖",
"榅": "榲",
"榇": "櫬",
"榈": "櫚",
"榉": "櫸",
"槚": "檟",
"槛": "檻",
"槟": "檳",
"槠": "櫧",
"横": "橫",
"樯": "檣",
"樱": "櫻",
"橥": "櫫",
"橱": "櫥",
"橹": "櫓",
"橼": "櫞",
"檩": "檁",
"欢": "歡",
"欤": "歟",
"欧": "歐",
"歼": "殲",
"殁": "歿",
"殇": "殤",
"残": "殘",
"殒": "殞",
"殓": "殮",
"殚": "殫",
"殡": "殯",
"㱮": "殨",
"㱩": "殰",
"殴": "毆",
"毁": "毀",
"毂": "轂",
"毕": "畢",
"毙": "斃",
"毡": "氈",
"毵": "毿",
"氇": "氌",
"气": "氣",
"氢": "氫",
"氩": "氬",
"氲": "氳",
"汉": "漢",
"汤": "湯",
"汹": "洶",
"沟": "溝",
"没": "沒",
"沣": "灃",
"沤": "漚",
"沥": "瀝",
"沦": "淪",
"沧": "滄",
"沪": "滬",
"泞": "濘",
"注": "註",
"泪": "淚",
"泶": "澩",
"泷": "瀧",
"泸": "瀘",
"泺": "濼",
"泻": "瀉",
"泼": "潑",
"泽": "澤",
"泾": "涇",
"洁": "潔",
"洒": "灑",
"洼": "窪",
"浃": "浹",
"浅": "淺",
"浆": "漿",
"浇": "澆",
"浈": "湞",
"浊": "濁",
"测": "測",
"浍": "澮",
"济": "濟",
"浏": "瀏",
"浐": "滻",
"浑": "渾",
"浒": "滸",
"浓": "濃",
"浔": "潯",
"涛": "濤",
"涝": "澇",
"涞": "淶",
"涟": "漣",
"涠": "潿",
"涡": "渦",
"涣": "渙",
"涤": "滌",
"润": "潤",
"涧": "澗",
"涨": "漲",
"涩": "澀",
"渊": "淵",
"渌": "淥",
"渍": "漬",
"渎": "瀆",
"渐": "漸",
"渑": "澠",
"渔": "漁",
"渖": "瀋",
"渗": "滲",
"温": "溫",
"湾": "灣",
"湿": "濕",
"溃": "潰",
"溅": "濺",
"溆": "漵",
"滗": "潷",
"滚": "滾",
"滞": "滯",
"滟": "灧",
"滠": "灄",
"满": "滿",
"滢": "瀅",
"滤": "濾",
"滥": "濫",
"滦": "灤",
"滨": "濱",
"滩": "灘",
"滪": "澦",
"漤": "灠",
"潆": "瀠",
"潇": "瀟",
"潋": "瀲",
"潍": "濰",
"潜": "潛",
"潴": "瀦",
"澜": "瀾",
"濑": "瀨",
"濒": "瀕",
"㲿": "瀇",
"灏": "灝",
"灭": "滅",
"灯": "燈",
"灵": "靈",
"灶": "竈",
"灾": "災",
"灿": "燦",
"炀": "煬",
"炉": "爐",
"炖": "燉",
"炜": "煒",
"炝": "熗",
"点": "點",
"炼": "煉",
"炽": "熾",
"烁": "爍",
"烂": "爛",
"烃": "烴",
"烛": "燭",
"烟": "煙",
"烦": "煩",
"烧": "燒",
"烨": "燁",
"烩": "燴",
"烫": "燙",
"烬": "燼",
"热": "熱",
"焕": "煥",
"焖": "燜",
"焘": "燾",
"㶽": "煱",
"煴": "熅",
"㶶": "燶",
"爱": "愛",
"爷": "爺",
"牍": "牘",
"牦": "氂",
"牵": "牽",
"牺": "犧",
"犊": "犢",
"状": "狀",
"犷": "獷",
"犸": "獁",
"犹": "猶",
"狈": "狽",
"狝": "獮",
"狞": "獰",
"独": "獨",
"狭": "狹",
"狮": "獅",
"狯": "獪",
"狰": "猙",
"狱": "獄",
"狲": "猻",
"猃": "獫",
"猎": "獵",
"猕": "獼",
"猡": "玀",
"猪": "豬",
"猫": "貓",
"猬": "蝟",
"献": "獻",
"獭": "獺",
"㺍": "獱",
"玑": "璣",
"玚": "瑒",
"玛": "瑪",
"玮": "瑋",
"环": "環",
"现": "現",
"玱": "瑲",
"玺": "璽",
"珐": "琺",
"珑": "瓏",
"珰": "璫",
"珲": "琿",
"琏": "璉",
"琐": "瑣",
"琼": "瓊",
"瑶": "瑤",
"瑷": "璦",
"璎": "瓔",
"瓒": "瓚",
"瓯": "甌",
"电": "電",
"画": "畫",
"畅": "暢",
"畴": "疇",
"疖": "癤",
"疗": "療",
"疟": "瘧",
"疠": "癘",
"疡": "瘍",
"疬": "癧",
"疭": "瘲",
"疮": "瘡",
"疯": "瘋",
"疱": "皰",
"疴": "痾",
"痈": "癰",
"痉": "痙",
"痒": "癢",
"痖": "瘂",
"痨": "癆",
"痪": "瘓",
"痫": "癇",
"瘅": "癉",
"瘆": "瘮",
"瘗": "瘞",
"瘪": "癟",
"瘫": "癱",
"瘾": "癮",
"瘿": "癭",
"癞": "癩",
"癣": "癬",
"癫": "癲",
"皑": "皚",
"皱": "皺",
"皲": "皸",
"盏": "盞",
"盐": "鹽",
"监": "監",
"盖": "蓋",
"盗": "盜",
"盘": "盤",
"眍": "瞘",
"眦": "眥",
"眬": "矓",
"着": "著",
"睁": "睜",
"睐": "睞",
"睑": "瞼",
"瞆": "瞶",
"瞒": "瞞",
"䁖": "瞜",
"瞩": "矚",
"矫": "矯",
"矶": "磯",
"矾": "礬",
"矿": "礦",
"砀": "碭",
"码": "碼",
"砖": "磚",
"砗": "硨",
"砚": "硯",
"砜": "碸",
"砺": "礪",
"砻": "礱",
"砾": "礫",
"础": "礎",
"硁": "硜",
"硕": "碩",
"硖": "硤",
"硗": "磽",
"硙": "磑",
"碍": "礙",
"碛": "磧",
"碜": "磣",
"碱": "鹼",
"礼": "禮",
"祃": "禡",
"祎": "禕",
"祢": "禰",
"祯": "禎",
"祷": "禱",
"祸": "禍",
"禀": "稟",
"禄": "祿",
"禅": "禪",
"离": "離",
"秃": "禿",
"秆": "稈",
"积": "積",
"称": "稱",
"秽": "穢",
"秾": "穠",
"稆": "穭",
"税": "稅",
"䅉": "稏",
"稣": "穌",
"稳": "穩",
"穑": "穡",
"穷": "窮",
"窃": "竊",
"窍": "竅",
"窎": "窵",
"窑": "窯",
"窜": "竄",
"窝": "窩",
"窥": "窺",
"窦": "竇",
"窭": "窶",
"竞": "競",
"笃": "篤",
"笋": "筍",
"笔": "筆",
"笕": "筧",
"笺": "箋",
"笼": "籠",
"笾": "籩",
"筚": "篳",
"筛": "篩",
"筜": "簹",
"筝": "箏",
"䇲": "筴",
"筹": "籌",
"筼": "篔",
"简": "簡",
"箓": "籙",
"箦": "簀",
"箧": "篋",
"箨": "籜",
"箩": "籮",
"箪": "簞",
"箫": "簫",
"篑": "簣",
"篓": "簍",
"篮": "籃",
"篱": "籬",
"簖": "籪",
"籁": "籟",
"籴": "糴",
"类": "類",
"籼": "秈",
"粜": "糶",
"粝": "糲",
"粤": "粵",
"粪": "糞",
"粮": "糧",
"糁": "糝",
"糇": "餱",
"紧": "緊",
"䌷": "紬",
"䌹": "絅",
"絷": "縶",
"䌼": "綐",
"䌽": "綵",
"䌸": "縳",
"䍁": "繸",
"䍀": "繿",
"纟": "糹",
"纠": "糾",
"纡": "紆",
"红": "紅",
"纣": "紂",
"纥": "紇",
"约": "約",
"级": "級",
"纨": "紈",
"纩": "纊",
"纪": "紀",
"纫": "紉",
"纬": "緯",
"纭": "紜",
"纮": "紘",
"纯": "純",
"纰": "紕",
"纱": "紗",
"纲": "綱",
"纳": "納",
"纴": "紝",
"纵": "縱",
"纶": "綸",
"纷": "紛",
"纸": "紙",
"纹": "紋",
"纺": "紡",
"纻": "紵",
"纼": "紖",
"纽": "紐",
"纾": "紓",
"绀": "紺",
"绁": "紲",
"绂": "紱",
"练": "練",
"组": "組",
"绅": "紳",
"细": "細",
"织": "織",
"终": "終",
"绉": "縐",
"绊": "絆",
"绋": "紼",
"绌": "絀",
"绍": "紹",
"绎": "繹",
"经": "經",
"绐": "紿",
"绑": "綁",
"绒": "絨",
"结": "結",
"绔": "絝",
"绕": "繞",
"绖": "絰",
"绗": "絎",
"绘": "繪",
"给": "給",
"绚": "絢",
"绛": "絳",
"络": "絡",
"绞": "絞",
"统": "統",
"绠": "綆",
"绡": "綃",
"绢": "絹",
"绤": "綌",
"绥": "綏",
"继": "繼",
"绨": "綈",
"绩": "績",
"绪": "緒",
"绫": "綾",
"绬": "緓",
"续": "續",
"绮": "綺",
"绯": "緋",
"绰": "綽",
"绲": "緄",
"绳": "繩",
"维": "維",
"绵": "綿",
"绶": "綬",
"绸": "綢",
"绹": "綯",
"绺": "綹",
"绻": "綣",
"综": "綜",
"绽": "綻",
"绾": "綰",
"缀": "綴",
"缁": "緇",
"缂": "緙",
"缃": "緗",
"缄": "緘",
"缅": "緬",
"缆": "纜",
"缇": "緹",
"缈": "緲",
"缉": "緝",
"缊": "縕",
"缋": "繢",
"缌": "緦",
"缍": "綞",
"缎": "緞",
"缏": "緶",
"缑": "緱",
"缒": "縋",
"缓": "緩",
"缔": "締",
"缕": "縷",
"编": "編",
"缗": "緡",
"缘": "緣",
"缙": "縉",
"缚": "縛",
"缛": "縟",
"缜": "縝",
"缝": "縫",
"缞": "縗",
"缟": "縞",
"缠": "纏",
"缡": "縭",
"缢": "縊",
"缣": "縑",
"缤": "繽",
"缥": "縹",
"缦": "縵",
"缧": "縲",
"缨": "纓",
"缩": "縮",
"缪": "繆",
"缫": "繅",
"缬": "纈",
"缭": "繚",
"缮": "繕",
"缯": "繒",
"缱": "繾",
"缲": "繰",
"缳": "繯",
"缴": "繳",
"缵": "纘",
"罂": "罌",
"网": "網",
"罗": "羅",
"罚": "罰",
"罢": "罷",
"罴": "羆",
"羁": "羈",
"羟": "羥",
"翘": "翹",
"耢": "耮",
"耧": "耬",
"耸": "聳",
"耻": "恥",
"聂": "聶",
"聋": "聾",
"职": "職",
"聍": "聹",
"联": "聯",
"聩": "聵",
"聪": "聰",
"肃": "肅",
"肠": "腸",
"肤": "膚",
"肮": "骯",
"肴": "餚",
"肾": "腎",
"肿": "腫",
"胀": "脹",
"胁": "脅",
"胆": "膽",
"胧": "朧",
"胨": "腖",
"胪": "臚",
"胫": "脛",
"胶": "膠",
"脉": "脈",
"脍": "膾",
"脐": "臍",
"脑": "腦",
"脓": "膿",
"脔": "臠",
"脚": "腳",
"脱": "脫",
"脶": "腡",
"脸": "臉",
"腭": "齶",
"腻": "膩",
"腼": "靦",
"腽": "膃",
"腾": "騰",
"膑": "臏",
"臜": "臢",
"舆": "輿",
"舣": "艤",
"舰": "艦",
"舱": "艙",
"舻": "艫",
"艰": "艱",
"艳": "艷",
"艺": "藝",
"节": "節",
"芈": "羋",
"芗": "薌",
"芜": "蕪",
"芦": "蘆",
"苁": "蓯",
"苇": "葦",
"苈": "藶",
"苋": "莧",
"苌": "萇",
"苍": "蒼",
"苎": "苧",
"茎": "莖",
"茏": "蘢",
"茑": "蔦",
"茔": "塋",
"茕": "煢",
"茧": "繭",
"荆": "荊",
"荐": "薦",
"荙": "薘",
"荚": "莢",
"荛": "蕘",
"荜": "蓽",
"荞": "蕎",
"荟": "薈",
"荠": "薺",
"荣": "榮",
"荤": "葷",
"荥": "滎",
"荦": "犖",
"荧": "熒",
"荨": "蕁",
"荩": "藎",
"荪": "蓀",
"荫": "蔭",
"荬": "蕒",
"荭": "葒",
"荮": "葤",
"莅": "蒞",
"莱": "萊",
"莲": "蓮",
"莳": "蒔",
"莴": "萵",
"莶": "薟",
"莸": "蕕",
"莹": "瑩",
"莺": "鶯",
"萝": "蘿",
"萤": "螢",
"营": "營",
"萦": "縈",
"萧": "蕭",
"萨": "薩",
"葱": "蔥",
"蒇": "蕆",
"蒉": "蕢",
"蒋": "蔣",
"蒌": "蔞",
"蓝": "藍",
"蓟": "薊",
"蓠": "蘺",
"蓣": "蕷",
"蓥": "鎣",
"蓦": "驀",
"蔂": "虆",
"蔷": "薔",
"蔹": "蘞",
"蔺": "藺",
"蔼": "藹",
"蕰": "薀",
"蕲": "蘄",
"薮": "藪",
"䓕": "薳",
"藓": "蘚",
"蘖": "櫱",
"虏": "虜",
"虑": "慮",
"虚": "虛",
"虬": "虯",
"虮": "蟣",
"虽": "雖",
"虾": "蝦",
"虿": "蠆",
"蚀": "蝕",
"蚁": "蟻",
"蚂": "螞",
"蚕": "蠶",
"蚬": "蜆",
"蛊": "蠱",
"蛎": "蠣",
"蛏": "蟶",
"蛮": "蠻",
"蛰": "蟄",
"蛱": "蛺",
"蛲": "蟯",
"蛳": "螄",
"蛴": "蠐",
"蜕": "蛻",
"蜗": "蝸",
"蝇": "蠅",
"蝈": "蟈",
"蝉": "蟬",
"蝼": "螻",
"蝾": "蠑",
"螀": "螿",
"螨": "蟎",
"䗖": "螮",
"蟏": "蠨",
"衅": "釁",
"衔": "銜",
"补": "補",
"衬": "襯",
"衮": "袞",
"袄": "襖",
"袅": "裊",
"袆": "褘",
"袜": "襪",
"袭": "襲",
"袯": "襏",
"装": "裝",
"裆": "襠",
"裈": "褌",
"裢": "褳",
"裣": "襝",
"裤": "褲",
"裥": "襇",
"褛": "褸",
"褴": "襤",
"䙓": "襬",
"见": "見",
"观": "觀",
"觃": "覎",
"规": "規",
"觅": "覓",
"视": "視",
"觇": "覘",
"览": "覽",
"觉": "覺",
"觊": "覬",
"觋": "覡",
"觌": "覿",
"觍": "覥",
"觎": "覦",
"觏": "覯",
"觐": "覲",
"觑": "覷",
"觞": "觴",
"触": "觸",
"觯": "觶",
"訚": "誾",
"䜣": "訢",
"誉": "譽",
"誊": "謄",
"䜧": "譅",
"讠": "訁",
"计": "計",
"订": "訂",
"讣": "訃",
"认": "認",
"讥": "譏",
"讦": "訐",
"讧": "訌",
"讨": "討",
"让": "讓",
"讪": "訕",
"讫": "訖",
"讬": "託",
"训": "訓",
"议": "議",
"讯": "訊",
"记": "記",
"讱": "訒",
"讲": "講",
"讳": "諱",
"讴": "謳",
"讵": "詎",
"讶": "訝",
"讷": "訥",
"许": "許",
"讹": "訛",
"论": "論",
"讻": "訩",
"讼": "訟",
"讽": "諷",
"设": "設",
"访": "訪",
"诀": "訣",
"证": "證",
"诂": "詁",
"诃": "訶",
"评": "評",
"诅": "詛",
"识": "識",
"诇": "詗",
"诈": "詐",
"诉": "訴",
"诊": "診",
"诋": "詆",
"诌": "謅",
"词": "詞",
"诎": "詘",
"诏": "詔",
"诐": "詖",
"译": "譯",
"诒": "詒",
"诓": "誆",
"诔": "誄",
"试": "試",
"诖": "詿",
"诗": "詩",
"诘": "詰",
"诙": "詼",
"诚": "誠",
"诛": "誅",
"诜": "詵",
"话": "話",
"诞": "誕",
"诟": "詬",
"诠": "詮",
"诡": "詭",
"询": "詢",
"诣": "詣",
"诤": "諍",
"该": "該",
"详": "詳",
"诧": "詫",
"诨": "諢",
"诩": "詡",
"诪": "譸",
"诫": "誡",
"诬": "誣",
"语": "語",
"诮": "誚",
"误": "誤",
"诰": "誥",
"诱": "誘",
"诲": "誨",
"诳": "誑",
"诵": "誦",
"诶": "誒",
"请": "請",
"诸": "諸",
"诹": "諏",
"诺": "諾",
"读": "讀",
"诼": "諑",
"诽": "誹",
"课": "課",
"诿": "諉",
"谀": "諛",
"谁": "誰",
"谂": "諗",
"调": "調",
"谄": "諂",
"谅": "諒",
"谆": "諄",
"谇": "誶",
"谈": "談",
"谊": "誼",
"谋": "謀",
"谌": "諶",
"谍": "諜",
"谎": "謊",
"谏": "諫",
"谐": "諧",
"谑": "謔",
"谒": "謁",
"谓": "謂",
"谔": "諤",
"谕": "諭",
"谖": "諼",
"谗": "讒",
"谘": "諮",
"谙": "諳",
"谚": "諺",
"谛": "諦",
"谜": "謎",
"谝": "諞",
"谞": "諝",
"谟": "謨",
"谠": "讜",
"谡": "謖",
"谢": "謝",
"谤": "謗",
"谥": "謚",
"谦": "謙",
"谧": "謐",
"谨": "謹",
"谩": "謾",
"谪": "謫",
"谬": "謬",
"谭": "譚",
"谮": "譖",
"谯": "譙",
"谰": "讕",
"谱": "譜",
"谲": "譎",
"谳": "讞",
"谴": "譴",
"谵": "譫",
"谶": "讖",
"豮": "豶",
"䝙": "貙",
"䞐": "賰",
"贝": "貝",
"贞": "貞",
"负": "負",
"贠": "貟",
"贡": "貢",
"财": "財",
"责": "責",
"贤": "賢",
"败": "敗",
"账": "賬",
"货": "貨",
"质": "質",
"贩": "販",
"贪": "貪",
"贫": "貧",
"贬": "貶",
"购": "購",
"贮": "貯",
"贯": "貫",
"贰": "貳",
"贱": "賤",
"贲": "賁",
"贳": "貰",
"贴": "貼",
"贵": "貴",
"贶": "貺",
"贷": "貸",
"贸": "貿",
"费": "費",
"贺": "賀",
"贻": "貽",
"贼": "賊",
"贽": "贄",
"贾": "賈",
"贿": "賄",
"赀": "貲",
"赁": "賃",
"赂": "賂",
"资": "資",
"赅": "賅",
"赆": "贐",
"赇": "賕",
"赈": "賑",
"赉": "賚",
"赊": "賒",
"赋": "賦",
"赌": "賭",
"赎": "贖",
"赏": "賞",
"赐": "賜",
"赑": "贔",
"赒": "賙",
"赓": "賡",
"赔": "賠",
"赕": "賧",
"赖": "賴",
"赗": "賵",
"赘": "贅",
"赙": "賻",
"赚": "賺",
"赛": "賽",
"赜": "賾",
"赞": "贊",
"赟": "贇",
"赠": "贈",
"赡": "贍",
"赢": "贏",
"赣": "贛",
"赪": "赬",
"赵": "趙",
"赶": "趕",
"趋": "趨",
"趱": "趲",
"趸": "躉",
"跃": "躍",
"跄": "蹌",
"跞": "躒",
"践": "踐",
"跶": "躂",
"跷": "蹺",
"跸": "蹕",
"跹": "躚",
"跻": "躋",
"踊": "踴",
"踌": "躊",
"踪": "蹤",
"踬": "躓",
"踯": "躑",
"蹑": "躡",
"蹒": "蹣",
"蹰": "躕",
"蹿": "躥",
"躏": "躪",
"躜": "躦",
"躯": "軀",
"车": "車",
"轧": "軋",
"轨": "軌",
"轩": "軒",
"轪": "軑",
"轫": "軔",
"转": "轉",
"轭": "軛",
"轮": "輪",
"软": "軟",
"轰": "轟",
"轱": "軲",
"轲": "軻",
"轳": "轤",
"轴": "軸",
"轵": "軹",
"轶": "軼",
"轷": "軤",
"轸": "軫",
"轹": "轢",
"轺": "軺",
"轻": "輕",
"轼": "軾",
"载": "載",
"轾": "輊",
"轿": "轎",
"辀": "輈",
"辁": "輇",
"辂": "輅",
"较": "較",
"辄": "輒",
"辅": "輔",
"辆": "輛",
"辇": "輦",
"辈": "輩",
"辉": "輝",
"辊": "輥",
"辋": "輞",
"辌": "輬",
"辍": "輟",
"辎": "輜",
"辏": "輳",
"辐": "輻",
"辑": "輯",
"辒": "轀",
"输": "輸",
"辔": "轡",
"辕": "轅",
"辖": "轄",
"辗": "輾",
"辘": "轆",
"辙": "轍",
"辚": "轔",
"辞": "辭",
"辩": "辯",
"辫": "辮",
"边": "邊",
"辽": "遼",
"达": "達",
"迁": "遷",
"过": "過",
"迈": "邁",
"运": "運",
"还": "還",
"这": "這",
"进": "進",
"远": "遠",
"违": "違",
"连": "連",
"迟": "遲",
"迩": "邇",
"迳": "逕",
"迹": "跡",
"选": "選",
"逊": "遜",
"递": "遞",
"逦": "邐",
"逻": "邏",
"遗": "遺",
"遥": "遙",
"邓": "鄧",
"邝": "鄺",
"邬": "鄔",
"邮": "郵",
"邹": "鄒",
"邺": "鄴",
"邻": "鄰",
"郏": "郟",
"郐": "鄶",
"郑": "鄭",
"郓": "鄆",
"郦": "酈",
"郧": "鄖",
"郸": "鄲",
"酂": "酇",
"酦": "醱",
"酱": "醬",
"酽": "釅",
"酾": "釃",
"酿": "釀",
"释": "釋",
"鉴": "鑒",
"銮": "鑾",
"錾": "鏨",
"𨱏": "鎝",
"钅": "釒",
"钆": "釓",
"钇": "釔",
"针": "針",
"钉": "釘",
"钊": "釗",
"钋": "釙",
"钌": "釕",
"钍": "釷",
"钎": "釺",
"钏": "釧",
"钐": "釤",
"钑": "鈒",
"钒": "釩",
"钓": "釣",
"钔": "鍆",
"钕": "釹",
"钖": "鍚",
"钗": "釵",
"钘": "鈃",
"钙": "鈣",
"钚": "鈈",
"钛": "鈦",
"钜": "鉅",
"钝": "鈍",
"钞": "鈔",
"钠": "鈉",
"钡": "鋇",
"钢": "鋼",
"钣": "鈑",
"钤": "鈐",
"钥": "鑰",
"钦": "欽",
"钧": "鈞",
"钨": "鎢",
"钪": "鈧",
"钫": "鈁",
"钬": "鈥",
"钭": "鈄",
"钮": "鈕",
"钯": "鈀",
"钰": "鈺",
"钱": "錢",
"钲": "鉦",
"钳": "鉗",
"钴": "鈷",
"钶": "鈳",
"钷": "鉕",
"钸": "鈽",
"钹": "鈸",
"钺": "鉞",
"钻": "鑽",
"钼": "鉬",
"钽": "鉭",
"钾": "鉀",
"钿": "鈿",
"铀": "鈾",
"铁": "鐵",
"铂": "鉑",
"铃": "鈴",
"铄": "鑠",
"铅": "鉛",
"铆": "鉚",
"铇": "鉋",
"铈": "鈰",
"铉": "鉉",
"铊": "鉈",
"铋": "鉍",
"铌": "鈮",
"铍": "鈹",
"铎": "鐸",
"铏": "鉶",
"铐": "銬",
"铑": "銠",
"铒": "鉺",
"铓": "鋩",
"铔": "錏",
"铕": "銪",
"铖": "鋮",
"铗": "鋏",
"铘": "鋣",
"铙": "鐃",
"铚": "銍",
"铛": "鐺",
"铜": "銅",
"铝": "鋁",
"铞": "銱",
"铟": "銦",
"铠": "鎧",
"铡": "鍘",
"铢": "銖",
"铣": "銑",
"铤": "鋌",
"铥": "銩",
"铦": "銛",
"铧": "鏵",
"铨": "銓",
"铩": "鎩",
"铪": "鉿",
"铫": "銚",
"铬": "鉻",
"铭": "銘",
"铮": "錚",
"铯": "銫",
"铰": "鉸",
"铱": "銥",
"铲": "鏟",
"铳": "銃",
"铴": "鐋",
"铵": "銨",
"银": "銀",
"铷": "銣",
"铸": "鑄",
"铹": "鐒",
"铺": "鋪",
"铻": "鋙",
"铼": "錸",
"铽": "鋱",
"链": "鏈",
"铿": "鏗",
"销": "銷",
"锁": "鎖",
"锂": "鋰",
"锃": "鋥",
"锄": "鋤",
"锅": "鍋",
"锆": "鋯",
"锇": "鋨",
"锉": "銼",
"锊": "鋝",
"锋": "鋒",
"锌": "鋅",
"锍": "鋶",
"锎": "鐦",
"锏": "鐧",
"锑": "銻",
"锒": "鋃",
"锓": "鋟",
"锔": "鋦",
"锕": "錒",
"锖": "錆",
"锗": "鍺",
"锘": "鍩",
"错": "錯",
"锚": "錨",
"锛": "錛",
"锜": "錡",
"锝": "鍀",
"锞": "錁",
"锟": "錕",
"锠": "錩",
"锡": "錫",
"锢": "錮",
"锣": "鑼",
"锥": "錐",
"锦": "錦",
"锧": "鑕",
"锩": "錈",
"锪": "鍃",
"锫": "錇",
"锬": "錟",
"锭": "錠",
"键": "鍵",
"锯": "鋸",
"锰": "錳",
"锱": "錙",
"锲": "鍥",
"锳": "鍈",
"锴": "鍇",
"锵": "鏘",
"锶": "鍶",
"锷": "鍔",
"锸": "鍤",
"锹": "鍬",
"锺": "鍾",
"锻": "鍛",
"锼": "鎪",
"锽": "鍠",
"锾": "鍰",
"锿": "鎄",
"镀": "鍍",
"镁": "鎂",
"镂": "鏤",
"镃": "鎡",
"镄": "鐨",
"镅": "鎇",
"镆": "鏌",
"镇": "鎮",
"镈": "鎛",
"镉": "鎘",
"镊": "鑷",
"镋": "鎲",
"镍": "鎳",
"镎": "鎿",
"镏": "鎦",
"镐": "鎬",
"镑": "鎊",
"镒": "鎰",
"镓": "鎵",
"镔": "鑌",
"镕": "鎔",
"镖": "鏢",
"镗": "鏜",
"镘": "鏝",
"镙": "鏍",
"镚": "鏰",
"镛": "鏞",
"镜": "鏡",
"镝": "鏑",
"镞": "鏃",
"镟": "鏇",
"镠": "鏐",
"镡": "鐔",
"镣": "鐐",
"镤": "鏷",
"镥": "鑥",
"镦": "鐓",
"镧": "鑭",
"镨": "鐠",
"镩": "鑹",
"镪": "鏹",
"镫": "鐙",
"镬": "鑊",
"镭": "鐳",
"镮": "鐶",
"镯": "鐲",
"镰": "鐮",
"镱": "鐿",
"镲": "鑔",
"镳": "鑣",
"镴": "鑞",
"镵": "鑱",
"镶": "鑲",
"长": "長",
"门": "門",
"闩": "閂",
"闪": "閃",
"闫": "閆",
"闬": "閈",
"闭": "閉",
"问": "問",
"闯": "闖",
"闰": "閏",
"闱": "闈",
"闲": "閑",
"闳": "閎",
"间": "間",
"闵": "閔",
"闶": "閌",
"闷": "悶",
"闸": "閘",
"闹": "鬧",
"闺": "閨",
"闻": "聞",
"闼": "闥",
"闽": "閩",
"闾": "閭",
"闿": "闓",
"阀": "閥",
"阁": "閣",
"阂": "閡",
"阃": "閫",
"阄": "鬮",
"阆": "閬",
"阇": "闍",
"阈": "閾",
"阉": "閹",
"阊": "閶",
"阋": "鬩",
"阌": "閿",
"阍": "閽",
"阎": "閻",
"阏": "閼",
"阐": "闡",
"阑": "闌",
"阒": "闃",
"阓": "闠",
"阔": "闊",
"阕": "闋",
"阖": "闔",
"阗": "闐",
"阘": "闒",
"阙": "闕",
"阚": "闞",
"阛": "闤",
"队": "隊",
"阳": "陽",
"阴": "陰",
"阵": "陣",
"阶": "階",
"际": "際",
"陆": "陸",
"陇": "隴",
"陈": "陳",
"陉": "陘",
"陕": "陝",
"陧": "隉",
"陨": "隕",
"险": "險",
"随": "隨",
"隐": "隱",
"隶": "隸",
"隽": "雋",
"难": "難",
"雏": "雛",
"雠": "讎",
"雳": "靂",
"雾": "霧",
"霁": "霽",
"霡": "霢",
"霭": "靄",
"靓": "靚",
"静": "靜",
"靥": "靨",
"䩄": "靦",
"鞑": "韃",
"鞒": "鞽",
"鞯": "韉",
"韦": "韋",
"韧": "韌",
"韨": "韍",
"韩": "韓",
"韪": "韙",
"韫": "韞",
"韬": "韜",
"韵": "韻",
"页": "頁",
"顶": "頂",
"顷": "頃",
"顸": "頇",
"项": "項",
"顺": "順",
"顼": "頊",
"顽": "頑",
"顾": "顧",
"顿": "頓",
"颀": "頎",
"颁": "頒",
"颂": "頌",
"颃": "頏",
"预": "預",
"颅": "顱",
"领": "領",
"颇": "頗",
"颈": "頸",
"颉": "頡",
"颊": "頰",
"颋": "頲",
"颌": "頜",
"颍": "潁",
"颎": "熲",
"颏": "頦",
"颐": "頤",
"频": "頻",
"颒": "頮",
"颔": "頷",
"颕": "頴",
"颖": "穎",
"颗": "顆",
"题": "題",
"颙": "顒",
"颚": "顎",
"颛": "顓",
"额": "額",
"颞": "顳",
"颟": "顢",
"颠": "顛",
"颡": "顙",
"颢": "顥",
"颤": "顫",
"颥": "顬",
"颦": "顰",
"颧": "顴",
"风": "風",
"飏": "颺",
"飐": "颭",
"飑": "颮",
"飒": "颯",
"飓": "颶",
"飔": "颸",
"飕": "颼",
"飖": "颻",
"飗": "飀",
"飘": "飄",
"飙": "飆",
"飚": "飈",
"飞": "飛",
"飨": "饗",
"餍": "饜",
"饣": "飠",
"饤": "飣",
"饦": "飥",
"饧": "餳",
"饨": "飩",
"饩": "餼",
"饪": "飪",
"饫": "飫",
"饬": "飭",
"饭": "飯",
"饮": "飲",
"饯": "餞",
"饰": "飾",
"饱": "飽",
"饲": "飼",
"饳": "飿",
"饴": "飴",
"饵": "餌",
"饶": "饒",
"饷": "餉",
"饸": "餄",
"饹": "餎",
"饺": "餃",
"饻": "餏",
"饼": "餅",
"饽": "餑",
"饾": "餖",
"饿": "餓",
"馀": "餘",
"馁": "餒",
"馂": "餕",
"馃": "餜",
"馄": "餛",
"馅": "餡",
"馆": "館",
"馇": "餷",
"馈": "饋",
"馉": "餶",
"馊": "餿",
"馋": "饞",
"馌": "饁",
"馍": "饃",
"馎": "餺",
"馏": "餾",
"馐": "饈",
"馑": "饉",
"馒": "饅",
"馓": "饊",
"馔": "饌",
"馕": "饢",
"䯄": "騧",
"马": "馬",
"驭": "馭",
"驮": "馱",
"驯": "馴",
"驰": "馳",
"驱": "驅",
"驲": "馹",
"驳": "駁",
"驴": "驢",
"驵": "駔",
"驶": "駛",
"驷": "駟",
"驸": "駙",
"驹": "駒",
"驺": "騶",
"驻": "駐",
"驼": "駝",
"驽": "駑",
"驾": "駕",
"驿": "驛",
"骀": "駘",
"骁": "驍",
"骃": "駰",
"骄": "驕",
"骅": "驊",
"骆": "駱",
"骇": "駭",
"骈": "駢",
"骉": "驫",
"骊": "驪",
"骋": "騁",
"验": "驗",
"骍": "騂",
"骎": "駸",
"骏": "駿",
"骐": "騏",
"骑": "騎",
"骒": "騍",
"骓": "騅",
"骔": "騌",
"骕": "驌",
"骖": "驂",
"骗": "騙",
"骘": "騭",
"骙": "騤",
"骚": "騷",
"骛": "騖",
"骜": "驁",
"骝": "騮",
"骞": "騫",
"骟": "騸",
"骠": "驃",
"骡": "騾",
"骢": "驄",
"骣": "驏",
"骤": "驟",
"骥": "驥",
"骦": "驦",
"骧": "驤",
"髅": "髏",
"髋": "髖",
"髌": "髕",
"鬓": "鬢",
"魇": "魘",
"魉": "魎",
"鱼": "魚",
"鱽": "魛",
"鱾": "魢",
"鱿": "魷",
"鲀": "魨",
"鲁": "魯",
"鲂": "魴",
"鲃": "䰾",
"鲄": "魺",
"鲅": "鮁",
"鲆": "鮃",
"鲈": "鱸",
"鲉": "鮋",
"鲊": "鮓",
"鲋": "鮒",
"鲌": "鮊",
"鲍": "鮑",
"鲎": "鱟",
"鲏": "鮍",
"鲐": "鮐",
"鲑": "鮭",
"鲒": "鮚",
"鲓": "鮳",
"鲔": "鮪",
"鲕": "鮞",
"鲖": "鮦",
"鲗": "鰂",
"鲘": "鮜",
"鲙": "鱠",
"鲚": "鱭",
"鲛": "鮫",
"鲜": "鮮",
"鲝": "鮺",
"鲟": "鱘",
"鲠": "鯁",
"鲡": "鱺",
"鲢": "鰱",
"鲣": "鰹",
"鲤": "鯉",
"鲥": "鰣",
"鲦": "鰷",
"鲧": "鯀",
"鲨": "鯊",
"鲩": "鯇",
"鲪": "鮶",
"鲫": "鯽",
"鲬": "鯒",
"鲭": "鯖",
"鲮": "鯪",
"鲯": "鯕",
"鲰": "鯫",
"鲱": "鯡",
"鲲": "鯤",
"鲳": "鯧",
"鲴": "鯝",
"鲵": "鯢",
"鲶": "鯰",
"鲷": "鯛",
"鲸": "鯨",
"鲹": "鰺",
"鲺": "鯴",
"鲻": "鯔",
"鲼": "鱝",
"鲽": "鰈",
"鲾": "鰏",
"鲿": "鱨",
"鳀": "鯷",
"鳁": "鰮",
"鳂": "鰃",
"鳃": "鰓",
"鳅": "鰍",
"鳆": "鰒",
"鳇": "鰉",
"鳈": "鰁",
"鳉": "鱂",
"鳊": "鯿",
"鳋": "鰠",
"鳌": "鰲",
"鳍": "鰭",
"鳎": "鰨",
"鳏": "鰥",
"鳐": "鰩",
"鳑": "鰟",
"鳒": "鰜",
"鳓": "鰳",
"鳔": "鰾",
"鳕": "鱈",
"鳖": "鱉",
"鳗": "鰻",
"鳘": "鰵",
"鳙": "鱅",
"鳚": "䲁",
"鳛": "鰼",
"鳜": "鱖",
"鳝": "鱔",
"鳞": "鱗",
"鳟": "鱒",
"鳠": "鱯",
"鳡": "鱤",
"鳢": "鱧",
"鳣": "鱣",
"䴓": "鳾",
"䴕": "鴷",
"䴔": "鵁",
"䴖": "鶄",
"䴗": "鶪",
"䴘": "鷈",
"䴙": "鷿",
"㶉": "鸂",
"鸟": "鳥",
"鸠": "鳩",
"鸢": "鳶",
"鸣": "鳴",
"鸤": "鳲",
"鸥": "鷗",
"鸦": "鴉",
"鸧": "鶬",
"鸨": "鴇",
"鸩": "鴆",
"鸪": "鴣",
"鸫": "鶇",
"鸬": "鸕",
"鸭": "鴨",
"鸮": "鴞",
"鸯": "鴦",
"鸰": "鴒",
"鸱": "鴟",
"鸲": "鴝",
"鸳": "鴛",
"鸴": "鷽",
"鸵": "鴕",
"鸶": "鷥",
"鸷": "鷙",
"鸸": "鴯",
"鸹": "鴰",
"鸺": "鵂",
"鸻": "鴴",
"鸼": "鵃",
"鸽": "鴿",
"鸾": "鸞",
"鸿": "鴻",
"鹀": "鵐",
"鹁": "鵓",
"鹂": "鸝",
"鹃": "鵑",
"鹄": "鵠",
"鹅": "鵝",
"鹆": "鵒",
"鹇": "鷳",
"鹈": "鵜",
"鹉": "鵡",
"鹊": "鵲",
"鹋": "鶓",
"鹌": "鵪",
"鹍": "鵾",
"鹎": "鵯",
"鹏": "鵬",
"鹐": "鵮",
"鹑": "鶉",
"鹒": "鶊",
"鹓": "鵷",
"鹔": "鷫",
"鹕": "鶘",
"鹖": "鶡",
"鹗": "鶚",
"鹘": "鶻",
"鹙": "鶖",
"鹛": "鶥",
"鹜": "鶩",
"鹝": "鷊",
"鹞": "鷂",
"鹟": "鶲",
"鹠": "鶹",
"鹡": "鶺",
"鹢": "鷁",
"鹣": "鶼",
"鹤": "鶴",
"鹥": "鷖",
"鹦": "鸚",
"鹧": "鷓",
"鹨": "鷚",
"鹩": "鷯",
"鹪": "鷦",
"鹫": "鷲",
"鹬": "鷸",
"鹭": "鷺",
"鹯": "鸇",
"鹰": "鷹",
"鹱": "鸌",
"鹲": "鸏",
"鹳": "鸛",
"鹴": "鸘",
"鹾": "鹺",
"麦": "麥",
"麸": "麩",
"黄": "黃",
"黉": "黌",
"黡": "黶",
"黩": "黷",
"黪": "黲",
"黾": "黽",
"鼋": "黿",
"鼍": "鼉",
"鼗": "鞀",
"鼹": "鼴",
"齐": "齊",
"齑": "齏",
"齿": "齒",
"龀": "齔",
"龁": "齕",
"龂": "齗",
"龃": "齟",
"龄": "齡",
"龅": "齙",
"龆": "齠",
"龇": "齜",
"龈": "齦",
"龉": "齬",
"龊": "齪",
"龋": "齲",
"龌": "齷",
"龙": "龍",
"龚": "龔",
"龛": "龕",
"龟": "龜",
"一伙": "一伙",
"一并": "一併",
"一准": "一准",
"一划": "一划",
"一地里": "一地裡",
"一干": "一干",
"一树百获": "一樹百穫",
"一台": "一臺",
"一冲": "一衝",
"一只": "一隻",
"一发千钧": "一髮千鈞",
"一出": "一齣",
"七只": "七隻",
"三元里": "三元裡",
"三国志": "三國誌",
"三复": "三複",
"三只": "三隻",
"上吊": "上吊",
"上台": "上臺",
"下不了台": "下不了臺",
"下台": "下臺",
"下面": "下麵",
"不准": "不准",
"不吊": "不吊",
"不知就里": "不知就裡",
"不知所云": "不知所云",
"不锈钢": "不鏽鋼",
"丑剧": "丑劇",
"丑旦": "丑旦",
"丑角": "丑角",
"并存着": "並存著",
"中岳": "中嶽",
"中台医专": "中臺醫專",
"丰南": "丰南",
"丰台": "丰台",
"丰姿": "丰姿",
"丰采": "丰采",
"丰韵": "丰韻",
"主干": "主幹",
"么么唱唱": "么么唱唱",
"么儿": "么兒",
"么喝": "么喝",
"么妹": "么妹",
"么弟": "么弟",
"么爷": "么爺",
"九世之雠": "九世之讎",
"九只": "九隻",
"干丝": "乾絲",
"干着急": "乾著急",
"乱发": "亂髮",
"云云": "云云",
"云尔": "云爾",
"五岳": "五嶽",
"五斗柜": "五斗櫃",
"五斗橱": "五斗櫥",
"五谷": "五穀",
"五行生克": "五行生剋",
"五只": "五隻",
"五出": "五齣",
"交卷": "交卷",
"人云亦云": "人云亦云",
"人物志": "人物誌",
"什锦面": "什錦麵",
"什么": "什麼",
"仆倒": "仆倒",
"介系词": "介係詞",
"介系词": "介繫詞",
"仿制": "仿製",
"伙伕": "伙伕",
"伙伴": "伙伴",
"伙同": "伙同",
"伙夫": "伙夫",
"伙房": "伙房",
"伙计": "伙計",
"伙食": "伙食",
"布下": "佈下",
"布告": "佈告",
"布哨": "佈哨",
"布局": "佈局",
"布岗": "佈崗",
"布施": "佈施",
"布景": "佈景",
"布满": "佈滿",
"布线": "佈線",
"布置": "佈置",
"布署": "佈署",
"布道": "佈道",
"布达": "佈達",
"布防": "佈防",
"布阵": "佈陣",
"布雷": "佈雷",
"体育锻鍊": "体育鍛鍊",
"何干": "何干",
"作准": "作准",
"佣人": "佣人",
"佣工": "佣工",
"佣金": "佣金",
"并入": "併入",
"并列": "併列",
"并到": "併到",
"并合": "併合",
"并吞": "併吞",
"并在": "併在",
"并成": "併成",
"并排": "併排",
"并拢": "併攏",
"并案": "併案",
"并为": "併為",
"并发": "併發",
"并科": "併科",
"并购": "併購",
"并进": "併進",
"来复": "來複",
"供制": "供製",
"依依不舍": "依依不捨",
"侵并": "侵併",
"便辟": "便辟",
"系数": "係數",
"系为": "係為",
"保险柜": "保險柜",
"信号台": "信號臺",
"修复": "修複",
"修胡刀": "修鬍刀",
"俯冲": "俯衝",
"个里": "個裡",
"借着": "借著",
"假发": "假髮",
"停制": "停製",
"偷鸡不着": "偷雞不著",
"家伙": "傢伙",
"家俱": "傢俱",
"家具": "傢具",
"传布": "傳佈",
"债台高筑": "債臺高築",
"傻里傻气": "傻裡傻氣",
"倾家荡产": "傾家蕩產",
"倾复": "傾複",
"倾复": "傾覆",
"僱佣": "僱佣",
"仪表": "儀錶",
"亿只": "億隻",
"尽尽": "儘儘",
"尽先": "儘先",
"尽其所有": "儘其所有",
"尽力": "儘力",
"尽快": "儘快",
"尽早": "儘早",
"尽是": "儘是",
"尽管": "儘管",
"尽速": "儘速",
"尽量": "儘量",
"允准": "允准",
"兄台": "兄臺",
"充饥": "充饑",
"光采": "光采",
"克里": "克裡",
"克复": "克複",
"入伙": "入伙",
"内制": "內製",
"两只": "兩隻",
"八字胡": "八字鬍",
"八只": "八隻",
"公布": "公佈",
"公干": "公幹",
"公斗": "公斗",
"公历": "公曆",
"六只": "六隻",
"六出": "六齣",
"兼并": "兼併",
"冤雠": "冤讎",
"准予": "准予",
"准假": "准假",
"准将": "准將",
"准考证": "准考證",
"准许": "准許",
"几几": "几几",
"几案": "几案",
"几丝": "几絲",
"凹洞里": "凹洞裡",
"出征": "出征",
"出锤": "出鎚",
"刀削面": "刀削麵",
"刁斗": "刁斗",
"分布": "分佈",
"切面": "切麵",
"刊布": "刊佈",
"划上": "划上",
"划下": "划下",
"划不来": "划不來",
"划了": "划了",
"划具": "划具",
"划出": "划出",
"划到": "划到",
"划动": "划動",
"划去": "划去",
"划子": "划子",
"划得来": "划得來",
"划拳": "划拳",
"划桨": "划槳",
"划水": "划水",
"划算": "划算",
"划船": "划船",
"划艇": "划艇",
"划着": "划著",
"划着走": "划著走",
"划行": "划行",
"划走": "划走",
"划起": "划起",
"划进": "划進",
"划过": "划過",
"初征": "初征",
"别致": "別緻",
"别着": "別著",
"别只": "別隻",
"利比里亚": "利比裡亞",
"刮着": "刮著",
"刮胡刀": "刮鬍刀",
"剃发": "剃髮",
"剃须": "剃鬚",
"削发": "削髮",
"克制": "剋制",
"克星": "剋星",
"克服": "剋服",
"克死": "剋死",
"克薄": "剋薄",
"前仆后继": "前仆後繼",
"前台": "前臺",
"前车之复": "前車之覆",
"刚才": "剛纔",
"剪发": "剪髮",
"割舍": "割捨",
"创制": "創製",
"加里宁": "加裡寧",
"动荡": "動蕩",
"劳力士表": "勞力士錶",
"包准": "包准",
"包谷": "包穀",
"北斗": "北斗",
"北回": "北迴",
"匡复": "匡複",
"匪干": "匪幹",
"十卷": "十卷",
"十台": "十臺",
"十只": "十隻",
"十出": "十齣",
"千丝万缕": "千絲萬縷",
"千回百折": "千迴百折",
"千回百转": "千迴百轉",
"千钧一发": "千鈞一髮",
"千只": "千隻",
"升斗小民": "升斗小民",
"半只": "半隻",
"南岳": "南嶽",
"南征": "南征",
"南台": "南臺",
"南回": "南迴",
"卡里": "卡裡",
"印制": "印製",
"卷入": "卷入",
"卷取": "卷取",
"卷土重来": "卷土重來",
"卷子": "卷子",
"卷宗": "卷宗",
"卷尺": "卷尺",
"卷层云": "卷層雲",
"卷帙": "卷帙",
"卷扬机": "卷揚機",
"卷曲": "卷曲",
"卷染": "卷染",
"卷烟": "卷煙",
"卷筒": "卷筒",
"卷纬": "卷緯",
"卷绕": "卷繞",
"卷装": "卷裝",
"卷轴": "卷軸",
"卷云": "卷雲",
"卷领": "卷領",
"卷发": "卷髮",
"卷须": "卷鬚",
"参与": "參与",
"参与者": "參与者",
"参合": "參合",
"参考价值": "參考價值",
"参与": "參與",
"参与人员": "參與人員",
"参与制": "參與制",
"参与感": "參與感",
"参与者": "參與者",
"参观团": "參觀團",
"参观团体": "參觀團體",
"参阅": "參閱",
"反冲": "反衝",
"反复": "反複",
"反复": "反覆",
"取舍": "取捨",
"口里": "口裡",
"只准": "只准",
"只冲": "只衝",
"叮当": "叮噹",
"可怜虫": "可憐虫",
"可紧可松": "可緊可鬆",
"台制": "台製",
"司令台": "司令臺",
"吃着不尽": "吃著不盡",
"吃里扒外": "吃裡扒外",
"吃里爬外": "吃裡爬外",
"各吊": "各吊",
"合伙": "合伙",
"合并": "合併",
"合着": "合著",
"合着者": "合著者",
"吊上": "吊上",
"吊下": "吊下",
"吊了": "吊了",
"吊个": "吊個",
"吊儿郎当": "吊兒郎當",
"吊到": "吊到",
"吊去": "吊去",
"吊取": "吊取",
"吊吊": "吊吊",
"吊嗓": "吊嗓",
"吊好": "吊好",
"吊子": "吊子",
"吊带": "吊帶",
"吊带裤": "吊帶褲",
"吊床": "吊床",
"吊得": "吊得",
"吊挂": "吊掛",
"吊挂着": "吊掛著",
"吊杆": "吊杆",
"吊架": "吊架",
"吊桶": "吊桶",
"吊杆": "吊桿",
"吊桥": "吊橋",
"吊死": "吊死",
"吊灯": "吊燈",
"吊环": "吊環",
"吊盘": "吊盤",
"吊索": "吊索",
"吊着": "吊著",
"吊装": "吊裝",
"吊裤": "吊褲",
"吊裤带": "吊褲帶",
"吊袜": "吊襪",
"吊走": "吊走",
"吊起": "吊起",
"吊车": "吊車",
"吊钩": "吊鉤",
"吊销": "吊銷",
"吊钟": "吊鐘",
"同伙": "同伙",
"名表": "名錶",
"后冠": "后冠",
"后土": "后土",
"后妃": "后妃",
"后座": "后座",
"后稷": "后稷",
"后羿": "后羿",
"后里": "后里",
"向着": "向著",
"吞并": "吞併",
"吹发": "吹髮",
"吕后": "呂后",
"獃里獃气": "呆裡呆氣",
"周而复始": "周而複始",
"呼吁": "呼籲",
"和面": "和麵",
"哪里": "哪裡",
"哭脏": "哭髒",
"问卷": "問卷",
"喝采": "喝采",
"单干": "單干",
"单只": "單隻",
"嘴里": "嘴裡",
"恶心": "噁心",
"当啷": "噹啷",
"当当": "噹噹",
"噜苏": "嚕囌",
"向导": "嚮導",
"向往": "嚮往",
"向应": "嚮應",
"向日": "嚮日",
"向迩": "嚮邇",
"严丝合缝": "嚴絲合縫",
"严复": "嚴複",
"四舍五入": "四捨五入",
"四只": "四隻",
"四出": "四齣",
"回丝": "回絲",
"回着": "回著",
"回荡": "回蕩",
"回复": "回覆",
"回采": "回采",
"圈子里": "圈子裡",
"圈里": "圈裡",
"国历": "國曆",
"国雠": "國讎",
"园里": "園裡",
"图里": "圖裡",
"土里": "土裡",
"土制": "土製",
"地志": "地誌",
"坍台": "坍臺",
"坑里": "坑裡",
"坦荡": "坦蕩",
"垂发": "垂髮",
"垮台": "垮臺",
"埋布": "埋佈",
"城里": "城裡",
"基干": "基幹",
"报复": "報複",
"塌台": "塌臺",
"塔台": "塔臺",
"涂着": "塗著",
"墓志": "墓誌",
"墨斗": "墨斗",
"墨索里尼": "墨索裡尼",
"垦复": "墾複",
"垄断价格": "壟斷價格",
"垄断资产": "壟斷資產",
"垄断集团": "壟斷集團",
"壶里": "壺裡",
"寿面": "壽麵",
"夏天里": "夏天裡",
"夏历": "夏曆",
"外制": "外製",
"多冲": "多衝",
"多采多姿": "多采多姿",
"多么": "多麼",
"夜光表": "夜光錶",
"夜里": "夜裡",
"梦里": "夢裡",
"大伙": "大伙",
"大卷": "大卷",
"大干": "大干",
"大干": "大幹",
"大锤": "大鎚",
"大只": "大隻",
"天后": "天后",
"天干": "天干",
"天文台": "天文臺",
"天翻地复": "天翻地覆",
"太后": "太后",
"奏折": "奏摺",
"女丑": "女丑",
"女佣": "女佣",
"好家夥": "好傢夥",
"好戏连台": "好戲連臺",
"如法泡制": "如法泡製",
"妆台": "妝臺",
"姜太公": "姜太公",
"姜子牙": "姜子牙",
"姜丝": "姜絲",
"字汇": "字彙",
"字里行间": "字裡行間",
"存折": "存摺",
"孟姜女": "孟姜女",
"宇宙志": "宇宙誌",
"定准": "定准",
"定制": "定製",
"宣布": "宣佈",
"宫里": "宮裡",
"家伙": "家伙",
"家里": "家裡",
"密布": "密佈",
"寇雠": "寇讎",
"实干": "實幹",
"写字台": "寫字檯",
"写字台": "寫字臺",
"宽松": "寬鬆",
"封面里": "封面裡",
"射干": "射干",
"对表": "對錶",
"小丑": "小丑",
"小伙": "小伙",
"小只": "小隻",
"少吊": "少吊",
"尺布斗粟": "尺布斗粟",
"尼克松": "尼克鬆",
"尼采": "尼采",
"尿斗": "尿斗",
"局里": "局裡",
"居里": "居裡",
"屋子里": "屋子裡",
"屋里": "屋裡",
"展布": "展佈",
"屡仆屡起": "屢仆屢起",
"屯里": "屯裡",
"山岳": "山嶽",
"山里": "山裡",
"峰回": "峰迴",
"巡回": "巡迴",
"巧干": "巧幹",
"巴尔干": "巴爾幹",
"巴里": "巴裡",
"巷里": "巷裡",
"市里": "市裡",
"布谷": "布穀",
"希腊": "希腊",
"帘子": "帘子",
"帘布": "帘布",
"席卷": "席卷",
"带团参加": "帶團參加",
"带发修行": "帶髮修行",
"干休": "干休",
"干系": "干係",
"干卿何事": "干卿何事",
"干将": "干將",
"干戈": "干戈",
"干挠": "干撓",
"干扰": "干擾",
"干支": "干支",
"干政": "干政",
"干时": "干時",
"干涉": "干涉",
"干犯": "干犯",
"干与": "干與",
"干着急": "干著急",
"干贝": "干貝",
"干预": "干預",
"平台": "平臺",
"年历": "年曆",
"年里": "年裡",
"干上": "幹上",
"干下去": "幹下去",
"干了": "幹了",
"干事": "幹事",
"干些": "幹些",
"干个": "幹個",
"干劲": "幹勁",
"干员": "幹員",
"干吗": "幹嗎",
"干嘛": "幹嘛",
"干坏事": "幹壞事",
"干完": "幹完",
"干得": "幹得",
"干性油": "幹性油",
"干才": "幹才",
"干掉": "幹掉",
"干校": "幹校",
"干活": "幹活",
"干流": "幹流",
"干球温度": "幹球溫度",
"干线": "幹線",
"干练": "幹練",
"干警": "幹警",
"干起来": "幹起來",
"干路": "幹路",
"干道": "幹道",
"干部": "幹部",
"干么": "幹麼",
"几丝": "幾絲",
"几只": "幾隻",
"几出": "幾齣",
"底里": "底裡",
"康采恩": "康采恩",
"庙里": "廟裡",
"建台": "建臺",
"弄脏": "弄髒",
"弔卷": "弔卷",
"弘历": "弘曆",
"别扭": "彆扭",
"别拗": "彆拗",
"别气": "彆氣",
"别脚": "彆腳",
"别着": "彆著",
"弹子台": "彈子檯",
"弹药": "彈葯",
"汇报": "彙報",
"汇整": "彙整",
"汇编": "彙編",
"汇总": "彙總",
"汇纂": "彙纂",
"汇辑": "彙輯",
"汇集": "彙集",
"形单影只": "形單影隻",
"影后": "影后",
"往里": "往裡",
"往复": "往複",
"征伐": "征伐",
"征兵": "征兵",
"征尘": "征塵",
"征夫": "征夫",
"征战": "征戰",
"征收": "征收",
"征服": "征服",
"征求": "征求",
"征发": "征發",
"征衣": "征衣",
"征讨": "征討",
"征途": "征途",
"后台": "後臺",
"从里到外": "從裡到外",
"从里向外": "從裡向外",
"复雠": "復讎",
"复辟": "復辟",
"德干高原": "德干高原",
"心愿": "心愿",
"心荡神驰": "心蕩神馳",
"心里": "心裡",
"忙里": "忙裡",
"快干": "快幹",
"快冲": "快衝",
"怎么": "怎麼",
"怎么着": "怎麼著",
"怒发冲冠": "怒髮衝冠",
"急冲而下": "急衝而下",
"怪里怪气": "怪裡怪氣",
"恩准": "恩准",
"情有所钟": "情有所鍾",
"意面": "意麵",
"慌里慌张": "慌裡慌張",
"慰借": "慰藉",
"忧郁": "憂郁",
"凭吊": "憑吊",
"凭借": "憑藉",
"凭借着": "憑藉著",
"蒙懂": "懞懂",
"怀里": "懷裡",
"怀表": "懷錶",
"悬吊": "懸吊",
"恋恋不舍": "戀戀不捨",
"戏台": "戲臺",
"戴表": "戴錶",
"戽斗": "戽斗",
"房里": "房裡",
"手不释卷": "手不釋卷",
"手卷": "手卷",
"手折": "手摺",
"手里": "手裡",
"手表": "手錶",
"手松": "手鬆",
"才干": "才幹",
"才高八斗": "才高八斗",
"打谷": "打穀",
"扞御": "扞禦",
"批准": "批准",
"批复": "批複",
"批复": "批覆",
"承制": "承製",
"抗御": "抗禦",
"折冲": "折衝",
"披复": "披覆",
"披发": "披髮",
"抱朴": "抱朴",
"抵御": "抵禦",
"拆伙": "拆伙",
"拆台": "拆臺",
"拈须": "拈鬚",
"拉纤": "拉縴",
"拉面": "拉麵",
"拖吊": "拖吊",
"拗别": "拗彆",
"拮据": "拮据",
"振荡": "振蕩",
"捍御": "捍禦",
"舍不得": "捨不得",
"舍出": "捨出",
"舍去": "捨去",
"舍命": "捨命",
"舍己从人": "捨己從人",
"舍己救人": "捨己救人",
"舍己为人": "捨己為人",
"舍己为公": "捨己為公",
"舍己为国": "捨己為國",
"舍得": "捨得",
"舍我其谁": "捨我其誰",
"舍本逐末": "捨本逐末",
"舍弃": "捨棄",
"舍死忘生": "捨死忘生",
"舍生": "捨生",
"舍短取长": "捨短取長",
"舍身": "捨身",
"舍车保帅": "捨車保帥",
"舍近求远": "捨近求遠",
"捲发": "捲髮",
"捵面": "捵麵",
"扫荡": "掃蕩",
"掌柜": "掌柜",
"排骨面": "排骨麵",
"挂帘": "掛帘",
"挂面": "掛麵",
"接着说": "接著說",
"提心吊胆": "提心吊膽",
"插图卷": "插圖卷",
"换吊": "換吊",
"换只": "換隻",
"换发": "換髮",
"摇荡": "搖蕩",
"搭伙": "搭伙",
"折合": "摺合",
"折奏": "摺奏",
"折子": "摺子",
"折尺": "摺尺",
"折扇": "摺扇",
"折梯": "摺梯",
"折椅": "摺椅",
"折叠": "摺疊",
"折痕": "摺痕",
"折篷": "摺篷",
"折纸": "摺紙",
"折裙": "摺裙",
"撒布": "撒佈",
"撚须": "撚鬚",
"撞球台": "撞球檯",
"擂台": "擂臺",
"担仔面": "擔仔麵",
"担担面": "擔擔麵",
"担着": "擔著",
"担负着": "擔負著",
"据云": "據云",
"擢发难数": "擢髮難數",
"摆布": "擺佈",
"摄制": "攝製",
"支干": "支幹",
"收获": "收穫",
"改制": "改製",
"攻克": "攻剋",
"放荡": "放蕩",
"放松": "放鬆",
"叙说着": "敘說著",
"散伙": "散伙",
"散布": "散佈",
"散荡": "散蕩",
"散发": "散髮",
"整只": "整隻",
"整出": "整齣",
"文采": "文采",
"斗六": "斗六",
"斗南": "斗南",
"斗大": "斗大",
"斗子": "斗子",
"斗室": "斗室",
"斗方": "斗方",
"斗栱": "斗栱",
"斗笠": "斗笠",
"斗箕": "斗箕",
"斗篷": "斗篷",
"斗胆": "斗膽",
"斗转参横": "斗轉參橫",
"斗量": "斗量",
"斗门": "斗門",
"料斗": "料斗",
"斯里兰卡": "斯裡蘭卡",
"新历": "新曆",
"断头台": "斷頭臺",
"方才": "方纔",
"施舍": "施捨",
"旋绕着": "旋繞著",
"旋回": "旋迴",
"族里": "族裡",
"日历": "日曆",
"日志": "日誌",
"日进斗金": "日進斗金",
"明了": "明瞭",
"明窗净几": "明窗淨几",
"明里": "明裡",
"星斗": "星斗",
"星历": "星曆",
"星移斗换": "星移斗換",
"星移斗转": "星移斗轉",
"星罗棋布": "星羅棋佈",
"星辰表": "星辰錶",
"春假里": "春假裡",
"春天里": "春天裡",
"晃荡": "晃蕩",
"景致": "景緻",
"暗地里": "暗地裡",
"暗沟里": "暗溝裡",
"暗里": "暗裡",
"历数": "曆數",
"历书": "曆書",
"历法": "曆法",
"书卷": "書卷",
"会干": "會幹",
"会里": "會裡",
"月历": "月曆",
"月台": "月臺",
"有只": "有隻",
"木制": "木製",
"本台": "本臺",
"朴子": "朴子",
"朴实": "朴實",
"朴硝": "朴硝",
"朴素": "朴素",
"朴资茅斯": "朴資茅斯",
"村里": "村裡",
"束发": "束髮",
"东岳": "東嶽",
"东征": "東征",
"松赞干布": "松贊干布",
"板着脸": "板著臉",
"板荡": "板蕩",
"枕借": "枕藉",
"林宏岳": "林宏嶽",
"枝干": "枝幹",
"枯干": "枯幹",
"某只": "某隻",
"染发": "染髮",
"柜上": "柜上",
"柜台": "柜台",
"柜子": "柜子",
"查卷": "查卷",
"查号台": "查號臺",
"校雠学": "校讎學",
"核准": "核准",
"核复": "核覆",
"格里": "格裡",
"案卷": "案卷",
"条干": "條幹",
"棉卷": "棉卷",
"棉制": "棉製",
"植发": "植髮",
"楼台": "樓臺",
"标志着": "標志著",
"标致": "標緻",
"标志": "標誌",
"模制": "模製",
"树干": "樹幹",
"横征暴敛": "橫征暴斂",
"横冲": "橫衝",
"档卷": "檔卷",
"检复": "檢覆",
"台子": "檯子",
"台布": "檯布",
"台灯": "檯燈",
"台球": "檯球",
"台面": "檯面",
"柜台": "櫃檯",
"柜台": "櫃臺",
"栏干": "欄干",
"欺蒙": "欺矇",
"歌后": "歌后",
"欧几里得": "歐幾裡得",
"正当着": "正當著",
"武后": "武后",
"武松": "武鬆",
"归并": "歸併",
"死里求生": "死裡求生",
"死里逃生": "死裡逃生",
"残卷": "殘卷",
"杀虫药": "殺虫藥",
"壳里": "殼裡",
"母后": "母后",
"每只": "每隻",
"比干": "比干",
"毛卷": "毛卷",
"毛发": "毛髮",
"毫发": "毫髮",
"气冲牛斗": "氣沖牛斗",
"气象台": "氣象臺",
"氯霉素": "氯黴素",
"水斗": "水斗",
"水里": "水裡",
"水表": "水錶",
"永历": "永曆",
"污蔑": "汙衊",
"池里": "池裡",
"污蔑": "污衊",
"沈着": "沈著",
"没事干": "沒事幹",
"没精打采": "沒精打采",
"冲着": "沖著",
"沙里淘金": "沙裡淘金",
"河里": "河裡",
"油面": "油麵",
"泡面": "泡麵",
"泰斗": "泰斗",
"洗手不干": "洗手不幹",
"洗发精": "洗髮精",
"派团参加": "派團參加",
"流荡": "流蕩",
"浩荡": "浩蕩",
"浪琴表": "浪琴錶",
"浪荡": "浪蕩",
"浮荡": "浮蕩",
"海里": "海裡",
"涂着": "涂著",
"液晶表": "液晶錶",
"凉面": "涼麵",
"淡朱": "淡硃",
"淫荡": "淫蕩",
"测验卷": "測驗卷",
"港制": "港製",
"游荡": "游蕩",
"凑合着": "湊合著",
"湖里": "湖裡",
"汤团": "湯糰",
"汤面": "湯麵",
"卤制": "滷製",
"卤面": "滷麵",
"满布": "滿佈",
"漂荡": "漂蕩",
"漏斗": "漏斗",
"演奏台": "演奏臺",
"潭里": "潭裡",
"激荡": "激蕩",
"浓郁": "濃郁",
"浓发": "濃髮",
"湿地松": "濕地鬆",
"蒙蒙": "濛濛",
"蒙雾": "濛霧",
"瀛台": "瀛臺",
"弥漫": "瀰漫",
"弥漫着": "瀰漫著",
"火并": "火併",
"灰蒙": "灰濛",
"炒面": "炒麵",
"炮制": "炮製",
"炸药": "炸葯",
"炸酱面": "炸醬麵",
"为着": "為著",
"乌干达": "烏干達",
"乌苏里江": "烏蘇裡江",
"乌发": "烏髮",
"乌龙面": "烏龍麵",
"烘制": "烘製",
"烽火台": "烽火臺",
"无干": "無干",
"无精打采": "無精打采",
"炼制": "煉製",
"烟卷儿": "煙卷兒",
"烟斗": "煙斗",
"烟斗丝": "煙斗絲",
"烟台": "煙臺",
"照准": "照准",
"熨斗": "熨斗",
"灯台": "燈臺",
"燎发": "燎髮",
"烫发": "燙髮",
"烫面": "燙麵",
"烛台": "燭臺",
"炉台": "爐臺",
"爽荡": "爽蕩",
"片言只语": "片言隻語",
"牛肉面": "牛肉麵",
"牛只": "牛隻",
"特准": "特准",
"特征": "特征",
"特里": "特裡",
"特制": "特製",
"牵系": "牽繫",
"狼借": "狼藉",
"猛冲": "猛衝",
"奖杯": "獎盃",
"获准": "獲准",
"率团参加": "率團參加",
"王侯后": "王侯后",
"王后": "王后",
"班里": "班裡",
"理发": "理髮",
"瑶台": "瑤臺",
"甚么": "甚麼",
"甜面酱": "甜麵醬",
"生力面": "生力麵",
"生锈": "生鏽",
"生发": "生髮",
"田里": "田裡",
"由馀": "由余",
"男佣": "男佣",
"男用表": "男用錶",
"留发": "留髮",
"畚斗": "畚斗",
"当着": "當著",
"疏松": "疏鬆",
"疲困": "疲睏",
"病症": "病癥",
"症候": "癥候",
"症状": "癥狀",
"症结": "癥結",
"登台": "登臺",
"发布": "發佈",
"发着": "發著",
"发面": "發麵",
"发霉": "發黴",
"白卷": "白卷",
"白干儿": "白干兒",
"白发": "白髮",
"白面": "白麵",
"百里": "百裡",
"百只": "百隻",
"皇后": "皇后",
"皇历": "皇曆",
"皓发": "皓髮",
"皮里阳秋": "皮裏陽秋",
"皮里春秋": "皮裡春秋",
"皮制": "皮製",
"皱折": "皺摺",
"盒里": "盒裡",
"监制": "監製",
"盘里": "盤裡",
"盘回": "盤迴",
"直接参与": "直接參与",
"直冲": "直衝",
"相克": "相剋",
"相干": "相干",
"相冲": "相衝",
"看台": "看臺",
"眼帘": "眼帘",
"眼眶里": "眼眶裡",
"眼里": "眼裡",
"困乏": "睏乏",
"睡着了": "睡著了",
"了如": "瞭如",
"了望": "瞭望",
"了然": "瞭然",
"了若指掌": "瞭若指掌",
"了解": "瞭解",
"蒙住": "矇住",
"蒙昧无知": "矇昧無知",
"蒙混": "矇混",
"蒙蒙": "矇矇",
"蒙眬": "矇矓",
"蒙蔽": "矇蔽",
"蒙骗": "矇騙",
"短发": "短髮",
"石英表": "石英錶",
"研制": "研製",
"砰当": "砰噹",
"砲台": "砲臺",
"朱唇皓齿": "硃唇皓齒",
"朱批": "硃批",
"朱砂": "硃砂",
"朱笔": "硃筆",
"朱红色": "硃紅色",
"朱色": "硃色",
"硬干": "硬幹",
"砚台": "硯臺",
"碑志": "碑誌",
"磁制": "磁製",
"磨制": "磨製",
"示复": "示覆",
"社里": "社裡",
"神采": "神采",
"御侮": "禦侮",
"御寇": "禦寇",
"御寒": "禦寒",
"御敌": "禦敵",
"秃发": "禿髮",
"秀发": "秀髮",
"私下里": "私下裡",
"秋天里": "秋天裡",
"秋裤": "秋褲",
"秒表": "秒錶",
"稀松": "稀鬆",
"禀复": "稟覆",
"稻谷": "稻穀",
"稽征": "稽征",
"谷仓": "穀倉",
"谷场": "穀場",
"谷子": "穀子",
"谷壳": "穀殼",
"谷物": "穀物",
"谷皮": "穀皮",
"谷神": "穀神",
"谷粒": "穀粒",
"谷舱": "穀艙",
"谷苗": "穀苗",
"谷草": "穀草",
"谷贱伤农": "穀賤傷農",
"谷道": "穀道",
"谷雨": "穀雨",
"谷类": "穀類",
"积极参与": "積极參与",
"积极参加": "積极參加",
"空荡": "空蕩",
"窗帘": "窗帘",
"窗明几净": "窗明几淨",
"窗台": "窗檯",
"窗台": "窗臺",
"窝里": "窩裡",
"窝阔台": "窩闊臺",
"穷追不舍": "窮追不捨",
"笆斗": "笆斗",
"笑里藏刀": "笑裡藏刀",
"第一卷": "第一卷",
"筋斗": "筋斗",
"答卷": "答卷",
"答复": "答複",
"答复": "答覆",
"筵几": "筵几",
"箕斗": "箕斗",
"签着": "簽著",
"吁求": "籲求",
"吁请": "籲請",
"粗制": "粗製",
"粗卤": "粗鹵",
"精干": "精幹",
"精明强干": "精明強幹",
"精致": "精緻",
"精制": "精製",
"精辟": "精辟",
"精采": "精采",
"糊里糊涂": "糊裡糊塗",
"团子": "糰子",
"系着": "系著",
"纪历": "紀曆",
"红发": "紅髮",
"红霉素": "紅黴素",
"纡回": "紆迴",
"纳采": "納采",
"素食面": "素食麵",
"素面": "素麵",
"紫微斗数": "紫微斗數",
"细致": "細緻",
"组里": "組裡",
"结发": "結髮",
"绝对参照": "絕對參照",
"丝来线去": "絲來線去",
"丝布": "絲布",
"丝板": "絲板",
"丝瓜布": "絲瓜布",
"丝绒布": "絲絨布",
"丝线": "絲線",
"丝织厂": "絲織廠",
"丝虫": "絲蟲",
"綑吊": "綑吊",
"经卷": "經卷",
"绿霉素": "綠黴素",
"维系": "維繫",
"绾发": "綰髮",
"网里": "網裡",
"紧绷": "緊繃",
"紧绷着": "緊繃著",
"紧追不舍": "緊追不捨",
"编制": "編製",
"编发": "編髮",
"缓冲": "緩衝",
"致密": "緻密",
"萦回": "縈迴",
"县里": "縣裡",
"县志": "縣誌",
"缝里": "縫裡",
"缝制": "縫製",
"纤夫": "縴夫",
"繁复": "繁複",
"绷住": "繃住",
"绷子": "繃子",
"绷带": "繃帶",
"绷紧": "繃緊",
"绷脸": "繃臉",
"绷着": "繃著",
"绷着脸": "繃著臉",
"绷着脸儿": "繃著臉兒",
"绷开": "繃開",
"绘制": "繪製",
"系上": "繫上",
"系到": "繫到",
"系囚": "繫囚",
"系心": "繫心",
"系念": "繫念",
"系怀": "繫懷",
"系数": "繫數",
"系于": "繫於",
"系系": "繫系",
"系紧": "繫緊",
"系绳": "繫繩",
"系着": "繫著",
"系辞": "繫辭",
"缴卷": "繳卷",
"累囚": "纍囚",
"累累": "纍纍",
"坛子": "罈子",
"坛坛罐罐": "罈罈罐罐",
"骂着": "罵著",
"美制": "美製",
"美发": "美髮",
"翻来复去": "翻來覆去",
"翻天复地": "翻天覆地",
"翻复": "翻覆",
"翻云复雨": "翻雲覆雨",
"老么": "老么",
"老板": "老闆",
"考卷": "考卷",
"耕获": "耕穫",
"聊斋志异": "聊齋誌異",
"联系": "聯係",
"联系": "聯繫",
"肉丝面": "肉絲麵",
"肉羹面": "肉羹麵",
"肉松": "肉鬆",
"肢体": "肢体",
"背向着": "背向著",
"背地里": "背地裡",
"胡里胡涂": "胡裡胡塗",
"能干": "能幹",
"脉冲": "脈衝",
"脱发": "脫髮",
"腊味": "腊味",
"腊笔": "腊筆",
"腊肉": "腊肉",
"脑子里": "腦子裡",
"腰里": "腰裡",
"胶卷": "膠卷",
"自制": "自製",
"自觉自愿": "自覺自愿",
"台上": "臺上",
"台下": "臺下",
"台中": "臺中",
"台北": "臺北",
"台南": "臺南",
"台地": "臺地",
"台塑": "臺塑",
"台大": "臺大",
"台币": "臺幣",
"台座": "臺座",
"台东": "臺東",
"台柱": "臺柱",
"台榭": "臺榭",
"台汽": "臺汽",
"台海": "臺海",
"台澎金马": "臺澎金馬",
"台湾": "臺灣",
"台灯": "臺燈",
"台球": "臺球",
"台省": "臺省",
"台端": "臺端",
"台糖": "臺糖",
"台肥": "臺肥",
"台航": "臺航",
"台视": "臺視",
"台词": "臺詞",
"台车": "臺車",
"台铁": "臺鐵",
"台阶": "臺階",
"台电": "臺電",
"台面": "臺面",
"舂谷": "舂穀",
"兴致": "興緻",
"兴高采烈": "興高采烈",
"旧历": "舊曆",
"舒卷": "舒卷",
"舞台": "舞臺",
"航海历": "航海曆",
"船只": "船隻",
"舰只": "艦隻",
"芬郁": "芬郁",
"花卷": "花卷",
"花盆里": "花盆裡",
"花采": "花采",
"苑里": "苑裡",
"若干": "若干",
"苦干": "苦幹",
"苦里": "苦裏",
"苦卤": "苦鹵",
"范仲淹": "范仲淹",
"范蠡": "范蠡",
"范阳": "范陽",
"茅台": "茅臺",
"茶几": "茶几",
"草丛里": "草叢裡",
"庄里": "莊裡",
"茎干": "莖幹",
"莽荡": "莽蕩",
"菌丝体": "菌絲体",
"菌丝体": "菌絲體",
"华里": "華裡",
"华发": "華髮",
"万卷": "萬卷",
"万历": "萬曆",
"万只": "萬隻",
"落发": "落髮",
"着儿": "著兒",
"着书立说": "著書立說",
"着色软体": "著色軟體",
"着重指出": "著重指出",
"着录": "著錄",
"着录规则": "著錄規則",
"蓄发": "蓄髮",
"蓄须": "蓄鬚",
"蓬发": "蓬髮",
"蓬松": "蓬鬆",
"莲台": "蓮臺",
"荡来荡去": "蕩來蕩去",
"荡女": "蕩女",
"荡妇": "蕩婦",
"荡寇": "蕩寇",
"荡平": "蕩平",
"荡涤": "蕩滌",
"荡漾": "蕩漾",
"荡然": "蕩然",
"荡舟": "蕩舟",
"荡船": "蕩船",
"荡荡": "蕩蕩",
"薑丝": "薑絲",
"薙发": "薙髮",
"借以": "藉以",
"借口": "藉口",
"借故": "藉故",
"借机": "藉機",
"借此": "藉此",
"借由": "藉由",
"借端": "藉端",
"借着": "藉著",
"借借": "藉藉",
"借词": "藉詞",
"借资": "藉資",
"借酒浇愁": "藉酒澆愁",
"藤制": "藤製",
"蕴含着": "蘊含著",
"蕴涵着": "蘊涵著",
"蕴借": "蘊藉",
"萝卜": "蘿蔔",
"虎须": "虎鬚",
"号志": "號誌",
"蜂后": "蜂后",
"蛮干": "蠻幹",
"行事历": "行事曆",
"胡同": "衚衕",
"冲上": "衝上",
"冲下": "衝下",
"冲来": "衝來",
"冲倒": "衝倒",
"冲出": "衝出",
"冲到": "衝到",
"冲刺": "衝刺",
"冲克": "衝剋",
"冲力": "衝力",
"冲劲": "衝勁",
"冲动": "衝動",
"冲去": "衝去",
"冲口": "衝口",
"冲垮": "衝垮",
"冲堂": "衝堂",
"冲压": "衝壓",
"冲天": "衝天",
"冲掉": "衝掉",
"冲撞": "衝撞",
"冲击": "衝擊",
"冲散": "衝散",
"冲决": "衝決",
"冲浪": "衝浪",
"冲激": "衝激",
"冲破": "衝破",
"冲程": "衝程",
"冲突": "衝突",
"冲线": "衝線",
"冲着": "衝著",
"冲冲": "衝衝",
"冲要": "衝要",
"冲起": "衝起",
"冲进": "衝進",
"冲过": "衝過",
"冲锋": "衝鋒",
"表里": "表裡",
"袖里": "袖裡",
"被里": "被裡",
"被复": "被複",
"被复": "被覆",
"被复着": "被覆著",
"被发": "被髮",
"裁并": "裁併",
"裁制": "裁製",
"里面": "裏面",
"里人": "裡人",
"里加": "裡加",
"里外": "裡外",
"里子": "裡子",
"里屋": "裡屋",
"里层": "裡層",
"里布": "裡布",
"里带": "裡帶",
"里弦": "裡弦",
"里应外合": "裡應外合",
"里拉": "裡拉",
"里斯": "裡斯",
"里海": "裡海",
"里脊": "裡脊",
"里衣": "裡衣",
"里里": "裡裡",
"里通外国": "裡通外國",
"里通外敌": "裡通外敵",
"里边": "裡邊",
"里间": "裡間",
"里面": "裡面",
"里头": "裡頭",
"制件": "製件",
"制作": "製作",
"制做": "製做",
"制备": "製備",
"制冰": "製冰",
"制冷": "製冷",
"制剂": "製劑",
"制品": "製品",
"制图": "製圖",
"制成": "製成",
"制法": "製法",
"制为": "製為",
"制片": "製片",
"制版": "製版",
"制程": "製程",
"制糖": "製糖",
"制纸": "製紙",
"制药": "製藥",
"制表": "製表",
"制裁": "製裁",
"制造": "製造",
"制革": "製革",
"制鞋": "製鞋",
"制盐": "製鹽",
"复仞年如": "複仞年如",
"复以百万": "複以百萬",
"复位": "複位",
"复信": "複信",
"复分数": "複分數",
"复列": "複列",
"复利": "複利",
"复印": "複印",
"复原": "複原",
"复句": "複句",
"复合": "複合",
"复名": "複名",
"复员": "複員",
"复壁": "複壁",
"复壮": "複壯",
"复姓": "複姓",
"复字键": "複字鍵",
"复审": "複審",
"复写": "複寫",
"复式": "複式",
"复复": "複復",
"复数": "複數",
"复本": "複本",
"复查": "複查",
"复核": "複核",
"复检": "複檢",
"复次": "複次",
"复比": "複比",
"复决": "複決",
"复活": "複活",
"复测": "複測",
"复亩珍": "複畝珍",
"复发": "複發",
"复目": "複目",
"复眼": "複眼",
"复种": "複種",
"复线": "複線",
"复习": "複習",
"复兴社": "複興社",
"复旧": "複舊",
"复色": "複色",
"复叶": "複葉",
"复盖": "複蓋",
"复苏": "複蘇",
"复制": "複製",
"复诊": "複診",
"复词": "複詞",
"复试": "複試",
"复课": "複課",
"复议": "複議",
"复变函数": "複變函數",
"复赛": "複賽",
"复述": "複述",
"复选": "複選",
"复钱": "複錢",
"复杂": "複雜",
"复电": "複電",
"复音": "複音",
"复韵": "複韻",
"衬里": "襯裡",
"西岳": "西嶽",
"西征": "西征",
"西历": "西曆",
"要冲": "要衝",
"要么": "要麼",
"复上": "覆上",
"复亡": "覆亡",
"复住": "覆住",
"复信": "覆信",
"复命": "覆命",
"复在": "覆在",
"复审": "覆審",
"复巢之下": "覆巢之下",
"复成": "覆成",
"复败": "覆敗",
"复文": "覆文",
"复校": "覆校",
"复核": "覆核",
"复水难收": "覆水難收",
"复没": "覆沒",
"复灭": "覆滅",
"复盆": "覆盆",
"复舟": "覆舟",
"复着": "覆著",
"复盖": "覆蓋",
"复盖着": "覆蓋著",
"复试": "覆試",
"复议": "覆議",
"复车": "覆車",
"复载": "覆載",
"复辙": "覆轍",
"复电": "覆電",
"见复": "見覆",
"亲征": "親征",
"观众台": "觀眾臺",
"观台": "觀臺",
"观象台": "觀象臺",
"角落里": "角落裡",
"觔斗": "觔斗",
"触须": "觸鬚",
"订制": "訂製",
"诉说着": "訴說著",
"词汇": "詞彙",
"试卷": "試卷",
"诗卷": "詩卷",
"话里有话": "話裡有話",
"志哀": "誌哀",
"志喜": "誌喜",
"志庆": "誌慶",
"语云": "語云",
"语汇": "語彙",
"诬蔑": "誣衊",
"诵经台": "誦經臺",
"说着": "說著",
"课征": "課征",
"调制": "調製",
"调频台": "調頻臺",
"请参阅": "請參閱",
"讲台": "講臺",
"谢绝参观": "謝絕參觀",
"护发": "護髮",
"雠隙": "讎隙",
"豆腐干": "豆腐干",
"竖着": "豎著",
"丰富多采": "豐富多采",
"丰滨": "豐濱",
"丰滨乡": "豐濱鄉",
"丰采": "豐采",
"象征着": "象徵著",
"贵干": "貴幹",
"贾后": "賈后",
"赈饥": "賑饑",
"贤后": "賢后",
"质朴": "質朴",
"赌台": "賭檯",
"购并": "購併",
"赤松": "赤鬆",
"起吊": "起吊",
"起复": "起複",
"赶制": "趕製",
"跌荡": "跌蕩",
"跟斗": "跟斗",
"跳荡": "跳蕩",
"跳表": "跳錶",
"踬仆": "躓仆",
"躯干": "軀幹",
"车库里": "車庫裡",
"车站里": "車站裡",
"车里": "車裡",
"轻松": "輕鬆",
"轮回": "輪迴",
"转台": "轉檯",
"辛丑": "辛丑",
"辟邪": "辟邪",
"办伙": "辦伙",
"办公台": "辦公檯",
"辞汇": "辭彙",
"农历": "農曆",
"迂回": "迂迴",
"近日里": "近日裡",
"迥然回异": "迥然迴異",
"回光返照": "迴光返照",
"回向": "迴向",
"回圈": "迴圈",
"回廊": "迴廊",
"回形夹": "迴形夾",
"回文": "迴文",
"回旋": "迴旋",
"回流": "迴流",
"回环": "迴環",
"回荡": "迴盪",
"回纹针": "迴紋針",
"回绕": "迴繞",
"回肠": "迴腸",
"回荡": "迴蕩",
"回诵": "迴誦",
"回路": "迴路",
"回转": "迴轉",
"回递性": "迴遞性",
"回避": "迴避",
"回响": "迴響",
"回风": "迴風",
"回首": "迴首",
"迷蒙": "迷濛",
"退伙": "退伙",
"这么着": "這么著",
"这里": "這裏",
"这里": "這裡",
"这只": "這隻",
"这么": "這麼",
"这么着": "這麼著",
"通心面": "通心麵",
"速食面": "速食麵",
"连系": "連繫",
"连台好戏": "連臺好戲",
"游荡": "遊蕩",
"遍布": "遍佈",
"递回": "遞迴",
"远征": "遠征",
"适才": "適纔",
"遮复": "遮覆",
"还冲": "還衝",
"邋里邋遢": "邋裡邋遢",
"那里": "那裡",
"那只": "那隻",
"那么": "那麼",
"那么着": "那麼著",
"邪辟": "邪辟",
"郁烈": "郁烈",
"郁穆": "郁穆",
"郁郁": "郁郁",
"郁闭": "郁閉",
"郁馥": "郁馥",
"乡愿": "鄉愿",
"乡里": "鄉裡",
"邻里": "鄰裡",
"配合着": "配合著",
"配制": "配製",
"酒杯": "酒盃",
"酒坛": "酒罈",
"酥松": "酥鬆",
"醋坛": "醋罈",
"酝借": "醞藉",
"酝酿着": "醞釀著",
"医药": "醫葯",
"醲郁": "醲郁",
"酿制": "釀製",
"采地": "采地",
"采女": "采女",
"采声": "采聲",
"采色": "采色",
"采邑": "采邑",
"里程表": "里程錶",
"重折": "重摺",
"重复": "重複",
"重复": "重覆",
"重锤": "重鎚",
"野台戏": "野臺戲",
"金斗": "金斗",
"金表": "金錶",
"金发": "金髮",
"金霉素": "金黴素",
"钉锤": "釘鎚",
"银朱": "銀硃",
"银发": "銀髮",
"铜制": "銅製",
"铝制": "鋁製",
"钢制": "鋼製",
"录着": "錄著",
"录制": "錄製",
"表带": "錶帶",
"表店": "錶店",
"表厂": "錶廠",
"表壳": "錶殼",
"表链": "錶鏈",
"表面": "錶面",
"锅台": "鍋臺",
"锻鍊出": "鍛鍊出",
"锻鍊身体": "鍛鍊身体",
"锲而不舍": "鍥而不捨",
"锤儿": "鎚兒",
"锤子": "鎚子",
"锤头": "鎚頭",
"链霉素": "鏈黴素",
"镜台": "鏡臺",
"锈病": "鏽病",
"锈菌": "鏽菌",
"锈蚀": "鏽蝕",
"钟表": "鐘錶",
"铁锤": "鐵鎚",
"铁锈": "鐵鏽",
"长征": "長征",
"长发": "長髮",
"长须鲸": "長鬚鯨",
"门帘": "門帘",
"门斗": "門斗",
"门里": "門裡",
"开伙": "開伙",
"开卷": "開卷",
"开诚布公": "開誠佈公",
"开采": "開采",
"閒情逸致": "閒情逸緻",
"閒荡": "閒蕩",
"间不容发": "間不容髮",
"闵采尔": "閔采爾",
"阅卷": "閱卷",
"阑干": "闌干",
"关系": "關係",
"关系着": "關係著",
"防御": "防禦",
"防锈": "防鏽",
"防台": "防颱",
"阿斗": "阿斗",
"阿里": "阿裡",
"除旧布新": "除舊佈新",
"阴干": "陰干",
"阴历": "陰曆",
"阴郁": "陰郁",
"陆征祥": "陸征祥",
"阳春面": "陽春麵",
"阳历": "陽曆",
"阳台": "陽臺",
"只字": "隻字",
"只影": "隻影",
"只手遮天": "隻手遮天",
"只眼": "隻眼",
"只言片语": "隻言片語",
"只身": "隻身",
"雅致": "雅緻",
"雇佣": "雇佣",
"双折": "雙摺",
"杂志": "雜誌",
"鸡丝": "雞絲",
"鸡丝面": "雞絲麵",
"鸡腿面": "雞腿麵",
"鸡只": "雞隻",
"难舍": "難捨",
"雪里": "雪裡",
"云须": "雲鬚",
"电子表": "電子錶",
"电台": "電臺",
"电冲": "電衝",
"电复": "電覆",
"电视台": "電視臺",
"电表": "電錶",
"震荡": "震蕩",
"雾里": "霧裡",
"露台": "露臺",
"灵台": "靈臺",
"青瓦台": "青瓦臺",
"青霉": "青黴",
"面朝着": "面朝著",
"面临着": "面臨著",
"鞋里": "鞋裡",
"鞣制": "鞣製",
"秋千": "鞦韆",
"鞭辟入里": "鞭辟入裡",
"韩国制": "韓國製",
"韩制": "韓製",
"预制": "預製",
"颁布": "頒佈",
"头里": "頭裡",
"头发": "頭髮",
"颊须": "頰鬚",
"颠仆": "顛仆",
"颠复": "顛複",
"颠复": "顛覆",
"显着标志": "顯著標志",
"风土志": "風土誌",
"风斗": "風斗",
"风物志": "風物誌",
"风里": "風裡",
"风采": "風采",
"台风": "颱風",
"刮了": "颳了",
"刮倒": "颳倒",
"刮去": "颳去",
"刮得": "颳得",
"刮着": "颳著",
"刮走": "颳走",
"刮起": "颳起",
"刮风": "颳風",
"飘荡": "飄蕩",
"饭团": "飯糰",
"饼干": "餅干",
"馄饨面": "餛飩麵",
"饥不择食": "饑不擇食",
"饥寒": "饑寒",
"饥民": "饑民",
"饥渴": "饑渴",
"饥溺": "饑溺",
"饥荒": "饑荒",
"饥饱": "饑飽",
"饥饿": "饑餓",
"饥馑": "饑饉",
"首当其冲": "首當其衝",
"香郁": "香郁",
"馥郁": "馥郁",
"马里": "馬裡",
"马表": "馬錶",
"骀荡": "駘蕩",
"腾冲": "騰衝",
"骨子里": "骨子裡",
"骨干": "骨幹",
"骨灰坛": "骨灰罈",
"肮脏": "骯髒",
"脏乱": "髒亂",
"脏兮兮": "髒兮兮",
"脏字": "髒字",
"脏得": "髒得",
"脏东西": "髒東西",
"脏水": "髒水",
"脏的": "髒的",
"脏话": "髒話",
"脏钱": "髒錢",
"高干": "高幹",
"高台": "高臺",
"髭须": "髭鬚",
"发型": "髮型",
"发夹": "髮夾",
"发妻": "髮妻",
"发姐": "髮姐",
"发带": "髮帶",
"发廊": "髮廊",
"发式": "髮式",
"发指": "髮指",
"发捲": "髮捲",
"发根": "髮根",
"发毛": "髮毛",
"发油": "髮油",
"发状": "髮狀",
"发短心长": "髮短心長",
"发端": "髮端",
"发结": "髮結",
"发丝": "髮絲",
"发网": "髮網",
"发肤": "髮膚",
"发胶": "髮膠",
"发菜": "髮菜",
"发蜡": "髮蠟",
"发辫": "髮辮",
"发针": "髮針",
"发长": "髮長",
"发际": "髮際",
"发霜": "髮霜",
"发髻": "髮髻",
"发鬓": "髮鬢",
"鬅松": "鬅鬆",
"松了": "鬆了",
"松些": "鬆些",
"松劲": "鬆勁",
"松动": "鬆動",
"松口": "鬆口",
"松土": "鬆土",
"松弛": "鬆弛",
"松快": "鬆快",
"松懈": "鬆懈",
"松手": "鬆手",
"松散": "鬆散",
"松林": "鬆林",
"松柔": "鬆柔",
"松毛虫": "鬆毛蟲",
"松浮": "鬆浮",
"松涛": "鬆濤",
"松科": "鬆科",
"松节油": "鬆節油",
"松绑": "鬆綁",
"松紧": "鬆緊",
"松缓": "鬆緩",
"松脆": "鬆脆",
"松脱": "鬆脫",
"松起": "鬆起",
"松软": "鬆軟",
"松通": "鬆通",
"松开": "鬆開",
"松饼": "鬆餅",
"松松": "鬆鬆",
"鬈发": "鬈髮",
"胡子": "鬍子",
"胡梢": "鬍梢",
"胡渣": "鬍渣",
"胡髭": "鬍髭",
"胡须": "鬍鬚",
"须根": "鬚根",
"须毛": "鬚毛",
"须生": "鬚生",
"须眉": "鬚眉",
"须发": "鬚髮",
"须须": "鬚鬚",
"鬓发": "鬢髮",
"斗着": "鬥著",
"闹着玩儿": "鬧著玩儿",
"闹着玩儿": "鬧著玩兒",
"郁郁": "鬱郁",
"鱼松": "魚鬆",
"鲸须": "鯨鬚",
"鲇鱼": "鯰魚",
"鹤发": "鶴髮",
"卤化": "鹵化",
"卤味": "鹵味",
"卤族": "鹵族",
"卤水": "鹵水",
"卤汁": "鹵汁",
"卤簿": "鹵簿",
"卤素": "鹵素",
"卤莽": "鹵莽",
"卤钝": "鹵鈍",
"咸味": "鹹味",
"咸土": "鹹土",
"咸度": "鹹度",
"咸得": "鹹得",
"咸水": "鹹水",
"咸海": "鹹海",
"咸淡": "鹹淡",
"咸湖": "鹹湖",
"咸汤": "鹹湯",
"咸的": "鹹的",
"咸肉": "鹹肉",
"咸菜": "鹹菜",
"咸蛋": "鹹蛋",
"咸猪肉": "鹹豬肉",
"咸类": "鹹類",
"咸鱼": "鹹魚",
"咸鸭蛋": "鹹鴨蛋",
"咸卤": "鹹鹵",
"咸咸": "鹹鹹",
"盐卤": "鹽鹵",
"面价": "麵價",
"面包": "麵包",
"面团": "麵團",
"面店": "麵店",
"面厂": "麵廠",
"面杖": "麵杖",
"面条": "麵條",
"面灰": "麵灰",
"面皮": "麵皮",
"面筋": "麵筋",
"面粉": "麵粉",
"面糊": "麵糊",
"面线": "麵線",
"面茶": "麵茶",
"面食": "麵食",
"面饺": "麵餃",
"面饼": "麵餅",
"麻酱面": "麻醬麵",
"黄历": "黃曆",
"黄发垂髫": "黃髮垂髫",
"黑发": "黑髮",
"黑松": "黑鬆",
"霉毒": "黴毒",
"霉菌": "黴菌",
"鼓里": "鼓裡",
"冬冬": "鼕鼕",
"龙卷": "龍卷",
"龙须": "龍鬚",
}
zh2Hans = {
'顯著': '显著',
'土著': '土著',
'印表機': '打印机',
'說明檔案': '帮助文件',
"瀋": "沈",
"畫": "划",
"鍾": "钟",
"靦": "腼",
"餘": "余",
"鯰": "鲇",
"鹼": "碱",
"㠏": "㟆",
"𡞵": "㛟",
"万": "万",
"与": "与",
"丑": "丑",
"丟": "丢",
"並": "并",
"丰": "丰",
"么": "么",
"乾": "干",
"乾坤": "乾坤",
"乾隆": "乾隆",
"亂": "乱",
"云": "云",
"亙": "亘",
"亞": "亚",
"仆": "仆",
"价": "价",
"伙": "伙",
"佇": "伫",
"佈": "布",
"体": "体",
"余": "余",
"余": "馀",
"佣": "佣",
"併": "并",
"來": "来",
"侖": "仑",
"侶": "侣",
"俁": "俣",
"係": "系",
"俔": "伣",
"俠": "侠",
"倀": "伥",
"倆": "俩",
"倈": "俫",
"倉": "仓",
"個": "个",
"們": "们",
"倫": "伦",
"偉": "伟",
"側": "侧",
"偵": "侦",
"偽": "伪",
"傑": "杰",
"傖": "伧",
"傘": "伞",
"備": "备",
"傢": "家",
"傭": "佣",
"傯": "偬",
"傳": "传",
"傴": "伛",
"債": "债",
"傷": "伤",
"傾": "倾",
"僂": "偻",
"僅": "仅",
"僉": "佥",
"僑": "侨",
"僕": "仆",
"僞": "伪",
"僥": "侥",
"僨": "偾",
"價": "价",
"儀": "仪",
"儂": "侬",
"億": "亿",
"儈": "侩",
"儉": "俭",
"儐": "傧",
"儔": "俦",
"儕": "侪",
"儘": "尽",
"償": "偿",
"優": "优",
"儲": "储",
"儷": "俪",
"儸": "㑩",
"儺": "傩",
"儻": "傥",
"儼": "俨",
"儿": "儿",
"兇": "凶",
"兌": "兑",
"兒": "儿",
"兗": "兖",
"党": "党",
"內": "内",
"兩": "两",
"冊": "册",
"冪": "幂",
"准": "准",
"凈": "净",
"凍": "冻",
"凜": "凛",
"几": "几",
"凱": "凯",
"划": "划",
"別": "别",
"刪": "删",
"剄": "刭",
"則": "则",
"剋": "克",
"剎": "刹",
"剗": "刬",
"剛": "刚",
"剝": "剥",
"剮": "剐",
"剴": "剀",
"創": "创",
"劃": "划",
"劇": "剧",
"劉": "刘",
"劊": "刽",
"劌": "刿",
"劍": "剑",
"劏": "㓥",
"劑": "剂",
"劚": "㔉",
"勁": "劲",
"動": "动",
"務": "务",
"勛": "勋",
"勝": "胜",
"勞": "劳",
"勢": "势",
"勩": "勚",
"勱": "劢",
"勵": "励",
"勸": "劝",
"勻": "匀",
"匭": "匦",
"匯": "汇",
"匱": "匮",
"區": "区",
"協": "协",
"卷": "卷",
"卻": "却",
"厂": "厂",
"厙": "厍",
"厠": "厕",
"厭": "厌",
"厲": "厉",
"厴": "厣",
"參": "参",
"叄": "叁",
"叢": "丛",
"台": "台",
"叶": "叶",
"吊": "吊",
"后": "后",
"吒": "咤",
"吳": "吴",
"吶": "呐",
"呂": "吕",
"獃": "呆",
"咼": "呙",
"員": "员",
"唄": "呗",
"唚": "吣",
"問": "问",
"啓": "启",
"啞": "哑",
"啟": "启",
"啢": "唡",
"喎": "㖞",
"喚": "唤",
"喪": "丧",
"喬": "乔",
"單": "单",
"喲": "哟",
"嗆": "呛",
"嗇": "啬",
"嗊": "唝",
"嗎": "吗",
"嗚": "呜",
"嗩": "唢",
"嗶": "哔",
"嘆": "叹",
"嘍": "喽",
"嘔": "呕",
"嘖": "啧",
"嘗": "尝",
"嘜": "唛",
"嘩": "哗",
"嘮": "唠",
"嘯": "啸",
"嘰": "叽",
"嘵": "哓",
"嘸": "呒",
"嘽": "啴",
"噁": "恶",
"噓": "嘘",
"噚": "㖊",
"噝": "咝",
"噠": "哒",
"噥": "哝",
"噦": "哕",
"噯": "嗳",
"噲": "哙",
"噴": "喷",
"噸": "吨",
"噹": "当",
"嚀": "咛",
"嚇": "吓",
"嚌": "哜",
"嚕": "噜",
"嚙": "啮",
"嚥": "咽",
"嚦": "呖",
"嚨": "咙",
"嚮": "向",
"嚲": "亸",
"嚳": "喾",
"嚴": "严",
"嚶": "嘤",
"囀": "啭",
"囁": "嗫",
"囂": "嚣",
"囅": "冁",
"囈": "呓",
"囌": "苏",
"囑": "嘱",
"囪": "囱",
"圇": "囵",
"國": "国",
"圍": "围",
"園": "园",
"圓": "圆",
"圖": "图",
"團": "团",
"坏": "坏",
"垵": "埯",
"埡": "垭",
"埰": "采",
"執": "执",
"堅": "坚",
"堊": "垩",
"堖": "垴",
"堝": "埚",
"堯": "尧",
"報": "报",
"場": "场",
"塊": "块",
"塋": "茔",
"塏": "垲",
"塒": "埘",
"塗": "涂",
"塚": "冢",
"塢": "坞",
"塤": "埙",
"塵": "尘",
"塹": "堑",
"墊": "垫",
"墜": "坠",
"墮": "堕",
"墳": "坟",
"墻": "墙",
"墾": "垦",
"壇": "坛",
"壈": "𡒄",
"壋": "垱",
"壓": "压",
"壘": "垒",
"壙": "圹",
"壚": "垆",
"壞": "坏",
"壟": "垄",
"壠": "垅",
"壢": "坜",
"壩": "坝",
"壯": "壮",
"壺": "壶",
"壼": "壸",
"壽": "寿",
"夠": "够",
"夢": "梦",
"夾": "夹",
"奐": "奂",
"奧": "奥",
"奩": "奁",
"奪": "夺",
"奬": "奖",
"奮": "奋",
"奼": "姹",
"妝": "妆",
"姍": "姗",
"姜": "姜",
"姦": "奸",
"娛": "娱",
"婁": "娄",
"婦": "妇",
"婭": "娅",
"媧": "娲",
"媯": "妫",
"媼": "媪",
"媽": "妈",
"嫗": "妪",
"嫵": "妩",
"嫻": "娴",
"嫿": "婳",
"嬀": "妫",
"嬈": "娆",
"嬋": "婵",
"嬌": "娇",
"嬙": "嫱",
"嬡": "嫒",
"嬤": "嬷",
"嬪": "嫔",
"嬰": "婴",
"嬸": "婶",
"孌": "娈",
"孫": "孙",
"學": "学",
"孿": "孪",
"宁": "宁",
"宮": "宫",
"寢": "寝",
"實": "实",
"寧": "宁",
"審": "审",
"寫": "写",
"寬": "宽",
"寵": "宠",
"寶": "宝",
"將": "将",
"專": "专",
"尋": "寻",
"對": "对",
"導": "导",
"尷": "尴",
"屆": "届",
"屍": "尸",
"屓": "屃",
"屜": "屉",
"屢": "屡",
"層": "层",
"屨": "屦",
"屬": "属",
"岡": "冈",
"峴": "岘",
"島": "岛",
"峽": "峡",
"崍": "崃",
"崗": "岗",
"崢": "峥",
"崬": "岽",
"嵐": "岚",
"嶁": "嵝",
"嶄": "崭",
"嶇": "岖",
"嶔": "嵚",
"嶗": "崂",
"嶠": "峤",
"嶢": "峣",
"嶧": "峄",
"嶮": "崄",
"嶴": "岙",
"嶸": "嵘",
"嶺": "岭",
"嶼": "屿",
"嶽": "岳",
"巋": "岿",
"巒": "峦",
"巔": "巅",
"巰": "巯",
"帘": "帘",
"帥": "帅",
"師": "师",
"帳": "帐",
"帶": "带",
"幀": "帧",
"幃": "帏",
"幗": "帼",
"幘": "帻",
"幟": "帜",
"幣": "币",
"幫": "帮",
"幬": "帱",
"幹": "干",
"幺": "么",
"幾": "几",
"广": "广",
"庫": "库",
"廁": "厕",
"廂": "厢",
"廄": "厩",
"廈": "厦",
"廚": "厨",
"廝": "厮",
"廟": "庙",
"廠": "厂",
"廡": "庑",
"廢": "废",
"廣": "广",
"廩": "廪",
"廬": "庐",
"廳": "厅",
"弒": "弑",
"弳": "弪",
"張": "张",
"強": "强",
"彆": "别",
"彈": "弹",
"彌": "弥",
"彎": "弯",
"彙": "汇",
"彞": "彝",
"彥": "彦",
"征": "征",
"後": "后",
"徑": "径",
"從": "从",
"徠": "徕",
"復": "复",
"徵": "征",
"徹": "彻",
"志": "志",
"恆": "恒",
"恥": "耻",
"悅": "悦",
"悞": "悮",
"悵": "怅",
"悶": "闷",
"惡": "恶",
"惱": "恼",
"惲": "恽",
"惻": "恻",
"愛": "爱",
"愜": "惬",
"愨": "悫",
"愴": "怆",
"愷": "恺",
"愾": "忾",
"愿": "愿",
"慄": "栗",
"態": "态",
"慍": "愠",
"慘": "惨",
"慚": "惭",
"慟": "恸",
"慣": "惯",
"慤": "悫",
"慪": "怄",
"慫": "怂",
"慮": "虑",
"慳": "悭",
"慶": "庆",
"憂": "忧",
"憊": "惫",
"憐": "怜",
"憑": "凭",
"憒": "愦",
"憚": "惮",
"憤": "愤",
"憫": "悯",
"憮": "怃",
"憲": "宪",
"憶": "忆",
"懇": "恳",
"應": "应",
"懌": "怿",
"懍": "懔",
"懞": "蒙",
"懟": "怼",
"懣": "懑",
"懨": "恹",
"懲": "惩",
"懶": "懒",
"懷": "怀",
"懸": "悬",
"懺": "忏",
"懼": "惧",
"懾": "慑",
"戀": "恋",
"戇": "戆",
"戔": "戋",
"戧": "戗",
"戩": "戬",
"戰": "战",
"戱": "戯",
"戲": "戏",
"戶": "户",
"担": "担",
"拋": "抛",
"挩": "捝",
"挾": "挟",
"捨": "舍",
"捫": "扪",
"据": "据",
"掃": "扫",
"掄": "抡",
"掗": "挜",
"掙": "挣",
"掛": "挂",
"採": "采",
"揀": "拣",
"揚": "扬",
"換": "换",
"揮": "挥",
"損": "损",
"搖": "摇",
"搗": "捣",
"搵": "揾",
"搶": "抢",
"摑": "掴",
"摜": "掼",
"摟": "搂",
"摯": "挚",
"摳": "抠",
"摶": "抟",
"摺": "折",
"摻": "掺",
"撈": "捞",
"撏": "挦",
"撐": "撑",
"撓": "挠",
"撝": "㧑",
"撟": "挢",
"撣": "掸",
"撥": "拨",
"撫": "抚",
"撲": "扑",
"撳": "揿",
"撻": "挞",
"撾": "挝",
"撿": "捡",
"擁": "拥",
"擄": "掳",
"擇": "择",
"擊": "击",
"擋": "挡",
"擓": "㧟",
"擔": "担",
"據": "据",
"擠": "挤",
"擬": "拟",
"擯": "摈",
"擰": "拧",
"擱": "搁",
"擲": "掷",
"擴": "扩",
"擷": "撷",
"擺": "摆",
"擻": "擞",
"擼": "撸",
"擾": "扰",
"攄": "摅",
"攆": "撵",
"攏": "拢",
"攔": "拦",
"攖": "撄",
"攙": "搀",
"攛": "撺",
"攜": "携",
"攝": "摄",
"攢": "攒",
"攣": "挛",
"攤": "摊",
"攪": "搅",
"攬": "揽",
"敗": "败",
"敘": "叙",
"敵": "敌",
"數": "数",
"斂": "敛",
"斃": "毙",
"斕": "斓",
"斗": "斗",
"斬": "斩",
"斷": "断",
"於": "于",
"時": "时",
"晉": "晋",
"晝": "昼",
"暈": "晕",
"暉": "晖",
"暘": "旸",
"暢": "畅",
"暫": "暂",
"曄": "晔",
"曆": "历",
"曇": "昙",
"曉": "晓",
"曏": "向",
"曖": "暧",
"曠": "旷",
"曨": "昽",
"曬": "晒",
"書": "书",
"會": "会",
"朧": "胧",
"朮": "术",
"术": "术",
"朴": "朴",
"東": "东",
"杴": "锨",
"极": "极",
"柜": "柜",
"柵": "栅",
"桿": "杆",
"梔": "栀",
"梘": "枧",
"條": "条",
"梟": "枭",
"梲": "棁",
"棄": "弃",
"棖": "枨",
"棗": "枣",
"棟": "栋",
"棧": "栈",
"棲": "栖",
"棶": "梾",
"椏": "桠",
"楊": "杨",
"楓": "枫",
"楨": "桢",
"業": "业",
"極": "极",
"榪": "杩",
"榮": "荣",
"榲": "榅",
"榿": "桤",
"構": "构",
"槍": "枪",
"槤": "梿",
"槧": "椠",
"槨": "椁",
"槳": "桨",
"樁": "桩",
"樂": "乐",
"樅": "枞",
"樓": "楼",
"標": "标",
"樞": "枢",
"樣": "样",
"樸": "朴",
"樹": "树",
"樺": "桦",
"橈": "桡",
"橋": "桥",
"機": "机",
"橢": "椭",
"橫": "横",
"檁": "檩",
"檉": "柽",
"檔": "档",
"檜": "桧",
"檟": "槚",
"檢": "检",
"檣": "樯",
"檮": "梼",
"檯": "台",
"檳": "槟",
"檸": "柠",
"檻": "槛",
"櫃": "柜",
"櫓": "橹",
"櫚": "榈",
"櫛": "栉",
"櫝": "椟",
"櫞": "橼",
"櫟": "栎",
"櫥": "橱",
"櫧": "槠",
"櫨": "栌",
"櫪": "枥",
"櫫": "橥",
"櫬": "榇",
"櫱": "蘖",
"櫳": "栊",
"櫸": "榉",
"櫻": "樱",
"欄": "栏",
"權": "权",
"欏": "椤",
"欒": "栾",
"欖": "榄",
"欞": "棂",
"欽": "钦",
"歐": "欧",
"歟": "欤",
"歡": "欢",
"歲": "岁",
"歷": "历",
"歸": "归",
"歿": "殁",
"殘": "残",
"殞": "殒",
"殤": "殇",
"殨": "㱮",
"殫": "殚",
"殮": "殓",
"殯": "殡",
"殰": "㱩",
"殲": "歼",
"殺": "杀",
"殻": "壳",
"殼": "壳",
"毀": "毁",
"毆": "殴",
"毿": "毵",
"氂": "牦",
"氈": "毡",
"氌": "氇",
"氣": "气",
"氫": "氢",
"氬": "氩",
"氳": "氲",
"汙": "污",
"決": "决",
"沒": "没",
"沖": "冲",
"況": "况",
"洶": "汹",
"浹": "浃",
"涂": "涂",
"涇": "泾",
"涼": "凉",
"淀": "淀",
"淒": "凄",
"淚": "泪",
"淥": "渌",
"淨": "净",
"淩": "凌",
"淪": "沦",
"淵": "渊",
"淶": "涞",
"淺": "浅",
"渙": "涣",
"減": "减",
"渦": "涡",
"測": "测",
"渾": "浑",
"湊": "凑",
"湞": "浈",
"湯": "汤",
"溈": "沩",
"準": "准",
"溝": "沟",
"溫": "温",
"滄": "沧",
"滅": "灭",
"滌": "涤",
"滎": "荥",
"滬": "沪",
"滯": "滞",
"滲": "渗",
"滷": "卤",
"滸": "浒",
"滻": "浐",
"滾": "滚",
"滿": "满",
"漁": "渔",
"漚": "沤",
"漢": "汉",
"漣": "涟",
"漬": "渍",
"漲": "涨",
"漵": "溆",
"漸": "渐",
"漿": "浆",
"潁": "颍",
"潑": "泼",
"潔": "洁",
"潙": "沩",
"潛": "潜",
"潤": "润",
"潯": "浔",
"潰": "溃",
"潷": "滗",
"潿": "涠",
"澀": "涩",
"澆": "浇",
"澇": "涝",
"澐": "沄",
"澗": "涧",
"澠": "渑",
"澤": "泽",
"澦": "滪",
"澩": "泶",
"澮": "浍",
"澱": "淀",
"濁": "浊",
"濃": "浓",
"濕": "湿",
"濘": "泞",
"濛": "蒙",
"濟": "济",
"濤": "涛",
"濫": "滥",
"濰": "潍",
"濱": "滨",
"濺": "溅",
"濼": "泺",
"濾": "滤",
"瀅": "滢",
"瀆": "渎",
"瀇": "㲿",
"瀉": "泻",
"瀋": "沈",
"瀏": "浏",
"瀕": "濒",
"瀘": "泸",
"瀝": "沥",
"瀟": "潇",
"瀠": "潆",
"瀦": "潴",
"瀧": "泷",
"瀨": "濑",
"瀰": "弥",
"瀲": "潋",
"瀾": "澜",
"灃": "沣",
"灄": "滠",
"灑": "洒",
"灕": "漓",
"灘": "滩",
"灝": "灏",
"灠": "漤",
"灣": "湾",
"灤": "滦",
"灧": "滟",
"災": "灾",
"為": "为",
"烏": "乌",
"烴": "烃",
"無": "无",
"煉": "炼",
"煒": "炜",
"煙": "烟",
"煢": "茕",
"煥": "焕",
"煩": "烦",
"煬": "炀",
"煱": "㶽",
"熅": "煴",
"熒": "荧",
"熗": "炝",
"熱": "热",
"熲": "颎",
"熾": "炽",
"燁": "烨",
"燈": "灯",
"燉": "炖",
"燒": "烧",
"燙": "烫",
"燜": "焖",
"營": "营",
"燦": "灿",
"燭": "烛",
"燴": "烩",
"燶": "㶶",
"燼": "烬",
"燾": "焘",
"爍": "烁",
"爐": "炉",
"爛": "烂",
"爭": "争",
"爲": "为",
"爺": "爷",
"爾": "尔",
"牆": "墙",
"牘": "牍",
"牽": "牵",
"犖": "荦",
"犢": "犊",
"犧": "牺",
"狀": "状",
"狹": "狭",
"狽": "狈",
"猙": "狰",
"猶": "犹",
"猻": "狲",
"獁": "犸",
"獄": "狱",
"獅": "狮",
"獎": "奖",
"獨": "独",
"獪": "狯",
"獫": "猃",
"獮": "狝",
"獰": "狞",
"獱": "㺍",
"獲": "获",
"獵": "猎",
"獷": "犷",
"獸": "兽",
"獺": "獭",
"獻": "献",
"獼": "猕",
"玀": "猡",
"現": "现",
"琺": "珐",
"琿": "珲",
"瑋": "玮",
"瑒": "玚",
"瑣": "琐",
"瑤": "瑶",
"瑩": "莹",
"瑪": "玛",
"瑲": "玱",
"璉": "琏",
"璣": "玑",
"璦": "瑷",
"璫": "珰",
"環": "环",
"璽": "玺",
"瓊": "琼",
"瓏": "珑",
"瓔": "璎",
"瓚": "瓒",
"甌": "瓯",
"產": "产",
"産": "产",
"畝": "亩",
"畢": "毕",
"異": "异",
"畵": "画",
"當": "当",
"疇": "畴",
"疊": "叠",
"痙": "痉",
"痾": "疴",
"瘂": "痖",
"瘋": "疯",
"瘍": "疡",
"瘓": "痪",
"瘞": "瘗",
"瘡": "疮",
"瘧": "疟",
"瘮": "瘆",
"瘲": "疭",
"瘺": "瘘",
"瘻": "瘘",
"療": "疗",
"癆": "痨",
"癇": "痫",
"癉": "瘅",
"癘": "疠",
"癟": "瘪",
"癢": "痒",
"癤": "疖",
"癥": "症",
"癧": "疬",
"癩": "癞",
"癬": "癣",
"癭": "瘿",
"癮": "瘾",
"癰": "痈",
"癱": "瘫",
"癲": "癫",
"發": "发",
"皚": "皑",
"皰": "疱",
"皸": "皲",
"皺": "皱",
"盃": "杯",
"盜": "盗",
"盞": "盏",
"盡": "尽",
"監": "监",
"盤": "盘",
"盧": "卢",
"盪": "荡",
"眥": "眦",
"眾": "众",
"睏": "困",
"睜": "睁",
"睞": "睐",
"瞘": "眍",
"瞜": "䁖",
"瞞": "瞒",
"瞭": "了",
"瞶": "瞆",
"瞼": "睑",
"矇": "蒙",
"矓": "眬",
"矚": "瞩",
"矯": "矫",
"硃": "朱",
"硜": "硁",
"硤": "硖",
"硨": "砗",
"确": "确",
"硯": "砚",
"碩": "硕",
"碭": "砀",
"碸": "砜",
"確": "确",
"碼": "码",
"磑": "硙",
"磚": "砖",
"磣": "碜",
"磧": "碛",
"磯": "矶",
"磽": "硗",
"礆": "硷",
"礎": "础",
"礙": "碍",
"礦": "矿",
"礪": "砺",
"礫": "砾",
"礬": "矾",
"礱": "砻",
"祿": "禄",
"禍": "祸",
"禎": "祯",
"禕": "祎",
"禡": "祃",
"禦": "御",
"禪": "禅",
"禮": "礼",
"禰": "祢",
"禱": "祷",
"禿": "秃",
"秈": "籼",
"种": "种",
"稅": "税",
"稈": "秆",
"稏": "䅉",
"稟": "禀",
"種": "种",
"稱": "称",
"穀": "谷",
"穌": "稣",
"積": "积",
"穎": "颖",
"穠": "秾",
"穡": "穑",
"穢": "秽",
"穩": "稳",
"穫": "获",
"穭": "稆",
"窩": "窝",
"窪": "洼",
"窮": "穷",
"窯": "窑",
"窵": "窎",
"窶": "窭",
"窺": "窥",
"竄": "窜",
"竅": "窍",
"竇": "窦",
"竈": "灶",
"竊": "窃",
"竪": "竖",
"競": "竞",
"筆": "笔",
"筍": "笋",
"筑": "筑",
"筧": "笕",
"筴": "䇲",
"箋": "笺",
"箏": "筝",
"節": "节",
"範": "范",
"築": "筑",
"篋": "箧",
"篔": "筼",
"篤": "笃",
"篩": "筛",
"篳": "筚",
"簀": "箦",
"簍": "篓",
"簞": "箪",
"簡": "简",
"簣": "篑",
"簫": "箫",
"簹": "筜",
"簽": "签",
"簾": "帘",
"籃": "篮",
"籌": "筹",
"籖": "签",
"籙": "箓",
"籜": "箨",
"籟": "籁",
"籠": "笼",
"籩": "笾",
"籪": "簖",
"籬": "篱",
"籮": "箩",
"籲": "吁",
"粵": "粤",
"糝": "糁",
"糞": "粪",
"糧": "粮",
"糰": "团",
"糲": "粝",
"糴": "籴",
"糶": "粜",
"糹": "纟",
"糾": "纠",
"紀": "纪",
"紂": "纣",
"約": "约",
"紅": "红",
"紆": "纡",
"紇": "纥",
"紈": "纨",
"紉": "纫",
"紋": "纹",
"納": "纳",
"紐": "纽",
"紓": "纾",
"純": "纯",
"紕": "纰",
"紖": "纼",
"紗": "纱",
"紘": "纮",
"紙": "纸",
"級": "级",
"紛": "纷",
"紜": "纭",
"紝": "纴",
"紡": "纺",
"紬": "䌷",
"細": "细",
"紱": "绂",
"紲": "绁",
"紳": "绅",
"紵": "纻",
"紹": "绍",
"紺": "绀",
"紼": "绋",
"紿": "绐",
"絀": "绌",
"終": "终",
"組": "组",
"絅": "䌹",
"絆": "绊",
"絎": "绗",
"結": "结",
"絕": "绝",
"絛": "绦",
"絝": "绔",
"絞": "绞",
"絡": "络",
"絢": "绚",
"給": "给",
"絨": "绒",
"絰": "绖",
"統": "统",
"絲": "丝",
"絳": "绛",
"絶": "绝",
"絹": "绢",
"綁": "绑",
"綃": "绡",
"綆": "绠",
"綈": "绨",
"綉": "绣",
"綌": "绤",
"綏": "绥",
"綐": "䌼",
"經": "经",
"綜": "综",
"綞": "缍",
"綠": "绿",
"綢": "绸",
"綣": "绻",
"綫": "线",
"綬": "绶",
"維": "维",
"綯": "绹",
"綰": "绾",
"綱": "纲",
"網": "网",
"綳": "绷",
"綴": "缀",
"綵": "䌽",
"綸": "纶",
"綹": "绺",
"綺": "绮",
"綻": "绽",
"綽": "绰",
"綾": "绫",
"綿": "绵",
"緄": "绲",
"緇": "缁",
"緊": "紧",
"緋": "绯",
"緑": "绿",
"緒": "绪",
"緓": "绬",
"緔": "绱",
"緗": "缃",
"緘": "缄",
"緙": "缂",
"線": "线",
"緝": "缉",
"緞": "缎",
"締": "缔",
"緡": "缗",
"緣": "缘",
"緦": "缌",
"編": "编",
"緩": "缓",
"緬": "缅",
"緯": "纬",
"緱": "缑",
"緲": "缈",
"練": "练",
"緶": "缏",
"緹": "缇",
"緻": "致",
"縈": "萦",
"縉": "缙",
"縊": "缢",
"縋": "缒",
"縐": "绉",
"縑": "缣",
"縕": "缊",
"縗": "缞",
"縛": "缚",
"縝": "缜",
"縞": "缟",
"縟": "缛",
"縣": "县",
"縧": "绦",
"縫": "缝",
"縭": "缡",
"縮": "缩",
"縱": "纵",
"縲": "缧",
"縳": "䌸",
"縴": "纤",
"縵": "缦",
"縶": "絷",
"縷": "缕",
"縹": "缥",
"總": "总",
"績": "绩",
"繃": "绷",
"繅": "缫",
"繆": "缪",
"繒": "缯",
"織": "织",
"繕": "缮",
"繚": "缭",
"繞": "绕",
"繡": "绣",
"繢": "缋",
"繩": "绳",
"繪": "绘",
"繫": "系",
"繭": "茧",
"繮": "缰",
"繯": "缳",
"繰": "缲",
"繳": "缴",
"繸": "䍁",
"繹": "绎",
"繼": "继",
"繽": "缤",
"繾": "缱",
"繿": "䍀",
"纈": "缬",
"纊": "纩",
"續": "续",
"纍": "累",
"纏": "缠",
"纓": "缨",
"纔": "才",
"纖": "纤",
"纘": "缵",
"纜": "缆",
"缽": "钵",
"罈": "坛",
"罌": "罂",
"罰": "罚",
"罵": "骂",
"罷": "罢",
"羅": "罗",
"羆": "罴",
"羈": "羁",
"羋": "芈",
"羥": "羟",
"義": "义",
"習": "习",
"翹": "翘",
"耬": "耧",
"耮": "耢",
"聖": "圣",
"聞": "闻",
"聯": "联",
"聰": "聪",
"聲": "声",
"聳": "耸",
"聵": "聩",
"聶": "聂",
"職": "职",
"聹": "聍",
"聽": "听",
"聾": "聋",
"肅": "肃",
"胜": "胜",
"脅": "胁",
"脈": "脉",
"脛": "胫",
"脫": "脱",
"脹": "胀",
"腊": "腊",
"腎": "肾",
"腖": "胨",
"腡": "脶",
"腦": "脑",
"腫": "肿",
"腳": "脚",
"腸": "肠",
"膃": "腽",
"膚": "肤",
"膠": "胶",
"膩": "腻",
"膽": "胆",
"膾": "脍",
"膿": "脓",
"臉": "脸",
"臍": "脐",
"臏": "膑",
"臘": "腊",
"臚": "胪",
"臟": "脏",
"臠": "脔",
"臢": "臜",
"臥": "卧",
"臨": "临",
"臺": "台",
"與": "与",
"興": "兴",
"舉": "举",
"舊": "旧",
"艙": "舱",
"艤": "舣",
"艦": "舰",
"艫": "舻",
"艱": "艰",
"艷": "艳",
"芻": "刍",
"苧": "苎",
"苹": "苹",
"范": "范",
"茲": "兹",
"荊": "荆",
"莊": "庄",
"莖": "茎",
"莢": "荚",
"莧": "苋",
"華": "华",
"萇": "苌",
"萊": "莱",
"萬": "万",
"萵": "莴",
"葉": "叶",
"葒": "荭",
"著": "着",
"著名": "著名",
"葤": "荮",
"葦": "苇",
"葯": "药",
"葷": "荤",
"蒓": "莼",
"蒔": "莳",
"蒞": "莅",
"蒼": "苍",
"蓀": "荪",
"蓋": "盖",
"蓮": "莲",
"蓯": "苁",
"蓴": "莼",
"蓽": "荜",
"蔔": "卜",
"蔞": "蒌",
"蔣": "蒋",
"蔥": "葱",
"蔦": "茑",
"蔭": "荫",
"蕁": "荨",
"蕆": "蒇",
"蕎": "荞",
"蕒": "荬",
"蕓": "芸",
"蕕": "莸",
"蕘": "荛",
"蕢": "蒉",
"蕩": "荡",
"蕪": "芜",
"蕭": "萧",
"蕷": "蓣",
"薀": "蕰",
"薈": "荟",
"薊": "蓟",
"薌": "芗",
"薔": "蔷",
"薘": "荙",
"薟": "莶",
"薦": "荐",
"薩": "萨",
"薳": "䓕",
"薴": "苧",
"薺": "荠",
"藉": "借",
"藍": "蓝",
"藎": "荩",
"藝": "艺",
"藥": "药",
"藪": "薮",
"藴": "蕴",
"藶": "苈",
"藹": "蔼",
"藺": "蔺",
"蘄": "蕲",
"蘆": "芦",
"蘇": "苏",
"蘊": "蕴",
"蘋": "苹",
"蘚": "藓",
"蘞": "蔹",
"蘢": "茏",
"蘭": "兰",
"蘺": "蓠",
"蘿": "萝",
"虆": "蔂",
"處": "处",
"虛": "虚",
"虜": "虏",
"號": "号",
"虧": "亏",
"虫": "虫",
"虯": "虬",
"蛺": "蛱",
"蛻": "蜕",
"蜆": "蚬",
"蜡": "蜡",
"蝕": "蚀",
"蝟": "猬",
"蝦": "虾",
"蝸": "蜗",
"螄": "蛳",
"螞": "蚂",
"螢": "萤",
"螮": "䗖",
"螻": "蝼",
"螿": "螀",
"蟄": "蛰",
"蟈": "蝈",
"蟎": "螨",
"蟣": "虮",
"蟬": "蝉",
"蟯": "蛲",
"蟲": "虫",
"蟶": "蛏",
"蟻": "蚁",
"蠅": "蝇",
"蠆": "虿",
"蠐": "蛴",
"蠑": "蝾",
"蠟": "蜡",
"蠣": "蛎",
"蠨": "蟏",
"蠱": "蛊",
"蠶": "蚕",
"蠻": "蛮",
"衆": "众",
"衊": "蔑",
"術": "术",
"衕": "同",
"衚": "胡",
"衛": "卫",
"衝": "冲",
"衹": "只",
"袞": "衮",
"裊": "袅",
"裏": "里",
"補": "补",
"裝": "装",
"裡": "里",
"製": "制",
"複": "复",
"褌": "裈",
"褘": "袆",
"褲": "裤",
"褳": "裢",
"褸": "褛",
"褻": "亵",
"襇": "裥",
"襏": "袯",
"襖": "袄",
"襝": "裣",
"襠": "裆",
"襤": "褴",
"襪": "袜",
"襬": "䙓",
"襯": "衬",
"襲": "袭",
"覆": "复",
"覆蓋": "覆盖",
"翻來覆去": "翻来覆去",
"見": "见",
"覎": "觃",
"規": "规",
"覓": "觅",
"視": "视",
"覘": "觇",
"覡": "觋",
"覥": "觍",
"覦": "觎",
"親": "亲",
"覬": "觊",
"覯": "觏",
"覲": "觐",
"覷": "觑",
"覺": "觉",
"覽": "览",
"覿": "觌",
"觀": "观",
"觴": "觞",
"觶": "觯",
"觸": "触",
"訁": "讠",
"訂": "订",
"訃": "讣",
"計": "计",
"訊": "讯",
"訌": "讧",
"討": "讨",
"訐": "讦",
"訒": "讱",
"訓": "训",
"訕": "讪",
"訖": "讫",
"託": "讬",
"記": "记",
"訛": "讹",
"訝": "讶",
"訟": "讼",
"訢": "䜣",
"訣": "诀",
"訥": "讷",
"訩": "讻",
"訪": "访",
"設": "设",
"許": "许",
"訴": "诉",
"訶": "诃",
"診": "诊",
"註": "注",
"詁": "诂",
"詆": "诋",
"詎": "讵",
"詐": "诈",
"詒": "诒",
"詔": "诏",
"評": "评",
"詖": "诐",
"詗": "诇",
"詘": "诎",
"詛": "诅",
"詞": "词",
"詠": "咏",
"詡": "诩",
"詢": "询",
"詣": "诣",
"試": "试",
"詩": "诗",
"詫": "诧",
"詬": "诟",
"詭": "诡",
"詮": "诠",
"詰": "诘",
"話": "话",
"該": "该",
"詳": "详",
"詵": "诜",
"詼": "诙",
"詿": "诖",
"誄": "诔",
"誅": "诛",
"誆": "诓",
"誇": "夸",
"誌": "志",
"認": "认",
"誑": "诳",
"誒": "诶",
"誕": "诞",
"誘": "诱",
"誚": "诮",
"語": "语",
"誠": "诚",
"誡": "诫",
"誣": "诬",
"誤": "误",
"誥": "诰",
"誦": "诵",
"誨": "诲",
"說": "说",
"説": "说",
"誰": "谁",
"課": "课",
"誶": "谇",
"誹": "诽",
"誼": "谊",
"誾": "訚",
"調": "调",
"諂": "谄",
"諄": "谆",
"談": "谈",
"諉": "诿",
"請": "请",
"諍": "诤",
"諏": "诹",
"諑": "诼",
"諒": "谅",
"論": "论",
"諗": "谂",
"諛": "谀",
"諜": "谍",
"諝": "谞",
"諞": "谝",
"諢": "诨",
"諤": "谔",
"諦": "谛",
"諧": "谐",
"諫": "谏",
"諭": "谕",
"諮": "谘",
"諱": "讳",
"諳": "谙",
"諶": "谌",
"諷": "讽",
"諸": "诸",
"諺": "谚",
"諼": "谖",
"諾": "诺",
"謀": "谋",
"謁": "谒",
"謂": "谓",
"謄": "誊",
"謅": "诌",
"謊": "谎",
"謎": "谜",
"謐": "谧",
"謔": "谑",
"謖": "谡",
"謗": "谤",
"謙": "谦",
"謚": "谥",
"講": "讲",
"謝": "谢",
"謠": "谣",
"謡": "谣",
"謨": "谟",
"謫": "谪",
"謬": "谬",
"謭": "谫",
"謳": "讴",
"謹": "谨",
"謾": "谩",
"譅": "䜧",
"證": "证",
"譎": "谲",
"譏": "讥",
"譖": "谮",
"識": "识",
"譙": "谯",
"譚": "谭",
"譜": "谱",
"譫": "谵",
"譯": "译",
"議": "议",
"譴": "谴",
"護": "护",
"譸": "诪",
"譽": "誉",
"譾": "谫",
"讀": "读",
"變": "变",
"讎": "仇",
"讎": "雠",
"讒": "谗",
"讓": "让",
"讕": "谰",
"讖": "谶",
"讜": "谠",
"讞": "谳",
"豈": "岂",
"豎": "竖",
"豐": "丰",
"豬": "猪",
"豶": "豮",
"貓": "猫",
"貙": "䝙",
"貝": "贝",
"貞": "贞",
"貟": "贠",
"負": "负",
"財": "财",
"貢": "贡",
"貧": "贫",
"貨": "货",
"販": "贩",
"貪": "贪",
"貫": "贯",
"責": "责",
"貯": "贮",
"貰": "贳",
"貲": "赀",
"貳": "贰",
"貴": "贵",
"貶": "贬",
"買": "买",
"貸": "贷",
"貺": "贶",
"費": "费",
"貼": "贴",
"貽": "贻",
"貿": "贸",
"賀": "贺",
"賁": "贲",
"賂": "赂",
"賃": "赁",
"賄": "贿",
"賅": "赅",
"資": "资",
"賈": "贾",
"賊": "贼",
"賑": "赈",
"賒": "赊",
"賓": "宾",
"賕": "赇",
"賙": "赒",
"賚": "赉",
"賜": "赐",
"賞": "赏",
"賠": "赔",
"賡": "赓",
"賢": "贤",
"賣": "卖",
"賤": "贱",
"賦": "赋",
"賧": "赕",
"質": "质",
"賫": "赍",
"賬": "账",
"賭": "赌",
"賰": "䞐",
"賴": "赖",
"賵": "赗",
"賺": "赚",
"賻": "赙",
"購": "购",
"賽": "赛",
"賾": "赜",
"贄": "贽",
"贅": "赘",
"贇": "赟",
"贈": "赠",
"贊": "赞",
"贋": "赝",
"贍": "赡",
"贏": "赢",
"贐": "赆",
"贓": "赃",
"贔": "赑",
"贖": "赎",
"贗": "赝",
"贛": "赣",
"贜": "赃",
"赬": "赪",
"趕": "赶",
"趙": "赵",
"趨": "趋",
"趲": "趱",
"跡": "迹",
"踐": "践",
"踴": "踊",
"蹌": "跄",
"蹕": "跸",
"蹣": "蹒",
"蹤": "踪",
"蹺": "跷",
"躂": "跶",
"躉": "趸",
"躊": "踌",
"躋": "跻",
"躍": "跃",
"躑": "踯",
"躒": "跞",
"躓": "踬",
"躕": "蹰",
"躚": "跹",
"躡": "蹑",
"躥": "蹿",
"躦": "躜",
"躪": "躏",
"軀": "躯",
"車": "车",
"軋": "轧",
"軌": "轨",
"軍": "军",
"軑": "轪",
"軒": "轩",
"軔": "轫",
"軛": "轭",
"軟": "软",
"軤": "轷",
"軫": "轸",
"軲": "轱",
"軸": "轴",
"軹": "轵",
"軺": "轺",
"軻": "轲",
"軼": "轶",
"軾": "轼",
"較": "较",
"輅": "辂",
"輇": "辁",
"輈": "辀",
"載": "载",
"輊": "轾",
"輒": "辄",
"輓": "挽",
"輔": "辅",
"輕": "轻",
"輛": "辆",
"輜": "辎",
"輝": "辉",
"輞": "辋",
"輟": "辍",
"輥": "辊",
"輦": "辇",
"輩": "辈",
"輪": "轮",
"輬": "辌",
"輯": "辑",
"輳": "辏",
"輸": "输",
"輻": "辐",
"輾": "辗",
"輿": "舆",
"轀": "辒",
"轂": "毂",
"轄": "辖",
"轅": "辕",
"轆": "辘",
"轉": "转",
"轍": "辙",
"轎": "轿",
"轔": "辚",
"轟": "轰",
"轡": "辔",
"轢": "轹",
"轤": "轳",
"辟": "辟",
"辦": "办",
"辭": "辞",
"辮": "辫",
"辯": "辩",
"農": "农",
"迴": "回",
"适": "适",
"逕": "迳",
"這": "这",
"連": "连",
"週": "周",
"進": "进",
"遊": "游",
"運": "运",
"過": "过",
"達": "达",
"違": "违",
"遙": "遥",
"遜": "逊",
"遞": "递",
"遠": "远",
"適": "适",
"遲": "迟",
"遷": "迁",
"選": "选",
"遺": "遗",
"遼": "辽",
"邁": "迈",
"還": "还",
"邇": "迩",
"邊": "边",
"邏": "逻",
"邐": "逦",
"郁": "郁",
"郟": "郏",
"郵": "邮",
"鄆": "郓",
"鄉": "乡",
"鄒": "邹",
"鄔": "邬",
"鄖": "郧",
"鄧": "邓",
"鄭": "郑",
"鄰": "邻",
"鄲": "郸",
"鄴": "邺",
"鄶": "郐",
"鄺": "邝",
"酇": "酂",
"酈": "郦",
"醖": "酝",
"醜": "丑",
"醞": "酝",
"醫": "医",
"醬": "酱",
"醱": "酦",
"釀": "酿",
"釁": "衅",
"釃": "酾",
"釅": "酽",
"采": "采",
"釋": "释",
"釐": "厘",
"釒": "钅",
"釓": "钆",
"釔": "钇",
"釕": "钌",
"釗": "钊",
"釘": "钉",
"釙": "钋",
"針": "针",
"釣": "钓",
"釤": "钐",
"釧": "钏",
"釩": "钒",
"釵": "钗",
"釷": "钍",
"釹": "钕",
"釺": "钎",
"鈀": "钯",
"鈁": "钫",
"鈃": "钘",
"鈄": "钭",
"鈈": "钚",
"鈉": "钠",
"鈍": "钝",
"鈎": "钩",
"鈐": "钤",
"鈑": "钣",
"鈒": "钑",
"鈔": "钞",
"鈕": "钮",
"鈞": "钧",
"鈣": "钙",
"鈥": "钬",
"鈦": "钛",
"鈧": "钪",
"鈮": "铌",
"鈰": "铈",
"鈳": "钶",
"鈴": "铃",
"鈷": "钴",
"鈸": "钹",
"鈹": "铍",
"鈺": "钰",
"鈽": "钸",
"鈾": "铀",
"鈿": "钿",
"鉀": "钾",
"鉅": "钜",
"鉈": "铊",
"鉉": "铉",
"鉋": "铇",
"鉍": "铋",
"鉑": "铂",
"鉕": "钷",
"鉗": "钳",
"鉚": "铆",
"鉛": "铅",
"鉞": "钺",
"鉢": "钵",
"鉤": "钩",
"鉦": "钲",
"鉬": "钼",
"鉭": "钽",
"鉶": "铏",
"鉸": "铰",
"鉺": "铒",
"鉻": "铬",
"鉿": "铪",
"銀": "银",
"銃": "铳",
"銅": "铜",
"銍": "铚",
"銑": "铣",
"銓": "铨",
"銖": "铢",
"銘": "铭",
"銚": "铫",
"銛": "铦",
"銜": "衔",
"銠": "铑",
"銣": "铷",
"銥": "铱",
"銦": "铟",
"銨": "铵",
"銩": "铥",
"銪": "铕",
"銫": "铯",
"銬": "铐",
"銱": "铞",
"銳": "锐",
"銷": "销",
"銹": "锈",
"銻": "锑",
"銼": "锉",
"鋁": "铝",
"鋃": "锒",
"鋅": "锌",
"鋇": "钡",
"鋌": "铤",
"鋏": "铗",
"鋒": "锋",
"鋙": "铻",
"鋝": "锊",
"鋟": "锓",
"鋣": "铘",
"鋤": "锄",
"鋥": "锃",
"鋦": "锔",
"鋨": "锇",
"鋩": "铓",
"鋪": "铺",
"鋭": "锐",
"鋮": "铖",
"鋯": "锆",
"鋰": "锂",
"鋱": "铽",
"鋶": "锍",
"鋸": "锯",
"鋼": "钢",
"錁": "锞",
"錄": "录",
"錆": "锖",
"錇": "锫",
"錈": "锩",
"錏": "铔",
"錐": "锥",
"錒": "锕",
"錕": "锟",
"錘": "锤",
"錙": "锱",
"錚": "铮",
"錛": "锛",
"錟": "锬",
"錠": "锭",
"錡": "锜",
"錢": "钱",
"錦": "锦",
"錨": "锚",
"錩": "锠",
"錫": "锡",
"錮": "锢",
"錯": "错",
"録": "录",
"錳": "锰",
"錶": "表",
"錸": "铼",
"鍀": "锝",
"鍁": "锨",
"鍃": "锪",
"鍆": "钔",
"鍇": "锴",
"鍈": "锳",
"鍋": "锅",
"鍍": "镀",
"鍔": "锷",
"鍘": "铡",
"鍚": "钖",
"鍛": "锻",
"鍠": "锽",
"鍤": "锸",
"鍥": "锲",
"鍩": "锘",
"鍬": "锹",
"鍰": "锾",
"鍵": "键",
"鍶": "锶",
"鍺": "锗",
"鍾": "钟",
"鎂": "镁",
"鎄": "锿",
"鎇": "镅",
"鎊": "镑",
"鎔": "镕",
"鎖": "锁",
"鎘": "镉",
"鎚": "锤",
"鎛": "镈",
"鎝": "𨱏",
"鎡": "镃",
"鎢": "钨",
"鎣": "蓥",
"鎦": "镏",
"鎧": "铠",
"鎩": "铩",
"鎪": "锼",
"鎬": "镐",
"鎮": "镇",
"鎰": "镒",
"鎲": "镋",
"鎳": "镍",
"鎵": "镓",
"鎸": "镌",
"鎿": "镎",
"鏃": "镞",
"鏇": "镟",
"鏈": "链",
"鏌": "镆",
"鏍": "镙",
"鏐": "镠",
"鏑": "镝",
"鏗": "铿",
"鏘": "锵",
"鏜": "镗",
"鏝": "镘",
"鏞": "镛",
"鏟": "铲",
"鏡": "镜",
"鏢": "镖",
"鏤": "镂",
"鏨": "錾",
"鏰": "镚",
"鏵": "铧",
"鏷": "镤",
"鏹": "镪",
"鏽": "锈",
"鐃": "铙",
"鐋": "铴",
"鐐": "镣",
"鐒": "铹",
"鐓": "镦",
"鐔": "镡",
"鐘": "钟",
"鐙": "镫",
"鐝": "镢",
"鐠": "镨",
"鐦": "锎",
"鐧": "锏",
"鐨": "镄",
"鐫": "镌",
"鐮": "镰",
"鐲": "镯",
"鐳": "镭",
"鐵": "铁",
"鐶": "镮",
"鐸": "铎",
"鐺": "铛",
"鐿": "镱",
"鑄": "铸",
"鑊": "镬",
"鑌": "镔",
"鑒": "鉴",
"鑔": "镲",
"鑕": "锧",
"鑞": "镴",
"鑠": "铄",
"鑣": "镳",
"鑥": "镥",
"鑭": "镧",
"鑰": "钥",
"鑱": "镵",
"鑲": "镶",
"鑷": "镊",
"鑹": "镩",
"鑼": "锣",
"鑽": "钻",
"鑾": "銮",
"鑿": "凿",
"钁": "镢",
"镟": "旋",
"長": "长",
"門": "门",
"閂": "闩",
"閃": "闪",
"閆": "闫",
"閈": "闬",
"閉": "闭",
"開": "开",
"閌": "闶",
"閎": "闳",
"閏": "闰",
"閑": "闲",
"間": "间",
"閔": "闵",
"閘": "闸",
"閡": "阂",
"閣": "阁",
"閤": "合",
"閥": "阀",
"閨": "闺",
"閩": "闽",
"閫": "阃",
"閬": "阆",
"閭": "闾",
"閱": "阅",
"閲": "阅",
"閶": "阊",
"閹": "阉",
"閻": "阎",
"閼": "阏",
"閽": "阍",
"閾": "阈",
"閿": "阌",
"闃": "阒",
"闆": "板",
"闈": "闱",
"闊": "阔",
"闋": "阕",
"闌": "阑",
"闍": "阇",
"闐": "阗",
"闒": "阘",
"闓": "闿",
"闔": "阖",
"闕": "阙",
"闖": "闯",
"關": "关",
"闞": "阚",
"闠": "阓",
"闡": "阐",
"闤": "阛",
"闥": "闼",
"阪": "坂",
"陘": "陉",
"陝": "陕",
"陣": "阵",
"陰": "阴",
"陳": "陈",
"陸": "陆",
"陽": "阳",
"隉": "陧",
"隊": "队",
"階": "阶",
"隕": "陨",
"際": "际",
"隨": "随",
"險": "险",
"隱": "隐",
"隴": "陇",
"隸": "隶",
"隻": "只",
"雋": "隽",
"雖": "虽",
"雙": "双",
"雛": "雏",
"雜": "杂",
"雞": "鸡",
"離": "离",
"難": "难",
"雲": "云",
"電": "电",
"霢": "霡",
"霧": "雾",
"霽": "霁",
"靂": "雳",
"靄": "霭",
"靈": "灵",
"靚": "靓",
"靜": "静",
"靨": "靥",
"鞀": "鼗",
"鞏": "巩",
"鞝": "绱",
"鞦": "秋",
"鞽": "鞒",
"韁": "缰",
"韃": "鞑",
"韆": "千",
"韉": "鞯",
"韋": "韦",
"韌": "韧",
"韍": "韨",
"韓": "韩",
"韙": "韪",
"韜": "韬",
"韞": "韫",
"韻": "韵",
"響": "响",
"頁": "页",
"頂": "顶",
"頃": "顷",
"項": "项",
"順": "顺",
"頇": "顸",
"須": "须",
"頊": "顼",
"頌": "颂",
"頎": "颀",
"頏": "颃",
"預": "预",
"頑": "顽",
"頒": "颁",
"頓": "顿",
"頗": "颇",
"領": "领",
"頜": "颌",
"頡": "颉",
"頤": "颐",
"頦": "颏",
"頭": "头",
"頮": "颒",
"頰": "颊",
"頲": "颋",
"頴": "颕",
"頷": "颔",
"頸": "颈",
"頹": "颓",
"頻": "频",
"頽": "颓",
"顆": "颗",
"題": "题",
"額": "额",
"顎": "颚",
"顏": "颜",
"顒": "颙",
"顓": "颛",
"顔": "颜",
"願": "愿",
"顙": "颡",
"顛": "颠",
"類": "类",
"顢": "颟",
"顥": "颢",
"顧": "顾",
"顫": "颤",
"顬": "颥",
"顯": "显",
"顰": "颦",
"顱": "颅",
"顳": "颞",
"顴": "颧",
"風": "风",
"颭": "飐",
"颮": "飑",
"颯": "飒",
"颱": "台",
"颳": "刮",
"颶": "飓",
"颸": "飔",
"颺": "飏",
"颻": "飖",
"颼": "飕",
"飀": "飗",
"飄": "飘",
"飆": "飙",
"飈": "飚",
"飛": "飞",
"飠": "饣",
"飢": "饥",
"飣": "饤",
"飥": "饦",
"飩": "饨",
"飪": "饪",
"飫": "饫",
"飭": "饬",
"飯": "饭",
"飲": "饮",
"飴": "饴",
"飼": "饲",
"飽": "饱",
"飾": "饰",
"飿": "饳",
"餃": "饺",
"餄": "饸",
"餅": "饼",
"餉": "饷",
"養": "养",
"餌": "饵",
"餎": "饹",
"餏": "饻",
"餑": "饽",
"餒": "馁",
"餓": "饿",
"餕": "馂",
"餖": "饾",
"餚": "肴",
"餛": "馄",
"餜": "馃",
"餞": "饯",
"餡": "馅",
"館": "馆",
"餱": "糇",
"餳": "饧",
"餶": "馉",
"餷": "馇",
"餺": "馎",
"餼": "饩",
"餾": "馏",
"餿": "馊",
"饁": "馌",
"饃": "馍",
"饅": "馒",
"饈": "馐",
"饉": "馑",
"饊": "馓",
"饋": "馈",
"饌": "馔",
"饑": "饥",
"饒": "饶",
"饗": "飨",
"饜": "餍",
"饞": "馋",
"饢": "馕",
"馬": "马",
"馭": "驭",
"馮": "冯",
"馱": "驮",
"馳": "驰",
"馴": "驯",
"馹": "驲",
"駁": "驳",
"駐": "驻",
"駑": "驽",
"駒": "驹",
"駔": "驵",
"駕": "驾",
"駘": "骀",
"駙": "驸",
"駛": "驶",
"駝": "驼",
"駟": "驷",
"駡": "骂",
"駢": "骈",
"駭": "骇",
"駰": "骃",
"駱": "骆",
"駸": "骎",
"駿": "骏",
"騁": "骋",
"騂": "骍",
"騅": "骓",
"騌": "骔",
"騍": "骒",
"騎": "骑",
"騏": "骐",
"騖": "骛",
"騙": "骗",
"騤": "骙",
"騧": "䯄",
"騫": "骞",
"騭": "骘",
"騮": "骝",
"騰": "腾",
"騶": "驺",
"騷": "骚",
"騸": "骟",
"騾": "骡",
"驀": "蓦",
"驁": "骜",
"驂": "骖",
"驃": "骠",
"驄": "骢",
"驅": "驱",
"驊": "骅",
"驌": "骕",
"驍": "骁",
"驏": "骣",
"驕": "骄",
"驗": "验",
"驚": "惊",
"驛": "驿",
"驟": "骤",
"驢": "驴",
"驤": "骧",
"驥": "骥",
"驦": "骦",
"驪": "骊",
"驫": "骉",
"骯": "肮",
"髏": "髅",
"髒": "脏",
"體": "体",
"髕": "髌",
"髖": "髋",
"髮": "发",
"鬆": "松",
"鬍": "胡",
"鬚": "须",
"鬢": "鬓",
"鬥": "斗",
"鬧": "闹",
"鬩": "阋",
"鬮": "阄",
"鬱": "郁",
"魎": "魉",
"魘": "魇",
"魚": "鱼",
"魛": "鱽",
"魢": "鱾",
"魨": "鲀",
"魯": "鲁",
"魴": "鲂",
"魷": "鱿",
"魺": "鲄",
"鮁": "鲅",
"鮃": "鲆",
"鮊": "鲌",
"鮋": "鲉",
"鮍": "鲏",
"鮎": "鲇",
"鮐": "鲐",
"鮑": "鲍",
"鮒": "鲋",
"鮓": "鲊",
"鮚": "鲒",
"鮜": "鲘",
"鮝": "鲞",
"鮞": "鲕",
"鮦": "鲖",
"鮪": "鲔",
"鮫": "鲛",
"鮭": "鲑",
"鮮": "鲜",
"鮳": "鲓",
"鮶": "鲪",
"鮺": "鲝",
"鯀": "鲧",
"鯁": "鲠",
"鯇": "鲩",
"鯉": "鲤",
"鯊": "鲨",
"鯒": "鲬",
"鯔": "鲻",
"鯕": "鲯",
"鯖": "鲭",
"鯗": "鲞",
"鯛": "鲷",
"鯝": "鲴",
"鯡": "鲱",
"鯢": "鲵",
"鯤": "鲲",
"鯧": "鲳",
"鯨": "鲸",
"鯪": "鲮",
"鯫": "鲰",
"鯴": "鲺",
"鯷": "鳀",
"鯽": "鲫",
"鯿": "鳊",
"鰁": "鳈",
"鰂": "鲗",
"鰃": "鳂",
"鰈": "鲽",
"鰉": "鳇",
"鰍": "鳅",
"鰏": "鲾",
"鰐": "鳄",
"鰒": "鳆",
"鰓": "鳃",
"鰜": "鳒",
"鰟": "鳑",
"鰠": "鳋",
"鰣": "鲥",
"鰥": "鳏",
"鰨": "鳎",
"鰩": "鳐",
"鰭": "鳍",
"鰮": "鳁",
"鰱": "鲢",
"鰲": "鳌",
"鰳": "鳓",
"鰵": "鳘",
"鰷": "鲦",
"鰹": "鲣",
"鰺": "鲹",
"鰻": "鳗",
"鰼": "鳛",
"鰾": "鳔",
"鱂": "鳉",
"鱅": "鳙",
"鱈": "鳕",
"鱉": "鳖",
"鱒": "鳟",
"鱔": "鳝",
"鱖": "鳜",
"鱗": "鳞",
"鱘": "鲟",
"鱝": "鲼",
"鱟": "鲎",
"鱠": "鲙",
"鱣": "鳣",
"鱤": "鳡",
"鱧": "鳢",
"鱨": "鲿",
"鱭": "鲚",
"鱯": "鳠",
"鱷": "鳄",
"鱸": "鲈",
"鱺": "鲡",
"䰾": "鲃",
"䲁": "鳚",
"鳥": "鸟",
"鳧": "凫",
"鳩": "鸠",
"鳬": "凫",
"鳲": "鸤",
"鳳": "凤",
"鳴": "鸣",
"鳶": "鸢",
"鳾": "䴓",
"鴆": "鸩",
"鴇": "鸨",
"鴉": "鸦",
"鴒": "鸰",
"鴕": "鸵",
"鴛": "鸳",
"鴝": "鸲",
"鴞": "鸮",
"鴟": "鸱",
"鴣": "鸪",
"鴦": "鸯",
"鴨": "鸭",
"鴯": "鸸",
"鴰": "鸹",
"鴴": "鸻",
"鴷": "䴕",
"鴻": "鸿",
"鴿": "鸽",
"鵁": "䴔",
"鵂": "鸺",
"鵃": "鸼",
"鵐": "鹀",
"鵑": "鹃",
"鵒": "鹆",
"鵓": "鹁",
"鵜": "鹈",
"鵝": "鹅",
"鵠": "鹄",
"鵡": "鹉",
"鵪": "鹌",
"鵬": "鹏",
"鵮": "鹐",
"鵯": "鹎",
"鵲": "鹊",
"鵷": "鹓",
"鵾": "鹍",
"鶄": "䴖",
"鶇": "鸫",
"鶉": "鹑",
"鶊": "鹒",
"鶓": "鹋",
"鶖": "鹙",
"鶘": "鹕",
"鶚": "鹗",
"鶡": "鹖",
"鶥": "鹛",
"鶩": "鹜",
"鶪": "䴗",
"鶬": "鸧",
"鶯": "莺",
"鶲": "鹟",
"鶴": "鹤",
"鶹": "鹠",
"鶺": "鹡",
"鶻": "鹘",
"鶼": "鹣",
"鶿": "鹚",
"鷀": "鹚",
"鷁": "鹢",
"鷂": "鹞",
"鷄": "鸡",
"鷈": "䴘",
"鷊": "鹝",
"鷓": "鹧",
"鷖": "鹥",
"鷗": "鸥",
"鷙": "鸷",
"鷚": "鹨",
"鷥": "鸶",
"鷦": "鹪",
"鷫": "鹔",
"鷯": "鹩",
"鷲": "鹫",
"鷳": "鹇",
"鷸": "鹬",
"鷹": "鹰",
"鷺": "鹭",
"鷽": "鸴",
"鷿": "䴙",
"鸂": "㶉",
"鸇": "鹯",
"鸌": "鹱",
"鸏": "鹲",
"鸕": "鸬",
"鸘": "鹴",
"鸚": "鹦",
"鸛": "鹳",
"鸝": "鹂",
"鸞": "鸾",
"鹵": "卤",
"鹹": "咸",
"鹺": "鹾",
"鹽": "盐",
"麗": "丽",
"麥": "麦",
"麩": "麸",
"麯": "曲",
"麵": "面",
"麼": "么",
"麽": "么",
"黃": "黄",
"黌": "黉",
"點": "点",
"黨": "党",
"黲": "黪",
"黴": "霉",
"黶": "黡",
"黷": "黩",
"黽": "黾",
"黿": "鼋",
"鼉": "鼍",
"鼕": "冬",
"鼴": "鼹",
"齊": "齐",
"齋": "斋",
"齎": "赍",
"齏": "齑",
"齒": "齿",
"齔": "龀",
"齕": "龁",
"齗": "龂",
"齙": "龅",
"齜": "龇",
"齟": "龃",
"齠": "龆",
"齡": "龄",
"齣": "出",
"齦": "龈",
"齪": "龊",
"齬": "龉",
"齲": "龋",
"齶": "腭",
"齷": "龌",
"龍": "龙",
"龎": "厐",
"龐": "庞",
"龔": "龚",
"龕": "龛",
"龜": "龟",
"幾畫": "几画",
"賣畫": "卖画",
"滷鹼": "卤碱",
"原畫": "原画",
"口鹼": "口碱",
"古畫": "古画",
"名畫": "名画",
"奇畫": "奇画",
"如畫": "如画",
"弱鹼": "弱碱",
"彩畫": "彩画",
"所畫": "所画",
"扉畫": "扉画",
"教畫": "教画",
"水鹼": "水碱",
"洋鹼": "洋碱",
"炭畫": "炭画",
"畫一": "画一",
"畫上": "画上",
"畫下": "画下",
"畫中": "画中",
"畫供": "画供",
"畫兒": "画儿",
"畫具": "画具",
"畫出": "画出",
"畫史": "画史",
"畫品": "画品",
"畫商": "画商",
"畫圈": "画圈",
"畫境": "画境",
"畫工": "画工",
"畫帖": "画帖",
"畫幅": "画幅",
"畫意": "画意",
"畫成": "画成",
"畫景": "画景",
"畫本": "画本",
"畫架": "画架",
"畫框": "画框",
"畫法": "画法",
"畫王": "画王",
"畫界": "画界",
"畫符": "画符",
"畫紙": "画纸",
"畫線": "画线",
"畫航": "画航",
"畫舫": "画舫",
"畫虎": "画虎",
"畫論": "画论",
"畫譜": "画谱",
"畫象": "画象",
"畫質": "画质",
"畫貼": "画贴",
"畫軸": "画轴",
"畫頁": "画页",
"鹽鹼": "盐碱",
"鹼": "碱",
"鹼基": "碱基",
"鹼度": "碱度",
"鹼水": "碱水",
"鹼熔": "碱熔",
"磁畫": "磁画",
"策畫": "策画",
"組畫": "组画",
"絹畫": "绢画",
"耐鹼": "耐碱",
"肉鹼": "肉碱",
"膠畫": "胶画",
"茶鹼": "茶碱",
"西畫": "西画",
"貼畫": "贴画",
"返鹼": "返碱",
"鍾鍛": "锺锻",
"鍛鍾": "锻锺",
"雕畫": "雕画",
"鯰": "鲶",
"三聯畫": "三联画",
"中國畫": "中国画",
"書畫": "书画",
"書畫社": "书画社",
"五筆畫": "五笔画",
"作畫": "作画",
"入畫": "入画",
"寫生畫": "写生画",
"刻畫": "刻画",
"動畫": "动画",
"勾畫": "勾画",
"單色畫": "单色画",
"卡通畫": "卡通画",
"國畫": "国画",
"圖畫": "图画",
"壁畫": "壁画",
"字畫": "字画",
"宣傳畫": "宣传画",
"工筆畫": "工笔画",
"年畫": "年画",
"幽默畫": "幽默画",
"指畫": "指画",
"描畫": "描画",
"插畫": "插画",
"擘畫": "擘画",
"春畫": "春画",
"木刻畫": "木刻画",
"機械畫": "机械画",
"比畫": "比画",
"毛筆畫": "毛笔画",
"水粉畫": "水粉画",
"油畫": "油画",
"海景畫": "海景画",
"漫畫": "漫画",
"點畫": "点画",
"版畫": "版画",
"畫": "画",
"畫像": "画像",
"畫冊": "画册",
"畫刊": "画刊",
"畫匠": "画匠",
"畫捲": "画卷",
"畫圖": "画图",
"畫壇": "画坛",
"畫室": "画室",
"畫家": "画家",
"畫屏": "画屏",
"畫展": "画展",
"畫布": "画布",
"畫師": "画师",
"畫廊": "画廊",
"畫報": "画报",
"畫押": "画押",
"畫板": "画板",
"畫片": "画片",
"畫畫": "画画",
"畫皮": "画皮",
"畫眉鳥": "画眉鸟",
"畫稿": "画稿",
"畫筆": "画笔",
"畫院": "画院",
"畫集": "画集",
"畫面": "画面",
"筆畫": "笔画",
"細密畫": "细密画",
"繪畫": "绘画",
"自畫像": "自画像",
"蠟筆畫": "蜡笔画",
"裸體畫": "裸体画",
"西洋畫": "西洋画",
"透視畫": "透视画",
"銅版畫": "铜版画",
"鍾": "锺",
"靜物畫": "静物画",
"餘": "馀",
}
zh2TW = {
"缺省": "預設",
"串行": "串列",
"以太网": "乙太網",
"位图": "點陣圖",
"例程": "常式",
"信道": "通道",
"光标": "游標",
"光盘": "光碟",
"光驱": "光碟機",
"全角": "全形",
"加载": "載入",
"半角": "半形",
"变量": "變數",
"噪声": "雜訊",
"脱机": "離線",
"声卡": "音效卡",
"老字号": "老字號",
"字号": "字型大小",
"字库": "字型檔",
"字段": "欄位",
"字符": "字元",
"存盘": "存檔",
"寻址": "定址",
"尾注": "章節附註",
"异步": "非同步",
"总线": "匯流排",
"括号": "括弧",
"接口": "介面",
"控件": "控制項",
"权限": "許可權",
"盘片": "碟片",
"硅片": "矽片",
"硅谷": "矽谷",
"硬盘": "硬碟",
"磁盘": "磁碟",
"磁道": "磁軌",
"程控": "程式控制",
"端口": "埠",
"算子": "運算元",
"算法": "演算法",
"芯片": "晶片",
"芯片": "晶元",
"词组": "片語",
"译码": "解碼",
"软驱": "軟碟機",
"快闪存储器": "快閃記憶體",
"闪存": "快閃記憶體",
"鼠标": "滑鼠",
"进制": "進位",
"交互式": "互動式",
"仿真": "模擬",
"优先级": "優先順序",
"传感": "感測",
"便携式": "攜帶型",
"信息论": "資訊理論",
"写保护": "防寫",
"分布式": "分散式",
"分辨率": "解析度",
"服务器": "伺服器",
"等于": "等於",
"局域网": "區域網",
"计算机": "電腦",
"扫瞄仪": "掃瞄器",
"宽带": "寬頻",
"数据库": "資料庫",
"奶酪": "乳酪",
"巨商": "鉅賈",
"手电": "手電筒",
"万历": "萬曆",
"永历": "永曆",
"词汇": "辭彙",
"习用": "慣用",
"元音": "母音",
"任意球": "自由球",
"头球": "頭槌",
"入球": "進球",
"粒入球": "顆進球",
"打门": "射門",
"火锅盖帽": "蓋火鍋",
"打印机": "印表機",
"打印機": "印表機",
"字节": "位元組",
"字節": "位元組",
"打印": "列印",
"打印": "列印",
"硬件": "硬體",
"硬件": "硬體",
"二极管": "二極體",
"二極管": "二極體",
"三极管": "三極體",
"三極管": "三極體",
"软件": "軟體",
"軟件": "軟體",
"网络": "網路",
"網絡": "網路",
"人工智能": "人工智慧",
"航天飞机": "太空梭",
"穿梭機": "太空梭",
"因特网": "網際網路",
"互聯網": "網際網路",
"机器人": "機器人",
"機械人": "機器人",
"移动电话": "行動電話",
"流動電話": "行動電話",
"调制解调器": "數據機",
"調制解調器": "數據機",
"短信": "簡訊",
"短訊": "簡訊",
"乌兹别克斯坦": "烏茲別克",
"乍得": "查德",
"乍得": "查德",
"也门": "葉門",
"也門": "葉門",
"伯利兹": "貝里斯",
"伯利茲": "貝里斯",
"佛得角": "維德角",
"佛得角": "維德角",
"克罗地亚": "克羅埃西亞",
"克羅地亞": "克羅埃西亞",
"冈比亚": "甘比亞",
"岡比亞": "甘比亞",
"几内亚比绍": "幾內亞比索",
"幾內亞比紹": "幾內亞比索",
"列支敦士登": "列支敦斯登",
"列支敦士登": "列支敦斯登",
"利比里亚": "賴比瑞亞",
"利比里亞": "賴比瑞亞",
"加纳": "迦納",
"加納": "迦納",
"加蓬": "加彭",
"加蓬": "加彭",
"博茨瓦纳": "波札那",
"博茨瓦納": "波札那",
"卡塔尔": "卡達",
"卡塔爾": "卡達",
"卢旺达": "盧安達",
"盧旺達": "盧安達",
"危地马拉": "瓜地馬拉",
"危地馬拉": "瓜地馬拉",
"厄瓜多尔": "厄瓜多",
"厄瓜多爾": "厄瓜多",
"厄立特里亚": "厄利垂亞",
"厄立特里亞": "厄利垂亞",
"吉布提": "吉布地",
"吉布堤": "吉布地",
"哈萨克斯坦": "哈薩克",
"哥斯达黎加": "哥斯大黎加",
"哥斯達黎加": "哥斯大黎加",
"图瓦卢": "吐瓦魯",
"圖瓦盧": "吐瓦魯",
"土库曼斯坦": "土庫曼",
"圣卢西亚": "聖露西亞",
"聖盧西亞": "聖露西亞",
"圣基茨和尼维斯": "聖克里斯多福及尼維斯",
"聖吉斯納域斯": "聖克里斯多福及尼維斯",
"圣文森特和格林纳丁斯": "聖文森及格瑞那丁",
"聖文森特和格林納丁斯": "聖文森及格瑞那丁",
"圣马力诺": "聖馬利諾",
"聖馬力諾": "聖馬利諾",
"圭亚那": "蓋亞那",
"圭亞那": "蓋亞那",
"坦桑尼亚": "坦尚尼亞",
"坦桑尼亞": "坦尚尼亞",
"埃塞俄比亚": "衣索比亞",
"埃塞俄比亞": "衣索比亞",
"基里巴斯": "吉里巴斯",
"基里巴斯": "吉里巴斯",
"塔吉克斯坦": "塔吉克",
"塞拉利昂": "獅子山",
"塞拉利昂": "獅子山",
"塞浦路斯": "塞普勒斯",
"塞浦路斯": "塞普勒斯",
"塞舌尔": "塞席爾",
"塞舌爾": "塞席爾",
"多米尼加": "多明尼加",
"多明尼加共和國": "多明尼加",
"多米尼加联邦": "多米尼克",
"多明尼加聯邦": "多米尼克",
"安提瓜和巴布达": "安地卡及巴布達",
"安提瓜和巴布達": "安地卡及巴布達",
"尼日利亚": "奈及利亞",
"尼日利亞": "奈及利亞",
"尼日尔": "尼日",
"尼日爾": "尼日",
"巴巴多斯": "巴貝多",
"巴巴多斯": "巴貝多",
"巴布亚新几内亚": "巴布亞紐幾內亞",
"巴布亞新畿內亞": "巴布亞紐幾內亞",
"布基纳法索": "布吉納法索",
"布基納法索": "布吉納法索",
"布隆迪": "蒲隆地",
"布隆迪": "蒲隆地",
"希腊": "希臘",
"帕劳": "帛琉",
"意大利": "義大利",
"意大利": "義大利",
"所罗门群岛": "索羅門群島",
"所羅門群島": "索羅門群島",
"文莱": "汶萊",
"斯威士兰": "史瓦濟蘭",
"斯威士蘭": "史瓦濟蘭",
"斯洛文尼亚": "斯洛維尼亞",
"斯洛文尼亞": "斯洛維尼亞",
"新西兰": "紐西蘭",
"新西蘭": "紐西蘭",
"格林纳达": "格瑞那達",
"格林納達": "格瑞那達",
"格鲁吉亚": "喬治亞",
"格魯吉亞": "喬治亞",
"佐治亚": "喬治亞",
"佐治亞": "喬治亞",
"毛里塔尼亚": "茅利塔尼亞",
"毛里塔尼亞": "茅利塔尼亞",
"毛里求斯": "模里西斯",
"毛里裘斯": "模里西斯",
"沙特阿拉伯": "沙烏地阿拉伯",
"沙地阿拉伯": "沙烏地阿拉伯",
"波斯尼亚和黑塞哥维那": "波士尼亞赫塞哥維納",
"波斯尼亞黑塞哥維那": "波士尼亞赫塞哥維納",
"津巴布韦": "辛巴威",
"津巴布韋": "辛巴威",
"洪都拉斯": "宏都拉斯",
"洪都拉斯": "宏都拉斯",
"特立尼达和托巴哥": "千里達托貝哥",
"特立尼達和多巴哥": "千里達托貝哥",
"瑙鲁": "諾魯",
"瑙魯": "諾魯",
"瓦努阿图": "萬那杜",
"瓦努阿圖": "萬那杜",
"溫納圖萬": "那杜",
"科摩罗": "葛摩",
"科摩羅": "葛摩",
"科特迪瓦": "象牙海岸",
"突尼斯": "突尼西亞",
"索马里": "索馬利亞",
"索馬里": "索馬利亞",
"老挝": "寮國",
"老撾": "寮國",
"肯尼亚": "肯亞",
"肯雅": "肯亞",
"苏里南": "蘇利南",
"莫桑比克": "莫三比克",
"莱索托": "賴索托",
"萊索托": "賴索托",
"贝宁": "貝南",
"貝寧": "貝南",
"赞比亚": "尚比亞",
"贊比亞": "尚比亞",
"阿塞拜疆": "亞塞拜然",
"阿塞拜疆": "亞塞拜然",
"阿拉伯联合酋长国": "阿拉伯聯合大公國",
"阿拉伯聯合酋長國": "阿拉伯聯合大公國",
"马尔代夫": "馬爾地夫",
"馬爾代夫": "馬爾地夫",
"马耳他": "馬爾他",
"马里共和国": "馬利共和國",
"馬里共和國": "馬利共和國",
"方便面": "速食麵",
"快速面": "速食麵",
"即食麵": "速食麵",
"薯仔": "土豆",
"蹦极跳": "笨豬跳",
"绑紧跳": "笨豬跳",
"冷菜": "冷盤",
"凉菜": "冷盤",
"出租车": "計程車",
"台球": "撞球",
"桌球": "撞球",
"雪糕": "冰淇淋",
"卫生": "衛生",
"衞生": "衛生",
"平治": "賓士",
"奔驰": "賓士",
"積架": "捷豹",
"福士": "福斯",
"雪铁龙": "雪鐵龍",
"马自达": "馬自達",
"萬事得": "馬自達",
"拿破仑": "拿破崙",
"拿破侖": "拿破崙",
"布什": "布希",
"布殊": "布希",
"克林顿": "柯林頓",
"克林頓": "柯林頓",
"侯赛因": "海珊",
"侯賽因": "海珊",
"凡高": "梵谷",
"狄安娜": "黛安娜",
"戴安娜": "黛安娜",
"赫拉": "希拉",
}
zh2HK = {
"打印机": "打印機",
"印表機": "打印機",
"字节": "位元組",
"字節": "位元組",
"打印": "打印",
"列印": "打印",
"硬件": "硬件",
"硬體": "硬件",
"二极管": "二極管",
"二極體": "二極管",
"三极管": "三極管",
"三極體": "三極管",
"数码": "數碼",
"數位": "數碼",
"软件": "軟件",
"軟體": "軟件",
"网络": "網絡",
"網路": "網絡",
"人工智能": "人工智能",
"人工智慧": "人工智能",
"航天飞机": "穿梭機",
"太空梭": "穿梭機",
"因特网": "互聯網",
"網際網路": "互聯網",
"机器人": "機械人",
"機器人": "機械人",
"移动电话": "流動電話",
"行動電話": "流動電話",
"调制解调器": "調制解調器",
"數據機": "調制解調器",
"短信": "短訊",
"簡訊": "短訊",
"乍得": "乍得",
"查德": "乍得",
"也门": "也門",
"葉門": "也門",
"伯利兹": "伯利茲",
"貝里斯": "伯利茲",
"佛得角": "佛得角",
"維德角": "佛得角",
"克罗地亚": "克羅地亞",
"克羅埃西亞": "克羅地亞",
"冈比亚": "岡比亞",
"甘比亞": "岡比亞",
"几内亚比绍": "幾內亞比紹",
"幾內亞比索": "幾內亞比紹",
"列支敦士登": "列支敦士登",
"列支敦斯登": "列支敦士登",
"利比里亚": "利比里亞",
"賴比瑞亞": "利比里亞",
"加纳": "加納",
"迦納": "加納",
"加蓬": "加蓬",
"加彭": "加蓬",
"博茨瓦纳": "博茨瓦納",
"波札那": "博茨瓦納",
"卡塔尔": "卡塔爾",
"卡達": "卡塔爾",
"卢旺达": "盧旺達",
"盧安達": "盧旺達",
"危地马拉": "危地馬拉",
"瓜地馬拉": "危地馬拉",
"厄瓜多尔": "厄瓜多爾",
"厄瓜多": "厄瓜多爾",
"厄立特里亚": "厄立特里亞",
"厄利垂亞": "厄立特里亞",
"吉布提": "吉布堤",
"吉布地": "吉布堤",
"哥斯达黎加": "哥斯達黎加",
"哥斯大黎加": "哥斯達黎加",
"图瓦卢": "圖瓦盧",
"吐瓦魯": "圖瓦盧",
"圣卢西亚": "聖盧西亞",
"聖露西亞": "聖盧西亞",
"圣基茨和尼维斯": "聖吉斯納域斯",
"聖克里斯多福及尼維斯": "聖吉斯納域斯",
"圣文森特和格林纳丁斯": "聖文森特和格林納丁斯",
"聖文森及格瑞那丁": "聖文森特和格林納丁斯",
"圣马力诺": "聖馬力諾",
"聖馬利諾": "聖馬力諾",
"圭亚那": "圭亞那",
"蓋亞那": "圭亞那",
"坦桑尼亚": "坦桑尼亞",
"坦尚尼亞": "坦桑尼亞",
"埃塞俄比亚": "埃塞俄比亞",
"衣索匹亞": "埃塞俄比亞",
"衣索比亞": "埃塞俄比亞",
"基里巴斯": "基里巴斯",
"吉里巴斯": "基里巴斯",
"狮子山": "獅子山",
"塞普勒斯": "塞浦路斯",
"塞舌尔": "塞舌爾",
"塞席爾": "塞舌爾",
"多米尼加": "多明尼加共和國",
"多明尼加": "多明尼加共和國",
"多米尼加联邦": "多明尼加聯邦",
"多米尼克": "多明尼加聯邦",
"安提瓜和巴布达": "安提瓜和巴布達",
"安地卡及巴布達": "安提瓜和巴布達",
"尼日利亚": "尼日利亞",
"奈及利亞": "尼日利亞",
"尼日尔": "尼日爾",
"尼日": "尼日爾",
"巴巴多斯": "巴巴多斯",
"巴貝多": "巴巴多斯",
"巴布亚新几内亚": "巴布亞新畿內亞",
"巴布亞紐幾內亞": "巴布亞新畿內亞",
"布基纳法索": "布基納法索",
"布吉納法索": "布基納法索",
"布隆迪": "布隆迪",
"蒲隆地": "布隆迪",
"義大利": "意大利",
"所罗门群岛": "所羅門群島",
"索羅門群島": "所羅門群島",
"斯威士兰": "斯威士蘭",
"史瓦濟蘭": "斯威士蘭",
"斯洛文尼亚": "斯洛文尼亞",
"斯洛維尼亞": "斯洛文尼亞",
"新西兰": "新西蘭",
"紐西蘭": "新西蘭",
"格林纳达": "格林納達",
"格瑞那達": "格林納達",
"格鲁吉亚": "喬治亞",
"格魯吉亞": "喬治亞",
"梵蒂冈": "梵蒂岡",
"毛里塔尼亚": "毛里塔尼亞",
"茅利塔尼亞": "毛里塔尼亞",
"毛里求斯": "毛里裘斯",
"模里西斯": "毛里裘斯",
"沙烏地阿拉伯": "沙特阿拉伯",
"波斯尼亚和黑塞哥维那": "波斯尼亞黑塞哥維那",
"波士尼亞赫塞哥維納": "波斯尼亞黑塞哥維那",
"津巴布韦": "津巴布韋",
"辛巴威": "津巴布韋",
"洪都拉斯": "洪都拉斯",
"宏都拉斯": "洪都拉斯",
"特立尼达和托巴哥": "特立尼達和多巴哥",
"千里達托貝哥": "特立尼達和多巴哥",
"瑙鲁": "瑙魯",
"諾魯": "瑙魯",
"瓦努阿图": "瓦努阿圖",
"萬那杜": "瓦努阿圖",
"科摩罗": "科摩羅",
"葛摩": "科摩羅",
"索马里": "索馬里",
"索馬利亞": "索馬里",
"老挝": "老撾",
"寮國": "老撾",
"肯尼亚": "肯雅",
"肯亞": "肯雅",
"莫桑比克": "莫桑比克",
"莫三比克": "莫桑比克",
"莱索托": "萊索托",
"賴索托": "萊索托",
"贝宁": "貝寧",
"貝南": "貝寧",
"赞比亚": "贊比亞",
"尚比亞": "贊比亞",
"阿塞拜疆": "阿塞拜疆",
"亞塞拜然": "阿塞拜疆",
"阿拉伯联合酋长国": "阿拉伯聯合酋長國",
"阿拉伯聯合大公國": "阿拉伯聯合酋長國",
"马尔代夫": "馬爾代夫",
"馬爾地夫": "馬爾代夫",
"馬利共和國": "馬里共和國",
"方便面": "即食麵",
"快速面": "即食麵",
"速食麵": "即食麵",
"泡麵": "即食麵",
"土豆": "馬鈴薯",
"华乐": "中樂",
"民乐": "中樂",
"計程車": "的士",
"出租车": "的士",
"公車": "巴士",
"自行车": "單車",
"犬只": "狗隻",
"台球": "桌球",
"撞球": "桌球",
"冰淇淋": "雪糕",
"賓士": "平治",
"捷豹": "積架",
"福斯": "福士",
"雪铁龙": "先進",
"雪鐵龍": "先進",
"沃尓沃": "富豪",
"马自达": "萬事得",
"馬自達": "萬事得",
"寶獅": "標致",
"拿破崙": "拿破侖",
"布什": "布殊",
"布希": "布殊",
"克林顿": "克林頓",
"柯林頓": "克林頓",
"萨达姆": "薩達姆",
"海珊": "侯賽因",
"侯赛因": "侯賽因",
"大卫·贝克汉姆": "大衛碧咸",
"迈克尔·欧文": "米高奧雲",
"珍妮弗·卡普里亚蒂": "卡佩雅蒂",
"马拉特·萨芬": "沙芬",
"迈克尔·舒马赫": "舒麥加",
"希特勒": "希特拉",
"狄安娜": "戴安娜",
"黛安娜": "戴安娜",
}
zh2CN = {
"記憶體": "内存",
"預設": "默认",
"串列": "串行",
"乙太網": "以太网",
"點陣圖": "位图",
"常式": "例程",
"游標": "光标",
"光碟": "光盘",
"光碟機": "光驱",
"全形": "全角",
"共用": "共享",
"載入": "加载",
"半形": "半角",
"變數": "变量",
"雜訊": "噪声",
"因數": "因子",
"功能變數名稱": "域名",
"音效卡": "声卡",
"字型大小": "字号",
"字型檔": "字库",
"欄位": "字段",
"字元": "字符",
"存檔": "存盘",
"定址": "寻址",
"章節附註": "尾注",
"非同步": "异步",
"匯流排": "总线",
"括弧": "括号",
"介面": "接口",
"控制項": "控件",
"許可權": "权限",
"碟片": "盘片",
"矽片": "硅片",
"矽谷": "硅谷",
"硬碟": "硬盘",
"磁碟": "磁盘",
"磁軌": "磁道",
"程式控制": "程控",
"運算元": "算子",
"演算法": "算法",
"晶片": "芯片",
"晶元": "芯片",
"片語": "词组",
"軟碟機": "软驱",
"快閃記憶體": "快闪存储器",
"滑鼠": "鼠标",
"進位": "进制",
"互動式": "交互式",
"優先順序": "优先级",
"感測": "传感",
"攜帶型": "便携式",
"資訊理論": "信息论",
"迴圈": "循环",
"防寫": "写保护",
"分散式": "分布式",
"解析度": "分辨率",
"伺服器": "服务器",
"等於": "等于",
"區域網": "局域网",
"巨集": "宏",
"掃瞄器": "扫瞄仪",
"寬頻": "宽带",
"資料庫": "数据库",
"乳酪": "奶酪",
"鉅賈": "巨商",
"手電筒": "手电",
"萬曆": "万历",
"永曆": "永历",
"辭彙": "词汇",
"母音": "元音",
"自由球": "任意球",
"頭槌": "头球",
"進球": "入球",
"顆進球": "粒入球",
"射門": "打门",
"蓋火鍋": "火锅盖帽",
"印表機": "打印机",
"打印機": "打印机",
"位元組": "字节",
"字節": "字节",
"列印": "打印",
"打印": "打印",
"硬體": "硬件",
"二極體": "二极管",
"二極管": "二极管",
"三極體": "三极管",
"三極管": "三极管",
"數位": "数码",
"數碼": "数码",
"軟體": "软件",
"軟件": "软件",
"網路": "网络",
"網絡": "网络",
"人工智慧": "人工智能",
"太空梭": "航天飞机",
"穿梭機": "航天飞机",
"網際網路": "因特网",
"互聯網": "因特网",
"機械人": "机器人",
"機器人": "机器人",
"行動電話": "移动电话",
"流動電話": "移动电话",
"調制解調器": "调制解调器",
"數據機": "调制解调器",
"短訊": "短信",
"簡訊": "短信",
"烏茲別克": "乌兹别克斯坦",
"查德": "乍得",
"乍得": "乍得",
"也門": "",
"葉門": "也门",
"伯利茲": "伯利兹",
"貝里斯": "伯利兹",
"維德角": "佛得角",
"佛得角": "佛得角",
"克羅地亞": "克罗地亚",
"克羅埃西亞": "克罗地亚",
"岡比亞": "冈比亚",
"甘比亞": "冈比亚",
"幾內亞比紹": "几内亚比绍",
"幾內亞比索": "几内亚比绍",
"列支敦斯登": "列支敦士登",
"列支敦士登": "列支敦士登",
"利比里亞": "利比里亚",
"賴比瑞亞": "利比里亚",
"加納": "加纳",
"迦納": "加纳",
"加彭": "加蓬",
"加蓬": "加蓬",
"博茨瓦納": "博茨瓦纳",
"波札那": "博茨瓦纳",
"卡塔爾": "卡塔尔",
"卡達": "卡塔尔",
"盧旺達": "卢旺达",
"盧安達": "卢旺达",
"危地馬拉": "危地马拉",
"瓜地馬拉": "危地马拉",
"厄瓜多爾": "厄瓜多尔",
"厄瓜多": "厄瓜多尔",
"厄立特里亞": "厄立特里亚",
"厄利垂亞": "厄立特里亚",
"吉布堤": "吉布提",
"吉布地": "吉布提",
"哈薩克": "哈萨克斯坦",
"哥斯達黎加": "哥斯达黎加",
"哥斯大黎加": "哥斯达黎加",
"圖瓦盧": "图瓦卢",
"吐瓦魯": "图瓦卢",
"土庫曼": "土库曼斯坦",
"聖盧西亞": "圣卢西亚",
"聖露西亞": "圣卢西亚",
"聖吉斯納域斯": "圣基茨和尼维斯",
"聖克里斯多福及尼維斯": "圣基茨和尼维斯",
"聖文森特和格林納丁斯": "圣文森特和格林纳丁斯",
"聖文森及格瑞那丁": "圣文森特和格林纳丁斯",
"聖馬力諾": "圣马力诺",
"聖馬利諾": "圣马力诺",
"圭亞那": "圭亚那",
"蓋亞那": "圭亚那",
"坦桑尼亞": "坦桑尼亚",
"坦尚尼亞": "坦桑尼亚",
"埃塞俄比亞": "埃塞俄比亚",
"衣索匹亞": "埃塞俄比亚",
"衣索比亞": "埃塞俄比亚",
"吉里巴斯": "基里巴斯",
"基里巴斯": "基里巴斯",
"塔吉克": "塔吉克斯坦",
"塞拉利昂": "塞拉利昂",
"塞普勒斯": "塞浦路斯",
"塞浦路斯": "塞浦路斯",
"塞舌爾": "塞舌尔",
"塞席爾": "塞舌尔",
"多明尼加共和國": "多米尼加",
"多明尼加": "多米尼加",
"多明尼加聯邦": "多米尼加联邦",
"多米尼克": "多米尼加联邦",
"安提瓜和巴布達": "安提瓜和巴布达",
"安地卡及巴布達": "安提瓜和巴布达",
"尼日利亞": "尼日利亚",
"奈及利亞": "尼日利亚",
"尼日爾": "尼日尔",
"尼日": "尼日尔",
"巴貝多": "巴巴多斯",
"巴巴多斯": "巴巴多斯",
"巴布亞新畿內亞": "巴布亚新几内亚",
"巴布亞紐幾內亞": "巴布亚新几内亚",
"布基納法索": "布基纳法索",
"布吉納法索": "布基纳法索",
"蒲隆地": "布隆迪",
"布隆迪": "布隆迪",
"希臘": "希腊",
"帛琉": "帕劳",
"義大利": "意大利",
"意大利": "意大利",
"所羅門群島": "所罗门群岛",
"索羅門群島": "所罗门群岛",
"汶萊": "文莱",
"斯威士蘭": "斯威士兰",
"史瓦濟蘭": "斯威士兰",
"斯洛文尼亞": "斯洛文尼亚",
"斯洛維尼亞": "斯洛文尼亚",
"新西蘭": "新西兰",
"紐西蘭": "新西兰",
"格林納達": "格林纳达",
"格瑞那達": "格林纳达",
"格魯吉亞": "乔治亚",
"喬治亞": "乔治亚",
"梵蒂岡": "梵蒂冈",
"毛里塔尼亞": "毛里塔尼亚",
"茅利塔尼亞": "毛里塔尼亚",
"毛里裘斯": "毛里求斯",
"模里西斯": "毛里求斯",
"沙地阿拉伯": "沙特阿拉伯",
"沙烏地阿拉伯": "沙特阿拉伯",
"波斯尼亞黑塞哥維那": "波斯尼亚和黑塞哥维那",
"波士尼亞赫塞哥維納": "波斯尼亚和黑塞哥维那",
"津巴布韋": "津巴布韦",
"辛巴威": "津巴布韦",
"宏都拉斯": "洪都拉斯",
"洪都拉斯": "洪都拉斯",
"特立尼達和多巴哥": "特立尼达和托巴哥",
"千里達托貝哥": "特立尼达和托巴哥",
"瑙魯": "瑙鲁",
"諾魯": "瑙鲁",
"瓦努阿圖": "瓦努阿图",
"萬那杜": "瓦努阿图",
"溫納圖": "瓦努阿图",
"科摩羅": "科摩罗",
"葛摩": "科摩罗",
"象牙海岸": "科特迪瓦",
"突尼西亞": "突尼斯",
"索馬里": "索马里",
"索馬利亞": "索马里",
"老撾": "老挝",
"寮國": "老挝",
"肯雅": "肯尼亚",
"肯亞": "肯尼亚",
"蘇利南": "苏里南",
"莫三比克": "莫桑比克",
"莫桑比克": "莫桑比克",
"萊索托": "莱索托",
"賴索托": "莱索托",
"貝寧": "贝宁",
"貝南": "贝宁",
"贊比亞": "赞比亚",
"尚比亞": "赞比亚",
"亞塞拜然": "阿塞拜疆",
"阿塞拜疆": "阿塞拜疆",
"阿拉伯聯合酋長國": "阿拉伯联合酋长国",
"阿拉伯聯合大公國": "阿拉伯联合酋长国",
"南韓": "韩国",
"馬爾代夫": "马尔代夫",
"馬爾地夫": "马尔代夫",
"馬爾他": "马耳他",
"馬利共和國": "马里共和国",
"即食麵": "方便面",
"快速面": "方便面",
"速食麵": "方便面",
"泡麵": "方便面",
"笨豬跳": "蹦极跳",
"绑紧跳": "蹦极跳",
"冷盤": "凉菜",
"冷菜": "凉菜",
"散钱": "零钱",
"谐星": "笑星",
"夜学": "夜校",
"华乐": "民乐",
"中樂": "民乐",
"屋价": "房价",
"的士": "出租车",
"計程車": "出租车",
"公車": "公共汽车",
"單車": "自行车",
"節慶": "节日",
"芝士": "乾酪",
"狗隻": "犬只",
"士多啤梨": "草莓",
"忌廉": "奶油",
"桌球": "台球",
"撞球": "台球",
"雪糕": "冰淇淋",
"衞生": "卫生",
"衛生": "卫生",
"賓士": "奔驰",
"平治": "奔驰",
"積架": "捷豹",
"福斯": "大众",
"福士": "大众",
"雪鐵龍": "雪铁龙",
"萬事得": "马自达",
"馬自達": "马自达",
"寶獅": "标志",
"拿破崙": "拿破仑",
"布殊": "布什",
"布希": "布什",
"柯林頓": "克林顿",
"克林頓": "克林顿",
"薩達姆": "萨达姆",
"海珊": "萨达姆",
"梵谷": "凡高",
"大衛碧咸": "大卫·贝克汉姆",
"米高奧雲": "迈克尔·欧文",
"卡佩雅蒂": "珍妮弗·卡普里亚蒂",
"沙芬": "马拉特·萨芬",
"舒麥加": "迈克尔·舒马赫",
"希特拉": "希特勒",
"黛安娜": "戴安娜",
"希拉": "赫拉",
}
zh2SG = {
"方便面": "快速面",
"速食麵": "快速面",
"即食麵": "快速面",
"蹦极跳": "绑紧跳",
"笨豬跳": "绑紧跳",
"凉菜": "冷菜",
"冷盤": "冷菜",
"零钱": "散钱",
"散紙": "散钱",
"笑星": "谐星",
"夜校": "夜学",
"民乐": "华乐",
"住房": "住屋",
"房价": "屋价",
"泡麵": "快速面",
}
| 91,927 | 10.091699 | 27 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/langconv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from copy import deepcopy
# Optional speed-up: psyco is a Python 2 JIT; silently skipped when absent.
# Narrowed from a bare ``except:``, which would also have swallowed
# SystemExit and KeyboardInterrupt raised during import.
try:
    import psyco
    psyco.full()
except Exception:
    pass
from .zh_wiki import zh2Hant, zh2Hans
import sys
# Python 2/3 compatibility: on Python 3 the mapping tables are already
# unicode text; on Python 2 they are utf-8 byte strings and must be
# decoded once at import time.
py3k = sys.version_info >= (3, 0, 0)
if py3k:
    UEMPTY = ''
else:
    # Rebuild both tables with unicode keys/values, then swap them in.
    _zh2Hant, _zh2Hans = {}, {}
    for old, new in ((zh2Hant, _zh2Hant), (zh2Hans, _zh2Hans)):
        for k, v in old.items():
            new[k.decode('utf8')] = v.decode('utf8')
    zh2Hant = _zh2Hant
    zh2Hans = _zh2Hans
    UEMPTY = ''.decode('utf8')
# states
(START, END, FAIL, WAIT_TAIL) = list(range(4))
# conditions
(TAIL, ERROR, MATCHED_SWITCH, UNMATCHED_SWITCH, CONNECTOR) = list(range(5))
# Registry of named ConvertMap instances, populated by registery() below.
MAPS = {}
class Node(object):
    """One lookup result from a ConvertMap: a source word and its replacement.

    ``is_tail`` means the word is a complete key; ``have_child`` means it is
    also a prefix of longer keys; ``is_original`` means the map had no entry,
    so the word "converts" to itself.
    """

    def __init__(self, from_word, to_word=None, is_tail=True,
                 have_child=False):
        self.from_word = from_word
        self.is_tail = is_tail
        self.have_child = have_child
        if to_word is None:
            # Unmapped node: identity conversion.
            self.is_original = True
            self.to_word = from_word
            self.data = (is_tail, have_child, from_word)
        else:
            # Mapped node: an empty replacement falls back to the source
            # word, but ``data`` records the raw replacement as given.
            self.is_original = False
            self.to_word = to_word or from_word
            self.data = (is_tail, have_child, to_word)

    def is_original_long_word(self):
        """True for an unmapped node whose source word is multi-character."""
        return self.is_original and len(self.from_word) > 1

    def is_follow(self, chars):
        """Return True when *chars* is NOT the prefix leading to this node.

        NOTE(review): despite the name, a True result means a mismatch —
        callers treat True as a failure condition.
        """
        return chars != self.from_word[:-1]

    def __str__(self):
        parts = (repr(self.from_word), repr(self.to_word),
                 self.is_tail, self.have_child)
        return '<Node, %s, %s, %s, %s>' % parts

    __repr__ = __str__
class ConvertMap(object):
    """Prefix-indexed view of a conversion table.

    Every key of *mapping* plus every proper prefix of every key is stored,
    so a converter can walk the table one character at a time.  Each stored
    value is a ``(is_tail, have_child, to_word)`` tuple, where ``is_tail``
    marks a complete key and ``have_child`` marks a prefix of longer keys.
    """

    def __init__(self, name, mapping=None):
        self.name = name
        self._map = {}
        # Fixed: previously left unset when no mapping was supplied, which
        # made later reads of max_key_length raise AttributeError.
        self.max_key_length = 0
        if mapping:
            self.set_convert_map(mapping)

    def set_convert_map(self, mapping):
        """Index *mapping* (source word -> replacement) by every key prefix."""
        convert_map = {}
        have_child = {}
        max_key_length = 0
        for key in sorted(mapping.keys()):
            if len(key) > 1:
                # Register every proper prefix as an interior node.
                for i in range(1, len(key)):
                    parent_key = key[:i]
                    have_child[parent_key] = True
            have_child[key] = False
            max_key_length = max(max_key_length, len(key))
        for key in sorted(have_child.keys()):
            convert_map[key] = (key in mapping, have_child[key],
                                mapping.get(key, UEMPTY))
        self._map = convert_map
        self.max_key_length = max_key_length

    def __getitem__(self, k):
        """Return a Node for *k*; unknown keys yield an identity Node."""
        try:
            is_tail, have_child, to_word = self._map[k]
            return Node(k, to_word, is_tail, have_child)
        except KeyError:
            # Narrowed from a bare ``except:`` — only a missing key is an
            # expected failure here; anything else should propagate.
            return Node(k)

    def __contains__(self, k):
        return k in self._map

    def __len__(self):
        return len(self._map)
class StatesMachineException(Exception):
    """Raised when a StatesMachine is fed input while in the FAIL state."""
class StatesMachine(object):
    """One in-flight conversion hypothesis over the input stream.

    The machine walks a ConvertMap one character at a time.  Committed
    output accumulates in ``final``; characters of a partially matched
    multi-character key wait in ``pool``.  ``feed`` may fork a clone so
    that a short match and a potential longer match are both pursued.
    """
    def __init__(self):
        self.state = START
        # Converted output committed so far.
        self.final = UEMPTY
        # Number of words emitted; used by __len__ to rank hypotheses.
        self.len = 0
        # Source characters consumed but not yet committed to a match.
        self.pool = UEMPTY
    def clone(self, pool):
        """Fork this machine to keep chasing a longer key held in *pool*."""
        new = deepcopy(self)
        new.state = WAIT_TAIL
        new.pool = pool
        return new
    def feed(self, char, map):
        """Consume one character against *map*; may return a forked machine."""
        node = map[self.pool+char]
        # Classify the trie lookup into one of the transition conditions.
        if node.have_child:
            if node.is_tail:
                if node.is_original:
                    # Complete but unmapped key; longer keys exist too.
                    cond = UNMATCHED_SWITCH
                else:
                    # Complete mapped key; longer keys exist too.
                    cond = MATCHED_SWITCH
            else:
                # Only an interior prefix of longer keys.
                cond = CONNECTOR
        else:
            if node.is_tail:
                # Complete key with no longer alternative.
                cond = TAIL
            else:
                # Neither a key nor a prefix: dead end for this hypothesis.
                cond = ERROR
        new = None
        if cond == ERROR:
            self.state = FAIL
        elif cond == TAIL:
            # A clone that was waiting for a longer tail must not re-commit
            # the same unmapped long word; that branch is abandoned.
            if self.state == WAIT_TAIL and node.is_original_long_word():
                self.state = FAIL
            else:
                self.final += node.to_word
                self.len += 1
                self.pool = UEMPTY
                self.state = END
        elif self.state == START or self.state == WAIT_TAIL:
            if cond == MATCHED_SWITCH:
                # Commit the current match, and fork a clone that keeps
                # waiting in case an even longer key completes later.
                new = self.clone(node.from_word)
                self.final += node.to_word
                self.len += 1
                self.state = END
                self.pool = UEMPTY
            elif cond == UNMATCHED_SWITCH or cond == CONNECTOR:
                if self.state == START:
                    # Emit the characters as-is, but also fork a clone to
                    # pursue the longer keys this prefix may lead to.
                    new = self.clone(node.from_word)
                    self.final += node.to_word
                    self.len += 1
                    self.state = END
                else:
                    if node.is_follow(self.pool):
                        # Pending pool is not the expected prefix: dead end.
                        self.state = FAIL
                    else:
                        self.pool = node.from_word
        elif self.state == END:
            # END is a new START
            self.state = START
            new = self.feed(char, map)
        elif self.state == FAIL:
            raise StatesMachineException('Translate States Machine '
                    'have error with input data %s' % node)
        return new
    def __len__(self):
        # Ranking key for Converter._clean: fewer emitted words means the
        # machine used longer matches, which is preferred.
        return self.len + 1
    def __str__(self):
        return '<StatesMachine %s, pool: "%s", state: %s, final: %s>' % (
                id(self), self.pool, self.state, self.final)
    __repr__ = __str__
class Converter(object):
    """Converts text by racing competing StatesMachine hypotheses."""
    def __init__(self, to_encoding):
        # *to_encoding* names a table registered in MAPS
        # ('zh-hant' or 'zh-hans').
        self.to_encoding = to_encoding
        self.map = MAPS[to_encoding]
        self.start()
    def feed(self, char):
        """Feed one character to every live machine; collect any forks."""
        branches = []
        for fsm in self.machines:
            new = fsm.feed(char, self.map)
            if new:
                branches.append(new)
        if branches:
            self.machines.extend(branches)
        # Drop hypotheses that hit a dead end.
        self.machines = [fsm for fsm in self.machines if fsm.state != FAIL]
        all_ok = True
        for fsm in self.machines:
            if fsm.state != END:
                all_ok = False
        if all_ok:
            # Every surviving machine finished a word: commit the best one.
            self._clean()
        return self.get_result()
    def _clean(self):
        """Commit the best surviving hypothesis and reset to one machine."""
        if len(self.machines):
            # Sort by fewest emitted words first, i.e. prefer the machine
            # that used the longest matches (see StatesMachine.__len__).
            self.machines.sort(key=lambda x: len(x))
            # self.machines.sort(cmp=lambda x,y: cmp(len(x), len(y)))
            self.final += self.machines[0].final
        self.machines = [StatesMachine()]
    def start(self):
        """Reset accumulated output and hypotheses for a new conversion."""
        self.machines = [StatesMachine()]
        self.final = UEMPTY
    def end(self):
        """Flush: keep only terminated machines and commit the best result."""
        self.machines = [fsm for fsm in self.machines
                if fsm.state == FAIL or fsm.state == END]
        self._clean()
    def convert(self, string):
        """Convert *string* character by character and return the result."""
        self.start()
        for char in string:
            self.feed(char)
        self.end()
        return self.get_result()
    def get_result(self):
        """Return the output committed so far."""
        return self.final
def registery(name, mapping):
    """Build a ConvertMap from *mapping* and register it in MAPS under *name*.

    The previous ``global MAPS`` statement was redundant: MAPS is mutated
    in place here, never rebound, so no ``global`` declaration is needed.
    (The misspelled name is kept for backward compatibility with callers.)
    """
    MAPS[name] = ConvertMap(name, mapping)
# Register the two built-in conversion targets, then drop the raw tables to
# free memory; only the prefix-indexed ConvertMap copies in MAPS are kept.
registery('zh-hant', zh2Hant)
registery('zh-hans', zh2Hans)
del zh2Hant, zh2Hans
def run():
    """Command-line entry point: convert text line by line.

    Reads utf-8 input (``-f FILE`` or stdin), converts each line with the
    table chosen by ``-e`` ('zh-hant' or 'zh-hans'), and writes utf-8 to
    ``-t FILE`` or stdout.  NOTE(review): the decode/encode calls target
    Python 2 byte-string streams; on Python 3 writing encoded bytes to
    sys.stdout would fail — confirm intended interpreter.  Opened files
    are never closed; tolerable only because this is a short-lived CLI.
    """
    import sys
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-e', type='string', dest='encoding',
            help='encoding')
    parser.add_option('-f', type='string', dest='file_in',
            help='input file (- for stdin)')
    parser.add_option('-t', type='string', dest='file_out',
            help='output file')
    (options, args) = parser.parse_args()
    if not options.encoding:
        parser.error('encoding must be set')
    # '-' or a missing -f means read from stdin.
    if options.file_in:
        if options.file_in == '-':
            file_in = sys.stdin
        else:
            file_in = open(options.file_in)
    else:
        file_in = sys.stdin
    # '-' or a missing -t means write to stdout.
    if options.file_out:
        if options.file_out == '-':
            file_out = sys.stdout
        else:
            file_out = open(options.file_out, 'wb')
    else:
        file_out = sys.stdout
    c = Converter(options.encoding)
    for line in file_in:
        # print >> file_out, c.convert(line.rstrip('\n').decode(
        file_out.write(c.convert(line.rstrip('\n').decode(
                'utf8')).encode('utf8'))
# Allow the module to be used directly as a command-line filter.
if __name__ == '__main__':
    run()
| 7,874 | 27.740876 | 75 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/preprocess/DRCD_output.py | import collections
import json
import math
from tqdm import tqdm
from tools.official_tokenization import BasicTokenizer
def write_predictions_topk(config, all_examples, all_features, all_results, n_best_size,
                           max_answer_length, do_lower_case, output_prediction_file,
                           output_nbest_file):
    """Write final predictions to the json file and log-odds of null if needed.

    Decodes start/end top-k model outputs (XLNet-style `start_n_top` /
    `end_n_top` beams) into answer strings and keeps the `n_best_size`
    highest-scoring spans per question.

    Args:
        config: model config providing `start_n_top` and `end_n_top`.
        all_examples: example dicts ('qid', 'ori_doc_tokens', ...).
        all_features: feature dicts produced by the preprocessor.
        all_results: per-feature model outputs, keyed by `unique_id`.
        n_best_size: number of candidate answers kept per question.
        max_answer_length: maximum answer span length, in tokens.
        do_lower_case: whether the tokenizer lower-cased the text.
        output_prediction_file: path for the {qid: best answer} JSON.
        output_nbest_file: path for the per-question n-best JSON.
    """
    print("Writing predictions to: %s" % (output_prediction_file))
    print("Writing nbest to: %s" % (output_nbest_file))
    # A long document is split into several overlapping features; group them
    # back by source example.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature['example_index']].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    for (example_index, example) in enumerate(tqdm(all_examples)):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature['unique_id']]
            for i in range(config.start_n_top):
                for j in range(config.end_n_top):
                    start_logit = result.start_top_logits[i]
                    start_index = result.start_top_index[i]
                    # End beams are stored flattened: one block of end_n_top
                    # entries per start beam.
                    j_index = i * config.end_n_top + j
                    end_logit = result.end_top_logits[j_index]
                    end_index = result.end_top_index[j_index]
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature['tokens']):
                        continue
                    if end_index >= len(feature['tokens']):
                        continue
                    if not feature['token_is_max_context'].get(str(start_index), False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=start_logit,
                            end_logit=end_logit))
        # Highest combined start+end score first.
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        # ipdb.set_trace()
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature['tokens'][pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature['token_to_orig_map'][str(pred.start_index)]
                orig_doc_end = feature['token_to_orig_map'][str(pred.end_index)]
                orig_tokens = example['ori_doc_tokens'][orig_doc_start:(orig_doc_end + 1)]
                tok_text = "".join(tok_tokens)
                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")
                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = "".join(orig_tokens)
                # Map the tokenized answer back onto the original text.
                final_text = get_final_text(tok_text, orig_text, do_lower_case)
                if final_text in seen_predictions:
                    continue
                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        # ipdb.set_trace()
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
        probs = _compute_softmax(total_scores)
        # ipdb.set_trace()
        nbest_json = []
        # ipdb.set_trace()
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = float(probs[i])
            output["start_logit"] = float(entry.start_logit)
            output["end_logit"] = float(entry.end_logit)
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        # ipdb.set_trace()
        all_predictions[example['qid']] = nbest_json[0]["text"]
        all_nbest_json[example['qid']] = nbest_json
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + "\n")
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + "\n")
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, output_prediction_file,
                      output_nbest_file, version_2_with_negative=False, null_score_diff_threshold=0.):
    """Write final predictions to the json file and log-odds of null if needed.

    Standard (non-top-k) SQuAD-style decoding over per-position start/end
    logits. With `version_2_with_negative` a null ("" = no answer) prediction
    competes against the best non-null span via `null_score_diff_threshold`.

    NOTE(review): `scores_diff_json` is accumulated but never written out --
    presumably a leftover from the reference SQuAD script; confirm.
    """
    print("Writing predictions to: %s" % (output_prediction_file))
    print("Writing nbest to: %s" % (output_nbest_file))
    # A long document is split into several overlapping features; group them
    # back by source example.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature['example_index']].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(tqdm(all_examples)):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature['unique_id']]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature['tokens']):
                        continue
                    if end_index >= len(feature['tokens']):
                        continue
                    # token_to_orig_map may be keyed by str (after JSON round
                    # trip) or by int; accept either.
                    if str(start_index) not in feature['token_to_orig_map'] and \
                            start_index not in feature['token_to_orig_map']:
                        continue
                    if str(end_index) not in feature['token_to_orig_map'] and \
                            end_index not in feature['token_to_orig_map']:
                        continue
                    if not feature['token_is_max_context'].get(str(start_index), False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit))
        # Highest combined start+end score first.
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature['tokens'][pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature['token_to_orig_map'][str(pred.start_index)]
                orig_doc_end = feature['token_to_orig_map'][str(pred.end_index)]
                orig_tokens = example['ori_doc_tokens'][orig_doc_start:(orig_doc_end + 1)]
                tok_text = "".join(tok_tokens)
                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")
                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = "".join(orig_tokens)
                # Map the tokenized answer back onto the original text.
                final_text = get_final_text(tok_text, orig_text, do_lower_case)
                if final_text in seen_predictions:
                    continue
                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(
                    _NbestPrediction(
                        text="",
                        start_logit=null_start_logit,
                        end_logit=null_end_logit))
            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = float(probs[i])
            output["start_logit"] = float(entry.start_logit)
            output["end_logit"] = float(entry.end_logit)
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        if not version_2_with_negative:
            all_predictions[example['qid']] = nbest_json[0]["text"]
            all_nbest_json[example['qid']] = nbest_json
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
            scores_diff_json[example['qid']] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example['qid']] = ""
            else:
                all_predictions[example['qid']] = best_non_null_entry.text
            all_nbest_json[example['qid']] = nbest_json
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + "\n")
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: detokenized prediction text.
        orig_text: span of the original document text.
        do_lower_case: must match the tokenization used for `pred_text`.
        verbose_logging: print diagnostics when the alignment fails.

    Returns:
        The best-effort sub-span of `orig_text`, or `orig_text` itself when
        the character alignment heuristic fails.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.
    def _strip_spaces(text):
        # Returns text with spaces removed plus a map from the index of each
        # kept character back to its index in the original text.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)
    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = "".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            print("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            print("Length not equal after stripping spaces: '%s' vs '%s'" % (orig_ns_text, tok_ns_text))
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        if verbose_logging:
            print("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        if verbose_logging:
            print("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
| 19,601 | 40.706383 | 104 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/preprocess/CHID_preprocess.py | '''
@author: zhangxinrui
@name: dataset_roberta.py
@date: 10/07/2019
'''
import collections
import os
import pickle
import numpy as np
import json
from tqdm import tqdm
try:
import regex as re
except Exception:
import re
# Per-blank model output: `tag` identifies the '#idiomN' blank inside the
# example and `logit` carries the scores over the candidate idioms.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "example_id", "tag", "logit"])
# Sentinel inserted around CJK characters during whitespace pre-tokenization.
SPIECE_UNDERLINE = '▁'
class ChidExample(object):
    """One ChID blank-filling example: a single '#idiomN' tag in a passage."""

    def __init__(self, example_id, tag, doc_tokens, options, answer_index=None):
        self.example_id = example_id      # index of the source passage
        self.tag = tag                    # the '#idiomN' placeholder targeted
        self.doc_tokens = doc_tokens      # whitespace-split passage tokens
        self.options = options            # candidate idioms
        self.answer_index = answer_index  # gold option index (None at test time)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        parts = ["tag: %s" % (self.tag)]
        parts.append("context: %s" % (''.join(self.doc_tokens)))
        parts.append("options: [%s]" % (", ".join(self.options)))
        if self.answer_index is not None:
            parts.append("answer: %s" % self.options[self.answer_index])
        return ", ".join(parts)
class InputFeatures(object):
    """A single set of features of data.

    Holds the padded model inputs for one (passage, blank) pair across all
    candidate options.
    """

    def __init__(self, unique_id, example_id, tag, tokens, input_ids,
                 input_masks, segment_ids, choice_masks, label=None):
        # Bookkeeping: which feature / source example / blank this row is.
        self.unique_id = unique_id
        self.example_id = example_id
        self.tag = tag
        # Tokenized text plus the per-option padded id/mask lists.
        self.tokens = tokens
        self.input_ids = input_ids
        self.input_masks = input_masks
        self.segment_ids = segment_ids
        self.choice_masks = choice_masks
        # Index of the correct option among the candidates (None at test time).
        self.label = label
def read_chid_examples(input_data_file, input_label_file, is_training=True):
    '''
    Parse the raw ChID data into ChidExample objects, one per '#idiomN' blank
    per passage.

    :param input_data_file: path to the raw data, one dict literal per line.
    :param input_label_file: JSON mapping tag -> gold option index
        (only read when is_training).
    :param is_training: attach answer_index to each example when True.
    :return: list of ChidExample.
    '''
    if is_training:
        input_label = json.load(open(input_label_file))
    input_data = open(input_data_file)
    def _is_chinese_char(cp):
        # True when code point `cp` falls in a CJK Unicode block.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False
    def is_fuhao(c):
        # True when `c` is one of the punctuation marks treated like a CJK char.
        if c == '。' or c == ',' or c == '!' or c == '?' or c == ';' or c == '、' or c == ':' or c == '(' or c == ')' \
                or c == '-' or c == '~' or c == '「' or c == '《' or c == '》' or c == ',' or c == '」' or c == '"' or c == '“' or c == '”' \
                or c == '$' or c == '『' or c == '』' or c == '—' or c == ';' or c == '。' or c == '(' or c == ')' or c == '-' or c == '~' or c == '。' \
                or c == '‘' or c == '’':
            return True
        return False
    def _tokenize_chinese_chars(text):
        """Adds whitespace around any CJK character."""
        # '#idiomN' placeholders are kept intact: is_blank suppresses the
        # per-character splitting until the placeholder has been copied over.
        # NOTE(review): the inner check reads `context` from the enclosing loop
        # rather than the local `text`; they are the same string at call time,
        # but confirm this was intentional.
        output = []
        is_blank = False
        for index, char in enumerate(text):
            cp = ord(char)
            if is_blank:
                output.append(char)
                if context[index - 12:index + 1].startswith("#idiom"):
                    is_blank = False
                    output.append(SPIECE_UNDERLINE)
            else:
                if text[index:index + 6] == "#idiom":
                    is_blank = True
                    if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:
                        output.append(SPIECE_UNDERLINE)
                    output.append(char)
                elif _is_chinese_char(cp) or is_fuhao(char):
                    if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:
                        output.append(SPIECE_UNDERLINE)
                    output.append(char)
                    output.append(SPIECE_UNDERLINE)
                else:
                    output.append(char)
        return "".join(output)
    def is_whitespace(c):
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F or c == SPIECE_UNDERLINE:
            return True
        return False
    examples = []
    example_id = 0
    for data in tqdm(input_data):
        # NOTE(review): eval() on file contents executes arbitrary code; only
        # use with trusted dataset files (ast.literal_eval would be safer).
        data = eval(data)
        options = data['candidates']
        for context in data['content']:
            # Normalize fancy quotes/dashes before tokenization.
            context = context.replace("“", "\"").replace("”", "\"").replace("——", "--"). \
                replace("—", "-").replace("―", "-").replace("…", "...").replace("‘", "\'").replace("’", "\'")
            context = _tokenize_chinese_chars(context)
            paragraph_text = context.strip()
            doc_tokens = []
            prev_is_whitespace = True
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
            # Each '#idiomN' token becomes one example.
            tags = [blank for blank in doc_tokens if '#idiom' in blank]
            if is_training:
                for tag_index, tag in enumerate(tags):
                    answer_index = input_label[tag]
                    example = ChidExample(
                        example_id=example_id,
                        tag=tag,
                        doc_tokens=doc_tokens,
                        options=options,
                        answer_index=answer_index)
                    examples.append(example)
            else:
                for tag_index, tag in enumerate(tags):
                    example = ChidExample(
                        example_id=example_id,
                        tag=tag,
                        doc_tokens=doc_tokens,
                        options=options)
                    examples.append(example)
        # for/else: runs after the contexts of one raw record are processed.
        else:
            example_id += 1
    # for/else on the outer loop: runs once after all records are consumed.
    else:
        print('原始样本个数:{}'.format(example_id))
        print('实际生成总样例数:{}'.format(len(examples)))
    return examples
def add_tokens_for_around(tokens, pos, num_tokens):
    """Collect up to `num_tokens` context tokens around index `pos`.

    Returns (left_tokens, right_tokens), excluding tokens[pos] itself. The
    window is balanced when possible; near either end of `tokens`, the unused
    budget overflows to the other side.
    """
    left_budget = num_tokens // 2
    right_budget = num_tokens - left_budget
    remaining_right = len(tokens) - 1 - pos
    if pos >= left_budget and remaining_right >= right_budget:
        # Enough room on both sides: take a balanced window.
        left = tokens[pos - left_budget: pos]
        right = tokens[pos + 1: pos + 1 + right_budget]
    elif pos <= left_budget:
        # Near the beginning: take everything on the left, overflow rightwards.
        left = tokens[:pos]
        right = tokens[pos + 1: pos + 1 + (num_tokens - len(left))]
    elif remaining_right <= right_budget:
        # Near the end: take everything on the right, overflow leftwards.
        right = tokens[pos + 1:]
        left = tokens[pos - (num_tokens - len(right)): pos]
    else:
        raise ValueError('impossible')
    return left, right
def convert_examples_to_features(examples, tokenizer, max_seq_length=128, max_num_choices=10):
    '''
    Build InputFeatures for each example, placing every candidate answer at
    the start of its sequence: [CLS] option [SEP] left_ctx [unused1] right_ctx [SEP].
    '''
    def _loop(example, unique_id, label):
        '''
        Build one InputFeatures for `example` and append it to `features`
        (closure over `features` and `max_tokens_for_doc` defined below).
        Shapes, with C = max_num_choices and seq_len = max_seq_length:
        input_ids = (C, seq_len)
        token_type_ids = (C, seq_len) = segment_id
        input_mask = (C, seq_len)
        labels = int
        choices_mask = (C)
        '''
        input_ids = []
        input_masks = []
        segment_ids = []
        choice_masks = [1] * len(example.options)
        tag = example.tag
        # WordPiece-tokenize the passage, but keep '#idiomN' tags intact.
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            if '#idiom' in token:
                sub_tokens = [str(token)]
            else:
                sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                all_doc_tokens.append(sub_token)
        pos = all_doc_tokens.index(tag)
        num_tokens = max_tokens_for_doc - 5  # reserve [unused1] + 4-char idiom in segment A
        tmp_l, tmp_r = add_tokens_for_around(all_doc_tokens, pos, num_tokens)
        num_l = len(tmp_l)
        num_r = len(tmp_r)
        # Other (non-target) idiom blanks in the window become 4 [MASK]s,
        # matching an idiom's typical 4-character length; re-trim afterwards.
        tokens_l = []
        for token in tmp_l:
            if '#idiom' in token and token != tag:
                tokens_l.extend(['[MASK]'] * 4)
            else:
                tokens_l.append(token)
        tokens_l = tokens_l[-num_l:]
        del tmp_l
        tokens_r = []
        for token in tmp_r:
            if '#idiom' in token and token != tag:
                tokens_r.extend(['[MASK]'] * 4)
            else:
                tokens_r.append(token)
        tokens_r = tokens_r[: num_r]
        del tmp_r
        # One padded sequence per candidate option.
        for i, elem in enumerate(example.options):
            option = tokenizer.tokenize(elem)
            tokens = ['[CLS]'] + option + ['[SEP]'] + tokens_l + ['[unused1]'] + tokens_r + ['[SEP]']
            input_id = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_id)
            segment_id = [0] * len(input_id)
            while len(input_id) < max_seq_length:
                input_id.append(0)
                input_mask.append(0)
                segment_id.append(0)
            assert len(input_id) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_id) == max_seq_length
            input_ids.append(input_id)
            input_masks.append(input_mask)
            segment_ids.append(segment_id)
        # Log the first few features for manual inspection.
        if unique_id < 5:
            print("*** Example ***")
            print("unique_id: {}".format(unique_id))
            print("context_id: {}".format(tag))
            print("label: {}".format(label))
            print("tag_index: {}".format(pos))
            print("tokens: {}".format("".join(tokens)))
            print("choice_masks: {}".format(choice_masks))
        # Pad the choice dimension up to max_num_choices with all-zero rows.
        while len(input_ids) < max_num_choices:
            input_ids.append([0] * max_seq_length)
            input_masks.append([0] * max_seq_length)
            segment_ids.append([0] * max_seq_length)
            choice_masks.append(0)
        assert len(input_ids) == max_num_choices
        assert len(input_masks) == max_num_choices
        assert len(segment_ids) == max_num_choices
        assert len(choice_masks) == max_num_choices
        features.append(
            InputFeatures(
                unique_id=unique_id,
                example_id=example.example_id,
                tag=tag,
                tokens=tokens,
                input_ids=input_ids,
                input_masks=input_masks,
                segment_ids=segment_ids,
                choice_masks=choice_masks,
                label=label))
    max_tokens_for_doc = max_seq_length - 3  # [CLS] choice [SEP] document [SEP]
    features = []
    unique_id = 0
    for (example_index, example) in enumerate(tqdm(examples)):
        label = example.answer_index
        if label != None:
            _loop(example, unique_id, label)
        else:
            _loop(example, unique_id, None)
        unique_id += 1
        if unique_id % 12000 == 0:
            print("unique_id: %s" % (unique_id))
    print("unique_id: %s" % (unique_id))
    return features
def logits_matrix_to_array(logits_matrix, index_2_idiom):
    """Decode all blanks of one example jointly via greedy global assignment.

    `logits_matrix` has one row per blank with one logit per candidate.
    Pairs are consumed from the highest logit downwards, so each candidate is
    assigned to at most one blank. Returns [[tag, candidate_index], ...]
    ordered by blank position.
    """
    matrix = np.transpose(np.array(logits_matrix))  # rows: candidates, cols: blanks
    scored = [(cand, blank, value)
              for cand, row in enumerate(matrix)
              for blank, value in enumerate(row)]
    free_cands = {cand for cand, _blank, _value in scored}
    free_blanks = {blank for _cand, blank, _value in scored}
    picked = []
    for cand, blank, _value in sorted(scored, key=lambda item: item[2], reverse=True):
        if blank in free_blanks and cand in free_cands:
            picked.append((cand, blank))
            free_blanks.remove(blank)
            free_cands.remove(cand)
    picked.sort(key=lambda pair: pair[1])
    return [[index_2_idiom[blank], cand] for cand, blank in picked]
def logits_matrix_max_array(logits_matrix, index_2_idiom):
    """Decode each blank independently as the argmax over its candidates."""
    best_per_blank = np.array(logits_matrix).argmax(axis=1)
    return [[index_2_idiom[blank], choice]
            for blank, choice in enumerate(best_per_blank)]
def get_final_predictions(all_results, tmp_predict_file, g=True):
    """Group per-blank logits by example and decode one choice per blank.

    Args:
        all_results: iterable of records with `.example_id`, `.tag` and
            `.logit` attributes (one entry per blank occurrence).
        tmp_predict_file: cache path; the raw results are pickled there the
            first time (skipped when the file already exists).
        g: if True, use the global greedy assignment
            (`logits_matrix_to_array`, each candidate used at most once per
            example); otherwise take the independent per-blank argmax
            (`logits_matrix_max_array`).

    Returns:
        list of [tag, choice_index] pairs across all examples.
    """
    if not os.path.exists(tmp_predict_file):
        # Bug fix: the handle was previously opened without being closed;
        # use a context manager so the cache is flushed and released.
        with open(tmp_predict_file, 'wb') as cache:
            pickle.dump(all_results, cache)
    # Group (tag, logit) pairs by example, preserving encounter order.
    raw_results = {}
    for elem in all_results:
        raw_results.setdefault(elem.example_id, []).append((elem.tag, elem.logit))
    results = []
    for example_id, elem in raw_results.items():
        index_2_idiom = {index: tag for index, (tag, logit) in enumerate(elem)}
        logits = [logit for _, logit in elem]
        if g:
            results.extend(logits_matrix_to_array(logits, index_2_idiom))
        else:
            results.extend(logits_matrix_max_array(logits, index_2_idiom))
    return results
def write_predictions(results, output_prediction_file):
    """Dump [tag, choice_index] pairs to `output_prediction_file` as a
    {tag: choice_index} JSON mapping."""
    mapping = {item[0]: item[1] for item in results}
    with open(output_prediction_file, 'w') as handle:
        json.dump(mapping, handle, indent=2)
    print("Writing predictions to: {}".format(output_prediction_file))
def generate_input(data_file, label_file, example_file, feature_file, tokenizer, max_seq_length, max_num_choices,
                   is_training=True):
    """Load (or build and cache) the CHID InputFeatures.

    Lookup order: cached features -> cached examples -> raw data files.
    Whatever path is taken, the computed features end up cached at
    `feature_file` and are returned.

    Bug fix: every pickle load/dump previously used `open(...)` without
    closing the handle; all file access now uses context managers.
    """
    if os.path.exists(feature_file):
        # Fast path: features were already built and cached.
        with open(feature_file, 'rb') as f:
            features = pickle.load(f)
    elif os.path.exists(example_file):
        # Examples cached, features not: tokenize and cache the features.
        with open(example_file, 'rb') as f:
            examples = pickle.load(f)
        features = convert_examples_to_features(examples, tokenizer, max_seq_length, max_num_choices)
        with open(feature_file, 'wb') as f:
            pickle.dump(features, f)
    else:
        # Cold start: parse the raw data, then cache examples and features.
        examples = read_chid_examples(data_file, label_file, is_training=is_training)
        with open(example_file, 'wb') as f:
            pickle.dump(examples, f)
        features = convert_examples_to_features(examples, tokenizer, max_seq_length, max_num_choices)
        with open(feature_file, 'wb') as f:
            pickle.dump(features, f)
    return features
def evaluate(ans_f, pre_f):
    """Return the prediction accuracy, as a percentage in [0, 100].

    Args:
        ans_f: path to the gold-answer JSON ({id: choice_index}).
        pre_f: path to the prediction JSON ({id: choice_index}).

    Raises:
        FileNotFoundError: when a gold id is missing from the predictions
            (kept for backward compatibility with existing callers).

    Bug fix: the JSON files were previously opened without being closed;
    both are now read through context managers.
    """
    with open(ans_f) as f:
        gold = json.load(f)
    with open(pre_f) as f:
        pred = json.load(f)
    correct = 0
    for qid, answer in gold.items():
        if qid not in pred:
            raise FileNotFoundError
        if answer == pred[qid]:
            correct += 1
    return correct / len(gold) * 100
| 14,712 | 32.362812 | 149 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/preprocess/DRCD_preprocess.py | import collections
import copy
import json
import os
from tqdm import tqdm
from tools.langconv import Converter
SPIECE_UNDERLINE = '▁'
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no argument already discards leading/trailing and
    # repeated whitespace, and yields [] for an empty or all-blank string.
    return text.split()
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def Traditional2Simplified(sentence):
    """Convert Traditional Chinese characters in `sentence` to Simplified.

    :param sentence: input text (possibly Traditional Chinese).
    :return: the same text with Traditional characters mapped to Simplified.
    """
    return Converter('zh-hans').convert(sentence)
def json2features(input_file, output_files, tokenizer, is_training=False, max_query_length=64,
                  max_seq_length=512, doc_stride=128):
    """Convert a SQuAD-style (DRCD) JSON file into example and feature JSON files.

    Reads ``input_file``, converts Traditional Chinese to Simplified, splits the
    context into word-level tokens (one token per CJK char), and writes:
      - ``output_files[0]``: the intermediate examples (with answer spans), and
      - ``output_files[1]``: BERT input features built with a sliding window of
        width ``max_seq_length`` and stride ``doc_stride``.

    When ``is_training`` is True, character-level answer offsets are projected
    onto the token sequence and validated against the annotated answer text.
    """
    with open(input_file, 'r') as f:
        train_data = json.load(f)
        train_data = train_data['data']

    def _is_chinese_char(cp):
        # True if code point `cp` falls in any CJK Unified Ideographs block.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False

    def is_fuhao(c):
        # True if `c` is one of the CJK/ASCII punctuation marks treated as a
        # standalone token boundary ("fuhao" = punctuation).
        if c == '。' or c == ',' or c == '!' or c == '?' or c == ';' or c == '、' or c == ':' or c == '(' or c == ')' \
                or c == '-' or c == '~' or c == '「' or c == '《' or c == '》' or c == ',' or c == '」' or c == '"' or c == '“' or c == '”' \
                or c == '$' or c == '『' or c == '』' or c == '—' or c == ';' or c == '。' or c == '(' or c == ')' or c == '-' or c == '~' or c == '。' \
                or c == '‘' or c == '’' or c == '─' or c == ':':
            return True
        return False

    def _tokenize_chinese_chars(text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if _is_chinese_char(cp) or is_fuhao(char):
                # Surround each CJK char / punctuation with SPIECE_UNDERLINE
                # markers so that each becomes its own whitespace-level token.
                if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:
                    output.append(SPIECE_UNDERLINE)
                output.append(char)
                output.append(SPIECE_UNDERLINE)
            else:
                output.append(char)
        return "".join(output)

    def is_whitespace(c):
        # SPIECE_UNDERLINE is treated as whitespace so the markers inserted
        # above act as token separators.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F or c == SPIECE_UNDERLINE:
            return True
        return False

    # to examples
    examples = []
    mis_match = 0
    for article in tqdm(train_data):
        for para in article['paragraphs']:
            context = copy.deepcopy(para['context'])
            # Convert Traditional characters to Simplified
            context = Traditional2Simplified(context)
            # Add whitespace markers around every CJK char in the context
            context_chs = _tokenize_chinese_chars(context)
            # Same segmentation on the ORIGINAL (Traditional) context so the
            # two token streams stay index-aligned char by char.
            context_fhs = _tokenize_chinese_chars(para['context'])
            doc_tokens = []
            ori_doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            for ic, c in enumerate(context_chs):
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                        ori_doc_tokens.append(context_fhs[ic])
                    else:
                        doc_tokens[-1] += c
                        ori_doc_tokens[-1] += context_fhs[ic]
                    prev_is_whitespace = False
                if c != SPIECE_UNDERLINE:
                    # Map every real (non-marker) character to its token index.
                    char_to_word_offset.append(len(doc_tokens) - 1)
            # Simplified conversion must be 1:1 for the alignment to hold.
            assert len(context_chs) == len(context_fhs)
            for qas in para['qas']:
                qid = qas['id']
                ques_text = Traditional2Simplified(qas['question'])
                ans_text = Traditional2Simplified(qas['answers'][0]['text'])
                start_position_final = None
                end_position_final = None
                if is_training:
                    start_position = qas['answers'][0]['answer_start']
                    end_position = start_position + len(ans_text) - 1
                    # Skip leading whitespace in the raw context.
                    while context[start_position] == " " or context[start_position] == "\t" or \
                            context[start_position] == "\r" or context[start_position] == "\n":
                        start_position += 1
                    start_position_final = char_to_word_offset[start_position]
                    end_position_final = char_to_word_offset[end_position]
                    # Answers never start on punctuation; nudge past it.
                    if doc_tokens[start_position_final] in {"。", ",", ":", ":", ".", ","}:
                        start_position_final += 1
                    actual_text = "".join(doc_tokens[start_position_final:(end_position_final + 1)])
                    cleaned_answer_text = "".join(whitespace_tokenize(ans_text))
                    if actual_text != cleaned_answer_text:
                        # Projection failed to reproduce the annotated answer;
                        # keep the example but count the mismatch.
                        print(actual_text, 'V.S', cleaned_answer_text)
                        mis_match += 1
                examples.append({'doc_tokens': doc_tokens,
                                 'ori_doc_tokens': ori_doc_tokens,
                                 'orig_answer_text': ans_text,
                                 'qid': qid,
                                 'question': ques_text,
                                 'answer': ans_text,
                                 'start_position': start_position_final,
                                 'end_position': end_position_final})
    print('examples num:', len(examples))
    print('mis match:', mis_match)
    os.makedirs('/'.join(output_files[0].split('/')[0:-1]), exist_ok=True)
    # NOTE(review): file handle from open() is never explicitly closed here —
    # relies on GC; a with-block would be safer.
    json.dump(examples, open(output_files[0], 'w'))

    # to features
    features = []
    unique_id = 1000000000
    for (example_index, example) in enumerate(tqdm(examples)):
        query_tokens = tokenizer.tokenize(example['question'])
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example['doc_tokens']):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if is_training:
            tok_start_position = orig_to_tok_index[example['start_position']]  # map old tokens to new (sub)tokens; this is the new start
            if example['end_position'] < len(example['doc_tokens']) - 1:
                tok_end_position = orig_to_tok_index[example['end_position'] + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example['orig_answer_text'])

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

        doc_spans = []
        _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
        start_offset = 0
        # Slide a window of at most max_tokens_for_doc over the document.
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)

        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Layout: [CLS] query [SEP] doc_chunk [SEP]
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            start_position = None
            end_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                if tok_start_position == -1 and tok_end_position == -1:
                    start_position = 0  # the question has no answer; 0 is the position of [CLS]
                    end_position = 0
                else:  # if the question originally has an answer, drop windows that don't contain it
                    out_of_span = False
                    doc_start = doc_span.start  # start and end mapped back to the original text
                    doc_end = doc_span.start + doc_span.length - 1
                    if not (tok_start_position >= doc_start and tok_end_position <= doc_end):  # window without the answer acts as no-answer augmentation
                        out_of_span = True
                    if out_of_span:
                        start_position = 0
                        end_position = 0
                    else:
                        doc_offset = len(query_tokens) + 2
                        start_position = tok_start_position - doc_start + doc_offset
                        end_position = tok_end_position - doc_start + doc_offset

            features.append({'unique_id': unique_id,
                             'example_index': example_index,
                             'doc_span_index': doc_span_index,
                             'tokens': tokens,
                             'token_to_orig_map': token_to_orig_map,
                             'token_is_max_context': token_is_max_context,
                             'input_ids': input_ids,
                             'input_mask': input_mask,
                             'segment_ids': segment_ids,
                             'start_position': start_position,
                             'end_position': end_position})
            unique_id += 1

    print('features num:', len(features))
    json.dump(features, open(output_files[1], 'w'))
| 14,501 | 40.913295 | 149 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/preprocess/cmrc2018_output.py | import collections
import json
import math
from tqdm import tqdm
from tools.official_tokenization import BasicTokenizer
def write_predictions_topk(FLAGS, all_examples, all_features, all_results, n_best_size,
                           max_answer_length, output_prediction_file, output_nbest_file):
    # XLNet-style decoding: each model result carries the top `FLAGS.start_n_top`
    # start positions and, for each start, `FLAGS.end_n_top` end positions
    # (flattened into one list), plus a CLS logit used as the "no answer" score.
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index",
         "start_log_prob", "end_log_prob"])
    _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "NbestPrediction", ["text", "start_log_prob", "end_log_prob"])
    """Write final predictions to the json file and log-odds of null if needed."""
    print("Writing predictions to: %s" % (output_prediction_file))

    # Group the sliding-window features by the example they were built from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature['example_index']].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature['unique_id']]
            cur_null_score = result.cls_logits
            # if we could have irrelevant answers, get the min score of irrelevant
            score_null = min(score_null, cur_null_score)
            for i in range(FLAGS.start_n_top):
                for j in range(FLAGS.end_n_top):
                    start_log_prob = result.start_top_log_probs[i]
                    start_index = result.start_top_index[i]
                    # End candidates are stored flattened: row i, column j.
                    j_index = i * FLAGS.end_n_top + j
                    end_log_prob = result.end_top_log_probs[j_index]
                    end_index = result.end_top_index[j_index]
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= feature['paragraph_len'] - 1:
                        continue
                    if end_index >= feature['paragraph_len'] - 1:
                        continue
                    # token_is_max_context may be keyed by int or str depending
                    # on whether the feature was reloaded from JSON; try both.
                    if not feature['token_is_max_context'].get(start_index, False) and \
                            not feature['token_is_max_context'].get(str(start_index), False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_log_prob=start_log_prob,
                            end_log_prob=end_log_prob))
        # Rank candidates by joint start+end log-probability.
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_log_prob + x.end_log_prob),
            reverse=True)
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            # Map token indices back to character offsets in the raw paragraph.
            tok_start_to_orig_index = feature['tok_start_to_orig_index']
            tok_end_to_orig_index = feature['tok_end_to_orig_index']
            start_orig_pos = tok_start_to_orig_index[pred.start_index]
            end_orig_pos = tok_end_to_orig_index[pred.end_index]
            paragraph_text = example['paragraph_text']
            final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_log_prob=pred.start_log_prob,
                    end_log_prob=pred.end_log_prob))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(
                _NbestPrediction(text="", start_log_prob=-1e6,
                                 end_log_prob=-1e6))
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_log_prob + entry.end_log_prob)
            if not best_non_null_entry:
                best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_log_prob"] = entry.start_log_prob
            output["end_log_prob"] = entry.end_log_prob
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        assert best_non_null_entry is not None
        score_diff = score_null
        scores_diff_json[example['qas_id']] = score_diff
        # note(zhiliny): always predict best_non_null_entry
        # and the evaluation script will search for the best threshold
        all_predictions[example['qas_id']] = best_non_null_entry.text
        all_nbest_json[example['qas_id']] = nbest_json
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + "\n")
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + "\n")
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, output_prediction_file,
                      output_nbest_file, version_2_with_negative=False, null_score_diff_threshold=0.):
    """Write final predictions to the json file and log-odds of null if needed."""
    print("Writing predictions to: %s" % (output_prediction_file))
    print("Writing nbest to: %s" % (output_nbest_file))

    # Group the sliding-window features by the example they were built from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature['example_index']].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(tqdm(all_examples)):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature['unique_id']]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature['tokens']):
                        continue
                    if end_index >= len(feature['tokens']):
                        continue
                    # token_to_orig_map may be keyed by int or str depending on
                    # whether the feature was reloaded from JSON; try both.
                    if str(start_index) not in feature['token_to_orig_map'] and \
                            start_index not in feature['token_to_orig_map']:
                        continue
                    if str(end_index) not in feature['token_to_orig_map'] and \
                            end_index not in feature['token_to_orig_map']:
                        continue
                    if not feature['token_is_max_context'].get(str(start_index), False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit))
        # Rank candidates by joint start+end logit.
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature['tokens'][pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature['token_to_orig_map'][str(pred.start_index)]
                orig_doc_end = feature['token_to_orig_map'][str(pred.end_index)]
                orig_tokens = example['doc_tokens'][orig_doc_start:(orig_doc_end + 1)]
                tok_text = "".join(tok_tokens)

                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")

                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = "".join(orig_tokens)
                # Align the WordPiece text back onto the original characters.
                final_text = get_final_text(tok_text, orig_text, do_lower_case)
                if final_text in seen_predictions:
                    continue
                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))

        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(
                    _NbestPrediction(
                        text="",
                        start_logit=null_start_logit,
                        end_logit=null_end_logit))
            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = float(probs[i])
            output["start_logit"] = float(entry.start_logit)
            output["end_logit"] = float(entry.end_logit)
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        if not version_2_with_negative:
            all_predictions[example['qid']] = nbest_json[0]["text"]
            all_nbest_json[example['qid']] = nbest_json
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
            scores_diff_json[example['qid']] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example['qid']] = ""
            else:
                all_predictions[example['qid']] = best_non_null_entry.text
            all_nbest_json[example['qid']] = nbest_json
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + "\n")
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    :param pred_text: detokenized WordPiece prediction
    :param orig_text: span of the original document text containing the answer
    :param do_lower_case: must match the tokenizer's lower-casing setting
    :param verbose_logging: if True, print diagnostics when alignment fails
    :return: the best-effort slice of ``orig_text`` matching ``pred_text``;
        falls back to ``orig_text`` whenever the alignment heuristic fails
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        # Return (text-without-spaces, map from stripped index -> original index).
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = "".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            print("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            print("Length not equal after stripping spaces: '%s' vs '%s'" % (orig_ns_text, tok_ns_text))
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        if verbose_logging:
            print("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        if verbose_logging:
            print("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
| 19,623 | 40.576271 | 104 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/preprocess/cmrc2018_preprocess.py | import collections
import json
import os
from tqdm import tqdm
from tools import official_tokenization as tokenization
SPIECE_UNDERLINE = '▁'
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def json2features(input_file, output_files, tokenizer, is_training=False, repeat_limit=3, max_query_length=64,
                  max_seq_length=512, doc_stride=128):
    """Convert a SQuAD-style (CMRC 2018) JSON file into example and feature JSON files.

    Reads ``input_file``, splits each context into word-level tokens (one token
    per CJK char), and writes:
      - ``output_files[0]``: the intermediate examples (with answer spans), and
      - ``output_files[1]``: BERT input features built with a sliding window of
        width ``max_seq_length`` and stride ``doc_stride``.

    When ``is_training`` is True, character-level answer offsets are projected
    onto the token sequence; ``repeat_limit`` bounds how far the annotated
    ``answer_start`` may be shifted left to re-find the answer text.
    """
    with open(input_file, 'r') as f:
        train_data = json.load(f)
        train_data = train_data['data']

    def _is_chinese_char(cp):
        # True if code point `cp` falls in any CJK Unified Ideographs block.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False

    def is_fuhao(c):
        # True if `c` is one of the CJK/ASCII punctuation marks treated as a
        # standalone token boundary ("fuhao" = punctuation).
        if c == '。' or c == ',' or c == '!' or c == '?' or c == ';' or c == '、' or c == ':' or c == '(' or c == ')' \
                or c == '-' or c == '~' or c == '「' or c == '《' or c == '》' or c == ',' or c == '」' or c == '"' or c == '“' or c == '”' \
                or c == '$' or c == '『' or c == '』' or c == '—' or c == ';' or c == '。' or c == '(' or c == ')' or c == '-' or c == '~' or c == '。' \
                or c == '‘' or c == '’':
            return True
        return False

    def _tokenize_chinese_chars(text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if _is_chinese_char(cp) or is_fuhao(char):
                # Surround each CJK char / punctuation with SPIECE_UNDERLINE
                # markers so that each becomes its own whitespace-level token.
                if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:
                    output.append(SPIECE_UNDERLINE)
                output.append(char)
                output.append(SPIECE_UNDERLINE)
            else:
                output.append(char)
        return "".join(output)

    def is_whitespace(c):
        # SPIECE_UNDERLINE is treated as whitespace so the markers inserted
        # above act as token separators.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F or c == SPIECE_UNDERLINE:
            return True
        return False

    # to examples
    examples = []
    mis_match = 0
    for article in tqdm(train_data):
        for para in article['paragraphs']:
            context = para['context']
            context_chs = _tokenize_chinese_chars(context)
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            for c in context_chs:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
                if c != SPIECE_UNDERLINE:
                    # Map every real (non-marker) character to its token index.
                    char_to_word_offset.append(len(doc_tokens) - 1)
            for qas in para['qas']:
                qid = qas['id']
                ques_text = qas['question']
                ans_text = qas['answers'][0]['text']
                start_position_final = None
                end_position_final = None
                if is_training:
                    count_i = 0
                    start_position = qas['answers'][0]['answer_start']
                    end_position = start_position + len(ans_text) - 1
                    # Some annotations are off by a little: shift left up to
                    # repeat_limit chars until the slice matches the answer.
                    while context[start_position:end_position + 1] != ans_text and count_i < repeat_limit:
                        start_position -= 1
                        end_position -= 1
                        count_i += 1
                    # Skip leading whitespace in the raw context.
                    while context[start_position] == " " or context[start_position] == "\t" or \
                            context[start_position] == "\r" or context[start_position] == "\n":
                        start_position += 1
                    start_position_final = char_to_word_offset[start_position]
                    end_position_final = char_to_word_offset[end_position]
                    # Answers never start on punctuation; nudge past it.
                    if doc_tokens[start_position_final] in {"。", ",", ":", ":", ".", ","}:
                        start_position_final += 1
                    actual_text = "".join(doc_tokens[start_position_final:(end_position_final + 1)])
                    cleaned_answer_text = "".join(tokenization.whitespace_tokenize(ans_text))
                    if actual_text != cleaned_answer_text:
                        # Projection failed to reproduce the annotated answer;
                        # keep the example but count the mismatch.
                        print(actual_text, 'V.S', cleaned_answer_text)
                        mis_match += 1
                        # ipdb.set_trace()
                examples.append({'doc_tokens': doc_tokens,
                                 'orig_answer_text': ans_text,
                                 'qid': qid,
                                 'question': ques_text,
                                 'answer': ans_text,
                                 'start_position': start_position_final,
                                 'end_position': end_position_final})
    print('examples num:', len(examples))
    print('mis_match:', mis_match)
    os.makedirs('/'.join(output_files[0].split('/')[0:-1]), exist_ok=True)
    # NOTE(review): file handle from open() is never explicitly closed here —
    # relies on GC; a with-block would be safer.
    json.dump(examples, open(output_files[0], 'w'))

    # to features
    features = []
    unique_id = 1000000000
    for (example_index, example) in enumerate(tqdm(examples)):
        query_tokens = tokenizer.tokenize(example['question'])
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example['doc_tokens']):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if is_training:
            tok_start_position = orig_to_tok_index[example['start_position']]  # map old tokens to new (sub)tokens; this is the new start
            if example['end_position'] < len(example['doc_tokens']) - 1:
                tok_end_position = orig_to_tok_index[example['end_position'] + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example['orig_answer_text'])

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

        doc_spans = []
        _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
        start_offset = 0
        # Slide a window of at most max_tokens_for_doc over the document.
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)

        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Layout: [CLS] query [SEP] doc_chunk [SEP]
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            start_position = None
            end_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                if tok_start_position == -1 and tok_end_position == -1:
                    start_position = 0  # the question has no answer; 0 is the position of [CLS]
                    end_position = 0
                else:  # if the question originally has an answer, drop windows that don't contain it
                    out_of_span = False
                    doc_start = doc_span.start  # start and end mapped back to the original text
                    doc_end = doc_span.start + doc_span.length - 1
                    if not (tok_start_position >= doc_start and tok_end_position <= doc_end):  # window without the answer acts as no-answer augmentation
                        out_of_span = True
                    if out_of_span:
                        start_position = 0
                        end_position = 0
                    else:
                        doc_offset = len(query_tokens) + 2
                        start_position = tok_start_position - doc_start + doc_offset
                        end_position = tok_end_position - doc_start + doc_offset

            features.append({'unique_id': unique_id,
                             'example_index': example_index,
                             'doc_span_index': doc_span_index,
                             'tokens': tokens,
                             'token_to_orig_map': token_to_orig_map,
                             'token_is_max_context': token_is_max_context,
                             'input_ids': input_ids,
                             'input_mask': input_mask,
                             'segment_ids': segment_ids,
                             'start_position': start_position,
                             'end_position': end_position})
            unique_id += 1

    print('features num:', len(features))
    json.dump(features, open(output_files[1], 'w'))
def _convert_index(index, pos, M=None, is_start=True):
if pos >= len(index):
pos = len(index) - 1
if index[pos] is not None:
return index[pos]
N = len(index)
rear = pos
while rear < N - 1 and index[rear] is None:
rear += 1
front = pos
while front > 0 and index[front] is None:
front -= 1
assert index[front] is not None or index[rear] is not None
if index[front] is None:
if index[rear] >= 1:
if is_start:
return 0
else:
return index[rear] - 1
return index[rear]
if index[rear] is None:
if M is not None and index[front] < M - 1:
if is_start:
return index[front] + 1
else:
return M - 1
return index[front]
if is_start:
if index[rear] > index[front] + 1:
return index[front] + 1
else:
return index[rear]
else:
if index[rear] > index[front] + 1:
return index[rear] - 1
else:
return index[front]
| 14,964 | 40.225895 | 149 | py |
CLUE | CLUE-master/baselines/models_pytorch/mrc_pytorch/preprocess/cmrc2018_evaluate.py | # -*- coding: utf-8 -*-
'''
Evaluation script for CMRC 2018
version: v5 - special
Note:
v5 - special: Evaluate on SQuAD-style CMRC 2018 Datasets
v5: formatted output, add usage description
v4: fixed segmentation issues
'''
from __future__ import print_function
import json
import re
from collections import OrderedDict
import nltk
# split Chinese with English
def mixed_segmentation(in_str, rm_punc=False):
    """Segment mixed Chinese/English text.

    CJK characters and punctuation become single-character tokens,
    while runs of other characters (e.g. English words) are tokenized
    with nltk. When *rm_punc* is True, punctuation is dropped entirely.
    """
    text = str(in_str).lower().strip()
    punctuation = ['-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=',
               ',', '。', ':', '?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、',
               '「', '」', '(', ')', '-', '~', '『', '』']
    tokens = []
    pending = ""

    def flush(buf):
        # Tokenize any buffered non-CJK run with nltk before emitting.
        if buf:
            tokens.extend(nltk.word_tokenize(buf))
        return ""

    for ch in text:
        if rm_punc and ch in punctuation:
            continue
        if ch in punctuation or re.search(r'[\u4e00-\u9fa5]', ch):
            pending = flush(pending)
            tokens.append(ch)
        else:
            pending += ch
    flush(pending)  # handle trailing non-CJK run
    return tokens
# remove punctuation
def remove_punctuation(in_str):
    """Lower-case, trim, and strip punctuation characters from *in_str*."""
    punctuation = {'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=',
               ',', '。', ':', '?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、',
               '「', '」', '(', ')', '-', '~', '『', '』'}
    text = str(in_str).lower().strip()
    return ''.join(ch for ch in text if ch not in punctuation)
# find longest common string
def find_lcs(s1, s2):
    """Return the longest common substring of *s1*/*s2* and its length."""
    # dp[i][j] = length of the common suffix of s1[:i] and s2[:j].
    dp = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    best_len = 0
    best_end = 0  # exclusive end index of the best match in s1
    for i, a in enumerate(s1):
        for j, b in enumerate(s2):
            if a == b:
                dp[i + 1][j + 1] = dp[i][j] + 1
                if dp[i + 1][j + 1] > best_len:
                    best_len = dp[i + 1][j + 1]
                    best_end = i + 1
    return s1[best_end - best_len:best_end], best_len
def evaluate(ground_truth_file, prediction_file):
    """Score *prediction_file* (dict: question id -> answer string) against
    the SQuAD-style *ground_truth_file* dict.

    Returns (f1_score, em_score, total_count, skip_count), where F1/EM
    are percentages averaged over ALL questions — questions missing from
    the predictions are counted in the denominator but contribute 0.

    NOTE(review): raises ZeroDivisionError if the ground truth contains
    no questions at all — confirm callers never pass an empty dataset.
    """
    f1 = 0
    em = 0
    total_count = 0
    skip_count = 0
    for instance in ground_truth_file["data"]:
        # context_id = instance['context_id'].strip()
        # context_text = instance['context_text'].strip()
        for para in instance["paragraphs"]:
            for qas in para['qas']:
                total_count += 1
                query_id = qas['id'].strip()
                query_text = qas['question'].strip()  # unused; kept as-is
                answers = [x["text"] for x in qas['answers']]
                # Unanswered questions still count in the denominator.
                if query_id not in prediction_file:
                    print('Unanswered question: {}\n'.format(query_id))
                    skip_count += 1
                    continue
                prediction = str(prediction_file[query_id])
                # Each question scores the best match over all references.
                f1 += calc_f1_score(answers, prediction)
                em += calc_em_score(answers, prediction)
    f1_score = 100.0 * f1 / total_count
    em_score = 100.0 * em / total_count
    return f1_score, em_score, total_count, skip_count
def evaluate2(ground_truth_file, prediction_file):
    """Like :func:`evaluate`, but additionally reports accuracy on
    YES/NO questions and on unanswerable questions (empty answer list).

    Returns (f1_score, em_score, yes_acc, no_acc, unk_acc,
    total_count, skip_count), all percentages except the two counts.

    NOTE(review): the final divisions use yes_count/no_count/unk_count,
    so a dataset with no YES (or NO / unanswerable) questions raises
    ZeroDivisionError — confirm upstream data always contains all three.
    """
    f1 = 0
    em = 0
    total_count = 0
    skip_count = 0
    yes_count = 0
    yes_correct = 0
    no_count = 0
    no_correct = 0
    unk_count = 0
    unk_correct = 0
    for instance in ground_truth_file["data"]:
        for para in instance["paragraphs"]:
            for qas in para['qas']:
                total_count += 1
                query_id = qas['id'].strip()
                if query_id not in prediction_file:
                    print('Unanswered question: {}\n'.format(query_id))
                    skip_count += 1
                    continue
                prediction = str(prediction_file[query_id])
                if len(qas['answers']) == 0:
                    # Unanswerable: correct only if prediction is empty.
                    unk_count += 1
                    answers = [""]
                    if prediction == "":
                        unk_correct += 1
                else:
                    answers = []
                    for x in qas['answers']:
                        answers.append(x['text'])
                        # Track YES/NO questions separately.
                        if x['text'] == 'YES':
                            if prediction == 'YES':
                                yes_correct += 1
                            yes_count += 1
                        if x['text'] == 'NO':
                            if prediction == 'NO':
                                no_correct += 1
                            no_count += 1
                f1 += calc_f1_score(answers, prediction)
                em += calc_em_score(answers, prediction)
    f1_score = 100.0 * f1 / total_count
    em_score = 100.0 * em / total_count
    yes_acc = 100.0 * yes_correct / yes_count
    no_acc = 100.0 * no_correct / no_count
    unk_acc = 100.0 * unk_correct / unk_count
    return f1_score, em_score, yes_acc, no_acc, unk_acc, total_count, skip_count
def calc_f1_score(answers, prediction):
    """Best token-level F1 of *prediction* over all reference *answers*.

    Tokens come from punctuation-free mixed segmentation; the overlap
    is the longest common token subsequence from :func:`find_lcs`.
    """
    pred_segs = mixed_segmentation(prediction, rm_punc=True)
    scores = []
    for ans in answers:
        ans_segs = mixed_segmentation(ans, rm_punc=True)
        _, overlap = find_lcs(ans_segs, pred_segs)
        if overlap == 0:
            scores.append(0)
            continue
        precision = 1.0 * overlap / len(pred_segs)
        recall = 1.0 * overlap / len(ans_segs)
        scores.append((2 * precision * recall) / (precision + recall))
    return max(scores)
def calc_em_score(answers, prediction):
    """Return 1 if *prediction* exactly matches any reference answer
    after punctuation removal, else 0."""
    pred_norm = remove_punctuation(prediction)
    return int(any(remove_punctuation(ans) == pred_norm for ans in answers))
def get_eval(original_file, prediction_file):
    """Evaluate a prediction file against the ground truth.

    Parameters
    ----------
    original_file : str
        Path to the SQuAD-style ground-truth JSON file.
    prediction_file : str
        Path to the prediction JSON file (question id -> answer string).

    Returns
    -------
    OrderedDict
        Keys AVERAGE/F1/EM (formatted '%.3f' strings) plus TOTAL/SKIP.
    """
    # Use context managers so the file handles are closed promptly
    # (the original left both handles open until garbage collection).
    with open(original_file, 'r') as f:
        ground_truth = json.load(f)
    with open(prediction_file, 'r') as f:
        predictions = json.load(f)
    F1, EM, TOTAL, SKIP = evaluate(ground_truth, predictions)
    AVG = (EM + F1) * 0.5
    output_result = OrderedDict()
    output_result['AVERAGE'] = '%.3f' % AVG
    output_result['F1'] = '%.3f' % F1
    output_result['EM'] = '%.3f' % EM
    output_result['TOTAL'] = TOTAL
    output_result['SKIP'] = SKIP
    return output_result
def get_eval_with_neg(original_file, prediction_file):
    """Evaluate predictions including YES/NO and unanswerable questions.

    Parameters
    ----------
    original_file : str
        Path to the SQuAD-style ground-truth JSON file.
    prediction_file : str
        Path to the prediction JSON file (question id -> answer string).

    Returns
    -------
    OrderedDict
        Keys AVERAGE/F1/EM/YES/NO/UNK (formatted '%.3f' strings)
        plus TOTAL/SKIP.
    """
    # Context managers close the handles promptly (the original leaked them).
    with open(original_file, 'r') as f:
        ground_truth = json.load(f)
    with open(prediction_file, 'r') as f:
        predictions = json.load(f)
    F1, EM, YES_ACC, NO_ACC, UNK_ACC, TOTAL, SKIP = evaluate2(ground_truth, predictions)
    AVG = (EM + F1) * 0.5
    output_result = OrderedDict()
    output_result['AVERAGE'] = '%.3f' % AVG
    output_result['F1'] = '%.3f' % F1
    output_result['EM'] = '%.3f' % EM
    output_result['YES'] = '%.3f' % YES_ACC
    output_result['NO'] = '%.3f' % NO_ACC
    output_result['UNK'] = '%.3f' % UNK_ACC
    output_result['TOTAL'] = TOTAL
    output_result['SKIP'] = SKIP
    return output_result
| 7,007 | 31.294931 | 97 | py |
SparseCoupledDictionaryLearning | SparseCoupledDictionaryLearning-master/args.py | # -*- coding: utf-8 -*-
"""SF DECONVOLVE ARGUMENTS
This module sets the arguments for sf_deconvolve.py.
:Author: Samuel Farrens <samuel.farrens@gmail.com>
:Version: 2.4
:Date: 23/10/2017
"""
import argparse as ap
from argparse import ArgumentDefaultsHelpFormatter as formatter
class ArgParser(ap.ArgumentParser):
    """ArgumentParser variant that accepts .ini-style argument files.

    Overrides :meth:`convert_arg_line_to_args` so that files referenced
    via ``fromfile_prefix_chars`` may contain blank lines, '#'/';'
    comments, and ``key=value`` / ``key value`` entries.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def convert_arg_line_to_args(self, line):
        """Yield argument tokens for one line of an argument file.

        Blank lines and lines whose first token starts with '#' or ';'
        are skipped; a missing leading '-' is turned into '--', and a
        'key=value' first token is split into two tokens.
        """
        tokens = line.split()
        if not tokens or tokens[0][0] in ('#', ';'):
            return
        if not tokens[0].startswith('-'):
            tokens[0] = '--' + tokens[0]
        if len(tokens) > 1 and '=' in tokens[0]:
            tokens = tokens[0].split('=') + tokens[1:]
        for tok in tokens:
            yield tok
def get_opts(args=None):
    """Get script options.

    Builds the argument parser for coupled dictionary training
    (required I/O arguments plus optional hyper-parameters) and parses
    *args* (defaults to sys.argv).

    Returns
    -------
    argparse.Namespace
        The parsed arguments namespace.
    """
    # Set up argument parser.
    # NOTE(review): the usage/description strings still say "PSF
    # Deconvolution" — apparently inherited from sf_deconvolve; runtime
    # text left unchanged here.
    parser = ArgParser(add_help=False, usage='%(prog)s [options]',
                       description='PSF Deconvolution Script',
                       formatter_class=formatter,
                       fromfile_prefix_chars='@')
    required = parser.add_argument_group('Required Arguments')
    optional = parser.add_argument_group('Optional Arguments')
    # Add arguments
    optional.add_argument('-h', '--help', action='help',
                          help='show this help message and exit')
    optional.add_argument('-q', '--quiet', action='store_true',
                          help='Suppress verbose.')
    required.add_argument('-ih', '--inputhigh', required=True,
                          help='Input data file name (high resolution).')
    required.add_argument('-il', '--inputlow', required=True,
                          help='Input data file name (low resolution).')
    required.add_argument('-d', '--dictsize', type=int,
                          help='Dictionanry size.')
    required.add_argument('-img', '--imageN', type=int,
                          help='Size of input image.')
    optional.add_argument('-n', '--n_iter', type=int, default=150,
                          help='Number of iterations.')
    optional.add_argument('--window', type=int, default=1,
                          help='Window to measure error.')
    optional.add_argument('--bands_h', type=int, default=25, help='Number of bands in high resolution')
    optional.add_argument('--bands_l', type=int, default=9, help='Number of bands in low resolution')
    # ADMM step sizes / thresholds consumed by run_script via opts.
    optional.add_argument('--c1', type=float, default=0.4)
    optional.add_argument('--c2', type=float, default=0.4)
    optional.add_argument('--c3', type=float, default=0.8)
    optional.add_argument('--maxbeta', type=float, default=1e+6)
    optional.add_argument('--delta', type=float, default=1e-4)
    optional.add_argument('--beta', type=float, default=0.01)
    optional.add_argument('--lamda', type=float, default=0.1)
    # Return the argument namespace
    return parser.parse_args(args)
| 3,662 | 26.961832 | 103 | py |
SparseCoupledDictionaryLearning | SparseCoupledDictionaryLearning-master/CDLOps.py |
"""SPARSE COUPLED DICTIONARY LEARNING
Sumplementary Class and Methods for the execution of intermediate matrices (Lagrange multiplier matrices, sparse coding matrices)
:Author: Nancy Panousopoulou <apanouso@ics.forth.gr>
:Reference document: Konstantina Fotiadou, Grigorios Tsagkatakis, Panagiotos Tsakalides `` Linear Inverse Problems with Sparsity Constraints'', DEDALE DELIVERABLE 3.1, 2016.
:Date: December 2017
"""
import numpy as np
import AuxOps as aops
import copy
class CDL():
    """State container for coupled dictionary learning.

    Holds the input data cubes plus the sparse-coding matrices
    (``wh``/``wl``), the thresholding matrices (``p``/``q``) and the
    Lagrange multiplier matrices (``y1``/``y2``/``y3``), each of shape
    (dictsize, imageN).

    Parameters
    ----------
    datain_h : np.ndarray
        Input data array containing the high-resolution data cube.
    datain_l : np.ndarray
        Input data array containing the low-resolution data cube.
    dictsize : int
        Number of dictionary atoms.
    imageN : int
        Number of samples in the data cubes.
    """

    def __init__(self, datain_h, datain_l, dictsize, imageN):
        shape = (dictsize, imageN)
        self.datain_h = datain_h
        self.datain_l = datain_l
        # Sparse-coding matrices (high / low resolution).
        self.wh = np.zeros(shape)
        self.wl = np.zeros(shape)
        # Soft-thresholding matrices.
        self.p = np.zeros(shape)
        self.q = np.zeros(shape)
        # Lagrange multiplier matrices.
        self.y1 = np.zeros(shape)
        self.y2 = np.zeros(shape)
        self.y3 = np.zeros(shape)
        self.dictsize = dictsize
def updateCDL(cdlin, dictin_ht, dictin_lt, dtdh, dtdl, c1,c2,c3, maxbeta, beta, lamda):
    """
    Run one round of intermediate-matrix updates on *cdlin*.

    Input Arguments
    ----------
    cdlin : CDL object
        The set of intermediate matrices to be updated (modified and
        also returned).
    dictin_ht: np.array
        The transpose of the input dictionary in high resolution.
    dictin_lt: np.array
        The transpose of the input dictionary in low resolution.
    dtdh: np.array
        Auxiliary matrix - first term of Equation (11) for the high
        resolution dictionaries (callers pass it pre-inverted).
    dtdl: np.array
        Auxiliary matrix - first term of Equation (11) for the low
        resolution dictionaries (callers pass it pre-inverted).
    c1, c2, c3: double
        Step size parameters for the augmented Lagrangian function.
    maxbeta, beta: double
        Auxiliary parameters for updating Lagrange multiplier matrices.
    lamda: double
        The threshold value.

    Returns
    -------
    CDL
        *cdlin*, with wh/wl, p/q and y1/y2/y3 replaced by fresh copies.
    """
    y11= cdlin.y1
    y22 = cdlin.y2
    y33 = cdlin.y3
    pp = cdlin.p
    qq = cdlin.q
    datain_h = cdlin.datain_h
    datain_l = cdlin.datain_l
    wl = cdlin.wl
    wh = cdlin.wh
    #print('wh & wl')
    # Update the sparse coding matrices according to Eq. (11).
    whl = aops.calcW(datain_h, datain_l, dictin_ht,dictin_lt, dtdh, dtdl, wh, wl, c1,c2,c3, y11,y22,y33,pp,qq)
    # Update the thresholding matrices according to Eq. (13).
    pp = aops.updThr(np.array(whl[0])-y11/c1, lamda)
    qq = aops.updThr(np.array(whl[1])-y22/c2, lamda)
    # Update the Lagrange multiplier matrices according to Eq. (19).
    # NOTE(review): y22 is stepped with c1 rather than c2 below —
    # confirm this is intentional.
    y11 = aops.updateY(y11, c1, pp, np.array(whl[0]), maxbeta, beta)
    y22 = aops.updateY(y22, c1, qq, np.array(whl[1]), maxbeta, beta)
    y33 = aops.updateY(y33, c3, np.array(whl[0]), np.array(whl[1]), maxbeta, beta)
    #print(y11.shape)
    #print(y22.shape)
    #print(y33.shape)
    # Save back to the CDL object and return.
    cdlin.wh = np.array(whl[0]).copy()
    cdlin.wl = np.array(whl[1]).copy()
    cdlin.y1 = y11.copy()
    cdlin.y2 = y22.copy()
    cdlin.y3 = y33.copy()
    cdlin.p = pp.copy()
    cdlin.q = qq.copy()
    return cdlin
| 3,834 | 27.198529 | 175 | py |
SparseCoupledDictionaryLearning | SparseCoupledDictionaryLearning-master/AuxOps.py | """SPARSE COUPLED DICTIONARY LEARNING
Sumplementary Methods for the execution of intermediate matrices (Lagrange multiplier matrices, sparse coding matrices)
:Author: Nancy Panousopoulou <apanouso@ics.forth.gr>
:Reference document: Konstantina Fotiadou, Grigorios Tsagkatakis, Panagiotos Tsakalides `` Linear Inverse Problems with Sparsity Constraints'', DEDALE DELIVERABLE 3.1, 2016.
:Date: December 2017
"""
import numpy as np
from numpy.linalg import inv
#from numpy.linalg import solve
from scipy.linalg import solve
def updateY(previousY, c, op1, op2, maxbeta=1e+6, beta=0.01):
    """Lagrange-multiplier update (Equation (19)).

    Returns ``previousY + step * (op1 - op2)`` where the dual step
    ``step = min(maxbeta, beta * c)`` is capped at *maxbeta*.
    """
    step = min(maxbeta, beta * c)
    return previousY + step * (op1 - op2)
def updThr(inputmat, lam=0.1):
    """
    Elementwise soft-thresholding (Equations (13)-(14)).

    With threshold ``th = lam/2``: entries with ``|x| <= th`` become 0,
    larger entries shrink toward zero by ``th``. This is a vectorized
    replacement for the original per-element Python double loop, which
    also needlessly seeded the output with ``np.random.random`` before
    overwriting every entry (the three branches cover all cases), making
    the result deterministic and O(n) in C instead of O(n) in Python.

    Input Arguments
    ----------
    inputmat : np.array
        The input matrix for thresholding (not modified; any shape
        works, not just 2-D as the original loop required).
    lam: double
        The thresholding strength; the effective threshold is lam/2.

    Returns
    -------
    np.array
        Soft-thresholded copy of *inputmat*.
    """
    th = lam / 2.
    # sign(x) * max(|x| - th, 0) reproduces all three original branches.
    return np.sign(inputmat) * np.maximum(np.abs(inputmat) - th, 0.)
def calcW(datain_h, datain_l, dictin_ht, dictin_lt, dtdh, dtdl, wh,wl, c1,c2,c3, y1,y2,y3,p,q):
    """
    Sparse-coding update (Equation (11)).

    The high-resolution codes are computed first and then fed into the
    low-resolution right-hand side, so the two stay coupled through c3.
    Callers pass *dtdh*/*dtdl* already inverted.

    Note: *wh* is accepted for signature compatibility but is not read.

    Returns
    -------
    list
        [w_high, w_low] — the updated sparse-coding matrices.
    """
    # High-resolution codes: right-hand side, then the pre-inverted solve.
    rhs_h = np.dot(dictin_ht, datain_h) + (y1 - y3) + c1*p + c3*wl
    w_high = np.dot(dtdh, rhs_h)
    # Low-resolution codes, coupled through the fresh w_high.
    rhs_l = np.dot(dictin_lt, datain_l) + (y2 - y3) + c2*q + c3*w_high
    w_low = np.dot(dtdl, rhs_l)
    return [w_high, w_low]
| 3,089 | 25.869565 | 175 | py |
SparseCoupledDictionaryLearning | SparseCoupledDictionaryLearning-master/SparseCoupledDictionaryTraining.py |
"""SPARSE COUPLED DICTIONARY LEARNING
The main script for execution
:Author: Nancy Panousopoulou <apanouso@ics.forth.gr>
::Reference document: Konstantina Fotiadou, Grigorios Tsagkatakis, Panagiotos Tsakalides `` Linear Inverse Problems with Sparsity Constraints,'' DEDALE DELIVERABLE 3.1, 2016.
:Date: December 2017
"""
import numpy as np
#from numpy import linalg as la
from numpy import genfromtxt
from math import sqrt
#import scipy.io as sio
from numpy.linalg import inv
from args import get_opts
from CDLOps import CDL, updateCDL
import time
def normD(dictin):
    """
    Rescale every column (atom) of *dictin* to unit Euclidean norm.

    Input Arguments
    ----------
    dictin : np.array
        The input dictionary, atoms stored as columns.
    """
    col_scale = 1 / np.sqrt(np.multiply(dictin, dictin).sum(axis=0))
    return np.dot(dictin, np.diag(col_scale))
def run_script():
    """Run sparse coupled dictionary training.

    Reads every hyper-parameter from the module-global ``opts`` (set by
    :func:`main`), loads the high/low resolution data matrices from CSV,
    then for ``opts.n_iter`` iterations alternates between the ADMM
    auxiliary-matrix update (:func:`updateCDL`) and the dictionary
    update, printing the reconstruction RMSE every ``opts.window``
    iterations.
    """
    # the size of the image
    imageN = opts.imageN
    # the size of the dictionary
    dictsize = opts.dictsize
    # the number of bands in high resolution
    bands_h_N = opts.bands_h
    # the number of bands in low resolution
    bands_l_N = opts.bands_l
    # parameters for training
    c1 = opts.c1 # Default value: 0.4
    c2 = opts.c2 # Default value: 0.4
    c3 = opts.c3 # Default value: 0.8
    maxbeta = opts.maxbeta #Default value: 1e+6
    delta = opts.delta #Default value: 1e-4
    beta = opts.beta #Default value: 0.01
    lamda = opts.lamda #Default value: 0.1
    # number of iterations for training
    train_iter = opts.n_iter #default value: 150
    # the window (iteration stride) for reporting the error
    wind = opts.window
    # input data are in the form (# of Pixels) x (# of Bands)
    data_h = genfromtxt(opts.inputhigh, delimiter=',')
    data_l = genfromtxt(opts.inputlow, delimiter=',')
    # the initial dictionaries are the first `dictsize` columns of the data
    dict_h = data_h[:, 0:dictsize]
    dict_l = data_l[:, 0:dictsize]
    #normalize the dictionaries
    #dict_l = dict_l /la.norm(dict_l)
    #dict_h = dict_h /la.norm(dict_h)
    #mat2save = './tttmpinit' + '_' + str(imageN) + 'x' + str(dictsize) + '.mat'
    #sio.savemat(mat2save, {'dicth_init':dict_h, 'dictl_init': dict_l})
    # the CDL object holds all auxiliary matrices
    cdl = CDL(data_h, data_l, dictsize, imageN)
    phi_h = np.zeros(dictsize)
    phi_l = np.zeros(dictsize)
    dict_h_upd = np.zeros(dict_h.shape)
    dict_l_upd = np.zeros(dict_l.shape)
    for k in range(train_iter):
        print(k)
        ttime3 = time.time()
        ## prepare the updated dictionary terms for updating Wh, Wl, P, Q, Y1, Y2, Y3
        dict_ht = np.transpose(dict_h)
        dict_lt = np.transpose(dict_l)
        # (D_h^{T} x D_h + (c_1+c_3) x I)^{-1} -- first term of Eq. (11), high resolution
        dtdh = np.dot(np.transpose(dict_h), dict_h) + (c1 + c3)*np.eye(np.transpose(dict_h).shape[0])
        dtdhinv = inv(dtdh)
        # (D_l^{T} x D_l + (c_2+c_3) x I)^{-1} -- first term of Eq. (11), low resolution
        dtdl = np.dot(np.transpose(dict_l), dict_l) + (c2 + c3)*np.eye(np.transpose(dict_l).shape[0])
        dtdlinv = inv(dtdl)
        print('update...')
        # update all auxiliary matrices Wh, Wl, P, Q, Y1, Y2, Y3
        cdl = updateCDL(cdl, dict_ht, dict_lt, dtdhinv, dtdlinv, c1, c2, c3, maxbeta, beta, lamda)
        # per-atom code energies used to scale the dictionary step
        for ii in range(dictsize):
            phi_h[ii] = np.dot(cdl.wh[ii,:], np.transpose(cdl.wh[ii,:])) + delta
            phi_l[ii] = np.dot(cdl.wl[ii,:], np.transpose(cdl.wl[ii,:])) + delta
        dict_h_upd = dict_h + np.dot(data_h, np.transpose(cdl.wh))/(phi_h)
        dict_l_upd = dict_l + np.dot(data_l, np.transpose(cdl.wl))/(phi_l)
        #print(dict_h_upd.shape)
        #print(dict_l_upd.shape)
        # re-normalize dictionary atoms to unit norm
        dict_h = normD(dict_h_upd)
        dict_l = normD(dict_l_upd)
        # Report reconstruction error every `wind` iterations.
        # Fixed: the original used bitwise `~((k+1) % wind)`, which is
        # non-zero (truthy) for every possible modulo result, so the
        # error was computed and printed on every iteration regardless
        # of `wind`.
        if not ((k + 1) % wind):
            err_h = sqrt(np.sum(np.sum(np.square(cdl.datain_h - np.dot(dict_h, cdl.wh)))) / (bands_h_N * imageN))
            err_l = sqrt(np.sum(np.sum(np.square(cdl.datain_l - np.dot(dict_l, cdl.wl)))) / (bands_l_N * imageN))
            print('ERROR HIGH:')
            print(err_h)
            print('ERROR LOW:')
            print(err_l)
        print('Time elapsed for this iteration: ')
        ttime3 = time.time()-ttime3
        print(ttime3)
        #print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        #mat2save = './results' + str(imageN) + 'x' + str(dictsize) + '_' + str(k) +'standalone.mat'
        #sio.savemat(mat2save, {'timeelapsed': ttime3, 'dicth':dict_h, 'dictl': dict_l, 'phi_h': phi_h, 'phi_l': phi_l, 'err_l':err_l, 'err_h': err_h})#, 'wh': wh, 'wl': wl})#'phih': phi_h, 'sw': sw})
def main(args=None):
    """Entry point: parse CLI options (see get_opts) into the
    module-global ``opts`` read by run_script, then launch training."""
    global opts
    opts = get_opts(args)
    run_script()

# Script entry point.
if __name__ == "__main__":
    main()
| 5,357 | 27.652406 | 200 | py |
pointnerf | pointnerf-master/options/train_options.py | from .base_options import BaseOptions
class TrainOptions(BaseOptions):
    """Training-time command-line options (extends BaseOptions)."""

    def initialize(self, parser):
        """Register training-specific arguments (learning rates and
        schedules, test-during-train settings, checkpointing, point
        pruning/probing) and mark this configuration as training."""
        parser = BaseOptions.initialize(self, parser)
        self.is_train = True
        # -------- logging / learning rate schedule --------
        parser.add_argument(
            '--print_freq',
            type=int,
            default=100,
            help='frequency of showing training results on console')
        parser.add_argument('--plr',
                            type=float,
                            default=0.0005,
                            help='initial learning rate')
        parser.add_argument('--lr',
                            type=float,
                            default=0.001,
                            help='initial learning rate')
        parser.add_argument('--lr_policy',
                            type=str,
                            default='lambda',
                            help='learning rate policy: lambda|step|plateau')
        parser.add_argument(
            '--lr_decay_iters',
            type=int,
            default=50,
            help='multiply by a gamma every lr_decay_iters iterations')
        parser.add_argument(
            '--lr_decay_exp',
            type=float,
            default=0.1,
            help='multiply by a gamma every lr_decay_iters iterations')
        # -------- testing during training --------
        parser.add_argument('--train_and_test',
                            type=int,
                            default=0,
                            help='train and test at the same time')
        parser.add_argument('--test_num', type=int, default=1, help='test num')
        parser.add_argument('--test_num_step', type=int, default=1, help='test num')
        parser.add_argument('--test_freq',
                            type=int,
                            default=500,
                            help='test frequency')
        parser.add_argument('--maximum_step',
                            type=int,
                            default=None,
                            help='maximum # of training iterations')
        parser.add_argument('--niter',
                            type=int,
                            default=100,
                            help='# of iter at starting learning rate')
        parser.add_argument(
            '--niter_decay',
            type=int,
            default=100,
            help='# of iter to linearly decay learning rate to zero')
        parser.add_argument('--save_iter_freq',
                            type=int,
                            default=100000,
                            help='saving frequency')
        # -------- point pruning / probing --------
        # NOTE(review): many help strings below just repeat
        # 'saving frequency' — they do not describe the actual option.
        parser.add_argument('--prune_thresh',
                            type=float,
                            default=0.1,
                            help='saving frequency')
        parser.add_argument('--prune_iter',
                            type=int,
                            default=-1,
                            help='saving frequency')
        parser.add_argument('--prune_max_iter',
                            type=int,
                            default=9999999,
                            help='saving frequency')
        parser.add_argument('--alpha_range',
                            type=int,
                            default=0,
                            help='saving frequency')
        parser.add_argument('--prob_freq',
                            type=int,
                            default=0,
                            help='saving frequency')
        parser.add_argument('--prob_num_step',
                            type=int,
                            default=100,
                            help='saving frequency')
        parser.add_argument('--prob_mode',
                            type=int,
                            default=0,
                            help='saving frequency')
        parser.add_argument('--prob_top',
                            type=int,
                            default=1,
                            help='0 randomly select frames, 1 top frames')
        parser.add_argument('--prob_mul',
                            type=float,
                            default=1.0,
                            help='saving frequency')
        parser.add_argument('--prob_kernel_size',
                            type=float,
                            nargs='+',
                            default=None,
                            help='saving frequency')
        # NOTE(review): default=(250000) is just the int 250000, not a
        # one-element tuple (missing trailing comma) — confirm intended.
        parser.add_argument('--prob_tiers',
                            type=int,
                            nargs='+',
                            default=(250000),
                            help='saving frequency')
        parser.add_argument('--far_thresh',
                            type=float,
                            default=-1.0,
                            help='cartisian distance for prob')
        parser.add_argument('--comb_file',
                            type=str,
                            default=None,
                            help='cartisian distance for prob')
        return parser
| 5,016 | 39.459677 | 84 | py |
pointnerf | pointnerf-master/options/base_options.py | import argparse
import os
from models import find_model_class_by_name
from data import find_dataset_class_by_name
import torch
class BaseOptions:
    """Shared command-line options for all experiments.

    Subclasses (train/test/edit options) extend :meth:`initialize` and
    set ``self.is_train``; :meth:`parse` drives the full pipeline:
    gather options, optionally timestamp the experiment name, persist
    the options to disk, and select the CUDA device.
    """

    def initialize(self, parser: argparse.ArgumentParser):
        """Register the global, dataset, MVS, model and runtime options."""
        #================================ global ================================#
        parser.add_argument('--experiment',
                            type=str,
                            required=True,
                            dest='name',
                            help='name of the experiment')
        parser.add_argument(
            '--verbose',
            action='store_true',
            help='if specified, print more debugging information')
        parser.add_argument(
            '--timestamp',
            action='store_true',
            help='suffix the experiment name with current timestamp')
        #================================ dataset ================================#
        parser.add_argument('--data_root',
                            type=str,
                            default=None,
                            help='path to the dataset storage')
        parser.add_argument(
            '--dataset_name',
            type=str,
            default=None,
            help='name of dataset, determine which dataset class to use')
        parser.add_argument(
            '--max_dataset_size',
            type=int,
            default=float("inf"),
            help='Maximum number of samples allowed per dataset.'
            'If the dataset directory contains more than max_dataset_size, only a subset is loaded.'
        )
        parser.add_argument('--n_threads',
                            default=1,
                            type=int,
                            help='# threads for loading data')
        #================================ MVS ================================#
        parser.add_argument('--geo_cnsst_num',
                            default=2,
                            type=int,
                            help='# threads for loading data')
        #================================ model ================================#
        parser.add_argument('--bgmodel',
                            default="No",
                            type=str,
                            help='No | sphere | plane')
        parser.add_argument(
            '--model',
            type=str,
            required=True,
            help='name of model, determine which network model to use')
        #================================ running ================================#
        parser.add_argument('--batch_size',
                            type=int,
                            default=1,
                            help='input batch size')
        parser.add_argument('--render_only',
                            type=int,
                            default=0,
                            help='1 for render_only dataset')
        parser.add_argument('--serial_batches',
                            type=int,
                            default=0,
                            help='feed batches in order without shuffling')
        parser.add_argument('--gpu_ids',
                            type=str,
                            default='0',
                            help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir',
                            type=str,
                            default='./checkpoints',
                            help='models are saved here')
        parser.add_argument('--show_tensorboard',
                            type=int,
                            default=0,
                            help='plot loss curves with tensorboard')
        parser.add_argument('--resume_dir',
                            type=str,
                            default='',
                            help='dir of the previous checkpoint')
        parser.add_argument('--resume_iter',
                            type=str,
                            default='latest',
                            help='which epoch to resume from')
        parser.add_argument('--debug',
                            action='store_true',
                            help='indicate a debug run')
        parser.add_argument('--vid',
                            type=int,
                            default=0,
                            help='feed batches in order without shuffling')
        parser.add_argument('--resample_pnts',
                            type=int,
                            default=-1,
                            help='resample the num. initial points')
        parser.add_argument('--inall_img',
                            type=int,
                            default=1,
                            help='all points must in the sight of all camera pose')
        parser.add_argument('--test_train', type=int, default=0, help='test on training set for debugging')
        return parser

    def gather_options(self):
        """Parse known args first, then let the selected model and
        dataset classes register their own options and parse again."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)

        opt, _ = parser.parse_known_args()

        model_name = opt.model
        find_model_class_by_name(model_name).modify_commandline_options(
            parser, self.is_train)

        dataset_name = opt.dataset_name
        if dataset_name is not None:
            find_dataset_class_by_name(
                dataset_name).modify_commandline_options(
                    parser, self.is_train)

        self.parser = parser

        return parser.parse_args()

    def print_and_save_options(self, opt):
        """Format all options (flagging non-default values) and write
        them to <checkpoints_dir>/<name>/opt.txt."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: {}]'.format(str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'

        # print(message)

        # if opt.is_train:
        #     expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        # else:
        #     expr_dir = os.path.join(opt.resume_dir, opt.name)
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        os.makedirs(expr_dir, exist_ok=True)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Gather and post-process options: optionally timestamp the
        experiment name, persist options, convert gpu_ids to a list of
        non-negative ints, and select the first GPU as CUDA device."""
        opt = self.gather_options()
        opt.is_train = self.is_train

        if opt.timestamp:
            import datetime
            now = datetime.datetime.now().strftime('%y-%m-%d_%H:%M:%S')
            opt.name = opt.name + '_' + now

        self.print_and_save_options(opt)

        str_ids = opt.gpu_ids.split(',')  # unused; kept as-is
        opt.gpu_ids = [
            int(x) for x in opt.gpu_ids.split(',') if x.strip() and int(x) >= 0
        ]
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])

        self.opt = opt
        return self.opt
| 7,159 | 38.125683 | 107 | py |
pointnerf | pointnerf-master/options/__init__.py | from .train_options import TrainOptions
from .test_options import TestOptions
from .edit_options import EditOptions
| 116 | 28.25 | 39 | py |
pointnerf | pointnerf-master/options/test_options.py | from .base_options import BaseOptions
class TestOptions(BaseOptions):
    """Test/inference-time command-line options (extends BaseOptions)."""

    def initialize(self, parser):
        """Register test-specific arguments and mark this configuration
        as non-training."""
        parser = BaseOptions.initialize(self, parser)
        self.is_train = False
        parser.add_argument('--test_num', type=int, default=1, help='test num')
        parser.add_argument('--test_num_step', type=int, default=1, help='test num')
        parser.add_argument('--test_printId', type=int, default=0, help='The first id (offset) when saving.')
        return parser
| 486 | 43.272727 | 109 | py |
pointnerf | pointnerf-master/options/edit_options.py | from .base_options import BaseOptions
class EditOptions(BaseOptions):
    """Scene-editing command-line options (extends BaseOptions)."""

    def initialize(self, parser):
        """Register editing-specific arguments (neural point sets,
        per-part transformations, render naming/sampling) and mark this
        configuration as non-training."""
        parser = BaseOptions.initialize(self, parser)
        self.is_train = False
        parser.add_argument('--neural_points_names',
                            type=str,
                            nargs='+',
                            # default=["imgfeat_0_0123", "vol"],
                            default=["imgfeat_0_0", "vol"],
                            help="which feature_map")
        parser.add_argument('--Transformation_names',
                            type=str,
                            nargs='+',
                            # default=["imgfeat_0_0123", "vol"],
                            default=["1", "2"],
                            help="which feature_map")
        parser.add_argument('--render_name',
                            type=str,
                            # default=["imgfeat_0_0123", "vol"],
                            default="tryout",
                            help="which feature_map")
        parser.add_argument('--parts_index_names',
                            type=str,
                            nargs='+',
                            # default=["imgfeat_0_0123", "vol"],
                            default=["1", "2"],
                            help="which feature_map")
        parser.add_argument('--render_stride',
                            type=int,
                            default=30,
                            help='feed batches in order without shuffling')
        parser.add_argument('--render_radius',
                            type=float,
                            default=4.0,
                            help='feed batches in order without shuffling')
        return parser
| 1,766 | 44.307692 | 75 | py |
pointnerf | pointnerf-master/models/base_rendering_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils import format as fmt
import os
from .base_model import BaseModel
from .rendering.diff_render_func import find_render_function, find_blend_function, find_tone_map, alpha_blend
from .rendering.diff_ray_marching import find_ray_generation_method, find_refined_ray_generation_method, ray_march, alpha_ray_march
from utils import format as fmt
from utils.spherical import SphericalHarm, SphericalHarm_table
from utils.util import add_property2dict
from torch.autograd import Variable
from PIL import Image
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in dB: -10*log10(mse)."""
    return -10. * torch.log(x) / np.log(10.)
class BaseRenderingModel(BaseModel):
''' A base rendering model that provides the basic loss functions,
selctions of different rendering functions, ray generation functions,
blending functions (for collocated and non-collocated ray marching),
and functions to setup encoder and decoders.
A sub model needs to at least re-implement create_network_models() and run_network_models() for actual rendering.
Examples are: hirarchical_volumetric_model etc.
The model collects
'''
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Register the rendering-model CLI options: loss items/weights,
        visualization items, output channels, ray generation, and the
        render/blend/tonemap function choices plus positional-encoding
        frequency counts."""
        # loss parameters
        parser.add_argument(
            "--sparse_loss_weight",
            type=float,
            default=0,
            help="The (multiple) output items to supervise with gt color.")
        parser.add_argument(
            "--color_loss_items",
            type=str,
            nargs='+',
            default=None,
            help="The (multiple) output items to supervise with gt color.")
        parser.add_argument(
            "--test_color_loss_items",
            type=str,
            nargs='+',
            default=None,
            help="The (multiple) output items to supervise with gt color.")
        parser.add_argument(
            "--color_loss_weights",
            type=float,
            nargs='+',
            default=[1.0],
            help=
            "The weights for each color supervision item. The number of this args should be 1 or match the number in --color_loss_items"
        )
        parser.add_argument(
            "--bg_loss_items",
            type=str,
            nargs='+',
            default=[],
            help="The (multiple) output items to supervise with gt masks.")
        parser.add_argument(
            "--bg_loss_weights",
            type=float,
            nargs='+',
            default=[1.0],
            help=
            "The weights for each mask supervision item. The number of this args should be 1 or match the number in --bg_loss_items"
        )
        parser.add_argument(
            "--depth_loss_items",
            type=str,
            nargs='+',
            default=[],
            help="The (multiple) output items to supervise with gt depth.")
        parser.add_argument(
            "--depth_loss_weights",
            type=float,
            nargs='+',
            default=[1.0],
            help=
            "The weights for each depth supervision item. The number of this args should be 1 or match the number in --depth_loss_items"
        )
        parser.add_argument(
            "--zero_one_loss_items",
            type=str,
            nargs='+',
            default=[],
            help=
            "The (multiple) output items to regularize to be close to either 0 or 1 ."
        )
        parser.add_argument(
            "--zero_one_loss_weights",
            type=float,
            nargs='+',
            default=[1.0],
            help=
            "The weights for each zero_one regularization item. The number of this args should be 1 or match the number in --zero_one_loss_items"
        )
        parser.add_argument(
            "--l2_size_loss_items",
            type=str,
            nargs='+',
            default=[],
            help=
            "The (multiple) output items to regularize to be close to either 0 or 1 ."
        )
        parser.add_argument(
            "--l2_size_loss_weights",
            type=float,
            nargs='+',
            default=[0.0],
            help=
            "The weights for each zero_one regularization item. The number of this args should be 1 or match the number in --zero_one_loss_items"
        )
        parser.add_argument(
            "--zero_epsilon",
            type=float,
            default=1e-3,
            help="epsilon in logarithmic regularization terms when needed.",
        )
        parser.add_argument(
            "--no_loss",
            type=int,
            default=False,
            help="do not compute loss.",
        )
        # visualization terms
        parser.add_argument(
            "--visual_items",
            type=str,
            nargs='*',
            default=None,
            help=
            "The (multiple) output items to show as images. This will replace the default visual items"
        )
        parser.add_argument(
            "--visual_items_additional",
            type=str,
            nargs='+',
            default=[],
            help=
            "The (multiple) output items to show as images in addition to default items. This is ignored if --visual_iterms is used"
        )
        parser.add_argument(
            '--out_channels',
            type=int,
            default=None,
            help=
            'number of output channels in decoder; default 4 for radiance, 8 for microfacet and others'
        )
        # ray generation
        parser.add_argument(
            '--which_ray_generation',
            type=str,
            default='cube',
            help='which ray point generation method to use [cube]')
        parser.add_argument('--domain_size',
                            type=int,
                            default=1,
                            help='Size of the ray marching domain')
        # rendering functions
        parser.add_argument('--which_render_func',
                            type=str,
                            default='microfacet',
                            help='which render method to use')
        parser.add_argument(
            '--which_blend_func',
            type=str,
            default='alpha',
            help=
            'which blend function to use. Hint: alpha2 for collocated, alpha for non-collocated'
        )
        parser.add_argument('--which_tonemap_func',
                            type=str,
                            default='gamma',
                            help='which tone map function to use.')
        # positional-encoding frequency counts for the decoders
        parser.add_argument(
            '--num_pos_freqs',
            type=int,
            default=-1,
            help=
            'number of frequency for position encoding if using nerf or mixed mlp decoders'
        )
        parser.add_argument(
            '--num_viewdir_freqs',
            type=int,
            default=-1,
            help=
            'number of frequency for view direction encoding if using nerf decoders'
        )
        parser.add_argument(
            '--num_feature_freqs',
            type=int,
            default=-1,
            help=
            'number of frequency for feature encoding if using mixed mlp decoders'
        )
        return parser
def add_default_color_losses(self, opt):
''' if no color loss terms are specified, this function is called to
add default supervision into opt.color_loss_items
'''
opt.color_loss_items = [] # add this to actual names in subclasses
def add_default_visual_items(self, opt):
''' if no visual terms are specified, this function is called to
add default visualization items
'''
opt.visual_items = ['gt_image'
] # add this to actual names in subclasses
    def check_setup_loss(self, opt):
        ''' this function check and setup all loss items and weights.'''
        # For each loss group (color / depth / zero-one / background) the
        # weight list must either have exactly one entry (broadcast to all
        # items of that group) or match the item list length; otherwise the
        # process aborts via exit().
        self.loss_names = ['total']
        if not opt.color_loss_items:
            self.add_default_color_losses(opt)
        if len(opt.color_loss_weights) != 1 and len(
                opt.color_loss_weights) != len(opt.color_loss_items):
            print(fmt.RED + "color_loss_weights does not match loss items" +
                  fmt.END)
            exit()
        # Broadcast a single color weight across all color items.
        if len(opt.color_loss_weights) == 1 and len(opt.color_loss_items) > 1:
            opt.color_loss_weights = np.ones(len(
                opt.color_loss_items), np.float32) * opt.color_loss_weights[0]
        self.loss_names += opt.color_loss_items
        if len(opt.depth_loss_weights) != 1 and len(
                opt.depth_loss_weights) != len(opt.depth_loss_items):
            print(fmt.RED + "color_depth_weights does not match loss items" +
                  fmt.END)
            exit()
        if len(opt.depth_loss_weights) == 1 and len(opt.depth_loss_items) > 1:
            opt.depth_loss_weights = np.ones(len(
                opt.depth_loss_items), np.float32) * opt.depth_loss_weights[0]
        self.loss_names += opt.depth_loss_items
        if len(opt.zero_one_loss_weights) != len(
                opt.zero_one_loss_items) and len(
                    opt.zero_one_loss_weights) != 1:
            print(fmt.RED + "zero_one_loss_weights does not match loss items" +
                  fmt.END)
            exit()
        if len(opt.zero_one_loss_weights) == 1 and len(
                opt.zero_one_loss_items) > 1:
            opt.zero_one_loss_weights = np.ones(
                len(opt.zero_one_loss_items),
                np.float32) * opt.zero_one_loss_weights[0]
        self.loss_names += opt.zero_one_loss_items
        if len(opt.bg_loss_weights) != 1 and len(opt.bg_loss_weights) != len(
                opt.bg_loss_items):
            print(fmt.RED + "bg_loss_weights does not match loss items" +
                  fmt.END)
            exit()
        if len(opt.bg_loss_weights) == 1 and len(opt.bg_loss_items) > 1:
            opt.bg_loss_weights = np.ones(len(opt.bg_loss_items),
                                          np.float32) * opt.bg_loss_weights[0]
        self.loss_names += opt.bg_loss_items
        if opt.sparse_loss_weight > 0:
            self.loss_names += ["sparse"]
        # add the functions used in losses
        self.l1loss = torch.nn.L1Loss().to(self.device)
        self.l2loss = torch.nn.MSELoss().to(self.device)
def check_setup_visuals(self, opt):
if opt.visual_items is None:
print("visual_items not ", opt.visual_items)
self.add_default_visual_items(opt)
self.visual_names += opt.visual_items
self.visual_names += opt.visual_items_additional
else:
self.visual_names += opt.visual_items
if len(self.visual_names) == 0:
print(fmt.YELLOW + "No items are visualized" + fmt.END)
def create_network_models(self, opt):
'''
This function should create the rendering networks.
Every subnetwork model needs to be named as self.net_"name",
and the "name" needs to be added to the self.model_names list.
An example of this is like:
self.model_names = ['ray_marching']
self.net_ray_marching = network_torch_model(self.opt)
if self.opt.gpu_ids:
self.net_ray_marching.to(self.device)
self.net_ray_marching = torch.nn.DataParallel(
self.net_ray_marching, self.opt.gpu_ids)
'''
pass
def run_network_models(self):
'''
This function defines how the network is run.
This function should use the self.input as input to the network.
and return a dict of output (that will be assign to self.output).
If only a sinlge network is used, this function could be simply just:
return net_module(**self.input)
'''
raise NotImplementedError()
    def prepare_network_parameters(self, opt):
        '''
        Setup the parameters the network is needed.
        By default, it finds rendering (shading) function, ray generation function, tonemap function, etc.
        '''
        self.check_setup_loss(opt)
        # loss_names always contains 'total'; length 1 means no real loss
        # terms were registered, which is fatal when training.
        if len(self.loss_names) == 1 and opt.is_train == True:
            print(fmt.RED + "Requiring losses to train" + fmt.END)
            raise NotImplementedError()
        self.check_setup_visuals(opt)
        self.check_setup_renderFunc_channels(opt)
        # Resolve the pluggable rendering-pipeline functions by name.
        self.blend_func = find_blend_function(opt.which_blend_func)
        self.raygen_func = find_ray_generation_method(opt.which_ray_generation)
        self.tonemap_func = find_tone_map(opt.which_tonemap_func)
        # Collect the resolved callables into a dict for easy forwarding.
        self.found_funcs = {}
        add_property2dict(
            self.found_funcs, self,
            ["blend_func", "raygen_func", "tonemap_func", "render_func"])
def setup_optimizer(self, opt):
'''
Setup the optimizers for all networks.
This assumes network modules have been added to self.model_names
By default, it uses an adam optimizer for all parameters.
'''
params = []
for name in self.model_names:
net = getattr(self, 'net_' + name)
params = params + list(net.parameters())
self.optimizers = []
self.optimizer = torch.optim.Adam(params,
lr=opt.lr,
betas=(0.9, 0.999))
self.optimizers.append(self.optimizer)
def check_opts(self, opt):
pass
    def initialize(self, opt):
        """Standard init: options, losses/visuals, networks, then optimizer."""
        super(BaseRenderingModel, self).initialize(opt)
        self.opt = opt
        if self.is_train:
            self.check_opts(opt)
        self.prepare_network_parameters(opt)
        self.create_network_models(opt)
        #check model creation: every name in self.model_names must have a
        # matching net_<name> attribute created by the subclass.
        if not self.model_names:
            print(
                fmt.RED +
                "No network is implemented! Or network's name is not properly added to self.model_names"
                + fmt.END)
            raise NotImplementedError()
        for mn in self.model_names:
            if not hasattr(self, "net_" + mn):
                print(fmt.RED + "Network " + mn + " is missing" + fmt.END)
                raise NotImplementedError()
        # setup optimizer
        if self.is_train:
            self.setup_optimizer(opt)
def set_input(self, input):
# setup self.input
# this dict is supposed to be sent the network via **self.input in run_network_modules
self.input = input
for key, item in self.input.items():
if isinstance(item, torch.Tensor):
self.input[key] = item.to(self.device)
# gt required in loss compute
self.gt_image = self.input['gt_image'].to(
self.device) if 'gt_image' in input else None
self.gt_depth = self.input['gt_depth'].to(
self.device) if 'gt_depth' in input else None
self.gt_mask = self.input['gt_mask'].to(
self.device) if 'gt_mask' in input else None
def set_visuals(self):
for key, item in self.output.items():
if key in self.visual_names:
setattr(self, key, item)
if "coarse_raycolor" not in self.visual_names:
key = "coarse_raycolor"
setattr(self, key, self.output[key])
    def check_setup_renderFunc_channels(self, opt):
        ''' Find render functions;
        the function is often used by subclasses when creating rendering networks.
        '''
        self.render_func = find_render_function(opt.which_render_func)
        # Each shading model implies a default decoder output width when
        # opt.out_channels was not set explicitly.
        if opt.which_render_func == 'radiance':
            if opt.out_channels is None:
                opt.out_channels = 4
        elif opt.which_render_func == 'microfacet':
            if opt.out_channels is None:
                opt.out_channels = 8
        elif opt.which_render_func == 'harmonics':
            if opt.out_channels is None:
                opt.out_channels = 1 + 3 * 5 * 5
            # Channel count must be 1 + 3 * deg^2 to form a valid spherical
            # harmonics basis of degree `deg`.
            deg = int(((opt.out_channels - 1) / 3)**0.5)
            if 1 + deg * deg * 3 != opt.out_channels:
                print(
                    fmt.RED +
                    '[Error] output channels should match the number of sh basis'
                    + fmt.END)
                exit()
            # Low degrees use a precomputed table; higher ones compute at runtime.
            if deg <= 5:
                print("using SH table")
                self.shcomputer = SphericalHarm_table(deg)
            else:
                print("using runtime SH")
                self.shcomputer = SphericalHarm(deg)
            self.render_func.sphericalHarm = self.shcomputer
        else:
            if opt.out_channels is None:
                opt.out_channels = 8
        self.out_channels = opt.out_channels
    def check_getDecoder(self, opt, **kwargs):
        '''construct a decoder; this is often used by subclasses when creating networks.'''
        # Factory: dispatch on opt.which_decoder_model. Every branch forwards
        # opt.out_channels plus any extra kwargs to the selected decoder class.
        decoder = None
        if opt.which_decoder_model == 'mlp':
            decoder = MlpDecoder(num_freqs=opt.num_pos_freqs,
                                 out_channels=opt.out_channels,
                                 **kwargs)
        elif opt.which_decoder_model == 'viewmlp':
            decoder = ViewMlpDecoder(num_freqs=opt.num_pos_freqs,
                                     num_viewdir_freqs=opt.num_viewdir_freqs,
                                     num_channels=opt.out_channels,
                                     **kwargs)
        elif opt.which_decoder_model == 'viewmlpsml':
            decoder = ViewMlpSmlDecoder(num_freqs=opt.num_pos_freqs,
                                        num_viewdir_freqs=opt.num_viewdir_freqs,
                                        num_channels=opt.out_channels,
                                        **kwargs)
        elif opt.which_decoder_model == 'viewmlpmid':
            decoder = ViewMlpMidDecoder(num_freqs=opt.num_pos_freqs,
                                        num_viewdir_freqs=opt.num_viewdir_freqs,
                                        num_channels=opt.out_channels,
                                        **kwargs)
        elif opt.which_decoder_model == 'nv_mlp':
            decoder = VolumeDecoder(256,
                                    template_type=opt.nv_template_type,
                                    template_res=opt.nv_resolution,
                                    out_channels=opt.out_channels,
                                    **kwargs)
        elif opt.which_decoder_model == 'discrete_microfacet':
            decoder = DiscreteVolumeMicrofacetDecoder(
                opt.discrete_volume_folder,
                out_channels=opt.out_channels,
                **kwargs)
        elif opt.which_decoder_model == 'discrete_general':
            decoder = DiscreteVolumeGeneralDecoder(
                opt.discrete_volume_folder,
                out_channels=opt.out_channels,
                **kwargs)
        elif opt.which_decoder_model == 'mixed_mlp':
            decoder = MixedDecoder(256,
                                   template_type=opt.nv_template_type,
                                   template_res=opt.nv_resolution,
                                   mlp_channels=128,
                                   out_channels=opt.out_channels,
                                   position_freqs=opt.num_pos_freqs,
                                   feature_freqs=opt.num_feature_freqs,
                                   **kwargs)
        elif opt.which_decoder_model == 'mixed_separate_code':
            decoder = MixedSeparatedDecoder(
                256,
                template_type=opt.nv_template_type,
                template_res=opt.nv_resolution,
                mlp_channels=128,
                out_channels=opt.out_channels,
                position_freqs=opt.num_pos_freqs,
                feature_freqs=opt.num_feature_freqs,
                **kwargs)
        else:
            raise RuntimeError('Unknown decoder model: ' +
                               opt.which_decoder_model)
        return decoder
def forward(self):
self.output = self.run_network_models()
self.set_visuals()
if not self.opt.no_loss:
self.compute_losses()
def save_image(self, img_array, filepath):
assert len(img_array.shape) == 2 or (len(img_array.shape) == 3
and img_array.shape[2] in [3, 4])
if img_array.dtype != np.uint8:
img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
Image.fromarray(img_array).save(filepath)
    def compute_losses(self):
        ''' Compute loss functions.
            The total loss is saved in self.loss_total.
            Every loss will be set to an attr, self.loss_lossname
        '''
        self.loss_total = 0
        opt = self.opt
        #color losses
        for i, name in enumerate(opt.color_loss_items):
            if name.startswith("ray_masked"):
                # Supervise only rays that hit neural points (ray_mask > 0);
                # empty selections contribute a zero loss.
                unmasked_name = name[len("ray_masked")+1:]
                masked_output = torch.masked_select(self.output[unmasked_name], (self.output["ray_mask"] > 0)[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)
                masked_gt = torch.masked_select(self.gt_image, (self.output["ray_mask"] > 0)[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)
                if masked_output.shape[1] > 0:
                    loss = self.l2loss(masked_output, masked_gt)
                else:
                    loss = torch.tensor(0.0, dtype=torch.float32, device=masked_output.device)
                # print("loss", name, torch.max(torch.abs(loss)))
            elif name.startswith("ray_miss"):
                # Supervise rays that missed all points (ray_mask == 0);
                # the loss is scaled by the number of missed rays.
                unmasked_name = name[len("ray_miss") + 1:]
                masked_output = torch.masked_select(self.output[unmasked_name],
                                                    (self.output["ray_mask"] == 0)[..., None].expand(-1, -1, 3)).reshape(
                    1, -1, 3)
                masked_gt = torch.masked_select(self.gt_image,(self.output["ray_mask"] == 0)[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)
                if masked_output.shape[1] > 0:
                    loss = self.l2loss(masked_output, masked_gt) * masked_gt.shape[1]
                else:
                    loss = torch.tensor(0.0, dtype=torch.float32, device=masked_output.device)
            elif name.startswith("ray_depth_masked"):
                # Supervise only rays whose ground-truth depth is valid,
                # looked up through the per-ray pixel indices.
                pixel_xy = self.input["pixel_idx"][0].long()
                ray_depth_mask = self.output["ray_depth_mask"][0][pixel_xy[...,1], pixel_xy[...,0]] > 0
                unmasked_name = name[len("ray_depth_masked")+1:]
                masked_output = torch.masked_select(self.output[unmasked_name], (ray_depth_mask[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
                masked_gt = torch.masked_select(self.gt_image, (ray_depth_mask[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
                loss = self.l2loss(masked_output, masked_gt)
                # print("loss", loss)
                # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_coarse_raycolor")
                # filepath = os.path.join(
                #     "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
                # csave = torch.zeros((1, 512, 640, 3))
                # ray_masks = (self.output["ray_mask"] > 0).reshape(1, -1)
                # pixel_xy = self.input["pixel_idx"].reshape(1, -1, 2)[ray_masks, :]
                # # print("masked_output", masked_output.shape, pixel_xy.shape)
                # csave[:, pixel_xy[..., 1].long(), pixel_xy[..., 0].long(), :] = masked_output.cpu()
                # img = csave.view(512, 640, 3).detach().numpy()
                # self.save_image(img, filepath)
                #
                # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_gt")
                # filepath = os.path.join(
                #     "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
                # csave = torch.zeros((1, 512, 640, 3))
                # ray_masks = (self.output["ray_mask"] > 0).reshape(1, -1)
                # pixel_xy = self.input["pixel_idx"].reshape(1, -1, 2)[ray_masks, :]
                # # print("masked_output", masked_output.shape, pixel_xy.shape)
                # csave[:, pixel_xy[..., 1].long(), pixel_xy[..., 0].long(), :] = masked_gt.cpu()
                # img = csave.view(512, 640, 3).detach().numpy()
                # self.save_image(img, filepath)
                # print("psnrkey recal:",mse2psnr(torch.nn.MSELoss().to("cuda")(masked_output, masked_gt)) )
            else:
                if name not in self.output:
                    print(fmt.YELLOW + "No required color loss item: " + name +
                          fmt.END)
                    # NOTE(review): execution still falls through to the
                    # self.output[name] lookup below and will raise KeyError
                    # when the item is missing — confirm intended.
                # print("no_mask")
                loss = self.l2loss(self.output[name], self.gt_image)
                # print("loss", name, torch.max(torch.abs(loss)))
            self.loss_total += (loss * opt.color_loss_weights[i] + 1e-6)
            # loss.register_hook(lambda grad: print(torch.any(torch.isnan(grad)), grad, opt.color_loss_weights[i]))
            setattr(self, "loss_" + name, loss)
        # print(torch.sum(self.output["ray_mask"]))
        #depth losses: masked MSE against gt depth inside the gt mask
        for i, name in enumerate(opt.depth_loss_items):
            if name not in self.output:
                print(fmt.YELLOW + "No required depth loss item: " + name +
                      fmt.END)
            loss = self.l2loss(self.output[name] * self.gt_mask,
                               self.gt_depth * self.gt_mask)
            self.loss_total += loss * opt.depth_loss_weights[i]
            setattr(self, "loss_" + name, loss)
        #background losses: push predictions outside the mask towards 1
        for i, name in enumerate(opt.bg_loss_items):
            if name not in self.output:
                print(fmt.YELLOW + "No required mask loss item: " + name +
                      fmt.END)
            loss = self.l2loss(self.output[name] * (1 - self.gt_mask),
                               1 - self.gt_mask)
            self.loss_total += loss * opt.bg_loss_weights[i]
            setattr(self, "loss_" + name, loss)
        #zero_one regularization losses: log-barrier pushing values to {0, 1}
        for i, name in enumerate(opt.zero_one_loss_items):
            if name not in self.output:
                print(fmt.YELLOW + "No required zero_one loss item: " + name +
                      fmt.END)
                # setattr(self, "loss_" + name, torch.zeros([1], device="cuda", dtype=torch.float32))
            else:
                # Clamp away from 0/1 to keep the logs finite.
                val = torch.clamp(self.output[name], self.opt.zero_epsilon,
                                  1 - self.opt.zero_epsilon)
                # print("self.output[name]",torch.min(self.output[name]), torch.max(self.output[name]))
                loss = torch.mean(torch.log(val) + torch.log(1 - val))
                self.loss_total += loss * opt.zero_one_loss_weights[i]
                setattr(self, "loss_" + name, loss)
        # l2 square regularization losses
        for i, name in enumerate(opt.l2_size_loss_items):
            if name not in self.output:
                print(fmt.YELLOW + "No required l2_size_loss_item : " + name + fmt.END)
            loss = self.l2loss(self.output[name], torch.zeros_like(self.output[name]))
            # print("self.output[name]", self.output[name].shape, loss.shape)
            self.loss_total += loss * opt.l2_size_loss_weights[i]
            setattr(self, "loss_" + name, loss)
        if opt.sparse_loss_weight > 0:
            # weight and conf_coefficient 1, 1134, 40, 8
            if "weight" not in self.output or "conf_coefficient" not in self.output:
                print(fmt.YELLOW + "No required sparse_loss_weight weight or conf_coefficient : " + fmt.END)
            loss = torch.sum(self.output["weight"] * torch.abs(1 - torch.exp(-2 * self.output["conf_coefficient"]))) / (torch.sum(self.output["weight"]) + 1e-6)
            # print("self.output[name]", self.output[name].shape, loss.shape)
            # These intermediates are consumed here and removed from output.
            self.output.pop('weight')
            self.output.pop('conf_coefficient')
            self.loss_total += loss * opt.sparse_loss_weight
            setattr(self, "loss_sparse", loss)
        # self.loss_total = Variable(self.loss_total, requires_grad=True)
def backward(self):
self.optimizer.zero_grad()
if self.opt.is_train:
self.loss_total.backward()
self.optimizer.step()
def optimize_parameters(self, backward=True, total_steps=0):
self.forward()
self.backward()
| 28,653 | 41.45037 | 160 | py |
pointnerf | pointnerf-master/models/mvs_points_volumetric_model.py | from .base_rendering_model import *
from .neural_points_volumetric_model import NeuralPointsVolumetricModel
from .neural_points.neural_points import NeuralPoints
from .mvs.mvs_points_model import MvsPointsModel
from .mvs import mvs_utils
from. import base_model
from .aggregators.point_aggregators import PointAggregator
import os
import torch.nn.functional as F
import time
from utils import format as fmt
class MvsPointsVolumetricModel(NeuralPointsVolumetricModel):
    """Point-NeRF volumetric renderer fed by an MVS point-generation network.

    ``opt.mode`` selects what is instantiated and run:
    0 = both MVS and point-NeRF, 1 = MVS only, 2 = point-NeRF only.
    """
    def __init__(self,):
        super().__init__()
        # Pre-declare every attribute used across the training loop so that
        # cleanup() can delete them unconditionally later.
        self.optimizer, self.neural_point_optimizer, self.output, self.raygen_func, self.render_func, self.blend_func, self.coarse_raycolor, self.gt_image, self.input, self.l1loss, self.l2loss, self.tonemap_func, self.top_ray_miss_ids, self.top_ray_miss_loss, self.loss_ray_masked_coarse_raycolor, self.loss_ray_miss_coarse_raycolor, self.loss_total, self.loss_coarse_raycolor, self.loss_conf_coefficient = None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
    #
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        # Merge the CLI options of both sub-models, then add combo options.
        MvsPointsModel.modify_commandline_options(parser, is_train)
        NeuralPointsVolumetricModel.modify_commandline_options(parser, is_train=is_train)
        parser.add_argument(
            '--mode',
            type=int,
            default=0,
            help='0 for both mvs and pointnerf, 1 for only mvs, 2 for only pointnerf')
        parser.add_argument(
            '--add_shading_dist',
            type=int,
            default=0,
            help='0 for both mvs and pointnerf, 1 for only mvs, 2 for only pointnerf')
    def create_network_models(self, opt):
        """Create net_mvs (unless mode 2) and the renderer nets (unless mode 1)."""
        if opt.mode != 2:
            self.net_mvs = MvsPointsModel(opt).to(self.device)
            self.model_names = ['mvs']
        if opt.mode != 1:
            super(MvsPointsVolumetricModel, self).create_network_models(opt)
    def setup_optimizer(self, opt):
        '''
        Setup the optimizers for all networks.
        This assumes network modules have been added to self.model_names
        By default, it uses an adam optimizer for all parameters.
        '''
        # Parameters split into three groups with independent Adam optimizers
        # and learning rates: MVS net (mvs_lr), renderer net (lr), and the
        # neural-point parameters (plr).
        net_params = []
        neural_params = []
        mvs_params = []
        self.optimizers = []
        for name in self.model_names:
            net = getattr(self, 'net_' + name)
            if name == "mvs":
                # print([[par[0], torch.typename(par[1])] for par in net.named_parameters()])
                param_lst = list(net.named_parameters())
                mvs_params = mvs_params + [par[1] for par in param_lst]
            else:
                param_lst = list(net.named_parameters())
                net_params = net_params + [par[1] for par in param_lst if not par[0].startswith("module.neural_points")]
                neural_params = neural_params + [par[1] for par in param_lst if par[0].startswith("module.neural_points")]
        self.net_params = net_params
        self.neural_params = neural_params
        self.mvs_params = mvs_params
        mvs_lr = opt.mvs_lr if opt.mvs_lr is not None else opt.lr
        if len(mvs_params) > 0:
            self.mvs_optimizer = torch.optim.Adam(mvs_params,
                                                  lr=mvs_lr,
                                                  betas=(0.9, 0.999))
            self.optimizers.append(self.mvs_optimizer)
        if len(net_params) > 0:
            self.optimizer = torch.optim.Adam(net_params,
                                              lr=opt.lr,
                                              betas=(0.9, 0.999))
            self.optimizers.append(self.optimizer)
        if len(neural_params) > 0:
            self.neural_point_optimizer = torch.optim.Adam(neural_params,
                                                           lr=opt.plr, #/ 5.0,
                                                           betas=(0.9, 0.999))
            self.optimizers.append(self.neural_point_optimizer)
            print("neural_params", [(par[0], par[1].shape, par[1].requires_grad) for par in param_lst if par[0].startswith("module.neural_points")])
        else:
            # When not doing per-scene optimization
            print("no neural points as nn.Parameter")
    def backward(self, iters):
        """Zero all grads, backprop loss_total, step per alternation schedule."""
        [optimizer.zero_grad() for optimizer in self.optimizers]
        if self.opt.is_train:
            # print("self.loss_total", self.ray_masked_coarse_color.grad)
            # print("self.loss_total", self.loss_total)
            if self.loss_total != 0:
                self.loss_total.backward()
            else:
                print(fmt.RED + "Loss == 0" +
                      fmt.END)
        # opt.alter_step > 0 alternates which optimizer steps each iteration;
        # 0 means both step every time.
        if self.opt.feedforward:
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 0:
                self.optimizer.step()
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 1:
                self.mvs_optimizer.step()
        else:
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 0:
                self.optimizer.step()
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 1:
                self.neural_point_optimizer.step()
    def forward(self):
        """Optionally regenerate neural points via MVS, render, compute losses."""
        if self.opt.mode != 2:
            points_xyz, points_embedding, points_colors, points_dirs, points_conf = self.net_mvs(self.input)
            # print("volume_feature", volume_feature.shape)
            self.neural_points.set_points(points_xyz, points_embedding, points_color=points_colors, points_dir=points_dirs, points_conf=points_conf, parameter=self.opt.feedforward==0) # if feedforward, no neural points optimization
        self.output = self.run_network_models()
        if "depths_h" in self.input:
            depth_gt = self.input["depths_h"][:,self.opt.trgt_id,...] if self.input["depths_h"].dim() > 3 else self.input["depths_h"]
            self.output["ray_depth_mask"] = depth_gt > 0
        self.set_visuals()
        if not self.opt.no_loss:
            self.compute_losses()
    def update_rank_ray_miss(self, total_steps):
        """Track the worst ray-miss losses used by the probing schedule."""
        if (self.opt.prob_kernel_size is None or np.sum(np.asarray(self.opt.prob_tiers) < total_steps) < (len(self.opt.prob_kernel_size) // 3)):
            if self.opt.prob_freq > 0 and self.opt.prob_num_step > 1:
                self.top_ray_miss_loss, self.top_ray_miss_ids = self.rank_ray_miss(self.input["id"][0], self.loss_ray_miss_coarse_raycolor, self.top_ray_miss_ids, self.top_ray_miss_loss)
            elif self.opt.prob_freq > 0 and self.opt.prob_num_step == 1:
                self.top_ray_miss_loss[0] = max(self.loss_ray_miss_coarse_raycolor, self.top_ray_miss_loss[0])
    def rank_ray_miss(self, new_id, newloss, inds, losses):
        """Insert/update (new_id, newloss) keeping losses sorted descending."""
        with torch.no_grad():
            mask = (inds - new_id) == 0
            if torch.sum(mask) > 0:
                losses[mask] = max(newloss, losses[mask])
            else:
                # Replace the smallest entry, then re-sort ids along with losses.
                inds[-1] = new_id
                losses[-1] = newloss
            losses, indices = torch.sort(losses, descending=True)
            inds = inds[indices]
        return losses, inds
    def setup(self, opt, train_len=None):
        """Base setup plus allocation of the ray-miss probing buffers."""
        super(MvsPointsVolumetricModel, self).setup(opt)
        if opt.prob_freq > 0 and train_len is not None and opt.prob_num_step > 1:
            self.num_probe = train_len // opt.prob_num_step
            self.reset_ray_miss_ranking()
        elif opt.prob_freq > 0 and train_len is not None and opt.prob_num_step == 1:
            self.top_ray_miss_loss=torch.zeros([1], dtype=torch.float32, device=self.device)
    def reset_ray_miss_ranking(self):
        """Reset the ray-miss loss table (one slot per probe plus a sentinel)."""
        self.top_ray_miss_loss = torch.zeros([self.num_probe + 1], dtype=torch.float32, device=self.device)
        self.top_ray_miss_ids = torch.arange(self.num_probe + 1, dtype=torch.int32, device=self.device)
    def set_points(self, points_xyz, points_embedding, points_color=None, points_dir=None, points_conf=None, Rw2c=None, eulers=None, editing=False):
        """Install neural points; rebuild optimizers for per-scene training."""
        if not editing:
            self.neural_points.set_points(points_xyz, points_embedding, points_color=points_color, points_dir=points_dir, points_conf=points_conf, parameter=self.opt.feedforward == 0, Rw2c=Rw2c, eulers=eulers)
        else:
            self.neural_points.editing_set_points(points_xyz, points_embedding, points_color=points_color, points_dir=points_dir, points_conf=points_conf, parameter=self.opt.feedforward == 0, Rw2c=Rw2c, eulers=eulers)
        if self.opt.feedforward == 0 and self.opt.is_train:
            self.setup_optimizer(self.opt)
    def prune_points(self, thresh):
        """Delegate point pruning to the neural point cloud."""
        self.neural_points.prune(thresh)
    def clean_optimizer_scheduler(self):
        """Release optimizers, schedulers and the cached parameter lists."""
        # self.neural_points.querier.clean_up()
        self.optimizers.clear()
        self.schedulers.clear()
        self.neural_params.clear()
        self.mvs_params.clear()
        # self.optimizer.cpu(), self.neural_point_optimizer.cpu()
        del self.optimizer, self.neural_point_optimizer, self.optimizers, self.schedulers, self.mvs_params, self.neural_params
    def reset_optimizer(self, opt):
        """Rebuild all optimizers from the current networks."""
        self.clean_optimizer()
        self.setup_optimizer(opt)
    def clean_optimizer(self):
        """Release optimizers and cached parameter lists (keeps schedulers)."""
        self.optimizers.clear()
        self.net_params.clear()
        self.neural_params.clear()
        self.mvs_params.clear()
        del self.optimizer, self.neural_point_optimizer, self.net_params, self.neural_params, self.mvs_params
    def clean_scheduler(self):
        """Release every LR scheduler."""
        for scheduler in self.schedulers:
            del scheduler
        self.schedulers.clear()
        del self.schedulers
    def init_scheduler(self, total_steps, opt):
        """Create LR schedulers and fast-forward them by total_steps."""
        self.schedulers = [
            base_model.get_scheduler(optim, opt) for optim in self.optimizers
        ]
        if total_steps > 0:
            for scheduler in self.schedulers:
                for i in range(total_steps):
                    scheduler.step()
    def reset_scheduler(self, total_steps, opt):
        """Replace existing schedulers and fast-forward them by total_steps."""
        self.schedulers.clear()
        self.schedulers = [
            base_model.get_scheduler(optim, opt) for optim in self.optimizers
        ]
        if total_steps > 0:
            for scheduler in self.schedulers:
                for i in range(total_steps):
                    scheduler.step()
    def gen_points(self):
        """Run the MVS net to produce candidate points plus camera metadata."""
        cam_xyz_lst, photometric_confidence_lst, point_mask_lst, HDWD, data_mvs, intrinsics_lst, extrinsics_lst = self.net_mvs.gen_points(self.input)
        # print("cam_xyz_lst", cam_xyz_lst[0].shape, torch.min(cam_xyz_lst[0].view(-1,3), dim=-2)[0], torch.max(cam_xyz_lst[0].view(-1,3), dim=-2)[0])
        # self.net_mvs.gen_bg_points(self.input)
        return cam_xyz_lst, photometric_confidence_lst, point_mask_lst, intrinsics_lst, extrinsics_lst, HDWD, data_mvs['c2ws'], data_mvs['w2cs'], self.input["intrinsics"], self.input["near_fars"]
    def query_embedding(self, HDWD, cam_xyz, photometric_confidence, imgs, c2ws, w2cs, intrinsics, cam_vid, pointdir_w=True):
        """Extract image features and query per-point embeddings via the MVS net."""
        img_feats = self.net_mvs.get_image_features(imgs)
        return self.net_mvs.query_embedding(HDWD, cam_xyz, photometric_confidence, img_feats, c2ws, w2cs, intrinsics, cam_vid, pointdir_w=pointdir_w)
    def grow_points(self, points_xyz, points_embedding, points_color, points_dir, points_conf):
        """Append new points to the neural point cloud."""
        self.neural_points.grow_points(points_xyz, points_embedding, points_color, points_dir, points_conf)
        # self.neural_points.reset_querier()
    def cleanup(self):
        """Free every network, parameter list and optimizer held by the model."""
        if hasattr(self, "neural_points"):
            self.neural_points.querier.clean_up()
            del self.neural_points.querier
            self.neural_points.cpu()
            del self.neural_points
        print("self.model_names", self.model_names)
        if hasattr(self, "net_ray_marching"):
            self.net_ray_marching.cpu()
            del self.net_ray_marching
        if hasattr(self, "net_mvs"):
            self.net_mvs.cpu()
            del self.net_mvs
        if hasattr(self, "net_params"):
            self.net_params.clear()
            del self.net_params
        if hasattr(self, "neural_params"):
            self.neural_params.clear()
            del self.neural_params
        if hasattr(self, "mvs_params"):
            self.mvs_params.clear()
            del self.mvs_params
        if hasattr(self, "aggregator"):
            self.aggregator.cpu()
            del self.aggregator
        if hasattr(self, "optimizers"):
            self.optimizers.clear()
            self.schedulers.clear()
            del self.optimizer, self.neural_point_optimizer, self.output, self.raygen_func, self.render_func, self.blend_func, self.coarse_raycolor, self.gt_image, self.input, self.l1loss, self.l2loss, self.tonemap_func, self.top_ray_miss_ids, self.top_ray_miss_loss, self.loss_ray_masked_coarse_raycolor, self.loss_ray_miss_coarse_raycolor, self.loss_total, self.loss_coarse_raycolor, self.loss_conf_coefficient
    def set_bg(self, xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, plane_color, fg_masks=None,**kwargs):
        """Warp source views onto a background section plane and merge colors."""
        warped_feats = []
        c2w = torch.eye(4, device="cuda", dtype=torch.float32)[None, ...] # c2w[:,0,...].cuda()
        count=0
        mask_lst = []
        fg_mask_lst = []
        for imgs, w2c, intrinsics, HDWD in zip(img_lst, w2cs_lst, intrinsics_all, HDWD_lst):
            # "fg_2d_masks", [1, 1, 512, 640]
            # c2w: 1, 3, 4, 4, w2c: 1, 3, 4, 4, intrinsics: 1, 3, 3
            HD, WD = HDWD[0], HDWD[1]
            w2c = w2c[:,0,...]
            warp = mvs_utils.homo_warp_nongrid # homo_warp_nongrid_occ if self.args.depth_occ > 0 else homo_warp_nongrid
            src_grid, mask, hard_id_xy = warp(c2w, w2c, intrinsics, xyz_world_sect_plane, HD, WD, filter=False, tolerate=0.1)
            hard_id_xy_valid = hard_id_xy[:, mask[0,:,0], :]
            if fg_masks is None:
                fg_mask = mvs_utils.homo_warp_fg_mask(c2w, w2c, intrinsics, self.neural_points.xyz[None,...], HD, WD, tolerate=0.1)
                fg_mask_lst.append(fg_mask)
            else:
                fg_mask = fg_masks[:,count,...]
            # Keep only plane samples not covered by foreground points.
            mask[0,mask[0,...,0].clone(),0] = (fg_mask[hard_id_xy_valid[0, ..., 1].long(), hard_id_xy_valid[
                0, ..., 0].long()] < 1)
            # src_grid: 1, 2032, 2
            src_grid = src_grid[:, mask[0,...,0], :]
            mask_lst.append(mask[0,...,0])
            warped_src_feat = mvs_utils.extract_from_2d_grid(imgs[0:1, ...], src_grid.cpu(), mask.cpu())
            warped_feats.append(warped_src_feat.cuda())
            count+=1
        # masks = ~torch.stack(mask_lst, dim = -1) # 2304, 16 fg
        warped_feats = torch.stack(warped_feats, dim=-2) # 1, 2304, 16, 3
        # Zero out samples that are not within +/- thresh of the plane color,
        # then keep the per-pixel maximum across views.
        thresh=0.03
        fit_mask = torch.prod(torch.logical_and(warped_feats >= (plane_color - thresh), (warped_feats <= plane_color + thresh)), dim=-1)
        nofit_feats_inds = (1-fit_mask).nonzero() # 1, 2304, 16
        warped_feats[0, nofit_feats_inds[...,1], nofit_feats_inds[...,2], :] = 0
        warped_feats = torch.max(warped_feats, dim=-2)[0]
        # NOTE(review): `fg_mask_lst is None` is never True (it is always a
        # list), so the stacked masks are never used; `fg_masks is None` was
        # likely intended — confirm.
        fg_masks = torch.stack(fg_mask_lst, dim=1) if fg_mask_lst is None else fg_masks
        return warped_feats, fg_masks #
    def load_networks(self, epoch):
        """Load each network's checkpoint ``<epoch>_net_<name>.pth`` if present."""
        for name, net in zip(self.model_names, self.get_networks()):
            assert isinstance(name, str)
            load_filename = '{}_net_{}.pth'.format(epoch, name)
            load_path = os.path.join(self.opt.resume_dir, load_filename)
            print('loading', name, " from ", load_path)
            if not os.path.isfile(load_path):
                print('cannot load', load_path)
                continue
            state_dict = torch.load(load_path, map_location=self.device)
            # For 'best' checkpoints, optionally override every point
            # confidence with opt.default_conf.
            if epoch=="best" and name == "ray_marching" and self.opt.default_conf > 0.0 and self.opt.default_conf <= 1.0 and self.neural_points.points_conf is not None:
                assert "neural_points.points_conf" not in state_dict
                state_dict["neural_points.points_conf"] = torch.ones_like(self.net_ray_marching.module.neural_points.points_conf) * self.opt.default_conf
            if isinstance(net, nn.DataParallel):
                net = net.module
            net.load_state_dict(state_dict, strict=False)
    def test(self, gen_points=False):
        """Gradient-free inference; optionally regenerate points first."""
        with torch.no_grad():
            if gen_points:
                self.forward()
            else:
                self.output = self.run_network_models()
                if "depths_h" in self.input:
                    depth_gt = self.input["depths_h"][:, self.opt.trgt_id, ...] if self.input["depths_h"].dim() > 3 else self.input["depths_h"]
                    self.output["ray_depth_mask"] = depth_gt > 0
                self.set_visuals()
                if not self.opt.no_loss:
                    self.compute_losses()
        return self.output
| 16,910 | 48.017391 | 519 | py |
pointnerf | pointnerf-master/models/base_model.py | import torch
from torch import nn
import os
from .helpers.networks import get_scheduler
class BaseModel:
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def name(self):
return self.__class__.__name__
    def initialize(self, opt):
        """Cache options, resolve the compute device, and reset bookkeeping lists."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        # First listed GPU id wins; fall back to CPU when none are given.
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0]) if self.
                                   gpu_ids else torch.device('cpu'))
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        # Let cuDNN benchmark kernel choices (fast for fixed-size inputs).
        torch.backends.cudnn.benchmark = True
        self.loss_names = []  # losses to report
        self.model_names = []  # models that will be used
        self.visual_names = []  # visuals to show at test time
def set_input(self, input: dict):
self.input = input
def forward(self):
'''Run the forward pass. Read from self.input, set self.output'''
raise NotImplementedError()
def setup(self, opt):
'''Creates schedulers if train, Load and print networks if resume'''
if self.is_train:
self.schedulers = [
get_scheduler(optim, opt) for optim in self.optimizers
]
if not self.is_train or opt.resume_dir:
print("opt.resume_iter!!!!!!!!!", opt.resume_iter)
self.load_networks(opt.resume_iter)
self.print_networks(opt.verbose)
def eval(self):
'''turn on eval mode'''
for net in self.get_networks():
net.eval()
def train(self):
for net in self.get_networks():
net.train()
def test(self):
with torch.no_grad():
self.forward()
def get_networks(self) -> [nn.Module]:
ret = []
for name in self.model_names:
assert isinstance(name, str)
net = getattr(self, 'net_{}'.format(name))
assert isinstance(net, nn.Module)
ret.append(net)
return ret
def get_current_visuals(self, data=None):
ret = {}
for name in self.visual_names:
assert isinstance(name, str)
if name not in ["gt_image_ray_masked", "ray_depth_masked_gt_image", "ray_depth_masked_coarse_raycolor", "ray_masked_coarse_raycolor"]:
ret[name] = getattr(self, name)
if "coarse_raycolor" not in self.visual_names:
ret["coarse_raycolor"] = getattr(self, "coarse_raycolor")
return ret
def get_current_losses(self):
ret = {}
for name in self.loss_names:
assert isinstance(name, str)
ret[name] = getattr(self, 'loss_' + name)
return ret
def save_networks(self, epoch, other_states={}, back_gpu=True):
for name, net in zip(self.model_names, self.get_networks()):
save_filename = '{}_net_{}.pth'.format(epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
try:
if isinstance(net, nn.DataParallel):
net = net.module
net.cpu()
torch.save(net.state_dict(), save_path)
if back_gpu:
net.cuda()
except Exception as e:
print("savenet:", e)
save_filename = '{}_states.pth'.format(epoch)
save_path = os.path.join(self.save_dir, save_filename)
torch.save(other_states, save_path)
def load_networks(self, epoch):
for name, net in zip(self.model_names, self.get_networks()):
print('loading pth')
assert isinstance(name, str)
load_filename = '{}_net_{}.pth'.format(epoch, name)
print("loading epoch, name", epoch, name)
load_path = os.path.join(self.opt.resume_dir, load_filename)
if not os.path.isfile(load_path):
print('cannot load', load_path)
continue
state_dict = torch.load(load_path, map_location=self.device)
if isinstance(net, nn.DataParallel):
net = net.module
net.load_state_dict(state_dict, strict=False)
def print_networks(self, verbose):
print('------------------- Networks -------------------')
for name, net in zip(self.model_names, self.get_networks()):
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network {}] Total number of parameters: {:.3f}M'.format(
name, num_params / 1e6))
print('------------------------------------------------')
def set_requires_grad(self, nets, requires_grad):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net:
for param in net.parameters():
param.requires_grad = requires_grad
def update_learning_rate(self, **kwargs):
for scheduler in self.schedulers:
scheduler.step()
for i, optim in enumerate(self.optimizers):
lr = optim.param_groups[0]['lr']
if "opt" in kwargs:
opt = kwargs["opt"]
if not opt.lr_policy.startswith("iter") or \
("total_steps" in kwargs and kwargs["total_steps"] % opt.print_freq == 0):
print('optimizer {}, learning rate = {:.7f}'.format(i + 1, lr))
else:
print('optimizer {}, learning rate = {:.7f}'.format(i + 1, lr))
| 5,600 | 34.675159 | 146 | py |
pointnerf | pointnerf-master/models/__init__.py | import importlib
from models.base_model import BaseModel
def find_model_class_by_name(model_name):
    """Import "models/<model_name>_model.py" and return its model class.

    The file must define a BaseModel subclass whose name, lowercased and
    with underscores removed, equals "<model_name>model". Exits the process
    (nonzero status) when no such class exists.
    """
    # Given the option --model [modelname],
    # the file "models/modelname_model.py"
    # will be imported.
    model_filename = "models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)

    # In the file, the class called ModelNameModel() will
    # be instantiated. It has to be a subclass of BaseModel,
    # and it is case-insensitive.
    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    for name, cls in modellib.__dict__.items():
        # Guard with isinstance(cls, type): a non-class attribute that happens
        # to share the target name would make issubclass() raise TypeError.
        if name.lower() == target_model_name.lower() \
                and isinstance(cls, type) and issubclass(cls, BaseModel):
            model = cls
            break  # module attribute names are unique; first match is the one

    if model is None:
        print(
            "In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase."
            % (model_filename, target_model_name))
        # BUGFIX: was exit(0), which reported success to the shell on a
        # fatal configuration error.
        exit(1)

    return model
def get_option_setter(model_name):
    """Return the modify_commandline_options hook of the named model class."""
    return find_model_class_by_name(model_name).modify_commandline_options
def create_model(opt):
    """Instantiate and initialize the model class selected by opt.model."""
    model_cls = find_model_class_by_name(opt.model)
    instance = model_cls()
    instance.initialize(opt)
    print("model [{}] was created".format(instance.name()))
    return instance
| 1,305 | 30.095238 | 109 | py |
pointnerf | pointnerf-master/models/neural_points_volumetric_model.py | from .base_rendering_model import *
from .neural_points.neural_points import NeuralPoints
from .aggregators.point_aggregators import PointAggregator
import os
# NOTE(review): indentation was lost in this copy; the nesting below is
# reconstructed from the control flow — confirm against the original file.
class NeuralPointsVolumetricModel(BaseRenderingModel):
    # Volumetric rendering model backed by a neural point cloud:
    # NeuralPoints yields per-ray point neighborhoods, PointAggregator decodes
    # them, and NeuralPointsRayMarching composites colors/opacities along rays.

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        # Register CLI options of all sub-modules, then model-specific ones.
        BaseRenderingModel.modify_commandline_options(parser, is_train)
        NeuralPoints.modify_commandline_options(parser, is_train)
        PointAggregator.modify_commandline_options(parser, is_train)

        parser.add_argument(
            '--neural_point_dir',
            type=str,
            default=None,
            help='alternative loading neural_point directory')

        parser.add_argument(
            '--embedding_size',
            type=int,
            default=-1,
            help='number of dimensions for latent code embedding')
        parser.add_argument(
            "--loss_embedding_l2_weight",
            type=float,
            default=-1,
            help="weight for the embedding l2 loss",
        )
        parser.add_argument('--loss_kld_weight',
                            type=float,
                            default=-1,
                            help='weight for the VAE kld')

        # encoder
        parser.add_argument(
            "--compute_depth",
            type=int,
            default=0,
            help=
            "If compute detph or not. If false, depth is only computed when depth is required by losses",
        )

        parser.add_argument(
            "--raydist_mode_unit",
            type=int,
            default=0,
            help="if set raydist max as one voxel",
        )
        parser.add_argument(
            '--save_point_freq',
            type=int,
            default=100000,
            help='frequency of showing training results on console')
        parser.add_argument(
            '--alter_step',
            type=int,
            default=0,
            help='0 for no alter,')
        parser.add_argument(
            '--prob',
            type=int,
            default=0,
            help='will be set as 0 for normal traing and 1 for prob, ')

    def add_default_color_losses(self, opt):
        # Ensure the coarse (and fine, if sampled) ray colors are supervised.
        if "coarse_raycolor" not in opt.color_loss_items:
            opt.color_loss_items.append('coarse_raycolor')
        if opt.fine_sample_num > 0:
            opt.color_loss_items.append('fine_raycolor')

    def add_default_visual_items(self, opt):
        # Default visuals shown at test time.
        opt.visual_items = ['gt_image', 'coarse_raycolor', 'queried_shading']
        if opt.fine_sample_num > 0:
            opt.visual_items.append('fine_raycolor')

    def run_network_models(self):
        # Run ray marching on the valid (masked) rays only, then scatter the
        # results back into the full per-pixel layout.
        return self.fill_invalid(self.net_ray_marching(**self.input), self.input)

    def fill_invalid(self, output, input):
        # Scatter per-valid-ray outputs back to full resolution; rays that were
        # masked out get background defaults (is_background=1, bg color, 0 opacity).
        # NOTE(review): reads both the `input` parameter and self.input — confirm
        # they are meant to be the same dict.
        # ray_mask: torch.Size([1, 1024])
        # coarse_is_background: torch.Size([1, 336, 1]) -> 1, 1024, 1
        # coarse_raycolor: torch.Size([1, 336, 3]) -> 1, 1024, 3
        # coarse_point_opacity: torch.Size([1, 336, 24]) -> 1, 1024, 24
        ray_mask = output["ray_mask"]
        B, OR = ray_mask.shape
        ray_inds = torch.nonzero(ray_mask) # 336, 2
        coarse_is_background_tensor = torch.ones([B, OR, 1], dtype=output["coarse_is_background"].dtype, device=output["coarse_is_background"].device)
        # print("coarse_is_background", output["coarse_is_background"].shape)
        # print("coarse_is_background_tensor", coarse_is_background_tensor.shape)
        # print("ray_inds", ray_inds.shape, ray_mask.shape)
        coarse_is_background_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["coarse_is_background"]
        output["coarse_is_background"] = coarse_is_background_tensor
        output['coarse_mask'] = 1 - coarse_is_background_tensor

        if "bg_ray" in self.input:
            # Composite a precomputed background ray color under the foreground.
            coarse_raycolor_tensor = coarse_is_background_tensor * self.input["bg_ray"]
            coarse_raycolor_tensor[ray_inds[..., 0], ray_inds[..., 1], :] += output["coarse_raycolor"][0]
        else:
            # Fill with the (tonemapped) constant background color.
            coarse_raycolor_tensor = self.tonemap_func(
                torch.ones([B, OR, 3], dtype=output["coarse_raycolor"].dtype, device=output["coarse_raycolor"].device) * input["bg_color"][None, ...])
            coarse_raycolor_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["coarse_raycolor"]
        output["coarse_raycolor"] = coarse_raycolor_tensor

        coarse_point_opacity_tensor = torch.zeros([B, OR, output["coarse_point_opacity"].shape[2]], dtype=output["coarse_point_opacity"].dtype, device=output["coarse_point_opacity"].device)
        coarse_point_opacity_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["coarse_point_opacity"]
        output["coarse_point_opacity"] = coarse_point_opacity_tensor

        queried_shading_tensor = torch.ones([B, OR, output["queried_shading"].shape[2]], dtype=output["queried_shading"].dtype, device=output["queried_shading"].device)
        queried_shading_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["queried_shading"]
        output["queried_shading"] = queried_shading_tensor

        if self.opt.prob == 1 and "ray_max_shading_opacity" in output:
            # print("ray_inds", ray_inds.shape, torch.sum(output["ray_mask"]))
            output = self.unmask(ray_inds, output, ["ray_max_sample_loc_w", "ray_max_shading_opacity", "shading_avg_color", "shading_avg_dir", "shading_avg_conf", "shading_avg_embedding", "ray_max_far_dist"], B, OR)
        return output

    def unmask(self, ray_inds, output, names, B, OR):
        # Scatter each named per-valid-ray tensor into a zero-filled B x OR tensor.
        for name in names:
            if output[name] is not None:
                name_tensor = torch.zeros([B, OR, *output[name].shape[2:]], dtype=output[name].dtype, device=output[name].device)
                name_tensor[ray_inds[..., 0], ray_inds[..., 1], ...] = output[name]
                output[name] = name_tensor
        return output

    def get_additional_network_params(self, opt):
        # Build the constructor kwargs for NeuralPointsRayMarching.
        param = {}
        # additional parameters
        self.aggregator = self.check_getAggregator(opt)
        # "not not" coerces the (possibly list-valued) option to bool.
        self.is_compute_depth = opt.compute_depth or not not opt.depth_loss_items
        checkpoint_path = os.path.join(opt.checkpoints_dir, opt.name, '{}_net_ray_marching.pth'.format(opt.resume_iter))
        checkpoint_path = checkpoint_path if os.path.isfile(checkpoint_path) else None
        if opt.num_point > 0:
            self.neural_points = NeuralPoints(opt.point_features_dim, opt.num_point, opt, self.device, checkpoint=checkpoint_path, feature_init_method=opt.feature_init_method, reg_weight=0., feedforward=opt.feedforward)
        else:
            self.neural_points = None

        add_property2dict(param, self, [
            'aggregator', 'is_compute_depth', "neural_points", "opt"
        ])
        add_property2dict(param, opt, [
            'num_pos_freqs', 'num_viewdir_freqs'
        ])

        return param

    def create_network_models(self, opt):
        # Build the ray-marching network and (optionally) wrap in DataParallel.
        params = self.get_additional_network_params(opt)
        # network
        self.net_ray_marching = NeuralPointsRayMarching(
            **params, **self.found_funcs)

        self.model_names = ['ray_marching'] if getattr(self, "model_names", None) is None else self.model_names + ['ray_marching']

        # parallel
        if self.opt.gpu_ids:
            self.net_ray_marching.to(self.device)
            self.net_ray_marching = torch.nn.DataParallel(
                self.net_ray_marching, self.opt.gpu_ids)

    def check_getAggregator(self, opt, **kwargs):
        aggregator = PointAggregator(opt)
        return aggregator

    def setup_optimizer(self, opt):
        '''
            Setup the optimizers for all networks.
            This assumes network modules have been added to self.model_names
            By default, it uses an adam optimizer for all parameters.
        '''
        # Neural-point parameters get their own optimizer so the two groups
        # can be stepped alternately (see backward()).
        net_params = []
        neural_params = []
        for name in self.model_names:
            net = getattr(self, 'net_' + name)
            param_lst = list(net.named_parameters())

            net_params = net_params + [par[1] for par in param_lst if not par[0].startswith("module.neural_points")]
            neural_params = neural_params + [par[1] for par in param_lst if par[0].startswith("module.neural_points")]

        self.net_params = net_params
        self.neural_params = neural_params

        # opt.lr=0
        self.optimizer = torch.optim.Adam(net_params,
                                          lr=opt.lr,
                                          betas=(0.9, 0.999))
        self.neural_point_optimizer = torch.optim.Adam(neural_params,
                                                       lr=opt.lr, #/ 5.0,
                                                       betas=(0.9, 0.999))
        self.optimizers = [self.optimizer, self.neural_point_optimizer]

    def backward(self, iters):
        # When alter_step > 0, alternate between the network optimizer and the
        # neural-point optimizer every alter_step iterations.
        [optimizer.zero_grad() for optimizer in self.optimizers]
        if self.opt.is_train:
            self.loss_total.backward()
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 0:
                self.optimizer.step()
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 1:
                self.neural_point_optimizer.step()

    def optimize_parameters(self, backward=True, total_steps=0):
        # NOTE(review): `backward` parameter is unused; update_rank_ray_miss is
        # abstract here, so subclasses must override it for this to run.
        self.forward()
        self.update_rank_ray_miss(total_steps)
        self.backward(total_steps)

    def update_rank_ray_miss(self, total_steps):
        raise NotImplementedError
# NOTE(review): indentation was lost in this copy; the nesting below is
# reconstructed from the control flow — confirm against the original file.
class NeuralPointsRayMarching(nn.Module):
    # Bundles neural-point query + feature aggregation + ray marching into one
    # nn.Module so the whole pipeline can run under nn.DataParallel.

    def __init__(self,
                 tonemap_func=None,
                 render_func=None,
                 blend_func=None,
                 aggregator=None,
                 is_compute_depth=False,
                 neural_points=None,
                 opt=None,
                 num_pos_freqs=0,
                 num_viewdir_freqs=0,
                 **kwargs):
        super(NeuralPointsRayMarching, self).__init__()

        self.aggregator = aggregator

        self.num_pos_freqs = num_pos_freqs
        self.num_viewdir_freqs = num_viewdir_freqs
        # ray generation

        self.render_func = render_func
        self.blend_func = blend_func

        self.tone_map = tonemap_func
        self.return_depth = is_compute_depth
        self.return_color = True
        self.opt = opt
        self.neural_points = neural_points

    def forward(self,
                campos,
                raydir,
                gt_image=None,
                bg_color=None,
                camrotc2w=None,
                pixel_idx=None,
                near=None,
                far=None,
                focal=None,
                h=None,
                w=None,
                intrinsic=None,
                **kargs):
        output = {}
        # Query neural points along each ray, then aggregate their features
        # into per-sample decoded features.
        # B, channel, 292, 24, 32;   B, 3, 294, 24, 32;   B, 294, 24;   B, 291, 2
        sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, ray_mask_tensor, vsize, grid_vox_sz = self.neural_points({"pixel_idx": pixel_idx, "camrotc2w": camrotc2w, "campos": campos, "near": near, "far": far,"focal": focal, "h": h, "w": w, "intrinsic": intrinsic,"gt_image":gt_image, "raydir":raydir})
        decoded_features, ray_valid, weight, conf_coefficient = self.aggregator(sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, vsize, grid_vox_sz)

        # Per-sample segment lengths along the ray; cummax enforces monotone
        # depth before differencing, last segment defaults to one voxel depth.
        ray_dist = torch.cummax(sample_loc[..., 2], dim=-1)[0]
        ray_dist = torch.cat([ray_dist[..., 1:] - ray_dist[..., :-1], torch.full((ray_dist.shape[0], ray_dist.shape[1], 1), vsize[2], device=ray_dist.device)], dim=-1)

        # Replace degenerate (and optionally oversized) segments by vsize[2].
        mask = ray_dist < 1e-8
        if self.opt.raydist_mode_unit > 0:
            mask = torch.logical_or(mask, ray_dist > 2 * vsize[2])
        mask = mask.to(torch.float32)
        ray_dist = ray_dist * (1.0 - mask) + mask * vsize[2]
        ray_dist *= ray_valid.float()

        # raydir: N x Rays x 3sampled_color
        # raypos: N x Rays x Samples x 3
        # ray_dist: N x Rays x Samples
        # ray_valid: N x Rays x Samples
        # ray_features: N x Rays x Samples x Features
        # Output
        # ray_color: N x Rays x 3
        # point_color: N x Rays x Samples x 3
        # opacity: N x Rays x Samples
        # acc_transmission: N x Rays x Samples
        # blend_weight: N x Rays x Samples x 1
        # background_transmission: N x Rays x 1
        # ray march
        # queried_shading = 1 for rays that hit no points at all.
        output["queried_shading"] = torch.logical_not(torch.any(ray_valid, dim=-1, keepdims=True)).repeat(1, 1, 3).to(torch.float32)

        if self.return_color:
            if "bg_ray" in kargs:
                # Background compositing is deferred to fill_invalid().
                bg_color = None
            (
                ray_color,
                point_color,
                opacity,
                acc_transmission,
                blend_weight,
                background_transmission,
                _,
            ) = ray_march(ray_dist, ray_valid, decoded_features, self.render_func, self.blend_func, bg_color)
            ray_color = self.tone_map(ray_color)
            output["coarse_raycolor"] = ray_color
            output["coarse_point_opacity"] = opacity
        else:
            # Alpha-only marching when colors are not needed.
            (
                opacity,
                acc_transmission,
                blend_weight,
                background_transmission,
                _,
            ) = alpha_ray_march(ray_dist, ray_valid, decoded_features, self.blend_func)

        if self.return_depth:
            alpha_blend_weight = opacity * acc_transmission
            weight = alpha_blend_weight.view(alpha_blend_weight.shape[:3])
            # NOTE(review): `ray_ts` is not defined anywhere in this module —
            # this line raises NameError when return_depth is True; it likely
            # should be the per-sample ray depths. TODO confirm upstream.
            avg_depth = (weight * ray_ts).sum(-1) / (weight.sum(-1) + 1e-6)
            output["coarse_depth"] = avg_depth
        output["coarse_is_background"] = background_transmission
        output["ray_mask"] = ray_mask_tensor
        if weight is not None:
            output["weight"] = weight.detach()
            output["blend_weight"] = blend_weight.detach()
            output["conf_coefficient"] = conf_coefficient

        if self.opt.prob == 1 and output["coarse_point_opacity"].shape[1] > 0 :
            # Probe mode: record, per ray, the sample with maximum opacity and
            # the weight-averaged attributes of its contributing points.
            B, OR, _, _ = sample_pnt_mask.shape
            if weight is not None:
                output["ray_max_shading_opacity"], opacity_ind = torch.max(output["coarse_point_opacity"], dim=-1, keepdim=True)
                opacity_ind=opacity_ind[..., None] # 1, 1024, 1, 1

                output["ray_max_sample_loc_w"] = torch.gather(sample_loc_w, 2, opacity_ind.expand(-1, -1, -1, sample_loc_w.shape[-1])).squeeze(2) # 1, 1024, 24, 3 -> 1, 1024, 3
                weight = torch.gather(weight*conf_coefficient, 2, opacity_ind.expand(-1, -1, -1, weight.shape[-1])).squeeze(2)[..., None] # 1, 1024, 8

                opacity_ind = opacity_ind[...,None]
                sampled_xyz_max_opacity = torch.gather(sampled_xyz, 2, opacity_ind.expand(-1, -1, -1, sampled_xyz.shape[-2], sampled_xyz.shape[-1])).squeeze(2) # 1, 1024, 8, 3
                output["ray_max_far_dist"] = torch.min(torch.norm(sampled_xyz_max_opacity - output["ray_max_sample_loc_w"][..., None,:], dim=-1), axis=-1, keepdim=True)[0]

                sampled_color = torch.gather(sampled_color, 2, opacity_ind.expand(-1, -1, -1, sampled_color.shape[-2], sampled_color.shape[-1])).squeeze(2) if sampled_color is not None else None # 1, 1024, 8, 3
                sampled_dir = torch.gather(sampled_dir, 2, opacity_ind.expand(-1, -1, -1, sampled_dir.shape[-2], sampled_dir.shape[-1])).squeeze(2) if sampled_dir is not None else None # 1, 1024, 8, 3
                sampled_conf = torch.gather(sampled_conf, 2, opacity_ind.expand(-1, -1, -1, sampled_conf.shape[-2], sampled_conf.shape[-1])).squeeze(2) if sampled_conf is not None else None # 1, 1024, 8, 1
                sampled_embedding = torch.gather(sampled_embedding, 2, opacity_ind.expand(-1, -1, -1, sampled_embedding.shape[-2], sampled_embedding.shape[-1])).squeeze(2) # 1, 1024, 8, 1

                output["shading_avg_color"] = torch.sum(sampled_color * weight, dim=-2) if sampled_color is not None else None
                output["shading_avg_dir"] = torch.sum(sampled_dir * weight, dim=-2) if sampled_dir is not None else None
                output["shading_avg_conf"] = torch.sum(sampled_conf * weight, dim=-2) if sampled_conf is not None else None
                output["shading_avg_embedding"] = torch.sum(sampled_embedding * weight, dim=-2)
            else:
                # No weights available: emit empty placeholders with the right ranks.
                output.update({
                    "ray_max_shading_opacity": torch.zeros([0, 0, 1, 1], device="cuda"),
                    "ray_max_sample_loc_w": torch.zeros([0, 0, 3], device="cuda"),
                    "ray_max_far_dist": torch.zeros([0, 0, 1], device="cuda"),
                    "shading_avg_color": torch.zeros([0, 0, 3], device="cuda"),
                    "shading_avg_dir": torch.zeros([0, 0, 3], device="cuda"),
                    "shading_avg_conf": torch.zeros([0, 0, 1], device="cuda"),
                    "shading_avg_embedding": torch.zeros([0, 0, sampled_embedding.shape[-1]], device="cuda"),
                })
        return output
pointnerf | pointnerf-master/models/neural_points/point_query.py | import torch
import torch.nn
import torch.nn.functional as F
import os
import numpy as np
from numpy import dot
from math import sqrt
import matplotlib.pyplot as plt
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
# Directory of this file; the CUDA sources below are resolved relative to it.
parent_dir = os.path.dirname(os.path.abspath(__file__))

from torch.utils.cpp_extension import load as load_cuda

# JIT-compile (once, at import time) the CUDA extension that maps world-space
# ray samples to the indices of neighboring neural points.
query_worldcoords_cuda = load_cuda(
    name='query_worldcoords_cuda',
    sources=[
        os.path.join(parent_dir, path)
        for path in ['cuda/query_worldcoords.cpp', 'cuda/query_worldcoords.cu']],
    verbose=True)
class lighting_fast_querier():
    """CUDA-accelerated lookup of neural points around ray samples, operating
    entirely in world coordinates via the query_worldcoords extension."""

    def __init__(self, device, opt):
        print("querier device", device, device.index)
        self.device="cuda"
        self.gpu = device.index
        self.opt = opt
        self.inverse = self.opt.inverse
        self.count=0
        # Neighborhood search radius, derived from the in-plane voxel size.
        self.radius_limit_np = np.asarray(self.opt.radius_limit_scale * max(self.opt.vsize[0], self.opt.vsize[1])).astype(np.float32)

        self.vscale_np = np.array(self.opt.vscale, dtype=np.int32)
        self.scaled_vsize_np = (self.opt.vsize * self.vscale_np).astype(np.float32)
        self.scaled_vsize_tensor = torch.as_tensor(self.scaled_vsize_np, device=device)

        self.kernel_size = np.asarray(self.opt.kernel_size, dtype=np.int32)
        self.kernel_size_tensor = torch.as_tensor(self.kernel_size, device=device)

        self.query_size = np.asarray(self.opt.query_size, dtype=np.int32)
        self.query_size_tensor = torch.as_tensor(self.query_size, device=device)

    def clean_up(self):
        # Nothing to release: the cpp extension manages its own resources.
        pass

    def get_hyperparameters(self, vsize_np, point_xyz_w_tensor, ranges=None):
        '''
        Compute the (optionally range-clipped) scene bounding box, padded by
        half a kernel of scaled voxels, and the scaled voxel-grid dimensions.

        :param vsize_np: per-axis voxel size (3,)
        :param point_xyz_w_tensor: world-space points, B x N x 3
        :param ranges: optional [xmin, ymin, zmin, xmax, ymax, zmax] clip box
        :return: (ranges_tensor(6,), vsize_np, scaled_vdim_np(3,))
        '''
        min_xyz, max_xyz = torch.min(point_xyz_w_tensor, dim=-2)[0][0], torch.max(point_xyz_w_tensor, dim=-2)[0][0]
        if ranges is not None:
            # BUGFIX: ranges was sliced (ranges[:3] / ranges[3:]) *before* the
            # None check, so passing ranges=None raised TypeError instead of
            # taking the unclipped path. Slice inside the guard.
            ranges_min = torch.as_tensor(ranges[:3], dtype=torch.float32, device=min_xyz.device)
            ranges_max = torch.as_tensor(ranges[3:], dtype=torch.float32, device=min_xyz.device)
            # print("min_xyz", min_xyz.shape)
            # print("max_xyz", max_xyz.shape)
            # print("ranges", ranges)
            min_xyz, max_xyz = torch.max(torch.stack([min_xyz, ranges_min], dim=0), dim=0)[0], torch.min(torch.stack([max_xyz, ranges_max], dim=0), dim=0)[0]

        # Pad by half a kernel of scaled voxels so border points keep full kernels.
        min_xyz = min_xyz - torch.as_tensor(self.scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32)
        max_xyz = max_xyz + torch.as_tensor(self.scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32)

        ranges_tensor = torch.cat([min_xyz, max_xyz], dim=-1)
        vdim_np = (max_xyz - min_xyz).cpu().numpy() / vsize_np
        scaled_vdim_np = np.ceil(vdim_np / self.vscale_np).astype(np.int32)
        return ranges_tensor, vsize_np, scaled_vdim_np

    def query_points(self, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, h, w, intrinsic, near_depth, far_depth, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor):
        """Sample positions along each ray and gather the K nearest neural
        points per sample via the CUDA extension.

        Returns (point indices, sample locations in camera perspective coords,
        sample locations in world coords, per-sample ray dirs, ray validity
        mask, voxel size, ranges).
        """
        near_depth, far_depth = np.asarray(near_depth).item() , np.asarray(far_depth).item()
        ranges_tensor, vsize_np, scaled_vdim_np = self.get_hyperparameters(self.opt.vsize, point_xyz_w_tensor, ranges=self.opt.ranges)
        # print("self.opt.ranges", self.opt.ranges, range_gpu, ray_dirs_tensor)

        # Sample depths linearly either in disparity (inverse) or in depth.
        if self.opt.inverse > 0:
            raypos_tensor, _, _, _ = near_far_disparity_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.)
        else:
            raypos_tensor, _, _, _ = near_far_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.)

        D = raypos_tensor.shape[2]
        R = pixel_idx_tensor.reshape(point_xyz_w_tensor.shape[0], -1, 2).shape[1]

        sample_pidx_tensor, sample_loc_w_tensor, ray_mask_tensor = \
            query_worldcoords_cuda.woord_query_grid_point_index(pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor, actual_numpoints_tensor, self.kernel_size_tensor,
                                                                self.query_size_tensor, self.opt.SR, self.opt.K, R, D,
                                                                torch.as_tensor(scaled_vdim_np,device=self.device),
                                                                self.opt.max_o, self.opt.P, self.radius_limit_np,
                                                                ranges_tensor,
                                                                self.scaled_vsize_tensor,
                                                                self.opt.gpu_maxthr, self.opt.NN)

        # Expand the dirs of surviving rays to one entry per shading point (SR).
        sample_ray_dirs_tensor = torch.masked_select(ray_dirs_tensor, ray_mask_tensor[..., None]>0).reshape(ray_dirs_tensor.shape[0],-1,3)[...,None,:].expand(-1, -1, self.opt.SR, -1).contiguous()
        # print("sample_ray_dirs_tensor", sample_ray_dirs_tensor.shape)
        return sample_pidx_tensor, self.w2pers(sample_loc_w_tensor, cam_rot_tensor, cam_pos_tensor), \
               sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize_np, ranges_tensor.cpu().numpy()

    def w2pers(self, point_xyz_w, camrotc2w, campos):
        """World -> camera perspective coords: returns (x/z, y/z, z), B x M x 3."""
        # point_xyz_pers B X M X 3
        xyz_w_shift = point_xyz_w - campos[:, None, :]
        # Multiply by R^T (inverse of the camera-to-world rotation).
        xyz_c = torch.sum(xyz_w_shift[..., None,:] * torch.transpose(camrotc2w, 1, 2)[:, None, None,...], dim=-1)
        z_pers = xyz_c[..., 2]
        x_pers = xyz_c[..., 0] / xyz_c[..., 2]
        y_pers = xyz_c[..., 1] / xyz_c[..., 2]
        return torch.stack([x_pers, y_pers, z_pers], dim=-1)
pointnerf | pointnerf-master/models/neural_points/query_point_indices_worldcoords.py | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
# X = torch.cuda.FloatTensor(8)
class Holder(pycuda.driver.PointerHolderBase):
    # Adapter that exposes a torch CUDA tensor's raw device pointer to pycuda
    # kernels, without copying the data.
    def __init__(self, t):
        super(Holder, self).__init__()
        self.t = t  # keep the tensor alive while the holder is in use
        self.gpudata = t.data_ptr()
    def get_pointer(self):
        # Called by pycuda to obtain the device address of the tensor storage.
        return self.t.data_ptr()
class lighting_fast_querier():
    def __init__(self, device, opt):
        """Create a pycuda context on `device` and compile the query kernels."""
        print("querier device", device, device.index)
        self.gpu = device.index
        self.opt = opt
        drv.init()
        # self.device = drv.Device(gpu)
        # Context is pushed here and must be popped via clean_up().
        self.ctx = drv.Device(self.gpu).make_context()
        # Compiled kernel entry points (see build_cuda for the CUDA source).
        self.claim_occ, self.map_coor2occ, self.fill_occ2pnts, self.mask_raypos, self.get_shadingloc, self.query_along_ray = self.build_cuda()
        self.inverse = self.opt.inverse
        self.count=0
    def clean_up(self):
        """Pop the pycuda context created in __init__ to release the GPU."""
        self.ctx.pop()
    def get_hyperparameters(self, vsize_np, point_xyz_w_tensor, ranges=None):
        '''
        Compute the (optionally range-clipped) scene bounding box padded by half
        a kernel of scaled voxels, the voxel-grid dimensions, and gpuarray
        copies of the grid constants for the pycuda kernels.

        :param vsize_np: per-axis voxel size (3,)
        :param point_xyz_w_tensor: world-space points, B x N x 3
        :param ranges: optional [xmin, ymin, zmin, xmax, ymax, zmax] clip box
        :return: radius/depth limits, ranges, voxel sizes/dims (numpy), and
                 their gpuarray counterparts
        '''
        min_xyz, max_xyz = torch.min(point_xyz_w_tensor, dim=-2)[0][0], torch.max(point_xyz_w_tensor, dim=-2)[0][0]
        vscale_np = np.array(self.opt.vscale, dtype=np.int32)
        scaled_vsize_np = (vsize_np * vscale_np).astype(np.float32)
        if ranges is not None:
            # Clip the point-cloud AABB to the user-provided ranges.
            # print("min_xyz", min_xyz.shape)
            # print("max_xyz", max_xyz.shape)
            # print("ranges", ranges)
            min_xyz, max_xyz = torch.max(torch.stack([min_xyz, torch.as_tensor(ranges[:3], dtype=torch.float32, device=min_xyz.device)], dim=0), dim=0)[0], torch.min(torch.stack([max_xyz, torch.as_tensor(ranges[3:], dtype=torch.float32, device=min_xyz.device)], dim=0), dim=0)[0]
        # Pad by half a kernel of scaled voxels so border points keep full kernels.
        min_xyz = min_xyz - torch.as_tensor(scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32)
        max_xyz = max_xyz + torch.as_tensor(scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32)
        ranges_np = torch.cat([min_xyz, max_xyz], dim=-1).cpu().numpy().astype(np.float32)
        # print("ranges_np",ranges_np)
        vdim_np = (max_xyz - min_xyz).cpu().numpy() / vsize_np
        scaled_vdim_np = np.ceil(vdim_np / vscale_np).astype(np.int32)
        # Upload the grid constants once; the kernels read them repeatedly.
        ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = np_to_gpuarray(
            ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, np.asarray(self.opt.kernel_size, dtype=np.int32),
            np.asarray(self.opt.query_size, dtype=np.int32))
        # Search limits: radius in the image plane, depth along the ray.
        radius_limit_np, depth_limit_np = self.opt.radius_limit_scale * max(vsize_np[0], vsize_np[1]), self.opt.depth_limit_scale * vsize_np[2]
        return np.asarray(radius_limit_np).astype(np.float32), np.asarray(depth_limit_np).astype(np.float32), ranges_np, vsize_np, vdim_np, scaled_vsize_np, scaled_vdim_np, vscale_np, ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu
    def query_points(self, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, h, w, intrinsic, near_depth, far_depth, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor):
        """Sample positions along each ray and gather neighboring neural points
        per sample with the pycuda kernels.

        Returns (point indices, sample locations in camera perspective coords,
        sample locations in world coords, per-sample ray dirs, ray validity
        mask, voxel size, ranges).
        """
        near_depth, far_depth = np.asarray(near_depth).item() , np.asarray(far_depth).item()
        radius_limit_np, depth_limit_np, ranges_np, vsize_np, vdim_np, scaled_vsize_np, scaled_vdim_np, vscale_np, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = self.get_hyperparameters(self.opt.vsize, point_xyz_w_tensor, ranges=self.opt.ranges)
        # print("self.opt.ranges", self.opt.ranges, range_gpu, ray_dirs_tensor)
        # Sample depths linearly either in disparity (inverse) or in depth.
        if self.opt.inverse > 0:
            raypos_tensor, _, _, _ = near_far_disparity_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.)
        else:
            raypos_tensor, _, _, _ = near_far_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.)

        sample_pidx_tensor, sample_loc_w_tensor, ray_mask_tensor = self.query_grid_point_index(h, w, pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, self.opt.SR, self.opt.K, ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, self.opt.max_o, self.opt.P, radius_limit_np, depth_limit_np, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, ray_dirs_tensor, cam_pos_tensor, kMaxThreadsPerBlock=self.opt.gpu_maxthr)

        # Expand the dirs of surviving rays to one entry per shading point (SR).
        sample_ray_dirs_tensor = torch.masked_select(ray_dirs_tensor, ray_mask_tensor[..., None]>0).reshape(ray_dirs_tensor.shape[0],-1,3)[...,None,:].expand(-1, -1, self.opt.SR, -1).contiguous()
        # print("sample_ray_dirs_tensor", sample_ray_dirs_tensor.shape)
        return sample_pidx_tensor, self.w2pers(sample_loc_w_tensor, cam_rot_tensor, cam_pos_tensor), sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize_np, ranges_np
def w2pers(self, point_xyz_w, camrotc2w, campos):
# point_xyz_pers B X M X 3
xyz_w_shift = point_xyz_w - campos[:, None, :]
xyz_c = torch.sum(xyz_w_shift[..., None,:] * torch.transpose(camrotc2w, 1, 2)[:, None, None,...], dim=-1)
z_pers = xyz_c[..., 2]
x_pers = xyz_c[..., 0] / xyz_c[..., 2]
y_pers = xyz_c[..., 1] / xyz_c[..., 2]
return torch.stack([x_pers, y_pers, z_pers], dim=-1)
def build_cuda(self):
mod = SourceModule(
"""
#define KN """ + str(self.opt.K)
+ """
#include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <vector>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <curand_kernel.h>
namespace cuda {
static __device__ inline uint8_t atomicAdd(uint8_t *address, uint8_t val) {
size_t offset = (size_t)address & 3;
uint32_t *address_as_ui = (uint32_t *)(address - offset);
uint32_t old = *address_as_ui;
uint32_t shift = offset * 8;
uint32_t old_byte;
uint32_t newval;
uint32_t assumed;
do {
assumed = old;
old_byte = (old >> shift) & 0xff;
// preserve size in initial cast. Casting directly to uint32_t pads
// negative signed values with 1's (e.g. signed -1 = unsigned ~0).
newval = static_cast<uint8_t>(val + old_byte);
newval = (old & ~(0x000000ff << shift)) | (newval << shift);
old = atomicCAS(address_as_ui, assumed, newval);
} while (assumed != old);
return __byte_perm(old, 0, offset); // need validate
}
static __device__ inline char atomicAdd(char* address, char val) {
// offset, in bytes, of the char* address within the 32-bit address of the space that overlaps it
size_t long_address_modulo = (size_t) address & 3;
// the 32-bit address that overlaps the same memory
auto* base_address = (unsigned int*) ((char*) address - long_address_modulo);
// A 0x3210 selector in __byte_perm will simply select all four bytes in the first argument in the same order.
// The "4" signifies the position where the first byte of the second argument will end up in the output.
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
// for selecting bytes within a 32-bit chunk that correspond to the char* address (relative to base_address)
unsigned int selector = selectors[long_address_modulo];
unsigned int long_old, long_assumed, long_val, replacement;
long_old = *base_address;
do {
long_assumed = long_old;
// replace bits in long_old that pertain to the char address with those from val
long_val = __byte_perm(long_old, 0, long_address_modulo) + val;
replacement = __byte_perm(long_old, long_val, selector);
long_old = atomicCAS(base_address, long_assumed, replacement);
} while (long_old != long_assumed);
return __byte_perm(long_old, 0, long_address_modulo);
}
static __device__ inline int8_t atomicAdd(int8_t *address, int8_t val) {
return (int8_t)cuda::atomicAdd((char*)address, (char)val);
}
static __device__ inline short atomicAdd(short* address, short val)
{
unsigned int *base_address = (unsigned int *)((size_t)address & ~2);
unsigned int long_val = ((size_t)address & 2) ? ((unsigned int)val << 16) : (unsigned short)val;
unsigned int long_old = ::atomicAdd(base_address, long_val);
if((size_t)address & 2) {
return (short)(long_old >> 16);
} else {
unsigned int overflow = ((long_old & 0xffff) + long_val) & 0xffff0000;
if (overflow)
atomicSub(base_address, overflow);
return (short)(long_old & 0xffff);
}
}
static __device__ float cas(double *addr, double compare, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *) addr;
return __longlong_as_double(atomicCAS(address_as_ull,
__double_as_longlong(compare),
__double_as_longlong(val)));
}
static __device__ float cas(float *addr, float compare, float val) {
unsigned int *address_as_uint = (unsigned int *) addr;
return __uint_as_float(atomicCAS(address_as_uint,
__float_as_uint(compare),
__float_as_uint(val)));
}
static __device__ inline uint8_t atomicCAS(uint8_t * const address, uint8_t const compare, uint8_t const value)
{
uint8_t const longAddressModulo = reinterpret_cast< size_t >( address ) & 0x3;
uint32_t *const baseAddress = reinterpret_cast< uint32_t * >( address - longAddressModulo );
uint32_t constexpr byteSelection[] = { 0x3214, 0x3240, 0x3410, 0x4210 }; // The byte position we work on is '4'.
uint32_t const byteSelector = byteSelection[ longAddressModulo ];
uint32_t const longCompare = compare;
uint32_t const longValue = value;
uint32_t longOldValue = * baseAddress;
uint32_t longAssumed;
uint8_t oldValue;
do {
// Select bytes from the old value and new value to construct a 32-bit value to use.
uint32_t const replacement = __byte_perm( longOldValue, longValue, byteSelector );
uint32_t const comparison = __byte_perm( longOldValue, longCompare, byteSelector );
longAssumed = longOldValue;
// Use 32-bit atomicCAS() to try and set the 8-bits we care about.
longOldValue = ::atomicCAS( baseAddress, comparison, replacement );
// Grab the 8-bit portion we care about from the old value at address.
oldValue = ( longOldValue >> ( 8 * longAddressModulo )) & 0xFF;
} while ( compare == oldValue and longAssumed != longOldValue ); // Repeat until other three 8-bit values stabilize.
return oldValue;
}
}
extern "C" {
__global__ void claim_occ(
const float* in_data, // B * N * 3
const int* in_actual_numpoints, // B
const int B,
const int N,
const float *d_coord_shift, // 3
const float *d_voxel_size, // 3
const int *d_grid_size, // 3
const int grid_size_vol,
const int max_o,
int* occ_idx, // B, all 0
int *coor_2_occ, // B * 400 * 400 * 400, all -1
int *occ_2_coor, // B * max_o * 3, all -1
unsigned long seconds
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / N; // index of batch
if (i_batch >= B) { return; }
int i_pt = index - N * i_batch;
if (i_pt < in_actual_numpoints[i_batch]) {
int coor[3];
const float *p_pt = in_data + index * 3;
coor[0] = (int) floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]);
coor[1] = (int) floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]);
coor[2] = (int) floor((p_pt[2] - d_coord_shift[2]) / d_voxel_size[2]);
// printf("p_pt %f %f %f %f; ", p_pt[2], d_coord_shift[2], d_coord_shift[0], d_coord_shift[1]);
if (coor[0] < 0 || coor[0] >= d_grid_size[0] || coor[1] < 0 || coor[1] >= d_grid_size[1] || coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; }
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
int voxel_idx = coor_2_occ[coor_indx_b];
if (voxel_idx == -1) { // found an empty voxel
int old_voxel_num = atomicCAS(
&coor_2_occ[coor_indx_b],
-1, 0
);
if (old_voxel_num == -1) {
// CAS -> old val, if old val is -1
// if we get -1, this thread is the one who obtain a new voxel
// so only this thread should do the increase operator below
int tmp = atomicAdd(occ_idx+i_batch, 1); // increase the counter, return old counter
// increase the counter, return old counter
if (tmp < max_o) {
int coord_inds = (i_batch * max_o + tmp) * 3;
occ_2_coor[coord_inds] = coor[0];
occ_2_coor[coord_inds + 1] = coor[1];
occ_2_coor[coord_inds + 2] = coor[2];
} else {
curandState state;
curand_init(index+2*seconds, 0, 0, &state);
int insrtidx = ceilf(curand_uniform(&state) * (tmp+1)) - 1;
if(insrtidx < max_o){
int coord_inds = (i_batch * max_o + insrtidx) * 3;
occ_2_coor[coord_inds] = coor[0];
occ_2_coor[coord_inds + 1] = coor[1];
occ_2_coor[coord_inds + 2] = coor[2];
}
}
}
}
}
}
__global__ void map_coor2occ(
const int B,
const int *d_grid_size, // 3
const int *kernel_size, // 3
const int grid_size_vol,
const int max_o,
int* occ_idx, // B, all -1
int *coor_occ, // B * 400 * 400 * 400
int *coor_2_occ, // B * 400 * 400 * 400
int *occ_2_coor // B * max_o * 3
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / max_o; // index of batch
if (i_batch >= B) { return; }
int i_pt = index - max_o * i_batch;
if (i_pt < occ_idx[i_batch] && i_pt < max_o) {
int coor[3];
coor[0] = occ_2_coor[index*3];
if (coor[0] < 0) { return; }
coor[1] = occ_2_coor[index*3+1];
coor[2] = occ_2_coor[index*3+2];
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
coor_2_occ[coor_indx_b] = i_pt;
// printf("kernel_size[0] %d", kernel_size[0]);
for (int coor_x = max(0, coor[0] - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], coor[0] + (kernel_size[0] + 1) / 2); coor_x++) {
for (int coor_y = max(0, coor[1] - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], coor[1] + (kernel_size[1] + 1) / 2); coor_y++) {
for (int coor_z = max(0, coor[2] - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], coor[2] + (kernel_size[2] + 1) / 2); coor_z++) {
coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z;
if (coor_occ[coor_indx_b] > 0) { continue; }
atomicCAS(coor_occ + coor_indx_b, 0, 1);
}
}
}
}
}
__global__ void fill_occ2pnts(
const float* in_data, // B * N * 3
const int* in_actual_numpoints, // B
const int B,
const int N,
const int P,
const float *d_coord_shift, // 3
const float *d_voxel_size, // 3
const int *d_grid_size, // 3
const int grid_size_vol,
const int max_o,
int *coor_2_occ, // B * 400 * 400 * 400, all -1
int *occ_2_pnts, // B * max_o * P, all -1
int *occ_numpnts, // B * max_o, all 0
unsigned long seconds
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / N; // index of batch
if (i_batch >= B) { return; }
int i_pt = index - N * i_batch;
if (i_pt < in_actual_numpoints[i_batch]) {
int coor[3];
const float *p_pt = in_data + index * 3;
coor[0] = (int) floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]);
coor[1] = (int) floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]);
coor[2] = (int) floor((p_pt[2] - d_coord_shift[2]) / d_voxel_size[2]);
if (coor[0] < 0 || coor[0] >= d_grid_size[0] || coor[1] < 0 || coor[1] >= d_grid_size[1] || coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; }
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
int voxel_idx = coor_2_occ[coor_indx_b];
if (voxel_idx > 0) { // found an claimed coor2occ
int occ_indx_b = i_batch * max_o + voxel_idx;
int tmp = atomicAdd(occ_numpnts + occ_indx_b, 1); // increase the counter, return old counter
if (tmp < P) {
occ_2_pnts[occ_indx_b * P + tmp] = i_pt;
} else {
curandState state;
curand_init(index+2*seconds, 0, 0, &state);
int insrtidx = ceilf(curand_uniform(&state) * (tmp+1)) - 1;
if(insrtidx < P){
occ_2_pnts[occ_indx_b * P + insrtidx] = i_pt;
}
}
}
}
}
__global__ void mask_raypos(
float *raypos, // [B, 2048, 400, 3]
int *coor_occ, // B * 400 * 400 * 400
const int B, // 3
const int R, // 3
const int D, // 3
const int grid_size_vol,
const float *d_coord_shift, // 3
const int *d_grid_size, // 3
const float *d_voxel_size, // 3
int *raypos_mask // B, R, D
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / (R * D); // index of batch
if (i_batch >= B) { return; }
int coor[3];
coor[0] = (int) floor((raypos[index*3] - d_coord_shift[0]) / d_voxel_size[0]);
coor[1] = (int) floor((raypos[index*3+1] - d_coord_shift[1]) / d_voxel_size[1]);
coor[2] = (int) floor((raypos[index*3+2] - d_coord_shift[2]) / d_voxel_size[2]);
// printf(" %f %f %f;", raypos[index*3], raypos[index*3+1], raypos[index*3+2]);
if ((coor[0] >= 0) && (coor[0] < d_grid_size[0]) && (coor[1] >= 0) && (coor[1] < d_grid_size[1]) && (coor[2] >= 0) && (coor[2] < d_grid_size[2])) {
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
raypos_mask[index] = coor_occ[coor_indx_b];
}
}
__global__ void get_shadingloc(
const float *raypos, // [B, 2048, 400, 3]
const int *raypos_mask, // B, R, D
const int B, // 3
const int R, // 3
const int D, // 3
const int SR, // 3
float *sample_loc, // B * R * SR * 3
int *sample_loc_mask // B * R * SR
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / (R * D); // index of batch
if (i_batch >= B) { return; }
int temp = raypos_mask[index];
if (temp >= 0) {
int r = (index - i_batch * R * D) / D;
int loc_inds = i_batch * R * SR + r * SR + temp;
sample_loc[loc_inds * 3] = raypos[index * 3];
sample_loc[loc_inds * 3 + 1] = raypos[index * 3 + 1];
sample_loc[loc_inds * 3 + 2] = raypos[index * 3 + 2];
sample_loc_mask[loc_inds] = 1;
}
}
__global__ void query_neigh_along_ray_layered(
const float* in_data, // B * N * 3
const int B,
const int SR, // num. samples along each ray e.g., 128
const int R, // e.g., 1024
const int max_o,
const int P,
const int K, // num. neighbors
const int grid_size_vol,
const float radius_limit2,
const float *d_coord_shift, // 3
const int *d_grid_size,
const float *d_voxel_size, // 3
const int *kernel_size,
const int *occ_numpnts, // B * max_o
const int *occ_2_pnts, // B * max_o * P
const int *coor_2_occ, // B * 400 * 400 * 400
const float *sample_loc, // B * R * SR * 3
const int *sample_loc_mask, // B * R * SR
int *sample_pidx, // B * R * SR * K
unsigned long seconds,
const int NN
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / (R * SR); // index of batch
if (i_batch >= B || sample_loc_mask[index] <= 0) { return; }
float centerx = sample_loc[index * 3];
float centery = sample_loc[index * 3 + 1];
float centerz = sample_loc[index * 3 + 2];
int frustx = (int) floor((centerx - d_coord_shift[0]) / d_voxel_size[0]);
int frusty = (int) floor((centery - d_coord_shift[1]) / d_voxel_size[1]);
int frustz = (int) floor((centerz - d_coord_shift[2]) / d_voxel_size[2]);
centerx = sample_loc[index * 3];
centery = sample_loc[index * 3 + 1];
centerz = sample_loc[index * 3 + 2];
int kid = 0, far_ind = 0, coor_z, coor_y, coor_x;
float far2 = 0.0;
float xyz2Buffer[KN];
for (int layer = 0; layer < (kernel_size[0]+1)/2; layer++){
for (int x = max(-frustx, -layer); x < min(d_grid_size[0] - frustx, layer + 1); x++) {
coor_x = frustx + x;
for (int y = max(-frusty, -layer); y < min(d_grid_size[1] - frusty, layer + 1); y++) {
coor_y = frusty + y;
for (int z = max(-frustz, -layer); z < min(d_grid_size[2] - frustz, layer + 1); z++) {
coor_z = z + frustz;
if (max(abs(z), max(abs(x), abs(y))) != layer) continue;
int coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z;
int occ_indx = coor_2_occ[coor_indx_b] + i_batch * max_o;
if (occ_indx >= 0) {
for (int g = 0; g < min(P, occ_numpnts[occ_indx]); g++) {
int pidx = occ_2_pnts[occ_indx * P + g];
float x_v = (in_data[pidx*3]-centerx);
float y_v = (in_data[pidx*3 + 1]-centery);
float z_v = (in_data[pidx*3 + 2]-centerz);
float xyz2 = x_v * x_v + y_v * y_v + z_v * z_v;
if ((radius_limit2 == 0 || xyz2 <= radius_limit2)){
if (kid++ < K) {
sample_pidx[index * K + kid - 1] = pidx;
xyz2Buffer[kid-1] = xyz2;
if (xyz2 > far2){
far2 = xyz2;
far_ind = kid - 1;
}
} else {
if (xyz2 < far2) {
sample_pidx[index * K + far_ind] = pidx;
xyz2Buffer[far_ind] = xyz2;
far2 = xyz2;
for (int i = 0; i < K; i++) {
if (xyz2Buffer[i] > far2) {
far2 = xyz2Buffer[i];
far_ind = i;
}
}
}
}
}
}
}
}
}
}
if (kid >= K) break;
}
}
}
""", no_extern_c=True)
claim_occ = mod.get_function("claim_occ")
map_coor2occ = mod.get_function("map_coor2occ")
fill_occ2pnts = mod.get_function("fill_occ2pnts")
mask_raypos = mod.get_function("mask_raypos")
get_shadingloc = mod.get_function("get_shadingloc")
query_along_ray = mod.get_function("query_neigh_along_ray_layered") if self.opt.NN > 0 else mod.get_function("query_rand_along_ray")
return claim_occ, map_coor2occ, fill_occ2pnts, mask_raypos, get_shadingloc, query_along_ray
def switch_pixel_id(self, pixel_idx_tensor, h):
    """Flip pixel y coordinates vertically: (x, y) -> (x, h - 1 - y).

    Keeps the x column untouched and mirrors the y column within an
    image of height ``h``; returns a tensor of the same shape.
    """
    x_col = pixel_idx_tensor[..., 0:1]
    flipped_y_col = h - 1 - pixel_idx_tensor[..., 1:2]
    return torch.cat([x_col, flipped_y_col], dim=-1)
def build_occ_vox(self, point_xyz_w_tensor, actual_numpoints_tensor, B, N, P, max_o, scaled_vdim_np, kMaxThreadsPerBlock, gridSize, scaled_vsize_gpu, scaled_vdim_gpu, kernel_size_gpu, grid_size_vol, d_coord_shift):
    """Build per-batch sparse occupancy-voxel structures on the GPU.

    Launches three CUDA kernels in sequence:
      1. ``claim_occ``     -- each input point tries to claim the voxel it
         falls into; claimed voxel coordinates are written to ``occ_2_coor``
         (with random replacement once more than ``max_o`` voxels claim).
      2. ``map_coor2occ``  -- records each claimed voxel's slot index in
         ``coor_2_occ`` and dilates the binary occupancy grid ``coor_occ``
         by ``kernel_size`` voxels around every occupied cell.
      3. ``fill_occ2pnts`` -- buckets up to ``P`` point indices per occupied
         voxel into ``occ_2_pnts`` (random replacement when a bucket fills).

    Returns int32 tensors on the input device:
        (coor_occ, occ_2_coor, coor_2_occ, occ_idx, occ_numpnts, occ_2_pnts)
    """
    device = point_xyz_w_tensor.device
    # Dense grids; -1 marks an empty slot, zero/positive values are valid indices.
    coor_occ_tensor = torch.zeros([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], dtype=torch.int32, device=device)
    occ_2_pnts_tensor = torch.full([B, max_o, P], -1, dtype=torch.int32, device=device)
    occ_2_coor_tensor = torch.full([B, max_o, 3], -1, dtype=torch.int32, device=device)
    occ_numpnts_tensor = torch.zeros([B, max_o], dtype=torch.int32, device=device)
    coor_2_occ_tensor = torch.full([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], -1, dtype=torch.int32, device=device)
    occ_idx_tensor = torch.zeros([B], dtype=torch.int32, device=device)
    seconds = time.time()  # wall-clock value used as curand seed inside the kernel
    self.claim_occ(
        Holder(point_xyz_w_tensor),
        Holder(actual_numpoints_tensor),
        np.int32(B),
        np.int32(N),
        d_coord_shift,
        scaled_vsize_gpu,
        scaled_vdim_gpu,
        np.int32(grid_size_vol),
        np.int32(max_o),
        Holder(occ_idx_tensor),
        Holder(coor_2_occ_tensor),
        Holder(occ_2_coor_tensor),
        np.uint64(seconds),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    # claim_occ only used coor_2_occ as a claim flag (atomicCAS -1 -> 0); reset
    # it so map_coor2occ can write the real voxel -> occupied-slot indices.
    coor_2_occ_tensor = torch.full([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], -1,
                                   dtype=torch.int32, device=device)
    # Switch launch size: one thread per occupied-voxel slot instead of per point.
    gridSize = int((B * max_o + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    self.map_coor2occ(
        np.int32(B),
        scaled_vdim_gpu,
        kernel_size_gpu,
        np.int32(grid_size_vol),
        np.int32(max_o),
        Holder(occ_idx_tensor),
        Holder(coor_occ_tensor),
        Holder(coor_2_occ_tensor),
        Holder(occ_2_coor_tensor),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    seconds = time.time()  # fresh seed for fill_occ2pnts' random replacement
    # Back to one thread per input point.
    gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    self.fill_occ2pnts(
        Holder(point_xyz_w_tensor),
        Holder(actual_numpoints_tensor),
        np.int32(B),
        np.int32(N),
        np.int32(P),
        d_coord_shift,
        scaled_vsize_gpu,
        scaled_vdim_gpu,
        np.int32(grid_size_vol),
        np.int32(max_o),
        Holder(coor_2_occ_tensor),
        Holder(occ_2_pnts_tensor),
        Holder(occ_numpnts_tensor),
        np.uint64(seconds),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    return coor_occ_tensor, occ_2_coor_tensor, coor_2_occ_tensor, occ_idx_tensor, occ_numpnts_tensor, occ_2_pnts_tensor
def query_grid_point_index(self, h, w, pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, SR, K, ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, max_o, P, radius_limit_np, depth_limit_np, ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, ray_dirs_tensor, cam_pos_tensor, kMaxThreadsPerBlock = 1024):
    """For each ray sample position, find up to K neighboring neural points.

    Pipeline:
      1. build_occ_vox   -- voxelize the point cloud into sparse occupancy grids.
      2. mask_raypos     -- flag which of the D samples per ray fall inside an
                            occupied (dilated) voxel.
      3. get_shadingloc  -- compact each ray's flagged samples into at most SR
                            shading locations.
      4. query_along_ray -- gather up to K point indices per shading location.
    Rays with no valid samples are dropped; R shrinks accordingly.

    Returns (sample_pidx [B,R,SR,K] int32, sample_loc [B,R,SR,3] float32,
    ray_mask [B,R_orig] int8 flagging which input rays survived).

    NOTE(review): the masked_select(...).reshape(B, R, ...) steps assume every
    batch row keeps the same number of valid rays -- effectively B == 1;
    confirm before using with B > 1.
    """
    device = point_xyz_w_tensor.device
    B, N = point_xyz_w_tensor.shape[0], point_xyz_w_tensor.shape[1]
    pixel_size = scaled_vdim_np[0] * scaled_vdim_np[1]
    grid_size_vol = pixel_size * scaled_vdim_np[2]
    d_coord_shift = ranges_gpu[:3]  # xyz minimum of the scene AABB
    R, D = raypos_tensor.shape[1], raypos_tensor.shape[2]
    # R re-derived from the pixel indices (one ray per sampled pixel).
    R = pixel_idx_tensor.reshape(B, -1, 2).shape[1]
    gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    coor_occ_tensor, occ_2_coor_tensor, coor_2_occ_tensor, occ_idx_tensor, occ_numpnts_tensor, occ_2_pnts_tensor = self.build_occ_vox(point_xyz_w_tensor, actual_numpoints_tensor, B, N, P, max_o, scaled_vdim_np, kMaxThreadsPerBlock, gridSize, scaled_vsize_gpu, scaled_vdim_gpu, query_size_gpu, grid_size_vol, d_coord_shift)
    # torch.cuda.synchronize()
    # print("coor_occ_tensor", torch.min(coor_occ_tensor), torch.max(coor_occ_tensor), torch.min(occ_2_coor_tensor), torch.max(occ_2_coor_tensor), torch.min(coor_2_occ_tensor), torch.max(coor_2_occ_tensor), torch.min(occ_idx_tensor), torch.max(occ_idx_tensor), torch.min(occ_numpnts_tensor), torch.max(occ_numpnts_tensor), torch.min(occ_2_pnts_tensor), torch.max(occ_2_pnts_tensor), occ_2_pnts_tensor.shape)
    # print("occ_numpnts_tensor", torch.sum(occ_numpnts_tensor > 0), ranges_np)
    # vis_vox(ranges_np, scaled_vsize_np, coor_2_occ_tensor)
    # Per-sample occupancy flags: 1 where a ray sample lies in an occupied voxel.
    raypos_mask_tensor = torch.zeros([B, R, D], dtype=torch.int32, device=device)
    gridSize = int((B * R * D + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    self.mask_raypos(
        Holder(raypos_tensor), # [1, 2048, 400, 3]
        Holder(coor_occ_tensor), # [1, 2048, 400, 3]
        np.int32(B),
        np.int32(R),
        np.int32(D),
        np.int32(grid_size_vol),
        d_coord_shift,
        scaled_vdim_gpu,
        scaled_vsize_gpu,
        Holder(raypos_mask_tensor),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)
    )
    # torch.cuda.synchronize()
    # print("raypos_mask_tensor", raypos_mask_tensor.shape, torch.sum(coor_occ_tensor), torch.sum(raypos_mask_tensor))
    # save_points(raypos_tensor.reshape(-1, 3), "./", "rawraypos_pnts")
    # raypos_masked = torch.masked_select(raypos_tensor, raypos_mask_tensor[..., None] > 0)
    # save_points(raypos_masked.reshape(-1, 3), "./", "raypos_pnts")
    # A ray survives if at least one of its D samples hit an occupied voxel.
    ray_mask_tensor = torch.max(raypos_mask_tensor, dim=-1)[0] > 0 # B, R
    R = torch.sum(ray_mask_tensor.to(torch.int32)).cpu().numpy()  # surviving ray count
    # print("R", torch.sum(ray_mask_tensor.to(torch.int32)), R)
    sample_loc_tensor = torch.zeros([B, R, SR, 3], dtype=torch.float32, device=device)
    sample_pidx_tensor = torch.full([B, R, SR, K], -1, dtype=torch.int32, device=device)
    if R > 0:
        # Compact ray positions / masks down to the surviving rays only.
        raypos_tensor = torch.masked_select(raypos_tensor, ray_mask_tensor[..., None, None].expand(-1, -1, D, 3)).reshape(B, R, D, 3)
        raypos_mask_tensor = torch.masked_select(raypos_mask_tensor, ray_mask_tensor[..., None].expand(-1, -1, D)).reshape(B, R, D)
        # print("R", R, raypos_tensor.shape, raypos_mask_tensor.shape)
        # Turn the 0/1 flags into destination slot indices (0-based), capped at
        # SR samples per ray; invalid samples become -1.
        raypos_maskcum = torch.cumsum(raypos_mask_tensor, dim=-1).to(torch.int32)
        raypos_mask_tensor = (raypos_mask_tensor * raypos_maskcum * (raypos_maskcum <= SR)) - 1
        sample_loc_mask_tensor = torch.zeros([B, R, SR], dtype=torch.int32, device=device)
        # gridSize from the B*R_old*D launch is reused here; extra threads exit
        # early inside the kernel, so over-provisioning is harmless.
        self.get_shadingloc(
            Holder(raypos_tensor), # [1, 2048, 400, 3]
            Holder(raypos_mask_tensor),
            np.int32(B),
            np.int32(R),
            np.int32(D),
            np.int32(SR),
            Holder(sample_loc_tensor),
            Holder(sample_loc_mask_tensor),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)
        )
        # torch.cuda.synchronize()
        # print("shadingloc_mask_tensor", torch.sum(sample_loc_mask_tensor, dim=-1), torch.sum(torch.sum(sample_loc_mask_tensor, dim=-1) > 0), torch.sum(sample_loc_mask_tensor > 0))
        # shadingloc_masked = torch.masked_select(sample_loc_tensor, sample_loc_mask_tensor[..., None] > 0)
        # save_points(shadingloc_masked.reshape(-1, 3), "./", "shading_pnts{}".format(self.count))
        seconds = time.time()  # curand seed for random neighbor replacement
        gridSize = int((B * R * SR + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
        self.query_along_ray(
            Holder(point_xyz_w_tensor),
            np.int32(B),
            np.int32(SR),
            np.int32(R),
            np.int32(max_o),
            np.int32(P),
            np.int32(K),
            np.int32(grid_size_vol),
            np.float32(radius_limit_np ** 2),
            d_coord_shift,
            scaled_vdim_gpu,
            scaled_vsize_gpu,
            kernel_size_gpu,
            Holder(occ_numpnts_tensor),
            Holder(occ_2_pnts_tensor),
            Holder(coor_2_occ_tensor),
            Holder(sample_loc_tensor),
            Holder(sample_loc_mask_tensor),
            Holder(sample_pidx_tensor),
            np.uint64(seconds),
            np.int32(self.opt.NN),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
        # torch.cuda.synchronize()
        # print("point_xyz_w_tensor",point_xyz_w_tensor.shape)
        # queried_masked = point_xyz_w_tensor[0][sample_pidx_tensor.reshape(-1).to(torch.int64), :]
        # save_points(queried_masked.reshape(-1, 3), "./", "queried_pnts{}".format(self.count))
        # print("valid ray", torch.sum(torch.sum(sample_loc_mask_tensor, dim=-1) > 0))
        #
        # Drop rays that found no neighbors at all and compact once more.
        masked_valid_ray = torch.sum(sample_pidx_tensor.view(B, R, -1) >= 0, dim=-1) > 0
        R = torch.max(torch.sum(masked_valid_ray.to(torch.int32), dim=-1)).cpu().numpy()
        # Fold the second-stage validity back into the original-length ray mask.
        ray_mask_tensor.masked_scatter_(ray_mask_tensor, masked_valid_ray)
        sample_pidx_tensor = torch.masked_select(sample_pidx_tensor, masked_valid_ray[..., None, None].expand(-1, -1, SR, K)).reshape(B, R, SR, K)
        sample_loc_tensor = torch.masked_select(sample_loc_tensor, masked_valid_ray[..., None, None].expand(-1, -1, SR, 3)).reshape(B, R, SR, 3)
    # self.count+=1
    return sample_pidx_tensor, sample_loc_tensor, ray_mask_tensor.to(torch.int8)
def load_pnts(point_path, point_num):
    """Load a pickled point cloud and return up to ``point_num`` shuffled points.

    The pickle is expected to hold a dict with key ``"point_xyz"`` mapping to
    an (N, 3) numpy array; the array is shuffled in place before truncation.
    """
    with open(point_path, 'rb') as f:
        print("point_file_path################", point_path)
        all_infos = pickle.load(f)
        point_xyz = all_infos["point_xyz"]
        print(len(point_xyz), point_xyz.dtype, np.mean(point_xyz, axis=0), np.min(point_xyz, axis=0),
              np.max(point_xyz, axis=0))
        np.random.shuffle(point_xyz)
        keep = min(len(point_xyz), point_num)
        return point_xyz[:keep, :]
def np_to_gpuarray(*args):
    """Upload every numpy-array argument to the GPU via pycuda.

    Non-array arguments are printed and silently omitted from the result,
    so the returned list may be shorter than ``args``.
    """
    uploaded = []
    for item in args:
        if isinstance(item, np.ndarray):
            uploaded.append(pycuda.gpuarray.to_gpu(item))
        else:
            print("trans", item)
    return uploaded
def save_points(xyz, dir, filename):
    """Write a point array (numpy or torch) to ``dir/filename.txt``.

    Points are flattened to rows of their last-dimension size and written
    semicolon-delimited; the directory is created if missing.
    """
    if xyz.ndim < 3:
        xyz = xyz[None, ...]
    os.makedirs(dir, exist_ok=True)
    filepath = os.path.join(dir, "{}.txt".format(filename))
    print("save at {}".format(filepath))
    flat = xyz.cpu() if torch.is_tensor(xyz) else xyz
    np.savetxt(filepath, flat.reshape(-1, xyz.shape[-1]), delimiter=";")
def try_build(ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj,
              radius_limit, depth_limit, near_depth, far_depth, shading_count, split=None, imgidx=0, gpu=0, NN=2):
    """Run one end-to-end voxel query against a NeRF-synthetic scene.

    Loads the COLMAP point cloud and blender camera data for ``obj``, builds a
    ``lighting_fast_querier`` from the given voxel/query options, and calls
    ``query_points`` for the sampled ``pixel_idx`` rays of pose 0. Debug/visual
    side effects only; returns None.

    Fix: the original default ``split=["train"]`` was a shared mutable list
    (classic Python pitfall); replaced with a None sentinel, same behavior.
    """
    split = ["train"] if split is None else split
    # point_path = os.path.join(point_dir, point_file)
    # point_xyz = load_pnts(point_path, 819200000) # 81920 233872
    point_xyz = load_init_points(obj)
    imgs, poses, _, hwf, _, intrinsic = load_blender_data(
        os.path.expandvars("${nrDataRoot}") + "/nerf/nerf_synthetic/{}".format(obj), split, half_res=False, testskip=1)
    H, W, focal = hwf
    # Rebuild the pinhole intrinsics from focal length with a centered principal point.
    intrinsic = np.array([[focal, 0, W / 2], [0, focal, H / 2], [0, 0, 1]])
    plt.figure()
    plt.imshow(imgs[imgidx])
    point_xyz_w_tensor = torch.as_tensor(point_xyz, device="cuda:{}".format(gpu))[None, ...]
    print("point_xyz_w_tensor", point_xyz_w_tensor[0].shape, torch.min(point_xyz_w_tensor[0], dim=0)[0], torch.max(point_xyz_w_tensor[0], dim=0)[0])
    # plt.show()
    actual_numpoints_tensor = torch.ones([1], device=point_xyz_w_tensor.device, dtype=torch.int32) * len(point_xyz_w_tensor[0])
    # range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, kernel_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, kernel_size)
    pixel_idx_tensor = torch.as_tensor(pixel_idx, device=point_xyz_w_tensor.device, dtype=torch.int32)[None, ...]
    c2w = poses[0]
    print("c2w", c2w.shape, pixel_idx.shape)
    from data.data_utils import get_dtu_raydir
    cam_pos, camrot = c2w[:3, 3], c2w[:3, :3]
    # World-space ray directions for the sampled pixels, plus the camera position.
    ray_dirs_tensor, cam_pos_tensor = torch.as_tensor(get_dtu_raydir(pixel_idx, intrinsic, camrot, True), device=pixel_idx_tensor.device, dtype=torch.float32), torch.as_tensor(cam_pos, device=pixel_idx_tensor.device, dtype=torch.float32)
    from collections import namedtuple
    # Minimal stand-in for the argparse options object the querier expects.
    opt_construct = namedtuple('opt', 'inverse vsize vscale kernel_size radius_limit_scale depth_limit_scale max_o P SR K gpu_maxthr NN ranges z_depth_dim')
    opt = opt_construct(inverse=0, vscale=vscale, vsize=vsize, kernel_size=kernel_size, radius_limit_scale=0, depth_limit_scale=0, max_o=max_o, P=P, SR=SR, K=K, gpu_maxthr=1024, NN=NN, ranges=ranges, z_depth_dim=400)
    querier = lighting_fast_querier(point_xyz_w_tensor.device, opt)
    print("actual_numpoints_tensor", actual_numpoints_tensor)
    querier.query_points(pixel_idx_tensor, None, point_xyz_w_tensor, actual_numpoints_tensor, H, W, intrinsic, near_depth, far_depth, ray_dirs_tensor[None, ...], cam_pos_tensor[None, ...])
def w2img(point_xyz, transform_matrix, focal):
    """Project world-space points into the camera and plot a coverage image.

    ``transform_matrix`` holds a 3x3 rotation (world-to-camera) and a camera
    position in its last column. Returns per-point (xper, yper, depth) where
    xper/yper are perspective-divided camera coordinates and depth = -z_cam.
    Pixel coordinates assume an 800x800 image centered at (400, 400).
    """
    camrot = transform_matrix[:3, :3]  # world-to-camera rotation
    campos = transform_matrix[:3, 3]
    shifted = point_xyz - campos[None, :]
    # Rotate the shifted points into camera space.
    cam_xyz = np.sum(camrot[None, ...] * shifted[:, :, None], axis=-2)
    xper = cam_xyz[:, 0] / -cam_xyz[:, 2]
    yper = cam_xyz[:, 1] / cam_xyz[:, 2]
    x_pixel = np.round(xper * focal + 400).astype(np.int32)
    y_pixel = np.round(yper * focal + 400).astype(np.int32)
    print("focal", focal, np.tan(.5 * 0.6911112070083618))
    print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel), np.min(y_pixel))
    print("per xmax xmin:", np.max(xper), np.min(xper), "per ymax ymin:", np.max(yper), np.min(yper), "per zmax zmin:",
          np.max(cam_xyz[:, 2]), np.min(cam_xyz[:, 2]))
    print("min perx", -400 / focal, "max perx", 400 / focal)
    canvas = np.ones([800, 800, 3], dtype=np.float32)
    canvas[y_pixel, x_pixel, :] = .2
    plt.figure()
    plt.imshow(canvas)
    return np.stack([xper, yper, -cam_xyz[:, 2]], axis=-1)
def render_mask_pers_points(queried_point_xyz, vsize, ranges, w, h):
    """Scatter queried points onto an h x w image and display it.

    Points are binned into pixels via their first two coordinates using the
    grid origin ``ranges`` and cell size ``vsize``; hit pixels are dimmed.
    """
    grid_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32)
    print(grid_inds.shape)
    canvas = np.ones([h, w, 3], dtype=np.float32)
    canvas[grid_inds[:, 1], grid_inds[:, 0], :] = .5
    plt.figure()
    plt.imshow(canvas)
def save_mask_pers_points(queried_point_xyz, vsize, ranges, w, h):
    """Rasterize queried points into an h x w mask image and return it.

    Points are binned into pixels via their first two coordinates using the
    grid origin ``ranges`` and cell size ``vsize``; hit pixels get value 0.5
    on a white (1.0) background.

    Fix: the original body referenced ``self.opt`` and ``opt`` -- names that
    do not exist in this free function -- so every call raised NameError, and
    it never actually saved anything. Those dead lines are removed; the mask
    image is now returned so the caller can save or inspect it.
    """
    pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32)
    print(pixel_xy_inds.shape)
    y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0]
    background = np.ones([h, w, 3], dtype=np.float32)
    background[y_pixel, x_pixel, :] = .5
    return background
def render_pixel_mask(pixel_xy_inds, w, h):
    """Display an h x w image with the sampled ray pixels blacked out.

    ``pixel_xy_inds`` is (1, N, 2) holding (x, y) integer pixel coordinates.
    """
    xs, ys = pixel_xy_inds[0, :, 0], pixel_xy_inds[0, :, 1]
    canvas = np.ones([h, w, 3], dtype=np.float32)
    canvas[ys, xs, :] = .0
    plt.figure()
    plt.imshow(canvas)
def vis_vox(ranges_np, scaled_vsize_np, coor_2_occ_tensor):
    """Export the world-space centers of occupied voxels to a text file.

    A voxel counts as occupied when its ``coor_2_occ`` entry is > 0; centers
    are written via ``save_points`` to ./occ_xyz.txt for inspection.
    """
    print("ranges_np", ranges_np, scaled_vsize_np)
    occupied = coor_2_occ_tensor.cpu().numpy() > 0
    xdim, ydim, zdim = coor_2_occ_tensor.shape[1:]
    gx, gy, gz = np.meshgrid(np.arange(0, xdim), np.arange(0, ydim), np.arange(0, zdim), indexing='ij')
    centers = np.stack([gx, gy, gz], axis=-1).reshape(-1, 3).astype(np.float32)
    # Voxel index -> world-space cell center.
    centers = ranges_np[None, :3] + (centers + 0.5) * scaled_vsize_np[None, :]
    centers = centers[occupied.reshape(-1)]
    save_points(centers, "./", "occ_xyz")
    print(centers.shape)
def save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor, pixel_idx_cur_tensor, vdim, vsize, ranges):
    """Visualize which neural points were selected for the sampled rays.

    Gathers the valid (> -1) point indices from ``sample_pidx_tensor``,
    renders the sampled-pixel mask and the perspective point mask, then
    shows both figures.
    """
    B, R, SR, K = sample_pidx_tensor.shape
    # pixel_inds = torch.as_tensor([3210, 3217,3218,3219,3220, 3221,3222,3223,3224,3225,3226,3227,3228,3229,3230, 3231,3232,3233,3234,3235, 3236,3237,3238,3239,3240], device=sample_pidx_tensor.device, dtype=torch.int64)
    point_inds = sample_pidx_tensor[0, :, :, :]
    valid = point_inds > -1
    point_inds = torch.masked_select(point_inds, valid).to(torch.int64)
    queried_point_xyz_tensor = point_xyz_tensor[0, point_inds, :]
    queried_point_xyz = queried_point_xyz_tensor.cpu().numpy()
    print("queried_point_xyz.shape", B, R, SR, K, point_inds.shape, queried_point_xyz_tensor.shape,
          queried_point_xyz.shape)
    print("pixel_idx_cur_tensor", pixel_idx_cur_tensor.shape)
    render_pixel_mask(pixel_idx_cur_tensor.cpu().numpy(), vdim[0], vdim[1])
    render_mask_pers_points(point_xyz_pers_tensor[0, point_inds, :].cpu().numpy(), vsize, ranges, vdim[0], vdim[1])
    plt.show()
def load_init_points(scan, data_dir="/home/xharlie/user_space/data/nrData/nerf/nerf_synthetic_colmap"):
    """Load the COLMAP-fused PLY point cloud for ``scan``.

    Reads x/y/z properties from the first PLY element and returns an
    (N, 3) float32 tensor on the default CUDA device.
    """
    points_path = os.path.join(data_dir, scan, "colmap_results/dense/fused.ply")
    # points_path = os.path.join(self.data_dir, self.scan, "exported/pcd_te_1_vs_0.01_jit.ply")
    assert os.path.exists(points_path)
    from plyfile import PlyData, PlyElement
    plydata = PlyData.read(points_path)
    # plydata (PlyProperty('x', 'double'), PlyProperty('y', 'double'), PlyProperty('z', 'double'), ...)
    print("plydata", plydata.elements[0])
    coords = [
        torch.as_tensor(plydata.elements[0].data[axis].astype(np.float32), device="cuda", dtype=torch.float32)
        for axis in ("x", "y", "z")
    ]
    return torch.stack(coords, dim=-1).to(torch.float32)
if __name__ == "__main__":
    # Stand-alone smoke test: voxel-query the NeRF-synthetic "lego" scene
    # with 2048 randomly sampled rays on an 800x800 image plane.
    obj = "lego"
    # point_file = "{}.pkl".format(obj)
    # point_dir = os.path.expandvars("${nrDataRoot}/nerf/nerf_synthetic_points/")
    r = 0.36000002589322094
    ranges = np.array([-1., -1.3, -1.2, 1., 1.3, 1.2], dtype=np.float32)  # scene AABB: xyz min then xyz max
    vdim = np.array([400, 400, 400], dtype=np.int32)  # voxel grid resolution
    # vsize = np.array([2 * r / vdim[0], 2 * r / vdim[1], 4. / vdim[2]], dtype=np.float32)
    vsize = np.array([0.005, 0.005, 0.005], dtype=np.float32)  # voxel edge lengths
    vscale = np.array([2, 2, 2], dtype=np.int32)  # downscale factor for the query grid
    SR = 24  # max shading samples kept per ray
    P = 128  # max points stored per occupied voxel
    K = 8  # neighbors gathered per shading location
    NN = 2
    ray_num = 2048
    kernel_size = np.array([5, 5, 5], dtype=np.int32)  # occupancy dilation window
    radius_limit = 0 # r / 400 * 5 #r / 400 * 5
    depth_limit = 0 # 4. / 400 * 1.5 # r / 400 * 2
    max_o = 500000  # max occupied voxels per frustum
    near_depth, far_depth = 2., 6.
    shading_count = 400
    # Randomly pick ray_num distinct pixels out of the full 800x800 grid.
    xrange = np.arange(0, 800, 1, dtype=np.int32)
    yrange = np.arange(0, 800, 1, dtype=np.int32)
    xv, yv = np.meshgrid(xrange, yrange, sparse=False, indexing='ij')
    inds = np.arange(len(xv.reshape(-1)), dtype=np.int32)
    np.random.shuffle(inds)
    inds = inds[:ray_num, ...]
    pixel_idx = np.stack([xv, yv], axis=-1).reshape(-1, 2)[inds] # 20000 * 2
    gpu = 0
    imgidx = 3
    split = ["train"]
    # NOTE(review): gpu < 0 taking the pycuda.autoinit path looks inverted
    # (a negative id usually means "no specific device") -- confirm intent.
    if gpu < 0:
        import pycuda.autoinit
    else:
        # Explicitly create a CUDA context on the chosen device.
        drv.init()
        dev1 = drv.Device(gpu)
        ctx1 = dev1.make_context()
    try_build(ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj,
              radius_limit, depth_limit, near_depth, far_depth, shading_count, split=split, imgidx=imgidx, gpu=0, NN=NN)
pointnerf | pointnerf-master/models/neural_points/neural_points.py | import torch
import torch.nn as nn
from data.load_blender import load_blender_cloud
import numpy as np
from ..helpers.networks import init_seq, positional_encoding
import matplotlib.pyplot as plt
import torch.nn.utils.prune as prune_param
class NeuralPoints(nn.Module):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.add_argument('--load_points',
type=int,
default=0,
help='normalize the ray_dir to unit length or not, default not')
parser.add_argument('--point_noise',
type=str,
default="",
help='pointgaussian_0.1 | pointuniform_0.1')
parser.add_argument('--num_point',
type=int,
default=8192,
help='normalize the ray_dir to unit length or not, default not')
parser.add_argument('--construct_res',
type=int,
default=0,
help='normalize the ray_dir to unit length or not, default not')
parser.add_argument('--grid_res',
type=int,
default=0,
help='normalize the ray_dir to unit length or not, default not')
parser.add_argument('--cloud_path',
type=str,
default="",
help='normalize the ray_dir to unit length or not, default not')
parser.add_argument('--shpnt_jitter',
type=str,
default="passfunc",
help='passfunc | uniform | gaussian')
parser.add_argument('--point_features_dim',
type=int,
default=64,
help='number of coarse samples')
parser.add_argument('--gpu_maxthr',
type=int,
default=1024,
help='number of coarse samples')
parser.add_argument('--z_depth_dim',
type=int,
default=400,
help='number of coarse samples')
parser.add_argument('--SR',
type=int,
default=24,
help='max shading points number each ray')
parser.add_argument('--K',
type=int,
default=32,
help='max neural points each group')
parser.add_argument('--max_o',
type=int,
default=None,
help='max nonempty voxels stored each frustum')
parser.add_argument('--P',
type=int,
default=16,
help='max neural points stored each block')
parser.add_argument('--NN',
type=int,
default=0,
help='0: radius search | 1: K-NN after radius search | 2: K-NN world coord after pers radius search')
parser.add_argument('--radius_limit_scale',
type=float,
default=5.0,
help='max neural points stored each block')
parser.add_argument('--depth_limit_scale',
type=float,
default=1.3,
help='max neural points stored each block')
parser.add_argument('--default_conf',
type=float,
default=-1.0,
help='max neural points stored each block')
parser.add_argument(
'--vscale',
type=int,
nargs='+',
default=(2, 2, 1),
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--kernel_size',
type=int,
nargs='+',
default=(7, 7, 1),
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--query_size',
type=int,
nargs='+',
default=(0, 0, 0),
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--xyz_grad',
type=int,
default=0,
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--feat_grad',
type=int,
default=1,
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--conf_grad',
type=int,
default=1,
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--color_grad',
type=int,
default=1,
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--dir_grad',
type=int,
default=1,
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--feedforward',
type=int,
default=0,
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--inverse',
type=int,
default=0,
help=
'1 for 1/n depth sweep'
)
parser.add_argument(
'--point_conf_mode',
type=str,
default="0",
help=
'0 for only at features, 1 for multi at weight'
)
parser.add_argument(
'--point_color_mode',
type=str,
default="0",
help=
'0 for only at features, 1 for multi at weight'
)
parser.add_argument(
'--point_dir_mode',
type=str,
default="0",
help=
'0 for only at features, 1 for multi at weight'
)
parser.add_argument(
'--vsize',
type=float,
nargs='+',
default=(0.005, 0.005, 0.005),
help=
'vscale is the block size that store several voxels'
)
parser.add_argument(
'--wcoord_query',
type=int,
default="0",
help=
'0 for perspective voxels, and 1 for world coord, -1 for world coord and using pytorch cuda'
)
parser.add_argument(
'--ranges',
type=float,
nargs='+',
default=(-100.0, -100.0, -100.0, 100.0, 100.0, 100.0),
help='vscale is the block size that store several voxels'
)
def __init__(self, num_channels, size, opt, device, checkpoint=None, feature_init_method='rand', reg_weight=0., feedforward=0):
super().__init__()
assert isinstance(size, int), 'size must be int'
self.opt = opt
self.grid_vox_sz = 0
self.points_conf, self.points_dir, self.points_color, self.eulers, self.Rw2c = None, None, None, None, None
self.device=device
if self.opt.load_points ==1:
saved_features = None
if checkpoint:
saved_features = torch.load(checkpoint, map_location=device)
if saved_features is not None and "neural_points.xyz" in saved_features:
self.xyz = nn.Parameter(saved_features["neural_points.xyz"])
else:
point_xyz, _ = load_blender_cloud(self.opt.cloud_path, self.opt.num_point)
point_xyz = torch.as_tensor(point_xyz, device=device, dtype=torch.float32)
if len(opt.point_noise) > 0:
spl = opt.point_noise.split("_")
if float(spl[1]) > 0.0:
func = getattr(self, spl[0], None)
point_xyz = func(point_xyz, float(spl[1]))
print("point_xyz shape after jittering: ", point_xyz.shape)
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& Loaded blender cloud ', self.opt.cloud_path, self.opt.num_point, point_xyz.shape)
# filepath = "./aaaaaaaaaaaaa_cloud.txt"
# np.savetxt(filepath, self.xyz.reshape(-1, 3).detach().cpu().numpy(), delimiter=";")
if self.opt.construct_res > 0:
point_xyz, sparse_grid_idx, self.full_grid_idx = self.construct_grid_points(point_xyz)
self.xyz = nn.Parameter(point_xyz)
# filepath = "./grid_cloud.txt"
# np.savetxt(filepath, point_xyz.reshape(-1, 3).detach().cpu().numpy(), delimiter=";")
# print("max counts", torch.max(torch.unique(point_xyz, return_counts=True, dim=0)[1]))
print("point_xyz", point_xyz.shape)
self.xyz.requires_grad = opt.xyz_grad > 0
shape = 1, self.xyz.shape[0], num_channels
# filepath = "./aaaaaaaaaaaaa_cloud.txt"
# np.savetxt(filepath, self.xyz.reshape(-1, 3).detach().cpu().numpy(), delimiter=";")
if checkpoint:
self.points_embeding = nn.Parameter(saved_features["neural_points.points_embeding"]) if "neural_points.points_embeding" in saved_features else None
print("self.points_embeding", self.points_embeding.shape)
# points_conf = saved_features["neural_points.points_conf"] if "neural_points.points_conf" in saved_features else None
# if self.opt.default_conf > 0.0 and points_conf is not None:
# points_conf = torch.ones_like(points_conf) * self.opt.default_conf
# self.points_conf = nn.Parameter(points_conf) if points_conf is not None else None
self.points_conf = nn.Parameter(saved_features["neural_points.points_conf"]) if "neural_points.points_conf" in saved_features else None
# print("self.points_conf",self.points_conf)
self.points_dir = nn.Parameter(saved_features["neural_points.points_dir"]) if "neural_points.points_dir" in saved_features else None
self.points_color = nn.Parameter(saved_features["neural_points.points_color"]) if "neural_points.points_color" in saved_features else None
self.eulers = nn.Parameter(saved_features["neural_points.eulers"]) if "neural_points.eulers" in saved_features else None
self.Rw2c = nn.Parameter(saved_features["neural_points.Rw2c"]) if "neural_points.Rw2c" in saved_features else torch.eye(3, device=self.xyz.device, dtype=self.xyz.dtype)
else:
if feature_init_method == 'rand':
points_embeding = torch.rand(shape, device=device, dtype=torch.float32) - 0.5
elif feature_init_method == 'zeros':
points_embeding = torch.zeros(shape, device=device, dtype=torch.float32)
elif feature_init_method == 'ones':
points_embeding = torch.ones(shape, device=device, dtype=torch.float32)
elif feature_init_method == 'pos':
if self.opt.point_features_dim > 3:
points_embeding = positional_encoding(point_xyz.reshape(shape[0], shape[1], 3), int(self.opt.point_features_dim / 6))
if int(self.opt.point_features_dim / 6) * 6 < self.opt.point_features_dim:
rand_embeding = torch.rand(shape[:-1] + (self.opt.point_features_dim - points_embeding.shape[-1],), device=device, dtype=torch.float32) - 0.5
print("points_embeding", points_embeding.shape, rand_embeding.shape)
points_embeding = torch.cat([points_embeding, rand_embeding], dim=-1)
else:
points_embeding = point_xyz.reshape(shape[0], shape[1], 3)
elif feature_init_method.startswith("gau"):
std = float(feature_init_method.split("_")[1])
zeros = torch.zeros(shape, device=device, dtype=torch.float32)
points_embeding = torch.normal(mean=zeros, std=std)
else:
raise ValueError(init_method)
self.points_embeding = nn.Parameter(points_embeding)
print("points_embeding init:", points_embeding.shape, torch.max(self.points_embeding), torch.min(self.points_embeding))
self.points_conf=torch.ones_like(self.points_embeding[...,0:1])
if self.points_embeding is not None:
self.points_embeding.requires_grad = opt.feat_grad > 0
if self.points_conf is not None:
self.points_conf.requires_grad = self.opt.conf_grad > 0
if self.points_dir is not None:
self.points_dir.requires_grad = self.opt.dir_grad > 0
if self.points_color is not None:
self.points_color.requires_grad = self.opt.color_grad > 0
if self.eulers is not None:
self.eulers.requires_grad = False
if self.Rw2c is not None:
self.Rw2c.requires_grad = False
self.reg_weight = reg_weight
self.opt.query_size = self.opt.kernel_size if self.opt.query_size[0] == 0 else self.opt.query_size
# self.lighting_fast_querier = lighting_fast_querier_w if self.opt.wcoord_query > 0 else lighting_fast_querier_p
if self.opt.wcoord_query == 0:
from .query_point_indices import lighting_fast_querier as lighting_fast_querier_p
self.lighting_fast_querier = lighting_fast_querier_p
elif self.opt.wcoord_query > 0:
from .query_point_indices_worldcoords import lighting_fast_querier as lighting_fast_querier_w
self.lighting_fast_querier = lighting_fast_querier_w
else:
from .point_query import lighting_fast_querier as lighting_fast_querier_cuda
self.lighting_fast_querier = lighting_fast_querier_cuda
self.querier = self.lighting_fast_querier(device, self.opt)
    def reset_querier(self):
        """Tear down the current querier and build a fresh one.

        clean_up() must run before the object is dropped so any GPU context
        it owns is released first; the statement order here matters.
        """
        self.querier.clean_up()
        del self.querier
        self.querier = self.lighting_fast_querier(self.device, self.opt)
    def prune(self, thresh):
        """Drop every point whose confidence falls below ``thresh``.

        All per-point buffers (positions, embeddings, conf, dir, color, and —
        when stored per point — eulers / Rw2c) are masked down along the point
        axis and re-wrapped as nn.Parameter with their original requires_grad
        settings restored.
        """
        # keep points with confidence >= thresh; points_conf is (1, N, 1)
        mask = self.points_conf[0,...,0] >= thresh
        self.xyz = nn.Parameter(self.xyz[mask, :])
        self.xyz.requires_grad = self.opt.xyz_grad > 0
        if self.points_embeding is not None:
            self.points_embeding = nn.Parameter(self.points_embeding[:, mask, :])
            self.points_embeding.requires_grad = self.opt.feat_grad > 0
        if self.points_conf is not None:
            self.points_conf = nn.Parameter(self.points_conf[:, mask, :])
            self.points_conf.requires_grad = self.opt.conf_grad > 0
        if self.points_dir is not None:
            self.points_dir = nn.Parameter(self.points_dir[:, mask, :])
            self.points_dir.requires_grad = self.opt.dir_grad > 0
        if self.points_color is not None:
            self.points_color = nn.Parameter(self.points_color[:, mask, :])
            self.points_color.requires_grad = self.opt.color_grad > 0
        # eulers / Rw2c are masked only when stored per point (extra leading dims)
        if self.eulers is not None and self.eulers.dim() > 1:
            self.eulers = nn.Parameter(self.eulers[mask, :])
            self.eulers.requires_grad = False
        if self.Rw2c is not None and self.Rw2c.dim() > 2:
            self.Rw2c = nn.Parameter(self.Rw2c[mask, :])
            self.Rw2c.requires_grad = False
        print("@@@@@@@@@ pruned {}/{}".format(torch.sum(mask==0), mask.shape[0]))
    def grow_points(self, add_xyz, add_embedding, add_color, add_dir, add_conf, add_eulers=None, add_Rw2c=None):
        """Append newly spawned points (and their attributes) to the cloud.

        The ``add_*`` tensors are per-point (no batch dim); a leading batch
        axis is inserted with ``[None, ...]`` before concatenating onto the
        stored (1, N, C) buffers.  requires_grad flags are restored after the
        buffers are re-wrapped as Parameters.
        """
        # print(self.xyz.shape, self.points_conf.shape, self.points_embeding.shape, self.points_dir.shape, self.points_color.shape)
        self.xyz = nn.Parameter(torch.cat([self.xyz, add_xyz], dim=0))
        self.xyz.requires_grad = self.opt.xyz_grad > 0
        if self.points_embeding is not None:
            self.points_embeding = nn.Parameter(torch.cat([self.points_embeding, add_embedding[None, ...]], dim=1))
            self.points_embeding.requires_grad = self.opt.feat_grad > 0
        if self.points_conf is not None:
            self.points_conf = nn.Parameter(torch.cat([self.points_conf, add_conf[None, ...]], dim=1))
            self.points_conf.requires_grad = self.opt.conf_grad > 0
        if self.points_dir is not None:
            self.points_dir = nn.Parameter(torch.cat([self.points_dir, add_dir[None, ...]], dim=1))
            self.points_dir.requires_grad = self.opt.dir_grad > 0
        if self.points_color is not None:
            self.points_color = nn.Parameter(torch.cat([self.points_color, add_color[None, ...]], dim=1))
            self.points_color.requires_grad = self.opt.color_grad > 0
        # NOTE(review): per-point eulers/Rw2c are concatenated along dim=1 here
        # while prune() masks them along dim 0 — confirm the intended layout.
        if self.eulers is not None and self.eulers.dim() > 1:
            self.eulers = nn.Parameter(torch.cat([self.eulers, add_eulers[None,...]], dim=1))
            self.eulers.requires_grad = False
        if self.Rw2c is not None and self.Rw2c.dim() > 2:
            self.Rw2c = nn.Parameter(torch.cat([self.Rw2c, add_Rw2c[None,...]], dim=1))
            self.Rw2c.requires_grad = False
    def set_points(self, points_xyz, points_embeding, points_color=None, points_dir=None, points_conf=None, parameter=False, Rw2c=None, eulers=None):
        """Install the point cloud buffers on this module.

        For each optional channel (conf / dir / color) the corresponding
        ``opt.point_*_mode`` string controls where it is stored: if it
        contains "0" the channel is prepended onto the embedding tensor; if it
        contains "1" it is kept as a separate attribute (both may apply).
        When ``parameter`` is True every buffer is wrapped in nn.Parameter
        with trainability taken from the ``opt.*_grad`` flags; otherwise the
        tensors are stored as-is.  A missing Rw2c defaults to the identity.
        """
        # truncate an over-wide embedding to the configured feature width
        if points_embeding.shape[-1] > self.opt.point_features_dim:
            points_embeding = points_embeding[..., :self.opt.point_features_dim]
        # optionally override per-point confidence with a constant in (0, 1]
        if self.opt.default_conf > 0.0 and self.opt.default_conf <= 1.0 and points_conf is not None:
            points_conf = torch.ones_like(points_conf) * self.opt.default_conf
        if parameter:
            self.xyz = nn.Parameter(points_xyz)
            self.xyz.requires_grad = self.opt.xyz_grad > 0
            if points_conf is not None:
                points_conf = nn.Parameter(points_conf)
                points_conf.requires_grad = self.opt.conf_grad > 0
                if "0" in list(self.opt.point_conf_mode):
                    points_embeding = torch.cat([points_conf, points_embeding], dim=-1)
                if "1" in list(self.opt.point_conf_mode):
                    self.points_conf = points_conf
            if points_dir is not None:
                points_dir = nn.Parameter(points_dir)
                points_dir.requires_grad = self.opt.dir_grad > 0
                if "0" in list(self.opt.point_dir_mode):
                    points_embeding = torch.cat([points_dir, points_embeding], dim=-1)
                if "1" in list(self.opt.point_dir_mode):
                    self.points_dir = points_dir
            if points_color is not None:
                points_color = nn.Parameter(points_color)
                points_color.requires_grad = self.opt.color_grad > 0
                if "0" in list(self.opt.point_color_mode):
                    points_embeding = torch.cat([points_color, points_embeding], dim=-1)
                if "1" in list(self.opt.point_color_mode):
                    self.points_color = points_color
            points_embeding = nn.Parameter(points_embeding)
            points_embeding.requires_grad = self.opt.feat_grad > 0
            self.points_embeding = points_embeding
            # print("self.points_embeding", self.points_embeding, self.points_color)
            # print("points_xyz", torch.min(points_xyz, dim=-2)[0], torch.max(points_xyz, dim=-2)[0])
        else:
            self.xyz = points_xyz
            if points_conf is not None:
                if "0" in list(self.opt.point_conf_mode):
                    points_embeding = torch.cat([points_conf, points_embeding], dim=-1)
                if "1" in list(self.opt.point_conf_mode):
                    self.points_conf = points_conf
            if points_dir is not None:
                if "0" in list(self.opt.point_dir_mode):
                    points_embeding = torch.cat([points_dir, points_embeding], dim=-1)
                if "1" in list(self.opt.point_dir_mode):
                    self.points_dir = points_dir
            if points_color is not None:
                if "0" in list(self.opt.point_color_mode):
                    points_embeding = torch.cat([points_color, points_embeding], dim=-1)
                if "1" in list(self.opt.point_color_mode):
                    self.points_color = points_color
            self.points_embeding = points_embeding
        if Rw2c is None:
            self.Rw2c = torch.eye(3, device=points_xyz.device, dtype=points_xyz.dtype)
        else:
            self.Rw2c = nn.Parameter(Rw2c)
            self.Rw2c.requires_grad = False
def editing_set_points(self, points_xyz, points_embeding, points_color=None, points_dir=None, points_conf=None,
parameter=False, Rw2c=None, eulers=None):
if self.opt.default_conf > 0.0 and self.opt.default_conf <= 1.0 and points_conf is not None:
points_conf = torch.ones_like(points_conf) * self.opt.default_conf
self.xyz = points_xyz
self.points_embeding = points_embeding
self.points_dir = points_dir
self.points_conf = points_conf
self.points_color = points_color
if Rw2c is None:
self.Rw2c = torch.eye(3, device=points_xyz.device, dtype=points_xyz.dtype)
else:
self.Rw2c = Rw2c
    def construct_grid_points(self, xyz):
        """Snap the input cloud onto a regular grid of resolution opt.grid_res.

        First finds the occupied cells of a coarse opt.construct_res grid,
        then expands each occupied coarse cell into its finer grid corners,
        dedupes them, and returns the corner positions together with their
        sparse indices and a dense (grid_res+1)^3 lookup table that maps a
        grid corner to its row in the sparse index list (-1 when unoccupied).
        Also records the space bounds and voxel sizes on self.
        """
        # --construct_res' '--grid_res',
        # cube bounds enlarged by 10% around the cloud's bounding box
        xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0]
        self.space_edge = torch.max(xyz_max - xyz_min) * 1.1
        xyz_mid = (xyz_max + xyz_min) / 2
        self.space_min = xyz_mid - self.space_edge / 2
        self.space_max = xyz_mid + self.space_edge / 2
        self.construct_vox_sz = self.space_edge / self.opt.construct_res
        self.grid_vox_sz = self.space_edge / self.opt.grid_res
        # occupied cells of the coarse construction grid
        xyz_shift = xyz - self.space_min[None, ...]
        construct_vox_idx = torch.unique(torch.floor(xyz_shift / self.construct_vox_sz[None, ...]).to(torch.int16), dim=0)
        # print("construct_grid_idx", construct_grid_idx.shape) torch.Size([7529, 3])
        # expand each occupied coarse cell into its (cg_ratio+1)^3 fine corners
        cg_ratio = int(self.opt.grid_res / self.opt.construct_res)
        gx = torch.arange(0, cg_ratio+1, device=construct_vox_idx.device, dtype=construct_vox_idx.dtype)
        gy = torch.arange(0, cg_ratio+1, device=construct_vox_idx.device, dtype=construct_vox_idx.dtype)
        gz = torch.arange(0, cg_ratio+1, device=construct_vox_idx.device, dtype=construct_vox_idx.dtype)
        gx, gy, gz = torch.meshgrid(gx, gy, gz)
        gxyz = torch.stack([gx, gy, gz], dim=-1).view(1, -1, 3)
        sparse_grid_idx = construct_vox_idx[:, None, :] * cg_ratio + gxyz
        # sparse_grid_idx.shape: ([7529, 9*9*9, 3]) -> ([4376896, 3])
        sparse_grid_idx = torch.unique(sparse_grid_idx.view(-1, 3), dim=0).to(torch.int64)
        full_grid_idx = torch.full([self.opt.grid_res+1,self.opt.grid_res+1,self.opt.grid_res+1], -1, device=xyz.device, dtype=torch.int32)
        # full_grid_idx.shape: ([401, 401, 401])
        full_grid_idx[sparse_grid_idx[...,0], sparse_grid_idx[...,1], sparse_grid_idx[...,2]] = torch.arange(0, sparse_grid_idx.shape[0], device=full_grid_idx.device, dtype=full_grid_idx.dtype)
        xyz = self.space_min[None, ...] + sparse_grid_idx * self.grid_vox_sz
        return xyz, sparse_grid_idx, full_grid_idx
def null_grad(self):
self.points_embeding.grad = None
self.xyz.grad = None
def reg_loss(self):
return self.reg_weight * torch.mean(torch.pow(self.points_embeding, 2))
    def pers2img(self, point_xyz_pers_tensor, pixel_id, pixel_idx_cur, ray_mask, sample_pidx, ranges, h, w, inputs):
        """Debug visualization: splat point embeddings back onto the image plane.

        Maps perspective-space point coordinates to pixel positions, paints
        the point embeddings (and the queried rays / sampled points) onto a
        blank canvas next to the ground-truth image, and shows both with
        matplotlib.  Blocking (plt.show) — for interactive inspection only.
        """
        # map perspective x/y into integer pixel coordinates, clamped to the image
        xper = point_xyz_pers_tensor[..., 0].cpu().numpy()
        yper = point_xyz_pers_tensor[..., 1].cpu().numpy()
        x_pixel = np.clip(np.round((xper-ranges[0]) * (w-1) / (ranges[3]-ranges[0])).astype(np.int32), 0, w-1)[0]
        y_pixel = np.clip(np.round((yper-ranges[1]) * (h-1) / (ranges[4]-ranges[1])).astype(np.int32), 0, h-1)[0]
        print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel),
              np.min(y_pixel), sample_pidx.shape,y_pixel.shape)
        # canvas with the point embeddings, the masked rays (red), and the
        # embeddings of the sampled points from the last batch entry
        background = np.zeros([h, w, 3], dtype=np.float32)
        background[y_pixel, x_pixel, :] = self.points_embeding.cpu().numpy()[0,...]
        background[pixel_idx_cur[0,...,1],pixel_idx_cur[0,...,0],0] = 1.0
        background[y_pixel[sample_pidx[-1]], x_pixel[sample_pidx[-1]], :] = self.points_embeding.cpu().numpy()[0,sample_pidx[-1]]
        # ground-truth pixels for the rays that survived the mask
        gtbackground = np.ones([h, w, 3], dtype=np.float32)
        gtbackground[pixel_idx_cur[0 ,..., 1], pixel_idx_cur[0 , ..., 0],:] = inputs["gt_image"].cpu().numpy()[0][ray_mask[0]>0]
        print("diff sum",np.sum(inputs["gt_image"].cpu().numpy()[0][ray_mask[0]>0]-self.points_embeding.cpu().numpy()[0,sample_pidx[...,1,0][-1]]))
        plt.figure()
        plt.imshow(background)
        plt.figure()
        plt.imshow(gtbackground)
        plt.show()
    def get_point_indices(self, inputs, cam_rot_tensor, cam_pos_tensor, pixel_idx_tensor, near_plane, far_plane, h, w, intrinsic, vox_query=False):
        """Find the neural points that influence each queried ray.

        Projects the cloud into the camera's perspective space and delegates
        the per-ray neighbour search to self.querier.  When ``vox_query`` is
        True, the returned indices are instead the 8 grid-corner indices
        around each shading location (see query_vox_grid).

        Returns (sample_pidx, sample_loc, ray_mask, point_xyz_pers,
        sample_loc_w, sample_ray_dirs, vsize); sample_pidx has shape
        (B, R, SR, K) with -1 marking empty slots.
        """
        point_xyz_pers_tensor = self.w2pers(self.xyz, cam_rot_tensor, cam_pos_tensor)
        actual_numpoints_tensor = torch.ones([point_xyz_pers_tensor.shape[0]], device=point_xyz_pers_tensor.device, dtype=torch.int32) * point_xyz_pers_tensor.shape[1]
        # print("pixel_idx_tensor", pixel_idx_tensor)
        # print("point_xyz_pers_tensor", point_xyz_pers_tensor.shape)
        # print("actual_numpoints_tensor", actual_numpoints_tensor.shape)
        # sample_pidx_tensor: B, R, SR, K
        ray_dirs_tensor = inputs["raydir"]
        # print("ray_dirs_tensor", ray_dirs_tensor.shape, self.xyz.shape)
        sample_pidx_tensor, sample_loc_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize, ranges = self.querier.query_points(pixel_idx_tensor, point_xyz_pers_tensor, self.xyz[None,...], actual_numpoints_tensor, h, w, intrinsic, near_plane, far_plane, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor)
        # print("ray_mask_tensor",ray_mask_tensor.shape)
        # self.pers2img(point_xyz_pers_tensor, pixel_idx_tensor.cpu().numpy(), pixel_idx_cur_tensor.cpu().numpy(), ray_mask_tensor.cpu().numpy(), sample_pidx_tensor.cpu().numpy(), ranges, h, w, inputs)
        B, _, SR, K = sample_pidx_tensor.shape
        if vox_query:
            if sample_pidx_tensor.shape[1] > 0:
                sample_pidx_tensor = self.query_vox_grid(sample_loc_w_tensor, self.full_grid_idx, self.space_min, self.grid_vox_sz)
            else:
                # no rays survived: return an empty (B, 0, SR, 8) index tensor
                sample_pidx_tensor = torch.zeros([B, 0, SR, 8], device=sample_pidx_tensor.device, dtype=sample_pidx_tensor.dtype)
        return sample_pidx_tensor, sample_loc_tensor, ray_mask_tensor, point_xyz_pers_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, vsize
    def query_vox_grid(self, sample_loc_w_tensor, full_grid_idx, space_min, grid_vox_sz):
        """Look up the 8 grid-corner point indices surrounding each sample.

        For every world-space shading location, finds its containing voxel
        and returns the sparse indices of the voxel's 8 corners from
        ``full_grid_idx`` (built by construct_grid_points).  A sample whose
        voxel is out of bounds, or any of whose corners is unoccupied (-1),
        gets all 8 slots set to -1.  Result shape: (B, R, SR, 8), int64.
        """
        # sample_pidx_tensor = torch.full(sample_loc_w_tensor.shape[:-1]+(8,), -1, device=sample_loc_w_tensor.device, dtype=torch.int64)
        B, R, SR, _ = sample_loc_w_tensor.shape
        vox_ind = torch.floor((sample_loc_w_tensor - space_min[None, None, None, :]) / grid_vox_sz).to(torch.int64)  # B, R, SR, 3
        # offsets of the 8 corners of a unit voxel
        shift = torch.as_tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.int64, device=full_grid_idx.device).reshape(1, 1, 1, 8, 3)
        vox_ind = vox_ind[..., None, :] + shift  # B, R, SR, 8, 3
        # mark samples whose voxel (any corner) falls outside the grid
        vox_mask = torch.any(torch.logical_or(vox_ind < 0, vox_ind > self.opt.grid_res).view(B, R, SR, -1), dim=3)
        vox_ind = torch.clamp(vox_ind, min=0, max=self.opt.grid_res).view(-1, 3)
        inds = full_grid_idx[vox_ind[..., 0], vox_ind[..., 1], vox_ind[..., 2]].view(B, R, SR, 8)
        inds[vox_mask, :] = -1
        # -1 for all 8 corners
        inds[torch.any(inds < 0, dim=-1), :] = -1
        return inds.to(torch.int64)
# def w2pers(self, point_xyz, camrotc2w, campos):
# point_xyz_shift = point_xyz[None, ...] - campos[:, None, :]
# xyz = torch.sum(camrotc2w[:, None, :, :] * point_xyz_shift[:, :, :, None], dim=-2)
# # print(xyz.shape, (point_xyz_shift[:, None, :] * camrot.T).shape)
# xper = xyz[:, :, 0] / -xyz[:, :, 2]
# yper = xyz[:, :, 1] / xyz[:, :, 2]
# return torch.stack([xper, yper, -xyz[:, :, 2]], dim=-1)
def w2pers(self, point_xyz, camrotc2w, campos):
point_xyz_shift = point_xyz[None, ...] - campos[:, None, :]
xyz = torch.sum(camrotc2w[:, None, :, :] * point_xyz_shift[:, :, :, None], dim=-2)
# print(xyz.shape, (point_xyz_shift[:, None, :] * camrot.T).shape)
xper = xyz[:, :, 0] / xyz[:, :, 2]
yper = xyz[:, :, 1] / xyz[:, :, 2]
return torch.stack([xper, yper, xyz[:, :, 2]], dim=-1)
def vect2euler(self, xyz):
yz_norm = torch.norm(xyz[...,1:3], dim=-1)
e_x = torch.atan2(-xyz[...,1], xyz[...,2])
e_y = torch.atan2(xyz[...,0], yz_norm)
e_z = torch.zeros_like(e_y)
e_xyz = torch.stack([e_x, e_y, e_z], dim=-1)
return e_xyz
    def euler2Rc2w(self, e_xyz):
        """Build camera-to-world rotation matrices from Euler angles.

        ``e_xyz`` holds per-axis rotation angles (radians) in its last dim;
        the rows r1..r3 are a composed rotation written out term by term
        (the variable name suggests a Z-Y-X composition — if this is changed,
        keep it the exact inverse of euler2Rw2c, which negates the angles).
        Returns matrices with shape ``e_xyz.shape[:-1] + (3, 3)``.
        """
        cosxyz = torch.cos(e_xyz)
        sinxyz = torch.sin(e_xyz)
        # shared products of the trig terms
        cxsz = cosxyz[...,0]*sinxyz[...,2]
        czsy = cosxyz[...,2]*sinxyz[...,1]
        sxsz = sinxyz[...,0]*sinxyz[...,2]
        r1 = torch.stack([cosxyz[...,1]*cosxyz[...,2], czsy*sinxyz[...,0] - cxsz, czsy*cosxyz[...,0] + sxsz], dim=-1)
        r2 = torch.stack([cosxyz[...,1]*sinxyz[...,2], cosxyz[...,0]*cosxyz[...,2] + sxsz*sinxyz[...,1], -cosxyz[...,2]*sinxyz[...,0] + cxsz * sinxyz[...,1]], dim=-1)
        r3 = torch.stack([-sinxyz[...,1], cosxyz[...,1]*sinxyz[...,0], cosxyz[...,0]*cosxyz[...,1]], dim=-1)
        Rzyx = torch.stack([r1, r2, r3], dim=-2)
        return Rzyx
    def euler2Rw2c(self, e_xyz):
        """Build world-to-camera rotation matrices from Euler angles.

        Uses the negated angles of ``e_xyz`` so the result is the inverse of
        the rotation built by euler2Rc2w for the same input.  Returns
        matrices with shape ``e_xyz.shape[:-1] + (3, 3)``.
        """
        c = torch.cos(-e_xyz)
        s = torch.sin(-e_xyz)
        r1 = torch.stack([c[...,1] * c[...,2], -s[...,2], c[...,2]*s[...,1]], dim=-1)
        r2 = torch.stack([s[...,0]*s[...,1] + c[...,0]*c[...,1]*s[...,2], c[...,0]*c[...,2], -c[...,1]*s[...,0]+c[...,0]*s[...,1]*s[...,2]], dim=-1)
        r3 = torch.stack([-c[...,0]*s[...,1]+c[...,1]*s[...,0]*s[...,2], c[...,2]*s[...,0], c[...,0]*c[...,1]+s[...,0]*s[...,1]*s[...,2]], dim=-1)
        Rxyz = torch.stack([r1, r2, r3], dim=-2)
        return Rxyz
def get_w2c(self, cam_xyz, Rw2c):
t = -Rw2c @ cam_xyz[..., None] # N, 3
M = torch.cat([Rw2c, t], dim=-1)
ones = torch.as_tensor([[[0, 0, 0, 1]]], device=M.device, dtype=M.dtype).expand(len(M),-1, -1)
return torch.cat([M, ones], dim=-2)
def get_c2w(self, cam_xyz, Rc2w):
M = torch.cat([Rc2w, cam_xyz[..., None]], dim=-1)
ones = torch.as_tensor([[[0, 0, 0, 1]]], device=M.device, dtype=M.dtype).expand(len(M),-1, -1)
return torch.cat([M, ones], dim=-2)
# def pers2w(self, point_xyz_pers, camrotc2w, campos):
# # point_xyz_pers B X M X 3
#
# x_pers = point_xyz_pers[..., 0] * point_xyz_pers[..., 2]
# y_pers = - point_xyz_pers[..., 1] * point_xyz_pers[..., 2]
# z_pers = - point_xyz_pers[..., 2]
# xyz_c = torch.stack([x_pers, y_pers, z_pers], dim=-1)
# xyz_w_shift = torch.sum(xyz_c[...,None,:] * camrotc2w, dim=-1)
# # print("point_xyz_pers[..., 0, 0]", point_xyz_pers[..., 0, 0].shape, point_xyz_pers[..., 0, 0])
# ray_dirs = xyz_w_shift / (torch.linalg.norm(xyz_w_shift, dim=-1, keepdims=True) + 1e-7)
#
# xyz_w = xyz_w_shift + campos[:, None, :]
# return xyz_w, ray_dirs
    def passfunc(self, input, vsize):
        """Identity jitter: return the points unchanged (second argument ignored).

        Selected by name via getattr alongside pointgaussian/pointuniform,
        so the unused second parameter must stay for a uniform signature.
        """
        return input
def pointgaussian(self, input, std):
M, C = input.shape
input = torch.normal(mean=input, std=std)
return input
def pointuniform(self, input, std):
M, C = input.shape
jitters = torch.rand([M, C], dtype=torch.float32, device=input.device) - 0.5
input = input + jitters * std * 2
return input
def pointuniformadd(self, input, std):
addinput = self.pointuniform(input, std)
return torch.cat([input,addinput], dim=0)
def pointuniformdouble(self, input, std):
input = self.pointuniform(torch.cat([input,input], dim=0), std)
return input
    def forward(self, inputs):
        """Gather the neural points that shade each ray in ``inputs``.

        Runs the ray/point query (get_point_indices), then gathers the
        per-point buffers for every (ray, shading point, neighbour) slot.
        ``sample_pidx`` entries of -1 are clamped to 0 before the gather;
        ``sample_pnt_mask`` records which slots were real.

        Returns a 14-tuple: (sampled_color, sampled_Rw2c, sampled_dir,
        sampled_conf, sampled_embedding, sampled xyz in perspective coords,
        sampled xyz in world coords, sample_pnt_mask, sample_loc,
        sample_loc_w, sample_ray_dirs, ray_mask, vsize, grid_vox_sz).
        """
        pixel_idx, camrotc2w, campos, near_plane, far_plane, h, w, intrinsic = inputs["pixel_idx"].to(torch.int32), inputs["camrotc2w"], inputs["campos"], inputs["near"], inputs["far"], inputs["h"], inputs["w"], inputs["intrinsic"]
        # 1, 294, 24, 32;   1, 294, 24;     1, 291, 2
        sample_pidx, sample_loc, ray_mask_tensor, point_xyz_pers_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, vsize = self.get_point_indices(inputs, camrotc2w, campos, pixel_idx, torch.min(near_plane).cpu().numpy(), torch.max(far_plane).cpu().numpy(), torch.max(h).cpu().numpy(), torch.max(w).cpu().numpy(), intrinsic.cpu().numpy()[0], vox_query=self.opt.NN<0)
        sample_pnt_mask = sample_pidx >= 0
        B, R, SR, K = sample_pidx.shape
        # clamp the -1 padding to index 0 so index_select is valid; the mask
        # above is what downstream code uses to ignore those slots
        sample_pidx = torch.clamp(sample_pidx, min=0).view(-1).long()
        # gather [world xyz | perspective xyz | embedding] in one pass, then
        # split the concatenation apart again in the return statement
        sampled_embedding = torch.index_select(torch.cat([self.xyz[None, ...], point_xyz_pers_tensor, self.points_embeding], dim=-1), 1, sample_pidx).view(B, R, SR, K, self.points_embeding.shape[2]+self.xyz.shape[1]*2)
        sampled_color = None if self.points_color is None else torch.index_select(self.points_color, 1, sample_pidx).view(B, R, SR, K, self.points_color.shape[2])
        sampled_dir = None if self.points_dir is None else torch.index_select(self.points_dir, 1, sample_pidx).view(B, R, SR, K, self.points_dir.shape[2])
        sampled_conf = None if self.points_conf is None else torch.index_select(self.points_conf, 1, sample_pidx).view(B, R, SR, K, self.points_conf.shape[2])
        # a 2-D Rw2c is a single shared rotation; otherwise gather per point
        sampled_Rw2c = self.Rw2c if self.Rw2c.dim() == 2 else torch.index_select(self.Rw2c, 0, sample_pidx).view(B, R, SR, K, self.Rw2c.shape[1], self.Rw2c.shape[2])
        return sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding[..., 6:], sampled_embedding[..., 3:6], sampled_embedding[..., :3], sample_pnt_mask, sample_loc, sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize, self.grid_vox_sz
pointnerf | pointnerf-master/models/neural_points/__init__.py | 0 | 0 | 0 | py | |
pointnerf | pointnerf-master/models/neural_points/query_point_indices.py | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import torch
import pickle
import time
# import cupy
# import open3d.ml.tf as ml3d
# import frnn
from data.load_blender import load_blender_data
# X = torch.cuda.FloatTensor(8)
class Holder(pycuda.driver.PointerHolderBase):
    """Adapter exposing a torch CUDA tensor's device pointer to pycuda.

    An instance can be passed to pycuda kernels wherever a device pointer is
    expected; the wrapped tensor is referenced so its storage stays alive for
    the Holder's lifetime.
    """
    def __init__(self, t):
        super(Holder, self).__init__()
        self.t = t  # keep the tensor alive while pycuda uses its memory
        self.gpudata = t.data_ptr()  # raw device address read by pycuda
    def get_pointer(self):
        # pycuda's PointerHolderBase protocol: return the device address
        return self.t.data_ptr()
class lighting_fast_querier():
    def __init__(self, device, opt):
        """Create a pycuda context on *device* and compile the query kernels.

        ``device`` is a torch.device with a valid ``.index``.  The context
        created here is owned by this object and must be released via
        clean_up().  build_cuda() (defined later in this class) returns the
        four compiled kernel handles bound below.
        """
        print("querier device", device, device.index)
        self.gpu = device.index
        self.opt = opt
        drv.init()
        # self.device = drv.Device(gpu)
        self.ctx = drv.Device(self.gpu).make_context()
        self.get_occ_vox, self.near_vox_full, self.insert_vox_points, self.query_along_ray = self.build_cuda()
        # nonzero => sample depth in 1/z (inverse-depth) space
        self.inverse = self.opt.inverse
    def clean_up(self):
        """Pop (release) the pycuda context created in __init__."""
        self.ctx.pop()
    def get_hyperparameters(self, h, w, intrinsic, near_depth, far_depth):
        """Derive the frustum-voxelization parameters for one camera view.

        Computes normalized image-plane extents from the 3x3 ``intrinsic``
        matrix, sizes a (w, h, z_depth_dim) voxel grid over the frustum
        (depth sampled in 1/z when self.inverse != 0), groups voxels into
        blocks by opt.vscale, uploads the constant arrays to the GPU via
        np_to_gpuarray, and returns everything the CUDA kernels need:
        (radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize,
        scaled_vdim, vscale, plus the GPU copies of ranges / scaled sizes /
        dims / vscale / kernel_size / query_size).
        """
        # print("h,w,focal,near,far", h.shape, w.shape, focal.shape, near_depth.shape, far_depth.shape)
        # x_r = w / 2 / focal
        # y_r = h / 2 / focal
        # ranges = np.array([-x_r, -y_r, near_depth, x_r, y_r, far_depth], dtype=np.float32)
        # vdim = np.array([h, w, self.opt.z_depth_dim], dtype=np.int32)
        # vsize = np.array([2 * x_r / vdim[0], 2 * y_r / vdim[1], z_r / vdim[2]], dtype=np.float32)
        # image-plane extents in normalized camera coords, from the principal
        # point (intrinsic[0,2], intrinsic[1,2]) and focal lengths
        x_rl, x_rh = -intrinsic[0, 2] / intrinsic[0, 0], (w - intrinsic[0, 2]) / intrinsic[0, 0]
        y_rl, y_rh = -intrinsic[1, 2] / intrinsic[1, 1], (h - intrinsic[1, 2]) / intrinsic[1, 1],
        z_r = (far_depth - near_depth) if self.inverse == 0 else (1.0 / near_depth - 1.0 / far_depth)
        # [-0.22929783 -0.1841962   2.125       0.21325193  0.17096843  4.525     ]
        ranges = np.array([x_rl, y_rl, near_depth, x_rh, y_rh, far_depth], dtype=np.float32) if self.inverse == 0 else np.array([x_rl, y_rl, 1.0 / far_depth, x_rh, y_rh, 1.0 / near_depth], dtype=np.float32)
        vdim = np.array([w, h, self.opt.z_depth_dim], dtype=np.int32)
        vsize = np.array([(x_rh - x_rl) / vdim[0], (y_rh - y_rl) / vdim[1], z_r / vdim[2]], dtype=np.float32)
        # coarser "block" grid: vscale voxels per block along each axis
        vscale = np.array(self.opt.vscale, dtype=np.int32)
        scaled_vdim = np.ceil(vdim / vscale).astype(np.int32)
        scaled_vsize = (vsize * vscale).astype(np.float32)
        range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, np.asarray(self.opt.kernel_size, dtype=np.int32), np.asarray(self.opt.query_size, dtype=np.int32))
        # search cut-offs scaled from the voxel size
        radius_limit, depth_limit = self.opt.radius_limit_scale * max(vsize[0], vsize[1]), self.opt.depth_limit_scale * vsize[2]
        return radius_limit.astype(np.float32), depth_limit.astype(np.float32), ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu
def query_points(self, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, h, w, intrinsic, near_depth, far_depth, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor):
    """Query neighbor point indices and sample locations along each ray.

    Recomputes the view's voxel-grid hyperparameters, runs the CUDA grid
    query, optionally jitters sample depths during training, and lifts the
    perspective-space sample locations back to world space.

    Returns (sample_pidx, sample_loc_pers, sample_loc_w, sample_ray_dirs,
    ray_mask, vsize, ranges).
    """
    # print("attr", hasattr(self, "h"), self.opt.feedforward)
    #
    # if not hasattr(self, "h") or self.opt.feedforward > 0 or self.vscale != self.opt.vscale or self.kernel_size != self.opt.kernel_size:
    #     radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = self.get_hyperparameters(h, w, intrinsic, near_depth, far_depth)
    #     if self.opt.feedforward==0:
    #         self.radius_limit, self.depth_limit, self.ranges, self.vsize, self.vdim, self.scaled_vsize, self.scaled_vdim, self.vscale, self.range_gpu, self.scaled_vsize_gpu, self.scaled_vdim_gpu, self.vscale_gpu, self.kernel_size_gpu, self.kernel_size, self.query_size_gpu = radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, self.opt.kernel_size, query_size_gpu
    #
    # else:
    #     radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = self.radius_limit, self.depth_limit, self.ranges, self.vsize, self.vdim, self.scaled_vsize, self.scaled_vdim, self.vscale, self.range_gpu, self.scaled_vsize_gpu, self.scaled_vdim_gpu, self.vscale_gpu, self.kernel_size_gpu, self.query_size_gpu
    # Hyperparameters are recomputed every call (the cached path above is disabled).
    radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = self.get_hyperparameters(h, w, intrinsic, near_depth, far_depth)
    # NOTE(review): `self.c` receives kernel_size_gpu here, while the disabled
    # variant above stored it as `self.kernel_size_gpu` — looks like a typo;
    # confirm nothing reads `self.c` before renaming.
    self.radius_limit, self.depth_limit, self.ranges, self.vsize, self.vdim, self.scaled_vsize, self.scaled_vdim, self.vscale, self.range_gpu, self.scaled_vsize_gpu, self.scaled_vdim_gpu, self.vscale_gpu, self.c, self.query_size_gpu = radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu
    sample_pidx_tensor, sample_loc_tensor, pixel_idx_cur_tensor, ray_mask_tensor = self.query_grid_point_index(h, w,pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, self.opt.SR, self.opt.K, ranges, scaled_vsize, scaled_vdim, vscale, self.opt.max_o, self.opt.P, radius_limit, depth_limit, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kMaxThreadsPerBlock=self.opt.gpu_maxthr)
    # Refresh the inverse-depth flag from options after the query.
    self.inverse = self.opt.inverse
    if self.opt.is_train:
        # Dispatch to the jitter method named by opt.shpnt_jitter ("gaussian"
        # or "uniform"). NOTE(review): if the attribute is missing, getattr
        # returns None and this line raises TypeError — confirm the option is
        # always one of the defined jitter names.
        sample_loc_tensor = getattr(self, self.opt.shpnt_jitter, None)(sample_loc_tensor, vsize)
    sample_loc_w_tensor, sample_ray_dirs_tensor = self.pers2w(sample_loc_tensor, cam_rot_tensor, cam_pos_tensor)
    return sample_pidx_tensor, sample_loc_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize, ranges
def pers2w(self, point_xyz_pers, camrotc2w, campos):
    """Lift perspective-space points to world space.

    Args:
        point_xyz_pers: (..., 3) tensor whose channels are (x/z, y/z, z)
            in camera space.
        camrotc2w: camera-to-world rotation matrices, broadcastable against
            the per-point camera coordinates (e.g. (B, 3, 3)).
        campos: (B, 3) camera positions in world space.

    Returns:
        xyz_w: world-space point positions.
        ray_dirs: unit-length directions from the camera to each point.
    """
    # Undo the perspective division: (x/z, y/z, z) -> (x, y, z).
    x_cam = point_xyz_pers[..., 0] * point_xyz_pers[..., 2]
    y_cam = point_xyz_pers[..., 1] * point_xyz_pers[..., 2]
    z_cam = point_xyz_pers[..., 2]
    xyz_c = torch.stack([x_cam, y_cam, z_cam], dim=-1)
    # Rotate camera coords into the world frame (row-wise dot products).
    xyz_w_shift = torch.sum(xyz_c[..., None, :] * camrotc2w, dim=-1)
    # Fix: `keepdim` is the documented torch.linalg.norm keyword; the original
    # `keepdims` relied on PyTorch's numpy-compat alias. Epsilon avoids a
    # divide-by-zero for degenerate points.
    ray_dirs = xyz_w_shift / (torch.linalg.norm(xyz_w_shift, dim=-1, keepdim=True) + 1e-7)
    xyz_w = xyz_w_shift + campos[:, None, :]
    return xyz_w, ray_dirs
def gaussian(self, input, vsize):
    """Jitter the depth channel of ray samples with clamped Gaussian noise.

    Noise has std vsize[2]/4 and is clamped to +/- vsize[2]/2 so a sample
    cannot leave its depth voxel. Modifies `input` in place and returns it.
    """
    B, R, SR, _ = input.shape
    zero_mean = torch.zeros([B, R, SR], dtype=torch.float32, device=input.device)
    std_map = torch.full([B, R, SR], vsize[2] / 4, dtype=torch.float32, device=input.device)
    noise = torch.normal(mean=zero_mean, std=std_map)
    half_voxel = vsize[2] / 2
    input[..., 2] += torch.clamp(noise, min=-half_voxel, max=half_voxel)
    return input
def uniform(self, input, vsize):
    """Jitter the depth channel uniformly within one voxel: U(-vsize[2]/2, vsize[2]/2).

    Modifies `input` in place and returns it.
    """
    B, R, SR, _ = input.shape
    offsets = torch.rand([B, R, SR], dtype=torch.float32, device=input.device) - 0.5
    input[..., 2] += offsets * vsize[2]
    return input
def build_cuda(self):
    """Compile the CUDA kernels used for voxel-grid neighbor queries.

    The kernel source is compiled once via pycuda's SourceModule; `KN` is
    baked in from ``self.opt.K`` (max neighbors per sample, used to size a
    per-thread buffer). Returns the four kernel handles
    (get_occ_vox, near_vox_full, insert_vox_points, query_along_ray).

    NOTE: the embedded CUDA string is part of runtime behavior and is kept
    byte-identical here.
    """
    mod = SourceModule(
        """
        #define KN """ + str(self.opt.K)
        + """
        #include <cuda.h>
        #include <cuda_runtime.h>
        #include <algorithm>
        #include <vector>
        #include <stdio.h>
        #include <math.h>
        #include <stdlib.h>
        #include <curand_kernel.h>
        namespace cuda {
            static __device__ inline uint8_t atomicAdd(uint8_t *address, uint8_t val) {
                size_t offset = (size_t)address & 3;
                uint32_t *address_as_ui = (uint32_t *)(address - offset);
                uint32_t old = *address_as_ui;
                uint32_t shift = offset * 8;
                uint32_t old_byte;
                uint32_t newval;
                uint32_t assumed;
                do {
                  assumed = old;
                  old_byte = (old >> shift) & 0xff;
                  // preserve size in initial cast. Casting directly to uint32_t pads
                  // negative signed values with 1's (e.g. signed -1 = unsigned ~0).
                  newval = static_cast<uint8_t>(val + old_byte);
                  newval = (old & ~(0x000000ff << shift)) | (newval << shift);
                  old = atomicCAS(address_as_ui, assumed, newval);
                } while (assumed != old);
                return __byte_perm(old, 0, offset); // need validate
            }
            static __device__ inline char atomicAdd(char* address, char val) {
                // offset, in bytes, of the char* address within the 32-bit address of the space that overlaps it
                size_t long_address_modulo = (size_t) address & 3;
                // the 32-bit address that overlaps the same memory
                auto* base_address = (unsigned int*) ((char*) address - long_address_modulo);
                // A 0x3210 selector in __byte_perm will simply select all four bytes in the first argument in the same order.
                // The "4" signifies the position where the first byte of the second argument will end up in the output.
                unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
                // for selecting bytes within a 32-bit chunk that correspond to the char* address (relative to base_address)
                unsigned int selector = selectors[long_address_modulo];
                unsigned int long_old, long_assumed, long_val, replacement;

                long_old = *base_address;

                do {
                    long_assumed = long_old;
                    // replace bits in long_old that pertain to the char address with those from val
                    long_val = __byte_perm(long_old, 0, long_address_modulo) + val;
                    replacement = __byte_perm(long_old, long_val, selector);
                    long_old = atomicCAS(base_address, long_assumed, replacement);
                } while (long_old != long_assumed);
                return __byte_perm(long_old, 0, long_address_modulo);
            }

            static __device__ inline int8_t atomicAdd(int8_t *address, int8_t val) {
                return (int8_t)cuda::atomicAdd((char*)address, (char)val);
            }

            static __device__ inline short atomicAdd(short* address, short val)
            {

                unsigned int *base_address = (unsigned int *)((size_t)address & ~2);
                unsigned int long_val = ((size_t)address & 2) ? ((unsigned int)val << 16) : (unsigned short)val;

                unsigned int long_old = ::atomicAdd(base_address, long_val);

                if((size_t)address & 2) {
                    return (short)(long_old >> 16);
                } else {

                    unsigned int overflow = ((long_old & 0xffff) + long_val) & 0xffff0000;

                    if (overflow)

                        atomicSub(base_address, overflow);

                    return (short)(long_old & 0xffff);
                }
            }

            static __device__ float cas(double *addr, double compare, double val) {
               unsigned long long int *address_as_ull = (unsigned long long int *) addr;
               return __longlong_as_double(atomicCAS(address_as_ull,
                                   __double_as_longlong(compare),
                                   __double_as_longlong(val)));
            }

            static __device__ float cas(float *addr, float compare, float val) {
                unsigned int *address_as_uint = (unsigned int *) addr;
                return __uint_as_float(atomicCAS(address_as_uint,
                                 __float_as_uint(compare),
                                 __float_as_uint(val)));
            }

            static __device__ inline uint8_t atomicCAS(uint8_t * const address, uint8_t const compare, uint8_t const value)
            {
                uint8_t const longAddressModulo = reinterpret_cast< size_t >( address ) & 0x3;
                uint32_t *const baseAddress  = reinterpret_cast< uint32_t * >( address - longAddressModulo );
                uint32_t constexpr byteSelection[] = { 0x3214, 0x3240, 0x3410, 0x4210 }; // The byte position we work on is '4'.
                uint32_t const byteSelector = byteSelection[ longAddressModulo ];
                uint32_t const longCompare = compare;
                uint32_t const longValue = value;
                uint32_t longOldValue = * baseAddress;
                uint32_t longAssumed;
                uint8_t oldValue;
                do {
                    // Select bytes from the old value and new value to construct a 32-bit value to use.
                    uint32_t const replacement = __byte_perm( longOldValue, longValue, byteSelector );
                    uint32_t const comparison = __byte_perm( longOldValue, longCompare, byteSelector );

                    longAssumed  = longOldValue;
                    // Use 32-bit atomicCAS() to try and set the 8-bits we care about.
                    longOldValue = ::atomicCAS( baseAddress, comparison, replacement );
                    // Grab the 8-bit portion we care about from the old value at address.
                    oldValue     = ( longOldValue >> ( 8 * longAddressModulo )) & 0xFF;
                } while ( compare == oldValue and longAssumed != longOldValue ); // Repeat until other three 8-bit values stabilize.
                return oldValue;
            }
        }

        extern "C" {
            __global__ void get_occ_vox(
                const float* in_data,   // B * N * 3
                const int* in_actual_numpoints, // B
                const int B,
                const int N,
                const float *d_coord_shift,     // 3
                const float *d_voxel_size,      // 3
                const int *d_grid_size,       // 3
                const int *kernel_size,       // 3
                const int pixel_size,
                const int grid_size_vol,
                uint8_t *coor_occ,  // B * 400 * 400 * 400
                int8_t *loc_coor_counter,  // B * 400 * 400 * 400
                int *near_depth_id_tensor,  // B * 400 * 400
                int *far_depth_id_tensor,  // B * 400 * 400
                const int inverse
            ) {
                int index =  blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
                int i_batch = index / N;  // index of batch
                if (i_batch >= B) { return; }
                int i_pt = index - N * i_batch;
                if (i_pt < in_actual_numpoints[i_batch]) {
                    int coor[3];
                    const float *p_pt = in_data + index * 3;
                    coor[0] = floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]);
                    if (coor[0] < 0 || coor[0] >= d_grid_size[0]) { return; }
                    coor[1] = floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]);
                    if (coor[1] < 0 || coor[1] >= d_grid_size[1]) { return; }
                    float z = p_pt[2];
                    if (inverse > 0){ z = 1.0 / z;}
                    coor[2] = floor((z - d_coord_shift[2]) / d_voxel_size[2]);
                    if (coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; }

                    int frust_id_b, coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
                    if (loc_coor_counter[coor_indx_b] < (int8_t)0 || cuda::atomicAdd(loc_coor_counter + coor_indx_b, (int8_t)-1) < (int8_t)0) { return; }

                    for (int coor_x = max(0, coor[0] - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], coor[0] + (kernel_size[0] + 1) / 2); coor_x++) {
                        for (int coor_y = max(0, coor[1] - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], coor[1] + (kernel_size[1] + 1) / 2); coor_y++) {
                            for (int coor_z = max(0, coor[2] - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], coor[2] + (kernel_size[2] + 1) / 2); coor_z++) {
                                frust_id_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y;
                                coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z;
                                if (coor_occ[coor_indx_b] > (uint8_t)0) { continue; }
                                cuda::atomicCAS(coor_occ + coor_indx_b, (uint8_t)0, (uint8_t)1);
                                atomicMin(near_depth_id_tensor + frust_id_b, coor_z);
                                atomicMax(far_depth_id_tensor + frust_id_b, coor_z);
                            }
                        }
                    }
                }
            }

            __global__ void near_vox_full(
                const int B,
                const int SR,
                const int *pixel_idx,
                const int R,
                const int *vscale,
                const int *d_grid_size,
                const int pixel_size,
                const int grid_size_vol,
                const int *kernel_size,       // 3
                uint8_t *pixel_map,
                int8_t *ray_mask,    // B * R
                const uint8_t *coor_occ,  // B * 400 * 400 * 400
                int8_t *loc_coor_counter,  // B * 400 * 400 * 400
                const int *near_depth_id_tensor,  // B * 400 * 400
                const int *far_depth_id_tensor,  // B * 400 * 400
                short *voxel_to_coorz_idx  // B * 400 * 400 * SR
            ) {
                int index =  blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
                int i_batch = index / R;  // index of batch
                if (i_batch >= B) { return; }
                int vx_id = pixel_idx[index*2] / vscale[0], vy_id = pixel_idx[index*2 + 1] / vscale[1];
                int i_xyvox_id = i_batch * pixel_size + vx_id * d_grid_size[1] + vy_id;
                int near_id = near_depth_id_tensor[i_xyvox_id], far_id = far_depth_id_tensor[i_xyvox_id];
                ray_mask[index] = far_id > 0 ? (int8_t)1 : (int8_t)0;
                if (pixel_map[i_xyvox_id] > (uint8_t)0 || cuda::atomicCAS(pixel_map + i_xyvox_id, (uint8_t)0, (uint8_t)1) > (uint8_t)0) { return; }
                int counter = 0;
                for (int depth_id = near_id; depth_id <= far_id; depth_id++) {
                    if (coor_occ[i_xyvox_id * d_grid_size[2] + depth_id] > (uint8_t)0) {
                        voxel_to_coorz_idx[i_xyvox_id * SR + counter] = (short)depth_id;
                        // if (i_xyvox_id>81920){
                        //     printf("  %d %d %d %d %d %d %d %d %d %d  ", pixel_idx[index*2], vscale[0], i_batch, vx_id, vy_id, i_xyvox_id * SR + counter, i_xyvox_id, SR, counter, d_grid_size[1]);
                        // }
                        for (int coor_x = max(0, vx_id - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], vx_id + (kernel_size[0] + 1) / 2); coor_x++) {
                            for (int coor_y = max(0, vy_id - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], vy_id + (kernel_size[1] + 1) / 2); coor_y++) {
                                for (int coor_z = max(0, depth_id - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], depth_id + (kernel_size[2] + 1) / 2); coor_z++) {
                                    int coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z;
                                    // cuda::atomicCAS(loc_coor_counter + coor_indx_b, (int8_t)-1, (int8_t)1);
                                    int8_t loc = loc_coor_counter[coor_indx_b];
                                    if (loc < (int8_t)0) {
                                        loc_coor_counter[coor_indx_b] = (int8_t)1;
                                    }
                                }
                            }
                        }
                        if (counter >= SR - 1) { return; }
                        counter += 1;
                    }
                }
            }

            __global__ void insert_vox_points(
                float* in_data,   // B * N * 3
                int* in_actual_numpoints, // B
                const int B,
                const int N,
                const int P,
                const int max_o,
                const int pixel_size,
                const int grid_size_vol,
                const float *d_coord_shift,     // 3
                const int *d_grid_size,
                const float *d_voxel_size,      // 3
                const int8_t *loc_coor_counter, // B * 400 * 400 * 400
                short *voxel_pnt_counter,      // B * 400 * 400 * max_o
                int *voxel_to_pntidx,       // B * pixel_size * max_o * P
                unsigned long seconds,
                const int inverse
            ) {
                int index = blockIdx.x * blockDim.x + threadIdx.x;  // index of gpu thread
                int i_batch = index / N;  // index of batch
                if (i_batch >= B) { return; }
                if (index - i_batch * N < in_actual_numpoints[i_batch]) {
                    const float *p_pt = in_data + index * 3;
                    int coor_x = (p_pt[0] - d_coord_shift[0]) / d_voxel_size[0];
                    int coor_y = (p_pt[1] - d_coord_shift[1]) / d_voxel_size[1];
                    float z = p_pt[2];
                    if (inverse > 0){ z = 1.0 / z;}
                    int coor_z = (z - d_coord_shift[2]) / d_voxel_size[2];
                    int pixel_indx_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y;
                    int coor_indx_b = pixel_indx_b * d_grid_size[2] + coor_z;
                    if (coor_x < 0 || coor_x >= d_grid_size[0] || coor_y < 0 || coor_y >= d_grid_size[1] || coor_z < 0 || coor_z >= d_grid_size[2] || loc_coor_counter[coor_indx_b] < (int8_t)0) { return; }
                    int voxel_indx_b = pixel_indx_b * max_o + (int)loc_coor_counter[coor_indx_b];
                    //printf("voxel_indx_b, %d  ||  ", voxel_indx_b);
                    int voxel_pntid = (int) cuda::atomicAdd(voxel_pnt_counter + voxel_indx_b, (short)1);
                    if (voxel_pntid < P) {
                        voxel_to_pntidx[voxel_indx_b * P + voxel_pntid] = index;
                    } else {
                        curandState state;
                        curand_init(index+seconds, 0, 0, &state);
                        int insrtidx = ceilf(curand_uniform(&state) * (voxel_pntid+1)) - 1;
                        if(insrtidx < P){
                            voxel_to_pntidx[voxel_indx_b * P + insrtidx] = index;
                        }
                    }
                }
            }

            __global__ void query_rand_along_ray(
                const float* in_data,   // B * N * 3
                const int B,
                const int SR,               // num. samples along each ray e.g., 128
                const int R,               // e.g., 1024
                const int max_o,
                const int P,
                const int K,                // num.  neighbors
                const int pixel_size,
                const int grid_size_vol,
                const float radius_limit2,
                const float depth_limit2,
                const float *d_coord_shift,     // 3
                const int *d_grid_size,
                const float *d_voxel_size,      // 3
                const float *d_ray_voxel_size,      // 3
                const int *vscale,      // 3
                const int *kernel_size,
                const int *pixel_idx,               // B * R * 2
                const int8_t *loc_coor_counter, // B * 400 * 400 * 400
                const short *voxel_to_coorz_idx, // B * 400 * 400 * SR
                const short *voxel_pnt_counter, // B * 400 * 400 * max_o
                const int *voxel_to_pntidx,  // B * pixel_size * max_o * P
                int *sample_pidx,       // B * R * SR * K
                float *sample_loc,       // B * R * SR * K
                unsigned long seconds,
                const int NN,
                const int inverse
            ) {
                int index =  blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
                int i_batch = index / (R * SR);  // index of batch
                int ray_idx_b = index / SR;
                if (i_batch >= B || ray_idx_b >= B * R) { return; }
                int ray_sample_loc_idx = index - ray_idx_b * SR;
                int frustx = pixel_idx[ray_idx_b * 2] / vscale[0];
                int frusty = pixel_idx[ray_idx_b * 2 + 1] / vscale[1];
                int vxy_ind_b = i_batch * pixel_size + frustx * d_grid_size[1] + frusty;
                int frustz = (int) voxel_to_coorz_idx[vxy_ind_b * SR + ray_sample_loc_idx];
                float centerx = d_coord_shift[0] + frustx * d_voxel_size[0] + (pixel_idx[ray_idx_b * 2] % vscale[0] + 0.5) * d_ray_voxel_size[0];
                float centery = d_coord_shift[1] + frusty * d_voxel_size[1] + (pixel_idx[ray_idx_b * 2 + 1] % vscale[1] + 0.5) * d_ray_voxel_size[1];
                float centerz = d_coord_shift[2] + (frustz + 0.5) * d_voxel_size[2];
                if (inverse > 0){ centerz = 1.0 / centerz;}
                sample_loc[index * 3] = centerx;
                sample_loc[index * 3 + 1] = centery;
                sample_loc[index * 3 + 2] = centerz;
                if (frustz < 0) { return; }
                int coor_indx_b = vxy_ind_b * d_grid_size[2] + frustz;
                int raysample_startid = index * K;
                int kid = 0;
                curandState state;
                for (int coor_x = max(0, frustx - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], frustx + (kernel_size[0] + 1) / 2); coor_x++) {
                    for (int coor_y = max(0, frusty - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], frusty + (kernel_size[1] + 1) / 2); coor_y++) {
                        int pixel_indx_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y;
                        for (int coor_z = max(0, frustz - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], frustz + (kernel_size[2] + 1) / 2); coor_z++) {
                            int shift_coor_indx_b = pixel_indx_b * d_grid_size[2] + coor_z;
                            if(loc_coor_counter[shift_coor_indx_b] < (int8_t)0) {continue;}
                            int voxel_indx_b = pixel_indx_b * max_o + (int)loc_coor_counter[shift_coor_indx_b];
                            for (int g = 0; g < min(P, (int) voxel_pnt_counter[voxel_indx_b]); g++) {
                                int pidx = voxel_to_pntidx[voxel_indx_b * P + g];
                                if ((radius_limit2 == 0 || (in_data[pidx*3]-centerx) * (in_data[pidx*3]-centerx) + (in_data[pidx*3 + 1]-centery) * (in_data[pidx*3 + 1]-centery) <= radius_limit2) && (depth_limit2==0 || (in_data[pidx*3 + 2]-centerz) * (in_data[pidx*3 + 2]-centerz) <= depth_limit2)) {
                                    if (kid++ < K) {
                                        sample_pidx[raysample_startid + kid - 1] = pidx;
                                    }
                                    else {
                                        curand_init(index+seconds, 0, 0, &state);
                                        int insrtidx = ceilf(curand_uniform(&state) * (kid)) - 1;
                                        if (insrtidx < K) {
                                            sample_pidx[raysample_startid + insrtidx] = pidx;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }

            __global__ void query_neigh_along_ray_layered(
                const float* in_data,   // B * N * 3
                const int B,
                const int SR,               // num. samples along each ray e.g., 128
                const int R,               // e.g., 1024
                const int max_o,
                const int P,
                const int K,                // num.  neighbors
                const int pixel_size,
                const int grid_size_vol,
                const float radius_limit2,
                const float depth_limit2,
                const float *d_coord_shift,     // 3
                const int *d_grid_size,
                const float *d_voxel_size,      // 3
                const float *d_ray_voxel_size,      // 3
                const int *vscale,      // 3
                const int *kernel_size,
                const int *pixel_idx,               // B * R * 2
                const int8_t *loc_coor_counter, // B * 400 * 400 * 400
                const short *voxel_to_coorz_idx, // B * 400 * 400 * SR
                const short *voxel_pnt_counter, // B * 400 * 400 * max_o
                const int *voxel_to_pntidx,  // B * pixel_size * max_o * P
                int *sample_pidx,       // B * R * SR * K
                float *sample_loc,       // B * R * SR * K
                unsigned long seconds,
                const int NN,
                const int inverse
            ) {
                int index =  blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
                int i_batch = index / (R * SR);  // index of batch
                int ray_idx_b = index / SR;
                if (i_batch >= B || ray_idx_b >= B * R) { return; }
                int ray_sample_loc_idx = index - ray_idx_b * SR;
                int frustx = pixel_idx[ray_idx_b * 2] / vscale[0];
                int frusty = pixel_idx[ray_idx_b * 2 + 1] / vscale[1];
                int vxy_ind_b = i_batch * pixel_size + frustx * d_grid_size[1] + frusty;
                int frustz = (int) voxel_to_coorz_idx[vxy_ind_b * SR + ray_sample_loc_idx];
                float centerx = d_coord_shift[0] + frustx * d_voxel_size[0] + (pixel_idx[ray_idx_b * 2] % vscale[0] + 0.5) * d_ray_voxel_size[0];
                float centery = d_coord_shift[1] + frusty * d_voxel_size[1] + (pixel_idx[ray_idx_b * 2 + 1] % vscale[1] + 0.5) * d_ray_voxel_size[1];
                float centerz = d_coord_shift[2] + (frustz + 0.5) * d_voxel_size[2];
                if (inverse > 0){ centerz = 1.0 / centerz;}
                sample_loc[index * 3] = centerx;
                sample_loc[index * 3 + 1] = centery;
                sample_loc[index * 3 + 2] = centerz;
                if (frustz < 0) { return; }
                // int coor_indx_b = vxy_ind_b * d_grid_size[2] + frustz;
                int raysample_startid = index * K;
                // curandState state;

                int kid = 0, far_ind = 0, coor_z, coor_y, coor_x;
                float far2 = 0.0;
                float xyz2Buffer[KN];
                for (int layer = 0; layer < (kernel_size[0]+1)/2; layer++){
                    int zlayer = min((kernel_size[2]+1)/2-1, layer);
                    for (int x = max(-frustx, -layer); x < min(d_grid_size[0] - frustx, layer+1); x++) {
                        for (int y = max(-frusty, -layer); y < min(d_grid_size[1] - frusty, layer+1); y++) {
                            coor_y = frusty + y;
                            coor_x = frustx + x;
                            int pixel_indx_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y;
                            for (int z = max(-frustz, -zlayer); z < min(d_grid_size[2] - frustz, zlayer + 1); z++) {
                                // if (max(abs(x),abs(y)) != layer || abs(z) != zlayer) continue;
                                if (max(abs(x),abs(y)) != layer && ((zlayer == layer) ? (abs(z) != zlayer) : 1)) continue;
                                // if (max(abs(x),abs(y)) != layer) continue;
                                coor_z = z + frustz;
                                int shift_coor_indx_b = pixel_indx_b * d_grid_size[2] + coor_z;
                                if(loc_coor_counter[shift_coor_indx_b] < (int8_t)0) {continue;}
                                int voxel_indx_b = pixel_indx_b * max_o + (int)loc_coor_counter[shift_coor_indx_b];
                                for (int g = 0; g < min(P, (int) voxel_pnt_counter[voxel_indx_b]); g++) {
                                    int pidx = voxel_to_pntidx[voxel_indx_b * P + g];
                                    float x_v = (NN < 2) ? (in_data[pidx*3]-centerx) : (in_data[pidx*3] * in_data[pidx*3+2]-centerx*centerz) ;
                                    float y_v = (NN < 2) ? (in_data[pidx*3+1]-centery) : (in_data[pidx*3+1] * in_data[pidx*3+2]-centery*centerz) ;
                                    float xy2 = x_v * x_v + y_v * y_v;
                                    float z2 = (in_data[pidx*3 + 2]-centerz) * (in_data[pidx*3 + 2]-centerz);
                                    float xyz2 = xy2 + z2;
                                    if ((radius_limit2 == 0 || xy2 <= radius_limit2) && (depth_limit2==0 || z2 <= depth_limit2)){
                                        if (kid++ < K) {
                                            sample_pidx[raysample_startid + kid - 1] = pidx;
                                            xyz2Buffer[kid-1] = xyz2;
                                            if (xyz2 > far2){
                                                far2 = xyz2;
                                                far_ind = kid - 1;
                                            }
                                        } else {
                                            if (xyz2 < far2) {
                                                sample_pidx[raysample_startid + far_ind] = pidx;
                                                xyz2Buffer[far_ind] = xyz2;
                                                far2 = xyz2;
                                                for (int i = 0; i < K; i++) {
                                                    if (xyz2Buffer[i] > far2) {
                                                        far2 = xyz2Buffer[i];
                                                        far_ind = i;
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    """, no_extern_c=True)
    # Resolve kernel handles from the compiled module.
    get_occ_vox = mod.get_function("get_occ_vox")
    near_vox_full = mod.get_function("near_vox_full")
    insert_vox_points = mod.get_function("insert_vox_points")
    # NN > 0 selects the layered nearest-neighbor kernel; otherwise the
    # reservoir-sampling (random) kernel is used.
    query_along_ray = mod.get_function("query_neigh_along_ray_layered") if self.opt.NN > 0 else mod.get_function("query_rand_along_ray")
    return get_occ_vox, near_vox_full, insert_vox_points, query_along_ray
def switch_pixel_id(self, pixel_idx_tensor, h):
    """Flip the vertical pixel coordinate (row -> h-1-row), keeping the horizontal one."""
    u = pixel_idx_tensor[..., 0]
    v_flipped = h - 1 - pixel_idx_tensor[..., 1]
    return torch.stack([u, v_flipped], dim=-1)
def query_grid_point_index(self, h, w, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, SR, K, ranges, scaled_vsize, scaled_vdim, vscale, max_o, P, radius_limit, depth_limit, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kMaxThreadsPerBlock = 1024):
    """Run the four CUDA kernels to find neighbor points for every ray sample.

    Pipeline:
      1. get_occ_vox  - mark occupied voxels and per-pixel near/far depth ids.
      2. near_vox_full - per queried pixel, collect up to SR occupied depth
         slices and flag rays that hit anything (ray_mask).
      3. insert_vox_points - bucket point indices into voxels (reservoir
         sampling when a voxel holds more than P points).
      4. query_along_ray - per sample location, gather up to K neighbor
         point indices.

    Returns (sample_pidx [B,R,SR,K], sample_loc [B,R,SR,3],
    pixel_idx_cur [1,-1,2] (masked to hit rays), ray_mask [B,R]).

    NOTE(review): relies on module-level `Holder`, `time` and pycuda being
    imported elsewhere in this file — not visible in this chunk.
    """
    device = point_xyz_pers_tensor.device
    B, N = point_xyz_pers_tensor.shape[0], point_xyz_pers_tensor.shape[1]
    pixel_size = scaled_vdim[0] * scaled_vdim[1]
    grid_size_vol = pixel_size * scaled_vdim[2]
    # First three entries of ranges = (x_lo, y_lo, z_lo) voxel-grid origin.
    d_coord_shift = range_gpu[:3]
    # ray_vsize_gpu = (vsize_gpu / vscale_gpu).astype(np.float32)
    pixel_idx_cur_tensor = pixel_idx_tensor.reshape(B, -1, 2).clone()
    R = pixel_idx_cur_tensor.shape[1]
    # print("kernel_size_gpu {}, SR {}, K {}, ranges {}, scaled_vsize {}, scaled_vdim {}, vscale {}, max_o {}, P {}, radius_limit {}, depth_limit {}, range_gpu {}, scaled_vsize_gpu {}, scaled_vdim_gpu {}, vscale_gpu {} ".format(kernel_size_gpu, SR, K, ranges, scaled_vsize, scaled_vdim, vscale, max_o, P, radius_limit, depth_limit, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, pixel_idx_cur_tensor.shape))
    # print("point_xyz_pers_tensor", ranges, scaled_vdim_gpu, torch.min(point_xyz_pers_tensor, dim=-2)[0], torch.max(point_xyz_pers_tensor, dim=-2)[0])

    # ---- Kernel 1: occupancy grid, one thread per point ----
    gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    coor_occ_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], scaled_vdim[2]], dtype=torch.uint8, device=device)
    loc_coor_counter_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], scaled_vdim[2]], dtype=torch.int8, device=device)
    # near starts at the max depth id, far at -1; kernels atomically min/max them.
    near_depth_id_tensor = torch.full([B, scaled_vdim[0], scaled_vdim[1]], scaled_vdim[2], dtype=torch.int32, device=device)
    far_depth_id_tensor = torch.full([B, scaled_vdim[0], scaled_vdim[1]], -1, dtype=torch.int32, device=device)
    self.get_occ_vox(
        Holder(point_xyz_pers_tensor),
        Holder(actual_numpoints_tensor),
        np.int32(B),
        np.int32(N),
        d_coord_shift,
        scaled_vsize_gpu,
        scaled_vdim_gpu,
        query_size_gpu,
        np.int32(pixel_size),
        np.int32(grid_size_vol),
        Holder(coor_occ_tensor),
        Holder(loc_coor_counter_tensor),
        Holder(near_depth_id_tensor),
        Holder(far_depth_id_tensor),
        np.int32(self.inverse),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    # print("near_depth_id_tensor", torch.min(near_depth_id_tensor), torch.max(far_depth_id_tensor),torch.max(loc_coor_counter_tensor), torch.max(torch.sum(coor_occ_tensor, dim=-1)), B*scaled_vdim[0]* scaled_vdim[1]*SR, pixel_size, scaled_vdim, vscale, scaled_vdim_gpu)

    # ---- Kernel 2: per-pixel occupied depth slices, one thread per ray ----
    gridSize = int((B * R + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    voxel_to_coorz_idx_tensor = torch.full([B, scaled_vdim[0], scaled_vdim[1], SR], -1, dtype=torch.int16, device=device)
    pixel_map_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1]], dtype=torch.uint8, device=device)
    ray_mask_tensor = torch.zeros([B, R], dtype=torch.int8, device=device)
    self.near_vox_full(
        np.int32(B),
        np.int32(SR),
        # Holder(self.switch_pixel_id(pixel_idx_cur_tensor,h)),
        Holder(pixel_idx_cur_tensor),
        np.int32(R),
        vscale_gpu,
        scaled_vdim_gpu,
        np.int32(pixel_size),
        np.int32(grid_size_vol),
        query_size_gpu,
        Holder(pixel_map_tensor),
        Holder(ray_mask_tensor),
        Holder(coor_occ_tensor),
        Holder(loc_coor_counter_tensor),
        Holder(near_depth_id_tensor),
        Holder(far_depth_id_tensor),
        Holder(voxel_to_coorz_idx_tensor),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    # print("voxel_to_coorz_idx_tensor max", torch.max(torch.sum(voxel_to_coorz_idx_tensor > -1, dim=-1)))
    # print("scaled_vsize_gpu",scaled_vsize_gpu, scaled_vdim_gpu)
    # print("ray_mask_tensor",ray_mask_tensor.shape, torch.min(ray_mask_tensor), torch.max(ray_mask_tensor))
    # print("pixel_idx_cur_tensor",pixel_idx_cur_tensor.shape, torch.min(pixel_idx_cur_tensor), torch.max(pixel_idx_cur_tensor))

    # Drop rays that intersect no occupied voxel; R shrinks to the max count
    # of surviving rays over the batch.
    pixel_id_num_tensor = torch.sum(ray_mask_tensor, dim=-1)
    pixel_idx_cur_tensor = torch.masked_select(pixel_idx_cur_tensor, (ray_mask_tensor > 0)[..., None].expand(-1, -1, 2)).reshape(1, -1, 2)
    del coor_occ_tensor, near_depth_id_tensor, far_depth_id_tensor, pixel_map_tensor
    R = torch.max(pixel_id_num_tensor).cpu().numpy()
    # print("loc_coor_counter_tensor",loc_coor_counter_tensor.shape)
    # Re-label flagged voxels with contiguous per-pixel occupancy slot ids
    # (cumsum along depth), -1 meaning "no slot".
    loc_coor_counter_tensor = (loc_coor_counter_tensor > 0).to(torch.int8)
    loc_coor_counter_tensor = loc_coor_counter_tensor * torch.cumsum(loc_coor_counter_tensor, dtype=torch.int8, dim=-1) - 1
    if max_o is None:
        max_o = torch.max(loc_coor_counter_tensor).cpu().numpy().astype(np.int32) + 1
    # print("max_o", max_o)

    # ---- Kernel 3: bucket point indices into voxel slots ----
    voxel_pnt_counter_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], max_o], dtype=torch.int16, device=device)
    voxel_to_pntidx_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], max_o, P], dtype=torch.int32, device=device)
    gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    ray_vsize_gpu = (scaled_vsize_gpu / vscale_gpu).astype(np.float32)
    seconds = time.time()  # RNG seed for reservoir sampling inside the kernel
    self.insert_vox_points(
        Holder(point_xyz_pers_tensor),
        Holder(actual_numpoints_tensor),
        np.int32(B),
        np.int32(N),
        np.int32(P),
        np.int32(max_o),
        np.int32(pixel_size),
        np.int32(grid_size_vol),
        d_coord_shift,
        scaled_vdim_gpu,
        scaled_vsize_gpu,
        Holder(loc_coor_counter_tensor),
        Holder(voxel_pnt_counter_tensor),
        Holder(voxel_to_pntidx_tensor),
        np.uint64(seconds),
        np.int32(self.inverse),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    # print("loc_coor_counter_tensor",loc_coor_counter_tensor.shape, torch.min(loc_coor_counter_tensor), torch.max(loc_coor_counter_tensor))
    # print("voxel_pnt_counter_tensor",voxel_pnt_counter_tensor.shape, torch.min(voxel_pnt_counter_tensor), torch.max(voxel_pnt_counter_tensor))
    # print("voxel_to_pntidx_tensor",voxel_to_pntidx_tensor.shape, torch.min(voxel_to_pntidx_tensor), torch.max(voxel_to_pntidx_tensor))

    # ---- Kernel 4: gather K neighbors per (ray, sample) ----
    sample_pidx_tensor = torch.full([B, R, SR, K], -1, dtype=torch.int32, device=device)
    sample_loc_tensor = torch.full([B, R, SR, 3], 0.0, dtype=torch.float32, device=device)
    gridSize = int((R * SR + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    seconds = time.time()
    # print(point_xyz_pers_tensor.shape, B, SR, R ,max_o, P, K, pixel_size, grid_size_vol, radius_limit, depth_limit, d_coord_shift, scaled_vdim_gpu, scaled_vsize_gpu, ray_vsize_gpu, vscale_gpu, kernel_size_gpu, pixel_idx_cur_tensor.shape, loc_coor_counter_tensor.shape, voxel_to_coorz_idx_tensor.shape, voxel_pnt_counter_tensor.shape, voxel_to_pntidx_tensor.shape, sample_pidx_tensor.shape, sample_loc_tensor.shape, gridSize)
    if R > 0:
        self.query_along_ray(
            Holder(point_xyz_pers_tensor),
            np.int32(B),
            np.int32(SR),
            np.int32(R),
            np.int32(max_o),
            np.int32(P),
            np.int32(K),
            np.int32(pixel_size),
            np.int32(grid_size_vol),
            np.float32(radius_limit ** 2),
            np.float32(depth_limit ** 2),
            d_coord_shift,
            scaled_vdim_gpu,
            scaled_vsize_gpu,
            ray_vsize_gpu,
            vscale_gpu,
            kernel_size_gpu,
            # Holder(self.switch_pixel_id(pixel_idx_cur_tensor,h)),
            Holder(pixel_idx_cur_tensor),
            Holder(loc_coor_counter_tensor),
            Holder(voxel_to_coorz_idx_tensor),
            Holder(voxel_pnt_counter_tensor),
            Holder(voxel_to_pntidx_tensor),
            Holder(sample_pidx_tensor),
            Holder(sample_loc_tensor),
            np.uint64(seconds),
            np.int32(self.opt.NN),
            np.int32(self.inverse),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    # print("max_o", max_o)
    # print("voxel_pnt_counter", torch.max(voxel_pnt_counter_tensor))
    # print("sample_pidx_tensor", torch.max(torch.sum(sample_pidx_tensor >= 0, dim=-1)))
    # print("sample_pidx_tensor min max", torch.min(sample_pidx_tensor), torch.max(sample_pidx_tensor))
    # print("sample_pidx_tensor", sample_pidx_tensor.shape, sample_pidx_tensor[0,80,3], sample_pidx_tensor[0,80,6], sample_pidx_tensor[0,80,9])
    # print("sample_pidx_tensor, sample_loc_tensor, pixel_idx_cur_tensor, ray_mask_tensor", sample_pidx_tensor.shape, sample_loc_tensor.shape, pixel_idx_cur_tensor.shape, ray_mask_tensor.shape)
    return sample_pidx_tensor, sample_loc_tensor, pixel_idx_cur_tensor, ray_mask_tensor
def load_pnts(point_path, point_num):
    """Load a pickled point cloud and return at most `point_num` shuffled points.

    The pickle is expected to be a dict with a "point_xyz" array; rows are
    shuffled in place before truncation so the subset is random.
    """
    with open(point_path, 'rb') as f:
        print("point_file_path################", point_path)
        all_infos = pickle.load(f)
    xyz = all_infos["point_xyz"]
    print(len(xyz), xyz.dtype, np.mean(xyz, axis=0), np.min(xyz, axis=0),
          np.max(xyz, axis=0))
    np.random.shuffle(xyz)
    keep = min(len(xyz), point_num)
    return xyz[:keep, :]
def np_to_gpuarray(*args):
    """Upload each numpy array argument to the GPU via pycuda.

    Non-ndarray arguments are printed and skipped, which shortens the
    returned list relative to the inputs — callers should pass ndarrays only.
    """
    uploaded = []
    for arr in args:
        if isinstance(arr, np.ndarray):
            uploaded.append(pycuda.gpuarray.to_gpu(arr))
        else:
            print("trans", arr)
    return uploaded
def try_build(point_file, point_dir, ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj,
              radius_limit, depth_limit, split=["train"], imgidx=0, gpu=0):
    """End-to-end smoke test: load a point cloud, project it into view ``imgidx`` of the
    blender scene ``obj``, build the grid point index on GPU ``gpu``, then visualize the
    queried points via save_queried_points.

    NOTE(review): ``split=["train"]`` is a mutable default argument; it is not mutated
    here, but this is fragile if any caller ever appends to it.
    """
    point_path = os.path.join(point_dir, point_file)
    # huge cap: effectively "load all points" (81920 / 233872 were earlier caps)
    point_xyz = load_pnts(point_path, 819200000)  # 81920 233872
    imgs, poses, _, hwf, _ = load_blender_data(
        os.path.expandvars("${nrDataRoot}") + "/nerf/nerf_synthetic/{}".format(obj), split, half_res=False, testskip=1)
    H, W, focal = hwf
    plt.figure()
    plt.imshow(imgs[imgidx])
    # perspective-space coordinates of every point for the chosen view
    point_xyz_pers = w2img(point_xyz, poses[imgidx], focal)
    point_xyz_tensor = torch.as_tensor(point_xyz, device="cuda:{}".format(gpu))[None, ...]
    # plt.show()
    point_xyz_pers_tensor = torch.as_tensor(point_xyz_pers, device="cuda:{}".format(gpu))[None, ...]
    actual_numpoints_tensor = torch.ones([1], device=point_xyz_tensor.device, dtype=torch.int32) * len(point_xyz)
    # coarse grid: voxel size scaled up and dimensions scaled down by vscale
    scaled_vsize = (vsize * vscale).astype(np.float32)
    scaled_vdim = np.ceil(vdim / vscale).astype(np.int32)
    print("vsize", vsize, "vdim", vdim, "scaled_vdim", scaled_vdim)
    range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, kernel_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, kernel_size)
    pixel_idx_tensor = torch.as_tensor(pixel_idx, device="cuda:{}".format(gpu), dtype=torch.int32)[None, ...]
    sample_pidx_tensor, pixel_idx_cur_tensor = build_grid_point_index(pixel_idx_tensor, point_xyz_pers_tensor, actual_numpoints_tensor, kernel_size_gpu, SR, K, ranges, scaled_vsize, scaled_vdim, vscale, max_o, P, radius_limit, depth_limit, range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, gpu=gpu)
    save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor,
                        pixel_idx_cur_tensor, vdim, vsize, ranges)
def w2img(point_xyz, transform_matrix, focal, width=800, height=800):
    """Project world-space points into a camera's perspective space and preview the mask.

    Args:
        point_xyz: [P, 3] world-space points.
        transform_matrix: [4, 4] (or [3, 4]) camera matrix; [:3, :3] is used as the
            rotation and [:3, 3] as the camera position.
            NOTE(review): labelled "world 2 cam" but applied as camrot^T via the
            broadcast-sum below — confirm the convention against the caller.
        focal: focal length in pixels.
        width, height: image size in pixels (generalized from the hard-coded 800x800;
            defaults preserve the original behavior).

    Returns:
        [P, 3] array of (x/z, y/z, depth) perspective coordinates.
    """
    camrot = transform_matrix[:3, :3]  # world 2 cam
    campos = transform_matrix[:3, 3]  #
    point_xyz_shift = point_xyz - campos[None, :]
    # equivalent to point_xyz_shift @ camrot (rows of camrot dotted along axis -2)
    xyz = np.sum(camrot[None, ...] * point_xyz_shift[:, :, None], axis=-2)
    xper = xyz[:, 0] / -xyz[:, 2]
    yper = xyz[:, 1] / xyz[:, 2]
    cx, cy = width * 0.5, height * 0.5  # principal point at the image center
    x_pixel = np.round(xper * focal + cx).astype(np.int32)
    y_pixel = np.round(yper * focal + cy).astype(np.int32)
    print("focal", focal, np.tan(.5 * 0.6911112070083618))
    print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel), np.min(y_pixel))
    print("per xmax xmin:", np.max(xper), np.min(xper), "per ymax ymin:", np.max(yper), np.min(yper), "per zmax zmin:",
          np.max(xyz[:, 2]), np.min(xyz[:, 2]))
    print("min perx", -cx / focal, "max perx", cx / focal)
    # quick visual sanity check: darken the pixels that received a point
    background = np.ones([height, width, 3], dtype=np.float32)
    background[y_pixel, x_pixel, :] = .2
    plt.figure()
    plt.imshow(background)
    return np.stack([xper, yper, -xyz[:, 2]], axis=-1)
def render_mask_pers_points(queried_point_xyz, vsize, ranges, w, h):
    """Rasterize queried perspective-space points into a w x h mask and display it."""
    cell_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32)
    print(cell_inds.shape)
    rows, cols = cell_inds[:, 1], cell_inds[:, 0]
    canvas = np.ones([h, w, 3], dtype=np.float32)
    canvas[rows, cols, :] = .5
    plt.figure()
    plt.imshow(canvas)
def save_mask_pers_points(queried_point_xyz, vsize, ranges, w, h, save_path=None):
    """Rasterize queried perspective-space points into a w x h mask and optionally save it.

    BUG FIX: the original referenced undefined names (``self.opt`` / ``opt``) and never
    actually wrote an image.  Now the mask is returned, and saved via matplotlib when
    ``save_path`` is given.

    Args:
        queried_point_xyz: [P, >=2] perspective-space points.
        vsize: per-axis cell size (at least 2 entries).
        ranges: per-axis lower bound (at least 2 entries).
        w, h: mask width and height in cells.
        save_path: optional image path; when None, nothing is written to disk.

    Returns:
        [h, w, 3] float32 mask (1.0 background, 0.5 where points landed).
    """
    pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32)
    print(pixel_xy_inds.shape)
    y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0]
    background = np.ones([h, w, 3], dtype=np.float32)
    background[y_pixel, x_pixel, :] = .5
    if save_path is not None:
        plt.imsave(save_path, background)
    return background
def render_pixel_mask(pixel_xy_inds, w, h):
    """Display a w x h mask with the given [1, P, 2] (x, y) pixel indices blacked out."""
    rows, cols = pixel_xy_inds[0, :, 1], pixel_xy_inds[0, :, 0]
    canvas = np.ones([h, w, 3], dtype=np.float32)
    canvas[rows, cols, :] = .0
    plt.figure()
    plt.imshow(canvas)
def save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor,
                        pixel_idx_cur_tensor, vdim, vsize, ranges):
    """Visualize the queried points: a pixel mask plus a perspective-space point mask."""
    B, R, SR, K = sample_pidx_tensor.shape
    picked = sample_pidx_tensor[0, :, :, :]
    keep = picked > -1  # -1 marks empty slots in the index
    picked = torch.masked_select(picked, keep).to(torch.int64)
    queried_point_xyz_tensor = point_xyz_tensor[0, picked, :]
    queried_point_xyz = queried_point_xyz_tensor.cpu().numpy()
    print("queried_point_xyz.shape", B, R, SR, K, picked.shape, queried_point_xyz_tensor.shape,
          queried_point_xyz.shape)
    print("pixel_idx_cur_tensor", pixel_idx_cur_tensor.shape)
    render_pixel_mask(pixel_idx_cur_tensor.cpu().numpy(), vdim[0], vdim[1])
    render_mask_pers_points(point_xyz_pers_tensor[0, picked, :].cpu().numpy(), vsize, ranges, vdim[0], vdim[1])
    plt.show()
if __name__ == "__main__":
    # Build-and-query smoke test for the grid point index on the "lego" scene.
    obj = "lego"
    point_file = "{}.pkl".format(obj)
    point_dir = os.path.expandvars("${nrDataRoot}/nerf/nerf_synthetic_points/")
    r = 0.36000002589322094  # half-extent of the scene in perspective x/y
    ranges = np.array([-r, -r, 2., r, r, 6.], dtype=np.float32)
    vdim = np.array([800, 800, 400], dtype=np.int32)  # voxel grid resolution (x, y, z)
    vsize = np.array([2 * r / vdim[0], 2 * r / vdim[1], 4. / vdim[2]], dtype=np.float32)
    vscale = np.array([2, 2, 1], dtype=np.int32)
    SR = 24  # samples per ray
    P = 16
    kernel_size = np.array([5, 5, 1], dtype=np.int32)
    radius_limit = 0  # 0 disables the radius filter  # r / 400 * 5
    depth_limit = 0  # 0 disables the depth filter  # 4. / 400 * 1.5
    max_o = None
    K = 32  # max points kept per sample location
    xrange = np.arange(0, 800, 1, dtype=np.int32)
    yrange = np.arange(0, 800, 1, dtype=np.int32)
    xv, yv = np.meshgrid(xrange, yrange, sparse=False, indexing='ij')
    pixel_idx = np.stack([xv, yv], axis=-1).reshape(-1, 2)  # all 800x800 pixel coords
    gpu = 0
    imgidx = 3
    split = ["train"]
    if gpu < 0:
        import pycuda.autoinit
    else:
        drv.init()
        dev1 = drv.Device(gpu)
        ctx1 = dev1.make_context()
    # BUG FIX: gpu was hard-coded to 0 here; pass the gpu variable so changing it
    # above is sufficient.
    try_build(point_file, point_dir, ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj,
              radius_limit, depth_limit, split=split, imgidx=imgidx, gpu=gpu)
pointnerf | pointnerf-master/models/depth_estimators/mvsnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .module import *
class FeatureNet(nn.Module):
    """2D feature extractor: 3-channel image -> 32-channel map at 1/4 resolution."""

    def __init__(self):
        super(FeatureNet, self).__init__()
        self.inplanes = 32
        # stage 1: full resolution, 8 channels
        self.conv0 = ConvBnReLU(3, 8, 3, 1, 1)
        self.conv1 = ConvBnReLU(8, 8, 3, 1, 1)
        # stage 2: stride-2 downsample, 16 channels
        self.conv2 = ConvBnReLU(8, 16, 5, 2, 2)
        self.conv3 = ConvBnReLU(16, 16, 3, 1, 1)
        self.conv4 = ConvBnReLU(16, 16, 3, 1, 1)
        # stage 3: stride-2 downsample, 32 channels
        self.conv5 = ConvBnReLU(16, 32, 5, 2, 2)
        self.conv6 = ConvBnReLU(32, 32, 3, 1, 1)
        self.feature = nn.Conv2d(32, 32, 3, 1, 1)

    def forward(self, x):
        out = self.conv1(self.conv0(x))
        out = self.conv3(out)
        out = self.conv4(self.conv2(out)) if False else self.conv4(out)  # noqa: placeholder removed below
        return out
class CostRegNet(nn.Module):
    """3D U-Net cost-volume regularizer: 32-channel volume -> 1-channel cost volume."""

    def __init__(self):
        super(CostRegNet, self).__init__()
        # encoder: each stride-2 level halves the spatial dimensions
        self.conv0 = ConvBnReLU3D(32, 8)
        self.conv1 = ConvBnReLU3D(8, 16, stride=2)
        self.conv2 = ConvBnReLU3D(16, 16)
        self.conv3 = ConvBnReLU3D(16, 32, stride=2)
        self.conv4 = ConvBnReLU3D(32, 32)
        self.conv5 = ConvBnReLU3D(32, 64, stride=2)
        self.conv6 = ConvBnReLU3D(64, 64)
        # decoder: transposed convs undo each downsampling level
        self.conv7 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(32),
            nn.ReLU(inplace=True))
        self.conv9 = nn.Sequential(
            nn.ConvTranspose3d(32, 16, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(16),
            nn.ReLU(inplace=True))
        self.conv11 = nn.Sequential(
            nn.ConvTranspose3d(16, 8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(8),
            nn.ReLU(inplace=True))
        self.prob = nn.Conv3d(8, 1, 3, stride=1, padding=1)

    def forward(self, x):
        # encoder, saving a skip tensor at each resolution
        skip0 = self.conv0(x)
        skip2 = self.conv2(self.conv1(skip0))
        skip4 = self.conv4(self.conv3(skip2))
        bottom = self.conv6(self.conv5(skip4))
        # decoder with additive skip connections
        up = skip4 + self.conv7(bottom)
        up = skip2 + self.conv9(up)
        up = skip0 + self.conv11(up)
        return self.prob(up)
class RefineNet(nn.Module):
    """Residual depth refinement: predicts a depth correction from (image, initial depth)."""

    def __init__(self):
        super(RefineNet, self).__init__()
        self.conv1 = ConvBnReLU(4, 32)
        self.conv2 = ConvBnReLU(32, 32)
        self.conv3 = ConvBnReLU(32, 32)
        self.res = ConvBnReLU(32, 1)

    def forward(self, img, depth_init):
        """img: [B, 3, H, W]; depth_init: [B, 1, H, W].  Returns refined depth [B, 1, H, W]."""
        # BUG FIX: torch.nn.functional has no `cat`; the original `F.cat` raised
        # AttributeError.  Concatenation lives in the torch namespace.
        concat = torch.cat((img, depth_init), dim=1)
        depth_residual = self.res(self.conv3(self.conv2(self.conv1(concat))))
        depth_refined = depth_init + depth_residual
        return depth_refined
class MVSNet(nn.Module):
    """MVSNet: multi-view depth inference.

    Pipeline: per-view 2D features -> homography-warped variance cost volume ->
    3D U-Net regularization -> softmax probability volume -> soft-argmax depth,
    with an optional residual refinement network.
    """

    def __init__(self, refine=False):
        super(MVSNet, self).__init__()
        self.refine = refine
        self.feature = FeatureNet()
        self.cost_regularization = CostRegNet()
        if self.refine:
            self.refine_network = RefineNet()

    def forward(self, imgs, proj_matrices, depth_values, features=None, prob_only=False):
        """imgs: [B, V, 3, H, W]; proj_matrices: [B, V, 4, 4]; depth_values: [B, D].

        Returns (depth, photometric_confidence, features, prob_volume); or
        (features, prob_volume, cost_reg) when prob_only; or a dict when refine=True.
        """
        imgs = torch.unbind(imgs, 1)
        num_depth = depth_values.shape[1]
        num_views = len(imgs)

        # step 1. feature extraction
        # in: images; out: 32-channel feature maps
        if features is None:
            features = [self.feature(img) for img in imgs]

        # step 2. differentiable homograph, build variance-based cost volume
        volume_sum = 0
        volume_sq_sum = 0
        for vid in range(num_views):
            # warpped features
            warped_volume = homo_warping(features[vid], proj_matrices[:, vid], depth_values)
            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # in-place accumulation saves memory at inference time
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)  # the memory of warped_volume has been modified
            del warped_volume
        # Var[X] = E[X^2] - E[X]^2
        volume_variance = volume_sq_sum.div_(num_views).sub_(volume_sum.div_(num_views).pow_(2))

        # step 3. cost volume regularization
        cost_reg = self.cost_regularization(volume_variance)
        cost_reg = cost_reg.squeeze(1)
        prob_volume = F.softmax(cost_reg, dim=1)
        if prob_only:
            return features, prob_volume, cost_reg

        depth = depth_regression(prob_volume, depth_values=depth_values)

        with torch.no_grad():
            # photometric confidence: probability mass in a 4-wide depth window
            # around the (soft) argmax depth index
            prob_volume_sum4 = 4 * F.avg_pool3d(F.pad(prob_volume.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1), stride=1, padding=0).squeeze(1)
            depth_index = depth_regression(prob_volume, depth_values=torch.arange(num_depth, device=prob_volume.device, dtype=torch.float)).long()
            photometric_confidence = torch.gather(prob_volume_sum4, 1, depth_index.unsqueeze(1)).squeeze(1)

        # step 4. depth map refinement
        if not self.refine:
            return depth, photometric_confidence, features, prob_volume
        else:
            # BUG FIX: RefineNet.forward takes (img, depth_init) and concatenates
            # internally; the original passed one pre-concatenated tensor (TypeError).
            # depth is [B, H, W], so restore the channel dim RefineNet expects.
            refined_depth = self.refine_network(imgs[0], depth.unsqueeze(1))
            return {"depth": depth, "refined_depth": refined_depth, "photometric_confidence": photometric_confidence}
def mvsnet_loss(depth_est, depth_gt, mask):
    """Masked smooth-L1 loss between estimated and ground-truth depth maps.

    Args:
        depth_est, depth_gt: tensors of identical shape.
        mask: same shape; pixels with mask > 0.5 contribute to the loss.

    Returns:
        Scalar mean smooth-L1 loss over the valid pixels.
    """
    valid = mask > 0.5
    # `size_average=True` is deprecated in modern PyTorch; reduction='mean' is equivalent.
    return F.smooth_l1_loss(depth_est[valid], depth_gt[valid], reduction='mean')
| 5,505 | 35.95302 | 147 | py |
pointnerf | pointnerf-master/models/depth_estimators/module.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvBnReLU(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBnReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        y = self.conv(x)
        y = self.bn(y)
        return F.relu(y, inplace=True)
class ConvBn(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d, without an activation."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBn, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        y = self.conv(x)
        return self.bn(y)
class ConvBnReLU3D(nn.Module):
    """Conv3d (no bias) -> BatchNorm3d -> in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBnReLU3D, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm3d(out_channels)

    def forward(self, x):
        y = self.conv(x)
        y = self.bn(y)
        return F.relu(y, inplace=True)
def homo_warping(src_fea, proj, depth_values):
    """Warp source-view features into the reference frustum at each depth hypothesis.

    NOTE(review): the legacy comment header below mentions separate src_proj/ref_proj,
    but this signature takes a single combined `proj` — presumably src_proj @ ref_proj^-1;
    confirm against the caller.
    """
    # src_fea: [B, C, H, W]
    # src_proj: [B, 4, 4]
    # ref_proj: [B, 4, 4]
    # depth_values: [B, Ndepth]
    # out: [B, C, Ndepth, H, W]
    batch, channels = src_fea.shape[0], src_fea.shape[1]
    num_depth = depth_values.shape[1]
    height, width = src_fea.shape[2], src_fea.shape[3]
    with torch.no_grad():
        rot = proj[:, :3, :3]  # [B,3,3]
        trans = proj[:, :3, 3:4]  # [B,3,1]
        # dense pixel grid of the reference view, flattened to homogeneous coords
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=src_fea.device),
                               torch.arange(0, width, dtype=torch.float32, device=src_fea.device)])
        y, x = y.contiguous(), x.contiguous()
        y, x = y.view(height * width), x.view(height * width)
        xyz = torch.stack((x, y, torch.ones_like(x)))  # [3, H*W]
        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)  # [B, 3, H*W]
        rot_xyz = torch.matmul(rot, xyz)  # [B, 3, H*W]
        # scale each rotated ray by every depth hypothesis
        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_values.view(batch, 1, num_depth,
                                                                                            1)  # [B, 3, Ndepth, H*W]
        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]
        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # [B, 2, Ndepth, H*W]
        # normalize to grid_sample's [-1, 1] coordinate convention
        proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
        proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)  # [B, Ndepth, H*W, 2]
        grid = proj_xy

    # sample once for all depths by folding Ndepth into the grid height
    warped_src_fea = F.grid_sample(src_fea, grid.view(batch, num_depth * height, width, 2), mode='bilinear',
                                   padding_mode='zeros')
    warped_src_fea = warped_src_fea.view(batch, channels, num_depth, height, width)

    return warped_src_fea
def depth_regression(p, depth_values):
    """Soft-argmax depth: expectation of depth_values under the probability volume.

    p: [B, D, H, W] probability volume; depth_values: [B, D] discrete hypotheses.
    Returns [B, H, W].
    """
    weights = depth_values.view(*depth_values.shape, 1, 1)
    return torch.sum(p * weights, 1)
if __name__ == "__main__":
    # some testing code, just IGNORE it
    # Manual sanity check: warp source images toward the reference view with
    # homo_warping, then recompute the same warp with numpy + cv2.remap for a
    # ground-truth comparison.  Requires the project's `datasets` package, cv2,
    # a CUDA device, and the hard-coded DTU dataset path below.
    from datasets import find_dataset_def
    from torch.utils.data import DataLoader
    import numpy as np
    import cv2

    MVSDataset = find_dataset_def("dtu_yao")
    dataset = MVSDataset("/home/xyguo/dataset/dtu_mvs/processed/mvs_training/dtu/", '../lists/dtu/train.txt', 'train',
                         3, 256)
    dataloader = DataLoader(dataset, batch_size=2)
    item = next(iter(dataloader))

    # 4x spatial subsampling to keep the check cheap
    imgs = item["imgs"][:, :, :, ::4, ::4].cuda()
    proj_matrices = item["proj_matrices"].cuda()
    mask = item["mask"].cuda()
    depth = item["depth"].cuda()
    depth_values = item["depth_values"].cuda()

    imgs = torch.unbind(imgs, 1)
    proj_matrices = torch.unbind(proj_matrices, 1)
    ref_img, src_imgs = imgs[0], imgs[1:]
    ref_proj, src_projs = proj_matrices[0], proj_matrices[1:]

    # NOTE(review): homo_warping above takes (src_fea, proj, depth_values) — three
    # arguments — so this four-argument call will raise a TypeError as written.
    warped_imgs = homo_warping(src_imgs[0], src_projs[0], ref_proj, depth_values)

    # dump reference/source images (RGB -> BGR for cv2)
    cv2.imwrite('../tmp/ref.png', ref_img.permute([0, 2, 3, 1])[0].detach().cpu().numpy()[:, :, ::-1] * 255)
    cv2.imwrite('../tmp/src.png', src_imgs[0].permute([0, 2, 3, 1])[0].detach().cpu().numpy()[:, :, ::-1] * 255)

    # one warped image per depth hypothesis
    for i in range(warped_imgs.shape[2]):
        warped_img = warped_imgs[:, :, i, :, :].permute([0, 2, 3, 1]).contiguous()
        img_np = warped_img[0].detach().cpu().numpy()
        cv2.imwrite('../tmp/tmp{}.png'.format(i), img_np[:, :, ::-1] * 255)

    # generate gt
    def tocpu(x):
        return x.detach().cpu().numpy().copy()

    ref_img = tocpu(ref_img)[0].transpose([1, 2, 0])
    src_imgs = [tocpu(x)[0].transpose([1, 2, 0]) for x in src_imgs]
    ref_proj_mat = tocpu(ref_proj)[0]
    src_proj_mats = [tocpu(x)[0] for x in src_projs]
    mask = tocpu(mask)[0]
    depth = tocpu(depth)[0]
    depth_values = tocpu(depth_values)[0]

    # numpy reference implementation of the homography warp, per depth plane
    for i, D in enumerate(depth_values):
        height = ref_img.shape[0]
        width = ref_img.shape[1]
        xx, yy = np.meshgrid(np.arange(0, width), np.arange(0, height))
        print("yy", yy.max(), yy.min())
        yy = yy.reshape([-1])
        xx = xx.reshape([-1])
        # homogeneous pixel coords, lifted to depth D, then reprojected into the source view
        X = np.vstack((xx, yy, np.ones_like(xx)))
        # D = depth.reshape([-1])
        # print("X", "D", X.shape, D.shape)
        X = np.vstack((X * D, np.ones_like(xx)))
        X = np.matmul(np.linalg.inv(ref_proj_mat), X)
        X = np.matmul(src_proj_mats[0], X)
        X /= X[2]
        X = X[:2]
        yy = X[0].reshape([height, width]).astype(np.float32)
        xx = X[1].reshape([height, width]).astype(np.float32)
        warped = cv2.remap(src_imgs[0], yy, xx, interpolation=cv2.INTER_LINEAR)
        # warped[mask[:, :] < 0.5] = 0
        cv2.imwrite('../tmp/tmp{}_gt.png'.format(i), warped[:, :, ::-1] * 255)
| 6,155 | 39.235294 | 118 | py |
pointnerf | pointnerf-master/models/depth_estimators/__init__.py | 0 | 0 | 0 | py | |
pointnerf | pointnerf-master/models/helpers/geometrics.py | import torch
def homogenize(m):
    """Adds homogeneous coordinates to a [..., N, N] matrix, returning [..., N+1, N+1]."""
    assert m.shape[-1] == m.shape[-2]  # Must be square
    n = m.shape[-1]
    # BUG FIX: the identity was built with a hard-coded .cuda(), which breaks on CPU
    # tensors and mismatches m when it lives on a non-default GPU.  Build it on m's
    # device and dtype instead.
    eye_n_plus_1 = torch.eye(n + 1, device=m.device, dtype=m.dtype).expand(list(m.shape[:-2]) + [-1, -1])
    extra_col = eye_n_plus_1[..., :-1, -1:]
    extra_row = eye_n_plus_1[..., -1:, :]
    including_col = torch.cat([m, extra_col], dim=-1)
    return torch.cat([including_col, extra_row], dim=-2)
def compute_world2local_dist(dists, radii, rotations):
    """Transform offsets into each element's local frame: rotate, then scale by 1/radius.

    dists: [..., 3] offsets; radii: [..., 3]; rotations: [..., 3] roll/pitch/yaw radians.
    Returns the transformed offsets with a trailing singleton column dimension.
    """
    offsets = dists[..., None]
    # rotation built from roll-pitch-yaw angles
    rot = roll_pitch_yaw_to_rotation_matrices(rotations)
    assert rot.shape[-2:] == (3, 3)
    # per-axis scale: 1 / radius (epsilon guards against zero radii)
    inv_radii = 1.0 / (radii + 1e-8)
    scale = torch.diag_embed(inv_radii)
    world2local = torch.matmul(scale, rot)
    return torch.matmul(world2local, offsets)
def roll_pitch_yaw_to_rotation_matrices(roll_pitch_yaw):
    """Convert roll-pitch-yaw angles to rotation matrices.

    roll_pitch_yaw: [..., 3] angles in radians.  The resulting matrix rotates
    points by roll about x, then pitch about y, then yaw about z.
    Returns a [..., 3, 3] tensor of rotation matrices.
    """
    cos_rpy = torch.cos(roll_pitch_yaw)
    sin_rpy = torch.sin(roll_pitch_yaw)
    cx, cy, cz = torch.unbind(cos_rpy, dim=-1)
    sx, sy, sz = torch.unbind(sin_rpy, dim=-1)
    # entries of Rz(yaw) @ Ry(pitch) @ Rx(roll), row-major
    flat = torch.stack(
        [cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx,
         sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx,
         -sy, cy * sx, cy * cx], dim=-1)
    out_shape = list(roll_pitch_yaw.shape[:-1]) + [3, 3]
    return torch.reshape(flat, out_shape)
| 2,890 | 39.71831 | 125 | py |
pointnerf | pointnerf-master/models/helpers/networks.py | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import numpy as np
def get_nonlinearity_layer(activation_type='PReLU'):
    """Return a freshly constructed activation module for the given name."""
    factories = {
        'ReLU': lambda: nn.ReLU(True),
        'SELU': lambda: nn.SELU(True),
        'LeakyReLU': lambda: nn.LeakyReLU(0.1, True),
        'PReLU': lambda: nn.PReLU(),
    }
    try:
        return factories[activation_type]()
    except KeyError:
        raise NotImplementedError('activation layer [{}] is not found'.format(activation_type))
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer constructor for the given name (None for 'none')."""
    if norm_type == 'batch':
        layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == 'instance':
        layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    elif norm_type == 'group':
        # NOTE(review): a caller passing num_channels positionally will clash with the
        # num_groups keyword bound here — confirm call sites use keyword arguments.
        layer = functools.partial(nn.GroupNorm, num_groups=16, affine=True)
    elif norm_type == 'layer':
        layer = nn.LayerNorm
    elif norm_type == 'none':
        layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return layer
def get_scheduler(optimizer, opt):
    """Build the learning-rate scheduler named by ``opt.lr_policy``.

    Supported policies: 'lambda', 'step', 'plateau', 'iter_exponential_decay',
    'cosine_annealing'.  Raises NotImplementedError for anything else.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(it):
            # linear decay from 1 to 0 over opt.niter_decay iters after opt.niter
            lr_l = 1.0 - max(0, it - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   mode='min',
                                                   factor=0.2,
                                                   threshold=0.01,
                                                   patience=5)
    elif opt.lr_policy == 'iter_exponential_decay':
        def lambda_rule(it):
            lr_l = pow(opt.lr_decay_exp, it / opt.lr_decay_iters)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'cosine_annealing':
        # BUG FIX: the original referenced undefined `self.args` and an unimported
        # CosineAnnealingLR (NameError on this branch).
        # NOTE(review): T_max=opt.niter stands in for the original's num_epochs — confirm.
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=1e-7)
    else:
        # BUG FIX: the original *returned* NotImplementedError (with a stray comma
        # instead of %-formatting) rather than raising it.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def get_xavier_multiplier(m, gain):
    """Xavier std for conv/linear modules; None for unsupported module types.

    std = gain * sqrt(2 / ((fan_in + fan_out) * ksize)); transposed convs divide
    the kernel size by the stride (effective kernel per output position), and
    Linear layers use ksize = 1.
    """
    if isinstance(m, nn.Conv1d):
        ksize = m.kernel_size[0]
    elif isinstance(m, nn.ConvTranspose1d):
        ksize = m.kernel_size[0] // m.stride[0]
    elif isinstance(m, nn.Conv2d):
        ksize = m.kernel_size[0] * m.kernel_size[1]
    elif isinstance(m, nn.ConvTranspose2d):
        ksize = m.kernel_size[0] * m.kernel_size[1] // m.stride[0] // m.stride[1]
    elif isinstance(m, nn.Conv3d):
        ksize = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2]
    elif isinstance(m, nn.ConvTranspose3d):
        ksize = (m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2]
                 // m.stride[0] // m.stride[1] // m.stride[2])
    elif isinstance(m, nn.Linear):
        ksize = 1
    else:
        return None
    if isinstance(m, nn.Linear):
        n1, n2 = m.in_features, m.out_features
    else:
        n1, n2 = m.in_channels, m.out_channels
    return gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
def xavier_uniform_(m, gain):
    """In-place Xavier-uniform init of m.weight, matched to its layer type's multiplier."""
    std = get_xavier_multiplier(m, gain)
    bound = std * np.sqrt(3.0)  # uniform(-b, b) with b = std * sqrt(3) has this std
    m.weight.data.uniform_(-bound, bound)
def init_weights(net, init_type='xavier_uniform', gain=1):
    """Recursively initialize conv/linear (and BatchNorm2d) weights of `net`."""
    def init_func(m):
        cname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in cname or 'Linear' in cname):
            if init_type == 'xavier_uniform':
                xavier_uniform_(m, gain)
            elif init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [{}] is not implemented'.format(init_type))
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in cname:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    net.apply(init_func)
def init_seq(s, init_type='xavier_uniform'):
    """Initialize a sequential model, gain-matched to each layer's following activation."""
    for layer, nxt in zip(s[:-1], s[1:]):
        if isinstance(nxt, nn.ReLU):
            init_weights(layer, init_type, nn.init.calculate_gain('relu'))
        elif isinstance(nxt, nn.LeakyReLU):
            init_weights(layer, init_type, nn.init.calculate_gain('leaky_relu', nxt.negative_slope))
        else:
            init_weights(layer, init_type)
    init_weights(s[-1])
def positional_encoding(positions, freqs, ori=False):
    """Fourier-feature encoding of `positions`.

    positions: (..., D); freqs: number of frequency octaves F.
    Returns (..., 2*D*F), or (..., D + 2*D*F) with the raw input prepended when ori=True.
    """
    freq_bands = (2 ** torch.arange(freqs).float()).to(positions.device)  # (F,)
    in_dim = positions.shape[-1]
    scaled = (positions[..., None] * freq_bands).reshape(
        positions.shape[:-1] + (freqs * in_dim,))  # (..., D*F)
    if ori:
        # raw coordinates first, then all sines, then all cosines
        enc = torch.cat([positions, torch.sin(scaled), torch.cos(scaled)], dim=-1)
        enc = enc.reshape(scaled.shape[:-1] + (scaled.shape[-1] * 2 + in_dim,))
    else:
        # interleave sin/cos per scaled coordinate
        enc = torch.stack([torch.sin(scaled), torch.cos(scaled)], dim=-1)
        enc = enc.reshape(scaled.shape[:-1] + (scaled.shape[-1] * 2,))
    return enc
pointnerf | pointnerf-master/models/helpers/__init__.py | 0 | 0 | 0 | py | |
pointnerf | pointnerf-master/models/rendering/diff_ray_marching.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def find_ray_generation_method(name):
    """Look up a ray-generation function by its registered name."""
    assert isinstance(name, str), 'ray generation method name must be string'
    if name == 'cube':
        return cube_ray_generation
    if name == 'near_far_linear':
        return near_far_linear_ray_generation
    if name == 'near_far_disparity_linear':
        return near_far_disparity_linear_ray_generation
    if name == 'nerf_near_far_disparity_linear':
        return nerf_near_far_disparity_linear_ray_generation
    if name == 'nerf_near_far_linear':
        return nerf_near_far_linear_ray_generation
    if name == 'near_middle_far':
        return near_middle_far_ray_generation
    raise RuntimeError('No such ray generation method: ' + name)
def find_refined_ray_generation_method(name):
    """Look up a refined ray-generation function by name.

    Any name other than 'cube' / 'nerf*' falls back to the generic refiner
    (the original's explicit hack default).
    """
    assert isinstance(name, str), 'ray generation method name must be string'
    if name == 'cube':
        return refine_cube_ray_generation
    if name.startswith('nerf'):
        return nerf_refine_ray_generation
    # hack default: everything else uses the generic refiner
    return refine_ray_generation
def sample_pdf(in_bins, in_weights, n_samples, det=False):
    """Inverse-CDF sampling of extra positions, merged with the input bins.

    in_bins: [N, R, S, 1] sorted sample positions; in_weights: [N, R, S, 1].
    det=True uses evenly spaced quantiles instead of random ones.
    Returns a sorted, detached [N, R, n_samples + S, 1] tensor.
    """
    in_shape = in_bins.shape
    device = in_weights.device
    # flattened numpy copies, one row per ray
    bin_np = in_bins.data.cpu().numpy().reshape([-1, in_shape[2]])
    bin_np = 0.5 * (bin_np[..., 1:] + bin_np[..., :-1])  # midpoints: [NR, S-1]
    w_np = in_weights.data.cpu().numpy().reshape([-1, in_shape[2]])
    w_np = w_np[..., 1:-1]  # interior weights: [NR, S-2]
    w_np += 1e-5  # avoid an all-zero pdf
    pdf = w_np / np.sum(w_np, axis=-1, keepdims=True)
    cdf = np.cumsum(pdf, axis=-1)
    cdf = np.concatenate([np.zeros_like(cdf[..., :1]), cdf], -1)  # [NR, S-1]
    if det:
        u = np.broadcast_to(np.linspace(0, 1, n_samples, dtype=np.float32),
                            (cdf.shape[0], n_samples))
    else:
        u = np.random.rand(cdf.shape[0], n_samples).astype(np.float32)
    # invert the cdf per ray: locate each quantile's bracketing segment
    inds = np.stack(
        [np.searchsorted(row, q, side='right') for row, q in zip(cdf, u)])
    below = np.maximum(0, inds - 1)
    above = np.minimum(cdf.shape[-1] - 1, inds)
    cdf_below = np.take_along_axis(cdf, below, 1)
    cdf_above = np.take_along_axis(cdf, above, 1)
    bins_below = np.take_along_axis(bin_np, below, 1)
    bins_above = np.take_along_axis(bin_np, above, 1)
    denom = cdf_above - cdf_below
    denom = np.where(denom < 1e-5, np.ones_like(denom), denom)  # guard degenerate segments
    t = (u - cdf_below) / denom
    new_samples = bins_below + t * (bins_above - bins_below)
    new_samples = torch.from_numpy(new_samples).view(
        (in_shape[0], in_shape[1], n_samples, 1)).to(device)
    # merge with the original bins and keep everything sorted
    merged = torch.cat([new_samples, in_bins.detach()], dim=-2)
    merged, _ = torch.sort(merged, dim=-2)
    return merged.detach()
# def sample_pdf(in_bins, in_weights, n_samples, det=False):
# # bins: N x R x S x 1
# # weights: N x R x S x 1
# import tensorflow as tf
# tf.config.set_visible_devices([], 'GPU')
# ori_shape = in_bins.shape
# device = in_weights.device
# # bins: (N*R, S)
# bins = tf.convert_to_tensor(in_bins.data.cpu().numpy().reshape((-1, in_bins.shape[-2])))
# weights = tf.convert_to_tensor(in_weights.data.cpu().numpy().reshape((-1, in_weights.shape[-2])))
# bins = 0.5 * (bins[..., 1:] + bins[..., :-1])
# weights = weights[..., 1:-1]
# # Get pdf
# weights += 1e-5 # prevent nans
# pdf = weights / tf.reduce_sum(weights, -1, keepdims=True)
# cdf = tf.cumsum(pdf, -1)
# cdf = tf.concat([tf.zeros_like(cdf[..., :1]), cdf], -1)
# # Take uniform samples
# if det:
# u = tf.linspace(0., 1., n_samples)
# u = tf.broadcast_to(u, list(cdf.shape[:-1]) + [n_samples])
# else:
# u = tf.random.uniform(list(cdf.shape[:-1]) + [n_samples])
# # Invert CDF
# inds = tf.searchsorted(cdf, u, side='right')
# below = tf.maximum(0, inds - 1)
# above = tf.minimum(cdf.shape[-1] - 1, inds)
# inds_g = tf.stack([below, above], -1)
# cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape) - 2)
# bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape) - 2)
# denom = (cdf_g[..., 1] - cdf_g[..., 0])
# denom = tf.where(denom < 1e-5, tf.ones_like(denom), denom)
# t = (u - cdf_g[..., 0]) / denom
# samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
# # N x R x N_samples x 1
# samples = torch.from_numpy(samples.numpy()).view(
# (in_bins.shape[0], in_bins.shape[1], n_samples, 1)).to(in_bins.device)
# # print(samples[0,0,:, 0])
# # print(in_bins[0,0,:, 0])
# # N x R x (N_samples + S) x 1
# samples = torch.cat([samples, in_bins.detach()], dim=-2)
# # samples = torch.cat([samples, in_bins.data], dim=-2)
# samples, _ = torch.sort(samples, dim=-2)
# samples = samples.detach()
# return samples
def near_middle_far_ray_generation(campos,
                                   raydir,
                                   point_count,
                                   near=0.1,
                                   middle=2,
                                   far=10,
                                   middle_split=0.6,
                                   jitter=0.,
                                   **kargs):
    """Sample points along rays: linear in depth from near to middle, then linear
    in disparity from middle to far.

    campos: [N, 3]; raydir: [N, R, 3] normalized; near/far broadcastable as [N, 1, 1].
    jitter: fraction of segment length used for random perturbation (0 disables it).
    Returns (raypos [N,R,S,3], segment_length [N,R,S], valid [N,R,S], ts [N,R,S]).
    """
    device = campos.device
    # near..middle: uniform in depth
    frac = torch.linspace(0,
                          1,
                          int(point_count * middle_split) + 1,
                          device=device).view(1, -1)
    near_part = near * (1 - frac) + middle * frac
    # middle..far: uniform in disparity (1/depth)
    frac = torch.linspace(0,
                          1,
                          int(point_count * (1 - middle_split)) + 2,
                          device=device).view(1, -1)
    far_part = 1. / (1. / middle * (1 - frac) + 1. / far * frac)
    tvals = torch.cat([near_part, far_part], 2)
    # per-ray jittered segment lengths, truncated to point_count segments
    noise = torch.rand(
        (raydir.shape[0], raydir.shape[1], tvals.shape[-1] - 1),
        device=device) - 0.5
    segment_length = (tvals[..., 1:] - tvals[..., :-1]) * (1 + jitter * noise)
    segment_length = segment_length[..., :point_count]
    end_point_ts = torch.cumsum(segment_length, dim=2)
    zeros = torch.zeros((end_point_ts.shape[0], end_point_ts.shape[1], 1),
                        device=end_point_ts.device)
    end_point_ts = near + torch.cat([zeros, end_point_ts], dim=2)
    # sample at segment midpoints
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None, None, :] + raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts
def near_far_disparity_linear_ray_generation(campos,
                                             raydir,
                                             point_count,
                                             near=0.1,
                                             far=10,
                                             jitter=0.,
                                             **kargs):
    """Sample ray points at depths uniform in disparity (1/t) between
    ``near`` and ``far``; points sit at segment midpoints. ``jitter``
    rescales each segment length by up to +/- jitter/2.
    """
    # inputs
    # campos: N x 3
    # raydir: N x Rays x 3, must be normalized
    # near: N x 1 x 1
    # far: N x 1 x 1
    # jitter: float in [0, 1), a fraction of step length
    # outputs
    # raypos: N x Rays x Samples x 3
    # segment_length: N x Rays x Samples
    # valid: N x Rays x Samples
    # ts: N x Rays x Samples
    tvals = torch.linspace(0, 1, point_count + 1,
                           device=campos.device).view(1, -1)
    # linear in disparity: uniform steps in 1/t between 1/near and 1/far
    tvals = 1. / (1. / near *
                  (1 - tvals) + 1. / far * tvals)  # N x 1 x Samples
    # jittered segment lengths; the rand term broadcasts them to
    # N x Rays x Samples (drawn even when jitter == 0)
    segment_length = (tvals[..., 1:] -
                      tvals[..., :-1]) * (1 + jitter * (torch.rand(
                          (raydir.shape[0], raydir.shape[1], point_count),
                          device=campos.device) - 0.5))
    # cumulative segment end points, offset to start at `near`
    end_point_ts = torch.cumsum(segment_length, dim=2)
    end_point_ts = torch.cat([
        torch.zeros((end_point_ts.shape[0], end_point_ts.shape[1], 1),
                    device=end_point_ts.device), end_point_ts
    ], dim=2)
    end_point_ts = near + end_point_ts
    # sample points sit at segment midpoints
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None,
                    None, :] + raydir[:, :, None, :] * middle_point_ts[:, :, :,
                                                                       None]
    # this sampler marks every sample valid
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts
def nerf_near_far_disparity_linear_ray_generation(campos,
                                                  raydir,
                                                  point_count,
                                                  near=0.1,
                                                  far=10,
                                                  jitter=1.,
                                                  **kargs):
    """NeRF-style sampling: depths uniform in disparity (1/t) between
    ``near`` and ``far``. When ``jitter > 0`` each depth is re-drawn
    uniformly inside the bin bounded by the midpoints to its neighbours
    (stratified sampling)."""
    # inputs
    # campos: N x 3
    # raydir: N x Rays x 3, must be normalized
    # near: N x 1 x 1
    # far: N x 1 x 1
    # jitter: float in [0, 1), a fraction of step length
    # outputs
    # raypos: N x Rays x Samples x 3
    # segment_length: N x Rays x Samples
    # valid: N x Rays x Samples
    # ts: N x Rays x Samples
    tvals = torch.linspace(0, 1, point_count,
                           device=campos.device).view(1, -1)
    tvals = 1. / (1. / near *
                  (1 - tvals) + 1. / far * tvals)  # N x 1 x Samples
    if jitter > 0.0:
        # stratify: jitter each sample within its own bin
        mids = .5 * (tvals[..., 1:] + tvals[..., :-1])
        upper = torch.cat([mids, tvals[..., -1:]], -1)
        lower = torch.cat([tvals[..., :1], mids], -1)
        # NOTE(review): tvals.shape[2] requires tvals to be 3-D here, i.e.
        # near/far must be N x 1 x 1 tensors (scalar defaults would fail
        # on this branch) — confirm against callers.
        t_rand = torch.rand([tvals.shape[0],raydir.shape[1],tvals.shape[2]], device=campos.device)
        tvals = lower + (upper - lower) * t_rand
    # last segment is padded with a huge pseudo-infinite length (NeRF
    # convention); lengths are scaled by |raydir| to be metric
    segment_length = torch.cat([tvals[..., 1:] - tvals[..., :-1], torch.full((tvals.shape[0], tvals.shape[1], 1), 1e10, device=tvals.device)], axis=-1) * torch.linalg.norm(raydir[..., None, :], axis=-1)
    raypos = campos[:, None,
                    None, :] + raydir[:, :, None, :] * tvals[:, :, :, None]
    # this sampler marks every sample valid
    valid = torch.ones_like(tvals,
                            dtype=raypos.dtype,
                            device=raypos.device)
    return raypos, segment_length, valid, tvals
def nerf_near_far_linear_ray_generation(campos,
                                        raydir,
                                        point_count,
                                        near=0.1,
                                        far=10,
                                        jitter=1.,
                                        **kargs):
    """NeRF-style sampling with depths spaced linearly between ``near`` and
    ``far``. When ``jitter > 0`` each depth is re-drawn uniformly inside the
    bin bounded by the midpoints to its neighbours (stratified sampling).

    campos: N x 3; raydir: N x Rays x 3 (normalized); near/far broadcast
    against the depth axis (N x 1 x 1 tensors as in the sibling samplers).
    Returns (raypos, segment_length, valid, tvals).
    """
    steps = torch.linspace(0, 1, point_count,
                           device=campos.device).view(1, -1)
    tvals = near * (1. - steps) + far * (steps)  # linear in depth
    if jitter > 0.0:
        # stratify: jitter each sample inside its own bin
        bin_mids = .5 * (tvals[..., 1:] + tvals[..., :-1])
        hi = torch.cat([bin_mids, tvals[..., -1:]], -1)
        lo = torch.cat([tvals[..., :1], bin_mids], -1)
        rand01 = torch.rand([tvals.shape[0], raydir.shape[1], tvals.shape[2]],
                            device=campos.device)
        tvals = lo + (hi - lo) * rand01
    # pad the last segment with a huge pseudo-infinite length (NeRF
    # convention) and scale by |raydir| to get metric lengths
    pad = torch.full((tvals.shape[0], tvals.shape[1], 1), 1e10,
                     device=tvals.device)
    segment_length = torch.cat([tvals[..., 1:] - tvals[..., :-1], pad],
                               axis=-1) * torch.linalg.norm(
                                   raydir[..., None, :], axis=-1)
    raypos = campos[:, None, None, :] \
        + raydir[:, :, None, :] * tvals[:, :, :, None]
    valid = torch.ones_like(tvals, dtype=raypos.dtype, device=raypos.device)
    return raypos, segment_length, valid, tvals
def near_far_linear_ray_generation(campos,
                                   raydir,
                                   point_count,
                                   near=0.1,
                                   far=10,
                                   jitter=0.,
                                   **kargs):
    """Sample ``point_count`` points per ray at depths spaced linearly in
    [near, far]; points sit at segment midpoints. ``jitter`` (in [0, 1))
    randomly rescales every segment length by up to +/- jitter/2.

    campos: N x 3; raydir: N x Rays x 3, normalized.
    Returns (raypos, segment_length, valid, middle_point_ts), shaped
    N x Rays x Samples (x 3 for raypos).
    """
    steps = torch.linspace(0, 1, point_count + 1,
                           device=campos.device).view(1, -1)
    depths = near * (1 - steps) + far * steps
    # the rand term broadcasts segment lengths to N x Rays x Samples
    # (it is drawn even when jitter == 0, keeping RNG state consistent)
    noise = torch.rand((raydir.shape[0], raydir.shape[1], point_count),
                       device=campos.device) - 0.5
    segment_length = (depths[..., 1:] - depths[..., :-1]) * (1 + jitter * noise)
    # cumulative segment end points, with a leading zero, offset to `near`
    ends = torch.cumsum(segment_length, dim=2)
    zero_pad = torch.zeros((ends.shape[0], ends.shape[1], 1),
                           device=ends.device)
    end_point_ts = near + torch.cat([zero_pad, ends], dim=2)
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None, None, :] \
        + raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    # convert parametric lengths to metric lengths (no-op for unit raydir)
    segment_length = segment_length * torch.linalg.norm(raydir[..., None, :],
                                                        axis=-1)
    return raypos, segment_length, valid, middle_point_ts
def refine_ray_generation(campos,
                          raydir,
                          point_count,
                          prev_ts,
                          prev_weights,
                          domain_size=1.,
                          jitter=0,
                          **kargs):
    """Importance-resample ``point_count`` samples per ray from the CDF
    defined by a previous pass (``prev_ts`` / ``prev_weights``); samples sit
    at midpoints of the resampled segments. Runs entirely under no_grad;
    sampling is deterministic when ``jitter <= 0``.

    campos: N x 3; raydir: N x Rays x 3 (normalized);
    prev_ts / prev_weights: N x Rays x PrevSamples.
    Returns (raypos, segment_length, valid, middle_point_ts).
    """
    with torch.no_grad():
        # draw point_count + 1 segment boundaries from the previous pdf
        bounds = sample_pdf(prev_ts[..., None], prev_weights,
                            point_count + 1, jitter <= 0)
        bounds = bounds.view(bounds.shape[:-1])
        segment_length = bounds[:, :, 1:] - bounds[:, :, :-1]
        middle_point_ts = (bounds[:, :, :-1] + bounds[:, :, 1:]) / 2
        raypos = campos[:, None, None, :] \
            + raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
        valid = torch.ones_like(middle_point_ts,
                                dtype=middle_point_ts.dtype,
                                device=middle_point_ts.device)
        # convert parametric to metric lengths (no-op for unit raydir)
        segment_length = segment_length * torch.linalg.norm(
            raydir[..., None, :], axis=-1)
    return raypos, segment_length, valid, middle_point_ts
def nerf_refine_ray_generation(campos,
                               raydir,
                               point_count,
                               prev_ts,
                               prev_weights,
                               domain_size=1.,
                               jitter=0,
                               **kargs):
    """NeRF-style importance resampling from a coarse pass: draw new
    segment boundaries from the CDF of ``prev_weights`` over ``prev_ts``
    (uniform depth bins between near and far). Runs under no_grad;
    deterministic when ``jitter <= 0``.

    Returns (raypos, segment_length, valid, middle_point_ts).
    """
    with torch.no_grad():
        bounds = sample_pdf(prev_ts[..., None], prev_weights,
                            point_count + 1, jitter <= 0)
        bounds = bounds.view(bounds.shape[:-1])
        # metric segment lengths, scaled by |raydir|
        segment_length = (bounds[:, :, 1:] - bounds[:, :, :-1]) \
            * torch.linalg.norm(raydir[..., None, :], axis=-1)
        middle_point_ts = (bounds[:, :, :-1] + bounds[:, :, 1:]) / 2
        raypos = campos[:, None, None, :] \
            + raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
        valid = torch.ones_like(middle_point_ts,
                                dtype=middle_point_ts.dtype,
                                device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts
def refine_cube_ray_generation(campos,
                               raydir,
                               point_count,
                               prev_ts,
                               prev_weights,
                               domain_size=1.,
                               jitter=0,
                               **kargs):
    """Like :func:`refine_ray_generation`, but marks samples falling outside
    the cube (-domain_size, domain_size)^3 as invalid (valid is uint8)."""
    with torch.no_grad():
        raypos, segment_length, _, middle_point_ts = refine_ray_generation(
            campos,
            raydir,
            point_count,
            prev_ts,
            prev_weights,
            domain_size=domain_size,
            jitter=jitter,
            **kargs)
        # a sample is valid only if all three coordinates lie inside the cube
        inside = torch.gt(raypos, -domain_size) * torch.lt(raypos, domain_size)
        valid = torch.prod(inside, dim=-1).byte()
    return raypos, segment_length, valid, middle_point_ts
def ray_march(ray_dist,
              ray_valid,
              ray_features,
              render_func,
              blend_func,
              bg_color=None):
    """Alpha-composite per-sample features along each ray.

    ray_dist / ray_valid: N x Rays x Samples; ray_features:
    N x Rays x Samples x Features, where channel 0 is the density.
    ``render_func`` maps features to per-sample colors;
    ``blend_func(opacity, transmission)`` yields blend weights.
    Returns (ray_color, point_color, opacity, acc_transmission,
    blend_weight, background_transmission, background_blend_weight).
    """
    point_color = render_func(ray_features)
    # density channel; invalid samples contribute nothing
    sigma = ray_features[..., 0] * ray_valid.float()
    opacity = 1 - torch.exp(-sigma * ray_dist)
    # transmission BEFORE each sample = exclusive cumprod of (1 - opacity);
    # the epsilon keeps the product strictly positive
    acc_transmission = torch.cumprod(1. - opacity + 1e-10, dim=-1)
    background_transmission = acc_transmission[:, :, [-1]]
    ones = torch.ones(opacity.shape[0:2] + (1, )).to(
        opacity.device).float()  # N x R x 1
    acc_transmission = torch.cat([ones, acc_transmission[:, :, :-1]], dim=-1)
    blend_weight = blend_func(opacity, acc_transmission)[..., None]
    ray_color = torch.sum(point_color * blend_weight, dim=-2, keepdim=False)
    if bg_color is not None:
        # composite the background through the remaining transmission
        ray_color += bg_color.to(opacity.device).float().view(
            background_transmission.shape[0], 1, 3) * background_transmission
    background_blend_weight = blend_func(1, background_transmission)
    return ray_color, point_color, opacity, acc_transmission, blend_weight, \
        background_transmission, background_blend_weight
def alpha_ray_march(ray_dist, ray_valid, ray_features,
                    blend_func):
    """Opacity-only variant of :func:`ray_march` — computes the blending
    quantities without rendering colors. Returns (opacity,
    acc_transmission, blend_weight, background_transmission,
    background_blend_weight)."""
    sigma = ray_features[..., 0] * ray_valid.float()
    opacity = 1 - torch.exp(-sigma * ray_dist)
    # exclusive cumulative transmission (epsilon keeps it positive)
    acc_transmission = torch.cumprod(1. - opacity + 1e-10, dim=-1)
    background_transmission = acc_transmission[:, :, [-1]]
    ones = torch.ones(opacity.shape[0:2] + (1, )).to(
        opacity.device).float()  # N x R x 1
    acc_transmission = torch.cat([ones, acc_transmission[:, :, :-1]], dim=-1)
    blend_weight = blend_func(opacity, acc_transmission)[..., None]
    background_blend_weight = blend_func(1, background_transmission)
    return opacity, acc_transmission, blend_weight, \
        background_transmission, background_blend_weight
| 23,672 | 40.314136 | 290 | py |
pointnerf | pointnerf-master/models/rendering/__init__.py | 0 | 0 | 0 | py | |
pointnerf | pointnerf-master/models/rendering/diff_render_func.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils import format as fmt
def find_render_function(name):
    """Look up a render function by name ('radiance' | 'white');
    raises RuntimeError for an unknown name."""
    if name == 'radiance':
        return radiance_render
    if name == 'white':
        return white_color
    raise RuntimeError('Unknown render function: ' + name)
def find_blend_function(name):
    """Look up a blend-weight function by name ('alpha' | 'alpha2');
    raises RuntimeError for an unknown name."""
    if name == 'alpha':
        return alpha_blend
    if name == 'alpha2':
        return alpha2_blend
    raise RuntimeError('Unknown blend function: ' + name)
def find_tone_map(name):
    """Look up a tone-mapping function by name ('gamma' | 'normalize' |
    'off').

    Raises:
        RuntimeError: for an unknown name. (Fixed: the message previously
        said 'Unknown blend function', a copy-paste from
        find_blend_function.)
    """
    if name == 'gamma':
        return simple_tone_map
    elif name == 'normalize':
        return normalize_tone_map
    elif name == 'off':
        return no_tone_map
    raise RuntimeError('Unknown tone map: ' + name)
def alpha_blend(opacity, acc_transmission):
    """Standard compositing weight: sample opacity scaled by the
    transmission accumulated in front of the sample."""
    weight = opacity * acc_transmission
    return weight
def alpha2_blend(opacity, acc_transmission):
    """Blend weight for a light collocated with the camera: the
    transmission is applied twice to model the light's round trip."""
    return opacity * acc_transmission * acc_transmission
def radiance_render(ray_feature):
    """Read the RGB radiance stored in feature channels 1..3."""
    rgb = ray_feature[..., 1:4]
    return rgb
def white_color(ray_feature):
    """Return constant white with the same shape/dtype/device as the RGB
    channels of ``ray_feature``.

    Simplified: the previous implementation clamped the albedo to [0, 1]
    before calling ones_like, but only the tensor's shape/dtype was ever
    used, so the clamp was dead work.
    """
    return torch.ones_like(ray_feature[..., 1:4])
def simple_tone_map(color, gamma=2.2, exposure=1):
    """Gamma-correct ``color * exposure`` and clamp into [0, 1]; the small
    epsilon keeps the pow base strictly positive."""
    exposed = color * exposure + 1e-5
    return torch.pow(exposed, 1 / gamma).clamp_(0, 1)
def no_tone_map(color, gamma=2.2, exposure=1):
    """Identity tone map; ``gamma`` and ``exposure`` are accepted only for
    interface compatibility and ignored."""
    return color
def normalize_tone_map(color):
    """L2-normalize along the channel axis and remap from [-1, 1]
    to [0, 1] (0.5 * n + 0.5)."""
    unit = F.normalize(color, dim=-1)
    return unit * 0.5 + 0.5
| 1,620 | 22.838235 | 73 | py |
pointnerf | pointnerf-master/models/aggregators/point_aggregators.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from ..helpers.networks import init_seq, positional_encoding
from utils.spherical import SphericalHarm_table as SphericalHarm
from ..helpers.geometrics import compute_world2local_dist
class PointAggregator(torch.nn.Module):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.add_argument(
'--feature_init_method',
type=str,
default="rand",
help='which agg model to use [feature_interp | graphconv | affine_mix]')
parser.add_argument(
'--which_agg_model',
type=str,
default="viewmlp",
help='which agg model to use [viewmlp | nsvfmlp]')
parser.add_argument(
'--agg_distance_kernel',
type=str,
default="quadric",
help='which agg model to use [quadric | linear | feat_intrp | harmonic_intrp]')
parser.add_argument(
'--sh_degree',
type=int,
default=4,
help='degree of harmonics')
parser.add_argument(
'--sh_dist_func',
type=str,
default="sh_quadric",
help='sh_quadric | sh_linear | passfunc')
parser.add_argument(
'--sh_act',
type=str,
default="sigmoid",
help='sigmoid | tanh | passfunc')
parser.add_argument(
'--agg_axis_weight',
type=float,
nargs='+',
default=None,
help=
'(1., 1., 1.)'
)
parser.add_argument(
'--agg_dist_pers',
type=int,
default=1,
help='use pers dist')
parser.add_argument(
'--apply_pnt_mask',
type=int,
default=1,
help='use pers dist')
parser.add_argument(
'--modulator_concat',
type=int,
default=0,
help='use pers dist')
parser.add_argument(
'--agg_intrp_order',
type=int,
default=0,
help='interpolate first and feature mlp 0 | feature mlp then interpolate 1 | feature mlp color then interpolate 2')
parser.add_argument(
'--shading_feature_mlp_layer0',
type=int,
default=0,
help='interp to agged features mlp num')
parser.add_argument(
'--shading_feature_mlp_layer1',
type=int,
default=2,
help='interp to agged features mlp num')
parser.add_argument(
'--shading_feature_mlp_layer2',
type=int,
default=0,
help='interp to agged features mlp num')
parser.add_argument(
'--shading_feature_mlp_layer3',
type=int,
default=2,
help='interp to agged features mlp num')
parser.add_argument(
'--shading_feature_num',
type=int,
default=256,
help='agged shading feature channel num')
parser.add_argument(
'--point_hyper_dim',
type=int,
default=256,
help='agged shading feature channel num')
parser.add_argument(
'--shading_alpha_mlp_layer',
type=int,
default=1,
help='agged features to alpha mlp num')
parser.add_argument(
'--shading_color_mlp_layer',
type=int,
default=1,
help='agged features to alpha mlp num')
parser.add_argument(
'--shading_color_channel_num',
type=int,
default=3,
help='color channel num')
parser.add_argument(
'--num_feat_freqs',
type=int,
default=0,
help='color channel num')
parser.add_argument(
'--num_hyperfeat_freqs',
type=int,
default=0,
help='color channel num')
parser.add_argument(
'--dist_xyz_freq',
type=int,
default=2,
help='color channel num')
parser.add_argument(
'--dist_xyz_deno',
type=float,
default=0,
help='color channel num')
parser.add_argument(
'--weight_xyz_freq',
type=int,
default=2,
help='color channel num')
parser.add_argument(
'--weight_feat_dim',
type=int,
default=8,
help='color channel num')
parser.add_argument(
'--agg_weight_norm',
type=int,
default=1,
help='normalize weight, sum as 1')
parser.add_argument(
'--view_ori',
type=int,
default=0,
help='0 for pe+3 orignal channels')
parser.add_argument(
'--agg_feat_xyz_mode',
type=str,
default="None",
help='which agg xyz mode to use [None not to use | world world xyz | pers perspective xyz ]')
parser.add_argument(
'--agg_alpha_xyz_mode',
type=str,
default="None",
help='which agg xyz mode to use [None not to use | world world xyz | pers perspective xyz ]')
parser.add_argument(
'--agg_color_xyz_mode',
type=str,
default="None",
help='which agg xyz mode to use [None not to use | world world xyz | pers perspective xyz ]')
parser.add_argument(
'--act_type',
type=str,
default="ReLU",
# default="LeakyReLU",
help='which agg xyz mode to use [None not to use | world world xyz | pers perspective xyz ]')
parser.add_argument(
'--act_super',
type=int,
default=1,
# default="LeakyReLU",
help='1 to use softplus and widden sigmoid for last activation')
    def __init__(self, opt):
        """Build the aggregator: select activation and distance kernel from
        `opt`, then construct the MLP stack via the `<which_agg_model>_init`
        hook (e.g. ``viewmlp_init``)."""
        super(PointAggregator, self).__init__()
        # activation class looked up by name, e.g. nn.ReLU / nn.LeakyReLU
        self.act = getattr(nn, opt.act_type, None)
        print("opt.act_type!!!!!!!!!", opt.act_type)
        # hyper-feature width is capped by the point feature width
        self.point_hyper_dim=opt.point_hyper_dim if opt.point_hyper_dim < opt.point_features_dim else opt.point_features_dim
        block_init_lst = []
        if opt.agg_distance_kernel == "feat_intrp":
            # small MLP that predicts per-neighbor interpolation weights
            feat_weight_block = []
            in_channels = 2 * opt.weight_xyz_freq * 3 + opt.weight_feat_dim
            out_channels = int(in_channels / 2)
            for i in range(2):
                feat_weight_block.append(nn.Linear(in_channels, out_channels))
                feat_weight_block.append(self.act(inplace=True))
                in_channels = out_channels
            feat_weight_block.append(nn.Linear(in_channels, 1))
            feat_weight_block.append(nn.Sigmoid())
            self.feat_weight_mlp = nn.Sequential(*feat_weight_block)
            block_init_lst.append(self.feat_weight_mlp)
        elif opt.agg_distance_kernel == "sh_intrp":
            # spherical-harmonics table used by the sh_intrp kernel
            self.shcomp = SphericalHarm(opt.sh_degree)
        self.opt = opt
        # per-neighbor distance feature width, keyed on agg_dist_pers
        self.dist_dim = (4 if self.opt.agg_dist_pers == 30 else 6) if self.opt.agg_dist_pers > 9 else 3
        # distance kernel dispatched by method name (quadric, linear, ...)
        self.dist_func = getattr(self, opt.agg_distance_kernel, None)
        assert self.dist_func is not None, "InterpAggregator doesn't have disance_kernel {} ".format(opt.agg_distance_kernel)
        self.axis_weight = None if opt.agg_axis_weight is None else torch.as_tensor(opt.agg_axis_weight, dtype=torch.float32, device="cuda")[None, None, None, None, :]
        self.num_freqs = opt.num_pos_freqs if opt.num_pos_freqs > 0 else 0
        self.num_viewdir_freqs = opt.num_viewdir_freqs if opt.num_viewdir_freqs > 0 else 0
        # positional-encoding output widths for points and view directions
        self.pnt_channels = (2 * self.num_freqs * 3) if self.num_freqs > 0 else 3
        self.viewdir_channels = (2 * self.num_viewdir_freqs * 3 + self.opt.view_ori * 3) if self.num_viewdir_freqs > 0 else 3
        # "feathyper_*" model names map onto their base agg model
        self.which_agg_model = opt.which_agg_model.split("_")[0] if opt.which_agg_model.startswith("feathyper") else opt.which_agg_model
        # delegate network construction, e.g. viewmlp_init(opt, ...)
        getattr(self, self.which_agg_model+"_init", None)(opt, block_init_lst)
        self.density_super_act = torch.nn.Softplus()
        self.density_act = torch.nn.ReLU()
        self.color_act = torch.nn.Sigmoid()
def raw2out_density(self, raw_density):
if self.opt.act_super > 0:
# return self.density_act(raw_density - 1) # according to mip nerf, to stablelize the training
return self.density_super_act(raw_density - 1) # according to mip nerf, to stablelize the training
else:
return self.density_act(raw_density)
def raw2out_color(self, raw_color):
color = self.color_act(raw_color)
if self.opt.act_super > 0:
color = color * (1 + 2 * 0.001) - 0.001 # according to mip nerf, to stablelize the training
return color
    def viewmlp_init(self, opt, block_init_lst):
        """Build the view-dependent MLP stack (block1-3, alpha and color
        branches) used by ``viewmlp``; channel widths are derived from
        `opt`. Constructed modules are appended to ``block_init_lst`` for
        weight initialization by the caller (``__init__`` runs init_seq)."""
        # distance-feature width, after optional positional encoding
        dist_xyz_dim = self.dist_dim if opt.dist_xyz_freq == 0 else 2 * abs(opt.dist_xyz_freq) * self.dist_dim
        # input width: point features (+xyz PE), minus the channels consumed
        # by the chosen distance kernel (weight feats / SH coefs / Gaussian
        # parameters), plus feature PE and distance features when enabled
        in_channels = opt.point_features_dim + (0 if opt.agg_feat_xyz_mode == "None" else self.pnt_channels) - (opt.weight_feat_dim if opt.agg_distance_kernel in ["feat_intrp", "meta_intrp"] else 0) - (opt.sh_degree ** 2 if opt.agg_distance_kernel == "sh_intrp" else 0) - (7 if opt.agg_distance_kernel == "gau_intrp" else 0)
        in_channels += (2 * opt.num_feat_freqs * in_channels if opt.num_feat_freqs > 0 else 0) + (dist_xyz_dim if opt.agg_intrp_order > 0 else 0)
        if opt.shading_feature_mlp_layer1 > 0:
            out_channels = opt.shading_feature_num
            block1 = []
            for i in range(opt.shading_feature_mlp_layer1):
                block1.append(nn.Linear(in_channels, out_channels))
                block1.append(self.act(inplace=True))
                in_channels = out_channels
            self.block1 = nn.Sequential(*block1)
            block_init_lst.append(self.block1)
        else:
            # disabled blocks degenerate to the identity
            self.block1 = self.passfunc
        if opt.shading_feature_mlp_layer2 > 0:
            in_channels = in_channels + (0 if opt.agg_feat_xyz_mode == "None" else self.pnt_channels) + (
                dist_xyz_dim if (opt.agg_intrp_order > 0 and opt.num_feat_freqs == 0) else 0)
            out_channels = opt.shading_feature_num
            block2 = []
            for i in range(opt.shading_feature_mlp_layer2):
                block2.append(nn.Linear(in_channels, out_channels))
                block2.append(self.act(inplace=True))
                in_channels = out_channels
            self.block2 = nn.Sequential(*block2)
            block_init_lst.append(self.block2)
        else:
            self.block2 = self.passfunc
        if opt.shading_feature_mlp_layer3 > 0:
            # extra inputs: per-point color (3) and/or direction (4) channels
            in_channels = in_channels + (3 if "1" in list(opt.point_color_mode) else 0) + (
                4 if "1" in list(opt.point_dir_mode) else 0)
            out_channels = opt.shading_feature_num
            block3 = []
            for i in range(opt.shading_feature_mlp_layer3):
                block3.append(nn.Linear(in_channels, out_channels))
                block3.append(self.act(inplace=True))
                in_channels = out_channels
            self.block3 = nn.Sequential(*block3)
            block_init_lst.append(self.block3)
        else:
            self.block3 = self.passfunc
        # density head: shading feature (+ optional xyz PE) -> 1 channel
        alpha_block = []
        in_channels = opt.shading_feature_num + (0 if opt.agg_alpha_xyz_mode == "None" else self.pnt_channels)
        out_channels = int(opt.shading_feature_num / 2)
        for i in range(opt.shading_alpha_mlp_layer - 1):
            alpha_block.append(nn.Linear(in_channels, out_channels))
            alpha_block.append(self.act(inplace=False))
            in_channels = out_channels
        alpha_block.append(nn.Linear(in_channels, 1))
        self.alpha_branch = nn.Sequential(*alpha_block)
        block_init_lst.append(self.alpha_branch)
        # color head: shading feature + view direction (+ optional xyz PE)
        # -> 3 RGB channels
        color_block = []
        in_channels = opt.shading_feature_num + self.viewdir_channels + (
            0 if opt.agg_color_xyz_mode == "None" else self.pnt_channels)
        out_channels = int(opt.shading_feature_num / 2)
        for i in range(opt.shading_color_mlp_layer - 1):
            color_block.append(nn.Linear(in_channels, out_channels))
            color_block.append(self.act(inplace=True))
            in_channels = out_channels
        color_block.append(nn.Linear(in_channels, 3))
        self.color_branch = nn.Sequential(*color_block)
        block_init_lst.append(self.color_branch)
        for m in block_init_lst:
            init_seq(m)
    def passfunc(self, input):
        """Identity; selectable by name via getattr-based dispatch
        (e.g. ``opt.sh_act`` / ``opt.sh_dist_func``) and used as a stand-in
        for disabled MLP blocks."""
        return input
def trilinear(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
# dists: B * R * SR * K * 3
# return B * R * SR * K
dists = dists * pnt_mask[..., None]
dists = dists / grid_vox_sz
# dist: [1, 797, 40, 8, 3]; pnt_mask: [1, 797, 40, 8]
# dists = 1 + dists * torch.as_tensor([[1,1,1], [-1, 1, 1], [1, -1, 1], [1, 1, -1], [-1, 1, -1], [1, -1, -1], [-1, -1, 1], [-1, -1, -1]], dtype=torch.float32, device=dists.device).view(1, 1, 1, 8, 3)
dists = 1 - torch.abs(dists)
weights = pnt_mask * dists[..., 0] * dists[..., 1] * dists[..., 2]
norm_weights = weights / torch.clamp(torch.sum(weights, dim=-1, keepdim=True), min=1e-8)
# ijk = xyz.astype(np.int32)
# i, j, k = ijk[:, 0], ijk[:, 1], ijk[:, 2]
# V000 = data[i, j, k].astype(np.int32)
# V100 = data[(i + 1), j, k].astype(np.int32)
# V010 = data[i, (j + 1), k].astype(np.int32)
# V001 = data[i, j, (k + 1)].astype(np.int32)
# V101 = data[(i + 1), j, (k + 1)].astype(np.int32)
# V011 = data[i, (j + 1), (k + 1)].astype(np.int32)
# V110 = data[(i + 1), (j + 1), k].astype(np.int32)
# V111 = data[(i + 1), (j + 1), (k + 1)].astype(np.int32)
# xyz = xyz - ijk
# x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
# Vxyz = (V000 * (1 - x) * (1 - y) * (1 - z)
# + V100 * x * (1 - y) * (1 - z) +
# + V010 * (1 - x) * y * (1 - z) +
# + V001 * (1 - x) * (1 - y) * z +
# + V101 * x * (1 - y) * z +
# + V011 * (1 - x) * y * z +
# + V110 * x * y * (1 - z) +
# + V111 * x * y * z)
return norm_weights, embedding
def avg(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
# dists: B * channel* R * SR * K
# return B * R * SR * K
weights = pnt_mask * 1.0
return weights, embedding
def quadric(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
# dists: B * channel* R * SR * K
# return B * R * SR * K
if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 1] == 1 and axis_weight[..., 2] ==1):
weights = 1./ torch.clamp(torch.sum(torch.square(dists[..., :3]), dim=-1), min= 1e-8)
else:
weights = 1. / torch.clamp(torch.sum(torch.square(dists)* axis_weight, dim=-1), min=1e-8)
weights = pnt_mask * weights
return weights, embedding
def numquadric(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
# dists: B * channel* R * SR * K
# return B * R * SR * K
if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 1] == 1 and axis_weight[..., 2] ==1):
weights = 1./ torch.clamp(torch.sum(torch.square(dists), dim=-1), min= 1e-8)
else:
weights = 1. / torch.clamp(torch.sum(torch.square(dists)* axis_weight, dim=-1), min=1e-8)
weights = pnt_mask * weights
return weights, embedding
def linear(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
# dists: B * R * SR * K * channel
# return B * R * SR * K
if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 2] ==1) :
weights = 1. / torch.clamp(torch.norm(dists[..., :3], dim=-1), min= 1e-6)
else:
weights = 1. / torch.clamp(torch.sqrt(torch.sum(torch.square(dists[...,:2]), dim=-1)) * axis_weight[..., 0] + torch.abs(dists[...,2]) * axis_weight[..., 1], min= 1e-6)
weights = pnt_mask * weights
return weights, embedding
def numlinear(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
# dists: B * R * SR * K * channel
# return B * R * SR * K
if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 2] ==1) :
weights = 1. / torch.clamp(torch.norm(dists, dim=-1), min= 1e-6)
else:
weights = 1. / torch.clamp(torch.sqrt(torch.sum(torch.square(dists[...,:2]), dim=-1)) * axis_weight[..., 0] + torch.abs(dists[...,2]) * axis_weight[..., 1], min= 1e-6)
weights = pnt_mask * weights
norm_weights = weights / torch.clamp(torch.sum(pnt_mask, dim=-1, keepdim=True), min=1)
return norm_weights, embedding
    def sigmoid(self, input):
        """Elementwise sigmoid, exposed as a method for getattr-based
        dispatch (``opt.sh_act`` in ``sh_intrp``)."""
        return torch.sigmoid(input)
    def tanh(self, input):
        """Elementwise tanh, exposed as a method for getattr-based
        dispatch (``opt.sh_act`` in ``sh_intrp``)."""
        return torch.tanh(input)
    def sh_linear(self, dist_norm):
        """1/r radial falloff (clamped away from zero), selectable via
        ``opt.sh_dist_func``."""
        return 1 / torch.clamp(dist_norm, min=1e-8)
    def sh_quadric(self, dist_norm):
        """1/r^2 radial falloff (clamped away from zero), selectable via
        ``opt.sh_dist_func``."""
        return 1 / torch.clamp(torch.square(dist_norm), min=1e-8)
    def sh_intrp(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
        """Spherical-harmonics interpolation kernel: the first
        ``total_deg**2`` embedding channels are SH coefficients evaluated
        against each neighbor direction; the remaining channels pass
        through. Returns (weights, remaining embedding)."""
        # dists: B * R * SR * K * channel
        dist_norm = torch.linalg.norm(dists, dim=-1)
        dist_dirs = dists / torch.clamp(dist_norm[...,None], min=1e-8)
        # SH basis evaluated at each neighbor direction
        shall = self.shcomp.sh_all(dist_dirs, filp_dir=False).view(dists.shape[:-1]+(self.shcomp.total_deg ** 2,))
        sh_coefs = embedding[..., :self.shcomp.total_deg ** 2]
        # shall: [1, 816, 24, 32, 16], sh_coefs: [1, 816, 24, 32, 16], pnt_mask: [1, 816, 24, 32]
        # activation (opt.sh_act) applied to the SH response, then scaled by
        # the radial falloff chosen via opt.sh_dist_func (e.g. sh_quadric)
        weights = pnt_mask * torch.sum(getattr(self, self.opt.sh_act, None)(shall * sh_coefs), dim=-1) * getattr(self, self.opt.sh_dist_func, None)(dist_norm)  # changed
        return weights, embedding[..., self.shcomp.total_deg ** 2:]
    def gau_intrp(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None):
        """Anisotropic-Gaussian kernel: the first 7 embedding channels
        encode an amplitude, 3 radii and 3 rotation angles of a per-point
        Gaussian; the weight is that Gaussian evaluated at each neighbor
        offset. Returns (weights, remaining embedding)."""
        # dists: B * R * SR * K * channel
        # dist: [1, 752, 40, 32, 3]
        B, R, SR, K, _ = dists.shape
        scale = torch.abs(embedding[..., 0])  # Gaussian amplitude
        # radii bounded to 20 voxels via sigmoid; rotations clamped to +-pi/4
        radii = vsize[2] * 20 * torch.sigmoid(embedding[..., 1:4])
        rotations = torch.clamp(embedding[..., 4:7], max=np.pi / 4, min=-np.pi / 4)
        # offsets expressed in the local (rotated / scaled) Gaussian frame
        gau_dist = compute_world2local_dist(dists, radii, rotations)[..., 0]
        weights = pnt_mask * scale * torch.exp(-0.5 * torch.sum(torch.square(gau_dist), dim=-1))
        return weights, embedding[..., 7:]
    def viewmlp(self, sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, vsize, weight, pnt_mask_flat, pts, viewdirs, total_len, ray_valid, in_shape, dists):
        """Shading MLP: turns per-neighbor features into per-shading-point
        (density, color) raw outputs.

        Two aggregation orders are supported via ``opt.agg_intrp_order``:
        0 aggregates neighbor features with ``weight`` BEFORE the MLP;
        1 runs the MLP per neighbor and aggregates the resulting features;
        2 aggregates the density per neighbor and the color features separately.
        Returns (output_placeholder, None) where output_placeholder is
        [total_len, shading_color_channel_num + 1] with invalid rays left zero.
        NOTE(review): tensor-shape comments below reflect the visible indexing;
        exact channel layouts depend on opt settings — confirm against callers.
        """
        # print("sampled_Rw2c", sampled_Rw2c.shape, sampled_xyz.shape)
        # assert sampled_Rw2c.dim() == 2
        B, R, SR, K, _ = dists.shape
        sampled_Rw2c = sampled_Rw2c.transpose(-1, -2)
        # a 2-D Rw2c means one world-to-camera rotation shared by all points
        uni_w2c = sampled_Rw2c.dim() == 2
        if not uni_w2c:
            sampled_Rw2c_ray = sampled_Rw2c[:,:,:,0,:,:].view(-1, 3, 3)
            sampled_Rw2c = sampled_Rw2c.reshape(-1, 3, 3)[pnt_mask_flat, :, :]
        pts_ray, pts_pnt = None, None
        # optional positional encoding of shading-point locations
        if self.opt.agg_feat_xyz_mode != "None" or self.opt.agg_alpha_xyz_mode != "None" or self.opt.agg_color_xyz_mode != "None":
            if self.num_freqs > 0:
                pts = positional_encoding(pts, self.num_freqs)
            pts_ray = pts[ray_valid, :]
            if self.opt.agg_feat_xyz_mode != "None" and self.opt.agg_intrp_order > 0:
                # repeat per neighbor, then drop masked-out neighbors
                pts_pnt = pts[..., None, :].repeat(1, K, 1).view(-1, pts.shape[-1])
                if self.opt.apply_pnt_mask > 0:
                    pts_pnt=pts_pnt[pnt_mask_flat, :]
        # rotate view directions into the camera frame (shared or per-ray)
        viewdirs = viewdirs @ sampled_Rw2c if uni_w2c else (viewdirs[..., None, :] @ sampled_Rw2c_ray).squeeze(-2)
        if self.num_viewdir_freqs > 0:
            viewdirs = positional_encoding(viewdirs, self.num_viewdir_freqs, ori=True)
            # ori=True keeps the raw direction as the first 3 channels
            ori_viewdirs, viewdirs = viewdirs[..., :3], viewdirs[..., 3:]
        viewdirs = viewdirs[ray_valid, :]

        if self.opt.agg_intrp_order == 0:
            # aggregate neighbor embeddings first, then feed the MLP per ray
            feat = torch.sum(sampled_embedding * weight[..., None], dim=-2)
            feat = feat.view([-1, feat.shape[-1]])[ray_valid, :]
            if self.opt.num_feat_freqs > 0:
                feat = torch.cat([feat, positional_encoding(feat, self.opt.num_feat_freqs)], dim=-1)
            pts = pts_ray
        else:
            # per-neighbor path: flatten offsets and drop masked neighbors
            dists_flat = dists.view(-1, dists.shape[-1])
            if self.opt.apply_pnt_mask > 0:
                dists_flat = dists_flat[pnt_mask_flat, :]
            # normalize offsets by a fraction of the voxel diagonal (if enabled)
            dists_flat /= (
                1.0 if self.opt.dist_xyz_deno == 0. else float(self.opt.dist_xyz_deno * np.linalg.norm(vsize)))
            dists_flat[..., :3] = dists_flat[..., :3] @ sampled_Rw2c if uni_w2c else (dists_flat[..., None, :3] @ sampled_Rw2c).squeeze(-2)
            if self.opt.dist_xyz_freq != 0:
                # print(dists.dtype, (self.opt.dist_xyz_deno * np.linalg.norm(vsize)).dtype, dists_flat.dtype)
                dists_flat = positional_encoding(dists_flat, self.opt.dist_xyz_freq)
            feat= sampled_embedding.view(-1, sampled_embedding.shape[-1])
            # print("feat", feat.shape)

            if self.opt.apply_pnt_mask > 0:
                feat = feat[pnt_mask_flat, :]
            if self.opt.num_feat_freqs > 0:
                feat = torch.cat([feat, positional_encoding(feat, self.opt.num_feat_freqs)], dim=-1)
            feat = torch.cat([feat, dists_flat], dim=-1)
            weight = weight.view(B * R * SR, K, 1)
            pts = pts_pnt

        # used_point_embedding = feat[..., : self.opt.point_features_dim]
        if self.opt.agg_feat_xyz_mode != "None":
            feat = torch.cat([feat, pts], dim=-1)
        # print("feat",feat.shape) # 501
        feat = self.block1(feat)

        if self.opt.shading_feature_mlp_layer2>0:
            if self.opt.agg_feat_xyz_mode != "None":
                feat = torch.cat([feat, pts], dim=-1)
            if self.opt.agg_intrp_order > 0:
                feat = torch.cat([feat, dists_flat], dim=-1)
            feat = self.block2(feat)

        if self.opt.shading_feature_mlp_layer3>0:
            # append per-point color / direction cues before the third block
            if sampled_color is not None:
                sampled_color = sampled_color.view(-1, sampled_color.shape[-1])
                if self.opt.apply_pnt_mask > 0:
                    sampled_color = sampled_color[pnt_mask_flat, :]
                feat = torch.cat([feat, sampled_color], dim=-1)
            if sampled_dir is not None:
                sampled_dir = sampled_dir.view(-1, sampled_dir.shape[-1])
                if self.opt.apply_pnt_mask > 0:
                    sampled_dir = sampled_dir[pnt_mask_flat, :]
                    sampled_dir = sampled_dir @ sampled_Rw2c if uni_w2c else (sampled_dir[..., None, :] @ sampled_Rw2c).squeeze(-2)
                ori_viewdirs = ori_viewdirs[..., None, :].repeat(1, K, 1).view(-1, ori_viewdirs.shape[-1])
                if self.opt.apply_pnt_mask > 0:
                    ori_viewdirs = ori_viewdirs[pnt_mask_flat, :]
                # difference and cosine between point direction and view direction
                feat = torch.cat([feat, sampled_dir - ori_viewdirs, torch.sum(sampled_dir*ori_viewdirs, dim=-1, keepdim=True)], dim=-1)
            feat = self.block3(feat)

        if self.opt.agg_intrp_order == 1:
            # aggregate per-neighbor MLP features, then predict alpha and color
            if self.opt.apply_pnt_mask > 0:
                # scatter masked features back into the dense neighbor grid
                feat_holder = torch.zeros([B * R * SR * K, feat.shape[-1]], dtype=torch.float32, device=feat.device)
                feat_holder[pnt_mask_flat, :] = feat
            else:
                feat_holder = feat
            feat = feat_holder.view(B * R * SR, K, feat_holder.shape[-1])
            feat = torch.sum(feat * weight, dim=-2).view([-1, feat.shape[-1]])[ray_valid, :]

            alpha_in = feat
            if self.opt.agg_alpha_xyz_mode != "None":
                alpha_in = torch.cat([alpha_in, pts], dim=-1)

            alpha = self.raw2out_density(self.alpha_branch(alpha_in))

            color_in = feat
            if self.opt.agg_color_xyz_mode != "None":
                color_in = torch.cat([color_in, pts], dim=-1)

            color_in = torch.cat([color_in, viewdirs], dim=-1)
            color_output = self.raw2out_color(self.color_branch(color_in))

            # print("color_output", torch.sum(color_output), color_output.grad)

            output = torch.cat([alpha, color_output], dim=-1)

        elif self.opt.agg_intrp_order == 2:
            # predict alpha per neighbor, aggregate it; aggregate features for color
            alpha_in = feat
            if self.opt.agg_alpha_xyz_mode != "None":
                alpha_in = torch.cat([alpha_in, pts], dim=-1)
            alpha = self.raw2out_density(self.alpha_branch(alpha_in))
            # print(alpha_in.shape, alpha_in)

            if self.opt.apply_pnt_mask > 0:
                alpha_holder = torch.zeros([B * R * SR * K, alpha.shape[-1]], dtype=torch.float32, device=alpha.device)
                alpha_holder[pnt_mask_flat, :] = alpha
            else:
                alpha_holder = alpha
            alpha = alpha_holder.view(B * R * SR, K, alpha_holder.shape[-1])
            alpha = torch.sum(alpha * weight, dim=-2).view([-1, alpha.shape[-1]])[ray_valid, :]  # alpha:
            # print("alpha", alpha.shape)
            # alpha_placeholder = torch.zeros([total_len, 1], dtype=torch.float32,
            #                        device=alpha.device)
            # alpha_placeholder[ray_valid] = alpha

            if self.opt.apply_pnt_mask > 0:
                feat_holder = torch.zeros([B * R * SR * K, feat.shape[-1]], dtype=torch.float32, device=feat.device)
                feat_holder[pnt_mask_flat, :] = feat
            else:
                feat_holder = feat
            feat = feat_holder.view(B * R * SR, K, feat_holder.shape[-1])
            feat = torch.sum(feat * weight, dim=-2).view([-1, feat.shape[-1]])[ray_valid, :]

            color_in = feat
            if self.opt.agg_color_xyz_mode != "None":
                color_in = torch.cat([color_in, pts], dim=-1)

            color_in = torch.cat([color_in, viewdirs], dim=-1)
            color_output = self.raw2out_color(self.color_branch(color_in))

            # color_output = torch.sigmoid(color_output)
            # output_placeholder = torch.cat([alpha, color_output], dim=-1)
            output = torch.cat([alpha, color_output], dim=-1)
            # print("output_placeholder", output_placeholder.shape)

        # scatter valid-ray outputs back into the full-length buffer
        output_placeholder = torch.zeros([total_len, self.opt.shading_color_channel_num + 1], dtype=torch.float32, device=output.device)
        output_placeholder[ray_valid] = output
        return output_placeholder, None
    def print_point(self, dists, sample_loc_w, sampled_xyz, sample_loc, sampled_xyz_pers, sample_pnt_mask):
        """Debug helper: dump sampled point clouds and shading points to text
        files (full cloud plus slices at shading-sample indices 0/3/6/9 of ray
        80) for external visualization, then terminate the process via exit().
        Never call this on a normal forward pass.
        """
        # for i in range(dists.shape[0]):
        #     filepath = "./dists.txt"
        #     filepath1 = "./dists10.txt"
        #     filepath2 = "./dists20.txt"
        #     filepath3 = "./dists30.txt"
        #     filepath4 = "./dists40.txt"
        #     dists_cpu = dists.detach().cpu().numpy()
        #     np.savetxt(filepath1, dists_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";")
        #     np.savetxt(filepath2, dists_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";")
        #     np.savetxt(filepath3, dists_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";")
        #     np.savetxt(filepath4, dists_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";")
        #     dists_cpu = dists[i,...][torch.any(sample_pnt_mask, dim=-1)[i,...], :].detach().cpu().numpy()
        #     np.savetxt(filepath, dists_cpu.reshape(-1, 3), delimiter=";")

        # world-space shading sample locations
        for i in range(sample_loc_w.shape[0]):
            filepath = "./sample_loc_w.txt"
            filepath1 = "./sample_loc_w10.txt"
            filepath2 = "./sample_loc_w20.txt"
            filepath3 = "./sample_loc_w30.txt"
            filepath4 = "./sample_loc_w40.txt"
            sample_loc_w_cpu = sample_loc_w.detach().cpu().numpy()
            np.savetxt(filepath1, sample_loc_w_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath2, sample_loc_w_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath3, sample_loc_w_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath4, sample_loc_w_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";")
            # only samples that have at least one valid neighbor point
            sample_loc_w_cpu = sample_loc_w[i,...][torch.any(sample_pnt_mask, dim=-1)[i,...], :].detach().cpu().numpy()
            np.savetxt(filepath, sample_loc_w_cpu.reshape(-1, 3), delimiter=";")

        # world-space neighbor point positions
        for i in range(sampled_xyz.shape[0]):
            sampled_xyz_cpu = sampled_xyz.detach().cpu().numpy()
            filepath = "./sampled_xyz.txt"
            filepath1 = "./sampled_xyz10.txt"
            filepath2 = "./sampled_xyz20.txt"
            filepath3 = "./sampled_xyz30.txt"
            filepath4 = "./sampled_xyz40.txt"
            np.savetxt(filepath1, sampled_xyz_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath2, sampled_xyz_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath3, sampled_xyz_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath4, sampled_xyz_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath, sampled_xyz_cpu[i, ...].reshape(-1, 3), delimiter=";")

        # perspective-space shading sample locations
        for i in range(sample_loc.shape[0]):
            filepath1 = "./sample_loc10.txt"
            filepath2 = "./sample_loc20.txt"
            filepath3 = "./sample_loc30.txt"
            filepath4 = "./sample_loc40.txt"
            filepath = "./sample_loc.txt"
            sample_loc_cpu =sample_loc.detach().cpu().numpy()
            np.savetxt(filepath1, sample_loc_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath2, sample_loc_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath3, sample_loc_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath4, sample_loc_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath, sample_loc[i, ...][torch.any(sample_pnt_mask, dim=-1)[i,...], :].reshape(-1, 3).detach().cpu().numpy(), delimiter=";")

        # perspective-space neighbor point positions
        for i in range(sampled_xyz_pers.shape[0]):
            filepath1 = "./sampled_xyz_pers10.txt"
            filepath2 = "./sampled_xyz_pers20.txt"
            filepath3 = "./sampled_xyz_pers30.txt"
            filepath4 = "./sampled_xyz_pers40.txt"
            filepath = "./sampled_xyz_pers.txt"
            sampled_xyz_pers_cpu = sampled_xyz_pers.detach().cpu().numpy()
            np.savetxt(filepath1, sampled_xyz_pers_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath2, sampled_xyz_pers_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath3, sampled_xyz_pers_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath4, sampled_xyz_pers_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";")
            np.savetxt(filepath, sampled_xyz_pers_cpu[i, ...].reshape(-1, 3), delimiter=";")
        print("saved sampled points and shading points")
        exit()
def gradiant_clamp(self, sampled_conf, min=0.0001, max=1):
diff = sampled_conf - torch.clamp(sampled_conf, min=min, max=max)
return sampled_conf - diff.detach()
def forward(self, sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, vsize, grid_vox_sz):
# return B * R * SR * channel
'''
:param sampled_conf: B x valid R x SR x K x 1
:param sampled_embedding: B x valid R x SR x K x F
:param sampled_xyz_pers: B x valid R x SR x K x 3
:param sampled_xyz: B x valid R x SR x K x 3
:param sample_pnt_mask: B x valid R x SR x K
:param sample_loc: B x valid R x SR x 3
:param sample_loc_w: B x valid R x SR x 3
:param sample_ray_dirs: B x valid R x SR x 3
:param vsize:
:return:
'''
ray_valid = torch.any(sample_pnt_mask, dim=-1).view(-1)
total_len = len(ray_valid)
in_shape = sample_loc_w.shape
if total_len == 0 or torch.sum(ray_valid) == 0:
# print("skip since no valid ray, total_len:", total_len, torch.sum(ray_valid))
return torch.zeros(in_shape[:-1] + (self.opt.shading_color_channel_num + 1,), device=ray_valid.device, dtype=torch.float32), ray_valid.view(in_shape[:-1]), None, None
if self.opt.agg_dist_pers < 0:
dists = sample_loc_w[..., None, :]
elif self.opt.agg_dist_pers == 0:
dists = sampled_xyz - sample_loc_w[..., None, :]
elif self.opt.agg_dist_pers == 1:
dists = sampled_xyz_pers - sample_loc[..., None, :]
elif self.opt.agg_dist_pers == 2:
if sampled_xyz_pers.shape[1] > 0:
xdist = sampled_xyz_pers[..., 0] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 0] * sample_loc[:, :, :, None, 2]
ydist = sampled_xyz_pers[..., 1] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 1] * sample_loc[:, :, :, None, 2]
zdist = sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 2]
dists = torch.stack([xdist, ydist, zdist], dim=-1)
else:
B, R, SR, K, _ = sampled_xyz_pers.shape
dists = torch.zeros([B, R, SR, K, 3], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype)
elif self.opt.agg_dist_pers == 10:
if sampled_xyz_pers.shape[1] > 0:
dists = sampled_xyz_pers - sample_loc[..., None, :]
dists = torch.cat([sampled_xyz - sample_loc_w[..., None, :], dists], dim=-1)
else:
B, R, SR, K, _ = sampled_xyz_pers.shape
dists = torch.zeros([B, R, SR, K, 6], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype)
elif self.opt.agg_dist_pers == 20:
if sampled_xyz_pers.shape[1] > 0:
xdist = sampled_xyz_pers[..., 0] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 0] * sample_loc[:, :, :, None, 2]
ydist = sampled_xyz_pers[..., 1] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 1] * sample_loc[:, :, :, None, 2]
zdist = sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 2]
dists = torch.stack([xdist, ydist, zdist], dim=-1)
# dists = torch.cat([sampled_xyz - sample_loc_w[..., None, :], dists], dim=-1)
dists = torch.cat([sampled_xyz - sample_loc_w[..., None, :], dists], dim=-1)
else:
B, R, SR, K, _ = sampled_xyz_pers.shape
dists = torch.zeros([B, R, SR, K, 6], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype)
elif self.opt.agg_dist_pers == 30:
if sampled_xyz_pers.shape[1] > 0:
w_dists = sampled_xyz - sample_loc_w[..., None, :]
dists = torch.cat([torch.sum(w_dists*sample_ray_dirs[..., None, :], dim=-1, keepdim=True), dists], dim=-1)
else:
B, R, SR, K, _ = sampled_xyz_pers.shape
dists = torch.zeros([B, R, SR, K, 4], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype)
else:
print("illegal agg_dist_pers code: ", agg_dist_pers)
exit()
# self.print_point(dists, sample_loc_w, sampled_xyz, sample_loc, sampled_xyz_pers, sample_pnt_mask)
weight, sampled_embedding = self.dist_func(sampled_embedding, dists, sample_pnt_mask, vsize, grid_vox_sz, axis_weight=self.axis_weight)
if self.opt.agg_weight_norm > 0 and self.opt.agg_distance_kernel != "trilinear" and not self.opt.agg_distance_kernel.startswith("num"):
weight = weight / torch.clamp(torch.sum(weight, dim=-1, keepdim=True), min=1e-8)
pnt_mask_flat = sample_pnt_mask.view(-1)
pts = sample_loc_w.view(-1, sample_loc_w.shape[-1])
viewdirs = sample_ray_dirs.view(-1, sample_ray_dirs.shape[-1])
conf_coefficient = 1
if sampled_conf is not None:
conf_coefficient = self.gradiant_clamp(sampled_conf[..., 0], min=0.0001, max=1)
output, _ = getattr(self, self.which_agg_model, None)(sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, vsize, weight * conf_coefficient, pnt_mask_flat, pts, viewdirs, total_len, ray_valid, in_shape, dists)
if (self.opt.sparse_loss_weight <=0) and ("conf_coefficient" not in self.opt.zero_one_loss_items) and self.opt.prob == 0:
weight, conf_coefficient = None, None
return output.view(in_shape[:-1] + (self.opt.shading_color_channel_num + 1,)), ray_valid.view(in_shape[:-1]), weight, conf_coefficient
| 37,876 | 45.417892 | 330 | py |
pointnerf | pointnerf-master/models/aggregators/__init__.py | 0 | 0 | 0 | py | |
pointnerf | pointnerf-master/models/mvs/renderer.py | import torch
import torch.nn.functional as F
from .mvs_utils import normal_vect, index_point_feature, build_color_volume
def depth2dist(z_vals, cos_angle):
    """Turn per-sample depths into inter-sample distances along each ray.

    z_vals: [N_rays, N_samples]; the last interval is padded with 1e10 so the
    final sample absorbs all remaining transmittance. Distances are scaled by
    the ray-direction cosine to make them metric.
    """
    deltas = z_vals[..., 1:] - z_vals[..., :-1]
    inf_pad = torch.full_like(deltas[..., :1], 1e10)
    deltas = torch.cat([deltas, inf_pad], dim=-1)  # [N_rays, N_samples]
    return deltas * cos_angle.unsqueeze(-1)
def ndc2dist(ndc_pts, cos_angle):
    """Euclidean distances between consecutive NDC-space samples, padded with a
    huge last interval scaled by the ray cosine."""
    segments = ndc_pts[:, 1:] - ndc_pts[:, :-1]
    dists = torch.norm(segments, dim=-1)
    return torch.cat([dists, 1e10 * cos_angle.unsqueeze(-1)], dim=-1)  # [N_rays, N_samples]
def raw2alpha(sigma, dist, net_type):
    """Convert per-sample densities into alphas and compositing weights.

    Note: ``dist`` and ``net_type`` are accepted for interface compatibility
    but not used — alpha is 1 - exp(-sigma) directly.
    """
    alpha_softmax = F.softmax(sigma, 1)
    alpha = 1. - torch.exp(-sigma)
    ones = torch.ones(alpha.shape[0], 1).to(alpha.device)
    # accumulated transmittance up to (but excluding) each sample
    T = torch.cumprod(torch.cat([ones, 1. - alpha + 1e-10], -1), -1)[:, :-1]
    return alpha, alpha * T, alpha_softmax  # weights: [N_rays, N_samples]
def batchify(fn, chunk):
    """Constructs a version of 'fn' that applies to smaller batches.

    ``chunk=None`` disables batching and returns ``fn`` unchanged. When
    ``alpha_only`` is set the wrapped call dispatches to ``fn.forward_alpha``.
    """
    if chunk is None:
        return fn
    def batched(inputs, alpha_only):
        apply = fn.forward_alpha if alpha_only else fn
        pieces = [apply(inputs[start:start + chunk]) for start in range(0, inputs.shape[0], chunk)]
        return torch.cat(pieces, 0)
    return batched
def run_network_mvs(pts, viewdirs, alpha_feat, fn, embed_fn, embeddirs_fn, netchunk=1024):
    """Prepare inputs (positional encodings, feature/view-dir concatenation)
    and run ``fn`` over them in chunks of ``netchunk`` rays.

    A ``viewdirs`` of None selects the density-only path (fn.forward_alpha).
    """
    if embed_fn is not None:
        pts = embed_fn(pts)
    if alpha_feat is not None:
        pts = torch.cat((pts, alpha_feat), dim=-1)
    alpha_only = viewdirs is None
    if not alpha_only:
        if viewdirs.dim() != 3:
            # broadcast one direction per ray across all its samples
            viewdirs = viewdirs[:, None].expand(-1, pts.shape[1], -1)
        if embeddirs_fn is not None:
            viewdirs = embeddirs_fn(viewdirs)
        pts = torch.cat([pts, viewdirs], -1)
    flat = batchify(fn, netchunk)(pts, alpha_only)
    return torch.reshape(flat, list(pts.shape[:-1]) + [flat.shape[-1]])
def raw2outputs(raw, z_vals, dists, white_bkgd=False, net_type='v2'):
    """Transforms model's predictions to semantically meaningful values.
    Args:
        raw: [num_rays, num_samples along ray, 4]. Prediction from model.
        z_vals: [num_rays, num_samples along ray]. Integration time.
        dists: [num_rays, num_samples]. Inter-sample distances.
    Returns:
        rgb_map, disp_map, acc_map, weights, depth_map, alpha (see NeRF).
    """
    rgb = raw[..., :3]  # [N_rays, N_samples, 3]
    alpha, weights, alpha_softmax = raw2alpha(raw[..., 3], dists, net_type)  # [N_rays, N_samples]

    rgb_map = torch.sum(weights[..., None] * rgb, -2)  # [N_rays, 3]
    depth_map = torch.sum(weights * z_vals, -1)
    acc_map = torch.sum(weights, -1)
    # disparity: inverse of the (accumulation-normalized) expected depth,
    # floored to avoid division by ~zero on empty rays
    floor = 1e-10 * torch.ones_like(depth_map, device=z_vals.device)
    disp_map = 1. / torch.max(floor, depth_map / acc_map)

    if white_bkgd:
        # composite the leftover transmittance onto a white background
        rgb_map = rgb_map + (1. - acc_map[..., None])
    return rgb_map, disp_map, acc_map, weights, depth_map, alpha
def gen_angle_feature(c2ws, rays_pts, rays_dir):
    """
    Per-sample view-angle feature: dot product between each sample-to-camera
    unit direction and the (unnormalized) ray direction.
    Inputs:
        c2ws: documented upstream as [1,v,4,4], but the indexing below
            (c2ws[:3, :3, 3]) reads it without a batch dim —
            NOTE(review): confirm the actual layout against callers.
        rays_pts: [N_rays, N_samples, 3]
        rays_dir: [N_rays, 3]
    Returns:
        angle: [N_rays, N_samples, v]
    """
    N_rays, N_samples = rays_pts.shape[:2]
    dirs = normal_vect(rays_pts.unsqueeze(2) - c2ws[:3, :3, 3][None, None])  # [N_rays, N_samples, v, 3]
    angle = torch.sum(dirs[:, :, :3] * rays_dir.reshape(N_rays,1,1,3), dim=-1, keepdim=True).reshape(N_rays, N_samples, -1)
    return angle
def gen_dir_feature(w2c_ref, rays_dir):
    """Rotate world-space ray directions into the reference camera frame.

    Inputs:
        w2c_ref: [4, 4] world-to-camera matrix (only the 3x3 rotation is used)
        rays_dir: [N_rays, 3]
    Returns:
        dirs: [N_rays, 3]
    """
    rotation = w2c_ref[:3, :3]
    return rays_dir @ rotation.t()
def gen_pts_feats(imgs, volume_feature, rays_pts, pose_ref, rays_ndc, feat_dim, img_feat=None, img_downscale=1.0, use_color_volume=False, net_type='v0'):
    """Per-sample input features: trilinearly sampled volume features plus,
    unless ``use_color_volume`` is set, per-view colors/features projected
    from the source images."""
    N_rays, N_samples = rays_pts.shape[:2]
    if img_feat is not None:
        # widen the feature budget by the flattened per-view image features
        feat_dim += img_feat.shape[1] * img_feat.shape[2]
    # volume_feature may be a dense tensor (sample it) or a callable decoder
    ray_feats = index_point_feature(volume_feature, rays_ndc) if torch.is_tensor(volume_feature) else volume_feature(rays_ndc)
    if use_color_volume:
        return ray_feats
    input_feat = torch.empty((N_rays, N_samples, feat_dim), device=imgs.device, dtype=torch.float)
    input_feat[..., :8] = ray_feats  # first 8 channels: sampled volume features
    input_feat[..., 8:] = build_color_volume(rays_pts, pose_ref, imgs, img_feat, with_mask=True, downscale=img_downscale)
    return input_feat
def rendering(args, pose_ref, rays_pts, rays_ndc, depth_candidates, rays_o, rays_dir,
              volume_feature=None, imgs=None, network_fn=None, img_feat=None, network_query_fn=None, white_bkgd=False, **kwargs):
    """End-to-end per-ray shading: build view-direction and point features,
    query the network, then alpha-composite into an RGB/depth map.

    Returns (rgb_map, input_feat, weights, depth_map, alpha, ret) where
    ``ret`` is currently always an empty dict (kept for interface stability).
    """
    # rays angle
    cos_angle = torch.norm(rays_dir, dim=-1)

    # using direction
    if pose_ref is not None:
        angle = gen_dir_feature(pose_ref['w2cs'][0], rays_dir/cos_angle.unsqueeze(-1))  # view dir feature
    else:
        angle = rays_dir/cos_angle.unsqueeze(-1)

    # rays_pts
    input_feat = gen_pts_feats(imgs, volume_feature, rays_pts, pose_ref, rays_ndc, args.feat_dim, \
                               img_feat, args.img_downscale, args.use_color_volume, args.net_type)

    # rays_ndc = rays_ndc * 2 - 1.0
    # network_query_fn = lambda pts, viewdirs, rays_feats, network_fn: run_network_mvs(pts, viewdirs, rays_feats,
    #                                                                                 network_fn,
    #                                                                                 embed_fn=embed_fn,
    #                                                                                 embeddirs_fn=embeddirs_fn,
    #                                                                                 netchunk=args.netchunk)
    # run_network_mvs
    raw = network_query_fn(rays_ndc, angle, input_feat, network_fn)
    if raw.shape[-1]>4:
        # extra channels are refined features: splice them after the first 8
        input_feat = torch.cat((input_feat[...,:8],raw[...,4:]), dim=-1)

    # convert depth candidates into metric inter-sample distances
    dists = depth2dist(depth_candidates, cos_angle)
    # dists = ndc2dist(rays_ndc)
    rgb_map, disp_map, acc_map, weights, depth_map, alpha = raw2outputs(raw, depth_candidates, dists, white_bkgd,args.net_type)
    ret = {}

    return rgb_map, input_feat, weights, depth_map, alpha, ret
def render_density(network_fn, rays_pts, density_feature, network_query_fn, chunk=1024 * 5):
    """Evaluate density in ``chunk``-sized slices (bounding peak memory) and
    return the concatenated result."""
    device = density_feature.device
    outputs = []
    for start in range(0, rays_pts.shape[0], chunk):
        pts_chunk = rays_pts[start:start + chunk].to(device)
        outputs.append(network_query_fn(pts_chunk, None, density_feature[start:start + chunk], network_fn))
    return torch.cat(outputs)
pointnerf | pointnerf-master/models/mvs/mvs_utils.py | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
# Misc
def img2mse(x, y):
    """Mean squared error between two image tensors."""
    return torch.mean((x - y) ** 2)


def mse2psnr(x):
    """MSE (torch tensor) -> PSNR in dB."""
    return -10. * torch.log(x) / torch.log(torch.Tensor([10.]))


def to8b(x):
    """Clip a float array to [0, 1] and quantize to uint8 [0, 255]."""
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)


def mse2psnr2(x):
    """MSE (python scalar / numpy array) -> PSNR in dB."""
    return -10. * np.log(x) / np.log(10.)
def get_psnr(imgs_pred, imgs_gt):
    """Per-image PSNR (dB) between predicted numpy images and ground-truth
    torch tensors; returns a numpy array of scores."""
    scores = []
    for pred, gt in zip(imgs_pred, imgs_gt):
        mse = np.mean((pred - gt.cpu().numpy()) ** 2)
        # inlined mse2psnr2 so this helper is self-contained
        scores.append(-10. * np.log(mse) / np.log(10.))
    return np.array(scores)
def init_log(log, keys):
    """Reset every entry in ``keys`` to a zero scalar tensor (float64) and
    return the (mutated) log dict."""
    log.update((key, torch.tensor([0.0], dtype=float)) for key in keys)
    return log
def visualize_depth_numpy(depth, minmax=None, cmap=cv2.COLORMAP_JET):
    """
    Colorize a (H, W) depth map with an OpenCV colormap.
    Returns (uint8 BGR image, [min, max] normalization range used).
    """
    x = np.nan_to_num(depth)  # change nan to 0
    if minmax is None:
        lo = np.min(x[x > 0])  # smallest positive depth (background 0 ignored)
        hi = np.max(x)
    else:
        lo, hi = minmax
    x = (x - lo) / (hi - lo + 1e-8)  # normalize to 0~1
    x = (255 * x).astype(np.uint8)
    return cv2.applyColorMap(x, cmap), [lo, hi]
def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET):
    """
    Colorize a (H, W) depth map (numpy array or torch tensor) and return it
    as a (3, H, W) float tensor plus the [min, max] normalization range used.
    """
    if type(depth) is not np.ndarray:
        depth = depth.cpu().numpy()
    x = np.nan_to_num(depth)  # change nan to 0
    if minmax is None:
        lo = np.min(x[x > 0])  # smallest positive depth (background 0 ignored)
        hi = np.max(x)
    else:
        lo, hi = minmax
    x = (x - lo) / (hi - lo + 1e-8)  # normalize to 0~1
    x = (255 * x).astype(np.uint8)
    colored = Image.fromarray(cv2.applyColorMap(x, cmap))
    return T.ToTensor()(colored), [lo, hi]  # (3, H, W)
# Ray helpers
def get_rays_mvs(H, W, intrinsic, c2w, N=1024, isRandom=True, is_precrop_iters=False, chunk=-1, idx=-1):
    """Generate camera rays for an H x W image.

    isRandom=True samples N random pixels (biased toward the image center
    for 30% of pre-crop iterations); otherwise pixels are enumerated in
    row-major order, optionally sliced into ``chunk``-sized pieces via ``idx``.
    Returns (rays_o [3], rays_d [M, 3], pixel_coordinates [2, M] as row/col).
    """
    device = c2w.device
    if isRandom:
        if is_precrop_iters and torch.rand((1,)) > 0.3:
            # center crop: avoid the outer sixth of the image on each side
            xs, ys = torch.randint(W//6, W-W//6, (N,)).float().to(device), torch.randint(H//6, H-H//6, (N,)).float().to(device)
        else:
            xs, ys = torch.randint(0,W,(N,)).float().to(device), torch.randint(0,H,(N,)).float().to(device)
    else:
        ys, xs = torch.meshgrid(torch.linspace(0, H - 1, H), torch.linspace(0, W - 1, W))  # pytorch's meshgrid has indexing='ij'
        ys, xs = ys.reshape(-1), xs.reshape(-1)
        if chunk>0:
            ys, xs = ys[idx*chunk:(idx+1)*chunk], xs[idx*chunk:(idx+1)*chunk]
        ys, xs = ys.to(device), xs.to(device)

    # unproject pixels with the intrinsics (z = +1 camera convention)
    dirs = torch.stack([(xs-intrinsic[0,2])/intrinsic[0,0], (ys-intrinsic[1,2])/intrinsic[1,1], torch.ones_like(xs)], -1)  # use 1 instead of -1

    # rotate into world space
    rays_d = dirs @ c2w[:3,:3].t()  # dot product, equals to: [c2w.dot(dir) for dir in dirs]

    # Translate camera frame's origin to the world frame. It is the origin of all rays.
    rays_o = c2w[:3,-1].clone()
    pixel_coordinates = torch.stack((ys,xs))  # row col
    return rays_o, rays_d, pixel_coordinates
def ndc_2_cam(ndc_xyz, near_far, intrinsic, W, H):
    """Map NDC coordinates back to camera-space points.

    ndc_xyz: [..., 3] with xy normalized over the image plane and z in [0, 1]
    between near and far. ``intrinsic`` is batched; only intrinsic[0] is used.
    """
    near, far = near_far[0], near_far[1]
    pixel_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device)
    cam_z = ndc_xyz[..., 2:3] * (far - near) + near
    # back to pixel coordinates, then scale by depth for unprojection
    cam_xy = ndc_xyz[..., :2] * pixel_scale * cam_z
    cam_xyz = torch.cat([cam_xy, cam_z], dim=-1)
    return cam_xyz @ torch.inverse(intrinsic[0, ...].t())
def get_ndc_coordinate(w2c_ref, intrinsic_ref, point_samples, inv_scale, near=2, far=6, pad=0, lindisp=False):
    '''
    Map world/camera-space sample points into the reference view's NDC cube.

    point_samples: [N_rays, N_samples, 3]
    w2c_ref: optional world-to-camera transform (None = points already in cam space)
    intrinsic_ref: optional; with intrinsics xy is projected to (0,1) pixel
        coords and z normalized between near/far (or inverse-depth if lindisp);
        without, near/far are treated as a 3D bounding box.
    pad: widens the xy range to account for padded feature maps
        (feature resolution assumed to be (inv_scale+1)/4 — see below).
    Returns: [N_rays, N_samples, 3] in approximately [0, 1].
    '''
    N_rays, N_samples = point_samples.shape[:2]
    point_samples = point_samples.reshape(-1, 3)

    # wrap to ref view
    if w2c_ref is not None:
        R = w2c_ref[:3, :3]  # (3, 3)
        T = w2c_ref[:3, 3:]  # (3, 1)
        point_samples = torch.matmul(point_samples, R.t()) + T.reshape(1,3)

    if intrinsic_ref is not None:
        # using projection
        point_samples_pixel = point_samples @ intrinsic_ref.t()
        point_samples_pixel[:,:2] = (point_samples_pixel[:,:2] / point_samples_pixel[:,-1:] + 0.0) / inv_scale.reshape(1,2)  # normalize to 0~1
        if not lindisp:
            point_samples_pixel[:,2] = (point_samples_pixel[:,2] - near) / (far - near)  # normalize to 0~1
        else:
            # linear in disparity (inverse depth)
            point_samples_pixel[:,2] = (1.0/point_samples_pixel[:,2]-1.0/near)/(1.0/far - 1.0/near)
    else:
        # using bounding box
        near, far = near.view(1,3), far.view(1,3)
        point_samples_pixel = (point_samples - near) / (far - near)  # normalize to 0~1
    del point_samples

    if pad>0:
        # assumes the feature map is 1/4 resolution with `pad` border pixels
        W_feat, H_feat = (inv_scale+1)/4.0
        point_samples_pixel[:,1] = point_samples_pixel[:,1] * H_feat / (H_feat + pad * 2) + pad / (H_feat + pad * 2)
        point_samples_pixel[:,0] = point_samples_pixel[:,0] * W_feat / (W_feat + pad * 2) + pad / (W_feat + pad * 2)

    point_samples_pixel = point_samples_pixel.view(N_rays, N_samples, 3)
    return point_samples_pixel
def build_color_volume(point_samples, pose_ref, imgs, img_feat=None, downscale=1.0, with_mask=False):
    '''
    Sample per-view colors (and optionally image features and an in-frustum
    mask) at 3D sample points by projecting them into every source view.

    point_samples: [N_ray, N_sample, 3] world-space points
    imgs: [N, V, 3, H, W]
    Returns: [N_ray, N_sample, V*C] with C = 3 (+feat channels) (+1 if with_mask).
    Note: ``downscale`` is accepted but not used in the visible body.
    '''
    device = imgs.device
    N, V, C, H, W = imgs.shape
    inv_scale = torch.tensor([W - 1, H - 1]).to(device)
    C += with_mask
    C += 0 if img_feat is None else img_feat.shape[2]
    colors = torch.empty((*point_samples.shape[:2], V*C), device=imgs.device, dtype=torch.float)
    for i,idx in enumerate(range(V)):
        w2c_ref, intrinsic_ref = pose_ref['w2cs'][idx], pose_ref['intrinsics'][idx].clone()  # assume camera 0 is reference
        point_samples_pixel = get_ndc_coordinate(w2c_ref, intrinsic_ref, point_samples, inv_scale)[None]
        grid = point_samples_pixel[...,:2]*2.0-1.0  # grid_sample expects [-1, 1]
        grid = grid.to(imgs.dtype)

        # bilinear color lookup; out-of-bounds points clamp to the border
        data = F.grid_sample(imgs[:, idx], grid, align_corners=True, mode='bilinear', padding_mode='border')
        if img_feat is not None:
            # features use zero padding so out-of-view lookups contribute nothing
            data = torch.cat((data,F.grid_sample(img_feat[:,idx], grid, align_corners=True, mode='bilinear', padding_mode='zeros')),dim=1)

        if with_mask:
            # 1.0 where the projection falls strictly inside the view
            in_mask = ((grid >-1.0)*(grid < 1.0))
            in_mask = (in_mask[...,0]*in_mask[...,1]).float()
            data = torch.cat((data,in_mask.unsqueeze(1)), dim=1)

        colors[...,i*C:i*C+C] = data[0].permute(1, 2, 0)
        del grid, point_samples_pixel, data
    return colors
def normal_vect(vect, dim=-1):
    """Normalize ``vect`` to unit length along ``dim`` (epsilon-guarded)."""
    length = torch.sqrt(torch.sum(vect ** 2, dim=dim, keepdim=True))
    return vect / (length + 1e-7)
def index_point_feature(volume_feature, ray_coordinate_ref, chunk=-1):
    '''
    Trilinearly sample a feature volume at normalized [0, 1]^3 coordinates.

    Args:
        volume_feature: [B, C, D, H_vol, W_vol] feature volume
        ray_coordinate_ref: [..., H, W, 3] sample coordinates in [0, 1]
        chunk: if > 0, sample in chunks of that many pixels to bound memory
    Returns:
        [H, W, C] sampled features (chunked path), or the squeezed
        grid_sample result otherwise.
    '''
    device = volume_feature.device
    H, W = ray_coordinate_ref.shape[-3:-1]

    if chunk != -1:
        features = torch.zeros((volume_feature.shape[1],H,W), device=volume_feature.device, dtype=torch.float, requires_grad=volume_feature.requires_grad)
        # rescale [0, 1] coords to grid_sample's [-1, 1]
        grid = ray_coordinate_ref.view(1, 1, 1, H * W, 3) * 2 - 1.0  # [1 1 H W 3] (x,y,z)
        for i in range(0, H*W, chunk):
            features[:,i:i + chunk] = F.grid_sample(volume_feature, grid[:,:,:,i:i + chunk], align_corners=True, mode='bilinear')[0]
        features = features.permute(1,2,0)
    else:
        grid = ray_coordinate_ref.view(-1, 1, H, W, 3).to(device) * 2 - 1.0  # [1 1 H W 3] (x,y,z)
        features = F.grid_sample(volume_feature, grid, align_corners=True, mode='bilinear')[:,:,0].permute(2,3,0,1).squeeze()#, padding_mode="border"
    return features
def filter_keys(dict):
    """Drop the sampling-control keys ('N_samples', 'ndc', 'lindisp') from the
    kwargs dict in place and return it.

    Note: the parameter shadows the builtin ``dict``; the name is kept for
    interface compatibility with existing callers.
    """
    # pop with a default replaces the original `in` check + pop (one lookup,
    # no KeyError risk)
    dict.pop('N_samples', None)
    dict.pop('ndc', None)
    dict.pop('lindisp', None)
    return dict
def sub_selete_data(data_batch, device, idx, filtKey=[], filtIndex=['view_ids_all','c2ws_all','scan','bbox','w2ref','ref2w','light_id','ckpt','idx']):
    """Select a subset of views (index ``idx`` along dim 1) from each batched
    tensor in ``data_batch``, cast to float, and move to ``device``.

    Keys listed in ``filtIndex`` (and tensors with dim <= 2) are kept whole.
    ``filtKey`` is accepted for interface compatibility but unused.
    """
    data_sub_selete = {}
    for item in data_batch.keys():
        value = data_batch[item]
        # BUG FIX: the original tested torch.is_tensor(item) on the *key*
        # (a str), which is always False, so the per-view sub-selection
        # never ran. Test the value instead.
        if item not in filtIndex and torch.is_tensor(value) and value.dim() > 2:
            data_sub_selete[item] = value[:, idx].float()
        else:
            data_sub_selete[item] = value.float()
        if not data_sub_selete[item].is_cuda:
            data_sub_selete[item] = data_sub_selete[item].to(device)
    return data_sub_selete
def detach_data(dictionary):
    """Return a new dict whose tensor values are detached clones (no autograd
    link, no storage sharing with the originals)."""
    return {key: value.detach().clone() for key, value in dictionary.items()}
def read_pfm(filename):
    """Read a PFM (portable float map) file.

    Returns (data, scale): ``data`` is a (H, W, 3) float array for color
    ('PF') or (H, W) for grayscale ('Pf'), flipped into top-to-bottom row
    order; ``scale`` is the absolute scale factor from the header (its sign
    in the file encodes endianness).
    Raises Exception on a malformed header.
    """
    # context manager guarantees the file is closed even if a header check
    # raises (the original leaked the handle on those paths)
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')

        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if not dim_match:
            raise Exception('Malformed PFM header.')
        width, height = map(int, dim_match.groups())

        scale = float(file.readline().rstrip())
        if scale < 0:  # a negative scale marks little-endian pixel data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(file, endian + 'f')

    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores rows bottom-to-top; flip to conventional image order
    data = np.flipud(data)
    return data, scale
def gen_render_path(c2ws, N_views=30):
    """Interpolate a smooth closed camera path through the given poses.

    c2ws: [N, 4, 4] camera-to-world matrices. Consecutive poses (and last
    back to first) are linearly blended in Euler-angle + translation space,
    N_views//3 steps per segment. Returns [~N_views, 4, 4] poses.
    """
    N = len(c2ws)
    rotvec, positions = [], []
    rotvec_inteplat, positions_inteplat = [], []
    weight = np.linspace(1.0, .0, N_views//3, endpoint=False).reshape(-1, 1)
    for i in range(N):
        r = R.from_matrix(c2ws[i, :3, :3])
        euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3)
        if i:
            # unwrap angle jumps > 180 deg so interpolation takes the short way
            mask = np.abs(euler_ange - rotvec[0])>180
            euler_ange[mask] += 360.0
        rotvec.append(euler_ange)
        positions.append(c2ws[i, :3, 3:].reshape(1, 3))

        if i:
            rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i])
            positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i])

    # close the loop: last pose back to the first
    rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0])
    positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0])

    c2ws_render = []
    angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat)
    for rotvec, position in zip(angles_inteplat, positions_inteplat):
        c2w = np.eye(4)
        c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix()
        c2w[:3, 3:] = position.reshape(3, 1)
        c2ws_render.append(c2w.copy())
    c2ws_render = np.stack(c2ws_render)
    return c2ws_render
from scipy.interpolate import CubicSpline
################################################# MVS helper functions #####################################
from kornia.utils import create_meshgrid
def homo_warp_nongrid(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, filter=True, **kwargs):
    """Project reference-camera points into a source view.

    ref_cam_xyz: [B, M, 3] points in the reference camera frame; if w2c is
    None they are assumed to already be in the source camera frame.
    Returns (src_grid, mask, hard_id_xy):
        src_grid: pixel xy rescaled to [-1, 1] (grid_sample convention);
            if ``filter`` only in-bounds points are kept.
        mask: [B, M, 1] bool, True where the projection lands inside HD x WD.
        hard_id_xy: ceil of the UNfiltered pixel coordinates (all M points).
    """
    # src_grid: B, 3, D*H*W xyz
    B, M, _ = ref_cam_xyz.shape
    if w2c is not None:
        # ref cam -> world -> source cam (homogeneous coords)
        src_cam_xyz = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:,:,0:1])], dim=-1) @ c2w.transpose(1,2) @ w2c.transpose(1,2)
    else:
        src_cam_xyz = ref_cam_xyz
    # perspective divide, then apply intrinsics to get pixel xy
    src_grid = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1,2))[...,:2]

    mask = torch.prod(torch.cat([torch.ge(src_grid, torch.zeros([1,1,2], device=src_grid.device)), torch.le(src_grid, torch.tensor([[[WD-1,HD-1]]], device=src_grid.device))],dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0
    src_grid = src_grid.to(torch.float32)  # grid xy
    hard_id_xy = torch.ceil(src_grid[:,:,:])
    src_grid = torch.masked_select(src_grid, mask).reshape(B, -1, 2) if filter else src_grid
    src_grid[..., 0] = src_grid[..., 0] / ((WD - 1.0) / 2.0) - 1.0  # scale to -1~1
    src_grid[..., 1] = src_grid[..., 1] / ((HD - 1.0) / 2.0) - 1.0  # scale to -1~1
    return src_grid, mask, hard_id_xy
def homo_warp_fg_mask(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, **kwargs):
    """Rasterize reference-camera points into a binary HD x WD foreground
    mask in the source view (via id2mask on the in-bounds pixel ids)."""
    if w2c is None:
        src_cam_xyz = ref_cam_xyz
    else:
        homo = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[..., 0:1])], dim=-1)
        src_cam_xyz = homo @ c2w.transpose(1, 2) @ w2c.transpose(1, 2)
    pix_xy = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1, 2))[..., :2]
    inside = torch.cat([
        torch.ge(pix_xy, torch.zeros([1, 1, 2], device=pix_xy.device)),
        torch.le(pix_xy, torch.tensor([[[WD - 1, HD - 1]]], device=pix_xy.device)),
    ], dim=-1)
    keep = torch.prod(inside, dim=-1, keepdim=True, dtype=torch.int8) > 0
    pix_xy = pix_xy.to(torch.float32)  # grid xy
    hard_id_xy = torch.ceil(pix_xy)[:, keep[0, ..., 0], :]
    return id2mask(hard_id_xy, HD, WD)
def homo_warp_nongrid_occ(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, tolerate=0.1, scatter_cpu=True):
    """Project reference-camera points into a source view and keep only the
    (approximately) visible ones: per target pixel, points more than
    ``tolerate`` behind the nearest projected point are masked out.

    Returns (grid, mask, hard_id_xy): grid_sample-ready coords in [-1, 1] of
    the surviving points, the combined in-bounds+visibility mask over all
    input points, and the ceiled pixel ids of the in-bounds points.
    Requires ``scatter_min`` (torch_scatter, imported at module level);
    ``scatter_cpu`` moves that reduction to the CPU.
    """
    # src_grid: B, 3, D*H*W xyz
    B, M, _ = ref_cam_xyz.shape
    if w2c is not None:
        # homogeneous coords: ref-cam -> world -> src-cam
        src_cam_xyz = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:,:,0:1])], dim=-1) @ c2w.transpose(1,2) @ w2c.transpose(1,2)
    else:
        src_cam_xyz = ref_cam_xyz
    # print("src_cam_xyz",src_cam_xyz.shape, intrinsic.shape)
    src_grid = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1,2))[...,:2]
    # print("src_pix_xy1", src_grid.shape, torch.min(src_grid,dim=-2)[0], torch.max(src_grid,dim=-2)[0])
    # in-bounds test uses ceil() on the upper edge so the later ceiled ids stay valid
    mask = torch.prod(torch.cat([torch.ge(src_grid, torch.zeros([1,1,2], device=src_grid.device)), torch.le(torch.ceil(src_grid), torch.tensor([[[WD-1,HD-1]]], device=src_grid.device))],dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0
    src_grid = torch.masked_select(src_grid, mask).reshape(B, -1, 2)
    cam_z = torch.masked_select(src_cam_xyz[:,:,2], mask[...,0]).reshape(B, -1)
    src_grid = src_grid.to(torch.float32) # grid xy
    # print("HD, WD", HD, WD) 512 640
    src_grid_x = src_grid[..., 0:1] / ((WD - 1.0) / 2.0) - 1.0 # scale to -1~1
    src_grid_y = src_grid[..., 1:2] / ((HD - 1.0) / 2.0) - 1.0 # scale to -1~1
    # hard_id_xy: 1, 307405, 2
    hard_id_xy = torch.ceil(src_grid[:,:,:])
    # print("hard_id_xy", hard_id_xy.shape)
    # linearized pixel id x*HD + y; unique per pixel since 0 <= y <= HD-1
    index = (hard_id_xy[...,0] * HD + hard_id_xy[...,1]).long() # 1, 307405
    # print("index", index.shape, torch.min(index), torch.max(index))
    # per-pixel nearest depth among all points landing on that pixel
    min_depth, argmin = scatter_min(cam_z[:,:].cpu() if scatter_cpu else cam_z[:,:], index[:,:].cpu() if scatter_cpu else index[:,:], dim=1)
    # print("argmin", min_depth.shape, min_depth, argmin.shape)
    queried_depth = min_depth.to(ref_cam_xyz.device)[:, index[0,...]] if scatter_cpu else min_depth[:, index[0,...]]
    # a point survives if it is within `tolerate` of its pixel's nearest depth
    block_mask = (cam_z <= (queried_depth + tolerate))
    # print("mask", mask.shape, torch.sum(mask), block_mask.shape, torch.sum(block_mask))
    # fold the visibility mask back into the in-bounds mask (boolean indexing
    # assigns block_mask into exactly the positions where mask was True)
    mask[mask.clone()] = block_mask
    # print("mask", mask.shape, torch.sum(mask), block_mask.shape, torch.sum(block_mask))
    # print("src_grid_x", src_grid_x.shape)
    src_grid_x = torch.masked_select(src_grid_x, block_mask[..., None]).reshape(B, -1, 1)
    src_grid_y = torch.masked_select(src_grid_y, block_mask[..., None]).reshape(B, -1, 1)
    # print("src_grid_x", src_grid_x.shape, src_grid_y.shape, mask.shape)
    return torch.cat([src_grid_x, src_grid_y], dim=-1), mask, hard_id_xy
def id2mask(hard_id_xy, HD, WD):
    """Scatter (B, N, 2) integer pixel ids, ordered (x, y), into an HD x WD
    int8 mask with ones at the hit pixels (batch index 0 only)."""
    mask = torch.zeros([HD, WD], dtype=torch.int8, device=hard_id_xy.device)
    idx = hard_id_xy.long()
    mask[idx[0, ..., 1], idx[0, ..., 0]] = 1
    return mask
def gen_bg_points(batch):
    """Intersect the batch's rays with its background plane.

    Reads plane_pnt/plane_normal from the batch and delegates to
    get_rayplane_cross; returns world-space intersection points.
    """
    dev = batch['campos'].device
    plane_pnt = torch.as_tensor(batch["plane_pnt"][0], dtype=torch.float32, device=dev)
    plane_normal = torch.as_tensor(batch["plane_normal"][0], dtype=torch.float32, device=dev)
    return get_rayplane_cross(batch['campos'], batch['raydir'],
                              plane_pnt[None, None, :], plane_normal[None, None, :])
def get_rayplane_cross(cam_pos, raydir, p_co, p_no, epsilon=1e-3):
    """Intersect rays with a plane.

    cam_pos: (1, 3) camera position; raydir: (1, N, 3) ray directions.
    p_co: (1, 1, 3) a point on the plane; p_no: (1, 1, 3) the plane normal
    (need not be normalized).
    Rays whose direction satisfies dot(p_no, dir) < epsilon (back-facing or
    near-parallel) receive a zero vector; all others receive their
    world-space crossing point. Returns a (1, N, 3) tensor.
    """
    dots = torch.sum(p_no * raydir, dim=-1)        # (1, N)
    hits = dots >= epsilon
    valid_dots = dots[hits][None, :]               # (1, Nv)
    offset = cam_pos[None, :, :] - p_co            # (1, 1, 3)
    scale = -torch.sum(p_no * offset, dim=-1) / valid_dots  # (1, Nv)
    hit_dirs = raydir[:, hits[0], :] * scale[..., None]     # (1, Nv, 3)
    crossings = cam_pos[None, ...] + hit_dirs               # (1, Nv, 3)
    out = torch.zeros_like(raydir)
    out[:, hits[0], :] = crossings
    return out
def extract_from_2d_grid(src_feat, src_grid, mask):
    """Bilinearly sample per-point features from a 2D feature map.

    src_feat: (B, C, H, W) feature map; src_grid: (B, M, 2) sample coords in
    [-1, 1]; mask: optional (B, N, 1) bool telling which of N original points
    the M sampled ones correspond to — when given, results are scattered back
    into a zero-filled (B, N, C) tensor (batch index 0 only).
    NOTE(review): output is forced onto CUDA via .cuda(); CPU-only runs will
    fail here — confirm intended.
    """
    B, M, _ = src_grid.shape
    warped_src_feat = F.grid_sample(src_feat, src_grid[:, None, ...], mode='bilinear', padding_mode='zeros', align_corners=True)  # (B, C, D, H*W)
    warped_src_feat = warped_src_feat.permute(0,2,3,1).view(B, M, src_feat.shape[1]).cuda() # 1, 224874, 3
    if mask is not None:
        B, N, _ = mask.shape
        full_src_feat = torch.zeros([B, N, src_feat.shape[1]], device=warped_src_feat.device, dtype=warped_src_feat.dtype)
        full_src_feat[0, mask[0,:,0], :] = warped_src_feat
        warped_src_feat = full_src_feat
    return warped_src_feat
def homo_warp(src_feat, proj_mat, depth_values, src_grid=None, pad=0):
    """Plane-sweep homography warp of a source feature map into the ref view.

    src_feat: (B, C, H, W)
    proj_mat: (B, 3, 4) equal to "src_proj @ ref_proj_inv"
    depth_values: (B, D, H, W)
    src_grid: optionally a precomputed (B, D, H_pad, W_pad, 2) sampling grid;
        when None it is built from proj_mat/depth_values (with optional pad).
    out: (warped features (B, C, D, H_pad, W_pad), the sampling grid)
    Uses kornia's create_meshgrid and F.grid_sample (module-level imports).
    """
    if src_grid is None:  # fixed: was `src_grid==None` (identity check is the idiom)
        B, C, H, W = src_feat.shape
        device = src_feat.device
        if pad>0:
            H_pad, W_pad = H + pad*2, W + pad*2
        else:
            H_pad, W_pad = H, W
        depth_values = depth_values[...,None,None].repeat(1, 1, H_pad, W_pad)
        D = depth_values.shape[1]
        R = proj_mat[:, :, :3]  # (B, 3, 3)
        T = proj_mat[:, :, 3:]  # (B, 3, 1)
        # create grid from the ref frame
        ref_grid = create_meshgrid(H_pad, W_pad, normalized_coordinates=False, device=device)  # (1, H, W, 2)
        if pad>0:
            ref_grid -= pad
        ref_grid = ref_grid.permute(0, 3, 1, 2)  # (1, 2, H, W)
        ref_grid = ref_grid.reshape(1, 2, W_pad * H_pad)  # (1, 2, H*W)
        ref_grid = ref_grid.expand(B, -1, -1)  # (B, 2, H*W)
        ref_grid = torch.cat((ref_grid, torch.ones_like(ref_grid[:, :1])), 1)  # (B, 3, H*W)
        ref_grid_d = ref_grid.repeat(1, 1, D)  # (B, 3, D*H*W), X, Y, Z
        src_grid_d = R @ ref_grid_d + T / depth_values.view(B, 1, D * W_pad * H_pad)
        del ref_grid_d, ref_grid, proj_mat, R, T, depth_values  # release (GPU) memory
        src_grid = src_grid_d[:, :2] / src_grid_d[:, 2:]  # divide by depth (B, 2, D*H*W)
        del src_grid_d
        src_grid[:, 0] = src_grid[:, 0] / ((W - 1) / 2) - 1  # scale to -1~1
        src_grid[:, 1] = src_grid[:, 1] / ((H - 1) / 2) - 1  # scale to -1~1
        src_grid = src_grid.permute(0, 2, 1)  # (B, D*H*W, 2)
        src_grid = src_grid.view(B, D, H_pad, W_pad, 2)
    B, D, H_pad, W_pad = src_grid.shape[:4]
    src_grid = src_grid.to(src_feat.dtype)  # 1, 32, 128, 160
    warped_src_feat = F.grid_sample(src_feat, src_grid.view(B, D, H_pad * W_pad, 2),
                                    mode='bilinear', padding_mode='zeros',
                                    align_corners=True)  # (B, C, D, H*W)
    warped_src_feat = warped_src_feat.view(B, -1, D, H_pad, W_pad)
    return warped_src_feat, src_grid
############################### render path ####################################
def normalize(v):
    """Return v scaled to unit Euclidean length."""
    length = np.linalg.norm(v)
    return v / length
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def construct_vox_points(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None):
    """Voxelize points on a vox_res^3 grid (uses torch_scatter).

    Returns (per-voxel mean of xyz_val, unique integer voxel coords, index of
    the first point — in original order — that falls in each voxel). The grid
    spans a cube 1.05x the bounding box of partition_xyz (or xyz_val when
    None), unless space_min/space_max are supplied.
    """
    # xyz, N, 3
    xyz = xyz_val if partition_xyz is None else partition_xyz
    if space_min is None:
        xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0]
        space_edge = torch.max(xyz_max - xyz_min) * 1.05
        xyz_mid = (xyz_max + xyz_min) / 2
        space_min = xyz_mid - space_edge / 2
        space_max = xyz_mid + space_edge / 2
    else:
        space_edge = space_max - space_min
    construct_vox_sz = space_edge / vox_res
    xyz_shift = xyz - space_min[None, ...]
    # per-point voxel cell, deduplicated; inv_idx maps each point to its voxel
    sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True)
    xyz_centroid = scatter_mean(xyz_val, inv_idx, dim=0)
    min_idx, _ = scatter_min(torch.arange(len(xyz), device=xyz.device), inv_idx, dim=0)
    return xyz_centroid, sparse_grid_idx, min_idx
def construct_vox_points_xyz(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None):
    """Voxel-average xyz_val on a vox_res^3 grid; returns the (M, 3) per-voxel
    centroids only (uses torch_scatter's scatter_mean)."""
    ref = partition_xyz if partition_xyz is not None else xyz_val  # N, 3
    if space_min is None:
        lo, hi = torch.min(ref, dim=-2)[0], torch.max(ref, dim=-2)[0]
        edge = torch.max(hi - lo) * 1.05  # 5% padding around the bbox
        mid = (hi + lo) / 2
        space_min = mid - edge / 2
    else:
        edge = space_max - space_min
    vox_sz = edge / vox_res
    shifted = ref - space_min[None, ...]
    cells = torch.floor(shifted / vox_sz[None, ...]).to(torch.int32)
    _, inv_idx = torch.unique(cells, dim=0, return_inverse=True)
    return scatter_mean(xyz_val, inv_idx, dim=0)
def construct_vox_points_ind(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None):
    """Quantize points into a vox_res^3 grid without averaging.

    Returns (unique integer voxel coords, inverse index per point, space_min,
    space_max). The grid spans a cube 1.05x the bounding box of partition_xyz
    (or xyz_val when None), unless explicit bounds are supplied.
    """
    ref = partition_xyz if partition_xyz is not None else xyz_val  # N, 3
    if space_min is None:
        lo, hi = torch.min(ref, dim=-2)[0], torch.max(ref, dim=-2)[0]
        edge = torch.max(hi - lo) * 1.05  # 5% padding around the bbox
        mid = (hi + lo) / 2
        space_min, space_max = mid - edge / 2, mid + edge / 2
    else:
        edge = space_max - space_min
    vox_sz = edge / vox_res
    cells = torch.floor((ref - space_min[None, ...]) / vox_sz[None, ...]).to(torch.int32)
    sparse_grid_idx, inv_idx = torch.unique(cells, dim=0, return_inverse=True)
    return sparse_grid_idx, inv_idx, space_min, space_max
def construct_vox_points_closest(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None):
    """Voxelize xyz_val at vox_res and, per voxel, pick the point closest to
    the voxel centroid.

    Points strictly outside [space_min, space_max] are discarded first.
    Returns (per-voxel centroids, unique integer voxel coords, index — into
    the filtered point list — of the centroid-closest point per voxel).
    Uses torch_scatter's scatter_mean / scatter_min.
    """
    # xyz, N, 3
    xyz = xyz_val if partition_xyz is None else partition_xyz
    if space_min is None:
        xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0]
        space_edge = torch.max(xyz_max - xyz_min) * 1.05
        xyz_mid = (xyz_max + xyz_min) / 2
        space_min = xyz_mid - space_edge / 2
        # bug fix: space_max was never set in this branch (unlike the sibling
        # construct_vox_points* helpers), crashing the mask computation below
        space_max = xyz_mid + space_edge / 2
    else:
        space_edge = space_max - space_min
    # keep only points strictly inside the voxelized space
    mask = (xyz_val - space_min[None, ...])
    mask *= (space_max[None, ...] - xyz_val)
    mask = torch.prod(mask, dim=-1) > 0
    xyz_val = xyz_val[mask, :]
    # bug fix: quantize the same (filtered) points whose values are scattered
    # below, so inv_idx and xyz_val have matching lengths
    xyz = xyz_val if partition_xyz is None else partition_xyz[mask, :]
    construct_vox_sz = space_edge / vox_res
    xyz_shift = xyz - space_min[None, ...]
    sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True)
    xyz_centroid = scatter_mean(xyz_val, inv_idx, dim=0)
    xyz_centroid_prop = xyz_centroid[inv_idx, :]
    xyz_residual = torch.norm(xyz_val - xyz_centroid_prop, dim=-1)
    print("xyz_residual", xyz_residual.shape)
    _, min_idx = scatter_min(xyz_residual, inv_idx, dim=0)
    print("min_idx", min_idx.shape)
    return xyz_centroid, sparse_grid_idx, min_idx
def transform_points_to_voxels(points, point_cloud_range, voxel_sizes, max_pnts_per_vox, max_voxels, voxel_generator=None):
    """Run voxel_generator.generate on points and normalize its output.

    Accepts either the dict form ({'voxels', 'coordinates',
    'num_points_per_voxel'}) or the 3-tuple form, and always returns the
    tuple (voxels, coordinates, num_points).
    """
    out = voxel_generator.generate(points)
    if isinstance(out, dict):
        return out['voxels'], out['coordinates'], out['num_points_per_voxel']
    voxels, coordinates, num_points = out
    return voxels, coordinates, num_points
def alpha_masking(points, alphas, intrinsics, c2ws, w2cs, near_far, opt=None):
    """Visual-hull filtering: keep only points that land on the alpha
    (foreground) mask of every view, optionally also within each view's
    near/far depth range. Returns a boolean keep-mask over `points`.

    alphas[i][0] is the i-th view's HxW alpha map; intrinsics/c2ws/w2cs are
    per-view camera matrices. With opt.alpha_range > 0 or opt.inall_img == 0,
    points projecting outside a view's image are treated as passing that view.
    """
    w_xyz1 = torch.cat([points[..., :3], torch.ones_like(points[..., :1])], dim=-1)
    H, W = alphas[0][0].shape
    vishull_mask = None
    range_mask = None
    for i in range(len(alphas)):
        alpha, intrinsic, c2w, w2c = torch.as_tensor(alphas[i][0], dtype=points.dtype, device=points.device), torch.as_tensor(intrinsics[i], dtype=points.dtype, device=points.device), torch.as_tensor(c2ws[i], dtype=points.dtype, device=points.device), torch.as_tensor(w2cs[i], dtype=points.dtype, device=points.device)
        # print("w_xyz1",w_xyz1.shape, w2c.shape, intrinsic.shape, alpha.shape)
        cam_xyz = w_xyz1 @ w2c.t()
        torch.cuda.empty_cache()
        if near_far is not None:
            # near bound relaxed by 1.0 to avoid clipping surface points
            near_far_mask = torch.logical_and(cam_xyz[...,2]>=(near_far[0]-1.0), cam_xyz[...,2]<=near_far[1])
        cam_xyz = cam_xyz[...,:3] @ intrinsic.t()
        img_xy = torch.floor(cam_xyz[:, :2] / cam_xyz[:, -1:] + 0.0).long()
        del cam_xyz
        torch.cuda.empty_cache()
        if opt is not None and (opt.alpha_range > 0 or opt.inall_img == 0):
            range_mask = torch.logical_and(img_xy >= torch.zeros((1,2), dtype=img_xy.dtype, device=img_xy.device), img_xy < torch.as_tensor([[W,H]], dtype=img_xy.dtype, device=img_xy.device))
            range_mask = torch.prod(range_mask, dim=-1) > 0
        img_xy[..., 0] = torch.clamp(img_xy[..., 0], min=0, max=W-1)
        img_xy[..., 1] = torch.clamp(img_xy[..., 1], min=0, max=H-1)
        mask = alpha[img_xy[..., 1], img_xy[..., 0]]
        if range_mask is not None:
            # out-of-image points pass this view (treated as foreground)
            mask = mask + (~range_mask).to(torch.float32)
            mask = mask > 0.1
        if near_far is not None:
            vishull_mask = (mask*near_far_mask) if vishull_mask is None else vishull_mask*(mask*near_far_mask)
        else:
            vishull_mask=mask if vishull_mask is None else vishull_mask*mask
        del img_xy
        torch.cuda.empty_cache()
    del range_mask
    print("vishull_mask", vishull_mask.shape)
    return vishull_mask > 0
import torch
import os
from torch.utils.data import DataLoader
import imageio
# models
from .models import *
from .renderer import *
from .mvs_utils import *
from . import filter_utils
from ..helpers.networks import init_seq
from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet
from torch.optim.lr_scheduler import CosineAnnealingLR
from inplace_abn import InPlaceABN
from collections import OrderedDict
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from torchvision import transforms as T
feature_str_lst=['appr_feature_str0', 'appr_feature_str1', 'appr_feature_str2', 'appr_feature_str3']
def premlp_init(opt):
    """Build the small MLP that maps 63-dim point features down to
    opt.point_features_dim, with opt.shading_feature_mlp_layer1 Linear+act
    layers, initialized via init_seq."""
    act_cls = getattr(nn, opt.act_type, None)
    width_in, width_out = 63, opt.point_features_dim
    layers = []
    for _ in range(opt.shading_feature_mlp_layer1):
        layers.append(nn.Linear(width_in, width_out))
        layers.append(act_cls(inplace=True))
        width_in = width_out
    mlp = nn.Sequential(*layers)
    init_seq(mlp)
    return mlp
class MvsPointsModel(nn.Module):
    def __init__(self, args):
        """Build the MVS depth/feature networks plus optional heads.

        create_mvs supplies the render kwargs holding the MVSNet (cost-volume)
        and 2D feature networks; with manual_depth_view == -1 a learned
        probability head (ProbNet) is added, and with
        shading_feature_mlp_layer0 > 0 a point-feature pre-MLP.
        """
        super(MvsPointsModel, self).__init__()
        self.args = args
        self.args.feat_dim = 8+3*4
        self.idx = 0
        # Create nerf model
        self.render_kwargs_train, self.render_kwargs_test, start = create_mvs(args, mvs_mode=self.args.manual_depth_view, depth=args.depth_grid)
        filter_keys(self.render_kwargs_train)
        # Create mvs model
        self.MVSNet = self.render_kwargs_train['network_featmvs']
        # optionally warm-start the depth estimator from a checkpoint
        if args.pre_d_est is not None and self.args.manual_depth_view > 0 :
            self.load_pretrained_d_est(self.MVSNet, args.pre_d_est)
        self.FeatureNet = self.render_kwargs_train['network_2d']
        # the networks are held as attributes; drop them from the kwargs dict
        self.render_kwargs_train.pop('network_featmvs')
        self.render_kwargs_train.pop('network_2d')
        self.render_kwargs_train['NDC_local'] = False
        if self.args.manual_depth_view == -1:
            self.ProbNet = ProbNet(8).to(device)
        if self.args.shading_feature_mlp_layer0 > 0:
            self.premlp = premlp_init(args)
        # self.eval_metric = [0.01, 0.05, 0.1]
        # resolve the point sampler by name (e.g. "gau_single_sampler")
        self.sample_func = getattr(self, args.mvs_point_sampler, None)
        self.cnt = 0
def load_pretrained_d_est(self, model, pre_d_est):
# load checkpoint file specified by args.loadckpt
print("loading model {}".format(pre_d_est))
state_dict = torch.load(pre_d_est, map_location=lambda storage, loc: storage)
new_state_dict = OrderedDict()
for k, v in state_dict['model'].items():
name = k[7:] # remove module.
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Register the MVS-point-generation command-line options on `parser`."""
        parser.add_argument("--mvs_lr", type=float, default=5e-4,
                            help='learning rate')
        parser.add_argument('--pad', type=int, default=24)
        parser.add_argument('--depth_grid', type=int, default=128)
        parser.add_argument('--prob_thresh', type=float, default=0.8)
        parser.add_argument('--dprob_thresh', type=float, default=0.8)
        parser.add_argument('--num_neighbor', type=int, default=1)
        parser.add_argument('--depth_vid', type=str, default="0", help="0123")
        parser.add_argument('--ref_vid', type=int, default=0, help="0, 1, 2, or 3")
        parser.add_argument('--num_each_depth', type=int, default=1)
        parser.add_argument('--depth_conf_thresh', type=float, default=None)
        parser.add_argument('--depth_occ', type=int, default=0)
        parser.add_argument('--manual_depth_view', type=int, default=0, help="-1 for learning probability, 0 for gt, 1 for pretrained MVSNet")
        parser.add_argument('--pre_d_est', type=str, default=None, help="loading pretrained depth estimator")
        parser.add_argument('--manual_std_depth', type=float, default=0)
        parser.add_argument('--far_plane_shift', type=float, default=None)
        parser.add_argument('--mvs_point_sampler', type=str, default="gau_single_sampler")
        # appr_feature_str0..3: per-view feature-source specs consumed by
        # query_embedding (e.g. "imgfeat_<viewids>_<layerids>", "dir_<viewids>",
        # "point_conf", "vol")
        parser.add_argument('--appr_feature_str0',
                            type=str,
                            nargs='+',
                            # default=["imgfeat_0_0123", "vol"],
                            default=["imgfeat_0_0", "vol"],
                            help=
                            "which feature_map")
        parser.add_argument('--appr_feature_str1',
                            type=str,
                            nargs='+',
                            # default=["imgfeat_0_0123", "vol"],
                            default=["imgfeat_0_0", "vol"],
                            help=
                            "which feature_map")
        parser.add_argument('--appr_feature_str2',
                            type=str,
                            nargs='+',
                            # default=["imgfeat_0_0123", "vol"],
                            default=["imgfeat_0_0", "vol"],
                            help=
                            "which feature_map")
        parser.add_argument('--appr_feature_str3',
                            type=str,
                            nargs='+',
                            # default=["imgfeat_0_0123", "vol"],
                            default=["imgfeat_0_0", "vol"],
                            help=
                            "which feature_map")
        parser.add_argument('--vox_res', type=int, default=0, help='vox_resolution if > 0')
def decode_batch(self, batch, idx=list(torch.arange(4))):
data_mvs = sub_selete_data(batch, device, idx, filtKey=[])
pose_ref = {'w2cs': data_mvs['w2cs'].squeeze(), 'intrinsics': data_mvs['intrinsics'].squeeze(),
'c2ws': data_mvs['c2ws'].squeeze(),'near_fars':data_mvs['near_fars'].squeeze()}
return data_mvs, pose_ref
def normalize_rgb(self, data, shape=(1,1,3,1,1)):
# to unnormalize image for visualization
# data N V C H W
device = data.device
mean = torch.tensor([0.485, 0.456, 0.406]).view(*shape).to(device)
std = torch.tensor([0.229, 0.224, 0.225]).view(*shape).to(device)
return (data - mean) / std
    def gau_single_sampler(self, volume_prob, args, ref_intrinsic, near_far, cam_expected_depth=None, ndc_std_depth=None):
        """Sample per-pixel 3D points around a depth estimate.

        Either derives expected depth and std from the (B, 1, D, H, W)
        probability volume (masking pixels via prob_filter), or — when
        cam_expected_depth is given — converts that metric depth map into NDC
        with the supplied ndc_std_depth, masking by the near/far range.
        Returns (ndc_xyz, cam_xyz, (H, W), mask).
        """
        # volume_prob  # ([1, 1, 128, 176, 208])
        if cam_expected_depth is None:
            B, C, D, H, W = volume_prob.shape
            v = 1.0 / D
            # bin-center depths in NDC, broadcast over the image plane
            ndc_depths = torch.linspace(0.5 * v, 1.0 - 0.5 * v, steps=D, device=volume_prob.device)[None, None, :, None, None].expand(1, 1, -1, H, W)
            # B, C, H, W
            ndc_expected_depth = torch.sum(volume_prob * ndc_depths, dim=2)  # ([1, 1, 1, 176, 208])
            ndc_std_depth = torch.sqrt(torch.sum(volume_prob * torch.square(ndc_depths-ndc_expected_depth), dim=2)) #([1, 1, 176, 208])
            mask = self.prob_filter(args.dprob_thresh, args.num_neighbor, volume_prob, ndc_expected_depth, ndc_std_depth)
        else:
            # [1, 1, 512, 640]
            mask = torch.logical_and(cam_expected_depth >= near_far[0], cam_expected_depth <= near_far[1])
            ndc_expected_depth = (cam_expected_depth - near_far[0]) / (near_far[1] - near_far[0])
        sampled_depth = self.sample_by_gau(ndc_expected_depth, ndc_std_depth, args) #([1, 1, 5, 512, 640])
        ndc_xyz, cam_xyz = self.depth2point(sampled_depth, ref_intrinsic, near_far)  # 1, 1, 512, 640, 3
        return ndc_xyz, cam_xyz, ndc_expected_depth.shape[-2:], mask
    def sample_by_gau(self, ndc_expected_depth, ndc_std_depth, args):
        """Draw args.num_each_depth Gaussian depth samples per pixel, centered
        on the expected NDC depth with per-pixel std, clamped to [0, 1].
        NOTE(review): the noise mean tensor is created on a hard-coded "cuda"
        device; CPU-only runs will fail here — confirm intended.
        """
        B, C, H, W = ndc_expected_depth.shape
        N = args.num_each_depth
        # [1, 5, 1, 176, 208]
        sampled_depth = ndc_std_depth[:,None,...] * torch.normal(mean=torch.zeros((B, N, C, H, W), device="cuda"), std=torch.ones((B, N, C, H, W), device=ndc_expected_depth.device)) + ndc_expected_depth[:,None,...]
        return torch.clamp(sampled_depth, min=0.0, max=1.0)
    def depth2point(self, sampled_depth, ref_intrinsic, near_far):
        """Lift (B, N, C, H, W) NDC depth samples to 3D.

        Builds a normalized [0, 1] pixel grid, stacks it with the depths into
        NDC xyz, and converts to camera space via ndc_2_cam.
        Returns (ndc_xyz, cam_xyz), each (B, N, C, H, W, 3).
        """
        B, N, C, H, W = sampled_depth.shape
        valid_z = sampled_depth
        valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / (W - 1)
        valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / (H - 1)
        valid_y, valid_x = torch.meshgrid(valid_y, valid_x)
        # B,N,H,W
        valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1)
        valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1)
        ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view(B, N, C, H, W, 3)  # 1, 1, 5, 512, 640, 3
        cam_xyz = ndc_2_cam(ndc_xyz, near_far, ref_intrinsic, W, H)  # 1, 1, 5, 512, 640, 3
        return ndc_xyz, cam_xyz
    def prob_filter(self, thresh, num_neighbor, volume_prob, ndc_expected_depth, ndc_std_depth):
        """Keep pixels whose probability mass in `num_neighbor` bins around the
        expected-depth bin exceeds `thresh`.

        volume_prob: (B, 1, D, H, W) per-bin depth probabilities;
        ndc_expected_depth: (B, 1, H, W). Returns a (B, 1, H, W) bool mask.
        NOTE(review): lower_idx is built from torch.ceil(ndc_expected_depth),
        which is in NDC [0, 1] rather than bin units — it looks like it should
        be scaled by D first; confirm against callers before relying on this.
        """
        B, C, D, H, W = volume_prob.shape
        ceil_idx = torch.ceil(ndc_expected_depth)
        lower_idx = ceil_idx - num_neighbor // 2 + 1  # B, C, 1, H, W
        # upper_idx = ceil_idx + num_neighbor // 2
        shifts = torch.arange(0, num_neighbor, device=volume_prob.device, dtype=torch.int64)[None, :, None, None]
        idx = torch.clamp(lower_idx.to(torch.int64) + shifts, min=0, max=D-1)  # B, num_neighbor, H, W
        select_probs = torch.gather(torch.squeeze(volume_prob, dim=1), 1, idx)  # B, num_neighbor, H, W
        sumprobs = torch.sum(select_probs, dim=1, keepdim=True)  # ([1, 1, 176, 208])
        mask = sumprobs > thresh
        return mask
    def extract_2d(self, img_feats, view_ids, layer_ids, intrinsics, c2ws, w2cs, cam_xyz, HD, WD, cam_vid=0):
        """Gather per-point 2D features by reprojecting cam_xyz into each view.

        For every view id, warps the points into that view (occlusion-aware
        when args.depth_occ > 0) and samples the requested feature layers;
        layer 0 is treated as RGB and collected separately.
        Returns (features concatenated over views/layers, colors or None).
        """
        out_feats = []
        colors = []
        for vid in view_ids:
            # no extrinsic warp needed when sampling the point cloud's own view
            w2c = w2cs[:,vid,...] if vid != cam_vid else None
            warp = homo_warp_nongrid_occ if self.args.depth_occ > 0 else homo_warp_nongrid
            src_grid, mask, hard_id_xy = warp(c2ws[:,cam_vid,...], w2c, intrinsics[:,vid,...], cam_xyz, HD, WD, tolerate=0.1)
            warped_feats = []
            for lid in layer_ids:
                img_feat = img_feats[lid]  # 3, 32, 128, 160
                warped_src_feat = extract_from_2d_grid(img_feat[vid:vid+1, ...], src_grid, mask)
                if lid == 0:
                    colors.append(warped_src_feat)
                else:
                    warped_feats.append(warped_src_feat)
            warped_feats = torch.cat(warped_feats, dim=-1)
            out_feats.append(warped_feats)
        out_feats = torch.cat(out_feats, dim=-1)
        colors = torch.cat(colors, dim=-1) if len(colors) > 0 else None
        return out_feats, colors
def get_image_features(self, imgs):
return self.FeatureNet(imgs[:, :self.args.init_view_num])
    def query_embedding(self, HDWD, cam_xyz, photometric_confidence, img_feats, c2ws, w2cs, intrinsics, cam_vid, pointdir_w=False):
        """Assemble per-point features for view `cam_vid` according to the
        appr_feature_str<cam_vid> spec from the args.

        Spec entries: "imgfeat_<viewids>_<layerids>" samples 2D features/colors,
        "dir_<viewids>" adds view directions (ref-cam frame unless pointdir_w),
        "point_conf" appends the photometric confidence.
        Returns (embedding, colors, dirs, conf); the first is optionally run
        through the pre-MLP when shading_feature_mlp_layer0 > 0.
        """
        HD, WD = HDWD
        points_embedding = []
        points_dirs = None
        points_conf = None
        points_colors = None
        for feat_str in getattr(self.args, feature_str_lst[cam_vid]):
            if feat_str.startswith("imgfeat"):
                _, view_ids, layer_ids = feat_str.split("_")
                view_ids = [int(a) for a in list(view_ids)]
                layer_ids = [int(a) for a in list(layer_ids)]
                twoD_feats, points_colors = self.extract_2d(img_feats, view_ids, layer_ids, intrinsics, c2ws, w2cs, cam_xyz, HD, WD, cam_vid=cam_vid)
                points_embedding.append(twoD_feats)
            elif feat_str.startswith("dir"):
                _, view_ids = feat_str.split("_")
                view_ids = torch.as_tensor([int(a) for a in list(view_ids)], dtype=torch.int64, device=cam_xyz.device)
                cam_pos_world = c2ws[:, view_ids, :, 3] # B, V, 4
                cam_trans = w2cs[:, cam_vid, ...] # B, 4, 4
                cam_pos_cam = (cam_pos_world @ cam_trans.transpose(1, 2))[...,:3] # B, V, 4
                points_dirs = cam_xyz[:,:, None, :] - cam_pos_cam[:, None, :, :] # B, N, V, 3 in current cam coord
                points_dirs = points_dirs / (torch.linalg.norm(points_dirs, dim=-1, keepdims=True) + 1e-6) # B, N, V, 3
                # rotate directions into world space...
                points_dirs = points_dirs.view(cam_xyz.shape[0], -1, 3) @ c2ws[:, cam_vid, :3, :3].transpose(1, 2)
                if not pointdir_w:
                    # ...then into the reference camera frame
                    points_dirs = points_dirs @ c2ws[:, self.args.ref_vid, :3, :3].transpose(1, 2) # in ref cam coord
                # print("points_dirs", points_dirs.shape)
                points_dirs = points_dirs.view(cam_xyz.shape[0], cam_xyz.shape[1], -1)
            elif feat_str.startswith("point_conf"):
                if photometric_confidence is None:
                    photometric_confidence = torch.ones_like(points_embedding[0][...,0:1])
                points_conf = photometric_confidence
        points_embedding = torch.cat(points_embedding, dim=-1)
        if self.args.shading_feature_mlp_layer0 > 0:
            points_embedding = self.premlp(torch.cat([points_embedding, points_colors, points_dirs, points_conf], dim=-1))
        return points_embedding, points_colors, points_dirs, points_conf
    def gen_points(self, batch):
        """Generate candidate 3D points (camera space) for each depth view.

        Depth source depends on args.manual_depth_view: -1 learns a depth
        probability volume (ProbNet), 0 uses ground-truth depth maps, and >= 1
        runs the pretrained MVSNet depth estimator (top-1 or top-k bins).
        Returns (cam_xyz_lst, photometric_confidence_lst, nearfar_mask_lst,
        HDWD, data_mvs, per-view intrinsics list, per-view w2c list).
        """
        if 'scan' in batch.keys():
            batch.pop('scan')
        log, loss = {},0
        data_mvs, pose_ref = self.decode_batch(batch)
        imgs, proj_mats = data_mvs['images'], data_mvs['proj_mats']
        near_fars, depths_h = data_mvs['near_fars'], data_mvs['depths_h'] if 'depths_h' in data_mvs else None
        # print("depths_h", batch["near_fars"], depths_h.shape, depths_h[0,0,:,:])
        # volume_feature:(1, 8, D, 176, 208)       img_feat:(B, V, C, h, w)
        cam_expected_depth = None
        ndc_std_depth = None
        # volume_feature: 1, 8, 128, 176, 208;
        # img_feat: 1, 3, 32, 128, 160;
        # depth_values: 1, 128
        photometric_confidence_lst=[]
        cam_xyz_lst = []
        nearfar_mask_lst = []
        volume_prob = None
        # w2c_ref = batch["w2cs"][:, self.args.ref_vid, ...].transpose(1, 2)
        depth_vid = [int(self.args.depth_vid[i]) for i in range(len(self.args.depth_vid))]
        if self.args.manual_depth_view < 1:
            # learned-probability (-1) or ground-truth (0) depth path
            if self.args.manual_depth_view == -1:
                img_feats = self.FeatureNet(imgs[:, :self.args.init_view_num])
            for i in range(len(depth_vid)):
                vid = depth_vid[i]
                if self.args.manual_depth_view == -1:
                    volume_feature, img_feats, depth_values = self.MVSNet(imgs[:, :self.args.init_view_num], img_feats, proj_mats[:, vid, :3], near_fars[0, vid], pad=self.args.pad, vid=vid)
                    volume_prob = self.ProbNet(volume_feature) # ([1, 1, 128, 176, 208])
                    # print("volume_prob", volume_prob.shape)
                elif self.args.manual_depth_view == 0:
                    cam_expected_depth = depths_h[:,vid:vid+1,...]
                    ndc_std_depth = torch.ones_like(cam_expected_depth) * self.args.manual_std_depth
                ndc_xyz, cam_xyz, HDWD, nearfar_mask = self.sample_func(volume_prob, self.args, batch["intrinsics"][:, vid, ...], near_fars[0, vid], cam_expected_depth=cam_expected_depth, ndc_std_depth=ndc_std_depth)
                if cam_xyz.shape[1] > 0:
                    cam_xyz_lst.append(cam_xyz)
                    nearfar_mask_lst.append(nearfar_mask)
        else:
            # pretrained-MVSNet path: estimate depth for all depth views at once
            near_far_depth = batch["near_fars_depth"][0]
            depth_interval, depth_min = (near_far_depth[1] - near_far_depth[0]) / 192., near_far_depth[0]
            depth_values = (depth_min + torch.arange(0, 192, device="cuda", dtype=torch.float32) * depth_interval)[None, :]
            dimgs = batch["mvs_images"] if "mvs_images" in batch else imgs
            bmvs_2d_features=None
            # print("dimgs",dimgs.shape)
            bimgs = dimgs[:, :self.args.init_view_num].expand(len(self.args.depth_vid), -1, -1, -1, -1)
            bvid = torch.as_tensor(depth_vid, dtype=torch.long, device="cuda")
            bproj_mats = proj_mats[0, bvid, ...]
            bdepth_values = depth_values.expand(len(self.args.depth_vid), -1)
            if self.args.manual_depth_view == 1:
                with torch.no_grad():
                    # 1, 128, 160; 1, 128, 160; prob_volume: 1, 192, 128, 160
                    depths_h, photometric_confidence, _, _ = self.MVSNet(bimgs, bproj_mats, bdepth_values, features=bmvs_2d_features)
                    depths_h, photometric_confidence = depths_h[:,None,...], photometric_confidence[:,None,...]
                    # B,N,H,W,3, B,N,H,W,3, 1, 1,1,H,W
            else:
                # manual_depth_view > 1: take the top-k most probable depth bins
                dnum = self.args.manual_depth_view
                with torch.no_grad():
                    # prob_volume: 1, 192, 128, 160
                    _, prob_sm_volume, prob_raw_volume = self.MVSNet(
                        bimgs, bproj_mats, bdepth_values, features=bmvs_2d_features, prob_only=True)
                    # prob_volume = torch.sigmoid(prob_raw_volume)
                    prob_volume = prob_sm_volume
                    photometric_confidence, topk_idx = torch.topk(prob_volume, dnum, dim=1) # 1, 5, 128, 160;    1, 5, 128, 160
                    depths_h = torch.cat([depth_values[0,topk_idx[i].view(-1)].view(1, dnum, prob_volume.shape[-2], prob_volume.shape[-1]) for i in range(len(depth_vid))], dim=0)
            # upsample estimated depth/confidence to full image resolution
            bcam_expected_depth = torch.nn.functional.interpolate(depths_h, size=list(dimgs.shape)[-2:], mode='nearest')
            photometric_confidence = torch.nn.functional.interpolate(photometric_confidence, size=list(dimgs.shape)[-2:], mode='nearest')  # 1, 1, H, W
            photometric_confidence_lst = torch.unbind(photometric_confidence[:,None,...], dim=0)
            bndc_std_depth = torch.ones_like(bcam_expected_depth) * self.args.manual_std_depth
            for i in range(len(depth_vid)):
                vid = depth_vid[i]
                cam_expected_depth, ndc_std_depth = bcam_expected_depth[i:i+1], bndc_std_depth[i:i+1]
                ndc_xyz, cam_xyz, HDWD, nearfar_mask = self.sample_func(volume_prob, self.args, batch["intrinsics"][:, vid,...], near_fars[0, vid], cam_expected_depth=cam_expected_depth, ndc_std_depth=ndc_std_depth)
                if cam_xyz.shape[1] > 0:
                    cam_xyz_lst.append(cam_xyz)
                    nearfar_mask_lst.append(nearfar_mask)
        return cam_xyz_lst, photometric_confidence_lst, nearfar_mask_lst, HDWD, data_mvs, [batch["intrinsics"][:,int(vid),...] for vid in self.args.depth_vid], [batch["w2cs"][:,int(vid),...] for vid in self.args.depth_vid]
    def forward(self, batch):
        """Generate, filter, and featurize the MVS point cloud for a batch.

        Runs gen_points, filters the candidates by cross-view masks (GPU or
        CPU path) or simply by the near/far masks for GT depth, then queries
        per-point embeddings and transforms all points into the reference
        camera frame.
        Returns (ref_xyz, embedding, colors, ref_dirs, conf).
        """
        # 3 , 3, 3, 2, 4, dict, 3, 3
        cam_xyz_lst, photometric_confidence_lst, nearfar_mask_lst, HDWD, data_mvs, intrinsics_lst, extrinsics_lst = self.gen_points(batch)
        # #################### FILTER by Masks ##################
        gpu_filter = True
        if self.args.manual_depth_view != 0:
            # cuda filter
            if gpu_filter:
                cam_xyz_lst, _, photometric_confidence_lst = filter_utils.filter_by_masks_gpu(cam_xyz_lst, intrinsics_lst, extrinsics_lst, photometric_confidence_lst, nearfar_mask_lst, self.args)
            else:
                cam_xyz_lst, _, photometric_confidence_lst = filter_utils.filter_by_masks([cam_xyz.cpu().numpy() for cam_xyz in cam_xyz_lst], [intrinsics.cpu().numpy() for intrinsics in intrinsics_lst], [extrinsics.cpu().numpy() for extrinsics in extrinsics_lst], [confidence.cpu().numpy() for confidence in photometric_confidence_lst], [nearfar_mask.cpu().numpy() for nearfar_mask in nearfar_mask_lst], self.args)
                cam_xyz_lst = [torch.as_tensor(cam_xyz, device="cuda", dtype=torch.float32) for cam_xyz in cam_xyz_lst]
                photometric_confidence_lst = [torch.as_tensor(confidence, device="cuda", dtype=torch.float32) for confidence in photometric_confidence_lst]
        else:
            # GT-depth path: just drop points outside the near/far masks
            B, N, C, H, W, _ = cam_xyz_lst[0].shape
            cam_xyz_lst = [cam_xyz.view(C, H, W, 3) for cam_xyz in cam_xyz_lst]
            cam_xyz_lst = [cam_xyz[nearfar_mask[0,...], :] for cam_xyz, nearfar_mask in zip(cam_xyz_lst, nearfar_mask_lst)]
            # print("after filterd", cam_xyz_lst[0].shape)
            photometric_confidence_lst = [torch.ones_like(cam_xyz[...,0]) for cam_xyz in cam_xyz_lst]
        # img_feats = self.get_image_features(batch['images'])
        img_feats = self.get_image_features(batch['mvs_images'])
        points_features_lst = [self.query_embedding(HDWD, torch.as_tensor(cam_xyz_lst[i][None, ...], device="cuda", dtype=torch.float32), photometric_confidence_lst[i][None, ..., None], img_feats, data_mvs['c2ws'], data_mvs['w2cs'], batch["intrinsics"], int(self.args.depth_vid[i]), pointdir_w=False) for i in range(len(cam_xyz_lst))]
        # #################### start query embedding ##################
        # per-view cam coords -> world -> reference camera frame
        xyz_ref_lst = [(torch.cat([xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1) @ torch.linalg.inv(
            cam_extrinsics[0]).transpose(0, 1) @ batch["w2cs"][0, self.args.ref_vid, ...].transpose(0, 1))[..., :3] for
            xyz_cam, cam_extrinsics in zip(cam_xyz_lst, extrinsics_lst)]
        ref_xyz = torch.cat(xyz_ref_lst, dim=0)
        points_embedding = torch.cat([points_features[0] for points_features in points_features_lst], dim=1)
        points_colors = torch.cat([points_features[1] for points_features in points_features_lst], dim=1) if points_features_lst[0][1] is not None else None
        points_ref_dirs = torch.cat([points_features[2] for points_features in points_features_lst], dim=1) if points_features_lst[0][2] is not None else None
        points_conf = torch.cat([points_features[3] for points_features in points_features_lst], dim=1) if points_features_lst[0][3] is not None else None
        return ref_xyz, points_embedding, points_colors, points_ref_dirs, points_conf
def save_points(self, xyz, dir, total_steps):
if xyz.ndim < 3:
xyz = xyz[None, ...]
os.makedirs(dir, exist_ok=True)
for i in range(xyz.shape[0]):
if isinstance(total_steps, str):
filename = 'step-{}-{}.txt'.format(total_steps, i)
else:
filename = 'step-{:04d}-{}.txt'.format(total_steps, i)
filepath = os.path.join(dir, filename)
np.savetxt(filepath, xyz[i, ...].reshape(-1, xyz.shape[-1]), delimiter=";")
    def save_image(self, img_array, filepath):
        """Write an HxW or HxWx{3,4} array to `filepath`, converting float
        images in [0, 1] to uint8 and creating parent directories.
        NOTE(review): `Image` (PIL) is not imported explicitly in this module;
        presumably it arrives via a star import — verify before relying on it.
        """
        assert len(img_array.shape) == 2 or (len(img_array.shape) == 3
                                             and img_array.shape[2] in [3, 4])
        if img_array.dtype != np.uint8:
            img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        Image.fromarray(img_array).save(filepath)
import sys
import os
import pathlib
# sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import torch.nn.functional as F
import copy
import torch
import numpy as np
import time
from models.mvs import mvs_utils
from tqdm import tqdm
import cv2
from PIL import Image
def reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Project every reference-view pixel into the source view and back.

    Using depth_ref, each reference pixel is lifted to 3D, projected into the
    source view, re-lifted with the *sampled* source depth, and projected back
    into the reference view. Returns (depth_reprojected, x_reprojected,
    y_reprojected, x_src, y_src, oor_mask) where oor_mask marks reference
    pixels whose source projection falls outside the source image.
    All inputs/outputs are numpy arrays; depths are (H, W).
    """
    width, height = depth_ref.shape[1], depth_ref.shape[0]
    ## step1. project reference pixels to the source view
    # reference view x, y
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])
    # reference 3D space: back-project pixels with the reference depth
    xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref),
                        np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1]))
    # source 3D space: ref-camera -> world -> src-camera
    xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)),
                        np.vstack((xyz_ref, np.ones_like(x_ref))))[:3]
    # source view x, y (perspective divide)
    K_xyz_src = np.matmul(intrinsics_src, xyz_src)
    xy_src = K_xyz_src[:2] / K_xyz_src[2:3]
    ## step2. reproject the source view points with source view depth estimation
    # find the depth estimation of the source view
    x_src = xy_src[0].reshape([height, width]).astype(np.float32)
    y_src = xy_src[1].reshape([height, width]).astype(np.float32)
    # out-of-range mask: projections landing outside the source image bounds
    oor_mask = np.logical_or(np.logical_or(x_src >= width, x_src < 0),np.logical_or(y_src >= height, y_src < 0))
    # bilinearly sample the source depth at the projected locations
    sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)
    # print("depth_src",depth_src.shape, x_src.shape, y_src.shape)
    # sampled_depth_src=depth_src
    # mask = sampled_depth_src > 0
    # source 3D space
    # NOTE that we should use sampled source-view depth_here to project back
    xyz_src = np.matmul(np.linalg.inv(intrinsics_src),
                        np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1]))
    # reference 3D space: src-camera -> world -> ref-camera
    xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)),
                                np.vstack((xyz_src, np.ones_like(x_ref))))[:3]
    # source view x, y, depth
    depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32)
    K_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected)
    xy_reprojected = K_xyz_reprojected[:2] / K_xyz_reprojected[2:3]
    x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)
    y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)
    return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src, oor_mask
def check_geometric_consistency(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Geometric consistency of a reference depth map against one source view.

    A reference pixel is consistent when its round-trip reprojection (via
    reproject_with_depth) lands within 1 pixel of itself AND its reprojected
    depth differs by less than 1% of the reference depth.

    Returns (consistency_mask, visibility_mask, depth_reprojected, x2d_src,
    y2d_src); depth_reprojected is zeroed at inconsistent pixels.
    """
    height, width = depth_ref.shape[0], depth_ref.shape[1]
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    (depth_reprojected, x2d_reprojected, y2d_reprojected,
     x2d_src, y2d_src, oor_mask) = reproject_with_depth(
        depth_ref, intrinsics_ref, extrinsics_ref,
        depth_src, intrinsics_src, extrinsics_src)
    # reprojection error in pixels: |p_reproj - p_1| < 1
    reproj_err = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2)
    # relative depth error: |d_reproj - d_1| / d_1 < 0.01
    rel_depth_err = np.abs(depth_reprojected - depth_ref) / depth_ref
    consistent = np.logical_and(reproj_err < 1, rel_depth_err < 0.01)
    depth_reprojected[~consistent] = 0
    return consistent, ~oor_mask, depth_reprojected, x2d_src, y2d_src
def filter_by_masks(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt):
    """Fuse per-view depth maps into filtered point clouds (CPU / numpy path).

    For each reference view, keeps pixels that pass (a) the photometric
    confidence threshold opt.depth_conf_thresh, (b) geometric consistency with
    at least opt.geo_cnsst_num source views, and (c) the provided points_mask.
    Surviving pixels are averaged over consistent source depths, lifted to
    camera and world space, then range-filtered by range_mask_np.

    Returns (xyz_ref_lst, xyz_world_lst, confidence_filtered_lst), one entry
    per view.
    """
    xyz_world_lst=[]
    xyz_ref_lst=[]
    confidence_filtered_lst = []
    # cam_xyz_all entries arrive as (B, N, H, W, 3); flattened to (H, W, 3)
    B, N, H, W, _ = cam_xyz_all[0].shape
    cam_xyz_all = [cam_xyz.reshape(H, W, 3) for cam_xyz in cam_xyz_all]
    count = 0  # NOTE(review): unused
    for ref_view in tqdm(range(len(cam_xyz_all))):
        # last channel of cam_xyz is depth; first two are camera-space xy
        ref_intrinsics, ref_extrinsics, ref_cam_xy, ref_depth_est, confidence, points_mask = intrinsics_all[ref_view][0], extrinsics_all[ref_view][0], cam_xyz_all[ref_view][...,:-1], cam_xyz_all[ref_view][...,-1], confidence_all[ref_view][0,0,...], points_mask_all[ref_view][0,0,...]
        photo_mask = confidence > opt.depth_conf_thresh
        sum_srcview_depth_ests = 0
        geo_mask_sum = 0
        visible_and_match_sum = 0
        visible_sum = 0
        # compute the geometric mask against every other (source) view
        for src_view in range(len(cam_xyz_all)):
            if ref_view == src_view:
                continue
            src_intrinsics, src_extrinsics, src_depth_est = intrinsics_all[src_view][0], extrinsics_all[src_view][0], cam_xyz_all[src_view][...,-1]
            geo_mask, vis_mask, depth_reprojected, x2d_src, y2d_src = check_geometric_consistency(ref_depth_est, ref_intrinsics, ref_extrinsics, src_depth_est, src_intrinsics, src_extrinsics)
            visible_sum += vis_mask.astype(np.float32)
            visible_and_match_sum += np.logical_and(vis_mask, geo_mask).astype(np.float32)
            geo_mask_sum += geo_mask.astype(np.int32)
            sum_srcview_depth_ests += depth_reprojected
        # average the reference depth with all consistent source reprojections
        depth_est_averaged = (sum_srcview_depth_ests + ref_depth_est) / (geo_mask_sum + 1)
        # at least opt.geo_cnsst_num source views matched
        geo_mask = geo_mask_sum >= opt.geo_cnsst_num
        final_mask = np.logical_and(np.logical_and(photo_mask, geo_mask), points_mask)
        # vis_geo_mask = np.divide(visible_and_match_sum, visible_sum, out=np.ones_like(visible_and_match_sum), where=visible_sum!=0) > 0.05
        # final_mask = np.logical_and(np.logical_and(photo_mask, vis_geo_mask), points_mask)
        xy, depth = ref_cam_xy[final_mask,:], depth_est_averaged[final_mask][...,None]
        xyz_ref = np.concatenate([xy, depth], axis=-1)
        # camera -> world: homogeneous points times inv(extrinsics)^T
        xyz_world = np.matmul(np.concatenate([xyz_ref, np.ones_like(xyz_ref[...,0:1])], axis=-1), np.transpose(np.linalg.inv(ref_extrinsics)))[:,:3]
        confidence_filtered = confidence[final_mask]
        xyz_world, xyz_ref, confidence_filtered = range_mask_np(xyz_world, xyz_ref, confidence_filtered, opt)
        xyz_world_lst.append(xyz_world)
        xyz_ref_lst.append(xyz_ref)
        confidence_filtered_lst.append(confidence_filtered)
    return xyz_ref_lst, xyz_world_lst, confidence_filtered_lst
def range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_lst, opt):
    """Apply range_mask_np to every view's point set, replacing list entries
    in place. A leading opt.ranges value <= -99 disables filtering entirely.
    Returns the (possibly mutated) input lists."""
    if opt.ranges[0] > -99.0:
        for idx in range(len(xyz_world_all)):
            filtered = range_mask_np(xyz_world_all[idx], cam_xyz_all[idx],
                                     confidence_filtered_lst[idx], opt)
            xyz_world_all[idx], cam_xyz_all[idx], confidence_filtered_lst[idx] = filtered
    return xyz_world_all, cam_xyz_all, confidence_filtered_lst
def range_mask_np(xyz_world, xyz_ref, confidence_filtered, opt):
    """Keep only points whose world xyz lies inside the axis-aligned box
    opt.ranges = (xmin, ymin, zmin, xmax, ymax, zmax); bounds are inclusive.
    A leading value <= -99 disables filtering. All three arrays are filtered
    with the same mask."""
    if opt.ranges[0] <= -99.0:
        return xyz_world, xyz_ref, confidence_filtered
    box = np.asarray(opt.ranges)
    inside = np.logical_and(xyz_world >= box[None, :3], xyz_world <= box[None, 3:])
    keep = np.prod(inside, axis=-1) > 0
    return xyz_world[keep], xyz_ref[keep], confidence_filtered[keep]
def range_mask_torch(xyz_world, xyz_ref, confidence_filtered, opt):
    """Torch version of the range filter: keep points whose world xyz (first
    three channels) lies inside the inclusive box opt.ranges = (xmin, ymin,
    zmin, xmax, ymax, zmax). A leading value <= -99 disables filtering."""
    if opt.ranges[0] <= -99.0:
        return xyz_world, xyz_ref, confidence_filtered
    box = torch.as_tensor(opt.ranges, device=xyz_world.device, dtype=torch.float32)
    inside = torch.logical_and(xyz_world[..., :3] >= box[None, :3],
                               xyz_world[..., :3] <= box[None, 3:])
    keep = torch.prod(inside, dim=-1) > 0
    return xyz_world[keep], xyz_ref[keep], confidence_filtered[keep]
def reproject_with_depth_gpu(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Torch port of reproject_with_depth: lift reference pixels with
    depth_ref, project into the source view, re-lift with the bilinearly
    sampled source depth, and project back into the reference view.

    Returns (depth_reprojected, x_reprojected, y_reprojected, x_src, y_src,
    oor_mask); oor_mask marks pixels projecting outside the source image.
    Runs on whatever device depth_ref lives on.
    """
    width, height = depth_ref.shape[1], depth_ref.shape[0]
    ## step1. project reference pixels to the source view
    # reference view x, y
    y_ref, x_ref = torch.meshgrid(torch.arange(0, height, device=depth_ref.device), torch.arange(0, width, device=depth_ref.device))
    x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])
    # reference 3D space: back-project pixels with the reference depth
    xyz_ref = torch.matmul(torch.linalg.inv(intrinsics_ref),
                        torch.stack([x_ref, y_ref, torch.ones_like(x_ref, device=x_ref.device)], dim=0) * depth_ref.reshape([-1]))
    # source 3D space: ref-camera -> world -> src-camera
    xyz_src = torch.matmul(torch.matmul(extrinsics_src, torch.linalg.inv(extrinsics_ref)),
                        torch.cat([xyz_ref, torch.ones_like(x_ref)[None,:]], dim=0))[:3]
    # source view x, y
    K_xyz_src = torch.matmul(intrinsics_src, xyz_src) # 3, 6400000
    xy_src = K_xyz_src[:2] / K_xyz_src[2:3]
    ## step2. reproject the source view points with source view depth estimation
    # find the depth estimation of the source view
    x_src = xy_src[0].reshape([height, width]).to(torch.float32)
    y_src = xy_src[1].reshape([height, width]).to(torch.float32)
    # out-of-range mask: projections outside the source image bounds
    oor_mask = torch.logical_or(torch.logical_or(x_src >= width, x_src < 0), torch.logical_or(y_src >= height, y_src < 0))
    # sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)
    # bilinear sampling of the source depth; grid is normalized to [-1, 1]
    sampled_depth_src = F.grid_sample(depth_src[None, None, ...], torch.stack([x_src * 2 / (width-1) - 1, y_src * 2 / (height-1) - 1], dim=-1)[None,...], align_corners=True, mode='bilinear', padding_mode='border')
    # mask = sampled_depth_src > 0
    # source 3D space
    # NOTE that we should use sampled source-view depth_here to project back
    xyz_src = torch.matmul(torch.linalg.inv(intrinsics_src), torch.cat([xy_src, torch.ones_like(x_ref)[None,:]], dim=0) * sampled_depth_src.reshape([-1]))
    # reference 3D space: src-camera -> world -> ref-camera
    xyz_reprojected = torch.matmul(torch.matmul(extrinsics_ref, torch.linalg.inv(extrinsics_src)),
                                torch.cat([xyz_src, torch.ones_like(x_ref)[None,:]], dim=0))[:3]
    # source view x, y, depth
    depth_reprojected = xyz_reprojected[2].reshape([height, width]).to(torch.float32)
    K_xyz_reprojected = torch.matmul(intrinsics_ref, xyz_reprojected)
    xy_reprojected = K_xyz_reprojected[:2] / K_xyz_reprojected[2:3]
    x_reprojected = xy_reprojected[0].reshape([height, width]).to(torch.float32)
    y_reprojected = xy_reprojected[1].reshape([height, width]).to(torch.float32)
    return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src, oor_mask
def check_geometric_consistency_gpu(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Torch version of the geometric consistency check.

    A reference pixel is consistent when its round-trip reprojection (via
    reproject_with_depth_gpu) lands within 1 pixel of itself AND the
    reprojected depth differs by less than 1% of the reference depth.

    Returns (consistency_mask, visibility_mask, depth_reprojected, x2d_src,
    y2d_src); depth_reprojected is zeroed at inconsistent pixels.
    """
    height, width = depth_ref.shape[0], depth_ref.shape[1]
    y_ref, x_ref = torch.meshgrid(torch.arange(0, height, device=depth_ref.device),
                                  torch.arange(0, width, device=depth_ref.device))
    (depth_reprojected, x2d_reproj, y2d_reproj,
     x2d_src, y2d_src, oor_mask) = reproject_with_depth_gpu(
        depth_ref, intrinsics_ref, extrinsics_ref,
        depth_src, intrinsics_src, extrinsics_src)
    # reprojection error in pixels: |p_reproj - p_1| < 1
    reproj_err = torch.sqrt((x2d_reproj - x_ref) ** 2 + (y2d_reproj - y_ref) ** 2)
    # relative depth error: |d_reproj - d_1| / d_1 < 0.01
    rel_depth_err = torch.abs(depth_reprojected - depth_ref) / depth_ref
    consistent = torch.logical_and(reproj_err < 1, rel_depth_err < 0.01)
    depth_reprojected[~consistent] = 0
    return consistent, ~oor_mask, depth_reprojected, x2d_src, y2d_src
def filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=False, return_w=False, cpu2gpu=False, near_fars_all=None):
    """Fuse per-view depth maps into filtered point clouds (torch path).

    Mirrors filter_by_masks: per view, keep pixels passing the confidence
    threshold, the points_mask, and (when more than one view is available)
    geometric consistency with >= opt.geo_cnsst_num other views. When
    opt.manual_depth_view > 1 the geometric check is skipped entirely.
    Optionally pads the background with a synthetic far plane
    (opt.far_plane_shift) at low confidence, then range-filters via
    range_mask_torch.

    vis: show a tqdm progress bar. cpu2gpu: move per-view tensors to CUDA for
    the heavy math and back to CPU for the outputs. return_w: unused here
    (NOTE(review): accepted but never read — confirm callers).

    Returns (xyz_cam_lst, xyz_world_lst, confidence_filtered_lst).
    """
    xyz_cam_lst=[]
    xyz_world_lst=[]
    confidence_filtered_lst = []
    # cam_xyz entries arrive as (B, N, C, H, W, 3); flattened to (C, H, W, 3)
    B, N, C, H, W, _ = cam_xyz_all[0].shape
    cam_xyz_all = [cam_xyz.view(C,H,W,3) for cam_xyz in cam_xyz_all]
    for cam_view in tqdm(range(len(cam_xyz_all))) if vis else range(len(cam_xyz_all)):
        near_fars = near_fars_all[cam_view] if near_fars_all is not None else None
        if opt.manual_depth_view > 1:
            # multiple manual depth hypotheses: threshold only, no geometry.
            # NOTE(review): confidence_filtered is never assigned in this
            # branch, yet it is used below (far-plane padding and
            # range_mask_torch) — this path looks broken; confirm upstream.
            xyz_cam, cam_xy, confidence, points_mask, cam_extrinsics = cam_xyz_all[cam_view], cam_xyz_all[cam_view][0, :, :, :-1], confidence_all[cam_view][0,...], points_mask_all[cam_view][0,...], extrinsics_all[cam_view][0]
            final_mask = torch.logical_and(confidence > opt.depth_conf_thresh, points_mask)
            xyz_cam = xyz_cam[final_mask]
            confidence *= 0.3
        else:
            # last channel of cam_xyz is depth; first two are camera-space xy
            cam_intrinsics, cam_extrinsics, cam_xy, cam_depth_est, confidence, points_mask = intrinsics_all[cam_view][0], extrinsics_all[cam_view][0], cam_xyz_all[cam_view][0,...,:-1], cam_xyz_all[cam_view][0,...,-1], confidence_all[cam_view][0,0,...], points_mask_all[cam_view][0,0,...]
            if cpu2gpu:
                cam_xy, cam_depth_est, confidence, points_mask = cam_xy.cuda(), cam_depth_est.cuda(), confidence.cuda(), points_mask.cuda()
            sum_srcview_depth_ests = 0
            geo_mask_sum = 0
            visible_and_match_sum = 0
            visible_and_not_match_sum = 0
            visible_sum = 0
            # compute the geometric mask against every other (source) view
            for src_view in range(len(cam_xyz_all)):
                if cam_view == src_view:
                    continue
                src_intrinsics, src_extrinsics, src_depth_est = intrinsics_all[src_view][0], extrinsics_all[src_view][0], cam_xyz_all[src_view][0,...,-1]
                if cpu2gpu:
                    src_depth_est = src_depth_est.cuda()
                geo_mask, vis_mask, depth_reprojected, x2d_src, y2d_src = check_geometric_consistency_gpu(cam_depth_est, cam_intrinsics, cam_extrinsics, src_depth_est, src_intrinsics, src_extrinsics)
                visible_sum += vis_mask.to(torch.float32)
                visible_and_match_sum += torch.logical_and(vis_mask, geo_mask).to(torch.float32)
                visible_and_not_match_sum += torch.logical_and(vis_mask, ~geo_mask).to(torch.float32)
                geo_mask_sum += geo_mask.to(torch.int32)
                sum_srcview_depth_ests += depth_reprojected
            # average the reference depth with all consistent reprojections
            depth_est_averaged = (sum_srcview_depth_ests + cam_depth_est) / (geo_mask_sum + 1)
            # at least opt.geo_cnsst_num source views matched
            geo_mask = geo_mask_sum >= opt.geo_cnsst_num # visible_and_not_match_sum < 3 #
            final_mask = torch.logical_and(confidence > opt.depth_conf_thresh, points_mask)
            final_mask = torch.logical_and(final_mask, geo_mask) if len(cam_xyz_all)>1 else final_mask
            xy, depth = cam_xy[final_mask,:], depth_est_averaged[final_mask][...,None]
            xyz_cam = torch.cat([xy, depth], dim=-1)
            confidence_filtered = confidence[final_mask]
            if opt.default_conf > 1.0:
                assert opt.manual_depth_view <= 1
                # rescale confidences by the number of supporting views
                confidence_filtered = reassign_conf(confidence_filtered, final_mask, geo_mask_sum, opt.geo_cnsst_num)
        if opt.far_plane_shift is not None:
            assert near_fars is not None
            # pad rejected (background) pixels with a shifted far plane at
            # a fixed low confidence of 0.02
            bg_mask = ~final_mask if final_mask.dim() == 2 else (torch.sum(final_mask, dim=0) < 1)
            bg_xy = cam_xy[bg_mask,:]
            xyz_cam_extra = torch.cat([bg_xy, torch.ones_like(bg_xy[...,:1]) * near_fars[1] + opt.far_plane_shift], dim=-1)
            xyz_cam = torch.cat([xyz_cam, xyz_cam_extra], dim=0)
            confidence_extra = torch.ones_like(xyz_cam_extra[...,-1]) * 0.02
            confidence_filtered = torch.cat([confidence_filtered, confidence_extra], dim=0)
        # camera -> world: homogeneous points times inv(extrinsics)^T
        xyz_world = torch.cat([xyz_cam, torch.ones_like(xyz_cam[...,0:1])], axis=-1) @ torch.inverse(cam_extrinsics).transpose(0,1)
        # print("xyz_world",xyz_world.shape)
        xyz_world, xyz_cam, confidence_filtered = range_mask_torch(xyz_world, xyz_cam, confidence_filtered, opt)
        xyz_cam_lst.append(xyz_cam.cpu() if cpu2gpu else xyz_cam)
        xyz_world_lst.append(xyz_world[:,:3].cpu() if cpu2gpu else xyz_world[:,:3])
        confidence_filtered_lst.append(confidence_filtered.cpu() if cpu2gpu else confidence_filtered)
    return xyz_cam_lst, xyz_world_lst, confidence_filtered_lst
def reassign_conf(confidence_filtered, final_mask, geo_mask_sum, geo_cnsst_num):
    """Down-weight confidences by how many source views supported each point.

    support = (matching views - geo_cnsst_num + 1), clamped to [1, 10];
    confidence is multiplied by 1 - 1.14869^(-support), so more supporting
    views keep more of the original confidence. Mutates confidence_filtered
    in place and returns it.
    """
    support = torch.clamp(geo_mask_sum[final_mask] - geo_cnsst_num + 1, min=1, max=10)
    # 1.14869 = 2 ** (1 / 5): five extra views halve the residual discount
    confidence_filtered *= (1 - 1.0 / torch.pow(1.14869, support))
    return confidence_filtered
| 16,375 | 53.586667 | 287 | py |
pointnerf | pointnerf-master/models/mvs/models.py | import torch
torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
from .mvs_utils import *
from .mvs_utils import homo_warp
from inplace_abn import InPlaceABN
from .renderer import run_network_mvs
from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def weights_init(m):
    """Kaiming-initialize nn.Linear modules in place (weight: kaiming normal,
    bias: zeros). Any other module type is left untouched. Intended for use
    with nn.Module.apply."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight.data)
    if m.bias is not None:
        nn.init.zeros_(m.bias.data)
class Embedder:
    # Sinusoidal positional embedding of low-dimensional inputs (NeRF-style).
    # NOTE: create_embedding_fn moves freq_bands to CUDA, so embed() requires
    # CUDA input tensors.
    def __init__(self, **kwargs):
        """kwargs: input_dims, include_input, max_freq_log2, num_freqs,
        log_sampling, periodic_fns (list of elementwise fns, e.g. sin/cos)."""
        self.kwargs = kwargs
        self.create_embedding_fn()
    def create_embedding_fn(self):
        """Precompute frequency bands, per-frequency closures and out_dim."""
        embed_fns = []
        d = self.kwargs['input_dims']
        out_dim = 0
        if self.kwargs['include_input']:
            embed_fns.append(lambda x : x)
            out_dim += d
        max_freq = self.kwargs['max_freq_log2']
        N_freqs = self.kwargs['num_freqs']
        # log_sampling spaces frequencies as powers of two in [1, 2^max_freq]
        if self.kwargs['log_sampling']:
            freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs)
        else:
            freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs)
        # shaped (1, N_freqs, 1) for broadcasting in embed(); kept on GPU
        self.freq_bands = freq_bands.reshape(1,-1,1).cuda()
        # freq is bound as a default arg to avoid the late-binding-closure trap
        for freq in freq_bands:
            for p_fn in self.kwargs['periodic_fns']:
                embed_fns.append(lambda x, p_fn=p_fn, freq=freq : p_fn(x * freq))
                out_dim += d
        # NOTE(review): embed_fns is never used by embed(), which hard-codes
        # sin/cos and always concatenates the raw input — out_dim is only
        # correct for include_input=True with periodic_fns=[sin, cos]; confirm.
        self.embed_fns = embed_fns
        self.out_dim = out_dim
    def embed(self, inputs):
        """Return cat([inputs, sin(inputs * freqs), cos(inputs * freqs)], -1)
        where the frequency axis is flattened into the channel axis."""
        repeat = inputs.dim()-1
        inputs_scaled = (inputs.unsqueeze(-2) * self.freq_bands.view(*[1]*repeat,-1,1)).reshape(*inputs.shape[:-1],-1)
        inputs_scaled = torch.cat((inputs, torch.sin(inputs_scaled), torch.cos(inputs_scaled)),dim=-1)
        return inputs_scaled
def get_embedder(multires, i=0, input_dims=3):
    """Build a positional-encoding function.

    Args:
        multires: number of frequency octaves (max_freq_log2 = multires - 1).
        i: embedding mode; -1 returns an identity mapping (no encoding).
        input_dims: dimensionality of the raw input.

    Returns:
        (embed_fn, out_dim): a callable mapping (..., input_dims) tensors to
        (..., out_dim) embeddings, and that output dimensionality.
    """
    if i == -1:
        # Bug fix: this path used to hard-code an output dim of 3, ignoring
        # input_dims — the identity preserves the input dimensionality.
        return nn.Identity(), input_dims
    embed_kwargs = {
        'include_input': True,
        'input_dims': input_dims,
        'max_freq_log2': multires - 1,
        'num_freqs': multires,
        'log_sampling': True,
        'periodic_fns': [torch.sin, torch.cos],
    }
    embedder_obj = Embedder(**embed_kwargs)
    # bind the embedder instance as a default arg so the lambda is picklable-ish
    # and immune to later rebinding of embedder_obj
    embed = lambda x, eo=embedder_obj: eo.embed(x)
    return embed, embedder_obj.out_dim
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax((q / T) k^T) v.

    q, k, v are (batch, n_head, len, dim) tensors; an optional mask zeroes
    out positions (mask == 0 entries are pushed to -1e9 before the softmax).
    Returns (output, attention_weights).
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        # attn_dropout is accepted for interface compatibility but unused
        # (the dropout layer is deliberately disabled upstream).
        # self.dropout = nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask=None):
        # scale q first to match the original operation order bit-for-bit
        scores = torch.matmul(q / self.temperature, k.transpose(2, 3))
        if mask is not None:
            # suppress masked positions before normalization
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = F.softmax(scores, dim=-1)
        # weights = self.dropout(weights)
        return torch.matmul(weights, v), weights
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module: n_head parallel scaled dot-product
    attentions over learned q/k/v projections, followed by an output
    projection, residual connection and LayerNorm.

    Inputs q, k, v are (batch, len, d_model); returns (output, attn) where
    output is (batch, len_q, d_model).
    '''
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        """n_head: heads; d_model: model width; d_k/d_v: per-head key/value
        dims. dropout is accepted but unused (layer commented out)."""
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # projections to all heads at once: d_model -> n_head * d_{k,v}
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
        # standard sqrt(d_k) temperature scaling
        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
        # self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
    def forward(self, q, k, v, mask=None):
        """Attend q over (k, v); mask (if given) is broadcast over heads."""
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
        residual = q
        # Pass through the pre-attention projection: b x lq x (n*dv)
        # Separate different heads: b x lq x n x dv
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Transpose for attention dot product: b x n x lq x dv
        q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
        if mask is not None:
            mask = mask.unsqueeze(1)   # For head axis broadcasting.
        q, attn = self.attention(q, k, v, mask=mask)
        # Transpose to move the head dimension back: b x lq x n x dv
        # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
        q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
        q = self.fc(q)
        q += residual  # residual connection, then post-norm
        q = self.layer_norm(q)
        return q, attn
class Renderer_ours(nn.Module):
    """NeRF-style MLP where each hidden activation is multiplied by a bias
    computed from per-point features, with an optional view-dependent color
    head.

    forward() input layout along the last dim:
    [points (in_ch_pts) | features | view dirs (in_ch_views)].
    Output is [rgb, alpha] (4 channels) when use_viewdirs, else output_ch.
    """
    def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False):
        """D: hidden depth; W: hidden width; skips: indices of layers that
        receive a skip connection from the raw point encoding."""
        super(Renderer_ours, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat
        self.pts_linears = nn.ModuleList(
            [nn.Linear(self.in_ch_pts, W, bias=True)] + [nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + self.in_ch_pts, W) for i in range(D-1)])
        # maps per-point features to a multiplicative bias on each hidden layer
        self.pts_bias = nn.Linear(input_ch_feat, W)
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)])
        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W//2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)
        self.pts_linears.apply(weights_init)
        self.views_linears.apply(weights_init)
        # Bug fix: the view-direction heads only exist when use_viewdirs=True;
        # the original applied weights_init to them unconditionally, raising
        # AttributeError for use_viewdirs=False.
        if use_viewdirs:
            self.feature_linear.apply(weights_init)
            self.alpha_linear.apply(weights_init)
            self.rgb_linear.apply(weights_init)
        else:
            self.output_linear.apply(weights_init)

    def forward_alpha(self, x):
        """Density-only pass: x is [points | features] along the last dim."""
        dim = x.shape[-1]
        in_ch_feat = dim - self.in_ch_pts
        input_pts, input_feats = torch.split(x, [self.in_ch_pts, in_ch_feat], dim=-1)
        h = input_pts
        bias = self.pts_bias(input_feats)
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) * bias
            h = F.relu(h)
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)
        alpha = torch.relu(self.alpha_linear(h))
        return alpha

    def forward(self, x):
        """Full pass: x is [points | features | view dirs] along the last dim."""
        dim = x.shape[-1]
        in_ch_feat = dim - self.in_ch_pts - self.in_ch_views
        input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1)
        h = input_pts
        bias = self.pts_bias(input_feats)
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) * bias
            h = F.relu(h)
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)
        if self.use_viewdirs:
            alpha = torch.relu(self.alpha_linear(h))
            feature = self.feature_linear(h)
            h = torch.cat([feature, input_views], -1)
            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)
            rgb = torch.sigmoid(self.rgb_linear(h))
            outputs = torch.cat([rgb, alpha], -1)
        else:
            outputs = self.output_linear(h)
        return outputs
class Renderer_color_fusion(nn.Module):
    def __init__(self, D=8, W=128, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4],use_viewdirs=False):
        """Renderer that fuses three per-view RGBA samples into one color via
        multi-head attention over per-view rays.

        forward() expects features laid out as [8 latent dims | 3 x RGBA] and
        view dirs as 3 concatenated per-view direction encodings
        (in_ch_views must be divisible by 3).

        NOTE(review): the use_viewdirs=False path is broken — weights_init is
        applied to view-only heads unconditionally and forward() always calls
        alpha_linear — so this class effectively requires use_viewdirs=True.
        """
        super(Renderer_color_fusion, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat
        self.pts_linears = nn.ModuleList(
            [nn.Linear(input_ch, W, bias=True)] + [
                nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + input_ch, W) for i in
                range(D - 1)])
        # per-point features act as a multiplicative bias on each hidden layer
        self.pts_bias = nn.Linear(input_ch_feat, W)
        attension_dim = 16 + 3 + self.in_ch_views//3  # 16 + rgb dim + angle dim
        self.ray_attention = MultiHeadAttention(4, attension_dim, 4, 4)
        if use_viewdirs:
            self.feature_linear = nn.Sequential(nn.Linear(W, 16), nn.ReLU())
            self.alpha_linear = nn.Sequential(nn.Linear(W, 1), nn.ReLU())
            self.rgb_out = nn.Sequential(nn.Linear(attension_dim, 3),nn.Sigmoid()) #
        else:
            self.output_linear = nn.Linear(W, output_ch)
        self.pts_linears.apply(weights_init)
        self.feature_linear.apply(weights_init)
        self.alpha_linear.apply(weights_init)
        self.rgb_out.apply(weights_init)
    def forward_alpha(self,x):
        """Density-only pass: x is [points | features] along the last dim."""
        input_pts, input_feats = torch.split(x, [self.in_ch_pts, self.in_ch_feat], dim=-1)
        h = input_pts
        bias = self.pts_bias(input_feats)
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) * bias
            h = F.relu(h)
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)
        alpha = self.alpha_linear(h)
        return alpha
    def forward(self, x):
        """Full pass: x is [points | features | view dirs]; returns [rgb, alpha]."""
        dim = x.shape[-1]
        in_ch_feat = dim - self.in_ch_pts - self.in_ch_views
        input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1)
        h = input_pts
        bias = self.pts_bias(input_feats)
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) * bias
            h = F.relu(h)
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)
        alpha = self.alpha_linear(h)
        # color: split view dirs and per-view RGBA (3 views x 4 channels after
        # the first 8 latent feature dims)
        input_views = input_views.reshape(-1, 3, self.in_ch_views//3)
        rgb = input_feats[..., 8:].reshape(-1, 3, 4)
        rgb_in = rgb[..., :3]
        N = rgb.shape[0]
        feature = self.feature_linear(h)
        h = feature.reshape(N, 1, -1).expand(-1, 3, -1)
        # attend across the 3 views; the alpha channel of each RGBA acts as mask
        h = torch.cat((h, input_views, rgb_in), dim=-1)
        h, _ = self.ray_attention(h, h, h, mask=rgb[...,-1:])
        rgb = self.rgb_out(h)
        # sum the per-view color contributions back to (N_ray, N_sample, 3)
        rgb = torch.sum(rgb , dim=1).reshape(*alpha.shape[:2], 3)
        outputs = torch.cat([rgb, alpha], -1)
        return outputs
class Renderer_attention2(nn.Module):
    """Variant of Renderer_attention that fuses per-view RGBA samples with
    multi-head attention and feeds the fused color (plus 8 latent feature
    dims) through pts_bias; hidden activations are *multiplied* by the bias.

    Appends the fused colors to the output: [rgb, alpha, colors] when
    use_viewdirs, else [output_ch, colors].
    """
    def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False):
        # Bug fix: the original called super(Renderer_attention, self) — the
        # WRONG class — which raises TypeError on instantiation.
        super(Renderer_attention2, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat
        self.attension_dim = 4 + 8  # per-view RGBA + 8 latent feature dims
        self.color_attention = MultiHeadAttention(4, self.attension_dim, 4, 4)
        self.weight_out = nn.Linear(self.attension_dim, 3)
        self.pts_linears = nn.ModuleList(
            [nn.Linear(self.in_ch_pts, W, bias=True)] + [nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + self.in_ch_pts, W) for i in range(D-1)])
        # 11 = 8 latent feature dims + 3 fused color channels
        self.pts_bias = nn.Linear(11, W)
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)])
        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W//2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)
        self.pts_linears.apply(weights_init)
        self.views_linears.apply(weights_init)
        # Bug fix: these heads only exist when use_viewdirs=True; applying
        # weights_init unconditionally raised AttributeError otherwise.
        if use_viewdirs:
            self.feature_linear.apply(weights_init)
            self.alpha_linear.apply(weights_init)
            self.rgb_linear.apply(weights_init)
        else:
            self.output_linear.apply(weights_init)

    def forward(self, x):
        """x is [points | features | view dirs] along the last dim."""
        N_ray, N_sample, dim = x.shape
        in_ch_feat = dim - self.in_ch_pts - self.in_ch_views
        input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1)
        if input_feats.shape[-1] > 8 + 3:
            # multiple per-view RGBA samples: fuse them with attention
            colors = input_feats[..., 8:].view(N_ray*N_sample, -1, 4)
            weight = torch.cat((colors, input_feats[..., :8].reshape(N_ray*N_sample, 1, -1).expand(-1, colors.shape[-2], -1)), dim=-1)
            weight, _ = self.color_attention(weight, weight, weight)
            colors = torch.sum(self.weight_out(weight), dim=-2).view(N_ray, N_sample, -1)
            # colors = self.weight_out(input_feats)
        else:
            colors = input_feats[..., -3:]
        h = input_pts
        # bias = self.pts_bias(colors)
        bias = self.pts_bias(torch.cat((input_feats[..., :8], colors), dim=-1))
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) * bias
            h = F.relu(h)
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)
        if self.use_viewdirs:
            alpha = torch.relu(self.alpha_linear(h))
            feature = self.feature_linear(h)
            h = torch.cat([feature, input_views], -1)
            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)
            rgb = torch.sigmoid(self.rgb_linear(h))
            outputs = torch.cat([rgb, alpha], -1)
        else:
            outputs = self.output_linear(h)
        outputs = torch.cat((outputs, colors), dim=-1)
        return outputs
class Renderer_attention(nn.Module):
    """Renderer that fuses per-view RGBA samples with multi-head attention;
    hidden activations receive an *additive* bias from [8 latent dims |
    fused colors]. Appends the fused colors to the output: [rgb, alpha,
    colors] when use_viewdirs, else [output_ch, colors].
    """
    def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False):
        super(Renderer_attention, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat
        self.attension_dim = 4 + 8  # per-view RGBA + 8 latent feature dims
        self.color_attention = MultiHeadAttention(4, self.attension_dim, 4, 4)
        self.weight_out = nn.Linear(self.attension_dim, 3)
        # self.weight_out = nn.Linear(self.in_ch_feat, 8)
        # Bug fix: the original used [nn.Linear(W, W)]*(D-1), which repeats the
        # SAME module object so all hidden layers shared one weight matrix;
        # each layer now gets independent parameters (state_dict keys match).
        self.pts_linears = nn.ModuleList(
            [nn.Linear(self.in_ch_pts, W, bias=True)] + [nn.Linear(W, W, bias=True) for _ in range(D-1)])
        # 11 = 8 latent feature dims + 3 fused color channels
        self.pts_bias = nn.Linear(11, W)
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)])
        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W//2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)
        self.pts_linears.apply(weights_init)
        self.views_linears.apply(weights_init)
        # Bug fix: these heads only exist when use_viewdirs=True; applying
        # weights_init unconditionally raised AttributeError otherwise.
        if use_viewdirs:
            self.feature_linear.apply(weights_init)
            self.alpha_linear.apply(weights_init)
            self.rgb_linear.apply(weights_init)
        else:
            self.output_linear.apply(weights_init)

    def forward(self, x):
        """x is [points | features | view dirs] along the last dim."""
        N_ray, N_sample, dim = x.shape
        in_ch_feat = dim - self.in_ch_pts - self.in_ch_views
        input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1)
        if input_feats.shape[-1] > 8 + 3:
            # multiple per-view RGBA samples: fuse them with attention
            colors = input_feats[..., 8:].view(N_ray*N_sample, -1, 4)
            weight = torch.cat((colors, input_feats[..., :8].reshape(N_ray*N_sample, 1, -1).expand(-1, colors.shape[-2], -1)), dim=-1)
            weight, _ = self.color_attention(weight, weight, weight)
            colors = torch.sum(torch.sigmoid(self.weight_out(weight)), dim=-2).view(N_ray, N_sample, -1)
            # colors = self.weight_out(input_feats)
        else:
            colors = input_feats[..., -3:]
        h = input_pts
        # bias = self.pts_bias(colors)
        bias = self.pts_bias(torch.cat((input_feats[..., :8], colors), dim=-1))
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) + bias
            h = F.relu(h)
            # if i in self.skips:
            #     h = torch.cat([input_pts, h], -1)
        if self.use_viewdirs:
            alpha = torch.relu(self.alpha_linear(h))
            feature = self.feature_linear(h)
            h = torch.cat([feature, input_views], -1)
            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)
            rgb = torch.sigmoid(self.rgb_linear(h))
            outputs = torch.cat([rgb, alpha, colors], -1)
        else:
            outputs = self.output_linear(h)
            outputs = torch.cat((outputs, colors), dim=-1)
        return outputs
class Renderer_linear(nn.Module):
    """NeRF-style MLP where each hidden activation receives an *additive*
    bias from the per-point features (cf. Renderer_ours, which multiplies).

    forward() input layout along the last dim:
    [points (in_ch_pts) | features | view dirs (in_ch_views)].
    Output is [rgb, alpha] (4 channels) when use_viewdirs, else output_ch.
    """
    def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False):
        """D: hidden depth; W: hidden width; skips: indices of layers that
        receive a skip connection from the raw point encoding."""
        super(Renderer_linear, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat
        self.pts_linears = nn.ModuleList(
            [nn.Linear(input_ch, W, bias=True)] + [nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + input_ch, W) for i in range(D-1)])
        # maps per-point features to an additive bias on each hidden layer
        self.pts_bias = nn.Linear(input_ch_feat, W)
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)])
        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W//2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)
        self.pts_linears.apply(weights_init)
        self.views_linears.apply(weights_init)
        # Bug fix: the view-direction heads only exist when use_viewdirs=True;
        # the original applied weights_init to them unconditionally, raising
        # AttributeError for use_viewdirs=False.
        if use_viewdirs:
            self.feature_linear.apply(weights_init)
            self.alpha_linear.apply(weights_init)
            self.rgb_linear.apply(weights_init)
        else:
            self.output_linear.apply(weights_init)

    def forward_alpha(self, x):
        """Density-only pass: x is [points | features] along the last dim."""
        dim = x.shape[-1]
        input_pts, input_feats = torch.split(x, [self.in_ch_pts, self.in_ch_feat], dim=-1)
        h = input_pts
        bias = self.pts_bias(input_feats)
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) + bias
            h = F.relu(h)
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)
        alpha = self.alpha_linear(h)
        return alpha

    def forward(self, x):
        """Full pass: x is [points | features | view dirs] along the last dim."""
        dim = x.shape[-1]
        in_ch_feat = dim - self.in_ch_pts - self.in_ch_views
        input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1)
        h = input_pts
        bias = self.pts_bias(input_feats)  # if in_ch_feat == self.in_ch_feat else input_feats
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h) + bias
            h = F.relu(h)
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)
        if self.use_viewdirs:
            alpha = torch.relu(self.alpha_linear(h))
            feature = self.feature_linear(h)
            h = torch.cat([feature, input_views], -1)
            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)
            rgb = torch.sigmoid(self.rgb_linear(h))
            outputs = torch.cat([rgb, alpha], -1)
        else:
            outputs = self.output_linear(h)
        return outputs
class MVSNeRF(nn.Module):
    """Thin wrapper selecting one of the renderer MLP variants by net_type:
    'v0' -> Renderer_ours, 'v1' -> Renderer_attention, 'v2' -> Renderer_linear.
    All variants are built with use_viewdirs=True and output_ch=4.
    """
    # renderer registry; shared across instances
    _RENDERERS = {'v0': Renderer_ours, 'v1': Renderer_attention, 'v2': Renderer_linear}

    def __init__(self, D=8, W=256, input_ch_pts=3, input_ch_views=3, input_ch_feat=8, skips=[4], net_type='v2'):
        """Raises ValueError for an unknown net_type (the original silently
        left self.nerf unset, deferring the failure to first use)."""
        super(MVSNeRF, self).__init__()
        self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch_pts, input_ch_views, input_ch_feat
        if net_type not in self._RENDERERS:
            raise ValueError(
                "unknown net_type {!r}; expected one of {}".format(net_type, sorted(self._RENDERERS)))
        renderer_cls = self._RENDERERS[net_type]
        self.nerf = renderer_cls(D=D, W=W, input_ch_feat=input_ch_feat,
                                 input_ch=input_ch_pts, output_ch=4, skips=skips,
                                 input_ch_views=input_ch_views, use_viewdirs=True)

    def forward_alpha(self, x):
        """Density-only pass, delegated to the wrapped renderer."""
        return self.nerf.forward_alpha(x)

    def forward(self, x):
        """Full RGBA pass, delegated to the wrapped renderer."""
        RGBA = self.nerf(x)
        return RGBA
def create_nerf_mvs(args, pts_embedder=True, use_mvs=False, dir_embedder=True, Depth=128):
    """Instantiate mvs NeRF's MLP model.

    Builds the positional/direction embedders, the coarse (and optional fine)
    MVSNeRF MLPs, optionally the MVS encoding network, and reloads weights
    from `args.ckpt` when given.

    Returns:
        (render_kwargs_train, render_kwargs_test, start, grad_vars)
    """
    # Positional embedding for 3D points (or raw coordinates when disabled).
    if pts_embedder:
        embed_fn, input_ch = get_embedder(args.multires, args.i_embed, input_dims=args.pts_dim)
    else:
        embed_fn, input_ch = None, args.pts_dim

    # Embedding for viewing directions (or raw directions when disabled).
    embeddirs_fn = None
    if dir_embedder:
        embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed, input_dims=args.dir_dim)
    else:
        embeddirs_fn, input_ch_views = None, args.dir_dim

    skips = [4]
    model = MVSNeRF(D=args.netdepth, W=args.netwidth,
                    input_ch_pts=input_ch, skips=skips,
                    input_ch_views=input_ch_views, input_ch_feat=args.feat_dim, net_type=args.net_type).to(device)

    grad_vars = []
    grad_vars += list(model.parameters())

    # Optional fine network for hierarchical sampling.
    model_fine = None
    if args.N_importance > 0:
        model_fine = MVSNeRF(D=args.netdepth, W=args.netwidth,
                             input_ch_pts=input_ch, skips=skips,
                             input_ch_views=input_ch_views, input_ch_feat=args.feat_dim).to(device)
        grad_vars += list(model_fine.parameters())

    # Closure that binds the embedders/chunking into a single query function.
    network_query_fn = lambda pts, viewdirs, rays_feats, network_fn: run_network_mvs(pts, viewdirs, rays_feats, network_fn,
                                                                                    embed_fn=embed_fn,
                                                                                    embeddirs_fn=embeddirs_fn,
                                                                                    netchunk=args.netchunk)

    EncodingNet = None
    net_2d = FeatureNet(intermediate=True)
    if use_mvs:
        # NOTE(review): this call signature (positional net_2d, keyword Depth)
        # does not match the MVSNet class defined later in this file — confirm
        # which MVSNet implementation this function is meant to use.
        EncodingNet = MVSNet(net_2d, Depth=Depth).to(device)
        grad_vars += list(EncodingNet.parameters())  #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    start = 0

    ##########################
    # Load checkpoints
    ckpts = []
    if args.ckpt is not None and args.ckpt != 'None':
        ckpts = [args.ckpt]

    print('Found ckpts', ckpts)
    if len(ckpts) > 0:
        ckpt_path = ckpts[-1]
        print('Reloading from', ckpt_path)
        ckpt = torch.load(ckpt_path)

        # Load model
        if use_mvs:
            state_dict = ckpt['network_mvs_state_dict']
            EncodingNet.load_state_dict(state_dict)

        model.load_state_dict(ckpt['network_fn_state_dict'])
        # if model_fine is not None:
        #     model_fine.load_state_dict(ckpt['network_fine_state_dict'])

    ##########################
    render_kwargs_train = {
        'network_query_fn': network_query_fn,
        'perturb': args.perturb,
        'N_importance': args.N_importance,
        'network_fine': model_fine,
        'N_samples': args.N_samples,
        'network_fn': model,
        'network_mvs': EncodingNet,
        'use_viewdirs': args.use_viewdirs,
        'white_bkgd': args.white_bkgd,
        'raw_noise_std': args.raw_noise_std,
    }

    # Test-time kwargs: same as training but with perturbation disabled.
    render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train}
    render_kwargs_test['perturb'] = False

    return render_kwargs_train, render_kwargs_test, start, grad_vars
def create_mvs(args, mvs_mode=-1, depth=128):
    """Instantiate mvs NeRF's MLP model.

    Builds the 2D feature extractor and, depending on `mvs_mode`, either the
    project MVSNet (-1) or the official pretrained MVSNet (>= 1), and packs
    them into train/test kwargs dicts.

    Note: `args` is accepted for call-site compatibility but is not read in
    this body.

    Returns:
        (render_kwargs_train, render_kwargs_test, start)
    """
    net_2d = FeatureNet(intermediate=True).to(device)

    EncodingNet = None
    if mvs_mode == -1:
        EncodingNet = MVSNet(depth=depth).to(device)
    elif mvs_mode >= 1:
        EncodingNet = Ofcl_MVSNet(refine=False).to(device)
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source — eval() is assumed to apply only to the official pretrained
        # network branch; confirm against the original file.
        EncodingNet.eval()

    start = 0
    render_kwargs_train = {
        'network_featmvs': EncodingNet,
        'network_2d': net_2d,
    }

    # Test-time kwargs mirror training; 'perturb' is added/forced off here.
    render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train}
    render_kwargs_test['perturb'] = False

    return render_kwargs_train, render_kwargs_test, start
############################################# MVS Net models ################################################
class ConvBnReLU(nn.Module):
    """2D convolution followed by a normalization/activation module
    (InPlaceABN by default, which fuses BN and the activation)."""

    def __init__(self, in_channels, out_channels,
                 kernel_size=3, stride=1, pad=1,
                 norm_act=InPlaceABN):
        super(ConvBnReLU, self).__init__()
        # bias is disabled because the following norm layer has its own.
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = norm_act(out_channels)

    def forward(self, x):
        out = self.conv(x)
        return self.bn(out)
class ConvBnReLU3D(nn.Module):
    """3D convolution followed by a normalization/activation module
    (InPlaceABN by default)."""

    def __init__(self, in_channels, out_channels,
                 kernel_size=3, stride=1, pad=1,
                 norm_act=InPlaceABN):
        super(ConvBnReLU3D, self).__init__()
        # bias is disabled because the following norm layer has its own.
        self.conv = nn.Conv3d(in_channels, out_channels,
                              kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = norm_act(out_channels)

    def forward(self, x):
        out = self.conv(x)
        return self.bn(out)
################################### feature net ######################################
class FeatureNet(nn.Module):
    """
    output 3 levels of features using a FPN structure
    """

    def __init__(self, intermediate=False, norm_act=InPlaceABN):
        super(FeatureNet, self).__init__()
        # Three strided conv stacks: full res (8ch), 1/2 res (16ch), 1/4 res (32ch).
        self.conv0 = nn.Sequential(
            ConvBnReLU(3, 8, 3, 1, 1, norm_act=norm_act),
            ConvBnReLU(8, 8, 3, 1, 1, norm_act=norm_act))
        self.conv1 = nn.Sequential(
            ConvBnReLU(8, 16, 5, 2, 2, norm_act=norm_act),
            ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act),
            ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act))
        self.conv2 = nn.Sequential(
            ConvBnReLU(16, 32, 5, 2, 2, norm_act=norm_act),
            ConvBnReLU(32, 32, 3, 1, 1, norm_act=norm_act),
            ConvBnReLU(32, 32, 3, 1, 1, norm_act=norm_act))
        self.toplayer = nn.Conv2d(32, 32, 1)
        self.intermediate = intermediate

    def _upsample_add(self, x, y):
        # 2x bilinear upsample of x, then elementwise add with y.
        up = F.interpolate(x, scale_factor=2,
                           mode="bilinear", align_corners=True)
        return up + y

    def forward(self, x):
        # x: (B, V, 3, H, W) -> flatten views into the batch dimension.
        B, V, _, H, W = x.shape
        x = x.reshape(B * V, 3, H, W)

        feat0 = self.conv0(x)                    # (B*V, 8, H, W)
        feat1 = self.conv1(feat0)                # (B*V, 16, H//2, W//2)
        feat2 = self.toplayer(self.conv2(feat1)) # (B*V, 32, H//4, W//4)

        if self.intermediate:
            # Also expose the reshaped input and the intermediate pyramids.
            return [x, feat0, feat1, feat2]
        return [feat2]
class CostRegNet(nn.Module):
    """3D U-Net that regularizes a cost volume: three strided encoder stages
    with skip connections into three transposed-conv decoder stages."""

    def __init__(self, in_channels, norm_act=InPlaceABN):
        super(CostRegNet, self).__init__()
        # Encoder (channel widths 8 -> 16 -> 32 -> 64, stride-2 downsampling).
        self.conv0 = ConvBnReLU3D(in_channels, 8, norm_act=norm_act)
        self.conv1 = ConvBnReLU3D(8, 16, stride=2, norm_act=norm_act)
        self.conv2 = ConvBnReLU3D(16, 16, norm_act=norm_act)
        self.conv3 = ConvBnReLU3D(16, 32, stride=2, norm_act=norm_act)
        self.conv4 = ConvBnReLU3D(32, 32, norm_act=norm_act)
        self.conv5 = ConvBnReLU3D(32, 64, stride=2, norm_act=norm_act)
        self.conv6 = ConvBnReLU3D(64, 64, norm_act=norm_act)
        # Decoder (transposed convs back up to 8 channels).
        self.conv7 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, 3, padding=1, output_padding=1,
                               stride=2, bias=False),
            norm_act(32))
        self.conv9 = nn.Sequential(
            nn.ConvTranspose3d(32, 16, 3, padding=1, output_padding=1,
                               stride=2, bias=False),
            norm_act(16))
        self.conv11 = nn.Sequential(
            nn.ConvTranspose3d(16, 8, 3, padding=1, output_padding=1,
                               stride=2, bias=False),
            norm_act(8))
        # self.conv12 = nn.Conv3d(8, 8, 3, stride=1, padding=1, bias=True)

    def forward(self, x):
        # Encoder, keeping the skip tensors at each resolution.
        skip0 = self.conv0(x)
        skip2 = self.conv2(self.conv1(skip0))
        skip4 = self.conv4(self.conv3(skip2))
        out = self.conv6(self.conv5(skip4))
        # Decoder with additive skip connections; free skips eagerly to
        # keep peak memory low on large volumes.
        out = skip4 + self.conv7(out)
        del skip4
        out = skip2 + self.conv9(out)
        del skip2
        out = skip0 + self.conv11(out)
        del skip0
        # out = self.conv12(out)
        return out
class ProbNet(nn.Module):
    """Projects a feature volume to a single channel and applies softmax
    over the depth dimension (dim=2), producing a depth probability volume."""

    def __init__(self, in_channels, norm_act=InPlaceABN):
        super(ProbNet, self).__init__()
        self.conv0 = ConvBnReLU3D(in_channels, 1, norm_act=norm_act)

    def forward(self, x):
        logits = self.conv0(x)
        return F.softmax(logits, dim=2)
class MVSNet(nn.Module):
    """Builds a multi-view cost volume (feature variance + warped images)
    over sampled depth hypotheses and regularizes it with CostRegNet."""

    def __init__(self,
                 depth=128,
                 num_groups=1,
                 norm_act=InPlaceABN,
                 levels=1):
        super(MVSNet, self).__init__()
        self.levels = levels  # 3 depth levels
        self.n_depths = [128, 32, 8]
        self.G = num_groups  # number of groups in groupwise correlation
        self.N_importance = 0
        self.chunk = 1024
        self.D = depth  # number of depth hypotheses
        # Input channels of the regularizer: 32 variance channels + 9 image
        # channels (3 views x RGB), matching build_volume_costvar_img below.
        self.cost_reg_2 = CostRegNet(32 + 9, norm_act)

    def build_volume_costvar(self, feats, proj_mats, depth_values, pad=0):
        """Variance cost volume over the reference view (view 0).

        Returns (img_feat, in_masks): per-voxel feature variance across views
        and the per-voxel count of views whose warp lands in bounds.
        """
        # feats: (B, V, C, H, W)
        # proj_mats: (B, V, 3, 4)
        # depth_values: (B, D, H, W)
        # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w)
        # volume_sum [B, G, D, h, w]
        # prob_volume [B D H W]
        # volume_feature [B C D H W]
        B, V, C, H, W = feats.shape
        D = depth_values.shape[1]

        # Split off the reference view; iterate sources view-major.
        ref_feats, src_feats = feats[:, 0], feats[:, 1:]
        src_feats = src_feats.permute(1, 0, 2, 3, 4)  # (V-1, B, C, h, w)
        proj_mats = proj_mats[:, 1:]
        proj_mats = proj_mats.permute(1, 0, 2, 3)  # (V-1, B, 3, 4)

        if pad > 0:
            ref_feats = F.pad(ref_feats, (pad, pad, pad, pad), "constant", 0)

        ref_volume = ref_feats.unsqueeze(2).repeat(1, 1, D, 1, 1)  # (B, C, D, h, w)

        volume_sum = ref_volume
        volume_sq_sum = ref_volume ** 2
        del ref_feats

        # Starts at 1: the reference view always "sees" its own voxels.
        in_masks = torch.ones((B, 1, D, H + pad * 2, W + pad * 2), device=volume_sum.device)
        for i, (src_feat, proj_mat) in enumerate(zip(src_feats, proj_mats)):
            warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad)

            # Voxels whose sampling grid is inside (-1, 1) are valid.
            grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2)
            in_mask = ((grid > -1.0) * (grid < 1.0))
            in_mask = (in_mask[..., 0] * in_mask[..., 1])
            in_masks += in_mask.float()
            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # In-place accumulation at eval time to save memory.
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)
            del warped_volume, src_feat, proj_mat
        del src_feats, proj_mats

        # Variance across views: E[x^2] - E[x]^2 with per-voxel view counts.
        count = 1.0 / in_masks
        img_feat = volume_sq_sum * count - (volume_sum * count) ** 2
        del volume_sq_sum, volume_sum, count

        return img_feat, in_masks

    def build_volume_costvar_img(self, imgs, feats, proj_mats, depth_values, pad=0, vid=0):
        """Like build_volume_costvar, but with view `vid` as reference and the
        warped RGB images stored alongside the variance channels.

        Returns (img_feat, in_masks) where img_feat is (B, 9+32, D, h, w):
        first 9 channels are per-view RGB, last 32 the feature variance.
        """
        # feats: (B, V, C, H, W)
        # proj_mats: (B, V, 3, 4)
        # depth_values: (B, D, H, W)
        # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w)
        # volume_sum [B, G, D, h, w]
        # prob_volume [B D H W]
        # volume_feature [B C D H W]
        B, V, C, H, W = feats.shape
        D = depth_values.shape[1]

        cur_feats, src_feats = feats[:, vid, ...], feats.permute(1, 0, 2, 3, 4)  # (V, B, C, h, w)
        proj_mats = proj_mats.permute(1, 0, 2, 3)  # (V, B, 3, 4)

        if pad > 0:
            cur_feats = F.pad(cur_feats, (pad, pad, pad, pad), "constant", 0)

        img_feat = torch.empty((B, 9 + 32, D, *cur_feats.shape[-2:]), device=feats.device, dtype=torch.float)
        imgs = F.interpolate(imgs.view(B * V, *imgs.shape[2:]), (H, W), mode='bilinear', align_corners=False).view(B, V, -1, H, W).permute(1, 0, 2, 3, 4)
        # NOTE(review): the first 3 channels take imgs[0] while the features
        # use view `vid` — for vid != 0 this mixes views; confirm intended.
        img_feat[:, :3, :, pad:H + pad, pad:W + pad] = imgs[0].unsqueeze(2).expand(-1, -1, D, -1, -1)

        cur_volume = cur_feats.unsqueeze(2).repeat(1, 1, D, 1, 1)  # (B, C, D, h, w)

        volume_sum = cur_volume
        volume_sq_sum = cur_volume ** 2
        del cur_feats

        src_view_count = 0
        in_masks = torch.ones((B, V, D, H + pad * 2, W + pad * 2), device=volume_sum.device)
        for i, (src_img, src_feat, proj_mat) in enumerate(zip(imgs, src_feats, proj_mats)):
            # warped_volume: 1, 32, 128, 176, 208 B, D, H_pad, W_pad , grid B, D, W_pad, H_pad
            if i == vid:
                continue
            src_view_count += 1
            warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad)
            # Warp the source RGB with the same grid into its channel slot.
            img_feat[:, src_view_count * 3:(src_view_count + 1) * 3], _ = homo_warp(src_img, proj_mat, depth_values, src_grid=grid, pad=pad)

            grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2)
            in_mask = ((grid > -1.0) * (grid < 1.0))
            in_mask = (in_mask[..., 0] * in_mask[..., 1])
            in_masks[:, src_view_count] = in_mask.float()

            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # In-place accumulation at eval time to save memory.
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)
            del warped_volume, src_feat, proj_mat
        del src_feats, proj_mats

        # Variance across views in the trailing 32 channels.
        count = 1.0 / torch.sum(in_masks, dim=1, keepdim=True)
        img_feat[:, -32:] = volume_sq_sum * count - (volume_sum * count) ** 2
        del volume_sq_sum, volume_sum, count

        return img_feat, in_masks

    def forward(self, imgs, feats, proj_mats, near_far, pad=0, return_color=False, lindisp=False, vid=0):
        # imgs: (B, V, 3, H, W)
        # proj_mats: (B, V, 3, 4) from fine to coarse
        # init_depth_min, depth_interval: (B) or float
        # near_far (B, V, 2)
        B, V, _, H, W = imgs.shape

        # NOTE(review): reshape followed by view back to the same shape is a
        # no-op round trip; presumably left over from an earlier version.
        imgs = imgs.reshape(B * V, 3, H, W)
        imgs = imgs.view(B, V, 3, H, W)

        feats_l = feats[-1]  # (B*V, C, h, w)
        feats_l = feats_l.view(B, V, *feats_l.shape[1:])  # (B, V, C, h, w)

        # Sample D depth hypotheses between near and far, linearly in depth
        # or in disparity.
        t_vals = torch.linspace(0., 1., steps=self.D, device=imgs.device, dtype=imgs.dtype)  # (B, D)
        near, far = near_far  # assume batch size==1
        if not lindisp:
            depth_values = near * (1. - t_vals) + far * (t_vals)
        else:
            depth_values = 1. / (1. / near * (1. - t_vals) + 1. / far * (t_vals))

        # print("near , far", near, far)
        # print("depth_values", depth_values)
        depth_values = depth_values.unsqueeze(0)
        # volume_feat, in_masks = self.build_volume_costvar(feats_l, proj_mats, depth_values, pad=pad)
        volume_feat, in_masks = self.build_volume_costvar_img(imgs, feats_l, proj_mats, depth_values, pad=pad, vid=vid)
        if return_color:
            # NOTE(review): this reassigns feats_l but the return below hands
            # back the original `feats` argument, so this value is unused from
            # the caller's perspective — confirm whether `feats_l` was meant.
            feats_l = torch.cat((volume_feat[:, :V * 3].view(B, V, 3, *volume_feat.shape[2:]), in_masks.unsqueeze(2)), dim=2)

        # print("pre cost volume_feat", volume_feat.shape)  ([1, 41, 128, 176, 208])
        volume_feat = self.cost_reg_2(volume_feat)  # (B, 1, D, h, w)
        volume_feat = volume_feat.reshape(1, -1, *volume_feat.shape[2:])

        return volume_feat, feats, depth_values
class RefVolume(nn.Module):
    """Wraps a feature volume as a trainable parameter and samples it
    trilinearly at normalized reference-space ray coordinates."""

    def __init__(self, volume):
        super(RefVolume, self).__init__()
        self.feat_volume = nn.Parameter(volume)

    def forward(self, ray_coordinate_ref):
        '''coordinate: [N, 3]
            z,x,y
        '''
        H, W = ray_coordinate_ref.shape[-3:-1]
        # Map coordinates from [0, 1] to grid_sample's [-1, 1] range.
        grid = ray_coordinate_ref.view(-1, 1, H, W, 3)
        grid = grid.to(self.feat_volume.device) * 2 - 1.0  # [1 1 H W 3] (x,y,z)
        sampled = F.grid_sample(self.feat_volume, grid,
                                align_corners=True, mode='bilinear')
        # Drop the singleton depth dim and move channels last: (H, W, C).
        return sampled[:, :, 0].permute(2, 3, 0, 1).squeeze()
| 37,147 | 35.963184 | 166 | py |
pointnerf | pointnerf-master/run/editing.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from options import EditOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
torch.manual_seed(0)
np.random.seed(0)
import cv2
from PIL import Image
from tqdm import tqdm
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in dB: -10 * log10(mse)."""
    return torch.log(x) * (-10. / np.log(10.))
def save_image(img_array, filepath):
    """Write a grayscale (HxW) or color (HxWx3 / HxWx4) array to `filepath`,
    creating parent directories. Non-uint8 input is assumed to be in [0, 1]
    and is clipped and rescaled to 8-bit."""
    ndim = len(img_array.shape)
    assert ndim == 2 or (ndim == 3 and img_array.shape[2] in [3, 4])

    if img_array.dtype != np.uint8:
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)

    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    Image.fromarray(img_array).save(filepath)
def masking(mask, firstdim_lst, seconddim_lst):
    """Apply a boolean mask to two groups of tensors: along dim 0 for
    `firstdim_lst` and along dim 1 for `seconddim_lst`. None entries pass
    through unchanged."""
    masked_first = [None if t is None else t[mask, ...] for t in firstdim_lst]
    masked_second = [None if t is None else t[:, mask, ...] for t in seconddim_lst]
    return masked_first, masked_second
def render(model, dataset, visualizer, opt, gen_vid=False):
    """Render every view in `dataset` with `model`, splitting each image into
    pixel chunks, then hand the assembled full-resolution visuals to the
    visualizer (and optionally assemble them into a video)."""
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.render_total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size  # rays per forward pass
    height = dataset.height
    width = dataset.width
    visualizer.reset()

    cam_posts = []
    cam_dirs = []
    for i in range(0, total_num):
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        # Flatten pixel indices to (B, H*W, C) so chunks can be sliced.
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # cam_posts.append(data['campos'])
        # cam_dirs.append(data['raydir'] + data['campos'][None,...])
        # continue
        visuals = None
        stime = time.time()
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            # Overwrite the per-chunk ray slices in the shared data dict.
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            # print("tmpgts", tmpgts["gt_image"].shape)
            # print(data["pixel_idx"])
            model.set_input(data)
            model.test()
            curr_visuals = model.get_current_visuals(data=data)

            if visuals is None:
                # Lazily allocate a flat (H*W, 3) buffer per visual key.
                visuals = {}
                for key, value in curr_visuals.items():
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                for key, value in curr_visuals.items():
                    visuals[key][start:end, :] = value.cpu().numpy()

        # Reshape the flat buffers back into (H, W, 3) images.
        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)

        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)

    # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False)
    # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0),None, None, save_ref=False)
    # print("vis")
    # exit()
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num), 0)
        print('--------------------------------Finish generating vid--------------------------------')

    return
def get_latest_epoch(resume_dir):
    """Return the epoch prefix (as a string) of the newest '*_states.pth'
    checkpoint in `resume_dir`, or None when no checkpoint exists.
    Creates the directory if it is missing."""
    os.makedirs(resume_dir, exist_ok=True)
    epochs = [fname.split("_")[0]
              for fname in os.listdir(resume_dir)
              if fname.endswith("_states.pth")]
    if not epochs:
        return None
    # Compare numerically but return the original string form.
    return max(epochs, key=int)
def load_parts_info(opt, name, inds_name, trans_name):
    """Load one edited scene part from `<checkpoints_dir>/edit_srcs/<name>`.

    Args:
        opt: options carrying checkpoints_dir and resume_iter.
        name: scene folder name under edit_srcs.
        inds_name: "all" selects every point; otherwise a txt file of indices
            under parts_index/.
        trans_name: "no" means identity; otherwise a 4x4 transform txt file
            under transforms/.

    Returns:
        (saved_features, inds, Rot, Translation). NOTE(review): Rot and
        Translation are numpy arrays when read from file but torch tensors
        for the identity default; inds is likewise either a torch bool tensor
        or a numpy float array — callers must convert (main() does).
    """
    resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(os.path.join(opt.checkpoints_dir, "edit_srcs", name))
    checkpoint = os.path.join(opt.checkpoints_dir, "edit_srcs", name, "{}_net_ray_marching.pth".format(resume_iter))
    trans_file = None if trans_name.strip() == "no" else os.path.join(opt.checkpoints_dir, "edit_srcs", name, "transforms", trans_name + ".txt")
    inds_file = None if inds_name.strip() == "all" else os.path.join(opt.checkpoints_dir, "edit_srcs", name, "parts_index", inds_name + ".txt")
    # Identity transform unless a transform file is given.
    Matrix = torch.eye(4, device="cuda", dtype=torch.float32) if trans_file is None else np.loadtxt(trans_file)
    Rot = Matrix[:3, :3]
    Translation = Matrix[:3, 3]
    saved_features = torch.load(checkpoint, map_location="cuda")
    print("loaded neural points from ", checkpoint, saved_features.keys())
    if inds_file is None:
        # Select all points.
        inds = torch.ones(len(saved_features["neural_points.xyz"]), dtype=torch.bool, device="cuda")
    else:
        inds = np.loadtxt(inds_file)
    return saved_features, inds, Rot, Translation
def get_latest_epoch(resume_dir):
    """Return the epoch prefix of the newest '*_states.pth' checkpoint in
    `resume_dir` (None when empty), creating the directory if needed.

    NOTE(review): this helper is defined twice in this module with identical
    behavior; this later definition wins at import time — consider removing
    one copy."""
    os.makedirs(resume_dir, exist_ok=True)
    best = None
    for fname in os.listdir(resume_dir):
        if not fname.endswith("_states.pth"):
            continue
        prefix = fname.split("_")[0]
        if best is None or int(prefix) > int(best):
            best = prefix
    return best
def main():
    """Entry point: load neural-point checkpoints for one or more scene
    parts, apply a rigid transform to each, merge them into a single model,
    render the test split to a video, and save the merged checkpoint."""
    torch.backends.cudnn.benchmark = True
    opt = EditOptions().parse()
    print("opt.color_loss_items ", opt.color_loss_items)

    if opt.debug:
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED +
              '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print(
            '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' +
            fmt.END)
    visualizer = Visualizer(opt)

    # Test-time copy of the options: full-image sampling, batch size 1.
    test_opt = copy.deepcopy(opt)
    test_opt.is_train = False
    test_opt.random_sample = 'no_crop'
    test_opt.random_sample_size = min(32, opt.random_sample_size)
    test_opt.batch_size = 1
    test_opt.n_threads = 0
    test_opt.split = "test"
    # test_dataset = create_dataset(test_opt)
    img_lst = None
    opt.is_train = False
    opt.mode = 2
    if opt.resume_iter == "best":
        opt.resume_iter = "latest"
    opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
    model = create_model(opt)

    # Accumulators for the merged point cloud; embedding width is 63.
    points_xyz_all = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    points_embedding_all = torch.zeros([1, 0, 63], device="cuda", dtype=torch.float32)
    points_conf_all = torch.zeros([1, 0, 1], device="cuda", dtype=torch.float32)
    points_color_all = torch.zeros([1, 0, 3], device="cuda", dtype=torch.float32)
    points_dir_all = torch.zeros([1, 0, 3], device="cuda", dtype=torch.float32)
    Rw2c_all = torch.zeros([0, 3, 3], device="cuda", dtype=torch.float32)
    for name, inds_name, trans_name in zip(opt.neural_points_names, opt.parts_index_names, opt.Transformation_names):
        saved_features, inds, Rot, Tran = load_parts_info(opt, name, inds_name, trans_name)
        inds = torch.as_tensor(inds, dtype=torch.bool, device="cuda")
        Rot = torch.as_tensor(Rot, dtype=torch.float32, device=inds.device)
        Tran = torch.as_tensor(Tran, dtype=torch.float32, device=inds.device)
        # Slice every stored tensor down to the selected part; keys that are
        # absent from the checkpoint become None.
        xyz, points_embeding, points_conf, points_dir, points_color, eulers, Rw2c = saved_features["neural_points.xyz"][inds,:], saved_features["neural_points.points_embeding"][:,inds,:] if "neural_points.points_embeding" in saved_features else None,saved_features["neural_points.points_conf"][:,inds,:] if "neural_points.points_conf" in saved_features else None, saved_features["neural_points.points_dir"][:,inds,:] if "neural_points.points_dir" in saved_features else None, saved_features["neural_points.points_color"][:,inds,:] if "neural_points.points_color" in saved_features else None, saved_features["neural_points.eulers"] if "neural_points.eulers" in saved_features else None, saved_features["neural_points.Rw2c"] if "neural_points.Rw2c" in saved_features else None

        # Apply the 4x4 homogeneous transform to the part's points.
        Mat = torch.eye(4, device=Rot.device, dtype=torch.float32)
        Mat[:3, :3] = Rot
        Mat[:3, 3] = Tran
        xyz = (torch.cat([xyz, torch.ones_like(xyz[:, :1])], dim=-1) @ Mat.transpose(0, 1))[:, :3]
        print("Rot", Rot)
        Rw2c = Rot if Rw2c is None else Rw2c @ Rot.transpose(0, 1)  # w2c is reversed against movement
        Rw2c = Rw2c[None, ...].expand(len(xyz), -1, -1)
        points_xyz_all = torch.cat([points_xyz_all, xyz], dim=0)
        Rw2c_all = torch.cat([Rw2c_all, Rw2c], dim=0)
        points_embedding_all = torch.cat([points_embedding_all, points_embeding], dim=1)
        points_conf_all = torch.cat([points_conf_all, points_conf], dim=1)
        points_color_all = torch.cat([points_color_all, points_color], dim=1)
        points_dir_all = torch.cat([points_dir_all, points_dir], dim=1)

    # Install the merged point cloud into the model (editing mode).
    model.set_points(points_xyz_all.cuda(), points_embedding_all.cuda(), points_color=points_color_all.cuda(),
                     points_dir=points_dir_all.cuda(), points_conf=points_conf_all.cuda(), Rw2c=Rw2c_all.cuda(), editing=True)
    visualizer.save_neural_points("pnts", model.neural_points.xyz, None, None, save_ref=False)
    print("vis")
    # exit()

    test_opt.nerf_splits = ["test"]
    test_opt.split = "test"
    test_opt.test_num_step = 1  # opt.test_num_step
    test_opt.name = opt.name + "/{}".format(opt.render_name)
    test_opt.render_only = 1
    model.opt.no_loss = 1
    model.opt.is_train = 0
    model.setup(opt)

    print("full datasets test:")
    test_dataset = create_dataset(test_opt)
    render(model, test_dataset, Visualizer(test_opt), test_opt, gen_vid=True)
    # model.opt.no_loss = 0
    # model.opt.is_train = 1

    # Save the merged model as epoch/step 0.
    other_states = {
        'epoch_count': 0,
        'total_steps': 0,
    }
    print('saving model ({}, epoch {}, total_steps {})'.format(opt.name, 0, 0))
    model.save_networks(0, other_states)
#
# def save_points_conf(visualizer, xyz, points_color, points_conf, total_steps):
# print("total:", xyz.shape, points_color.shape, points_conf.shape)
# colors, confs = points_color[0], points_conf[0,...,0]
# pre = -1000
# for i in range(12):
# thresh = (i * 0.1) if i <= 10 else 1000
# mask = ((confs <= thresh) * (confs > pre)) > 0
# thresh_xyz = xyz[mask, :]
# thresh_color = colors[mask, :]
# visualizer.save_neural_points(f"{total_steps}-{thresh}", thresh_xyz, thresh_color[None, ...], None, save_ref=False)
# pre = thresh
# exit()
# Script entry point: run the part-merging/rendering pipeline.
if __name__ == '__main__':
    main()
| 11,213 | 42.465116 | 774 | py |
pointnerf | pointnerf-master/run/evaluate.py | import os, sys, time, argparse, cv2
import numpy as np
# Compatibility shim: old scikit-image (<0.16) exposed compare_ssim /
# compare_psnr under skimage.measure; newer releases moved them to
# skimage.metrics under different names and (>=0.19) replaced the
# `multichannel` keyword with `channel_axis` (removed entirely in 1.x).
try:
    from skimage.measure import compare_ssim
    from skimage.measure import compare_psnr
except ImportError:  # was a bare except; only an import failure is expected
    from skimage.metrics import structural_similarity
    from skimage.metrics import peak_signal_noise_ratio as compare_psnr

    def compare_ssim(gt, img, win_size, multichannel=True):
        """Map the legacy compare_ssim signature onto structural_similarity."""
        try:
            # skimage >= 0.19: `channel_axis` replaces `multichannel`.
            return structural_similarity(
                gt, img, win_size=win_size,
                channel_axis=-1 if multichannel else None)
        except TypeError:
            # skimage 0.16-0.18 still takes `multichannel`.
            return structural_similarity(gt, img, win_size=win_size,
                                         multichannel=multichannel)
import torch
from skimage.metrics import mean_squared_error
import lpips
# CLI configuration; parsed only when this file runs as a script (see the
# __main__ guard at the bottom). NOTE(review): the --metrics help text says
# "psnr, ssim and rmse" but the default list is psnr/ssim/lpips/vgglpips.
parser = argparse.ArgumentParser(description="compute scores")
parser.add_argument('-i', '--imgFolder', help="The folder that contain output images.")
parser.add_argument('-g', '--gtFolder', default=None, help="The folder that contain gt images. By default it uses imgFolder")
parser.add_argument('-o', '--outFolder', default=None, help="The folder that contain output files. By default it uses imgFolder")
parser.add_argument('-is', '--imgStr', default="step-%04d-fine_raycolor.png", help="The string format for input images.")
parser.add_argument('-gs', '--gtStr', default="step-%04d-gt_image.png", help="The string format for GT images.")
parser.add_argument('-l', '--id_list', nargs='+', default=list(range(999)), help="The list of ids to test. By default it's 0~999.")
parser.add_argument('-m', '--metrics', nargs='+', default=["psnr", "ssim", "lpips", "vgglpips"], help="The list of metrics to compute. By default it computes psnr, ssim and rmse.")
def report_metrics(gtFolder, imgFolder, outFolder, metrics, id_list, imgStr="step-%04d-fine_raycolor.png", gtStr="step-%04d-gt_image.png", use_gpu=False, print_info=True):
    """Compute image-quality metrics over numbered (output, GT) image pairs
    and write per-metric .txt files plus a summary scores.txt to outFolder.

    Scanning stops at the first index whose output or GT image fails to
    load, so a dense id_list effectively means "evaluate everything present".
    Supported metrics: "psnr", "ssim", "lpips" (AlexNet), "vgglpips" (VGG),
    "rmse"; anything else raises NotImplementedError.
    """
    total = {}  # metric name -> list of per-image values
    loss_fn, loss_fn_vgg = None, None
    if print_info:
        print("test id_list", id_list)
        print(gtFolder, imgFolder, outFolder)
        print(imgStr, gtStr)
    if "lpips" in metrics:
        loss_fn = lpips.LPIPS(net='alex', version='0.1')  # we follow NVSF to use alex 0.1, NeRF use lpips.LPIPS(net='vgg')
        loss_fn = loss_fn.cuda() if use_gpu else loss_fn
    if "vgglpips" in metrics:
        loss_fn_vgg = lpips.LPIPS(net='vgg', version='0.1')  # lpips.LPIPS(net='vgg')
        loss_fn_vgg = loss_fn_vgg.cuda() if use_gpu else loss_fn_vgg

    for i in id_list:
        img = cv2.imread(imgFolder + "/" + imgStr % i)
        gt = cv2.imread(gtFolder + "/" + gtStr % i)
        # print("img", imgFolder+"/"+imgStr%i)
        if img is None or gt is None:
            # First missing pair terminates the scan.
            break
        # To float in [0, 1]; cv2 loads BGR but both images get the same order.
        img = np.asarray(img, np.float32) / 255.0
        gt = np.asarray(gt, np.float32) / 255.0
        for key in metrics:
            if key == "psnr":
                val = compare_psnr(gt, img)
            elif key == "ssim":
                val = compare_ssim(gt, img, 11, multichannel=True)
            elif key == "lpips":
                # image should be RGB, IMPORTANT: normalized to [-1,1]
                img_tensor = torch.from_numpy(img)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                gt_tensor = torch.from_numpy(gt)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                img_tensor = img_tensor.cuda() if use_gpu else img_tensor
                gt_tensor = gt_tensor.cuda() if use_gpu else gt_tensor
                val = loss_fn(img_tensor, gt_tensor).item()
            elif key == "vgglpips":
                # image should be RGB, IMPORTANT: normalized to [-1,1]
                img_tensor = torch.from_numpy(img)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                gt_tensor = torch.from_numpy(gt)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                img_tensor = img_tensor.cuda() if use_gpu else img_tensor
                gt_tensor = gt_tensor.cuda() if use_gpu else gt_tensor
                val = loss_fn_vgg(img_tensor, gt_tensor).item()
            elif key == "rmse":
                val = np.sqrt(mean_squared_error(gt, img))
            else:
                raise NotImplementedError("metrics of {} not implemented".format(key))
            if key not in total:
                total[key] = [val]
            else:
                total[key].append(val)

    del loss_fn
    del loss_fn_vgg
    torch.cuda.empty_cache()
    # NOTE(review): this reports len(id_list), not the number of pairs
    # actually evaluated before an early break — confirm if intended.
    print(len(id_list), "images computed")
    if len(total) > 0:
        outStr = ""
        for key in total.keys():
            vals = np.asarray(total[key]).reshape(-1)
            np.savetxt(outFolder + "/" + key + '.txt', vals)
            outStr += key + ": %.6f\n" % np.mean(vals)
        print(outStr)
        with open(outFolder + "/scores.txt", "w") as f:
            f.write(outStr)
############################
# Script entry point: parse CLI args, defaulting the GT and output folders
# to the image folder, then run the metric report on GPU.
if __name__ == '__main__':
    args = parser.parse_args()
    if args.gtFolder is None:
        args.gtFolder = args.imgFolder
    if args.outFolder is None:
        args.outFolder = args.imgFolder
    report_metrics(args.gtFolder, args.imgFolder, args.outFolder, args.metrics, args.id_list, imgStr=args.imgStr, gtStr=args.gtStr, use_gpu=True, print_info=False)
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/lego_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcollego360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images --imgStr "lego_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/lego_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcollego360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images --imgStr "lego_test_64_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ship_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcolship360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images --imgStr "ship_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ship_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcolship360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images --imgStr "ship_test_64_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/chair_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pchair360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3.sh/test_250000/images --imgStr "chair_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/chair_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pchair360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3.sh/test_250000/images --imgStr "chair_test_64_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/materials_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/materials360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "materials_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/materials_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/materials360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "materials_test_64_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/drums_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/drums360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_pru6_e4_prle3/test_250000/images --imgStr "drums_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/drums_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/drums360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_pru6_e4_prle3/test_250000/images --imgStr "drums_test_64_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ficus_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pficus360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl8e3/test_250000/images --imgStr "ficus_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ficus_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pficus360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl8e3/test_250000/images --imgStr "ficus_test_64_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/mic_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pmic360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl2e3/test_250000/images --imgStr "mic_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/mic_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pmic360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl2e3/test_250000/images --imgStr "mic_test_64_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/hotdog_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/photdog360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask5_agg2_zeroone0.0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "hotdog_test_8_50_%d_infer.png"
# python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/hotdog_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/photdog360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask5_agg2_zeroone0.0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "hotdog_test_64_50_%d_infer.png" | 9,769 | 61.229299 | 324 | py |
pointnerf | pointnerf-master/run/train_ft_nonstop.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
# Fix RNG seeds at import time for reproducible runs (torch and numpy only;
# the stdlib `random` module imported below is not seeded here).
torch.manual_seed(0)
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def mse2psnr(x): return -10.* torch.log(x)/np.log(10.)
def save_image(img_array, filepath):
    """Write *img_array* (HxW grayscale or HxWx3/4 color) to *filepath*.

    Non-uint8 arrays are assumed to be in [0, 1]: they are clipped, scaled
    to 255 and cast to uint8 before saving. Parent directories are created
    as needed.
    """
    is_gray = len(img_array.shape) == 2
    is_color = len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]
    assert is_gray or is_color
    if img_array.dtype != np.uint8:
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    Image.fromarray(img_array).save(filepath)
def nearest_view(campos, raydir, xyz, id_list):
    """Pick, for each point in *xyz* (N, 3), the best camera among *campos*
    (M, 3) by minimising a score that mixes scaled Euclidean distance with
    misalignment against *raydir* (3,).

    Processes points in chunks of 10000 to bound peak memory. *id_list* is
    unused but kept for interface compatibility.

    Returns an (N, 1) long tensor of camera indices.
    """
    chunk = 10000
    picked = torch.zeros([0, 1], device=campos.device, dtype=torch.long)
    for start in range(0, len(xyz), chunk):
        stop = min(len(xyz), start + chunk)
        offsets = xyz[start:stop, None, :] - campos[None, ...]  # n, M, 3
        norms = torch.norm(offsets, dim=-1)  # n, M
        unit = offsets / (norms[..., None] + 1e-6)  # n, M, 3
        # Smaller score = closer camera whose direction agrees with raydir.
        score = norms / 200 + (1.1 - torch.sum(unit * raydir[None, :], dim=-1))
        picked = torch.cat([picked, torch.argmin(score, dim=1).view(-1, 1)], dim=0)
    return picked
def gen_points_filter_embeddings(dataset, visualizer, opt):
    """Generate an initial neural point cloud from the MVS model.

    Runs the MVS point model over every view in ``dataset.view_id_list``,
    filters the per-view point clouds (confidence masks, scene-space bounds,
    alpha/visual-hull masking, optional voxel downsampling), then queries
    per-point embeddings/colors/directions/confidences from the model.

    Side effects: mutates ``opt`` (is_train/mode), creates and destroys a
    model, and saves visualizations through ``visualizer``.

    Returns: (xyz_world_all, points_embedding_all, points_color_all,
    points_dir_all, points_conf_all, image list, c2w list, w2c list,
    intrinsics list, HDWD list).
    """
    print('-----------------------------------Generate Points-----------------------------------')
    opt.is_train=False
    opt.mode = 1
    model = create_model(opt)
    model.setup(opt)
    model.eval()
    cam_xyz_all = []
    intrinsics_all = []
    extrinsics_all = []
    confidence_all = []
    points_mask_all = []
    intrinsics_full_lst = []
    confidence_filtered_all = []
    near_fars_all = []
    gpu_filter = True
    # For large view sets, stage per-view tensors on CPU to avoid GPU OOM.
    cpu2gpu= len(dataset.view_id_list) > 300
    imgs_lst, HDWD_lst, c2ws_lst, w2cs_lst, intrinsics_lst = [],[],[],[],[]
    with torch.no_grad():
        for i in tqdm(range(0, len(dataset.view_id_list))):
            data = dataset.get_init_item(i)
            model.set_input(data)
            # intrinsics 1, 3, 3, 3
            points_xyz_lst, photometric_confidence_lst, point_mask_lst, intrinsics_lst, extrinsics_lst, HDWD, c2ws, w2cs, intrinsics, near_fars = model.gen_points()
            # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0)
            B, N, C, H, W, _ = points_xyz_lst[0].shape
            # print("points_xyz_lst",points_xyz_lst[0].shape)
            cam_xyz_all.append((points_xyz_lst[0].cpu() if cpu2gpu else points_xyz_lst[0]) if gpu_filter else points_xyz_lst[0].cpu().numpy())
            # intrinsics_lst[0] 1, 3, 3
            intrinsics_all.append(intrinsics_lst[0] if gpu_filter else intrinsics_lst[0])
            extrinsics_all.append(extrinsics_lst[0] if gpu_filter else extrinsics_lst[0].cpu().numpy())
            if opt.manual_depth_view !=0:
                confidence_all.append((photometric_confidence_lst[0].cpu() if cpu2gpu else photometric_confidence_lst[0]) if gpu_filter else photometric_confidence_lst[0].cpu().numpy())
            points_mask_all.append((point_mask_lst[0].cpu() if cpu2gpu else point_mask_lst[0]) if gpu_filter else point_mask_lst[0].cpu().numpy())
            imgs_lst.append(data["images"].cpu())
            HDWD_lst.append(HDWD)
            c2ws_lst.append(c2ws)
            w2cs_lst.append(w2cs)
            intrinsics_full_lst.append(intrinsics)
            near_fars_all.append(near_fars[0,0])
            # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0)
        # #################### start query embedding ##################
        torch.cuda.empty_cache()
        if opt.manual_depth_view != 0:
            # Confidence-based filtering of the per-view depth point clouds.
            if gpu_filter:
                _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=True, return_w=True, cpu2gpu=cpu2gpu, near_fars_all=near_fars_all)
            else:
                _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks(cam_xyz_all, [intr.cpu().numpy() for intr in intrinsics_all], extrinsics_all, confidence_all, points_mask_all, opt)
            # print(xyz_ref_lst[0].shape) # 224909, 3
        else:
            cam_xyz_all = [cam_xyz_all[i].reshape(-1,3)[points_mask_all[i].reshape(-1),:] for i in range(len(cam_xyz_all))]
            xyz_world_all = [np.matmul(np.concatenate([cam_xyz_all[i], np.ones_like(cam_xyz_all[i][..., 0:1])], axis=-1), np.transpose(np.linalg.inv(extrinsics_all[i][0,...])))[:, :3] for i in range(len(cam_xyz_all))]
            # NOTE(review): `filter_by_masks` is not imported in this module
            # (only `filter_utils` is); this branch (manual_depth_view == 0)
            # would raise NameError — verify the intended module/function.
            xyz_world_all, cam_xyz_all, confidence_filtered_all = filter_by_masks.range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_all, opt)
            del cam_xyz_all
        # for i in range(len(xyz_world_all)):
        #     visualizer.save_neural_points(i, torch.as_tensor(xyz_world_all[i], device="cuda", dtype=torch.float32), None, data, save_ref=opt.load_points==0)
        # exit()
        # xyz_world_all = xyz_world_all.cuda()
        # confidence_filtered_all = confidence_filtered_all.cuda()
        # points_vid tags every point with the index of its source view.
        points_vid = torch.cat([torch.ones_like(xyz_world_all[i][...,0:1]) * i for i in range(len(xyz_world_all))], dim=0)
        xyz_world_all = torch.cat(xyz_world_all, dim=0) if gpu_filter else torch.as_tensor(
            np.concatenate(xyz_world_all, axis=0), device="cuda", dtype=torch.float32)
        confidence_filtered_all = torch.cat(confidence_filtered_all, dim=0) if gpu_filter else torch.as_tensor(np.concatenate(confidence_filtered_all, axis=0), device="cuda", dtype=torch.float32)
        print("xyz_world_all", xyz_world_all.shape, points_vid.shape, confidence_filtered_all.shape)
        torch.cuda.empty_cache()
        # visualizer.save_neural_points(0, xyz_world_all, None, None, save_ref=False)
        # print("vis 0")
        print("%%%%%%%%%%%%%  getattr(dataset, spacemin, None)", getattr(dataset, "spacemin", None))
        if getattr(dataset, "spacemin", None) is not None:
            # Drop points outside the dataset's axis-aligned scene bounds.
            mask = (xyz_world_all - dataset.spacemin[None, ...].to(xyz_world_all.device)) >= 0
            mask *= (dataset.spacemax[None, ...].to(xyz_world_all.device) - xyz_world_all) >= 0
            mask = torch.prod(mask, dim=-1) > 0
            first_lst, second_lst = masking(mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
            xyz_world_all, points_vid, confidence_filtered_all = first_lst
        # visualizer.save_neural_points(50, xyz_world_all, None, None, save_ref=False)
        # print("vis 50")
        if getattr(dataset, "alphas", None) is not None:
            # Visual-hull style masking using per-view alpha maps.
            vishull_mask = mvs_utils.alpha_masking(xyz_world_all, dataset.alphas, dataset.intrinsics, dataset.cam2worlds, dataset.world2cams, dataset.near_far if opt.ranges[0] < -90.0 and getattr(dataset,"spacemin",None) is None else None, opt=opt)
            first_lst, second_lst = masking(vishull_mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
            xyz_world_all, points_vid, confidence_filtered_all = first_lst
            print("alpha masking xyz_world_all", xyz_world_all.shape, points_vid.shape)
        # visualizer.save_neural_points(100, xyz_world_all, None, data, save_ref=opt.load_points == 0)
        # print("vis 100")
        if opt.vox_res > 0:
            # Voxel downsampling; subsample first if the cloud is huge.
            xyz_world_all, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(xyz_world_all.cuda() if len(xyz_world_all) < 99999999 else xyz_world_all[::(len(xyz_world_all)//99999999+1),...].cuda(), opt.vox_res)
            points_vid = points_vid[sampled_pnt_idx,:]
            confidence_filtered_all = confidence_filtered_all[sampled_pnt_idx]
            print("after voxelize:", xyz_world_all.shape, points_vid.shape)
            xyz_world_all = xyz_world_all.cuda()

        # Re-split the filtered cloud per source view for embedding queries.
        xyz_world_all = [xyz_world_all[points_vid[:,0]==i, :] for i in range(len(HDWD_lst))]
        confidence_filtered_all = [confidence_filtered_all[points_vid[:,0]==i] for i in range(len(HDWD_lst))]
        cam_xyz_all = [(torch.cat([xyz_world_all[i], torch.ones_like(xyz_world_all[i][...,0:1])], dim=-1) @ extrinsics_all[i][0].t())[...,:3] for i in range(len(HDWD_lst))]
        points_embedding_all, points_color_all, points_dir_all, points_conf_all = [], [], [], []
        for i in tqdm(range(len(HDWD_lst))):
            if len(xyz_world_all[i]) > 0:
                embedding, color, dir, conf = model.query_embedding(HDWD_lst[i], torch.as_tensor(cam_xyz_all[i][None, ...], device="cuda", dtype=torch.float32), torch.as_tensor(confidence_filtered_all[i][None, :, None], device="cuda", dtype=torch.float32) if len(confidence_filtered_all) > 0 else None, imgs_lst[i].cuda(), c2ws_lst[i], w2cs_lst[i], intrinsics_full_lst[i], 0, pointdir_w=True)
                points_embedding_all.append(embedding)
                points_color_all.append(color)
                points_dir_all.append(dir)
                points_conf_all.append(conf)

        xyz_world_all = torch.cat(xyz_world_all, dim=0)
        points_embedding_all = torch.cat(points_embedding_all, dim=1)
        points_color_all = torch.cat(points_color_all, dim=1) if points_color_all[0] is not None else None
        points_dir_all = torch.cat(points_dir_all, dim=1) if points_dir_all[0] is not None else None
        points_conf_all = torch.cat(points_conf_all, dim=1) if points_conf_all[0] is not None else None

        visualizer.save_neural_points(200, xyz_world_all, points_color_all, data, save_ref=opt.load_points == 0)
        print("vis")
        # The temporary MVS model is no longer needed once features are cached.
        model.cleanup()
        del model
    return xyz_world_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, [img[0].cpu() for img in imgs_lst], [c2w for c2w in c2ws_lst], [w2c for w2c in w2cs_lst] , intrinsics_all, [list(HDWD) for HDWD in HDWD_lst]
def masking(mask, firstdim_lst, seconddim_lst):
    """Index each tensor in *firstdim_lst* with *mask* along dim 0 and each
    tensor in *seconddim_lst* with *mask* along dim 1. ``None`` entries are
    passed through unchanged. Returns the two filtered lists."""
    masked_first = []
    for tensor in firstdim_lst:
        masked_first.append(None if tensor is None else tensor[mask, ...])
    masked_second = []
    for tensor in seconddim_lst:
        masked_second.append(None if tensor is None else tensor[:, mask, ...])
    return masked_first, masked_second
def render_vid(model, dataset, visualizer, opt, bg_info, steps=0, gen_vid=True):
    """Render every frame of *dataset* (dummy-rotation poses) chunk by chunk
    and optionally assemble the saved frames into a video.

    Rays are processed in chunks of random_sample_size**2 pixels to bound
    GPU memory. When ``opt.bgmodel`` ends with "plane", background rays are
    taken from precomputed *bg_info* (or generated on the fly).
    """
    print('-----------------------------------Rendering-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size

    height = dataset.height
    width = dataset.width
    visualizer.reset()

    for i in range(0, total_num):
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # cam_posts.append(data['campos'])
        # cam_dirs.append(data['raydir'] + data['campos'][None,...])
        # continue
        visuals = None
        stime = time.time()

        # Render the frame in pixel chunks to keep memory bounded.
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            # print("tmpgts", tmpgts["gt_image"].shape)
            # print(data["pixel_idx"])
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # Reuse precomputed per-frame background rays.
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
            model.test()
            curr_visuals = model.get_current_visuals(data=data)

            # Accumulate chunk outputs into full-frame buffers.
            if visuals is None:
                visuals = {}
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    visuals[key][start:end, :] = value.cpu().numpy()

        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)

    # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False)
    # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0),None, None, save_ref=False)
    # print("vis")
    # exit()

    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num), 0)
        print('--------------------------------Finish generating vid--------------------------------')

    return
def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=False, lpips=True):
    """Evaluate *model* on *dataset* every ``opt.test_num_step`` frames.

    Renders each selected frame in pixel chunks, reassembles the full-frame
    visuals, computes MSE/PSNR losses (optionally ray-masked and
    depth-masked variants), saves images, and finally reports image metrics
    via ``report_metrics``. Returns the PSNR of the first configured color
    loss item.
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step)) # 1 if test_steps == 10000 else opt.test_num_step
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    count = 0;
    for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step
        data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # edge_mask marks which flattened pixels this frame actually covers.
        edge_mask = torch.zeros([height, width], dtype=torch.bool)
        edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1
        edge_mask=edge_mask.reshape(-1) > 0
        np_edge_mask=edge_mask.numpy().astype(bool)
        totalpixel = pixel_idx.shape[1]
        # Keep ground truth aside so the model does not see it during test.
        tmpgts = {}
        tmpgts["gt_image"] = data['gt_image'].clone()
        tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None

        # data.pop('gt_image', None)
        data.pop('gt_mask', None)

        visuals = None
        stime = time.time()
        ray_masks = []
        for k in range(0, totalpixel, chunk_size):
            start = k
            end = min([k + chunk_size, totalpixel])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
                # xyz_world_sect_plane_lst.append(xyz_world_sect_plane)
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            # print("loss", mse2psnr(torch.nn.MSELoss().to("cuda")(curr_visuals['coarse_raycolor'], tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :].cuda())))
            # print("sum", torch.sum(torch.square(tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :] - tmpgts["gt_image"].view(height, width, 3)[data["pixel_idx"][0,...,1].long(), data["pixel_idx"][0,...,0].long(),:])))
            # Scatter chunk outputs back into full (H, W, 3) buffers.
            chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32)
            if visuals is None:
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype)
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy()
            if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
                ray_masks.append(model.output["ray_mask"] > 0)
        if len(ray_masks) > 0:
            ray_masks = torch.cat(ray_masks, dim=1)
        # visualizer.save_neural_points(data["id"].cpu().numpy()[0], (raydir.cuda() + data["campos"][:, None, :]).squeeze(0), None, data, save_ref=True)
        # exit()
        # print("curr_visuals",curr_visuals)
        pixel_idx=pixel_idx.to(torch.long)
        gt_image = torch.zeros((height*width, 3), dtype=torch.float32)
        gt_image[edge_mask, :] = tmpgts['gt_image'].clone()
        if 'gt_image' in model.visual_names:
            visuals['gt_image'] = gt_image
        if 'gt_mask' in curr_visuals:
            visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype)
            visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask']
        # Derived masked visuals: zero out pixels outside the relevant masks.
        if 'ray_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape)
            visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        if 'ray_depth_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'ray_depth_masked_gt_image' in model.visual_names:
            visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'gt_image_ray_masked' in model.visual_names:
            visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        for key, value in visuals.items():
            if key in opt.visual_items:
                visualizer.print_details("{}:{}".format(key, visuals[key].shape))
                visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i, opt=opt)

        # Per-frame losses, accumulated into the visualizer.
        acc_dict = {}
        if "coarse_raycolor" in opt.test_color_loss_items:
            loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), gt_image.view(1, -1, 3).cuda())
            acc_dict.update({"coarse_raycolor": loss})
            print("coarse_raycolor", loss, mse2psnr(loss))

        if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
            masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3)
            ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3)

            # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_gt")
            # filepath = os.path.join("/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
            # tmpgtssave = tmpgts["gt_image"].view(1, -1, 3).clone()
            # tmpgtssave[~ray_masks,:] = 1.0
            # img = np.array(tmpgtssave.view(height,width,3))
            # save_image(img, filepath)
            #
            # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_coarse_raycolor")
            # filepath = os.path.join(
            #     "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
            # csave = torch.zeros_like(tmpgts["gt_image"].view(1, -1, 3))
            # csave[~ray_masks, :] = 1.0
            # csave[ray_masks, :] = torch.as_tensor(visuals["coarse_raycolor"]).view(1, -1, 3)[ray_masks,:]
            # img = np.array(csave.view(height, width, 3))
            # save_image(img, filepath)

            loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss)))

        if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items:
            ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1)
            masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
            ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3))

            loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_depth_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss)))
        print(acc_dict.items())
        visualizer.accumulate_losses(acc_dict)
        count+=1

    visualizer.print_losses(count)
    psnr = visualizer.get_psnr(opt.test_color_loss_items[0])
    # visualizer.reset()

    print('--------------------------------Finish Test Rendering--------------------------------')
    report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"], [i for i in range(0, total_num, opt.test_num_step)], imgStr="step-%04d-{}.png".format(opt.visual_items[0]),gtStr="step-%04d-{}.png".format(opt.visual_items[1]))
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num, opt.test_num_step), test_steps)
        print('--------------------------------Finish generating vid--------------------------------')

    return psnr
def probe_hole(model, dataset, visualizer, opt, bg_info, test_steps=0, opacity_thresh=0.7):
    """Detect under-covered ("hole") pixels on selected frames and harvest
    candidate new neural points around them.

    Frames are either the model's top ray-miss frames or a random subset.
    For each frame, pixels that the ray mask missed (but that differ from
    the background color) — plus, optionally, rays whose farthest sample is
    beyond ``opt.far_thresh`` — are dilated and the corresponding max-opacity
    sample locations/features are collected.

    Returns (add_xyz, add_embedding, add_color, add_dir, add_conf) tensors
    for the points to be added.
    """
    print('-----------------------------------Probing Holes-----------------------------------')
    add_xyz = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_conf = torch.zeros([0, 1], device="cuda", dtype=torch.float32)
    add_color = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_dir = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_embedding = torch.zeros([0, opt.point_features_dim], device="cuda", dtype=torch.float32)
    kernel_size = model.opt.kernel_size
    if opt.prob_kernel_size is not None:
        # Pick the probing query size for the current training-step tier.
        tier = np.sum(np.asarray(opt.prob_tiers) < test_steps)
        print("cal by tier", tier)
        model.opt.query_size = np.asarray(opt.prob_kernel_size[tier*3:tier*3+3])
        print("prob query size =", model.opt.query_size)
    model.opt.prob = 1

    total_num = len(model.top_ray_miss_ids) -1 if opt.prob_mode == 0 and opt.prob_num_step > 1 else len(dataset)
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    max_num = len(dataset) // opt.prob_num_step
    take_top = False
    if opt.prob_top == 1 and opt.prob_mode <= 0:  # and opt.far_thresh <= 0:
        if getattr(model, "top_ray_miss_ids", None) is not None:
            mask = model.top_ray_miss_loss[:-1] > 0.0
            frame_ids = model.top_ray_miss_ids[:-1][mask][:max_num]
            print(len(frame_ids), max_num)
            print("prob frame top_ray_miss_loss:", model.top_ray_miss_loss)
            take_top = True
        else:
            print("model has no top_ray_miss_ids")
    else:
        frame_ids = list(range(len(dataset)))[:max_num]
        random.shuffle(frame_ids)
        frame_ids = frame_ids[:max_num]
    print("{}/{} has holes, id_lst to prune".format(len(frame_ids), total_num), frame_ids, opt.prob_num_step)
    print("take top:", take_top, "; prob frame ids:", frame_ids)
    with tqdm(range(len(frame_ids))) as pbar:
        for j in pbar:
            i = frame_ids[j]
            pbar.set_description("Processing frame id %d" % i)
            data = dataset.get_item(i)
            bg = data['bg_color'][None, :].cuda()
            raydir = data['raydir'].clone()
            pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
            edge_mask = torch.zeros([height, width], dtype=torch.bool, device='cuda')
            edge_mask[pixel_idx[0, ..., 1].to(torch.long), pixel_idx[0, ..., 0].to(torch.long)] = 1
            edge_mask = edge_mask.reshape(-1) > 0
            totalpixel = pixel_idx.shape[1]
            gt_image_full = data['gt_image'].cuda()

            probe_keys = ["coarse_raycolor", "ray_mask", "ray_max_sample_loc_w", "ray_max_far_dist", "ray_max_shading_opacity", "shading_avg_color", "shading_avg_dir", "shading_avg_conf", "shading_avg_embedding"]
            prob_maps = {}
            # Render in chunks and scatter the probe outputs into full maps.
            for k in range(0, totalpixel, chunk_size):
                start = k
                end = min([k + chunk_size, totalpixel])
                data['raydir'] = raydir[:, start:end, :]
                data["pixel_idx"] = pixel_idx[:, start:end, :]
                model.set_input(data)
                output = model.test()
                chunk_pixel_id = data["pixel_idx"].to(torch.long)
                output["ray_mask"] = output["ray_mask"][..., None]

                for key in probe_keys:
                    if "ray_max_shading_opacity" not in output and key != 'coarse_raycolor':
                        break
                    if output[key] is None:
                        prob_maps[key] = None
                    else:
                        if key not in prob_maps.keys():
                            C = output[key].shape[-1]
                            prob_maps[key] = torch.zeros((height, width, C), device="cuda", dtype=output[key].dtype)
                        prob_maps[key][chunk_pixel_id[0, ..., 1], chunk_pixel_id[0, ..., 0], :] = output[key]

            gt_image = torch.zeros((height * width, 3), dtype=torch.float32, device=prob_maps["ray_mask"].device)
            gt_image[edge_mask, :] = gt_image_full
            gt_image = gt_image.reshape(height, width, 3)
            # A "miss" pixel has no ray coverage but is clearly not background.
            miss_ray_mask = (prob_maps["ray_mask"] < 1) * (torch.norm(gt_image - bg, dim=-1, keepdim=True) > 0.002)
            miss_ray_inds = (edge_mask.reshape(height, width, 1) * miss_ray_mask).squeeze(-1).nonzero() # N, 2

            # Dilate miss pixels by one pixel in every direction.
            neighbor_inds = bloat_inds(miss_ray_inds, 1, height, width)
            neighboring_miss_mask = torch.zeros_like(gt_image[..., 0])
            neighboring_miss_mask[neighbor_inds[..., 0], neighbor_inds[...,1]] = 1
            if opt.far_thresh > 0:
                far_ray_mask = (prob_maps["ray_mask"] > 0) * (prob_maps["ray_max_far_dist"] > opt.far_thresh) * (torch.norm(gt_image - prob_maps["coarse_raycolor"], dim=-1, keepdim=True) < 0.1)
                neighboring_miss_mask += far_ray_mask.squeeze(-1)
            # Only keep covered pixels whose max shading opacity is confident.
            neighboring_miss_mask = (prob_maps["ray_mask"].squeeze(-1) > 0) * neighboring_miss_mask * (prob_maps["ray_max_shading_opacity"].squeeze(-1) > opacity_thresh) > 0

            add_xyz = torch.cat([add_xyz, prob_maps["ray_max_sample_loc_w"][neighboring_miss_mask]], dim=0)
            add_conf = torch.cat([add_conf, prob_maps["shading_avg_conf"][neighboring_miss_mask]], dim=0) * opt.prob_mul if prob_maps["shading_avg_conf"] is not None else None
            add_color = torch.cat([add_color, prob_maps["shading_avg_color"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_color"] is not None else None
            add_dir = torch.cat([add_dir, prob_maps["shading_avg_dir"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_dir"] is not None else None
            add_embedding = torch.cat([add_embedding, prob_maps["shading_avg_embedding"][neighboring_miss_mask]], dim=0)
            if len(add_xyz) > -1:
                output = prob_maps["coarse_raycolor"].permute(2,0,1)[None, None,...]
                visualizer.save_ref_views({"images": output}, i, subdir="prob_img_{:04d}".format(test_steps))
    # Restore the model's original kernel size after probing.
    model.opt.kernel_size = kernel_size
    if opt.bgmodel.startswith("planepoints"):
        mask = dataset.filter_plane(add_xyz)
        first_lst, _ = masking(mask, [add_xyz, add_embedding, add_color, add_dir, add_conf], [])
        add_xyz, add_embedding, add_color, add_dir, add_conf = first_lst
    if len(add_xyz) > 0:
        visualizer.save_neural_points("prob{:04d}".format(test_steps), add_xyz, None, None, save_ref=False)
        visualizer.print_details("vis added points to probe folder")
    if opt.prob_mode == 0 and opt.prob_num_step > 1:
        model.reset_ray_miss_ranking()
    del visualizer, prob_maps
    model.opt.prob = 0

    return add_xyz, add_embedding, add_color, add_dir, add_conf
def bloat_inds(inds, shift, height, width):
    """Dilate integer pixel indices by a square neighborhood.

    Args:
        inds: (N, 2) long tensor of (row, col) pixel indices.
        shift: neighborhood radius; each index expands to the
            (2*shift+1)**2 surrounding positions (including itself).
        height, width: image bounds used to clamp rows/cols.

    Returns:
        ((2*shift+1)**2 * N, 2) long tensor of clamped neighbor indices
        (clamped neighbors may duplicate at the image border).
    """
    inds = inds[:,None,:]
    sx, sy = torch.meshgrid(torch.arange(-shift, shift+1, dtype=torch.long), torch.arange(-shift, shift+1, dtype=torch.long))
    # Fix: follow the device of the input instead of hard-coding .cuda(), so
    # the helper also works on CPU tensors (identical result on GPU inputs).
    shift_inds = torch.stack([sx, sy],dim=-1).reshape(1, -1, 2).to(inds.device)
    inds = inds + shift_inds
    inds = inds.reshape(-1, 2)
    inds[...,0] = torch.clamp(inds[...,0], min=0, max=height-1)
    inds[...,1] = torch.clamp(inds[...,1], min=0, max=width-1)
    return inds
def get_latest_epoch(resume_dir):
    """Return the numerically largest epoch prefix (as a string) among
    '<epoch>_states.pth' files in *resume_dir*, or None if there are none.
    Creates *resume_dir* if it does not exist."""
    os.makedirs(resume_dir, exist_ok=True)
    prefixes = [fname.split("_")[0]
                for fname in os.listdir(resume_dir)
                if fname.endswith("_states.pth")]
    if not prefixes:
        return None
    return max(prefixes, key=int)
def create_all_bg(dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=False):
    """Precompute full-frame background-plane ray colors for every frame.

    Temporarily switches the dataset to "no_crop" sampling so each item
    covers the full image, queries the model's background plane, and
    restores the original sampling mode afterwards.

    Returns a list of (1, H, W, 3) background ray tensors, one per frame.
    """
    total_num = dataset.total
    height = dataset.height
    width = dataset.width
    bg_ray_lst = []
    random_sample = dataset.opt.random_sample
    for i in range(0, total_num):
        dataset.opt.random_sample = "no_crop"
        if dummy:
            data = dataset.get_dummyrot_item(i)
        else:
            data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        # print("data['pixel_idx']",data['pixel_idx'].shape) # 1, 512, 640, 2
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        start=0
        end = height * width

        data['raydir'] = raydir[:, start:end, :]
        data["pixel_idx"] = pixel_idx[:, start:end, :]
        model.set_input(data)

        xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
        bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"])
        bg_ray = bg_ray.reshape(bg_ray.shape[0], height, width, 3)  # 1, 512, 640, 3
        bg_ray_lst.append(bg_ray)
    dataset.opt.random_sample = random_sample
    return bg_ray_lst
def main():
    """Per-scene Point-NeRF optimization entry point.

    Pipeline: (1) build/load the initial neural point cloud (from a
    checkpoint, from MVS depth generation, or from saved point files),
    (2) optionally filter/voxelize/resample the points and extract
    per-point embeddings, (3) run the training loop with periodic point
    pruning, point growing ("probing" for holes), checkpointing, video
    rendering and testing, and (4) run a final full test before exiting.
    """
    torch.backends.cudnn.benchmark = True
    opt = TrainOptions().parse()
    cur_device = torch.device('cuda:{}'.format(opt.gpu_ids[0]) if opt.
        gpu_ids else torch.device('cpu'))
    print("opt.color_loss_items ", opt.color_loss_items)
    if opt.debug:
        # Anomaly detection is expensive; only enabled for debugging runs.
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED +
              '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print(
            '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' +
            fmt.END)
    visualizer = Visualizer(opt)
    train_dataset = create_dataset(opt)
    normRw2c = train_dataset.norm_w2c[:3,:3] # torch.eye(3, device="cuda") #
    img_lst=None
    best_PSNR=0.0
    best_iter=0
    points_xyz_all=None
    # ---- Stage 1: obtain the initial point cloud + features (no grad) ----
    with torch.no_grad():
        print(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth")
        if len([n for n in glob.glob(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth") if os.path.isfile(n)]) > 0:
            # Case A: a checkpoint exists — resume from it.
            if opt.bgmodel.endswith("plane"):
                _, _, _, _, _, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
            resume_dir = os.path.join(opt.checkpoints_dir, opt.name)
            if opt.resume_iter == "best":
                opt.resume_iter = "latest"
            resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir)
            if resume_iter is None:
                epoch_count = 1
                total_steps = 0
                visualizer.print_details("No previous checkpoints, start from scratch!!!!")
            else:
                opt.resume_iter = resume_iter
                states = torch.load(
                    os.path.join(resume_dir, '{}_states.pth'.format(resume_iter)), map_location=cur_device)
                epoch_count = states['epoch_count']
                total_steps = states['total_steps']
                best_PSNR = states['best_PSNR'] if 'best_PSNR' in states else best_PSNR
                best_iter = states['best_iter'] if 'best_iter' in states else best_iter
                best_PSNR = best_PSNR.item() if torch.is_tensor(best_PSNR) else best_PSNR
                visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                visualizer.print_details('Continue training from {} epoch'.format(opt.resume_iter))
                visualizer.print_details(f"Iter: {total_steps}")
                visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                del states
            opt.mode = 2
            opt.load_points=1
            opt.resume_dir=resume_dir
            opt.resume_iter = resume_iter
            opt.is_train=True
            model = create_model(opt)
        elif opt.load_points < 1:
            # Case B: no checkpoint and no saved points — generate the
            # initial point cloud + embeddings from MVS depth.
            points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
            opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
            opt.is_train=True
            opt.mode = 2
            model = create_model(opt)
        else:
            # Case C: load saved points (and/or depth points) from disk,
            # then extract per-point features with a frozen model.
            load_points = opt.load_points
            opt.is_train = False
            opt.mode = 1
            opt.load_points = 0
            model = create_model(opt)
            model.setup(opt)
            model.eval()
            if load_points in [1,3]:
                points_xyz_all = train_dataset.load_init_points()
            if load_points == 2:
                points_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=100)
            if load_points == 3:
                # Merge depth points with loaded points, keeping only depth
                # points that fall in voxels NOT already occupied by points.
                depth_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=80)
                print("points_xyz_all",points_xyz_all.shape)
                print("depth_xyz_all", depth_xyz_all.shape)
                filter_res = 100
                pc_grid_id, _, pc_space_min, pc_space_max = mvs_utils.construct_vox_points_ind(points_xyz_all, filter_res)
                d_grid_id, depth_inds, _, _ = mvs_utils.construct_vox_points_ind(depth_xyz_all, filter_res, space_min=pc_space_min, space_max=pc_space_max)
                all_grid= torch.cat([pc_grid_id, d_grid_id], dim=0)
                min_id = torch.min(all_grid, dim=-2)[0]
                max_id = torch.max(all_grid, dim=-2)[0] - min_id
                max_id_lst = (max_id+1).cpu().numpy().tolist()
                # Occupancy grid: 1 = free, 0 = covered by loaded points.
                mask = torch.ones(max_id_lst, device=d_grid_id.device)
                pc_maskgrid_id = (pc_grid_id - min_id[None,...]).to(torch.long)
                mask[pc_maskgrid_id[...,0], pc_maskgrid_id[...,1], pc_maskgrid_id[...,2]] = 0
                depth_maskinds = (d_grid_id[depth_inds,:] - min_id).to(torch.long)
                depth_maskinds = mask[depth_maskinds[...,0], depth_maskinds[...,1], depth_maskinds[...,2]]
                depth_xyz_all = depth_xyz_all[depth_maskinds > 0]
                visualizer.save_neural_points("dep_filtered", depth_xyz_all, None, None, save_ref=False)
                print("vis depth; after pc mask depth_xyz_all",depth_xyz_all.shape)
                points_xyz_all = [points_xyz_all, depth_xyz_all] if opt.vox_res > 0 else torch.cat([points_xyz_all, depth_xyz_all],dim=0)
                del depth_xyz_all, depth_maskinds, mask, pc_maskgrid_id, max_id_lst, max_id, min_id, all_grid
            if opt.ranges[0] > -99.0:
                # Crop points to the user-specified axis-aligned bounding box.
                ranges = torch.as_tensor(opt.ranges, device=points_xyz_all.device, dtype=torch.float32)
                mask = torch.prod(
                    torch.logical_and(points_xyz_all[..., :3] >= ranges[None, :3], points_xyz_all[..., :3] <= ranges[None, 3:]),
                    dim=-1) > 0
                points_xyz_all = points_xyz_all[mask]
            if opt.vox_res > 0:
                # Voxel-downsample each point set; later sets use a coarser grid.
                points_xyz_all = [points_xyz_all] if not isinstance(points_xyz_all, list) else points_xyz_all
                points_xyz_holder = torch.zeros([0,3], dtype=points_xyz_all[0].dtype, device="cuda")
                for i in range(len(points_xyz_all)):
                    points_xyz = points_xyz_all[i]
                    vox_res = opt.vox_res // (1.5**i)
                    print("load points_xyz", points_xyz.shape)
                    _, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(points_xyz.cuda() if len(points_xyz) < 80000000 else points_xyz[::(len(points_xyz) // 80000000 + 1), ...].cuda(), vox_res)
                    points_xyz = points_xyz[sampled_pnt_idx, :]
                    print("after voxelize:", points_xyz.shape)
                    points_xyz_holder = torch.cat([points_xyz_holder, points_xyz], dim=0)
                points_xyz_all = points_xyz_holder
            if opt.resample_pnts > 0:
                if opt.resample_pnts == 1:
                    print("points_xyz_all",points_xyz_all.shape)
                    inds = torch.min(torch.norm(points_xyz_all, dim=-1, keepdim=True), dim=0)[1] # use the point closest to the origin
                else:
                    inds = torch.randperm(len(points_xyz_all))[:opt.resample_pnts, ...]
                points_xyz_all = points_xyz_all[inds, ...]
            # Assign each point to its best-matching camera, then query
            # per-point features from that camera's image.
            campos, camdir = train_dataset.get_campos_ray()
            cam_ind = nearest_view(campos, camdir, points_xyz_all, train_dataset.id_list)
            unique_cam_ind = torch.unique(cam_ind)
            print("unique_cam_ind", unique_cam_ind.shape)
            points_xyz_all = [points_xyz_all[cam_ind[:,0]==unique_cam_ind[i], :] for i in range(len(unique_cam_ind))]
            featuredim = opt.point_features_dim
            points_embedding_all = torch.zeros([1, 0, featuredim], device=unique_cam_ind.device, dtype=torch.float32)
            points_color_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
            points_dir_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
            points_conf_all = torch.zeros([1, 0, 1], device=unique_cam_ind.device, dtype=torch.float32)
            print("extract points embeding & colors", )
            for i in tqdm(range(len(unique_cam_ind))):
                id = unique_cam_ind[i]
                batch = train_dataset.get_item(id, full_img=True)
                HDWD = [train_dataset.height, train_dataset.width]
                c2w = batch["c2w"][0].cuda()
                w2c = torch.inverse(c2w)
                intrinsic = batch["intrinsic"].cuda()
                # cam_xyz_all 252, 4
                cam_xyz_all = (torch.cat([points_xyz_all[i], torch.ones_like(points_xyz_all[i][...,-1:])], dim=-1) @ w2c.transpose(0,1))[..., :3]
                embedding, color, dir, conf = model.query_embedding(HDWD, cam_xyz_all[None,...], None, batch['images'].cuda(), c2w[None, None,...], w2c[None, None,...], intrinsic[:, None,...], 0, pointdir_w=True)
                conf = conf * opt.default_conf if opt.default_conf > 0 and opt.default_conf < 1.0 else conf
                points_embedding_all = torch.cat([points_embedding_all, embedding], dim=1)
                points_color_all = torch.cat([points_color_all, color], dim=1)
                points_dir_all = torch.cat([points_dir_all, dir], dim=1)
                points_conf_all = torch.cat([points_conf_all, conf], dim=1)
                # visualizer.save_neural_points(id, cam_xyz_all, color, batch, save_ref=True)
            points_xyz_all=torch.cat(points_xyz_all, dim=0)
            visualizer.save_neural_points("init", points_xyz_all, points_color_all, None, save_ref=load_points == 0)
            print("vis")
            # visualizer.save_neural_points("cam", campos, None, None, None)
            # print("vis")
            # exit()
            # Re-create the model in training mode now that points exist.
            opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
            opt.is_train = True
            opt.mode = 2
            model = create_model(opt)
        if points_xyz_all is not None:
            if opt.bgmodel.startswith("planepoints"):
                gen_pnts, gen_embedding, gen_dir, gen_color, gen_conf = train_dataset.get_plane_param_points()
                visualizer.save_neural_points("pl", gen_pnts, gen_color, None, save_ref=False)
                print("vis pl")
                points_xyz_all = torch.cat([points_xyz_all, gen_pnts], dim=0)
                points_embedding_all = torch.cat([points_embedding_all, gen_embedding], dim=1)
                # NOTE(review): gen_dir is appended to the color buffer and
                # gen_color to the dir buffer — looks like a swap; confirm
                # against get_plane_param_points' return order.
                points_color_all = torch.cat([points_color_all, gen_dir], dim=1)
                points_dir_all = torch.cat([points_dir_all, gen_color], dim=1)
                points_conf_all = torch.cat([points_conf_all, gen_conf], dim=1)
            model.set_points(points_xyz_all.cuda(), points_embedding_all.cuda(), points_color=points_color_all.cuda(),
                             points_dir=points_dir_all.cuda(), points_conf=points_conf_all.cuda(),
                             Rw2c=normRw2c.cuda() if opt.load_points < 1 and opt.normview != 3 else None)
            epoch_count = 1
            total_steps = 0
            del points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all
    # ---- Stage 2: optimizer/scheduler setup and test configuration ----
    opt.resume_dir = os.path.join(opt.checkpoints_dir, opt.name)
    model.setup(opt, train_len=len(train_dataset))
    model.train()
    data_loader = create_data_loader(opt, dataset=train_dataset)
    dataset_size = len(data_loader)
    visualizer.print_details('# training images = {}'.format(dataset_size))
    # create test loader
    test_opt = copy.deepcopy(opt)
    test_opt.is_train = False
    test_opt.random_sample = 'no_crop'
    test_opt.random_sample_size = min(48, opt.random_sample_size)
    test_opt.batch_size = 1
    test_opt.n_threads = 0
    test_opt.prob = 0
    test_opt.split = "test"
    with open('/tmp/.neural-volumetric.name', 'w') as f:
        f.write(opt.name + '\n')
    visualizer.reset()
    if total_steps > 0:
        # Fast-forward LR schedulers to the resumed step count.
        for scheduler in model.schedulers:
            for i in range(total_steps):
                scheduler.step()
    fg_masks = None
    bg_ray_train_lst, bg_ray_test_lst = [], []
    if opt.bgmodel.endswith("plane"):
        # Precompute background rays for train/test (and render) splits.
        test_dataset = create_dataset(test_opt)
        bg_ray_train_lst = create_all_bg(train_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
        bg_ray_test_lst = create_all_bg(test_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
        test_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_test_lst]
        del test_dataset
        if opt.vid > 0:
            render_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
            bg_ray_render_lst = create_all_bg(render_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=True)
            render_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_render_lst]
    else:
        test_bg_info, render_bg_info = None, None
    img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = None, None, None, None, None
    ############ initial test ###############
    if total_steps == 0 and opt.maximum_step <= 0:
        # Evaluation-only mode: run one test pass and terminate.
        with torch.no_grad():
            test_opt.nerf_splits = ["test"]
            test_opt.split = "test"
            test_opt.name = opt.name + "/test_{}".format(total_steps)
            test_opt.test_num_step = opt.test_num_step
            test_dataset = create_dataset(test_opt)
            model.opt.is_train = 0
            model.opt.no_loss = 1
            test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps)
            model.opt.no_loss = 0
            model.opt.is_train = 1
        model.train()
        exit()
    if total_steps == 0 and (len(train_dataset.id_list) > 30 or len(train_dataset.view_id_list) > 30):
        # Save an initial checkpoint for large scenes before training starts.
        other_states = {
            'epoch_count': 0,
            'total_steps': total_steps,
        }
        model.save_networks(total_steps, other_states)
        visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, 0, total_steps))
    real_start=total_steps
    train_random_sample_size = opt.random_sample_size
    # ---- Stage 3: main training loop ----
    for epoch in range(epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        for i, data in enumerate(data_loader):
            if opt.maximum_step is not None and total_steps >= opt.maximum_step:
                break
            # Periodic point pruning: drop low-opacity points and rebuild
            # optimizer/scheduler so their state matches the new point set.
            if opt.prune_iter > 0 and real_start != total_steps and total_steps % opt.prune_iter == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0 and total_steps <= opt.prune_max_iter:
                with torch.no_grad():
                    model.clean_optimizer()
                    model.clean_scheduler()
                    model.prune_points(opt.prune_thresh)
                    model.setup_optimizer(opt)
                    model.init_scheduler(total_steps, opt)
                    torch.cuda.empty_cache()
                    torch.cuda.synchronize()
            # Periodic point growing: probe for under-covered regions
            # ("holes"), grow new points there, then hard-reset the model
            # from the just-saved checkpoint.
            if opt.prob_freq > 0 and real_start != total_steps and total_steps % opt.prob_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0:
                if opt.prob_kernel_size is not None:
                    tier = np.sum(np.asarray(opt.prob_tiers) < total_steps)
                if (model.top_ray_miss_loss[0] > 1e-5 or opt.prob_mode != 0 or opt.far_thresh > 0) and (opt.prob_kernel_size is None or tier < (len(opt.prob_kernel_size) // 3)):
                    torch.cuda.empty_cache()
                    model.opt.is_train = 0
                    model.opt.no_loss = 1
                    with torch.no_grad():
                        prob_opt = copy.deepcopy(test_opt)
                        prob_opt.name = opt.name
                        # if opt.prob_type=0:
                        train_dataset.opt.random_sample = "no_crop"
                        if opt.prob_mode <= 0:
                            train_dataset.opt.random_sample_size = min(32, train_random_sample_size)
                            prob_dataset = train_dataset
                        elif opt.prob_mode == 1:
                            prob_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
                        else:
                            prob_dataset = create_comb_dataset(test_opt, opt, total_steps, test_num_step=1)
                        model.eval()
                        add_xyz, add_embedding, add_color, add_dir, add_conf = probe_hole(model, prob_dataset, Visualizer(prob_opt), prob_opt, None, test_steps=total_steps, opacity_thresh=opt.prob_thresh)
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()
                        if opt.prob_mode != 0:
                            del prob_dataset
                        # else:
                        if len(add_xyz) > 0:
                            print("len(add_xyz)", len(add_xyz))
                            model.grow_points(add_xyz, add_embedding, add_color, add_dir, add_conf)
                            length_added = len(add_xyz)
                            del add_xyz, add_embedding, add_color, add_dir, add_conf
                            torch.cuda.empty_cache()
                            torch.cuda.synchronize()
                            other_states = {
                                "best_PSNR": best_PSNR,
                                "best_iter": best_iter,
                                'epoch_count': epoch,
                                'total_steps': total_steps,
                            }
                            visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
                            print("other_states",other_states)
                            model.save_networks(total_steps, other_states, back_gpu=False)
                            visualizer.print_details(
                                "$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(length_added, len(model.neural_points.xyz)))
                            torch.cuda.synchronize()
                            torch.cuda.empty_cache()
                            # # hard reset
                            # model.cleanup()
                            # pprint(vars(model))
                            # Rebuild the model from the checkpoint saved
                            # above so optimizer state matches grown points.
                            del model
                            visualizer.reset()
                            gc.collect()
                            opt.is_train = 1
                            opt.no_loss = 0
                            opt.resume_iter = total_steps
                            model = create_model(opt)
                            model.setup(opt, train_len=len(train_dataset))
                            model.train()
                            if total_steps > 0:
                                for scheduler in model.schedulers:
                                    for i in range(total_steps):
                                        scheduler.step()
                        else:
                            print("$$$$$$$$$$$$$$$$$$$$$$$$$$ no qualified points to grow $$$$$$$$$$$$$$$$")
                            # exit()
                        # visualizer.print_details("$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(len(add_xyz), len(model.neural_points.xyz)))
                        # Restore training-time sampling configuration.
                        train_dataset.opt.random_sample = "random"
                        model.train()
                        model.opt.no_loss = 0
                        model.opt.is_train = 1
                        train_dataset.opt.random_sample_size = train_random_sample_size
                        torch.cuda.synchronize()
                        torch.cuda.empty_cache()
                else:
                    visualizer.print_details(
                        'nothing to probe, max ray miss is only {}'.format(model.top_ray_miss_loss[0]))
            # ---- One optimization step ----
            total_steps += 1
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                if len(bg_ray_train_lst) > 0:
                    bg_ray_all = bg_ray_train_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(model.input)
                    bg_ray, fg_masks = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks=fg_masks)
                data["bg_ray"] = bg_ray
            model.optimize_parameters(total_steps=total_steps)
            losses = model.get_current_losses()
            visualizer.accumulate_losses(losses)
            if opt.lr_policy.startswith("iter"):
                model.update_learning_rate(opt=opt, total_steps=total_steps)
            if total_steps and total_steps % opt.print_freq == 0:
                if opt.show_tensorboard:
                    visualizer.plot_current_losses_with_tb(total_steps, losses)
                visualizer.print_losses(total_steps)
                visualizer.reset()
            if hasattr(opt, "save_point_freq") and total_steps and total_steps % opt.save_point_freq == 0 and (opt.prune_iter > 0 and total_steps <= opt.prune_max_iter or opt.save_point_freq==1):
                visualizer.save_neural_points(total_steps, model.neural_points.xyz, model.neural_points.points_embeding, data, save_ref=opt.load_points==0)
                visualizer.print_details('saving neural points at total_steps {})'.format(total_steps))
            # Checkpoint saving is best-effort; a failed save must not
            # abort training.
            try:
                if total_steps == 10000 or (total_steps % opt.save_iter_freq == 0 and total_steps > 0):
                    other_states = {
                        "best_PSNR": best_PSNR,
                        "best_iter": best_iter,
                        'epoch_count': epoch,
                        'total_steps': total_steps,
                    }
                    visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
                    model.save_networks(total_steps, other_states)
            except Exception as e:
                visualizer.print_details(e)
            # Periodic video rendering of the render split.
            if opt.vid > 0 and total_steps % opt.vid == 0 and total_steps > 0:
                torch.cuda.empty_cache()
                test_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
                model.opt.is_train = 0
                model.opt.no_loss = 1
                with torch.no_grad():
                    render_vid(model, test_dataset, Visualizer(test_opt), test_opt, render_bg_info, steps=total_steps)
                model.opt.no_loss = 0
                model.opt.is_train = 1
                del test_dataset
            # Periodic testing; track the best PSNR seen so far.
            if total_steps == 10000 or (total_steps % opt.test_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0):
                torch.cuda.empty_cache()
                test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
                model.opt.is_train = 0
                model.opt.no_loss = 1
                with torch.no_grad():
                    if opt.test_train == 0:
                        test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
                    else:
                        train_dataset.opt.random_sample = "no_crop"
                        test_psnr = test(model, train_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
                        train_dataset.opt.random_sample = opt.random_sample
                model.opt.no_loss = 0
                model.opt.is_train = 1
                del test_dataset
                best_iter = total_steps if test_psnr > best_PSNR else best_iter
                best_PSNR = max(test_psnr, best_PSNR)
                visualizer.print_details(f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
                model.train()
        # try:
        #     print("saving the model at the end of epoch")
        #     other_states = {'epoch_count': epoch, 'total_steps': total_steps}
        #     model.save_networks('latest', other_states)
        #
        # except Exception as e:
        #     print(e)
        if opt.maximum_step is not None and total_steps >= opt.maximum_step:
            visualizer.print_details('{}: End of stepts {} / {} \t Time Taken: {} sec'.format(
                opt.name, total_steps, opt.maximum_step,
                time.time() - epoch_start_time))
            break
    # ---- Stage 4: final checkpoint + full-dataset test ----
    del train_dataset
    other_states = {
        'epoch_count': epoch,
        'total_steps': total_steps,
    }
    visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
    model.save_networks(total_steps, other_states)
    torch.cuda.empty_cache()
    test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
    model.opt.no_loss = 1
    model.opt.is_train = 0
    visualizer.print_details("full datasets test:")
    with torch.no_grad():
        test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, gen_vid=True, lpips=True)
    best_iter = total_steps if test_psnr > best_PSNR else best_iter
    best_PSNR = max(test_psnr, best_PSNR)
    visualizer.print_details(
        f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
    exit()
def save_points_conf(visualizer, xyz, points_color, points_conf, total_steps):
    """Dump point clouds bucketed by confidence for inspection, then quit.

    Points are split into confidence bands (<=0.0, 0.0-0.1, ..., 0.9-1.0,
    1.0-1000) and each band is saved separately so the confidence
    distribution can be inspected visually. Terminates the process when
    all bands have been written.
    """
    print("total:", xyz.shape, points_color.shape, points_conf.shape)
    all_colors = points_color[0]
    all_confs = points_conf[0, ..., 0]
    lower = -1000
    for band in range(12):
        upper = (band * 0.1) if band <= 10 else 1000
        in_band = ((all_confs <= upper) * (all_confs > lower)) > 0
        band_xyz = xyz[in_band, :]
        band_color = all_colors[in_band, :]
        visualizer.save_neural_points(f"{total_steps}-{upper}", band_xyz, band_color[None, ...], None, save_ref=False)
        lower = upper
    exit()
def create_render_dataset(test_opt, opt, total_steps, test_num_step=1):
    """Configure `test_opt` for the 'render' split and build its dataset."""
    test_opt.nerf_splits = ["render"]
    test_opt.split = "render"
    test_opt.name = "{}/vid_{}".format(opt.name, total_steps)
    test_opt.test_num_step = test_num_step
    test_opt.random_sample_size = 30
    return create_dataset(test_opt)
def create_test_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Configure `test_opt` for the 'test' split and build its dataset.

    `prob` overrides `test_opt.prob` only when it is not None.
    """
    test_opt.prob = test_opt.prob if prob is None else prob
    test_opt.nerf_splits = ["test"]
    test_opt.split = "test"
    test_opt.name = "{}/test_{}".format(opt.name, total_steps)
    test_opt.test_num_step = test_num_step
    return create_dataset(test_opt)
def create_comb_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Configure `test_opt` for the 'comb' split and build its dataset.

    `prob` overrides `test_opt.prob` only when it is not None.
    """
    test_opt.prob = test_opt.prob if prob is None else prob
    test_opt.nerf_splits = ["comb"]
    test_opt.split = "comb"
    test_opt.name = "{}/comb_{}".format(opt.name, total_steps)
    test_opt.test_num_step = test_num_step
    return create_dataset(test_opt)
if __name__ == '__main__':
    # Script entry point: run the per-scene training / point-growing loop.
    main()
| 60,868 | 55.308048 | 392 | py |
pointnerf | pointnerf-master/run/render_vid.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import copy
import torch
import numpy as np
import time
from options import TestOptions
from data import create_data_loader, create_dataset
from models import create_model
from utils.visualizer import Visualizer
from utils import format as fmt
from tqdm import trange
def get_latest_epoch(resume_dir):
    """Return the checkpoint tag with the largest integer value, or None.

    Scans `resume_dir` (creating it if missing) for '<tag>_states.pth'
    files and returns the tag string of the numerically newest one.
    """
    os.makedirs(resume_dir, exist_ok=True)
    best_tag, best_val = None, None
    for fname in os.listdir(resume_dir):
        if not fname.endswith("_states.pth"):
            continue
        tag = fname.split("_")[0]
        val = int(tag)
        if best_val is None or val > best_val:
            best_tag, best_val = tag, val
    return best_tag
def render_vid(model, dataset, visualizer, opt, total_steps):
    """Render every pose in `dataset.render_poses` and emit a video.

    Each frame is rendered in ray chunks of `random_sample_size`^2 pixels;
    per-chunk outputs ending in "color" with a 3-channel last dim are
    stitched into full (H, W, 3) images and handed to
    `visualizer.display_video`. The model is switched to eval() for the
    duration and restored to train() afterwards.
    """
    print(
        '-----------------------------------Rendering Vid-----------------------------------'
    )
    model.eval()
    render_num = len(dataset.render_poses)
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visual_lst = []
    for i in range(render_num):
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        # Flatten the pixel index grid to (B, H*W, 2) for chunked slicing.
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        visuals = None
        starttime=time.time()
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            # data['gt_image'] = gt_image[:, start:end, :]
            # data['gt_mask'] = gt_mask[:, start:end, :]
            model.set_input(data)
            model.test()
            curr_visuals = model.get_current_visuals()
            if visuals is None:
                # First chunk: allocate one (H*W, 3) buffer per color output.
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or value.shape[-1] != 3 or not key.endswith("color"):
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                # Subsequent chunks: write into the preallocated buffers.
                for key, value in curr_visuals.items():
                    if value is None or value.shape[-1] != 3 or not key.endswith("color"):
                        continue
                    visuals[key][start:end, :] = value.cpu().numpy()
        # Reshape the flat pixel buffers back into full images.
        for key, value in visuals.items():
            visuals[key] = visuals[key].reshape(height, width, 3)
        visual_lst.append(visuals)
        print("render time:", time.time() - starttime)
    visualizer.display_video(visual_lst, total_steps)
    model.train()
    print(
        '--------------------------------Finish Rendering--------------------------------'
    )
    return
def main():
    """Load a trained checkpoint (if any) and render its video sequence.

    Parses test options, optionally restores training state from
    `opt.resume_dir`, builds a full-image ('no_crop') test dataset and
    delegates frame rendering to `render_vid`.
    """
    torch.backends.cudnn.benchmark = True
    opt = TestOptions().parse()
    opt.no_loss = True
    opt.gpu_ids='0'
    if opt.debug:
        # Anomaly detection is expensive; only enabled for debugging runs.
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END)
    if opt.resume_dir:
        # Resume bookkeeping state from the requested (or latest) checkpoint.
        resume_dir = opt.resume_dir
        resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir)
        opt.resume_iter = resume_iter
        states = torch.load(os.path.join(resume_dir, '{}_states.pth'.format(resume_iter)))
        epoch_count = states['epoch_count']
        total_steps = states['total_steps']
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Test {} at epoch {}'.format(opt.resume_dir, opt.resume_iter))
        print("Iter: ", total_steps)
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
    else:
        epoch_count = 1
        total_steps = 0
    # load model
    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    # create test loader
    test_opt = copy.deepcopy(opt)
    test_opt.is_train = False
    test_opt.random_sample = 'no_crop'
    test_opt.batch_size = 1
    test_opt.n_threads = 0
    test_dataset = create_dataset(test_opt)
    dataset_size = len(test_dataset)
    print('# training images = {}'.format(dataset_size))
    with open('/tmp/.neural-volumetric.name', 'w') as f:
        f.write(opt.name + '\n')
    visualizer.reset()
    render_vid(model, test_dataset, visualizer, test_opt, total_steps)
if __name__ == '__main__':
    # Script entry point: render the video for a trained checkpoint.
    main()
| 4,702 | 34.097015 | 110 | py |
pointnerf | pointnerf-master/run/train_ft.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
torch.manual_seed(0)
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def mse2psnr(x): return -10.* torch.log(x)/np.log(10.)
def save_image(img_array, filepath):
    """Write a grayscale (H, W) or color (H, W, 3|4) image to `filepath`.

    Non-uint8 inputs are assumed to lie in [0, 1] and are rescaled to
    uint8; intermediate directories are created as needed.
    """
    rank = len(img_array.shape)
    assert rank == 2 or (rank == 3 and img_array.shape[2] in [3, 4])
    if img_array.dtype != np.uint8:
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    Image.fromarray(img_array).save(filepath)
def nearest_view(campos, raydir, xyz, id_list):
    """Pick, for every point in `xyz`, the index of its best camera.

    The per-camera score mixes the point-to-camera distance (scaled by
    1/200) with how poorly the camera ray direction aligns with the
    normalized point-to-camera offset; the camera with the lowest score
    wins. Points are processed in chunks of 10000 to bound memory.
    `id_list` is accepted for signature compatibility but unused here.

    Returns:
        (N, 1) long tensor of indices into `campos`.
    """
    chunk = 10000
    n_pts = len(xyz)
    chosen = [torch.zeros([0, 1], device=campos.device, dtype=torch.long)]
    for start in range(0, n_pts, chunk):
        offsets = xyz[start:min(n_pts, start + chunk), None, :] - campos[None, ...]  # N, M, 3
        norms = torch.norm(offsets, dim=-1)  # N, M
        units = offsets / (norms[..., None] + 1e-6)  # N, M, 3
        score = norms / 200 + (1.1 - torch.sum(units * raydir[None, :], dim=-1))  # N, M
        chosen.append(torch.argmin(score, dim=1).view(-1, 1))  # N, 1
    return torch.cat(chosen, dim=0)
def gen_points_filter_embeddings(dataset, visualizer, opt):
    """Generate the initial neural point cloud from MVS depth and extract
    per-point features.

    For every view pair in `dataset.view_id_list`, runs the MVS model to
    produce per-view camera-space points (plus confidences and masks),
    filters them (confidence masks, scene bounds, alpha visual hull,
    optional voxel downsampling), and finally queries per-point
    embedding/color/direction/confidence features from the source images.

    Returns:
        (xyz_world_all, points_embedding_all, points_color_all,
         points_dir_all, points_conf_all, imgs, c2ws, w2cs,
         intrinsics_all, HDWD list) — the fused world-space points with
        their features plus the per-view camera data needed later for
        background modeling.
    """
    print('-----------------------------------Generate Points-----------------------------------')
    opt.is_train=False
    opt.mode = 1
    model = create_model(opt)
    model.setup(opt)
    model.eval()
    cam_xyz_all = []
    intrinsics_all = []
    extrinsics_all = []
    confidence_all = []
    points_mask_all = []
    intrinsics_full_lst = []
    confidence_filtered_all = []
    near_fars_all = []
    gpu_filter = True
    # For very many views, stash per-view tensors on CPU to save GPU memory.
    cpu2gpu= len(dataset.view_id_list) > 300
    imgs_lst, HDWD_lst, c2ws_lst, w2cs_lst, intrinsics_lst = [],[],[],[],[]
    with torch.no_grad():
        # Pass 1: run MVS depth estimation per view and collect raw points.
        for i in tqdm(range(0, len(dataset.view_id_list))):
            data = dataset.get_init_item(i)
            model.set_input(data)
            # intrinsics 1, 3, 3, 3
            points_xyz_lst, photometric_confidence_lst, point_mask_lst, intrinsics_lst, extrinsics_lst, HDWD, c2ws, w2cs, intrinsics, near_fars = model.gen_points()
            # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0)
            B, N, C, H, W, _ = points_xyz_lst[0].shape
            # print("points_xyz_lst",points_xyz_lst[0].shape)
            cam_xyz_all.append((points_xyz_lst[0].cpu() if cpu2gpu else points_xyz_lst[0]) if gpu_filter else points_xyz_lst[0].cpu().numpy())
            # intrinsics_lst[0] 1, 3, 3
            intrinsics_all.append(intrinsics_lst[0] if gpu_filter else intrinsics_lst[0])
            extrinsics_all.append(extrinsics_lst[0] if gpu_filter else extrinsics_lst[0].cpu().numpy())
            if opt.manual_depth_view !=0:
                confidence_all.append((photometric_confidence_lst[0].cpu() if cpu2gpu else photometric_confidence_lst[0]) if gpu_filter else photometric_confidence_lst[0].cpu().numpy())
            points_mask_all.append((point_mask_lst[0].cpu() if cpu2gpu else point_mask_lst[0]) if gpu_filter else point_mask_lst[0].cpu().numpy())
            imgs_lst.append(data["images"].cpu())
            HDWD_lst.append(HDWD)
            c2ws_lst.append(c2ws)
            w2cs_lst.append(w2cs)
            intrinsics_full_lst.append(intrinsics)
            near_fars_all.append(near_fars[0,0])
            # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0)
    # #################### start query embedding ##################
    torch.cuda.empty_cache()
    # Pass 2: filter points by confidence masks (GPU or numpy path).
    if opt.manual_depth_view != 0:
        if gpu_filter:
            _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=True, return_w=True, cpu2gpu=cpu2gpu, near_fars_all=near_fars_all)
        else:
            _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks(cam_xyz_all, [intr.cpu().numpy() for intr in intrinsics_all], extrinsics_all, confidence_all, points_mask_all, opt)
        # print(xyz_ref_lst[0].shape) # 224909, 3
    else:
        # No confidence filtering: just mask and lift to world space.
        cam_xyz_all = [cam_xyz_all[i].reshape(-1,3)[points_mask_all[i].reshape(-1),:] for i in range(len(cam_xyz_all))]
        xyz_world_all = [np.matmul(np.concatenate([cam_xyz_all[i], np.ones_like(cam_xyz_all[i][..., 0:1])], axis=-1), np.transpose(np.linalg.inv(extrinsics_all[i][0,...])))[:, :3] for i in range(len(cam_xyz_all))]
        xyz_world_all, cam_xyz_all, confidence_filtered_all = filter_by_masks.range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_all, opt)
        del cam_xyz_all
    # for i in range(len(xyz_world_all)):
    #     visualizer.save_neural_points(i, torch.as_tensor(xyz_world_all[i], device="cuda", dtype=torch.float32), None, data, save_ref=opt.load_points==0)
    # exit()
    # xyz_world_all = xyz_world_all.cuda()
    # confidence_filtered_all = confidence_filtered_all.cuda()
    # points_vid tracks which source view each fused point came from.
    points_vid = torch.cat([torch.ones_like(xyz_world_all[i][...,0:1]) * i for i in range(len(xyz_world_all))], dim=0)
    xyz_world_all = torch.cat(xyz_world_all, dim=0) if gpu_filter else torch.as_tensor(
        np.concatenate(xyz_world_all, axis=0), device="cuda", dtype=torch.float32)
    confidence_filtered_all = torch.cat(confidence_filtered_all, dim=0) if gpu_filter else torch.as_tensor(np.concatenate(confidence_filtered_all, axis=0), device="cuda", dtype=torch.float32)
    print("xyz_world_all", xyz_world_all.shape, points_vid.shape, confidence_filtered_all.shape)
    torch.cuda.empty_cache()
    # visualizer.save_neural_points(0, xyz_world_all, None, None, save_ref=False)
    # print("vis 0")
    print("%%%%%%%%%%%%% getattr(dataset, spacemin, None)", getattr(dataset, "spacemin", None))
    if getattr(dataset, "spacemin", None) is not None:
        # Clip points to the dataset's known bounding box.
        mask = (xyz_world_all - dataset.spacemin[None, ...].to(xyz_world_all.device)) >= 0
        mask *= (dataset.spacemax[None, ...].to(xyz_world_all.device) - xyz_world_all) >= 0
        mask = torch.prod(mask, dim=-1) > 0
        first_lst, second_lst = masking(mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
        xyz_world_all, points_vid, confidence_filtered_all = first_lst
    # visualizer.save_neural_points(50, xyz_world_all, None, None, save_ref=False)
    # print("vis 50")
    if getattr(dataset, "alphas", None) is not None:
        # Keep only points inside the alpha-mask visual hull.
        vishull_mask = mvs_utils.alpha_masking(xyz_world_all, dataset.alphas, dataset.intrinsics, dataset.cam2worlds, dataset.world2cams, dataset.near_far if opt.ranges[0] < -90.0 and getattr(dataset,"spacemin",None) is None else None, opt=opt)
        first_lst, second_lst = masking(vishull_mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
        xyz_world_all, points_vid, confidence_filtered_all = first_lst
        print("alpha masking xyz_world_all", xyz_world_all.shape, points_vid.shape)
    # visualizer.save_neural_points(100, xyz_world_all, None, data, save_ref=opt.load_points == 0)
    # print("vis 100")
    if opt.vox_res > 0:
        # Voxel-downsample (with a stride-based cap for huge clouds).
        xyz_world_all, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(xyz_world_all.cuda() if len(xyz_world_all) < 99999999 else xyz_world_all[::(len(xyz_world_all)//99999999+1),...].cuda(), opt.vox_res)
        points_vid = points_vid[sampled_pnt_idx,:]
        confidence_filtered_all = confidence_filtered_all[sampled_pnt_idx]
        print("after voxelize:", xyz_world_all.shape, points_vid.shape)
        xyz_world_all = xyz_world_all.cuda()
    # Pass 3: group points by source view and query per-point features.
    xyz_world_all = [xyz_world_all[points_vid[:,0]==i, :] for i in range(len(HDWD_lst))]
    confidence_filtered_all = [confidence_filtered_all[points_vid[:,0]==i] for i in range(len(HDWD_lst))]
    cam_xyz_all = [(torch.cat([xyz_world_all[i], torch.ones_like(xyz_world_all[i][...,0:1])], dim=-1) @ extrinsics_all[i][0].t())[...,:3] for i in range(len(HDWD_lst))]
    points_embedding_all, points_color_all, points_dir_all, points_conf_all = [], [], [], []
    for i in tqdm(range(len(HDWD_lst))):
        if len(xyz_world_all[i]) > 0:
            embedding, color, dir, conf = model.query_embedding(HDWD_lst[i], torch.as_tensor(cam_xyz_all[i][None, ...], device="cuda", dtype=torch.float32), torch.as_tensor(confidence_filtered_all[i][None, :, None], device="cuda", dtype=torch.float32) if len(confidence_filtered_all) > 0 else None, imgs_lst[i].cuda(), c2ws_lst[i], w2cs_lst[i], intrinsics_full_lst[i], 0, pointdir_w=True)
            points_embedding_all.append(embedding)
            points_color_all.append(color)
            points_dir_all.append(dir)
            points_conf_all.append(conf)
    xyz_world_all = torch.cat(xyz_world_all, dim=0)
    points_embedding_all = torch.cat(points_embedding_all, dim=1)
    points_color_all = torch.cat(points_color_all, dim=1) if points_color_all[0] is not None else None
    points_dir_all = torch.cat(points_dir_all, dim=1) if points_dir_all[0] is not None else None
    points_conf_all = torch.cat(points_conf_all, dim=1) if points_conf_all[0] is not None else None
    visualizer.save_neural_points(200, xyz_world_all, points_color_all, data, save_ref=opt.load_points == 0)
    print("vis")
    model.cleanup()
    del model
    return xyz_world_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, [img[0].cpu() for img in imgs_lst], [c2w for c2w in c2ws_lst], [w2c for w2c in w2cs_lst] , intrinsics_all, [list(HDWD) for HDWD in HDWD_lst]
def masking(mask, firstdim_lst, seconddim_lst):
    """Filter two groups of tensors by a boolean mask.

    Tensors in ``firstdim_lst`` are indexed along their first axis and tensors
    in ``seconddim_lst`` along their second axis; ``None`` entries are passed
    through untouched.  Returns the two filtered lists in input order.
    """
    masked_first = []
    for tensor in firstdim_lst:
        masked_first.append(None if tensor is None else tensor[mask, ...])
    masked_second = []
    for tensor in seconddim_lst:
        masked_second.append(None if tensor is None else tensor[:, mask, ...])
    return masked_first, masked_second
def render_vid(model, dataset, visualizer, opt, bg_info, steps=0, gen_vid=True):
    """Render every view of `dataset` with `model` and save the images.

    Each frame is rendered in chunks of ``opt.random_sample_size ** 2`` rays to
    bound memory, the per-key outputs are reassembled into full
    (height, width, 3) images, and each image is written through `visualizer`.
    When `gen_vid` is True a video is assembled afterwards from the saved
    "coarse_raycolor" frames.

    Args:
        model: renderer exposing eval/set_input/test/get_current_visuals
            (and set_bg when a plane background model is configured).
        dataset: provides per-frame ray/camera data via get_dummyrot_item.
        visualizer: image-saving / video-generation helper.
        opt: options namespace (uses random_sample_size, bgmodel,
            test_num_step).
        bg_info: background-plane data tuple; only unpacked when
            ``opt.bgmodel`` ends with "plane".
        steps: training-step label (unused inside this function).
        gen_vid: whether to generate a video after rendering all frames.
    """
    print('-----------------------------------Rendering-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    patch_size = opt.random_sample_size
    # Rays are processed patch_size^2 at a time.
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    for i in range(0, total_num):
        data = dataset.get_dummyrot_item(i)
        # Keep full-frame copies; data's entries are overwritten per chunk below.
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # cam_posts.append(data['campos'])
        # cam_dirs.append(data['raydir'] + data['campos'][None,...])
        # continue
        visuals = None
        stime = time.time()
        # Render the frame chunk_size rays at a time.
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            # print("tmpgts", tmpgts["gt_image"].shape)
            # print(data["pixel_idx"])
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # Background rays were precomputed per frame; gather the ones
                    # addressed by this chunk's (x, y) pixel indices.
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    # No precomputed rays: intersect this chunk's rays with the
                    # background plane on the fly.
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            if visuals is None:
                # First chunk: allocate one flat (H*W, 3) buffer per visual key.
                visuals = {}
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    visuals[key][start:end, :] = value.cpu().numpy()
        # Reshape the flat buffers into (H, W, 3) images and save them.
        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)
    # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False)
    # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0),None, None, save_ref=False)
    # print("vis")
    # exit()
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num), 0)
        print('--------------------------------Finish generating vid--------------------------------')
    return
def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=False, lpips=True):
    """Evaluate `model` on `dataset`: render, save images, accumulate losses.

    Every ``opt.test_num_step``-th frame is rendered chunk-by-chunk; each
    chunk's outputs are scattered back into full (height, width, 3) images via
    the chunk's pixel indices.  Masked variants of the render and the ground
    truth are then derived (ray mask / ray-depth mask), per-frame MSE/PSNR
    losses are accumulated on the visualizer, and image metrics are reported
    at the end (adds LPIPS/VGG-LPIPS when `lpips` is True).

    Args:
        model: renderer exposing eval/set_input/test/get_current_visuals,
            plus an `output` dict with optional "ray_mask"/"ray_depth_mask".
        dataset: provides per-frame data via get_item.
        visualizer: saving / loss-accumulation / reporting helper.
        opt: options namespace (random_sample_size, test_num_step, bgmodel,
            test_color_loss_items, visual_items, ...).
        bg_info: background-plane data; only unpacked when ``opt.bgmodel``
            ends with "plane".
        test_steps: step label used when generating the video filename.
        gen_vid: whether to assemble a video after evaluation.
        lpips: include LPIPS-based metrics in the final report when True.

    Returns:
        The accumulated PSNR for ``opt.test_color_loss_items[0]``.
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step)) # 1 if test_steps == 10000 else opt.test_num_step
    patch_size = opt.random_sample_size
    # Rays are processed patch_size^2 at a time.
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    count = 0;
    for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step
        data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # Boolean mask of the pixels this frame's rays actually cover; pixel_idx
        # appears to be (x, y) pairs, indexed here as [row=y, col=x].
        edge_mask = torch.zeros([height, width], dtype=torch.bool)
        edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1
        edge_mask=edge_mask.reshape(-1) > 0
        np_edge_mask=edge_mask.numpy().astype(bool)
        totalpixel = pixel_idx.shape[1]
        # Stash ground truth before it is popped off `data`.
        tmpgts = {}
        tmpgts["gt_image"] = data['gt_image'].clone()
        tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None
        # data.pop('gt_image', None)
        data.pop('gt_mask', None)
        visuals = None
        stime = time.time()
        ray_masks = []
        for k in range(0, totalpixel, chunk_size):
            start = k
            end = min([k + chunk_size, totalpixel])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # Precomputed background rays: gather by this chunk's pixels.
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    # Otherwise intersect the chunk's rays with the plane now.
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
            # xyz_world_sect_plane_lst.append(xyz_world_sect_plane)
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            # print("loss", mse2psnr(torch.nn.MSELoss().to("cuda")(curr_visuals['coarse_raycolor'], tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :].cuda())))
            # print("sum", torch.sum(torch.square(tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :] - tmpgts["gt_image"].view(height, width, 3)[data["pixel_idx"][0,...,1].long(), data["pixel_idx"][0,...,0].long(),:])))
            chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32)
            if visuals is None:
                # First chunk: allocate an (H, W, 3) buffer per visual key and
                # scatter the chunk's values to its pixel coordinates.
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype)
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy()
            if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
                ray_masks.append(model.output["ray_mask"] > 0)
        if len(ray_masks) > 0:
            # Concatenate per-chunk ray-hit masks along the ray dimension.
            ray_masks = torch.cat(ray_masks, dim=1)
        # visualizer.save_neural_points(data["id"].cpu().numpy()[0], (raydir.cuda() + data["campos"][:, None, :]).squeeze(0), None, data, save_ref=True)
        # exit()
        # print("curr_visuals",curr_visuals)
        pixel_idx=pixel_idx.to(torch.long)
        # Rebuild a flat full-frame GT image; only covered pixels are filled.
        gt_image = torch.zeros((height*width, 3), dtype=torch.float32)
        gt_image[edge_mask, :] = tmpgts['gt_image'].clone()
        if 'gt_image' in model.visual_names:
            visuals['gt_image'] = gt_image
        if 'gt_mask' in curr_visuals:
            # NOTE(review): `chunk` is reused from the last loop iteration here
            # (its dtype only); confirm this is intentional.
            visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype)
            visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask']
        if 'ray_masked_coarse_raycolor' in model.visual_names:
            # Zero out pixels whose rays hit no neural points.
            visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape)
            visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        if 'ray_depth_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'ray_depth_masked_gt_image' in model.visual_names:
            visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'gt_image_ray_masked' in model.visual_names:
            visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        for key, value in visuals.items():
            if key in opt.visual_items:
                visualizer.print_details("{}:{}".format(key, visuals[key].shape))
                visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i, opt=opt)
        # Per-frame loss accumulation (full-frame and masked variants).
        acc_dict = {}
        if "coarse_raycolor" in opt.test_color_loss_items:
            loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), gt_image.view(1, -1, 3).cuda())
            acc_dict.update({"coarse_raycolor": loss})
            print("coarse_raycolor", loss, mse2psnr(loss))
        if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
            # Compare only the rays that hit neural points.
            masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3)
            ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3)
            # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_gt")
            # filepath = os.path.join("/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
            # tmpgtssave = tmpgts["gt_image"].view(1, -1, 3).clone()
            # tmpgtssave[~ray_masks,:] = 1.0
            # img = np.array(tmpgtssave.view(height,width,3))
            # save_image(img, filepath)
            #
            # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_coarse_raycolor")
            # filepath = os.path.join(
            #     "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
            # csave = torch.zeros_like(tmpgts["gt_image"].view(1, -1, 3))
            # csave[~ray_masks, :] = 1.0
            # csave[ray_masks, :] = torch.as_tensor(visuals["coarse_raycolor"]).view(1, -1, 3)[ray_masks,:]
            # img = np.array(csave.view(height, width, 3))
            # save_image(img, filepath)
            loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss)))
        if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items:
            # Compare only rays whose depth passed the depth-mask test.
            ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1)
            masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
            ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3))
            loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_depth_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss)))
        print(acc_dict.items())
        visualizer.accumulate_losses(acc_dict)
        count+=1
    visualizer.print_losses(count)
    psnr = visualizer.get_psnr(opt.test_color_loss_items[0])
    # visualizer.reset()
    print('--------------------------------Finish Test Rendering--------------------------------')
    report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"], [i for i in range(0, total_num, opt.test_num_step)], imgStr="step-%04d-{}.png".format(opt.visual_items[0]),gtStr="step-%04d-{}.png".format(opt.visual_items[1]))
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num, opt.test_num_step), test_steps)
        print('--------------------------------Finish generating vid--------------------------------')
    return psnr
def probe_hole(model, dataset, visualizer, opt, bg_info, test_steps=0, opacity_thresh=0.7):
    """Find under-covered image regions ("holes") and propose new neural points.

    Renders selected frames in probe mode, locates pixels whose rays missed the
    point cloud (or hit too far away when ``opt.far_thresh > 0``) but whose
    ground truth differs from the background, dilates those pixels by one, and
    collects the shading attributes (location, confidence, color, direction,
    embedding) of neighboring sufficiently-opaque rays as candidate points.

    Args:
        model: renderer; its opt.prob / opt.query_size / opt.kernel_size are
            temporarily modified and restored before returning.
        dataset: provides frames via get_item.
        visualizer: used to save probe images and candidate point clouds.
        opt: options (prob_mode, prob_num_step, prob_top, prob_kernel_size,
            prob_tiers, far_thresh, prob_mul, bgmodel, ...).
        bg_info: unused here (kept for signature symmetry with test()).
        test_steps: current training step; selects the probe kernel tier and
            labels the saved visualizations.
        opacity_thresh: minimum max-shading opacity for a ray to contribute a
            candidate point.

    Returns:
        Tuple (xyz, embedding, color, dir, conf) of candidate-point tensors;
        conf/color/dir may be None when the model does not produce them.
    """
    print('-----------------------------------Probing Holes-----------------------------------')
    add_xyz = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_conf = torch.zeros([0, 1], device="cuda", dtype=torch.float32)
    add_color = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_dir = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_embedding = torch.zeros([0, opt.point_features_dim], device="cuda", dtype=torch.float32)
    # Remember the kernel size so it can be restored after probing.
    kernel_size = model.opt.kernel_size
    if opt.prob_kernel_size is not None:
        # Pick the query-size triple for the current training-step tier.
        tier = np.sum(np.asarray(opt.prob_tiers) < test_steps)
        print("cal by tier", tier)
        model.opt.query_size = np.asarray(opt.prob_kernel_size[tier*3:tier*3+3])
        print("prob query size =", model.opt.query_size)
    model.opt.prob = 1
    total_num = len(model.top_ray_miss_ids) -1 if opt.prob_mode == 0 and opt.prob_num_step > 1 else len(dataset)
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    max_num = len(dataset) // opt.prob_num_step
    take_top = False
    if opt.prob_top == 1 and opt.prob_mode <= 0: # and opt.far_thresh <= 0:
        # Probe the frames with the worst ray-miss loss first.
        if getattr(model, "top_ray_miss_ids", None) is not None:
            mask = model.top_ray_miss_loss[:-1] > 0.0
            frame_ids = model.top_ray_miss_ids[:-1][mask][:max_num]
            print(len(frame_ids), max_num)
            print("prob frame top_ray_miss_loss:", model.top_ray_miss_loss)
            take_top = True
        else:
            # NOTE(review): frame_ids stays unbound on this path and the loop
            # below would raise NameError — confirm this branch is unreachable.
            print("model has no top_ray_miss_ids")
    else:
        # Otherwise probe a random subset of frames.
        frame_ids = list(range(len(dataset)))[:max_num]
        random.shuffle(frame_ids)
        frame_ids = frame_ids[:max_num]
    print("{}/{} has holes, id_lst to prune".format(len(frame_ids), total_num), frame_ids, opt.prob_num_step)
    print("take top:", take_top, "; prob frame ids:", frame_ids)
    with tqdm(range(len(frame_ids))) as pbar:
        for j in pbar:
            i = frame_ids[j]
            pbar.set_description("Processing frame id %d" % i)
            data = dataset.get_item(i)
            bg = data['bg_color'][None, :].cuda()
            raydir = data['raydir'].clone()
            pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
            # Mask of pixels covered by this frame's rays (indexed [y, x]).
            edge_mask = torch.zeros([height, width], dtype=torch.bool, device='cuda')
            edge_mask[pixel_idx[0, ..., 1].to(torch.long), pixel_idx[0, ..., 0].to(torch.long)] = 1
            edge_mask = edge_mask.reshape(-1) > 0
            totalpixel = pixel_idx.shape[1]
            gt_image_full = data['gt_image'].cuda()
            probe_keys = ["coarse_raycolor", "ray_mask", "ray_max_sample_loc_w", "ray_max_far_dist", "ray_max_shading_opacity", "shading_avg_color", "shading_avg_dir", "shading_avg_conf", "shading_avg_embedding"]
            prob_maps = {}
            # Render chunk-by-chunk, scattering each probe output into an
            # (H, W, C) map keyed by probe_keys.
            for k in range(0, totalpixel, chunk_size):
                start = k
                end = min([k + chunk_size, totalpixel])
                data['raydir'] = raydir[:, start:end, :]
                data["pixel_idx"] = pixel_idx[:, start:end, :]
                model.set_input(data)
                output = model.test()
                chunk_pixel_id = data["pixel_idx"].to(torch.long)
                output["ray_mask"] = output["ray_mask"][..., None]
                for key in probe_keys:
                    if "ray_max_shading_opacity" not in output and key != 'coarse_raycolor':
                        # Probe outputs absent: only coarse_raycolor is usable.
                        break
                    if output[key] is None:
                        prob_maps[key] = None
                    else:
                        if key not in prob_maps.keys():
                            C = output[key].shape[-1]
                            prob_maps[key] = torch.zeros((height, width, C), device="cuda", dtype=output[key].dtype)
                        prob_maps[key][chunk_pixel_id[0, ..., 1], chunk_pixel_id[0, ..., 0], :] = output[key]
            gt_image = torch.zeros((height * width, 3), dtype=torch.float32, device=prob_maps["ray_mask"].device)
            gt_image[edge_mask, :] = gt_image_full
            gt_image = gt_image.reshape(height, width, 3)
            # A "missed" ray hit no points but its GT is not background color.
            miss_ray_mask = (prob_maps["ray_mask"] < 1) * (torch.norm(gt_image - bg, dim=-1, keepdim=True) > 0.002)
            miss_ray_inds = (edge_mask.reshape(height, width, 1) * miss_ray_mask).squeeze(-1).nonzero() # N, 2
            # Dilate missed pixels by 1 so adjacent hit-rays can donate points.
            neighbor_inds = bloat_inds(miss_ray_inds, 1, height, width)
            neighboring_miss_mask = torch.zeros_like(gt_image[..., 0])
            neighboring_miss_mask[neighbor_inds[..., 0], neighbor_inds[...,1]] = 1
            if opt.far_thresh > 0:
                # Also treat well-colored but too-distant hits as hole borders.
                far_ray_mask = (prob_maps["ray_mask"] > 0) * (prob_maps["ray_max_far_dist"] > opt.far_thresh) * (torch.norm(gt_image - prob_maps["coarse_raycolor"], dim=-1, keepdim=True) < 0.1)
                neighboring_miss_mask += far_ray_mask.squeeze(-1)
            # Keep only neighbors that themselves hit with enough opacity.
            neighboring_miss_mask = (prob_maps["ray_mask"].squeeze(-1) > 0) * neighboring_miss_mask * (prob_maps["ray_max_shading_opacity"].squeeze(-1) > opacity_thresh) > 0
            add_xyz = torch.cat([add_xyz, prob_maps["ray_max_sample_loc_w"][neighboring_miss_mask]], dim=0)
            # NOTE(review): the `* opt.prob_mul` scales the whole accumulated
            # tensor every frame (compounding across frames) — confirm intended.
            add_conf = torch.cat([add_conf, prob_maps["shading_avg_conf"][neighboring_miss_mask]], dim=0) * opt.prob_mul if prob_maps["shading_avg_conf"] is not None else None
            add_color = torch.cat([add_color, prob_maps["shading_avg_color"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_color"] is not None else None
            add_dir = torch.cat([add_dir, prob_maps["shading_avg_dir"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_dir"] is not None else None
            add_embedding = torch.cat([add_embedding, prob_maps["shading_avg_embedding"][neighboring_miss_mask]], dim=0)
            if len(add_xyz) > -1:
                # Save the probe render for inspection (condition is always true).
                output = prob_maps["coarse_raycolor"].permute(2,0,1)[None, None,...]
                visualizer.save_ref_views({"images": output}, i, subdir="prob_img_{:04d}".format(test_steps))
    # Restore the kernel size modified for probing.
    model.opt.kernel_size = kernel_size
    if opt.bgmodel.startswith("planepoints"):
        # Drop candidates that fall on the background plane.
        mask = dataset.filter_plane(add_xyz)
        first_lst, _ = masking(mask, [add_xyz, add_embedding, add_color, add_dir, add_conf], [])
        add_xyz, add_embedding, add_color, add_dir, add_conf = first_lst
    if len(add_xyz) > 0:
        visualizer.save_neural_points("prob{:04d}".format(test_steps), add_xyz, None, None, save_ref=False)
        visualizer.print_details("vis added points to probe folder")
    if opt.prob_mode == 0 and opt.prob_num_step > 1:
        model.reset_ray_miss_ranking()
    del visualizer, prob_maps
    model.opt.prob = 0
    return add_xyz, add_embedding, add_color, add_dir, add_conf
def bloat_inds(inds, shift, height, width):
    """Dilate (row, col) pixel indices by a square neighborhood.

    Every index in `inds` (shape (N, 2)) is expanded to the full
    (2*shift+1)^2 square of offsets around it, and the results are clamped to
    the [0, height) x [0, width) image bounds.  Returns an
    (N*(2*shift+1)^2, 2) CUDA tensor; duplicate indices are kept.
    """
    offs = torch.arange(-shift, shift + 1, dtype=torch.long)
    grid_r, grid_c = torch.meshgrid(offs, offs)
    offsets = torch.stack([grid_r, grid_c], dim=-1).reshape(1, -1, 2).cuda()
    bloated = (inds[:, None, :] + offsets).reshape(-1, 2)
    bloated[..., 0] = torch.clamp(bloated[..., 0], min=0, max=height - 1)
    bloated[..., 1] = torch.clamp(bloated[..., 1], min=0, max=width - 1)
    return bloated
def get_latest_epoch(resume_dir):
    """Return the step label of the newest '<step>_states.pth' in `resume_dir`.

    Creates `resume_dir` if it does not exist. Checkpoint files are named
    '<integer step>_states.pth'; entries whose prefix is not an integer
    (e.g. a stray 'latest_states.pth') are ignored instead of crashing the
    int() conversion, which the previous implementation did.

    Args:
        resume_dir: directory holding the saved training-state files.

    Returns:
        The string prefix of the highest-numbered checkpoint, or None when
        no valid checkpoint file exists.
    """
    os.makedirs(resume_dir, exist_ok=True)
    candidates = []
    for fname in os.listdir(resume_dir):
        if not fname.endswith("_states.pth"):
            continue
        stem = fname.split("_")[0]
        try:
            # Keep (numeric, string) pairs so max() picks the latest step
            # numerically but we still return the original string form.
            candidates.append((int(stem), stem))
        except ValueError:
            continue  # non-numeric prefix: not a step checkpoint
    return max(candidates)[1] if candidates else None
def create_all_bg(dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=False):
    """Precompute background-plane ray colors for every frame of `dataset`.

    For each frame the full-image rays are fed to the model, intersected with
    the background plane, and shaded from the reference images; the result is
    reshaped to (1, height, width, 3). The dataset's `random_sample` mode is
    forced to "no_crop" during the loop and restored afterwards.

    Args:
        dataset: frame source; `get_dummyrot_item` is used when `dummy` is
            True, `get_item` otherwise.
        model: exposes set_input / set_bg.
        img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst: reference-view
            data forwarded to model.set_bg.
        dummy: select the dummy-rotation item getter (for video rendering).

    Returns:
        List of per-frame background-ray tensors, one per dataset item.
    """
    bg_rays = []
    num_frames = dataset.total
    frame_h, frame_w = dataset.height, dataset.width
    saved_mode = dataset.opt.random_sample
    for frame_i in range(0, num_frames):
        # Full-frame rays are needed, so disable random cropping per item.
        dataset.opt.random_sample = "no_crop"
        data = dataset.get_dummyrot_item(frame_i) if dummy else dataset.get_item(frame_i)
        raydir = data['raydir'].clone()
        # print("data['pixel_idx']",data['pixel_idx'].shape) # 1, 512, 640, 2
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        span = frame_h * frame_w
        data['raydir'] = raydir[:, 0:span, :]
        data["pixel_idx"] = pixel_idx[:, 0:span, :]
        model.set_input(data)
        plane_points = mvs_utils.gen_bg_points(data)
        bg_ray, _ = model.set_bg(plane_points, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"])
        # (B, H*W, 3) -> (B, H, W, 3), e.g. 1, 512, 640, 3
        bg_rays.append(bg_ray.reshape(bg_ray.shape[0], frame_h, frame_w, 3))
    dataset.opt.random_sample = saved_mode
    return bg_rays
def main():
torch.backends.cudnn.benchmark = True
opt = TrainOptions().parse()
cur_device = torch.device('cuda:{}'.format(opt.gpu_ids[0]) if opt.
gpu_ids else torch.device('cpu'))
print("opt.color_loss_items ", opt.color_loss_items)
if opt.debug:
torch.autograd.set_detect_anomaly(True)
print(fmt.RED +
'++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print('Debug Mode')
print(
'++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' +
fmt.END)
visualizer = Visualizer(opt)
train_dataset = create_dataset(opt)
normRw2c = train_dataset.norm_w2c[:3,:3] # torch.eye(3, device="cuda") #
img_lst=None
best_PSNR=0.0
best_iter=0
points_xyz_all=None
with torch.no_grad():
print(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth")
if len([n for n in glob.glob(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth") if os.path.isfile(n)]) > 0:
if opt.bgmodel.endswith("plane"):
_, _, _, _, _, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
resume_dir = os.path.join(opt.checkpoints_dir, opt.name)
if opt.resume_iter == "best":
opt.resume_iter = "latest"
resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir)
if resume_iter is None:
epoch_count = 1
total_steps = 0
visualizer.print_details("No previous checkpoints, start from scratch!!!!")
else:
opt.resume_iter = resume_iter
states = torch.load(
os.path.join(resume_dir, '{}_states.pth'.format(resume_iter)), map_location=cur_device)
epoch_count = states['epoch_count']
total_steps = states['total_steps']
best_PSNR = states['best_PSNR'] if 'best_PSNR' in states else best_PSNR
best_iter = states['best_iter'] if 'best_iter' in states else best_iter
best_PSNR = best_PSNR.item() if torch.is_tensor(best_PSNR) else best_PSNR
visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
visualizer.print_details('Continue training from {} epoch'.format(opt.resume_iter))
visualizer.print_details(f"Iter: {total_steps}")
visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
del states
opt.mode = 2
opt.load_points=1
opt.resume_dir=resume_dir
opt.resume_iter = resume_iter
opt.is_train=True
model = create_model(opt)
elif opt.load_points < 1:
points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
opt.is_train=True
opt.mode = 2
model = create_model(opt)
else:
load_points = opt.load_points
opt.is_train = False
opt.mode = 1
opt.load_points = 0
model = create_model(opt)
model.setup(opt)
model.eval()
if load_points in [1,3]:
points_xyz_all = train_dataset.load_init_points()
if load_points == 2:
points_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=100)
if load_points == 3:
depth_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=80)
print("points_xyz_all",points_xyz_all.shape)
print("depth_xyz_all", depth_xyz_all.shape)
filter_res = 100
pc_grid_id, _, pc_space_min, pc_space_max = mvs_utils.construct_vox_points_ind(points_xyz_all, filter_res)
d_grid_id, depth_inds, _, _ = mvs_utils.construct_vox_points_ind(depth_xyz_all, filter_res, space_min=pc_space_min, space_max=pc_space_max)
all_grid= torch.cat([pc_grid_id, d_grid_id], dim=0)
min_id = torch.min(all_grid, dim=-2)[0]
max_id = torch.max(all_grid, dim=-2)[0] - min_id
max_id_lst = (max_id+1).cpu().numpy().tolist()
mask = torch.ones(max_id_lst, device=d_grid_id.device)
pc_maskgrid_id = (pc_grid_id - min_id[None,...]).to(torch.long)
mask[pc_maskgrid_id[...,0], pc_maskgrid_id[...,1], pc_maskgrid_id[...,2]] = 0
depth_maskinds = (d_grid_id[depth_inds,:] - min_id).to(torch.long)
depth_maskinds = mask[depth_maskinds[...,0], depth_maskinds[...,1], depth_maskinds[...,2]]
depth_xyz_all = depth_xyz_all[depth_maskinds > 0]
visualizer.save_neural_points("dep_filtered", depth_xyz_all, None, None, save_ref=False)
print("vis depth; after pc mask depth_xyz_all",depth_xyz_all.shape)
points_xyz_all = [points_xyz_all, depth_xyz_all] if opt.vox_res > 0 else torch.cat([points_xyz_all, depth_xyz_all],dim=0)
del depth_xyz_all, depth_maskinds, mask, pc_maskgrid_id, max_id_lst, max_id, min_id, all_grid
if opt.ranges[0] > -99.0:
ranges = torch.as_tensor(opt.ranges, device=points_xyz_all.device, dtype=torch.float32)
mask = torch.prod(
torch.logical_and(points_xyz_all[..., :3] >= ranges[None, :3], points_xyz_all[..., :3] <= ranges[None, 3:]),
dim=-1) > 0
points_xyz_all = points_xyz_all[mask]
if opt.vox_res > 0:
points_xyz_all = [points_xyz_all] if not isinstance(points_xyz_all, list) else points_xyz_all
points_xyz_holder = torch.zeros([0,3], dtype=points_xyz_all[0].dtype, device="cuda")
for i in range(len(points_xyz_all)):
points_xyz = points_xyz_all[i]
vox_res = opt.vox_res // (1.5**i)
print("load points_xyz", points_xyz.shape)
_, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(points_xyz.cuda() if len(points_xyz) < 80000000 else points_xyz[::(len(points_xyz) // 80000000 + 1), ...].cuda(), vox_res)
points_xyz = points_xyz[sampled_pnt_idx, :]
print("after voxelize:", points_xyz.shape)
points_xyz_holder = torch.cat([points_xyz_holder, points_xyz], dim=0)
points_xyz_all = points_xyz_holder
if opt.resample_pnts > 0:
if opt.resample_pnts == 1:
print("points_xyz_all",points_xyz_all.shape)
inds = torch.min(torch.norm(points_xyz_all, dim=-1, keepdim=True), dim=0)[1] # use the point closest to the origin
else:
inds = torch.randperm(len(points_xyz_all))[:opt.resample_pnts, ...]
points_xyz_all = points_xyz_all[inds, ...]
campos, camdir = train_dataset.get_campos_ray()
cam_ind = nearest_view(campos, camdir, points_xyz_all, train_dataset.id_list)
unique_cam_ind = torch.unique(cam_ind)
print("unique_cam_ind", unique_cam_ind.shape)
points_xyz_all = [points_xyz_all[cam_ind[:,0]==unique_cam_ind[i], :] for i in range(len(unique_cam_ind))]
featuredim = opt.point_features_dim
points_embedding_all = torch.zeros([1, 0, featuredim], device=unique_cam_ind.device, dtype=torch.float32)
points_color_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
points_dir_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
points_conf_all = torch.zeros([1, 0, 1], device=unique_cam_ind.device, dtype=torch.float32)
print("extract points embeding & colors", )
for i in tqdm(range(len(unique_cam_ind))):
id = unique_cam_ind[i]
batch = train_dataset.get_item(id, full_img=True)
HDWD = [train_dataset.height, train_dataset.width]
c2w = batch["c2w"][0].cuda()
w2c = torch.inverse(c2w)
intrinsic = batch["intrinsic"].cuda()
# cam_xyz_all 252, 4
cam_xyz_all = (torch.cat([points_xyz_all[i], torch.ones_like(points_xyz_all[i][...,-1:])], dim=-1) @ w2c.transpose(0,1))[..., :3]
embedding, color, dir, conf = model.query_embedding(HDWD, cam_xyz_all[None,...], None, batch['images'].cuda(), c2w[None, None,...], w2c[None, None,...], intrinsic[:, None,...], 0, pointdir_w=True)
conf = conf * opt.default_conf if opt.default_conf > 0 and opt.default_conf < 1.0 else conf
points_embedding_all = torch.cat([points_embedding_all, embedding], dim=1)
points_color_all = torch.cat([points_color_all, color], dim=1)
points_dir_all = torch.cat([points_dir_all, dir], dim=1)
points_conf_all = torch.cat([points_conf_all, conf], dim=1)
# visualizer.save_neural_points(id, cam_xyz_all, color, batch, save_ref=True)
points_xyz_all=torch.cat(points_xyz_all, dim=0)
visualizer.save_neural_points("init", points_xyz_all, points_color_all, None, save_ref=load_points == 0)
print("vis")
# visualizer.save_neural_points("cam", campos, None, None, None)
# print("vis")
# exit()
opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
opt.is_train = True
opt.mode = 2
model = create_model(opt)
if points_xyz_all is not None:
if opt.bgmodel.startswith("planepoints"):
gen_pnts, gen_embedding, gen_dir, gen_color, gen_conf = train_dataset.get_plane_param_points()
visualizer.save_neural_points("pl", gen_pnts, gen_color, None, save_ref=False)
print("vis pl")
points_xyz_all = torch.cat([points_xyz_all, gen_pnts], dim=0)
points_embedding_all = torch.cat([points_embedding_all, gen_embedding], dim=1)
points_color_all = torch.cat([points_color_all, gen_dir], dim=1)
points_dir_all = torch.cat([points_dir_all, gen_color], dim=1)
points_conf_all = torch.cat([points_conf_all, gen_conf], dim=1)
model.set_points(points_xyz_all.cuda(), points_embedding_all.cuda(), points_color=points_color_all.cuda(),
points_dir=points_dir_all.cuda(), points_conf=points_conf_all.cuda(),
Rw2c=normRw2c.cuda() if opt.load_points < 1 and opt.normview != 3 else None)
epoch_count = 1
total_steps = 0
del points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all
model.setup(opt, train_len=len(train_dataset))
model.train()
data_loader = create_data_loader(opt, dataset=train_dataset)
dataset_size = len(data_loader)
visualizer.print_details('# training images = {}'.format(dataset_size))
# create test loader
test_opt = copy.deepcopy(opt)
test_opt.is_train = False
test_opt.random_sample = 'no_crop'
test_opt.random_sample_size = min(48, opt.random_sample_size)
test_opt.batch_size = 1
test_opt.n_threads = 0
test_opt.prob = 0
test_opt.split = "test"
with open('/tmp/.neural-volumetric.name', 'w') as f:
f.write(opt.name + '\n')
visualizer.reset()
if total_steps > 0:
for scheduler in model.schedulers:
for i in range(total_steps):
scheduler.step()
fg_masks = None
bg_ray_train_lst, bg_ray_test_lst = [], []
if opt.bgmodel.endswith("plane"):
test_dataset = create_dataset(test_opt)
bg_ray_train_lst = create_all_bg(train_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
bg_ray_test_lst = create_all_bg(test_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
test_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_test_lst]
del test_dataset
if opt.vid > 0:
render_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
bg_ray_render_lst = create_all_bg(render_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=True)
render_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_render_lst]
else:
test_bg_info, render_bg_info = None, None
img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = None, None, None, None, None
############ initial test ###############
if total_steps == 0 and opt.maximum_step <= 0:
with torch.no_grad():
test_opt.nerf_splits = ["test"]
test_opt.split = "test"
test_opt.name = opt.name + "/test_{}".format(total_steps)
test_opt.test_num_step = opt.test_num_step
test_dataset = create_dataset(test_opt)
model.opt.is_train = 0
model.opt.no_loss = 1
test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps)
model.opt.no_loss = 0
model.opt.is_train = 1
model.train()
exit()
if total_steps == 0 and (len(train_dataset.id_list) > 30 or len(train_dataset.view_id_list) > 30):
other_states = {
'epoch_count': 0,
'total_steps': total_steps,
}
model.save_networks(total_steps, other_states)
visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, 0, total_steps))
real_start=total_steps
train_random_sample_size = opt.random_sample_size
for epoch in range(epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
for i, data in enumerate(data_loader):
if opt.maximum_step is not None and total_steps >= opt.maximum_step:
break
if opt.prune_iter > 0 and real_start != total_steps and total_steps % opt.prune_iter == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0 and total_steps <= opt.prune_max_iter:
with torch.no_grad():
model.clean_optimizer()
model.clean_scheduler()
model.prune_points(opt.prune_thresh)
model.setup_optimizer(opt)
model.init_scheduler(total_steps, opt)
torch.cuda.empty_cache()
torch.cuda.synchronize()
if opt.prob_freq > 0 and real_start != total_steps and total_steps % opt.prob_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0:
if opt.prob_kernel_size is not None:
tier = np.sum(np.asarray(opt.prob_tiers) < total_steps)
if (model.top_ray_miss_loss[0] > 1e-5 or opt.prob_mode != 0 or opt.far_thresh > 0) and (opt.prob_kernel_size is None or tier < (len(opt.prob_kernel_size) // 3)):
torch.cuda.empty_cache()
model.opt.is_train = 0
model.opt.no_loss = 1
with torch.no_grad():
prob_opt = copy.deepcopy(test_opt)
prob_opt.name = opt.name
# if opt.prob_type=0:
train_dataset.opt.random_sample = "no_crop"
if opt.prob_mode <= 0:
train_dataset.opt.random_sample_size = min(32, train_random_sample_size)
prob_dataset = train_dataset
elif opt.prob_mode == 1:
prob_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
else:
prob_dataset = create_comb_dataset(test_opt, opt, total_steps, test_num_step=1)
model.eval()
add_xyz, add_embedding, add_color, add_dir, add_conf = probe_hole(model, prob_dataset, Visualizer(prob_opt), prob_opt, None, test_steps=total_steps, opacity_thresh=opt.prob_thresh)
torch.cuda.empty_cache()
torch.cuda.synchronize()
if opt.prob_mode != 0:
del prob_dataset
# else:
if len(add_xyz) > 0:
print("len(add_xyz)", len(add_xyz))
model.clean_optimizer_scheduler()
model.grow_points(add_xyz, add_embedding, add_color, add_dir, add_conf)
length_added = len(add_xyz)
del add_xyz, add_embedding, add_color, add_dir, add_conf
torch.cuda.empty_cache()
torch.cuda.synchronize()
other_states = {
"best_PSNR": best_PSNR,
"best_iter": best_iter,
'epoch_count': epoch,
'total_steps': total_steps,
}
visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
print("other_states",other_states)
model.save_networks(total_steps, other_states, back_gpu=False)
visualizer.print_details(
"$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(length_added, len(model.neural_points.xyz)))
# model.reset_optimizer(opt)
# model.reset_scheduler(total_steps, opt)
# model.cleanup()
# pprint(vars(model))
# del model
# visualizer.reset()
# gc.collect()
# torch.cuda.synchronize()
# torch.cuda.empty_cache()
# input("Press Enter to continue...")
# opt.is_train = 1
# opt.no_loss = 0
# model = create_model(opt)
#
# model.setup(opt, train_len=len(train_dataset))
# model.train()
#
# if total_steps > 0:
# for scheduler in model.schedulers:
# for i in range(total_steps):
# scheduler.step()
exit()
visualizer.print_details("$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(len(add_xyz), len(model.neural_points.xyz)))
train_dataset.opt.random_sample = "random"
model.train()
model.opt.no_loss = 0
model.opt.is_train = 1
train_dataset.opt.random_sample_size = train_random_sample_size
torch.cuda.synchronize()
torch.cuda.empty_cache()
else:
visualizer.print_details(
'nothing to probe, max ray miss is only {}'.format(model.top_ray_miss_loss[0]))
total_steps += 1
model.set_input(data)
if opt.bgmodel.endswith("plane"):
if len(bg_ray_train_lst) > 0:
bg_ray_all = bg_ray_train_lst[data["id"]]
bg_idx = data["pixel_idx"].view(-1,2)
bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
else:
xyz_world_sect_plane = mvs_utils.gen_bg_points(model.input)
bg_ray, fg_masks = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks=fg_masks)
data["bg_ray"] = bg_ray
model.optimize_parameters(total_steps=total_steps)
losses = model.get_current_losses()
visualizer.accumulate_losses(losses)
if opt.lr_policy.startswith("iter"):
model.update_learning_rate(opt=opt, total_steps=total_steps)
if total_steps and total_steps % opt.print_freq == 0:
if opt.show_tensorboard:
visualizer.plot_current_losses_with_tb(total_steps, losses)
visualizer.print_losses(total_steps)
visualizer.reset()
if hasattr(opt, "save_point_freq") and total_steps and total_steps % opt.save_point_freq == 0 and (opt.prune_iter > 0 and total_steps <= opt.prune_max_iter or opt.save_point_freq==1):
visualizer.save_neural_points(total_steps, model.neural_points.xyz, model.neural_points.points_embeding, data, save_ref=opt.load_points==0)
visualizer.print_details('saving neural points at total_steps {})'.format(total_steps))
try:
if total_steps == 10000 or (total_steps % opt.save_iter_freq == 0 and total_steps > 0):
other_states = {
"best_PSNR": best_PSNR,
"best_iter": best_iter,
'epoch_count': epoch,
'total_steps': total_steps,
}
visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
model.save_networks(total_steps, other_states)
except Exception as e:
visualizer.print_details(e)
if opt.vid > 0 and total_steps % opt.vid == 0 and total_steps > 0:
torch.cuda.empty_cache()
test_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
model.opt.is_train = 0
model.opt.no_loss = 1
with torch.no_grad():
render_vid(model, test_dataset, Visualizer(test_opt), test_opt, render_bg_info, steps=total_steps)
model.opt.no_loss = 0
model.opt.is_train = 1
del test_dataset
if total_steps == 10000 or (total_steps % opt.test_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0):
torch.cuda.empty_cache()
test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
model.opt.is_train = 0
model.opt.no_loss = 1
with torch.no_grad():
if opt.test_train == 0:
test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
else:
train_dataset.opt.random_sample = "no_crop"
test_psnr = test(model, train_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
train_dataset.opt.random_sample = opt.random_sample
model.opt.no_loss = 0
model.opt.is_train = 1
del test_dataset
best_iter = total_steps if test_psnr > best_PSNR else best_iter
best_PSNR = max(test_psnr, best_PSNR)
visualizer.print_details(f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
model.train()
# try:
# print("saving the model at the end of epoch")
# other_states = {'epoch_count': epoch, 'total_steps': total_steps}
# model.save_networks('latest', other_states)
#
# except Exception as e:
# print(e)
if opt.maximum_step is not None and total_steps >= opt.maximum_step:
visualizer.print_details('{}: End of stepts {} / {} \t Time Taken: {} sec'.format(
opt.name, total_steps, opt.maximum_step,
time.time() - epoch_start_time))
break
del train_dataset
other_states = {
'epoch_count': epoch,
'total_steps': total_steps,
}
visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
model.save_networks(total_steps, other_states)
torch.cuda.empty_cache()
test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
model.opt.no_loss = 1
model.opt.is_train = 0
visualizer.print_details("full datasets test:")
with torch.no_grad():
test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, gen_vid=True, lpips=True)
best_iter = total_steps if test_psnr > best_PSNR else best_iter
best_PSNR = max(test_psnr, best_PSNR)
visualizer.print_details(
f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
exit()
def save_points_conf(visualizer, xyz, points_color, points_conf, total_steps):
    """Dump point clouds bucketed by per-point confidence, then terminate.

    The points are split into confidence bands with upper bounds
    0.0, 0.1, ..., 1.0 and a final catch-all band up to 1000; each band is
    saved as a separate neural point cloud through *visualizer*.
    Calls exit() when done (debug utility).
    """
    print("total:", xyz.shape, points_color.shape, points_conf.shape)
    rgb = points_color[0]
    conf = points_conf[0, ..., 0]
    # 12 band upper bounds: 0.0 .. 1.0 in 0.1 steps, then 1000 as catch-all
    bounds = [step * 0.1 for step in range(11)] + [1000]
    lower = -1000
    for thresh in bounds:
        band = ((conf <= thresh) * (conf > lower)) > 0
        visualizer.save_neural_points(f"{total_steps}-{thresh}", xyz[band, :], rgb[band, :][None, ...], None, save_ref=False)
        lower = thresh
    exit()
def create_render_dataset(test_opt, opt, total_steps, test_num_step=1):
    """Configure *test_opt* for the "render" split and build its dataset.

    Mutates *test_opt* in place (split names, output dir tagged with
    *total_steps*, sampling settings) and returns the resulting dataset.
    """
    overrides = {
        "nerf_splits": ["render"],
        "split": "render",
        "name": opt.name + "/vid_{}".format(total_steps),
        "test_num_step": test_num_step,
        "random_sample_size": 30,
    }
    for attr, value in overrides.items():
        setattr(test_opt, attr, value)
    return create_dataset(test_opt)
def create_test_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Configure *test_opt* for the "test" split and build its dataset.

    When *prob* is given it overrides test_opt.prob; otherwise the existing
    value is kept. Mutates *test_opt* in place and returns the dataset.
    """
    if prob is not None:
        test_opt.prob = prob
    overrides = {
        "nerf_splits": ["test"],
        "split": "test",
        "name": opt.name + "/test_{}".format(total_steps),
        "test_num_step": test_num_step,
    }
    for attr, value in overrides.items():
        setattr(test_opt, attr, value)
    return create_dataset(test_opt)
def create_comb_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Configure *test_opt* for the "comb" split and build its dataset.

    When *prob* is given it overrides test_opt.prob; otherwise the existing
    value is kept. Mutates *test_opt* in place and returns the dataset.
    """
    if prob is not None:
        test_opt.prob = prob
    overrides = {
        "nerf_splits": ["comb"],
        "split": "comb",
        "name": opt.name + "/comb_{}".format(total_steps),
        "test_num_step": test_num_step,
    }
    for attr, value in overrides.items():
        setattr(test_opt, attr, value)
    return create_dataset(test_opt)
if __name__ == '__main__':
    # Script entry point: run the training/testing pipeline defined above.
    main()
| 60,882 | 55.268946 | 392 | py |
pointnerf | pointnerf-master/run/test_ft.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
# Fix the torch and numpy RNG seeds so evaluation runs are reproducible.
torch.manual_seed(0)
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in decibels."""
    # PSNR = -10 * log10(mse); torch.log10 keeps everything on-device.
    return -10. * torch.log10(x)
def save_image(img_array, filepath):
    """Write a grayscale (H, W) or color (H, W, 3|4) array to *filepath*.

    Non-uint8 inputs are assumed to lie in [0, 1]; they are clipped and
    rescaled to uint8 before saving. Missing parent directories are created.
    """
    ndim = len(img_array.shape)
    assert ndim == 2 or (ndim == 3 and img_array.shape[2] in [3, 4])
    if img_array.dtype != np.uint8:
        # clip to the unit range, then quantize to 8-bit
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    Image.fromarray(img_array).save(filepath)
def nearest_view(campos, raydir, xyz, id_list):
    """Pick, for every point in *xyz*, the index of the best-matching camera.

    The score blends Euclidean distance (scaled by 1/200) with how well the
    camera-to-point direction agrees with *raydir* (1.1 - cosine). Points are
    processed in chunks of 10000 to bound peak memory. *id_list* is unused
    here. Returns an (N, 1) long tensor of camera indices.
    """
    chunk = 10000
    total = len(xyz)
    picked = [torch.zeros([0, 1], device=campos.device, dtype=torch.long)]
    for begin in range(0, total, chunk):
        offsets = xyz[begin:min(total, begin + chunk), None, :] - campos[None, ...]  # N, M, 3
        lengths = torch.norm(offsets, dim=-1)  # N, M
        directions = offsets / (lengths[..., None] + 1e-6)  # N, M, 3
        score = lengths / 200 + (1.1 - torch.sum(directions * raydir[None, :], dim=-1))  # N, M
        picked.append(torch.argmin(score, dim=1).view(-1, 1))  # N, 1
    return torch.cat(picked, dim=0)
def masking(mask, firstdim_lst, seconddim_lst):
    """Apply a boolean *mask* along dim 0 / dim 1 of two lists of tensors.

    Tensors in *firstdim_lst* are filtered along their first dimension,
    tensors in *seconddim_lst* along their second. None entries pass through
    untouched. Returns the two filtered lists.
    """
    masked_first = []
    for tensor in firstdim_lst:
        masked_first.append(None if tensor is None else tensor[mask, ...])
    masked_second = []
    for tensor in seconddim_lst:
        masked_second.append(None if tensor is None else tensor[:, mask, ...])
    return masked_first, masked_second
def render_vid(model, dataset, visualizer, opt, bg_info, steps=0, gen_vid=True):
    """Render every view of *dataset* with *model*, optionally making a video.

    Each view is rendered in chunks of opt.random_sample_size**2 rays to
    bound GPU memory; chunks are stitched into (height, width, 3) images and
    written via *visualizer*. No losses are computed here. *bg_info* is only
    unpacked when opt.bgmodel ends with "plane".
    """
    print('-----------------------------------Rendering-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    for i in range(0, total_num):
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        # flatten pixel indices to (B, H*W, C) so they can be chunked below
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # cam_posts.append(data['campos'])
        # cam_dirs.append(data['raydir'] + data['campos'][None,...])
        # continue
        visuals = None
        stime = time.time()
        # render this view chunk_size rays at a time
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            # print("tmpgts", tmpgts["gt_image"].shape)
            # print(data["pixel_idx"])
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # background rays were precomputed per view: slice this chunk
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    # compute background rays on the fly from the plane intersection
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            if visuals is None:
                # first chunk: allocate a flat (H*W, 3) buffer per visual
                visuals = {}
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    visuals[key][start:end, :] = value.cpu().numpy()
        # reshape the stitched buffers into images and save them
        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)
    # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False)
    # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0),None, None, save_ref=False)
    # print("vis")
    # exit()
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num), 0)
        print('--------------------------------Finish generating vid--------------------------------')
    return
def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=True, lpips=True):
    """Evaluate *model* on the test split and report image metrics.

    Every opt.test_num_step-th view is rendered in chunks of
    opt.random_sample_size**2 rays, stitched into full images, saved through
    *visualizer*, and scored against the ground truth with MSE/PSNR (loss
    tensors are moved to CUDA). report_metrics then scores the saved images
    (PSNR/SSIM/RMSE, plus LPIPS variants when *lpips* is True); a video is
    optionally generated. Returns the PSNR for the first entry of
    opt.test_color_loss_items.
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step)) # 1 if test_steps == 10000 else opt.test_num_step
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    count = 0;
    for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step
        data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # mark which flattened pixel positions this view actually covers
        edge_mask = torch.zeros([height, width], dtype=torch.bool)
        edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1
        edge_mask=edge_mask.reshape(-1) > 0
        np_edge_mask=edge_mask.numpy().astype(bool)
        totalpixel = pixel_idx.shape[1]
        # stash ground truth before the per-chunk loop mutates `data`
        tmpgts = {}
        tmpgts["gt_image"] = data['gt_image'].clone()
        tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None
        print("data['gt_image']")
        # data.pop('gt_image', None)
        data.pop('gt_mask', None)
        visuals = None
        stime = time.time()
        ray_masks = []
        for k in range(0, totalpixel, chunk_size):
            start = k
            end = min([k + chunk_size, totalpixel])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # precomputed background rays: slice out this chunk's pixels
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32)
            if visuals is None:
                # first chunk: allocate an (H, W, 3) buffer per visual
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype)
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy()
            if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
                ray_masks.append(model.output["ray_mask"] > 0)
        if len(ray_masks) > 0:
            ray_masks = torch.cat(ray_masks, dim=1)
        # scatter the ground truth into a full flat image buffer
        gt_image = torch.zeros((height*width, 3), dtype=torch.float32)
        gt_image[edge_mask, :] = tmpgts['gt_image'].clone()
        if 'gt_image' in model.visual_names:
            visuals['gt_image'] = gt_image
        if 'gt_mask' in curr_visuals:
            visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype)
            visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask']
        if 'ray_masked_coarse_raycolor' in model.visual_names:
            # zero out pixels whose rays hit no neural points
            visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape)
            visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        if 'ray_depth_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'ray_depth_masked_gt_image' in model.visual_names:
            visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'gt_image_ray_masked' in model.visual_names:
            visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        for key, value in visuals.items():
            if key in opt.visual_items:
                visualizer.print_details("{}:{}".format(key, visuals[key].shape))
                visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i, opt=opt)
        # per-view MSE losses, computed on CUDA
        acc_dict = {}
        if "coarse_raycolor" in opt.test_color_loss_items:
            loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), gt_image.view(1, -1, 3).cuda())
            acc_dict.update({"coarse_raycolor": loss})
            print("coarse_raycolor", loss, mse2psnr(loss))
        if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
            masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3)
            ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3)
            loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss)))
        if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items:
            ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1)
            masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
            ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3))
            loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_depth_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss)))
        print(acc_dict.items())
        visualizer.accumulate_losses(acc_dict)
        count+=1
    visualizer.print_losses(count)
    psnr = visualizer.get_psnr(opt.test_color_loss_items[0])
    print('--------------------------------Finish Test Rendering--------------------------------')
    report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"], [i for i in range(0, total_num, opt.test_num_step)], imgStr="step-%04d-{}.png".format(opt.visual_items[0]),gtStr="step-%04d-{}.png".format(opt.visual_items[1]))
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num, opt.test_num_step), test_steps)
        print('--------------------------------Finish generating vid--------------------------------')
    return psnr
def get_latest_epoch(resume_dir):
    """Return the highest checkpoint iteration stored in *resume_dir*, as a string.

    Checkpoint state files are named "<iter>_states.pth". Files whose prefix
    is not a plain non-negative integer (e.g. "latest_states.pth") are
    skipped instead of crashing the int() conversion. The directory is
    created when missing; returns None when no numeric checkpoint exists.
    """
    os.makedirs(resume_dir, exist_ok=True)
    # keep only "<digits>_states.pth" prefixes so int() below cannot raise
    str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir)
                 if file.endswith("_states.pth") and file.split("_")[0].isdigit()]
    int_epoch = [int(i) for i in str_epoch]
    return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))]
def main():
    """Entry point: restore the latest (or requested) checkpoint and evaluate it.

    Parses TrainOptions, resolves which checkpoint iteration to resume from,
    builds the model plus a test-split copy of the options, and runs test()
    once under torch.no_grad(). Exits early when no checkpoint exists.
    """
    torch.backends.cudnn.benchmark = True
    opt = TrainOptions().parse()
    # NOTE(review): cur_device is computed but never used below — confirm it can go.
    cur_device = torch.device('cuda:{}'.format(opt.gpu_ids[0]) if opt.
                              gpu_ids else torch.device('cpu'))
    print("opt.color_loss_items ", opt.color_loss_items)
    if opt.debug:
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED +
              '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print(
            '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' +
            fmt.END)
    visualizer = Visualizer(opt)
    train_dataset = create_dataset(opt)
    img_lst=None
    with torch.no_grad():
        print(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth")
        if opt.bgmodel.endswith("plane"):
            # background-plane mode needs per-view images/poses for bg rays
            _, _, _, _, _, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
        resume_dir = os.path.join(opt.checkpoints_dir, opt.name)
        if opt.resume_iter == "best":
            opt.resume_iter = "latest"
        resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir)
        if resume_iter is None:
            # NOTE(review): print_details is given two args but the "{}" is never
            # formatted into the message — confirm intended signature.
            visualizer.print_details("No previous checkpoints at iter {} !!", resume_iter)
            exit()
        else:
            opt.resume_iter = resume_iter
            visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
            visualizer.print_details('test at {} iters'.format(opt.resume_iter))
            visualizer.print_details(f"Iter: {resume_iter}")
            visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        opt.mode = 2
        opt.load_points=1
        opt.resume_dir=resume_dir
        opt.resume_iter = resume_iter
        opt.is_train=True
    model = create_model(opt)
    model.setup(opt, train_len=len(train_dataset))
    # create test loader
    test_opt = copy.deepcopy(opt)
    test_opt.is_train = False
    test_opt.random_sample = 'no_crop'
    test_opt.random_sample_size = min(48, opt.random_sample_size)
    test_opt.batch_size = 1
    test_opt.n_threads = 0
    test_opt.prob = 0
    test_opt.split = "test"
    visualizer.reset()
    fg_masks = None
    test_bg_info = None
    if opt.bgmodel.endswith("plane"):
        test_dataset = create_dataset(test_opt)
        bg_ray_test_lst = create_all_bg(test_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
        test_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_test_lst]
        del test_dataset
    # if opt.vid > 0:
    #     render_dataset = create_render_dataset(test_opt, opt, resume_iter, test_num_step=opt.test_num_step)
    ############ initial test ###############
    with torch.no_grad():
        test_opt.nerf_splits = ["test"]
        test_opt.split = "test"
        test_opt.name = opt.name + "/test_{}".format(resume_iter)
        test_opt.test_num_step = opt.test_num_step
        test_dataset = create_dataset(test_opt)
        model.opt.is_train = 0
        model.opt.no_loss = 1
        test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=resume_iter)
if __name__ == '__main__':
    # Script entry point: evaluate the latest checkpoint on the test split.
    main()
| 17,612 | 48.754237 | 331 | py |
pointnerf | pointnerf-master/run/train.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
# Fix the torch and numpy RNG seeds so training runs are reproducible.
torch.manual_seed(0)
np.random.seed(0)
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in decibels."""
    # PSNR = -10 * log10(mse); torch.log10 keeps everything on-device.
    return -10. * torch.log10(x)
def test(model, dataset, visualizer, opt, test_steps=0):
    """Evaluate *model* on the test split during training and report metrics.

    Every opt.test_num_step-th view is rendered in chunks of
    opt.random_sample_size**2 rays, stitched into full images, saved via
    *visualizer*, and scored against the ground truth with MSE/PSNR (loss
    tensors are moved to CUDA). report_metrics then scores the saved images
    with PSNR/SSIM/RMSE.
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.total
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    count=0
    for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step
        data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # mark which flattened pixel positions this view actually covers
        edge_mask = torch.zeros([height, width], dtype=torch.bool)
        edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1
        edge_mask=edge_mask.reshape(-1) > 0
        np_edge_mask=edge_mask.numpy().astype(bool)
        totalpixel = pixel_idx.shape[1]
        # stash ground truth before the per-chunk loop mutates `data`
        tmpgts = {}
        tmpgts["gt_image"] = data['gt_image'].clone()
        tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None
        # data.pop('gt_image', None)
        data.pop('gt_mask', None)
        visuals = None
        stime = time.time()
        ray_masks = []
        ray_depth_masks = []
        xyz_world_sect_plane_lst = []
        for k in range(0, totalpixel, chunk_size):
            start = k
            end = min([k + chunk_size, totalpixel])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                # NOTE(review): bg_info is not defined in this function or at module
                # scope here — this branch would raise NameError if taken; confirm.
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # precomputed background rays: slice out this chunk's pixels
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
                # xyz_world_sect_plane_lst.append(xyz_world_sect_plane)
            model.test(gen_points=True)
            curr_visuals = model.get_current_visuals(data=data)
            # print("loss", mse2psnr(torch.nn.MSELoss().to("cuda")(curr_visuals['coarse_raycolor'], tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :].cuda())))
            # print("sum", torch.sum(torch.square(tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :] - tmpgts["gt_image"].view(height, width, 3)[data["pixel_idx"][0,...,1].long(), data["pixel_idx"][0,...,0].long(),:])))
            chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32)
            if visuals is None:
                # first chunk: allocate an (H, W, 3) buffer per visual
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype)
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy()
            if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
                ray_masks.append(model.output["ray_mask"] > 0)
        # NOTE(review): unlike the test() in run/test_ft.py there is no emptiness
        # guard here — torch.cat raises if ray_masks stayed empty; confirm intended.
        ray_masks = torch.cat(ray_masks, dim=1)
        # visualizer.save_neural_points(data["id"].cpu().numpy()[0], (raydir.cuda() + data["campos"][:, None, :]).squeeze(0), None, data, save_ref=True)
        # exit()
        # print("curr_visuals",curr_visuals)
        pixel_idx=pixel_idx.to(torch.long)
        if 'gt_image' in model.visual_names:
            # scatter the ground truth into a full flat image buffer
            visuals['gt_image'] = torch.zeros((height*width, 3), dtype=torch.float32)
            visuals['gt_image'][edge_mask,:] = tmpgts['gt_image'].clone()
        if 'gt_mask' in curr_visuals:
            visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype)
            visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask']
        if 'ray_masked_coarse_raycolor' in model.visual_names:
            # zero out pixels whose rays hit no neural points
            visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape)
            visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        if 'ray_depth_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'ray_depth_masked_gt_image' in model.visual_names:
            visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'gt_image_ray_masked' in model.visual_names:
            visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)
        # per-view MSE losses, computed on CUDA
        acc_dict = {}
        if "coarse_raycolor" in opt.test_color_loss_items:
            loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), visuals["gt_image"].view(1, -1, 3).cuda())
            acc_dict.update({"coarse_raycolor": loss})
            print("coarse_raycolor", loss, mse2psnr(loss))
        if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
            masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3)
            ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3)
            # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_gt")
            # filepath = os.path.join("/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
            # tmpgtssave = tmpgts["gt_image"].view(1, -1, 3).clone()
            # tmpgtssave[~ray_masks,:] = 1.0
            # img = np.array(tmpgtssave.view(height,width,3))
            # save_image(img, filepath)
            #
            # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_coarse_raycolor")
            # filepath = os.path.join(
            #     "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename)
            # csave = torch.zeros_like(tmpgts["gt_image"].view(1, -1, 3))
            # csave[~ray_masks, :] = 1.0
            # csave[ray_masks, :] = torch.as_tensor(visuals["coarse_raycolor"]).view(1, -1, 3)[ray_masks,:]
            # img = np.array(csave.view(height, width, 3))
            # save_image(img, filepath)
            loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss)))
        if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items:
            ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1)
            masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
            ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3))
            loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_depth_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss)))
        print(acc_dict.items())
        visualizer.accumulate_losses(acc_dict)
        count += 1
    visualizer.print_losses(count)
    # psnr = visualizer.get_psnr(opt.test_color_loss_items[0])
    visualizer.reset()
    print('--------------------------------Finish Test Rendering--------------------------------')
    report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "rmse"],
                   [i for i in range(0, count)],
                   imgStr="step-%04d-{}_raycolor.png".format("coarse"))
    print('--------------------------------Finish Evaluation--------------------------------')
    return
def get_latest_epoch(resume_dir):
    """Return the highest saved checkpoint step in `resume_dir` as its original string prefix.

    Checkpoints are files named '<step>_states.pth'. Returns None when no such
    file exists. Creates `resume_dir` if missing so callers can rely on it existing.
    """
    os.makedirs(resume_dir, exist_ok=True)
    str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir)
                 if file.endswith("_states.pth")]
    # Skip non-numeric prefixes (e.g. 'latest_states.pth'): the original
    # int() conversion crashed on them with a ValueError.
    str_epoch = [s for s in str_epoch if s.isdigit()]
    if not str_epoch:
        return None
    # Compare numerically but return the original string form (preserves zero padding).
    return max(str_epoch, key=int)
def main():
    """Training entry point: build loaders/model, (optionally) resume, run the
    epoch/step loop with periodic logging, checkpointing and test rendering,
    then save a final checkpoint and run a final full test pass."""
    torch.backends.cudnn.benchmark = True
    opt = TrainOptions().parse()
    print("opt.color_loss_items ", opt.color_loss_items)

    if opt.debug:
        # Anomaly detection is slow; only enabled to chase NaN/inf gradients.
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED +
              '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print(
            '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' +
            fmt.END)

    data_loader = create_data_loader(opt)
    dataset_size = len(data_loader)
    print('# training images = {}'.format(dataset_size))

    # Resume bookkeeping: restore epoch/step counters from '<iter>_states.pth'.
    if opt.resume_dir:
        resume_dir = opt.resume_dir
        resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir)
        opt.resume_iter = resume_iter
        if resume_iter is None:
            epoch_count = 1
            total_steps = 0
            print("No previous checkpoints, start from scratch!!!!")
        else:
            opt.resume_iter = resume_iter  # NOTE(review): redundant reassignment, kept as-is
            states = torch.load(
                os.path.join(resume_dir, '{}_states.pth'.format(resume_iter)))
            epoch_count = states['epoch_count']
            total_steps = states['total_steps']
            print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
            print('Continue training from {} epoch'.format(opt.resume_iter))
            print("Iter: ", total_steps)
            print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
    else:
        epoch_count = 1
        total_steps = 0
    print("opt.resume_dir ", opt.resume_dir, opt.resume_iter)

    # load model
    model = create_model(opt)
    model.setup(opt)

    visualizer = Visualizer(opt)

    # create test loader: a stripped-down copy of the training options
    # (single-image batches, deterministic sampling, no workers).
    test_opt = copy.deepcopy(opt)
    test_opt.is_train = False
    test_opt.random_sample = 'no_crop'
    test_opt.random_sample_size = min(32, opt.random_sample_size)
    test_opt.batch_size = 1
    test_opt.n_threads = 0
    test_opt.split = "test"
    # test_dataset = create_dataset(test_opt)

    # Record the currently-running experiment name for external tooling.
    with open('/tmp/.neural-volumetric.name', 'w') as f:
        f.write(opt.name + '\n')

    visualizer.reset()

    # Fast-forward LR schedulers to the resumed step count so the learning
    # rate matches where training left off.
    if total_steps > 0:
        for scheduler in model.schedulers:
            for i in range(total_steps):
                scheduler.step()

    for epoch in range(epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        epoch_iter = 0
        for i, data in enumerate(data_loader):
            if opt.maximum_step is not None and total_steps >= opt.maximum_step:
                break
            total_steps += 1
            epoch_iter += 1
            model.set_input(data)
            model.optimize_parameters(total_steps=total_steps)
            losses = model.get_current_losses()
            visualizer.accumulate_losses(losses)

            if opt.lr_policy.startswith("iter"):
                # Per-iteration LR schedules step here; epoch schedules step below.
                model.update_learning_rate(opt=opt, total_steps=total_steps)

            if total_steps and total_steps % opt.print_freq == 0:
                if opt.show_tensorboard:
                    visualizer.plot_current_losses_with_tb(total_steps, losses)
                visualizer.print_losses(total_steps)
                visualizer.reset()

            # Periodically dump the neural point cloud for inspection.
            if hasattr(opt, "save_point_freq") and total_steps and total_steps % opt.save_point_freq == 0:
                visualizer.save_neural_points(total_steps, model.neural_points.xyz, model.neural_points.points_embeding, data, save_ref=opt.load_points==0)

            # if opt.train_and_test == 1 and total_steps % opt.test_freq == 0:
            #     test(model, test_dataset, visualizer, test_opt, total_steps)
            # if opt.vid == 1 and total_steps % opt.test_freq == 0:
            #     model.opt.no_loss = 1
            #     render_vid(model, test_dataset, visualizer, test_opt, total_steps)
            #     model.opt.no_loss = 0

            # Checkpointing is wrapped in try/except so a transient disk error
            # does not abort training.
            try:
                if total_steps % opt.save_iter_freq == 0 and total_steps > 0:
                    other_states = {
                        'epoch_count': epoch,
                        'total_steps': total_steps,
                    }
                    print('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
                    model.save_networks(total_steps, other_states)
                    # if opt.vid == 1:
                    #     model.opt.is_train = 0
                    #     model.opt.no_loss = 1
                    #     test_opt.nerf_splits = ["test"]
                    #     test_opt.name = opt.name + "/test_{}".format(total_steps)
                    #     test_opt.test_num = 999
                    #     render_vid(model, test_dataset, Visualizer(test_opt), test_opt, total_steps)
                    #     model.opt.no_loss = 0
                    #     model.opt.is_train = 1
            except Exception as e:
                print(e)

            # Mid-training evaluation: temporarily flip the model into test
            # mode, render the test split, then restore training mode.
            if total_steps % opt.test_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0:
                test_opt.nerf_splits = ["test"]
                test_opt.split = "test"
                test_opt.name = opt.name + "/test_{}".format(total_steps)
                test_opt.test_num_step = opt.test_num_step
                test_dataset = create_dataset(test_opt)
                model.opt.is_train = 0
                model.opt.no_loss = 1
                test(model, test_dataset, Visualizer(test_opt), test_opt, total_steps)
                model.opt.no_loss = 0
                model.opt.is_train = 1

        # try:
        #     print("saving the model at the end of epoch")
        #     other_states = {'epoch_count': epoch, 'total_steps': total_steps}
        #     model.save_networks('latest', other_states)
        #
        # except Exception as e:
        #     print(e)

        # NOTE(review): test_dataset is only defined once the test branch above
        # has run at least once — presumably opt.vid is used with test_freq set.
        if opt.vid == 1:
            model.opt.is_train = 0
            model.opt.no_loss = 1
            render_vid(model, test_dataset, visualizer, test_opt, total_steps)
            model.opt.no_loss = 0
            model.opt.is_train = 1

        if opt.maximum_step is not None and total_steps == opt.maximum_step:
            print('{}: End of stepts {} / {} \t Time Taken: {} sec'.format(
                opt.name, total_steps, opt.maximum_step,
                time.time() - epoch_start_time))
            break

        print('{}: End of epoch {} / {} \t Time Taken: {} sec'.format(
            opt.name, epoch, opt.niter + opt.niter_decay,
            time.time() - epoch_start_time))

        if not opt.lr_policy.startswith("iter"):
            model.update_learning_rate(opt=opt)

    # Final checkpoint and a full-resolution test pass after training completes.
    other_states = {
        'epoch_count': epoch,
        'total_steps': total_steps,
    }
    print('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
    model.save_networks(total_steps, other_states)

    test_opt.nerf_splits = ["test"]
    test_opt.split = "test"
    test_opt.test_num_step=1
    test_opt.name = opt.name + "/test_{}".format(total_steps)
    test_dataset = create_dataset(test_opt)
    model.opt.no_loss = 1
    model.opt.is_train = 0
    test(model, test_dataset, Visualizer(test_opt), test_opt, total_steps)
    # model.opt.no_loss = 0
    # model.opt.is_train = 1
# CLI entry point: run the full training pipeline.
if __name__ == '__main__':
    main()
| 17,776 | 47.438692 | 219 | py |
pointnerf | pointnerf-master/run/visualize.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import copy
import torch
import numpy as np
import time
from options import TestOptions
from data import create_data_loader, create_dataset
from models import create_model
from utils.visualizer import Visualizer
from utils import format as fmt
def main():
    """Load a trained checkpoint and display its neural point cloud in an
    interactive pyrender viewer."""
    torch.backends.cudnn.benchmark = True
    opt = TestOptions().parse()
    if opt.debug:
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END)

    # A checkpoint directory to visualize is mandatory for this script.
    assert opt.resume_dir is not None
    resume_dir = opt.resume_dir
    states = torch.load(os.path.join(resume_dir, '{}_states.pth'.format(opt.resume_iter)))
    epoch_count = states['epoch_count']
    total_steps = states['total_steps']
    print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
    print('Resume from {} epoch'.format(opt.resume_iter))
    print("Iter: ", total_steps)
    print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')

    # load model
    model = create_model(opt)
    model.setup(opt)

    # Keep only points whose first channel (presumably density/alpha — confirm
    # against build_point_cloud_visualization) exceeds `thres`.
    thres = 10
    grid, argb = model.net_ray_marching.module.build_point_cloud_visualization(0)
    mask = argb[..., 0] > thres
    points = grid[mask]
    colors = argb[mask][..., 1:4]

    import pyrender  # imported lazily: only needed for interactive viewing
    mesh = pyrender.Mesh.from_points(points, colors=colors)
    scene = pyrender.Scene()
    scene.add(mesh)
    pyrender.Viewer(scene, render_flags={'point_size': 10}, use_raymond_lighting=True)
# CLI entry point: visualize a trained point cloud.
if __name__ == '__main__':
    main()
| 1,721 | 29.75 | 90 | py |
pointnerf | pointnerf-master/run/vis_grow_train.py | import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
# from render_vid import render_vid
torch.manual_seed(0)
np.random.seed(0)
from tqdm import tqdm
import cv2
from PIL import Image
import imageio
from utils.util import to8b
def read_image(filepath, dtype=None):
    """Load an image file as a numpy array.

    When `dtype` is np.float32 the pixel values are rescaled from [0, 255]
    to [0, 1]; otherwise the raw integer array is returned.
    """
    pixels = np.asarray(Image.open(filepath))
    wants_float = dtype is not None and dtype == np.float32
    if wants_float:
        pixels = (pixels / 255).astype(dtype)
    return pixels
def render_grow(pnt_dir, iters, vids):
    """Assemble per-iteration probe renderings into one growth video per view.

    For each view id in `vids`, reads 'prob_img_<iter>/step-<vid>-0-ref0.png'
    for every iteration in `iters` and writes both a .mov and a .gif under
    `pnt_dir`.
    """
    print('-----------------------------------Rendering Grow-----------------------------------')
    for t in tqdm(range(len(vids))):
        vid = vids[t]
        frames = [
            read_image(
                os.path.join(pnt_dir, 'prob_img_{}'.format(iter), "step-{}-0-ref0.png".format(vid)),
                dtype=np.float32,
            )
            for iter in iters
        ]
        stacked_imgs = [to8b(frame) for frame in frames]
        mov_path = os.path.join(pnt_dir, 'grow_video_{:04d}.mov'.format(vid))
        imageio.mimwrite(mov_path, stacked_imgs, fps=3, quality=8)
        gif_path = os.path.join(pnt_dir, 'grow_video_{:04d}.gif'.format(vid))
        imageio.mimwrite(gif_path, stacked_imgs, fps=3, format='GIF')
    return
if __name__ == '__main__':
    # Hard-coded experiment directory and iteration/view ranges for one-off
    # growth-video generation.
    pnt_dir = "/home/xharlie/user_space/codes/testNr/checkpoints/scan103_normcam2_confcolordir_KNN8_LRelu_grid800_dmsk_full2geo0_agg2_zeroone1e4_confree_prl2e3_probe2e3_1_comb/points"
    iters = list(range(1000, 25000, 1000))
    vids = list(range(16, 20))
    render_grow(pnt_dir, iters, vids)
| 2,394 | 34.746269 | 183 | py |
pointnerf | pointnerf-master/utils/format.py | PURPLE = '\033[1;35;48m'
# ANSI escape sequences for colored/bold terminal output.
CYAN = '\033[1;36;48m'
BOLD = '\033[1;37;48m'
BLUE = '\033[1;34;48m'
GREEN = '\033[1;32;48m'
YELLOW = '\033[1;33;48m'
RED = '\033[1;31;48m'
BLACK = '\033[1;30;48m'
UNDERLINE = '\033[4;37;48m'
END = '\033[1;37;0m'  # resets terminal styling back to default
| 238 | 20.727273 | 27 | py |
pointnerf | pointnerf-master/utils/visualizer.py | import numpy as np
import os
from PIL import Image
import shutil
from collections import OrderedDict
import time
import datetime
import torch
import imageio
from utils.util import to8b
from models.mvs.mvs_utils import *
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in decibels."""
    log10 = np.log(10.)
    return -10. * torch.log(x) / log10
def save_image(img_array, filepath):
    """Write an image array to `filepath`, creating parent directories.

    Accepts an HxW grayscale array or an HxWx{3,4} color array. Non-uint8
    inputs are assumed to lie in [0, 1] and are converted to uint8.
    """
    assert len(img_array.shape) == 2 or (
        len(img_array.shape) == 3 and img_array.shape[2] in [3, 4])
    if img_array.dtype != np.uint8:
        scaled = np.clip(img_array, 0, 1) * 255
        img_array = scaled.astype(np.uint8)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    Image.fromarray(img_array).save(filepath)
def save_points(xyz, dir, total_steps):
    """Dump point batches to ';'-separated text files in `dir`.

    `xyz` may be (N, C) or (B, N, C); each batch i is written to
    'step-<total_steps>-<i>.txt' (zero-padded when total_steps is an int).
    """
    batched = xyz if xyz.ndim >= 3 else xyz[None, ...]
    os.makedirs(dir, exist_ok=True)
    name_fmt = 'step-{}-{}.txt' if isinstance(total_steps, str) else 'step-{:04d}-{}.txt'
    for i in range(batched.shape[0]):
        filepath = os.path.join(dir, name_fmt.format(total_steps, i))
        flat = batched[i, ...].reshape(-1, batched.shape[-1])
        np.savetxt(filepath, flat, delimiter=";")
class Visualizer:
    """Accumulates training losses and writes logs, images, point clouds and
    videos under '<checkpoints_dir>/<name>/'."""

    def __init__(self, opt):
        # Directory layout: log.txt in log_dir, rendered frames in images/,
        # point-cloud dumps in points/, assembled videos in vids/.
        self.opt = opt
        self.log_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.image_dir = os.path.join(opt.checkpoints_dir, opt.name, 'images')
        self.point_dir = os.path.join(opt.checkpoints_dir, opt.name, 'points')
        self.vid_dir = os.path.join(opt.checkpoints_dir, opt.name, 'vids')
        os.makedirs(self.vid_dir, exist_ok=True)
        if opt.show_tensorboard > 0:
            from tensorboardX import SummaryWriter
            # One timestamped run directory per Visualizer instance.
            self.tb_writer = SummaryWriter(
                os.path.join(
                    opt.checkpoints_dir, opt.name,
                    datetime.datetime.now().strftime("%Y%m%d-%H%M%S")))

    def save_image(self, img_array, filepath):
        """Write a grayscale (HxW) or color (HxWx{3,4}) array to `filepath`;
        float inputs are assumed in [0, 1] and converted to uint8."""
        assert len(img_array.shape) == 2 or (len(img_array.shape) == 3
                                             and img_array.shape[2] in [3, 4])
        if img_array.dtype != np.uint8:
            img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        Image.fromarray(img_array).save(filepath)

    def read_image(self, filepath, dtype=None):
        """Load an image; when dtype is np.float32, rescale to [0, 1]."""
        image = np.asarray(Image.open(filepath))
        if dtype is not None and dtype==np.float32:
            image = (image / 255).astype(dtype)
        return image

    def display_current_results(self, visuals, total_steps, opt=None):
        """Save each visual listed in opt.visual_items as a step-numbered PNG."""
        for name, img in visuals.items():
            if opt is not None and name in opt.visual_items:
                img = np.array(img)
                filename = 'step-{:04d}-{}.png'.format(total_steps, name)
                filepath = os.path.join(self.image_dir, filename)
                save_image(img, filepath)

    def display_video(self, visual_lst, total_steps):
        """Write one .mov and one .gif per visual name from a list of per-frame dicts."""
        for name in visual_lst[0].keys():
            stacked_imgs = [to8b(visuals[name]) for visuals in visual_lst]
            filename = 'video_{:04d}_{}.mov'.format(total_steps, name)
            imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=5, quality=8)
            filename = 'video_{:04d}_{}.gif'.format(total_steps, name)
            imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=5, format='GIF')

    def gen_video(self, name, steps, total_step):
        """Assemble previously saved 'step-XXXX-<name>.png' frames into a video."""
        img_lst = []
        for i in steps:
            img_filepath = os.path.join(self.image_dir, 'step-{:04d}-{}.png'.format(i, name))
            img_arry = self.read_image(img_filepath, dtype=np.float32)
            img_lst.append(img_arry)
        stacked_imgs = [to8b(img_arry) for img_arry in img_lst]
        filename = 'video_{:04d}_{}.mov'.format(total_step, name)
        imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=20, quality=10)
        filename = 'video_{:04d}_{}.gif'.format(total_step, name)
        imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=5, format='GIF')

    def save_neural_points(self, total_steps, xyz, features, data, save_ref=0):
        """Dump neural point positions (plus RGB features scaled to [0, 255]
        when available) to text files; optionally also save reference views.

        A 9-channel feature tensor is split into three 3-channel color sets,
        saved as three point batches.
        """
        if features is None:
            if torch.is_tensor(xyz):
                # xyz = xyz.detach().cpu().numpy()
                xyz = xyz.detach().cpu().numpy()
            save_points(xyz, self.point_dir, total_steps)
        elif features.shape[-1] == 9:
            pnt_lst = []
            for i in range(0,3):
                points = torch.cat([xyz, features[0, ..., i*3:i*3+3] * 255], dim=-1)
                if torch.is_tensor(points):
                    # xyz = xyz.detach().cpu().numpy()
                    points = points.detach().cpu().numpy()
                pnt_lst.append(points)
            save_points(np.stack(pnt_lst,axis=0), self.point_dir, total_steps)
        else:
            points = torch.cat([xyz, features[0, ..., :3] * 255], dim=-1)
            if torch.is_tensor(points):
                # xyz = xyz.detach().cpu().numpy()
                points = points.detach().cpu().numpy()
            save_points(points, self.point_dir, total_steps)
        if save_ref and "images" in data:
            self.save_ref_views(data, total_steps)

    def save_ref_views(self, data, total_steps, subdir=None):
        """Save each reference view in data['images'] (B, V, C, H, W) as a PNG;
        the 4th view, when present, is additionally saved as the target."""
        dir = self.point_dir if subdir is None else os.path.join(self.point_dir, subdir)
        for i in range(data['images'].shape[1]):
            img = data['images'][0,i].permute(1,2,0).cpu().numpy()
            filename = 'step-{}-{}-ref{}.png'.format(total_steps, 0, i)
            filepath = os.path.join(dir, filename)
            save_image(img, filepath)
        if data['images'].shape[1] > 3:
            img = data['images'][0,3].permute(1, 2, 0).cpu().numpy()
            filename = 'step-{}-{}-trgt.png'.format(total_steps, 0)
            filepath = os.path.join(dir, filename)
            save_image(img, filepath)

    def reset(self):
        """Clear the loss accumulator and restart the timing window."""
        self.start_time = time.time()
        self.acc_iterations = 0
        self.acc_losses = OrderedDict()

    def accumulate_losses(self, losses):
        """Add one iteration's losses; for '*raycolor' losses also track PSNR."""
        self.acc_iterations += 1
        for k, v in losses.items():
            if k not in self.acc_losses:
                self.acc_losses[k] = 0
            self.acc_losses[k] += v
            if k.endswith('raycolor'):
                psnrkey = k + "_psnr"
                if psnrkey not in self.acc_losses:
                    self.acc_losses[psnrkey] = 0
                self.acc_losses[psnrkey] += mse2psnr(v)

    def get_psnr(self, key):
        """Return the average accumulated PSNR for loss `key`."""
        return self.acc_losses[key + "_psnr"] / self.acc_iterations

    def print_losses(self, total_steps):
        """Append the averaged losses since the last reset to log.txt and stdout."""
        m = 'End of iteration {} \t Number of batches {} \t Time taken: {:.2f}s\n'.format(
            total_steps, self.acc_iterations, (time.time() - self.start_time))
        m += '[Average Loss] '
        for k, v in self.acc_losses.items():
            m += '{}: {:.10f} '.format(k, v / self.acc_iterations)
        filepath = os.path.join(self.log_dir, 'log.txt')
        with open(filepath, 'a') as f:
            f.write(m + '\n')
        print(m)

    def print_details(self, str):
        """Append an arbitrary message to log.txt and echo it to stdout."""
        filepath = os.path.join(self.log_dir, 'log.txt')
        with open(filepath, 'a') as f:
            f.write(str + '\n')
        print(str)

    def plot_current_losses_with_tb(self, step, losses):
        """Log each loss scalar to TensorBoard (no-op unless show_tensorboard > 0)."""
        if not self.opt.show_tensorboard > 0:
            return
        for key in losses.keys():
            curr_loss = losses[key]
            self.tb_writer.add_scalar(key, float(curr_loss), step)
| 7,619 | 40.639344 | 101 | py |
pointnerf | pointnerf-master/utils/util.py | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from torchvision.utils import make_grid
from os.path import join
import torch.nn.functional as F
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
def mkdir(path):
    """Create directory `path` (including parents) if it does not already exist.

    Uses exist_ok=True, which removes the check-then-create race of the
    original `os.path.exists` + `os.makedirs` sequence.
    """
    os.makedirs(path, exist_ok=True)
def add_property2dict(target_dict, object, props):
    """Copy the attributes named in `props` from `object` into `target_dict` in place."""
    target_dict.update({name: getattr(object, name) for name in props})
def normalize(v, axis=0):
    """Scale `v` so every slice along `axis` has unit L2 norm.

    axis=0 normalizes each column, axis=1 each row. A small epsilon in the
    denominator guards against division by zero.
    """
    norms = np.linalg.norm(v, axis=axis, keepdims=True)
    return v / (norms + 1e-9)
def to8b(x):
    """Convert a float image in [0, 1] to uint8 in [0, 255], clipping out-of-range values."""
    clipped = np.clip(x, 0, 1)
    return (255 * clipped).astype(np.uint8)
def gen_render_path(c2ws, N_views=30):
    """Interpolate a smooth closed camera path through the given poses.

    Each input (4x4 or 3x4-compatible) camera-to-world pose is decomposed into
    xyz Euler angles and a position; consecutive poses (including last->first)
    are linearly blended with N_views//3 in-between samples, then recomposed
    into 4x4 matrices.

    Inputs:
        c2ws: (N, 4, 4) camera-to-world matrices
        N_views: total number of rendered views; N_views//3 per segment
    Outputs:
        (M, 4, 4) interpolated camera-to-world matrices
    """
    N = len(c2ws)
    rotvec, positions = [], []
    rotvec_inteplat, positions_inteplat = [], []
    # Blend weights from 1.0 down to (exclusive) 0.0 for one segment.
    weight = np.linspace(1.0, .0, N_views//3, endpoint=False).reshape(-1, 1)
    for i in range(N):
        r = R.from_matrix(c2ws[i, :3, :3])
        euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3)
        if i:
            # Unwrap angles so interpolation never crosses the +-180 deg seam.
            mask = np.abs(euler_ange - rotvec[0])>180
            euler_ange[mask] += 360.0
        rotvec.append(euler_ange)
        positions.append(c2ws[i, :3, 3:].reshape(1, 3))
        if i:
            rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i])
            positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i])
    # Close the loop: blend from the last pose back to the first.
    rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0])
    positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0])
    c2ws_render = []
    angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat)
    for rotvec, position in zip(angles_inteplat, positions_inteplat):
        c2w = np.eye(4)
        c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix()
        c2w[:3, 3:] = position.reshape(3, 1)
        c2ws_render.append(c2w.copy())
    c2ws_render = np.stack(c2ws_render)
    return c2ws_render
def unique_lst(list1):
    """Return the sorted unique elements of `list1` as a numpy array."""
    return np.unique(np.asarray(list1))
| 2,196 | 30.84058 | 109 | py |
pointnerf | pointnerf-master/utils/ncg_string.py | '''Manipulate strings'''
def underscore2camelcase(s):
    """Convert a snake_case identifier to CamelCase, validating the input.

    Raises AssertionError when `s` contains upper-case letters, characters
    other than letters/digits/underscore, consecutive underscores, or a
    phrase that does not start with a letter.
    """
    assert s == s.lower(), 'Invalid underscore string: no upper case character is allowed, in "{}"'.format(s)
    assert all(c.isdigit() or c.isalpha() or c == '_' for c in s),\
        'Invalid underscore, all character must be letters or numbers or underscore'
    pieces = s.split('_')
    for piece in pieces:
        assert piece, 'Invalid underscore string: no consecutive _ is allowed, in "{}"'.format(s)
        assert piece[0].upper() != piece[0], \
            'Invalid underscore string: phrases must start with a character, in "{}'.format(s)
    return ''.join(piece[0].upper() + piece[1:] for piece in pieces if piece)
def camelcase2underscore(s):
    """Convert a CamelCase identifier to snake_case, validating the input.

    Raises AssertionError when the first character is not upper case or when
    `s` contains characters other than letters and digits.
    """
    assert s[0].isupper(), 'Invalid camel case, first character must be upper case, in "{}"'.format(s)
    assert all(c.isdigit() or c.isalpha() for c in s),\
        'Invalid camel case, all character must be letters or numbers'
    pieces = [s[0].lower()]
    for ch in s[1:]:
        if ch.lower() != ch:
            pieces.append('_' + ch.lower())
        else:
            pieces.append(ch)
    return ''.join(pieces)
| 1,077 | 36.172414 | 109 | py |
pointnerf | pointnerf-master/utils/spherical.py | import torch
from scipy.special import sph_harm, lpmn, lpmv
from scipy.special import factorial
import numpy as np
import math
import time
class SphericalHarm(object):
    """Runtime evaluator for real spherical harmonics up to `total_deg` bands.

    Precomputes index tables that lay out the (l, m) coefficient pairs in a
    recurrence-friendly "order" layout, plus the K_lm normalization constants,
    so that all associated Legendre polynomials can be evaluated with batched
    torch ops in sh_all().
    """

    def __init__(self, total_deg):
        self.total_deg = total_deg
        # Index tables mapping between (l, m) storage order and the order used
        # by the Legendre recurrence (see genalpids / genshids).
        self.orderIds, self.lIds, self.mIds, self.num_at_deg, self.m0inorder, self.restinorder, self.orderinorg = self.genalpids(
            self.total_deg)
        self.sh_ordermIds, self.sh_orderlIds, self.sh_orderIds, self.sh_orderinorg = self.genshids(
            self.total_deg)
        # f2m_1: double factorials (2m-1)!!; Klm: SH normalization constants.
        self.f2m_1, self.Klm = self.precompff(self.total_deg)
        self.orderKlm = self.Klm[self.orderIds]

    # def sh_all(self, theta, phi):
    #     phi = phi.view(-1, 1)
    #     theta = theta.view(-1, 1)

    def sh_all(self, indirs):
        """Evaluate all total_deg**2 SH basis values for unit directions `indirs` (..., 3).

        Returns a (N, total_deg**2) tensor ordered by (l, m) with m from -l to l.
        """
        indirs = indirs.view(-1, 3)
        # Spherical angles from the Cartesian direction (z = cos(theta)).
        theta = torch.acos(indirs[:, [2]])
        phi = torch.atan2(indirs[:, [1]], indirs[:, [0]])
        # phi = phi.view(-1, 1)
        # theta = theta.view(-1, 1)
        alp = self.associated_lengedre_poly_all(torch.cos(theta))
        # m == 0 terms: no azimuthal dependence.
        m0 = alp[:, self.m0inorder] * torch.from_numpy(
            self.orderKlm[self.m0inorder]).to(theta.device).type(theta.dtype)
        # print("alp", alp[:, self.m0inorder], self.orderKlm[self.m0inorder])
        ms = torch.from_numpy(self.mIds[self.orderIds][self.restinorder]).to(
            theta.device).type(theta.dtype)
        restKlm = torch.from_numpy(self.orderKlm[self.restinorder]).to(
            theta.device).type(theta.dtype)
        # m > 0 (cosine) and m < 0 (sine) terms of the real SH basis.
        m0p = restKlm * torch.cos(ms * phi) * alp[:, self.restinorder]
        m0n = restKlm * torch.sin(ms * phi) * alp[:, self.restinorder]
        # print(phi.shape, m0p.shape)
        m = torch.cat([m0, m0p, m0n], 1)
        # Re-order columns into canonical (l, m) order.
        m = m[:, self.sh_orderinorg]
        return m

    def associated_lengedre_poly_all(self, x):
        """Evaluate all associated Legendre polynomials P_l^m(x) for l < total_deg.

        Uses the standard recurrences: P_m^m from the double factorial closed
        form, P_{m+1}^m = x(2m+1)P_m^m, then the three-term recurrence for
        higher l. `x` is cos(theta), shape (N,) or (N, 1).
        """
        x = x.view(-1, 1)
        l = self.total_deg
        # alp = torch.ones((x.shape[0], l * (l + 1) // 2), device=x.device)
        ms = self.mIds[self.orderIds[:l]]
        somx2 = torch.sqrt((1 - x) * (1 + x))  # sin(theta), numerically stable
        f2m_1s = torch.from_numpy(self.f2m_1[ms]).to(x.device).type(x.dtype)
        # P_m^m(x) = (-1)^m (2m-1)!! (1-x^2)^{m/2}
        pmm = torch.pow(-somx2, torch.from_numpy(ms).to(x.device)) * f2m_1s
        alp = [pmm]
        t = l - 1
        if t > 0:
            ms = self.mIds[self.orderIds[self.total_deg:self.total_deg + t]]
            ms = torch.from_numpy(ms).to(x.device)
            # P_{m+1}^m(x) = x (2m+1) P_m^m(x)
            pmp1m = x * (2 * ms + 1) * pmm[:, :t]
            alp.append(pmp1m)
        cur = self.total_deg + t
        for i in range(l - 2):
            t = l - 2 - i
            ms = self.mIds[self.orderIds[cur:cur + t]]
            ms = torch.from_numpy(ms).to(x.device)
            ls = ms + i + 2
            # Three-term recurrence: (l-m) P_l^m = x(2l-1) P_{l-1}^m - (l+m-1) P_{l-2}^m
            plm = (x * (2 * ls - 1) * pmp1m[:, :t] -
                   (ls + ms - 1) * pmm[:, :t]) / (i + 2)
            alp.append(plm)
            pmm = pmp1m
            pmp1m = plm
            cur += t
        alp = torch.cat(alp, 1)
        return alp

    def precompff(self, l):
        """Precompute (2m-1)!! double factorials and SH normalization constants K_lm."""
        f2m_1 = np.arange(l) + 1
        f2m_1 = f2m_1 * 2 - 1
        f2m_1 = np.cumprod(f2m_1)
        f2m_1[1:] = f2m_1[:-1]  # shift so f2m_1[m] == (2m-1)!!
        Klm = np.sqrt((2 * self.lIds + 1) * factorial(self.lIds - self.mIds) /
                      (4 * np.pi * factorial(self.lIds + self.mIds)))
        # Real SH basis: m != 0 terms carry an extra sqrt(2).
        m_n0 = np.reshape(np.where(self.mIds), -1)
        Klm[m_n0] *= 2**0.5
        return f2m_1, Klm

    def genalpids(self, l):
        """Build index tables between (l, m) storage order and recurrence order.

        Returns (orderIds, lIds, mIds, num_at_deg, m0inorder, restinorder,
        orderinorg); orderinorg is the inverse permutation of orderIds.
        """
        r_orderIds = np.zeros(l * (l + 1) // 2, dtype=int)
        n_per_deg = np.arange(l + 1)[1:]
        num_deg = np.cumsum(n_per_deg)
        i_order = num_deg - 1
        k = 0
        for i in range(l):
            r_orderIds[k:k + len(i_order)] = i_order
            k += len(i_order)
            i_order = i_order[:-1] + n_per_deg[i:-1]
        # lIds/mIds: degree and order for each coefficient in storage layout.
        r_lids = np.zeros(l * (l + 1) // 2, dtype=int)
        r_mids = np.zeros(l * (l + 1) // 2, dtype=int)
        k = 0
        for i in range(l):
            r_lids[k:k + i + 1] = i
            r_mids[k:k + i + 1] = np.arange(i + 1)
            k += i + 1
        # Positions of the m == 0 entries within the recurrence ordering.
        r_m0inorder = [0] + list(range(l, 0, -1))
        r_m0inorder = np.cumsum(np.asarray(r_m0inorder, dtype=int))[:l]
        tmp = np.ones_like(r_orderIds)
        tmp[r_m0inorder] = 0
        r_restinorder = np.reshape(np.where(tmp), -1)
        tmp = np.arange(len(r_orderIds))
        r_orderinorg = tmp.copy()
        r_orderinorg[r_orderIds] = tmp[:]
        return r_orderIds, r_lids, r_mids, num_deg, r_m0inorder, r_restinorder, r_orderinorg

    def genshids(self, l):
        """Build the permutation mapping [m0 | m>0 | m<0] concatenation order to
        canonical SH ordering (index l*l + l + m)."""
        sh_ordermIds = np.zeros(l * l, dtype=int)
        sh_orderlIds = np.zeros(l * l, dtype=int)
        sh_orderIds = np.zeros(l * l, dtype=int)
        sh_ordermIds[:len(self.m0inorder)] = self.mIds[self.orderIds][
            self.m0inorder]
        sh_orderlIds[:len(self.m0inorder)] = self.lIds[self.orderIds][
            self.m0inorder]
        k = len(self.m0inorder)
        sh_ordermIds[k:k + len(self.restinorder)] = self.mIds[self.orderIds][
            self.restinorder]
        sh_orderlIds[k:k + len(self.restinorder)] = self.lIds[self.orderIds][
            self.restinorder]
        k += len(self.restinorder)
        # Negative m entries mirror the positive ones (sine terms).
        sh_ordermIds[k:k + len(self.restinorder)] = -self.mIds[self.orderIds][
            self.restinorder]
        sh_orderlIds[k:k + len(self.restinorder)] = self.lIds[self.orderIds][
            self.restinorder]
        print(k + len(self.restinorder))
        # Canonical flat index: l^2 + l + m.
        sh_orderIds = sh_orderlIds + sh_ordermIds + sh_orderlIds * sh_orderlIds
        tmp = np.arange(len(sh_orderIds))
        sh_orderinorg = tmp.copy()
        sh_orderinorg[sh_orderIds] = tmp[:]
        return sh_ordermIds, sh_orderlIds, sh_orderIds, sh_orderinorg
class SphericalHarm_table(object):
    """Hard-coded real spherical-harmonics basis tables for up to 5 bands.

    Unlike SphericalHarm (runtime recurrence), this class evaluates the
    closed-form polynomial expressions for each band directly.

    Note: the trailing line of this class previously had non-code residue
    fused onto it; the residue has been stripped, the code is unchanged.
    """

    def __init__(self, total_deg):
        self.total_deg = total_deg
        print(self.total_deg * self.total_deg)

    def sh_all(self, indirs, filp_dir=True):
        """Evaluate all total_deg**2 SH basis values for directions `indirs` (..., 3).

        When filp_dir is True, the x and y components are negated to match
        the caller's direction convention. Returns an (N, total_deg**2) tensor.
        """
        indirs = indirs.reshape(-1, 3)
        x = -indirs[:, [0]] if filp_dir else indirs[:, [0]]
        y = -indirs[:, [1]] if filp_dir else indirs[:, [1]]
        z = indirs[:, [2]]
        if self.total_deg == 1:
            return self.SH_l0(x, y, z)
        elif self.total_deg == 2:
            return self.SH_l1(x, y, z)
        elif self.total_deg == 3:
            return self.SH_l2(x, y, z)
        elif self.total_deg == 4:
            return self.SH_l3(x, y, z)
        elif self.total_deg == 5:
            return self.SH_l4(x, y, z)
        else:
            print(
                "Not supporting this order of SH table yet. Please use runtime SH computation."
            )
            exit()

    def SH_l0(self, x, y, z):
        """Band 0: the constant term Y_0^0."""
        l00 = 0.5 * np.sqrt(1 / np.pi) * torch.ones_like(x, device=x.device)
        return l00

    def SH_l1(self, x, y, z):
        """Bands 0-1: appends the three linear terms to SH_l0."""
        l1_m1 = np.sqrt(3 / 4 / np.pi) * y
        l1_0 = np.sqrt(3 / 4 / np.pi) * z
        l1_1 = np.sqrt(3 / 4 / np.pi) * x
        return torch.cat([self.SH_l0(x, y, z), l1_m1, l1_0, l1_1], 1)

    def SH_l2(self, x, y, z):
        """Bands 0-2: appends the five quadratic terms to SH_l1."""
        l2_m2 = 0.5 * np.sqrt(15 / np.pi) * x * y
        l2_m1 = 0.5 * np.sqrt(15 / np.pi) * z * y
        l2_0 = 0.25 * np.sqrt(5 / np.pi) * (-x * x - y * y + 2 * z * z)
        l2_1 = 0.5 * np.sqrt(15 / np.pi) * x * z
        l2_2 = 0.25 * np.sqrt(15 / np.pi) * (x * x - y * y)
        return torch.cat([self.SH_l1(x, y, z), l2_m2, l2_m1, l2_0, l2_1, l2_2],
                         1)

    def SH_l3(self, x, y, z):
        """Bands 0-3: appends the seven cubic terms to SH_l2."""
        l3_m3 = 0.25 * np.sqrt(35.0 / 2 / np.pi) * (3 * x * x - y * y) * y
        l3_m2 = 0.5 * np.sqrt(105 / np.pi) * x * y * z
        l3_m1 = 0.25 * np.sqrt(
            21 / 2 / np.pi) * (4 * z * z - x * x - y * y) * y
        l3_0 = 0.25 * np.sqrt(
            7 / np.pi) * (2 * z * z - 3 * x * x - 3 * y * y) * z
        l3_1 = 0.25 * np.sqrt(21 / 2 / np.pi) * (4 * z * z - x * x - y * y) * x
        l3_2 = 0.25 * np.sqrt(105 / np.pi) * (x * x - y * y) * z
        l3_3 = 0.25 * np.sqrt(35.0 / 2 / np.pi) * (x * x - 3 * y * y) * x
        return torch.cat(
            [self.SH_l2(x, y, z), l3_m3, l3_m2, l3_m1, l3_0, l3_1, l3_2, l3_3],
            1)

    def SH_l4(self, x, y, z):
        """Bands 0-4: appends the nine quartic terms to SH_l3."""
        l4_m4 = 0.75 * np.sqrt(35.0 / np.pi) * x * y * (x * x - y * y)
        l4_m3 = 0.75 * np.sqrt(35.0 / 2 / np.pi) * (3 * x * x - y * y) * y * z
        l4_m2 = 0.75 * np.sqrt(5 / np.pi) * x * y * (7 * z * z - 1)
        l4_m1 = 0.75 * np.sqrt(5 / 2 / np.pi) * z * y * (7 * z * z - 3)
        l4_0 = 3 / 16 * np.sqrt(
            1 / np.pi) * (35 * z * z * z * z - 30 * z * z + 3)
        l4_1 = 0.75 * np.sqrt(5 / 2 / np.pi) * x * z * (7 * z * z - 3)
        l4_2 = 3 / 8 * np.sqrt(5 / np.pi) * (x * x - y * y) * (7 * z * z - 1)
        l4_3 = 0.75 * np.sqrt(35.0 / 2 / np.pi) * (x * x - 3 * y * y) * x * z
        l4_4 = 3 / 16 * np.sqrt(35.0 / np.pi) * (x * x *
                                                 (x * x - 3 * y * y) - y * y *
                                                 (3 * x * x - y * y))
        return torch.cat([
            self.SH_l3(x, y, z), l4_m4, l4_m3, l4_m2, l4_m1, l4_0, l4_1, l4_2,
            l4_3, l4_4
        ], 1)
pointnerf | pointnerf-master/data/llff_ft_dataset.py | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
import glob
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
import itertools
from os.path import join
import cv2
# import torch.nn.functional as F
from .data_utils import get_dtu_raydir
import copy
# 3x3 matrix that negates the z axis (flips coordinate-system handedness).
FLIP_Z = np.asarray([
    [1,0,0],
    [0,1,0],
    [0,0,-1],
], dtype=np.float32)
def normalize(v):
    """Return `v` scaled to unit L2 norm."""
    length = np.linalg.norm(v)
    return v / length
def colorjitter(img, factor):
    """Apply saturation and hue jitter to an image via torchvision functional ops.

    `factor` is indexed as (brightness, contrast, saturation, hue+1); the
    brightness and contrast adjustments are currently disabled (commented out),
    and the hue factor is shifted back by 1.0 before use.
    """
    # brightness_factor,contrast_factor,saturation_factor,hue_factor
    # img = F.adjust_brightness(img, factor[0])
    # img = F.adjust_contrast(img, factor[1])
    img = F.adjust_saturation(img, factor[2])
    img = F.adjust_hue(img, factor[3]-1.0)
    return img
def unique_lst(list1):
    """Return the sorted unique elements of `list1` as a numpy array."""
    return np.unique(np.asarray(list1))
def average_poses(poses):
    """
    Compute a single average pose used to center all poses (see @center_poses).

    Steps:
    1. Center: mean of all pose centers.
    2. z axis: normalized mean z axis.
    3. y': mean y axis (not necessarily orthogonal to z yet).
    4. x axis: normalize(y' x z).
    5. y axis: z x x (already unit length since z and x are).

    Inputs:
        poses: (N_images, 3, 4)
    Outputs:
        pose_avg: (3, 4) the average pose
    """
    center = poses[..., 3].mean(0)                 # (3)
    z_axis = normalize(poses[..., 2].mean(0))      # (3)
    y_hint = poses[..., 1].mean(0)                 # (3)
    x_axis = normalize(np.cross(y_hint, z_axis))   # (3)
    y_axis = np.cross(z_axis, x_axis)              # (3)
    return np.stack([x_axis, y_axis, z_axis, center], 1)  # (3, 4)
def get_rays(directions, c2w):
    """
    Transform per-pixel camera-space ray directions into world coordinates.

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems

    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinates
        c2w: (3, 4) camera-to-world transformation matrix
    Outputs:
        rays_o: (H*W, 3) ray origins (the camera center) in world coordinates
        rays_d: (H*W, 3) ray directions in world coordinates (not normalized)
    """
    c2w = torch.FloatTensor(c2w)
    rot = c2w[:3, :3]
    # Rotate camera-space directions into world space and flatten.
    world_dirs = (directions @ rot.T).view(-1, 3)
    # Every ray starts at the camera center (broadcast, no copy).
    origins = c2w[:3, 3].expand(world_dirs.shape).view(-1, 3)
    return origins, world_dirs
def get_ray_directions(H, W, focal, center=None):
    """
    Get ray directions for all pixels in camera coordinates.

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems

    Inputs:
        H, W: image height and width
        focal: indexable (fx, fy) focal lengths in pixels
        center: optional (cx, cy) principal point; defaults to the image center
    Outputs:
        directions: (H, W, 3) float32 ray directions in camera coordinates
    """
    # Pure-torch replacement for kornia.create_meshgrid(H, W,
    # normalized_coordinates=False): i is the x (column) coordinate and j the
    # y (row) coordinate of each pixel, both float32 in [0, W-1] / [0, H-1].
    i = torch.arange(W, dtype=torch.float32).view(1, W).expand(H, W)
    j = torch.arange(H, dtype=torch.float32).view(H, 1).expand(H, W)
    # the direction here is without +0.5 pixel centering as calibration is not so accurate
    # see https://github.com/bmild/nerf/issues/24
    cent = center if center is not None else [W / 2, H / 2]
    directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)],
                             -1)  # (H, W, 3)
    return directions
def flip_z(poses):
    """Negate the z axis of each pose by right-multiplying with diag(1, 1, -1, 1)."""
    flip = np.diag(np.array([1.0, 1.0, -1.0, 1.0], dtype=np.float32))
    return np.matmul(poses, flip[None, ...])
class LlffFtDataset(BaseDataset):
    """Per-scene fine-tuning dataset for LLFF-style forward-facing captures.

    Reads `poses_bounds.npy` plus the 4x-downsampled `images_4` renders of one
    scan, recenters and rescales the camera poses, and serves per-view ray
    batches (and multi-view initialization triplets) for point-based NeRF
    fine-tuning.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register LLFF fine-tuning options on *parser* and return it.

        NOTE(review): several help strings below appear copy-pasted from
        unrelated options (e.g. --init_view_num, --train_load_num, --holdoff
        all claim something about random samples / ray-dir normalization) —
        confirm their actual meaning before relying on the help text.
        """
        # ['random', 'random2', 'patch'], default: no random sample
        parser.add_argument('--random_sample',
                            type=str,
                            default='none',
                            help='random sample pixels')
        parser.add_argument('--random_sample_size',
                            type=int,
                            default=1024,
                            help='number of random samples')
        parser.add_argument('--init_view_num',
                            type=int,
                            default=3,
                            help='number of random samples')
        parser.add_argument('--shape_id', type=int, default=0, help='shape id')
        parser.add_argument('--trgt_id', type=int, default=0, help='shape id')
        parser.add_argument('--num_nn',
                            type=int,
                            default=1,
                            help='number of nearest views in a batch')
        parser.add_argument(
            '--near_plane',
            type=float,
            default=2.125,
            help=
            'Near clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--far_plane',
            type=float,
            default=4.525,
            help=
            'Far clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--bg_color',
            type=str,
            default="white",
            help=
            'background color, white|black(None)|random|rgb (float, float, float)'
        )
        parser.add_argument(
            '--scan',
            type=str,
            default="scan1",
            help=''
        )
        parser.add_argument(
            '--full_comb',
            type=int,
            default=0,
            help=''
        )
        parser.add_argument('--inverse_gamma_image',
                            type=int,
                            default=-1,
                            help='de-gamma correct the input image')
        parser.add_argument('--pin_data_in_memory',
                            type=int,
                            default=-1,
                            help='load whole data in memory')
        parser.add_argument('--normview',
                            type=int,
                            default=0,
                            help='load whole data in memory')
        parser.add_argument(
            '--id_range',
            type=int,
            nargs=3,
            default=(0, 385, 1),
            help=
            'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.'
        )
        parser.add_argument(
            '--img_wh',
            type=int,
            nargs=2,
            default=(960, 640),
            help='resize target of the image'
        )
        parser.add_argument(
            '--id_list',
            type=int,
            nargs='+',
            default=None,
            help=
            'the list of data ids selected in the original dataset. The default is range(0, 385).'
        )
        parser.add_argument(
            '--split',
            type=str,
            default="train",
            help=
            'train, val, test'
        )
        parser.add_argument("--half_res", action='store_true',
                            help='load blender synthetic data at 400x400 instead of 800x800')
        parser.add_argument("--testskip", type=int, default=8,
                            help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
        parser.add_argument('--dir_norm',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument('--train_load_num',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument('--holdoff',
                            type=int,
                            default=8,
                            help='normalize the ray_dir to unit length or not, default not')
        return parser

    def initialize(self, opt, downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None):
        """Load poses_bounds.npy for one scan and precompute camera matrices.

        Args:
            opt: parsed options (uses data_root, scan, split, img_wh, bg_color,
                normview, holdoff, full_comb, random_sample*, ...).
            downSample: extra resolution scale applied to opt.img_wh.
            max_len: if > 0, caps the value reported by __len__.
            norm_w2c / norm_c2w: accepted for API symmetry with the sibling
                datasets. NOTE(review): norm_c2w is unconditionally
                overwritten below, so these parameters are effectively ignored.
        """
        self.opt = opt
        self.data_dir = opt.data_root
        self.scan = opt.scan
        self.split = opt.split
        self.img_wh = (int(opt.img_wh[0] * downSample), int(opt.img_wh[1] * downSample))
        self.downSample = downSample
        self.scale_factor = 1.0 / 1.0
        self.max_len = max_len
        # Blender (OpenGL) -> OpenCV camera-axis convention change.
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0])
        # Background color: black / white / per-item random / explicit "r,g,b".
        if not self.opt.bg_color or self.opt.bg_color == 'black':
            self.bg_color = (0, 0, 0)
        elif self.opt.bg_color == 'white':
            self.bg_color = (1, 1, 1)
        elif self.opt.bg_color == 'random':
            self.bg_color = 'random'
        else:
            self.bg_color = [float(one) for one in self.opt.bg_color.split(",")]
        self.define_transforms()
        self.ori_poses_bounds = np.load(os.path.join(self.data_dir, self.scan, 'poses_bounds.npy'))
        poses, avg_poses, bounds = self.get_poses(self.ori_poses_bounds)
        self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32)
        # The incoming norm_c2w parameter is discarded here (see docstring).
        norm_c2w = None
        if opt.normview == 1:
            # Bake the average pose directly into the per-view extrinsics.
            norm_c2w = avg_poses
        if opt.normview == 2:
            # Keep extrinsics untouched; expose the normalization separately.
            self.norm_w2c, self.norm_c2w = torch.as_tensor(np.linalg.inv(avg_poses), device="cuda", dtype=torch.float32), torch.as_tensor(avg_poses, device="cuda", dtype=torch.float32)
        self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(poses, bounds, norm_c2w=norm_c2w)
        self.build_init_metas(opt.holdoff)
        self.load_images()
        self.total = len(self.id_list)
        print("dataset total:", self.split, self.total)
    #
    # def read_images(self):
    #     image_paths = sorted(glob.glob(os.path.join(self.root_dir, 'images_4/*')))

    def load_images(self):
        """Read every view's images_4 RGB image into self.imgs as (3, h, w) tensors.

        NOTE(review): `glob` is not among the imports visible in this chunk;
        presumably imported at the top of the file — confirm.
        """
        imgs = []
        image_paths = sorted(glob.glob(os.path.join(self.data_dir, self.scan, 'images_4/*')))
        print("id_list", self.id_list, image_paths)
        for i in self.all_id_list:
            img = Image.open(image_paths[i]).convert('RGB')
            img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img) # (3, h, w)
            imgs.append(img)
        self.imgs = imgs

    def get_poses(self, poses_bounds):
        """Convert raw poses_bounds rows to centered, rescaled c2w poses.

        Args:
            poses_bounds: (N_images, 17) array; the first 15 values per row
                form a (3, 5) [R|t|hwf] block, the last 2 are depth bounds.

        Returns:
            poses: (N_images, 3, 4) centered camera-to-world poses.
            avg_poses_holder: (4, 4) homogeneous average pose (rescaled).
            bounds: (N_images, 2) rescaled near/far bounds.
        """
        poses = poses_bounds[:, :15].reshape(-1, 3, 5) # (N_images, 3, 5)
        bounds = poses_bounds[:, -2:] # (N_images, 2)
        # Step 1: rescale focal length according to training resolution
        H, W, focal = poses[0, :, -1] # original intrinsics, same for all images
        self.focal = [focal * self.img_wh[0] / W, focal * self.img_wh[1] / H]
        # Step 2: correct poses
        # Swap the first two rotation columns (negating one) to change the
        # axis convention of the raw LLFF poses.
        poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
        poses, avg_poses = self.center_poses(poses, self.blender2opencv)
        near_original = bounds.min()
        far_original = bounds.max()
        scale_factor = near_original * 0.75 # 0.75 is the default parameter
        # Rescale translations and bounds by the same factor so geometry is preserved.
        bounds /= scale_factor
        poses[..., 3] /= scale_factor
        avg_poses[..., 3] /= scale_factor
        avg_poses_holder = np.eye(4)
        avg_poses_holder[:3] = avg_poses
        # 2.65 / 200 * 192 = 2.544, min 2.1250
        # range_original = far_original - near_original
        # scale_factor = range_original / 2.544
        # bounds /= scale_factor
        # poses[..., 3] /= scale_factor
        # avg_poses[..., 3] /= scale_factor
        # avg_poses_holder = np.eye(4)
        # avg_poses_holder[:3] = avg_poses
        return poses, avg_poses_holder, bounds

    def build_proj_mats(self, poses, bounds, norm_c2w=None):
        """Build per-view (projection, bounds) pairs plus stacked matrices.

        Args:
            poses: (N, 3, 4) camera-to-world poses.
            bounds: (N, 2) per-view near/far depth bounds.
            norm_c2w: optional (4, 4) normalization pose folded into every
                extrinsic (right-multiplied onto w2c).

        Returns:
            proj_mats: list of [proj_mat (4, 4), bounds (2,)] per view, with
                intrinsics scaled by 1/4 for the feature-map resolution.
            intrinsics: (N, 3, 3); world2cams / cam2worlds: (N, 4, 4).
        """
        w, h = self.img_wh
        proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], []
        self.all_id_list = range(len(poses))
        # Global depth range with 20% padding on both ends.
        self.near_far = [bounds.min() * 0.8, bounds.max() * 1.2]
        # self.near_far = np.asarray([bounds.min()*0.9, bounds.max()*1.1]).astype(np.float32)
        print("dataset near_far", self.near_far)
        for vid in self.all_id_list:
            c2w = np.eye(4, dtype=np.float32)
            c2w[:3] = poses[vid]
            w2c = np.linalg.inv(c2w)
            if norm_c2w is not None:
                w2c = w2c @ norm_c2w
                c2w = np.linalg.inv(w2c)
            cam2worlds.append(c2w)
            world2cams.append(w2c)
            # build proj mat from source views to ref view
            proj_mat_l = np.eye(4)
            intrinsic = np.asarray([[self.focal[0], 0, w / 2], [0, self.focal[1], h / 2], [0, 0, 1]])
            intrinsics.append(intrinsic.copy())
            intrinsic[:2] = intrinsic[:2] / 4 # 4 times downscale in the feature space
            proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4]
            proj_mats += [[proj_mat_l, bounds[vid]]]
            # proj_mats += [[proj_mat_l, self.near_far]]
        return proj_mats, np.stack(intrinsics), np.stack(world2cams), np.stack(cam2worlds)

    def build_init_metas(self, holdoff):
        """Split views into train/test and build source-view triplets.

        Every `holdoff`-th view is held out for testing. For training, camera
        centers are triangulated and each triangle — or a combinatorial
        expansion of it selected by opt.full_comb — becomes a triplet of
        source views used for point-cloud initialization.

        NOTE(review): `data_utils`, `itertools`, `copy` and `unique_lst` are
        not among the imports visible in this chunk; presumably imported at
        the top of the file — confirm.
        """
        self.id_list_test = np.arange(len(self.all_id_list))[::holdoff]
        self.id_list_train = np.array([i for i in np.arange(len(self.all_id_list)) if (i not in self.id_list_test)])
        self.id_list = self.id_list_test if self.split == "test" else self.id_list_train
        self.view_id_list = [] # index is id_list's position e.g., the real image id is id_list[view_id]
        cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds[self.id_list_train, :, :]]
        test_cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds[self.id_list_test, :, :]]
        if self.split=="train":
            cam_xyz = np.stack(cam_xyz_lst, axis=0)
            test_cam_xyz = np.stack(test_cam_xyz_lst, axis=0)
            # if self.opt.full_comb <= 1:
            # Ball-pivoting triangulation over training camera centers.
            triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=test_cam_xyz, full_comb=self.opt.full_comb >=1)
            print("triangles:", triangles.shape)
            if self.opt.full_comb <= 1:
                self.view_id_list = [triangles[i] for i in range(len(triangles))]
            elif self.opt.full_comb == 2: # all combination
                triangles = list(itertools.combinations(range(len(cam_xyz)), 3))
                self.view_id_list = [triangles[i] for i in range(len(triangles))]
            elif self.opt.full_comb in [3,4]: # 1 jump
                triplets = []
                # first_dict: triangle anchor -> all vertices it shares a triangle with.
                first_dict = {}
                for tris in triangles:
                    if tris[0] not in first_dict.keys():
                        first_dict[tris[0]] = []
                    first_dict[tris[0]] += [tris[1], tris[2]]
                for key, val in first_dict.items():
                    first_dict[key] = list(unique_lst(val))
                if self.opt.full_comb == 3:
                    # Pairs of direct (1-hop) neighbors around each anchor.
                    for key, val in first_dict.items():
                        pairs = list(itertools.combinations(first_dict[key], 2))
                        triplets += [[key]+list(pair) for pair in pairs]
                    self.view_id_list = [triplets[i] for i in range(len(triplets))]
                elif self.opt.full_comb == 4:
                    # Pairs of strictly 2-hop neighbors around each anchor.
                    second_dict = copy.deepcopy(first_dict)
                    for key, val in first_dict.items():
                        for second in val:
                            second_dict[key] += first_dict[second]
                        second_dict[key] = list(unique_lst(second_dict[key]))
                        second_dict[key] = [val for val in second_dict[key] if val != key and val not in first_dict[key]]
                        # print("key val", key, second_dict[key])
                    for key, val in second_dict.items():
                        pairs = list(itertools.combinations(second_dict[key], 2))
                        print("key val", key, pairs)
                        triplets += [[key] + list(pair) for pair in pairs]
                    print("len()", len(triplets))
                    # exit()
                    self.view_id_list = [triplets[i] for i in range(len(triplets))]
            # print("&&&&&&&&&&&&&&&&&&&&&&&&&&self.view_id_list", len(self.view_id_list))
            # elif self.opt.full_comb == 4: # 1 jump
        # if self.opt.full_comb<0:
        #     with open(f'../data/nerf_synth_configs/list/lego360_init_pairs.txt') as f:
        #         for line in f:
        #             str_lst = line.rstrip().split(',')
        #             src_views = [int(x) for x in str_lst]
        #             self.view_id_list.append(src_views)

    def center_poses(self, poses, blender2opencv):
        """
        Center the poses so that we can use NDC.
        See https://github.com/bmild/nerf/issues/34
        Inputs:
            poses: (N_images, 3, 4)
            blender2opencv: (4, 4) axis-convention change, right-multiplied
        Outputs:
            poses_centered: (N_images, 3, 4) the centered poses
            pose_avg_transform: (3, 4) the inverse average pose (already
                composed with blender2opencv) that was applied to center them
        """
        pose_avg = average_poses(poses) # (3, 4)
        pose_avg_homo = np.eye(4)
        pose_avg_homo[:3] = pose_avg # convert to homogeneous coordinate for faster computation
        # by simply adding 0, 0, 0, 1 as the last row
        last_row = np.tile(np.array([0, 0, 0, 1]), (len(poses), 1, 1)) # (N_images, 1, 4)
        poses_homo = \
            np.concatenate([poses, last_row], 1) # (N_images, 4, 4) homogeneous coordinate
        poses_centered = np.linalg.inv(pose_avg_homo) @ poses_homo # (N_images, 4, 4)
        poses_centered = poses_centered @ blender2opencv
        poses_centered = poses_centered[:, :3] # (N_images, 3, 4)
        return poses_centered, (np.linalg.inv(pose_avg_homo) @ blender2opencv)[:3, :]

    def define_transforms(self):
        """Create the PIL -> tensor transform used by load_images."""
        self.transform = T.ToTensor()

    def __len__(self):
        # NOTE(review): both branches below are identical, so the split check
        # is redundant (kept byte-identical here).
        if self.split == 'train':
            return len(self.id_list) if self.max_len <= 0 else self.max_len
        return len(self.id_list) if self.max_len <= 0 else self.max_len

    def name(self):
        # NOTE(review): returns 'NerfSynthFtDataset' although this class is
        # LlffFtDataset — looks like a copy-paste; confirm nothing keys on
        # this string before changing it.
        return 'NerfSynthFtDataset'

    def __del__(self):
        # Teardown marker printed when the dataset object is garbage-collected.
        print("end loading")

    def normalize_rgb(self, data):
        """Standardize a (C, H, W) image with ImageNet mean/std.

        NOTE(review): the original comment said "unnormalize", but the math
        below normalizes (subtract mean, divide by std).
        """
        # to unnormalize image for visualization
        # data C, H, W
        C, H, W = data.shape
        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1)
        std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
        return (data - mean) / std

    def get_init_item(self, idx, crop=False):
        """Assemble the multi-view initialization sample for triplet *idx*.

        Returns a dict of batched (unsqueezed) tensors: images, per-view
        intrinsics/extrinsics, near/far bounds, and pairwise projection
        matrices mapping each source view into each reference view.
        """
        sample = {}
        init_view_num = self.opt.init_view_num
        view_ids = self.view_id_list[idx]
        if self.split == 'train':
            view_ids = view_ids[:init_view_num]
        affine_mat, affine_mat_inv = [], []
        mvs_images, imgs, depths_h = [], [], []
        proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], []  # record proj mats between views
        for i in view_ids:
            vid = self.id_list[i]
            # mvs_images += [self.normalize_rgb(self.blackimgs[vid])]
            # mvs_images += [self.whiteimgs[vid]]
            # mvs_images += [self.blackimgs[vid]]
            imgs += [self.imgs[vid]]
            proj_mat_ls, near_far = self.proj_mats[vid]
            intrinsics.append(self.intrinsics[vid])
            w2cs.append(self.world2cams[vid])
            c2ws.append(self.cam2worlds[vid])
            affine_mat.append(proj_mat_ls)
            affine_mat_inv.append(np.linalg.inv(proj_mat_ls))
            near_fars.append(near_far)
        # For each reference view i, map every other view j into its frame.
        for i in range(len(affine_mat)):
            view_proj_mats = []
            ref_proj_inv = affine_mat_inv[i]
            for j in range(len(affine_mat)):
                if i == j:  # reference view
                    view_proj_mats += [np.eye(4)]
                else:
                    view_proj_mats += [affine_mat[j] @ ref_proj_inv]
            # view_proj_mats: 4, 4, 4
            view_proj_mats = np.stack(view_proj_mats)
            proj_mats.append(view_proj_mats[:, :3])
        # (4, 4, 3, 4)
        proj_mats = np.stack(proj_mats)
        imgs = np.stack(imgs).astype(np.float32)
        affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv)
        intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars)
        # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub]
        # c2ws_all = self.cam2worlds[self.remap[view_ids_all]]
        sample['images'] = imgs # (V, 3, H, W)
        sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4)
        sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4)
        sample['near_fars'] = near_fars.astype(np.float32)
        sample['near_fars_depth'] = self.near_far
        sample['proj_mats'] = proj_mats.astype(np.float32)
        sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3)
        sample['view_ids'] = np.array(view_ids)
        # sample['light_id'] = np.array(light_idx)
        sample['affine_mat'] = affine_mat
        sample['affine_mat_inv'] = affine_mat_inv
        # sample['scan'] = scan
        # sample['c2ws_all'] = c2ws_all.astype(np.float32)
        # Prepend a batch dimension to every non-string field.
        for key, value in sample.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                sample[key] = value.unsqueeze(0)
        return sample

    def __getitem__(self, id, crop=False):
        """Build the ray batch for view *id* (sampled per opt.random_sample).

        Returns a dict with camera pose, near/far bounds, sampled pixel
        coordinates, world-space ray directions and matching ground-truth RGB.
        """
        item = {}
        vid = self.id_list[id]
        img = self.imgs[vid]
        w2c = self.world2cams[vid]
        c2w = self.cam2worlds[vid]
        intrinsic = self.intrinsics[vid]
        proj_mat_ls, near_far = self.proj_mats[vid]
        gt_image = np.transpose(img, (1,2,0))
        # print("gt_image", gt_image.shape)
        width, height = gt_image.shape[1], gt_image.shape[0]
        camrot = (c2w[0:3, 0:3])
        campos = c2w[0:3, 3]
        # print("camrot", camrot, campos)
        item["intrinsic"] = intrinsic
        # item["intrinsic"] = sample['intrinsics'][0, ...]
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        # Widen the per-view bounds by 10% on each side.
        item['far'] = torch.FloatTensor([near_far[1] * 1.1]).view(1, 1)
        item['near'] = torch.FloatTensor([near_far[0] * 0.9]).view(1, 1)
        item['h'] = height
        item['w'] = width
        # item['depths_h'] = self.depths[id]
        # print("near_far", near_far)
        # bounding box
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            # Contiguous square patch of pixels.
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            # Integer pixel coordinates sampled uniformly with replacement.
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            # Continuous (sub-pixel) coordinates.
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            # Full-image grid (no subsampling).
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0)
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        # Gather ground-truth colors at the sampled pixel coordinates.
        gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)]
        # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :]
        gt_image = np.reshape(gt_image, (-1, 3))
        item['gt_image'] = gt_image
        item['id'] = vid
        if self.bg_color:
            if self.bg_color == 'random':
                # Per-item coin flip between white and black.
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        return item

    def get_item(self, idx, crop=False):
        """__getitem__ plus a leading batch dimension on every tensor field."""
        item = self.__getitem__(idx, crop=crop)
        for key, value in item.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                item[key] = value.unsqueeze(0)
        return item

    def get_dummyrot_item(self, idx, crop=False):
        """Build a ray batch for a novel render pose (no ground-truth image).

        NOTE(review): relies on self.render_poses and — in the
        'proportional_random' branch — on a `gt_mask` name, neither of which
        is defined in this class; presumably set externally / a dead branch.
        Confirm before using those paths.
        """
        item = {}
        width, height = self.width, self.height
        transform_matrix = self.render_poses[idx]
        camrot = (transform_matrix[0:3, 0:3])
        campos = transform_matrix[0:3, 3]
        focal = self.focal
        item["focal"] = focal
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)
        # near far
        if self.opt.near_plane is not None:
            near = self.opt.near_plane
        else:
            near = max(dist - 1.5, 0.02)
        if self.opt.far_plane is not None:
            far = self.opt.far_plane # near +
        else:
            far = dist + 0.7
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([far]).view(1, 1)
        item['near'] = torch.FloatTensor([near]).view(1, 1)
        item['h'] = self.height
        item['w'] = self.width
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            px, py = self.proportional_select(gt_mask)
        else:
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0)
        item["pixel_idx"] = pixelcoords
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        if self.bg_color:
            if self.bg_color == 'random':
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        # Prepend a batch dimension to every field.
        for key, value in item.items():
            if not torch.is_tensor(value):
                value = torch.as_tensor(value)
            item[key] = value.unsqueeze(0)
        return item
| 30,712 | 39.358739 | 184 | py |
pointnerf | pointnerf-master/data/nerf_synth_ft_dataset.py | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
# import torch.nn.functional as F
from .data_utils import get_dtu_raydir
# Camera-frame basis with the z-axis negated. Only referenced in pose comments
# in this file (e.g. "# @ FLIP_Z"); kept for experimentation.
FLIP_Z = np.asarray([
    [1,0,0],
    [0,1,0],
    [0,0,-1],
], dtype=np.float32)
def colorjitter(img, factor):
    """Apply saturation and hue jitter to a PIL image.

    *factor* holds (brightness, contrast, saturation, hue) multipliers;
    the brightness/contrast adjustments are deliberately disabled here.
    The hue factor is sampled around 1.0, so it is shifted back into the
    signed range that adjust_hue expects.
    """
    jittered = F.adjust_saturation(img, factor[2])
    jittered = F.adjust_hue(jittered, factor[3] - 1.0)
    return jittered
def get_rays(directions, c2w):
    """Lift camera-frame ray directions to world space for one image.

    Inputs:
        directions: (H, W, 3) per-pixel ray directions in camera coordinates.
        c2w: (3, 4) camera-to-world matrix.

    Outputs:
        rays_o: (H*W, 3) ray origins in world coordinates.
        rays_d: (H*W, 3) ray directions in world coordinates (left
            unnormalized, matching the rest of the pipeline).
    """
    pose = torch.FloatTensor(c2w)
    # d_world = R @ d_cam for every pixel.
    rays_d = (directions @ pose[:3, :3].T).view(-1, 3)
    # All rays share the camera center as origin; broadcast it per pixel.
    rays_o = pose[:3, 3].expand(directions.shape).contiguous().view(-1, 3)
    return rays_o, rays_d
def get_ray_directions(H, W, focal, center=None):
    """Compute per-pixel ray directions in the camera frame.

    Inputs:
        H, W: image height and width in pixels.
        focal: (fx, fy) focal lengths in pixels.
        center: optional (cx, cy) principal point, defaulting to the image
            center.

    Outputs:
        directions: (H, W, 3) ray directions in camera coordinates.
    """
    # Unnormalized pixel grid. The +0.5 pixel-center offset is intentionally
    # omitted since calibration is not accurate enough
    # (https://github.com/bmild/nerf/issues/24).
    i, j = create_meshgrid(H, W, normalized_coordinates=False)[0].unbind(-1)
    principal = center if center is not None else [W / 2, H / 2]
    components = [
        (i - principal[0]) / focal[0],
        (j - principal[1]) / focal[1],
        torch.ones_like(i),
    ]
    return torch.stack(components, -1)  # (H, W, 3)
class NerfSynthFtDataset(BaseDataset):
def initialize(self, opt, img_wh=[800,800], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None):
self.opt = opt
self.data_dir = opt.data_root
self.scan = opt.scan
self.split = opt.split
self.img_wh = (int(800 * downSample), int(800 * downSample))
self.downSample = downSample
self.scale_factor = 1.0 / 1.0
self.max_len = max_len
self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0])
if not self.opt.bg_color or self.opt.bg_color == 'black':
self.bg_color = (0, 0, 0)
elif self.opt.bg_color == 'white':
self.bg_color = (1, 1, 1)
elif self.opt.bg_color == 'random':
self.bg_color = 'random'
else:
self.bg_color = [float(one) for one in self.opt.bg_color.split(",")]
self.define_transforms()
with open(os.path.join(self.data_dir, self.scan, f'transforms_train.json'), 'r') as f:
self.meta = json.load(f)
self.build_init_metas()
self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32)
if opt.normview > 0:
_, _ , w2cs, c2ws = self.build_proj_mats(list=torch.load('../data/dtu_configs/pairs.th')[f'{self.scan}_test'])
norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws)
if opt.normview >= 2:
self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32)
norm_w2c, norm_c2w = None, None
self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(norm_w2c=norm_w2c, norm_c2w=norm_c2w)
self.read_meta()
self.total = len(self.id_list)
print("dataset total:", self.split, self.total)
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register NeRF-synthetic fine-tuning options on *parser*.

        NOTE(review): some help strings appear copy-pasted from unrelated
        options (e.g. --init_view_num, --normview, --train_load_num); confirm
        their actual meaning before relying on the help text.
        """
        # ['random', 'random2', 'patch'], default: no random sample
        parser.add_argument('--random_sample',
                            type=str,
                            default='none',
                            help='random sample pixels')
        parser.add_argument('--random_sample_size',
                            type=int,
                            default=1024,
                            help='number of random samples')
        parser.add_argument('--init_view_num',
                            type=int,
                            default=3,
                            help='number of random samples')
        parser.add_argument('--shape_id', type=int, default=0, help='shape id')
        parser.add_argument('--trgt_id', type=int, default=0, help='shape id')
        parser.add_argument('--num_nn',
                            type=int,
                            default=1,
                            help='number of nearest views in a batch')
        parser.add_argument(
            '--near_plane',
            type=float,
            default=2.125,
            help=
            'Near clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--far_plane',
            type=float,
            default=4.525,
            help=
            'Far clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--bg_color',
            type=str,
            default="white",
            help=
            'background color, white|black(None)|random|rgb (float, float, float)'
        )
        parser.add_argument(
            '--scan',
            type=str,
            default="scan1",
            help=''
        )
        parser.add_argument('--inverse_gamma_image',
                            type=int,
                            default=-1,
                            help='de-gamma correct the input image')
        parser.add_argument('--pin_data_in_memory',
                            type=int,
                            default=-1,
                            help='load whole data in memory')
        parser.add_argument('--normview',
                            type=int,
                            default=0,
                            help='load whole data in memory')
        parser.add_argument(
            '--id_range',
            type=int,
            nargs=3,
            default=(0, 385, 1),
            help=
            'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.'
        )
        parser.add_argument(
            '--id_list',
            type=int,
            nargs='+',
            default=None,
            help=
            'the list of data ids selected in the original dataset. The default is range(0, 385).'
        )
        parser.add_argument(
            '--split',
            type=str,
            default="train",
            help=
            'train, val, test'
        )
        parser.add_argument("--half_res", action='store_true',
                            help='load blender synthetic data at 400x400 instead of 800x800')
        parser.add_argument("--testskip", type=int, default=8,
                            help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
        parser.add_argument('--dir_norm',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument('--train_load_num',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        return parser
def normalize_cam(self, w2cs, c2ws):
# cam_xyz = c2ws[..., :3, 3]
# rtp = self.bcart2sphere(cam_xyz)
# print(rtp.shape)
# rtp = np.mean(rtp, axis=0)
# avg_xyz = self.sphere2cart(rtp)
# euler_lst = []
# for i in range(len(c2ws)):
# euler_angles = self.matrix2euler(c2ws[i][:3,:3])
# print("euler_angles", euler_angles)
# euler_lst += [euler_angles]
# euler = np.mean(np.stack(euler_lst, axis=0), axis=0)
# print("euler mean ",euler)
# M = self.euler2matrix(euler)
# norm_c2w = np.eye(4)
# norm_c2w[:3,:3] = M
# norm_c2w[:3,3] = avg_xyz
# norm_w2c = np.linalg.inv(norm_c2w)
# return norm_w2c, norm_c2w
index = 0
return w2cs[index], c2ws[index]
    def define_transforms(self):
        """Create the PIL -> tensor transform used by read_meta.

        NOTE(review): this method is defined twice in this class; the second,
        identical definition further down silently overrides this one.
        """
        self.transform = T.ToTensor()
    #
    # def load_poses_all(self):
    #     c2ws = []
    #     List = sorted(os.listdir(os.path.join(self.data_dir, f'Cameras/train/')))
    #     for item in List:
    #         proj_mat_filename = os.path.join(self.data_dir, f'Cameras/train/{item}')
    #         intrinsic, w2c, near_far = self.read_cam_file(proj_mat_filename)
    #         intrinsic[:2] *= 4
    #         c2ws.append(np.linalg.inv(w2c))
    #     self.focal = [intrinsic[0, 0], intrinsic[1, 1]]
    #     return np.stack(c2ws)
    def build_init_metas(self):
        """Load the view split: pair file for training, pairs.th otherwise.

        Train: parses '<scan>_finetune_init_pairs_final.txt'. Its first line
        is "num_viewpoint,num_pairs"; each subsequent pair of lines is a
        reference view id followed by a comma-separated list of source view
        ids. The first num_viewpoint reference views also populate
        self.id_list; the remaining pairs only extend self.view_id_list.
        Test/val: self.id_list comes straight from the shared pairs.th file.
        """
        self.view_id_list = []
        self.id_list = []
        if self.split=="train":
            with open(f'../data/nerf_synth_configs/list/{self.scan}_finetune_init_pairs_final.txt') as f:
                num_lst = f.readline().rstrip().split(',')
                num_viewpoint, num_pairs = int(num_lst[0]), int(num_lst[1])
                # viewpoints (20)
                for _ in range(num_viewpoint):
                    ref_view = int(f.readline().rstrip())
                    str_lst=f.readline().rstrip().split(',')
                    src_views = [int(x) for x in str_lst]
                    self.view_id_list.append([ref_view] + src_views)
                    self.id_list.append(ref_view)
                for _ in range(num_viewpoint, num_pairs):
                    ref_view = int(f.readline().rstrip())
                    str_lst = f.readline().rstrip().split(',')
                    src_views = [int(x) for x in str_lst]
                    self.view_id_list.append([ref_view] + src_views)
        else:
            self.id_list = torch.load('../data/dtu_configs/pairs.th')[f'{self.scan}_{self.split}']
def build_proj_mats(self, list=None, norm_w2c=None, norm_c2w=None):
proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], []
list = self.id_list if list is None else list
focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) # original focal length
focal *= self.img_wh[0] / 800 # modify focal length to match size self.img_wh
self.focal = focal
self.near_far = np.array([2.0, 6.0])
for vid in list:
frame = self.meta['frames'][vid]
c2w = np.array(frame['transform_matrix']) @ self.blender2opencv
if norm_w2c is not None:
c2w = norm_w2c @ c2w
w2c = np.linalg.inv(c2w)
cam2worlds.append(c2w)
world2cams.append(w2c)
intrinsic = np.array([[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]])
intrinsics.append(intrinsic.copy())
# multiply intrinsics and extrinsics to get projection matrix
proj_mat_l = np.eye(4)
intrinsic[:2] = intrinsic[:2] / 4
proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4]
proj_mats += [(proj_mat_l, self.near_far)]
proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics)
world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds)
return proj_mats, intrinsics, world2cams, cam2worlds
    def define_transforms(self):
        # Duplicate re-definition (identical to the one earlier in this class);
        # this is the binding that actually survives.
        self.transform = T.ToTensor()
    def read_meta(self):
        """Load and preprocess all selected frames.

        Populates: image_paths; depths (binary alpha > 0.1 masks); alphas
        (raw alpha channel); blackimgs / whiteimgs (RGB composited over black
        and white backgrounds); view_id_dict (frame id -> position in
        id_list); directions (per-pixel camera rays); and poses (alias of
        cam2worlds).
        """
        w, h = self.img_wh
        self.image_paths = []
        self.poses = []
        self.all_rays = []
        self.blackimgs = []
        self.whiteimgs = []
        self.depths = []
        self.alphas = []
        self.view_id_dict = {}
        self.directions = get_ray_directions(h, w, [self.focal, self.focal])  # (h, w, 3)
        count = 0
        for i, idx in enumerate(self.id_list):
            frame = self.meta['frames'][idx]
            image_path = os.path.join(self.data_dir, self.scan, f"{frame['file_path']}.png")
            self.image_paths += [image_path]
            img = Image.open(image_path)
            img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img)  # (4, h, w)
            # Binary occupancy mask and raw alpha channel.
            self.depths += [(img[-1:, ...] > 0.1).numpy().astype(np.float32)]
            self.alphas += [img[-1:].numpy().astype(np.float32)]
            # Alpha-composited RGB over black and over white backgrounds.
            self.blackimgs += [img[:3] * img[-1:]]
            self.whiteimgs += [img[:3] * img[-1:] + (1 - img[-1:])]
            # ray directions for all pixels, same for all images (same H, W, focal)
            # rays_o, rays_d = get_rays(self.directions, self.cam2worlds[i]) # both (h*w, 3)
            #
            # self.all_rays += [torch.cat([rays_o, rays_d,
            #                              self.near_far[0] * torch.ones_like(rays_o[:, :1]),
            #                              self.near_far[1] * torch.ones_like(rays_o[:, :1])], 1)] # (h*w, 8)
            self.view_id_dict[idx] = i
        self.poses = self.cam2worlds
def __len__(self):
if self.split == 'train':
return len(self.id_list) if self.max_len <= 0 else self.max_len
return len(self.id_list) if self.max_len <= 0 else self.max_len
    def name(self):
        """Human-readable dataset identifier."""
        return 'NerfSynthFtDataset'
    def __del__(self):
        # Teardown marker printed when the dataset object is garbage-collected.
        print("end loading")
def normalize_rgb(self, data):
# to unnormalize image for visualization
# data C, H, W
C, H, W = data.shape
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
return (data - mean) / std
    def get_init_item(self, idx, crop=False):
        """Build the initialization sample for view group *idx*.

        Gathers the first init_view_num views (train split), stacks their
        images, depths, alphas and camera matrices, computes pairwise
        relative projection matrices between all selected views, and returns
        everything as batched (unsqueezed) tensors.
        """
        sample = {}
        init_view_num = self.opt.init_view_num
        view_ids = self.view_id_list[idx]
        if self.split == 'train':
            view_ids = view_ids[:init_view_num]
        affine_mat, affine_mat_inv = [], []
        mvs_images, imgs, depths_h, alphas = [], [], [], []
        proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], []  # record proj mats between views
        for i in view_ids:
            vid = self.view_id_dict[i]  # map frame id -> position in the loaded arrays
            # mvs_images += [self.normalize_rgb(self.blackimgs[vid])]
            # mvs_images += [self.whiteimgs[vid]]
            mvs_images += [self.blackimgs[vid]]
            imgs += [self.whiteimgs[vid]]
            proj_mat_ls, near_far = self.proj_mats[vid]
            intrinsics.append(self.intrinsics[vid])
            w2cs.append(self.world2cams[vid])
            c2ws.append(self.cam2worlds[vid])
            affine_mat.append(proj_mat_ls)
            affine_mat_inv.append(np.linalg.inv(proj_mat_ls))
            depths_h.append(self.depths[vid])
            alphas.append(self.alphas[vid])
            near_fars.append(near_far)
        # For every view i, compute the projection from every other view j
        # into i's frame: P_j @ inv(P_i); the reference view gets identity.
        for i in range(len(affine_mat)):
            view_proj_mats = []
            ref_proj_inv = affine_mat_inv[i]
            for j in range(len(affine_mat)):
                if i == j:  # reference view
                    view_proj_mats += [np.eye(4)]
                else:
                    view_proj_mats += [affine_mat[j] @ ref_proj_inv]
            # view_proj_mats: 4, 4, 4
            view_proj_mats = np.stack(view_proj_mats)
            proj_mats.append(view_proj_mats[:, :3])  # drop the last row -> 3x4
        # (4, 4, 3, 4)
        proj_mats = np.stack(proj_mats)
        imgs = np.stack(imgs).astype(np.float32)
        mvs_images = np.stack(mvs_images).astype(np.float32)
        depths_h = np.stack(depths_h)
        alphas = np.stack(alphas)
        affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv)
        intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars)
        # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub]
        # c2ws_all = self.cam2worlds[self.remap[view_ids_all]]
        sample['images'] = imgs  # (V, 3, H, W)
        sample['mvs_images'] = mvs_images  # (V, 3, H, W)
        sample['depths_h'] = depths_h.astype(np.float32)  # (V, H, W)
        sample['alphas'] = alphas.astype(np.float32)  # (V, H, W)
        sample['w2cs'] = w2cs.astype(np.float32)  # (V, 4, 4)
        sample['c2ws'] = c2ws.astype(np.float32)  # (V, 4, 4)
        sample['near_fars'] = near_fars.astype(np.float32)
        sample['proj_mats'] = proj_mats.astype(np.float32)
        sample['intrinsics'] = intrinsics.astype(np.float32)  # (V, 3, 3)
        sample['view_ids'] = np.array(view_ids)
        # sample['light_id'] = np.array(light_idx)
        sample['affine_mat'] = affine_mat
        sample['affine_mat_inv'] = affine_mat_inv
        # sample['scan'] = scan
        # sample['c2ws_all'] = c2ws_all.astype(np.float32)
        # Convert everything (except strings) to tensors with a leading batch dim.
        for key, value in sample.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                sample[key] = value.unsqueeze(0)
        return sample
    def __getitem__(self, id, crop=False):
        """Return one rendering sample for view *id*.

        Picks pixel coordinates according to opt.random_sample
        ('patch' | 'random' | 'random2' | full grid), builds the matching
        camera-space ray directions, and gathers the ground-truth colors at
        those pixels.
        """
        item = {}
        img = self.whiteimgs[id]
        w2c = self.world2cams[id]
        c2w = self.cam2worlds[id]
        intrinsic = self.intrinsics[id]
        proj_mat_ls, near_far = self.proj_mats[id]
        gt_image = np.transpose(img, (1,2,0))  # CHW -> HWC
        # print("gt_image", gt_image.shape)
        width, height = gt_image.shape[1], gt_image.shape[0]
        camrot = (c2w[0:3, 0:3])  # camera-to-world rotation
        campos = c2w[0:3, 3]      # camera center in world space
        # print("camrot", camrot, campos)
        item["intrinsic"] = intrinsic
        # item["intrinsic"] = sample['intrinsics'][0, ...]
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()  # @ FLIP_Z
        item['lightpos'] = item["campos"]  # light co-located with the camera
        dist = np.linalg.norm(campos)
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1)
        item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1)
        item['h'] = height
        item['w'] = width
        item['depths_h'] = self.depths[id]
        # bounding box
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            # Contiguous square patch at a random offset.
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            # Independent integer pixel coordinates.
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            # Continuous sub-pixel coordinates.
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            # Default: full image grid.
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0)
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)]
        # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :]
        gt_image = np.reshape(gt_image, (-1, 3))
        item['gt_image'] = gt_image
        item['id'] = id
        if self.bg_color:
            if self.bg_color == 'random':
                # Coin flip between pure white and pure black background.
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        return item
def get_item(self, idx, crop=False):
item = self.__getitem__(idx, crop=crop)
for key, value in item.items():
if not isinstance(value, str):
if not torch.is_tensor(value):
value = torch.as_tensor(value)
item[key] = value.unsqueeze(0)
return item
    def get_dummyrot_item(self, idx, crop=False):
        """Build a camera-only sample for render pose *idx* (no ground-truth
        image): camera pose, near/far planes derived from camera distance
        (unless overridden by opt), sampled pixel coordinates and rays.
        """
        item = {}
        width, height = self.width, self.height
        transform_matrix = self.render_poses[idx]
        camrot = (transform_matrix[0:3, 0:3])
        campos = transform_matrix[0:3, 3]
        focal = self.focal
        item["focal"] = focal
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)
        # near far
        if self.opt.near_plane is not None:
            near = self.opt.near_plane
        else:
            near = max(dist - 1.5, 0.02)
        if self.opt.far_plane is not None:
            far = self.opt.far_plane  # near +
        else:
            far = dist + 0.7
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([far]).view(1, 1)
        item['near'] = torch.FloatTensor([near]).view(1, 1)
        item['h'] = self.height
        item['w'] = self.width
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            # NOTE(review): gt_mask is undefined in this method, so this
            # branch raises NameError if reached; __getitem__ raises an
            # explicit Exception for the same case — confirm intended.
            px, py = self.proportional_select(gt_mask)
        else:
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0)
        item["pixel_idx"] = pixelcoords
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        if self.bg_color:
            if self.bg_color == 'random':
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        # Batch every value with a leading dimension.
        for key, value in item.items():
            if not torch.is_tensor(value):
                value = torch.as_tensor(value)
            item[key] = value.unsqueeze(0)
        return item
| 26,636 | 39.055639 | 174 | py |
pointnerf | pointnerf-master/data/dtu_dataset.py | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
# import torch.nn.functional as F
from .data_utils import get_dtu_raydir
# Rotation that negates the z axis; used to flip camera orientation conventions.
FLIP_Z = np.asarray([
    [1,0,0],
    [0,1,0],
    [0,0,-1],
], dtype=np.float32)
def colorjitter(img, factor):
    """Jitter the saturation and hue of *img*.

    *factor* is indexed as (brightness, contrast, saturation, hue); only the
    saturation (factor[2]) and hue (factor[3] - 1.0) components are applied.
    """
    img = F.adjust_saturation(img, factor[2])
    return F.adjust_hue(img, factor[3] - 1.0)
class DtuDataset(BaseDataset):
    """DTU multi-view stereo dataset loader.

    Loads rectified images, raw depth maps and per-view camera files, builds
    pairwise projection matrices between views, and produces both MVS-style
    samples and per-pixel ray samples for neural rendering.
    """
    def initialize(self, opt, n_views=3, levels=1, img_wh=[640,512], downSample=1.0, max_len=-1):
        """Set up paths, background color, camera matrices and metadata lists."""
        self.opt = opt
        self.data_dir = opt.data_root
        if not self.opt.bg_color or self.opt.bg_color == 'black':
            self.bg_color = (0, 0, 0)
        elif self.opt.bg_color == 'white':
            self.bg_color = (1, 1, 1)
        elif self.opt.bg_color == 'random':
            self.bg_color = 'random'
        else:
            # NOTE(review): reads self.bg_color before it is ever assigned in
            # this branch — likely should be self.opt.bg_color.split(); confirm.
            self.bg_color = [float(one) for one in self.bg_color.split()]
            if len(self.bg_color) != 3:
                self.bg_color = None
        self.img_wh = img_wh
        self.downSample = downSample
        self.scale_factor = 1.0 / 200  # DTU metric scale normalization
        self.max_len = max_len
        self.n_views = n_views
        self.levels = levels  # FPN levels
        self.split = opt.split
        self.build_metas()
        self.build_proj_mats()
        self.define_transforms()
        self.near_far = np.asarray([2.125, 4.525])  # global near/far after scaling
        if img_wh is not None:
            assert img_wh[0] % 32 == 0 and img_wh[1] % 32 == 0, \
                'img_wh must both be multiples of 32!'
        self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0])
        if os.path.isfile(self.data_dir + "/bb.txt"):
            self.bb = np.loadtxt(self.data_dir + "/bb.txt")
            print("boundingbox", self.bb)
        else:
            # Default unit cube bounding box when no bb.txt is provided.
            self.bb = np.array([-1, -1, -1, 1, 1, 1]).reshape(
                (2, 3)).astype(np.float32)
        self.total = len(self.metas)
        print("dataset total:", self.split, self.total)
        return
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register dataset-specific command-line options on *parser*."""
        # ['random', 'random2', 'patch'], default: no random sample
        parser.add_argument('--random_sample',
                            type=str,
                            default='none',
                            help='random sample pixels')
        parser.add_argument('--random_sample_size',
                            type=int,
                            default=1024,
                            help='number of random samples')
        parser.add_argument('--shape_id', type=int, default=0, help='shape id')
        parser.add_argument('--trgt_id', type=int, default=0, help='shape id')
        parser.add_argument('--num_nn',
                            type=int,
                            default=1,
                            help='number of nearest views in a batch')
        parser.add_argument(
            '--near_plane',
            type=float,
            default=2.0,
            help=
            'Near clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--far_plane',
            type=float,
            default=6.0,
            help=
            'Far clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument('--init_view_num',
                            type=int,
                            default=3,
                            help='number of random samples')
        parser.add_argument(
            '--bg_color',
            type=str,
            default="white",
            help=
            'background color, white|black(None)|random|rgb (float, float, float)'
        )
        # parser.add_argument(
        #     '--z_dir',
        #     type=str,
        #     default="down",
        #     help=
        #     'z axis up (in nerf json), down (in reflectance ply)'
        # )
        parser.add_argument('--inverse_gamma_image',
                            type=int,
                            default=-1,
                            help='de-gamma correct the input image')
        parser.add_argument('--pin_data_in_memory',
                            type=int,
                            default=-1,
                            help='load whole data in memory')
        parser.add_argument(
            '--id_range',
            type=int,
            nargs=3,
            default=(0, 385, 1),
            help=
            'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.'
        )
        parser.add_argument(
            '--id_list',
            type=int,
            nargs='+',
            default=None,
            help=
            'the list of data ids selected in the original dataset. The default is range(0, 385).'
        )
        parser.add_argument(
            '--split',
            type=str,
            default="train",
            help=
            'train, val, test'
        )
        parser.add_argument("--half_res", action='store_true',
                            help='load blender synthetic data at 400x400 instead of 800x800')
        parser.add_argument("--testskip", type=int, default=8,
                            help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
        parser.add_argument('--dir_norm',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument('--train_load_num',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        return parser
    def define_transforms(self):
        """Create the image preprocessing pipeline (tensor conversion only)."""
        self.transform = T.Compose([T.ToTensor(),
                                    # T.Normalize(mean=[0.485, 0.456, 0.406],
                                    #             std=[0.229, 0.224, 0.225]),
                                    ])
    def build_metas(self):
        """Read the scan list and view-pair file; build (scan, light, ref, srcs) metas."""
        self.metas = []
        with open(f'../data/dtu_configs/lists/dtu_{self.split}_all.txt') as f:
            self.scans = [line.rstrip() for line in f.readlines()]
        # light conditions 0-6 for training
        # light condition 3 for testing (the brightest?)
        light_idxs = [3] if 'train' != self.split else range(7)
        self.id_list = []
        for scan in self.scans:
            with open(f'../data/dtu_configs/dtu_pairs.txt') as f:
                num_viewpoint = int(f.readline())
                # viewpoints (49)
                for _ in range(num_viewpoint):
                    ref_view = int(f.readline().rstrip())
                    src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
                    for light_idx in light_idxs:
                        self.metas += [(scan, light_idx, ref_view, src_views)]
                        self.id_list.append([ref_view] + src_views)
        self.id_list = np.unique(self.id_list)
        self.build_remap()
    def build_proj_mats(self):
        """Load per-view camera files and precompute projection / extrinsic stacks."""
        proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], []
        for vid in self.id_list:
            proj_mat_filename = os.path.join(self.data_dir,
                                             f'Cameras/train/{vid:08d}_cam.txt')
            intrinsic, extrinsic, near_far = self.read_cam_file(proj_mat_filename)
            intrinsic[:2] *= 4  # camera files store intrinsics at 1/4 resolution
            extrinsic[:3, 3] *= self.scale_factor
            intrinsic[:2] = intrinsic[:2] * self.downSample
            intrinsics += [intrinsic.copy()]
            # multiply intrinsics and extrinsics to get projection matrix
            proj_mat_l = np.eye(4)
            intrinsic[:2] = intrinsic[:2] / 4
            proj_mat_l[:3, :4] = intrinsic @ extrinsic[:3, :4]
            proj_mats += [(proj_mat_l, near_far)]
            world2cams += [extrinsic]
            cam2worlds += [np.linalg.inv(extrinsic)]
        self.proj_mats, self.intrinsics = np.stack(proj_mats), np.stack(intrinsics)
        self.world2cams, self.cam2worlds = np.stack(world2cams), np.stack(cam2worlds)
    def read_cam_file(self, filename):
        """Parse a DTU camera file: returns (intrinsics 3x3, extrinsics 4x4, [near, far])."""
        with open(filename) as f:
            lines = [line.rstrip() for line in f.readlines()]
        # extrinsics: line [1,5), 4x4 matrix
        extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ')
        extrinsics = extrinsics.reshape((4, 4))
        # intrinsics: line [7-10), 3x3 matrix
        intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ')
        intrinsics = intrinsics.reshape((3, 3))
        # depth_min & depth_interval: line 11
        depth_min = float(lines[11].split()[0]) * self.scale_factor
        depth_max = depth_min + float(lines[11].split()[1]) * 192 * self.scale_factor * 1.06
        self.depth_interval = float(lines[11].split()[1])
        return intrinsics, extrinsics, [depth_min, depth_max]
    def check_read_depth(self, depth_filename, processed_filename):
        """Debug helper: compare a freshly resized raw depth map against a
        preprocessed one and print the absolute difference."""
        depth_h = np.array(read_pfm(depth_filename)[0], dtype=np.float32)  # (800, 800) ? (1200. 1600)
        depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
                             interpolation=cv2.INTER_NEAREST)  # (600, 800)
        depth_h = depth_h[44:556, 80:720]  # (512, 640)
        depth_h = cv2.resize(depth_h, None, fx=self.downSample, fy=self.downSample,
                             interpolation=cv2.INTER_NEAREST)  # !!!!!!!!!!!!!!!!!!!!!!!!!
        depth = cv2.resize(depth_h, None, fx=1.0 / 4, fy=1.0 / 4,
                           interpolation=cv2.INTER_NEAREST)  # !!!!!!!!!!!!!!!!!!!!!!!!!
        depth_pro = np.array(read_pfm(processed_filename)[0], dtype=np.float32)  # (800, 800) ? (1200. 1600)
        print("depth", depth.shape, depth_pro.shape, np.sum(np.abs(depth-depth_pro)))
    def read_depth(self, filename, downSample=None):
        """Load a PFM depth map, crop/resize it, and return (quarter-res depth,
        validity mask, full-res depth)."""
        downSample = self.downSample if downSample is None else downSample
        depth_h = np.array(read_pfm(filename)[0], dtype=np.float32)  # (800, 800) ? (1200. 1600)
        depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
                             interpolation=cv2.INTER_NEAREST)  # (600, 800)
        depth_h = depth_h[44:556, 80:720]  # (512, 640)
        depth_h = cv2.resize(depth_h, None, fx=downSample, fy=downSample,
                             interpolation=cv2.INTER_NEAREST)  # !!!!!!!!!!!!!!!!!!!!!!!!!
        depth = cv2.resize(depth_h, None, fx=1.0 / 4, fy=1.0 / 4,
                           interpolation=cv2.INTER_NEAREST)  # !!!!!!!!!!!!!!!!!!!!!!!!!
        mask = depth > 0
        return depth, mask, depth_h
    def build_remap(self):
        """Build a lookup table from original view id to index in id_list."""
        self.remap = np.zeros(np.max(self.id_list) + 1).astype('int')
        for i, item in enumerate(self.id_list):
            self.remap[item] = i
    def __len__(self):
        """Number of metas, capped by max_len when max_len > 0."""
        return len(self.metas) if self.max_len <= 0 else self.max_len
    def name(self):
        """Human-readable dataset identifier."""
        return 'DtuDataset'
    def __del__(self):
        """Log dataset teardown."""
        print("end loading")
    def __getitem__(self, idx, crop=False):
        """Load one meta: source + target views, depths, cameras, pairwise
        projection matrices, and per-pixel ray samples for the target view."""
        sample = {}
        scan, light_idx, target_view, src_views = self.metas[idx]
        if self.split=='train':
            # Randomly pick 3 of the first 5 source views during training.
            ids = torch.randperm(5)[:3]
            view_ids = [src_views[i] for i in ids] + [target_view]
        else:
            view_ids = [src_views[i] for i in range(3)] + [target_view]
        affine_mat, affine_mat_inv = [], []
        imgs, depths_h = [], []
        proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], []  # record proj mats between views
        for i, vid in enumerate(view_ids):
            # NOTE that the id in image file names is from 1 to 49 (not 0~48)
            img_filename = os.path.join(self.data_dir,
                                        f'Rectified/{scan}_train/rect_{vid + 1:03d}_{light_idx}_r5000.png')
            # print("img_filename",img_filename)
            depth_filename = os.path.join(self.data_dir,
                                          f'Depths_raw/{scan}/depth_map_{vid:04d}.pfm')
            img = Image.open(img_filename)
            # print("img_filename", img_filename, depth_filename)
            img_wh = np.round(np.array(img.size) * self.downSample).astype('int')
            img = img.resize(img_wh, Image.BILINEAR)
            img = self.transform(img)
            imgs += [img]
            index_mat = self.remap[vid]
            proj_mat_ls, near_far = self.proj_mats[index_mat]
            intrinsics.append(self.intrinsics[index_mat])
            w2cs.append(self.world2cams[index_mat])
            c2ws.append(self.cam2worlds[index_mat])
            affine_mat.append(proj_mat_ls)
            affine_mat_inv.append(np.linalg.inv(proj_mat_ls))
            if os.path.exists(depth_filename):
                depth, mask, depth_h = self.read_depth(depth_filename)
                # self.check_read_depth(depth_filename, os.path.join(self.data_dir, f'Depths/{scan}_train/depth_map_{vid:04d}.pfm'))
                depth_h *= self.scale_factor
                depths_h.append(depth_h)
            else:
                depths_h.append(np.zeros((1, 1)))
            near_fars.append(near_far)
        # Pairwise relative projections: P_j @ inv(P_i), identity for i == j.
        for i in range(len(affine_mat)):
            view_proj_mats = []
            ref_proj_inv = affine_mat_inv[i]
            for j in range(len(affine_mat)):
                if i == j:  # reference view
                    view_proj_mats += [np.eye(4)]
                else:
                    view_proj_mats += [affine_mat[j] @ ref_proj_inv]
            # view_proj_mats: 4, 4, 4
            view_proj_mats = np.stack(view_proj_mats)
            proj_mats.append(view_proj_mats[:, :3])
        # (4, 4, 3, 4)
        proj_mats = np.stack(proj_mats)
        imgs = np.stack(imgs).astype(np.float32)
        # if self.split == 'train':
        #     imgs = colorjitter(imgs, 1.0+(torch.rand((4,))*2-1.0)*0.5)
        # imgs = F.normalize(imgs,mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        depths_h = np.stack(depths_h)
        # print("proj_mats", proj_mats[0].shape)
        affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv)
        intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars)
        view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub]
        c2ws_all = self.cam2worlds[self.remap[view_ids_all]]
        sample['images'] = imgs  # (V, 3, H, W)
        sample['mvs_images'] = imgs  #self.normalize_rgb(imgs) # (V, 3, H, W)
        sample['depths_h'] = depths_h.astype(np.float32)  # (V, H, W)
        sample['w2cs'] = w2cs.astype(np.float32)  # (V, 4, 4)
        sample['c2ws'] = c2ws.astype(np.float32)  # (V, 4, 4)
        sample['near_fars_depth'] = near_fars.astype(np.float32)[0]
        sample['near_fars'] = np.tile(self.near_far.astype(np.float32)[None,...],(len(near_fars),1))
        sample['proj_mats'] = proj_mats.astype(np.float32)
        sample['intrinsics'] = intrinsics.astype(np.float32)  # (V, 3, 3)
        sample['view_ids'] = np.array(view_ids)
        sample['light_id'] = np.array(light_idx)
        sample['affine_mat'] = affine_mat
        sample['affine_mat_inv'] = affine_mat_inv
        sample['scan'] = scan
        sample['c2ws_all'] = c2ws_all.astype(np.float32)
        # --- Build the per-pixel ray item for the target rendering view. ---
        item = {}
        gt_image = np.transpose(imgs[self.opt.trgt_id, ...], (1,2,0))
        width, height = gt_image.shape[1], gt_image.shape[0]
        # gt_mask = (gt_image[..., -1] > 0.1).astype(np.float32)
        # item['gt_mask'] = torch.from_numpy(gt_mask).permute(2, 0, 1).float()
        # gt_image = gt_image / 255.0 # already / 255 for blender
        transform_matrix = w2cs[self.opt.ref_vid] @ c2ws[self.opt.trgt_id]
        # transform_matrix = w2cs[0] @ c2ws[0]
        camrot = (transform_matrix[0:3, 0:3])
        campos = transform_matrix[0:3, 3]
        item["intrinsic"] = sample['intrinsics'][self.opt.trgt_id, ...]
        # item["intrinsic"] = sample['intrinsics'][0, ...]
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()  # @ FLIP_Z
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([near_fars[self.opt.trgt_id][1]]).view(1, 1)
        item['near'] = torch.FloatTensor([near_fars[self.opt.trgt_id][0]]).view(1, 1)
        item['h'] = height
        item['w'] = width
        # bounding box
        item['bb'] = torch.from_numpy(self.bb).float()
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0)
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)]
        # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :]
        gt_image = np.reshape(gt_image, (-1, 3))
        # gt_mask = np.reshape(gt_mask, (-1, 1))
        # if self.opt.bg_color is not None:
        #     gt_image = np.clip(
        #         np.power(
        #             np.power(gt_image, 2.2) +
        #             (1 - gt_mask) * self.opt.bg_color, 1.0 / 2.2), 0, 1)
        #     gt_mask[gt_mask > 0] = 1
        item['gt_image'] = gt_image
        # item['gt_image'] = torch.from_numpy(gt_image).float().contiguous()
        # item["gt_mask"] = torch.from_numpy(gt_mask).float().contiguous()
        if self.bg_color:
            if self.bg_color == 'random':
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        sample.update(item)
        return sample
    def get_item(self, idx, crop=False):
        """Fetch sample *idx* and add a leading batch dim to non-string values."""
        item = self.__getitem__(idx, crop=crop)
        for key, value in item.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                item[key] = value.unsqueeze(0)
        return item
    def get_dummyrot_item(self, idx, crop=False):
        """Build a camera-only sample for render pose *idx* (no ground truth)."""
        item = {}
        width, height = self.width, self.height
        transform_matrix = self.render_poses[idx]
        camrot = (transform_matrix[0:3, 0:3])
        campos = transform_matrix[0:3, 3]
        focal = self.focal
        item["focal"] = focal
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)
        # near far
        if self.opt.near_plane is not None:
            near = self.opt.near_plane
        else:
            near = max(dist - 1.5, 0.02)
        if self.opt.far_plane is not None:
            far = self.opt.far_plane  # near +
        else:
            far = dist + 0.7
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([far]).view(1, 1)
        item['near'] = torch.FloatTensor([near]).view(1, 1)
        item['h'] = self.height
        item['w'] = self.width
        # bounding box
        item['bb'] = torch.from_numpy(self.bb).float()
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            # NOTE(review): gt_mask is undefined here, so this branch raises
            # NameError if reached — confirm whether it should raise explicitly
            # like __getitem__ does.
            px, py = self.proportional_select(gt_mask)
        else:
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0)
        item["pixel_idx"] = pixelcoords
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        if self.bg_color:
            if self.bg_color == 'random':
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        for key, value in item.items():
            if not torch.is_tensor(value):
                value = torch.as_tensor(value)
            item[key] = value.unsqueeze(0)
        return item
    def check_points_range(self):
        """Offline sanity check: back-project all DTU depth maps to world space
        and print per-scan and global bounding boxes.

        NOTE(review): near_far is reassigned inside the loop by read_cam_file,
        shadowing the initial [2.125, 4.525] used for the mask on subsequent
        iterations — confirm this is intended.
        """
        import glob
        from os import path
        data_dir='/home/xharlie/user_space/data/nrData/dtu/'
        near_far=[2.125, 4.525]
        W, H = 640, 512
        downSample=1
        self.scale_factor=1/200
        scale_factor=1/200
        all_min_lst, all_max_lst = [], []
        for idx in range(1, 129):
            scan = "scan{}".format(idx)
            obj_min_lst, obj_max_lst = [], []
            for vid in range(49):
                depth_filename = os.path.join(data_dir, f'Depths_raw/{scan}/depth_map_{vid:04d}.pfm')
                camfilename = os.path.join(data_dir, f'Cameras/train/{vid:08d}_cam.txt')
                if not path.exists(depth_filename) or not path.exists(camfilename):
                    print("depth_filename:", path.exists(depth_filename), "; camfilename ",path.exists(camfilename))
                    break
                _, _, depth_h = self.read_depth(depth_filename, downSample=downSample)
                depth_h *= scale_factor
                mask = np.logical_and(depth_h >= near_far[0], depth_h <= near_far[1]).reshape(-1)
                intrinsic, extrinsic, near_far = self.read_cam_file(camfilename)
                intrinsic[:2] *= 4
                extrinsic[:3, 3] *= scale_factor
                intrinsic[:2] = intrinsic[:2] * downSample
                w2c = extrinsic
                c2w = np.linalg.inv(extrinsic)
                # mask = torch.logical_and(depth_h >= near_far[0], cam_expected_depth <= near_far[1])
                ndc_expected_depth = (depth_h - near_far[0]) / (near_far[1] - near_far[0])  # 512, 640
                valid_z = ndc_expected_depth
                valid_x = np.arange(W, dtype=np.float32) / (W - 1)
                valid_y = np.arange(H, dtype=np.float32) / (H - 1)
                valid_y, valid_x = np.meshgrid(valid_y, valid_x, indexing="ij")  # 512, 640; 512, 640
                # B,N,H,W
                ndc_xyz = np.stack([valid_x, valid_y, valid_z], axis=-1)  # 512, 640, 3
                cam_xyz = self.ndc_2_cam(ndc_xyz, near_far, intrinsic, W, H)
                w_xyz = np.concatenate([cam_xyz, np.ones_like(cam_xyz[...,0:1])], axis=-1) @ c2w.T  # (327680, 4)
                w_xyz = w_xyz[mask,:3]
                xyz_min_np, xyz_max_np = np.min(w_xyz, axis=-2), np.max(w_xyz, axis=-2)
                obj_min_lst.append(xyz_min_np)
                obj_max_lst.append(xyz_max_np)
                max_edge = max(xyz_max_np-xyz_min_np)  # NOTE(review): computed but never used
                # print("xyz_min_np, xyz_max_np edges,", xyz_min_np, xyz_max_np, xyz_max_np-xyz_min_np)
            if len(obj_min_lst) > 0:
                obj_min = np.min(np.array(obj_min_lst), axis=-2)
                obj_max= np.max(np.array(obj_max_lst), axis=-2)
                all_min_lst.append(obj_min)
                all_max_lst.append(obj_max)
                print(scan, "min", obj_min, "max", obj_max)
        obj_min = np.min(np.array(all_min_lst), axis=-2)
        obj_max = np.max(np.array(all_max_lst), axis=-2)
        print("xyz_min, xyz_max, edges,", obj_min, obj_max, obj_max-obj_min)
    def ndc_2_cam(self, ndc_xyz, near_far, intrinsic, W, H):
        """Convert NDC coordinates (x, y in [0,1], z normalized depth) to
        camera-space points via the inverse intrinsics; returns (N, 3)."""
        inv_scale = np.array([[W - 1, H - 1]])
        cam_z = ndc_xyz[..., 2:3] * (near_far[1] - near_far[0]) + near_far[0]
        cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z
        cam_xyz = np.concatenate([cam_xy, cam_z], axis=-1).reshape(-1,3)
        cam_xyz = cam_xyz @ np.linalg.inv(intrinsic.T)
        return cam_xyz
if __name__ == '__main__':
    # Run the bounding-box sanity check over all DTU scans.
    # NOTE(review): DtuDataset() is constructed without calling initialize(),
    # so only methods that set their own state (check_points_range) are safe.
    db = DtuDataset()
    db.check_points_range()
    # usage: python -m data.dtu_dataset
pointnerf | pointnerf-master/data/fitplane.py | from skspatial.objects import Plane
from skspatial.objects import Points
from skspatial.plotting import plot_3d
from plyfile import PlyData, PlyElement
import matplotlib.pyplot as plt
import numpy as np
# One-off analysis script: load a sampled point cloud from a PLY file, fit a
# best-fit plane through it, then generate a dense random point set on that
# plane (colored with the cloud's mean color) and save it as a txt point file.
# points = Points([[0, 0, 0], [1, 3, 5], [-5, 6, 3], [3, 6, 7], [-2, 6, 7]])
# plydata = PlyData.read("/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_tryy/points/sample.ply")
plydata = PlyData.read("/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_normcam2_confcolordir_KNN8_LRelu_grid800_scan115_wbg_dmsk/points/light_brown_sample.ply")
vertex_data = plydata['vertex'].data  # numpy array with fields ['x', 'y', 'z']
pts = np.zeros([vertex_data.size, 3])
color = np.zeros_like(pts)
pts[:, 0] = vertex_data['x']
pts[:, 1] = vertex_data['y']
pts[:, 2] = vertex_data['z']
# print(vertex_data)
color[:, 0] = vertex_data['red']
color[:, 1] = vertex_data['green']
color[:, 2] = vertex_data['blue']
points = Points(pts)
plane = Plane.best_fit(points)
print("plane", plane, pts.shape)
print("plane", plane.point, plane.normal)
print("color average", np.mean(color, axis=0))
coord=plane.point
coeff=plane.normal
# Plane(point=Point([-0.49666997, 0.52160616, 3.6239593 ]), normal=Vector([-0.11364093, 0.38778102, 0.91471942])) (15565, 3)
# Plane equation used below: a(x − x0) + b(y − y0) + c(z − z0) = 0
r=8  # side length of the square region to populate with plane samples
# a,b,c = coeff[0], coeff[1], coeff[2]
# x0,y0,z0=coord[0],coord[1],coord[2],
# xy = r * np.random.rand(int(1e5),2) - r/2
# z = (a*(xy[...,0]-x0) + b*(xy[...,1]-y0))/(-c) + z0
# gen_pnts = np.stack([xy[...,0], xy[...,1], z], axis=-1)
#
# np.savetxt('/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_tryy/points/planes_white.txt', gen_pnts, delimiter=';')
# plane Plane(point=Point([ 0.20770223, -0.74818161, 3.98697683]), normal=Vector([-0.11165793, 0.3806543 , 0.91795142])) (14801, 3)
# plane [ 0.20770223 -0.74818161 3.98697683] [-0.11165793 0.3806543 0.91795142]
# color average [150.72447808 99.68367002 63.40976961]
a,b,c = coeff[0], coeff[1], coeff[2]
x0,y0,z0=coord[0],coord[1],coord[2],
# Sample 1e5 random (x, y) points in an r x r square and solve the plane
# equation for z so every generated point lies exactly on the fitted plane.
xy = r * np.random.rand(int(1e5),2) - r/2
z = (a*(xy[...,0]-x0) + b*(xy[...,1]-y0))/(-c) + z0
gen_pnts = np.stack([xy[...,0], xy[...,1], z], axis=-1)
# Assign every generated point the mean color of the input cloud.
color = np.ones_like(gen_pnts) * np.mean(color, axis=0, keepdims=True)
gen_pnts = np.concatenate([gen_pnts, color], axis=-1)
np.savetxt('/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_normcam2_confcolordir_KNN8_LRelu_grid800_scan115_wbg_dmsk/points/planes_light_brown.txt', gen_pnts, delimiter=';')
# plane Plane(point=Point([-0.04889537, -0.84123057, 4.03164617]), normal=Vector([-0.11154823, 0.3783277 , 0.91892608])) (30712, 3)
# plane [-0.04889537 -0.84123057 4.03164617] [-0.11154823 0.3783277 0.91892608]
# a,b,c = coeff[0], coeff[1], coeff[2]
# x0,y0,z0=coord[0],coord[1],coord[2],
# xy = r * np.random.rand(int(1e5),2) - r/2
# z = (a*(xy[...,0]-x0) + b*(xy[...,1]-y0))/(-c) + z0
# gen_pnts = np.stack([xy[...,0], xy[...,1], z], axis=-1)
# color = np.ones_like(gen_pnts) * np.mean(color, axis=0, keepdims=True)
# gen_pnts = np.concatenate([gen_pnts, color], axis=-1)
# np.savetxt('/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_normcam2_confcolordir_KNN8_LRelu_grid800_scan115_wbg_dmsk/points/planes_brown.txt', gen_pnts, delimiter=';')
# gen_points = Points(gen_pnts)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# plot_3d(
#     gen_points.plotter(c='k', s=50, depthshade=False),
#     plane.plotter(alpha=0.2, lims_x=(-5, 5), lims_y=(-5, 5)),
# )
#
# plt.show()
pointnerf | pointnerf-master/data/base_dataset.py | import torch.utils.data as data
from PIL import Image
class BaseDataset(data.Dataset):
    """Abstract base for project datasets on top of torch's Dataset.

    Subclasses implement initialize() and __len__(); they may also override
    modify_commandline_options() to register dataset-specific CLI options.
    """

    def __init__(self):
        super().__init__()

    def name(self):
        """Return the concrete subclass name (used in logs)."""
        return type(self).__name__

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Hook for subclasses to add CLI options; the default is a no-op."""
        return parser

    def initialize(self, opt):
        raise NotImplementedError()

    def __len__(self):
        raise NotImplementedError()
| 440 | 20 | 53 | py |
pointnerf | pointnerf-master/data/download-scannet.py | #!/usr/bin/env python
# Downloads ScanNet public data release
# Run with ./download-scannet.py (or python download-scannet.py on Windows)
# -*- coding: utf-8 -*-
import argparse
import os
import urllib.request #(for python3)
import urllib
import tempfile
# Server hosting the ScanNet release and its terms-of-service PDF.
BASE_URL = 'http://kaldir.vc.in.tum.de/scannet/'
TOS_URL = BASE_URL + 'ScanNet_TOS.pdf'
# Per-scan file types available in the full release / test split.
FILETYPES = ['.aggregation.json', '.sens', '.txt', '_vh_clean.ply', '_vh_clean_2.0.010000.segs.json', '_vh_clean_2.ply', '_vh_clean.segs.json', '_vh_clean.aggregation.json', '_vh_clean_2.labels.ply', '_2d-instance.zip', '_2d-instance-filt.zip', '_2d-label.zip', '_2d-label-filt.zip']
FILETYPES_TEST = ['.sens', '.txt', '_vh_clean.ply', '_vh_clean_2.ply']
# Archive names paired with their human-readable download sizes.
PREPROCESSED_FRAMES_FILE = ['scannet_frames_25k.zip', '5.6GB']
TEST_FRAMES_FILE = ['scannet_frames_test.zip', '610MB']
# Label map tsv names, index 0 = v2, index 1 = v1 (see V1_IDX below).
LABEL_MAP_FILES = ['scannetv2-labels.combined.tsv', 'scannet-labels.combined.tsv']
# Data-efficient benchmark files; last element is the total download size.
DATA_EFFICIENT_FILES = ['limited-reconstruction-scenes.zip', 'limited-annotation-points.zip', 'limited-bboxes.zip', '1.7MB']
# Release paths; index 0 selects v2 by default, main() may switch to v1.
RELEASES = ['v2/scans', 'v1/scans']
RELEASES_TASKS = ['v2/tasks', 'v1/tasks']
RELEASES_NAMES = ['v2', 'v1']
RELEASE = RELEASES[0]
RELEASE_TASKS = RELEASES_TASKS[0]
RELEASE_NAME = RELEASES_NAMES[0]
LABEL_MAP_FILE = LABEL_MAP_FILES[0]
RELEASE_SIZE = '1.2TB'
# Index of the v1 entries in the lists above.
V1_IDX = 1
def get_release_scans(release_file):
    """Fetch the release index file and return the scan ids listed in it."""
    print("release_file", release_file)
    response = urllib.request.urlopen(release_file)
    # one scan id per line; strip the trailing newline from each
    return [raw.decode('utf8').rstrip('\n') for raw in response]
def download_release(release_scans, out_dir, file_types, use_v1_sens):
    """Download every scan of the release into per-scan subdirectories of out_dir."""
    if not release_scans:
        return
    print('Downloading ScanNet ' + RELEASE_NAME + ' release to ' + out_dir + '...')
    for sid in release_scans:
        download_scan(sid, os.path.join(out_dir, sid), file_types, use_v1_sens)
    print('Downloaded ScanNet ' + RELEASE_NAME + ' release.')
def download_file(url, out_file):
    """Download *url* to *out_file*, skipping files that already exist.

    The payload is first written to a temporary file in the destination
    directory and then renamed into place, so a partially downloaded file
    never ends up at out_file. Fix over the original: the temporary file is
    now removed if the download raises, instead of being left behind.
    """
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if os.path.isfile(out_file):
        print('WARNING: skipping download of existing file ' + out_file)
        return
    print('\t' + url + ' > ' + out_file)
    fh, out_file_tmp = tempfile.mkstemp(dir=out_dir)
    os.close(fh)  # only the path is needed; urlretrieve reopens the file
    try:
        urllib.request.urlretrieve(url, out_file_tmp)
        os.rename(out_file_tmp, out_file)
    finally:
        # clean up the temp file when the download or rename failed
        if os.path.isfile(out_file_tmp):
            os.remove(out_file_tmp)
def download_scan(scan_id, out_dir, file_types, use_v1_sens):
    """Download all requested file types for one scan into out_dir."""
    print('Downloading ScanNet ' + RELEASE_NAME + ' scan ' + scan_id + ' ...')
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    for ft in file_types:
        # v2 reuses the v1 .sens files, so those are fetched from the v1 release
        if use_v1_sens and ft == '.sens':
            release = RELEASES[V1_IDX]
        else:
            release = RELEASE
        url = BASE_URL + release + '/' + scan_id + '/' + scan_id + ft
        download_file(url, out_dir + '/' + scan_id + ft)
    print('Downloaded scan ' + scan_id)
def download_task_data(out_dir):
    """Download the ScanNet v1 benchmark task files into out_dir."""
    print('Downloading ScanNet v1 task data...')
    task_files = (
        LABEL_MAP_FILES[V1_IDX],
        'obj_classification/data.zip',
        'obj_classification/trained_models.zip',
        'voxel_labeling/data.zip',
        'voxel_labeling/trained_models.zip',
    )
    for rel_path in task_files:
        local_path = os.path.join(out_dir, rel_path)
        local_dir = os.path.dirname(local_path)
        if not os.path.isdir(local_dir):
            os.makedirs(local_dir)
        download_file(BASE_URL + RELEASES_TASKS[V1_IDX] + '/' + rel_path, local_path)
    print('Downloaded task data.')
def download_tfrecords(in_dir, out_dir):
    """Download the sharded tf record files for both resolutions (302 GB total)."""
    print('Downloading tf records (302 GB)...')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    shards_per_split = {'train': 100, 'val': 25, 'test': 10}
    for folder_name in ['hires_tfrecords', 'lores_tfrecords']:
        save_dir = '%s/%s' % (out_dir, folder_name)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        for split, num_shards in shards_per_split.items():
            for shard in range(num_shards):
                file_name = '%s-%05d-of-%05d.tfrecords' % (split, shard, num_shards)
                download_file('%s/%s/%s' % (in_dir, folder_name, file_name),
                              '%s/%s/%s' % (out_dir, folder_name, file_name))
def download_label_map(out_dir):
    """Download the label mapping tsv for the currently selected release."""
    print('Downloading ScanNet ' + RELEASE_NAME + ' label mapping file...')
    localpath = os.path.join(out_dir, LABEL_MAP_FILE)
    localdir = os.path.dirname(localpath)
    if not os.path.isdir(localdir):
        os.makedirs(localdir)
    download_file(BASE_URL + RELEASE_TASKS + '/' + LABEL_MAP_FILE, localpath)
    print('Downloaded ScanNet ' + RELEASE_NAME + ' label mapping file.')
def main():
    """Parse CLI options and dispatch to the matching download routine.

    Exactly one action is performed per invocation: task data, label map,
    preprocessed frames, 2D test frames, data-efficient files, tf records,
    a single scan (--id), or — by default — the entire release.
    """
    parser = argparse.ArgumentParser(description='Downloads ScanNet public data release.')
    parser.add_argument('-o', '--out_dir', required=True, help='directory in which to download')
    parser.add_argument('--task_data', action='store_true', help='download task data (v1)')
    parser.add_argument('--label_map', action='store_true', help='download label map file')
    parser.add_argument('--v1', action='store_true', help='download ScanNet v1 instead of v2')
    parser.add_argument('--id', help='specific scan id to download')
    parser.add_argument('--preprocessed_frames', action='store_true', help='download preprocessed subset of ScanNet frames (' + PREPROCESSED_FRAMES_FILE[1] + ')')
    parser.add_argument('--test_frames_2d', action='store_true', help='download 2D test frames (' + TEST_FRAMES_FILE[1] + '; also included with whole dataset download)')
    parser.add_argument('--data_efficient', action='store_true', help='download data efficient task files; also included with whole dataset download)')
    parser.add_argument('--tf_semantic', action='store_true', help='download google tensorflow records for 3D segmentation / detection')
    parser.add_argument('--type', help='specific file type to download (.aggregation.json, .sens, .txt, _vh_clean.ply, _vh_clean_2.0.010000.segs.json, _vh_clean_2.ply, _vh_clean.segs.json, _vh_clean.aggregation.json, _vh_clean_2.labels.ply, _2d-instance.zip, _2d-instance-filt.zip, _2d-label.zip, _2d-label-filt.zip)')
    args = parser.parse_args()
    # Require an explicit keypress acknowledging the terms of service.
    print('By pressing any key to continue you confirm that you have agreed to the ScanNet terms of use as described at:')
    print(TOS_URL)
    print('***')
    print('Press any key to continue, or CTRL-C to exit.')
    key = input('')
    if args.v1:
        # Retarget the module-level release selectors at the v1 paths.
        global RELEASE
        global RELEASE_TASKS
        global RELEASE_NAME
        global LABEL_MAP_FILE
        RELEASE = RELEASES[V1_IDX]
        RELEASE_TASKS = RELEASES_TASKS[V1_IDX]
        RELEASE_NAME = RELEASES_NAMES[V1_IDX]
        LABEL_MAP_FILE = LABEL_MAP_FILES[V1_IDX]
        assert(not args.tf_semantic), "No tf records for v1"
    # Fetch the scan id indexes for the train and test splits.
    release_file = BASE_URL + RELEASE + '.txt'
    release_scans = get_release_scans(release_file)
    # NOTE(review): file_types aliases the module constant FILETYPES, so the
    # .remove('.sens') calls further down mutate FILETYPES itself — verify
    # this is intended (it is harmless for a single invocation of main()).
    file_types = FILETYPES;
    release_test_file = BASE_URL + RELEASE + '_test.txt'
    release_test_scans = get_release_scans(release_test_file)
    file_types_test = FILETYPES_TEST;
    out_dir_scans = os.path.join(args.out_dir, 'scans')
    out_dir_test_scans = os.path.join(args.out_dir, 'scans_test')
    out_dir_tasks = os.path.join(args.out_dir, 'tasks')
    if args.type: # download file type
        file_type = args.type
        if file_type not in FILETYPES:
            print('ERROR: Invalid file type: ' + file_type)
            return
        file_types = [file_type]
        if file_type in FILETYPES_TEST:
            file_types_test = [file_type]
        else:
            file_types_test = []
    if args.task_data: # download task data
        download_task_data(out_dir_tasks)
    elif args.label_map: # download label map file
        download_label_map(args.out_dir)
    elif args.preprocessed_frames: # download preprocessed scannet_frames_25k.zip file
        # NOTE(review): when --v1 is set, this only prints an error and then
        # downloads anyway — a `return` after the print is probably intended.
        if args.v1:
            print('ERROR: Preprocessed frames only available for ScanNet v2')
        print('You are downloading the preprocessed subset of frames ' + PREPROCESSED_FRAMES_FILE[0] + ' which requires ' + PREPROCESSED_FRAMES_FILE[1] + ' of space.')
        download_file(os.path.join(BASE_URL, RELEASE_TASKS, PREPROCESSED_FRAMES_FILE[0]), os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
    elif args.test_frames_2d: # download test scannet_frames_test.zip file
        # NOTE(review): same missing `return` after the v1 error as above.
        if args.v1:
            print('ERROR: 2D test frames only available for ScanNet v2')
        print('You are downloading the 2D test set ' + TEST_FRAMES_FILE[0] + ' which requires ' + TEST_FRAMES_FILE[1] + ' of space.')
        download_file(os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]), os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
    elif args.data_efficient: # download data efficient task files
        print('You are downloading the data efficient task files' + ' which requires ' + DATA_EFFICIENT_FILES[-1] + ' of space.')
        # last list entry is the size string, not a file — hence len-1
        for k in range(len(DATA_EFFICIENT_FILES)-1):
            download_file(os.path.join(BASE_URL, RELEASE_TASKS, DATA_EFFICIENT_FILES[k]), os.path.join(out_dir_tasks, DATA_EFFICIENT_FILES[k]))
    elif args.tf_semantic: # download google tf records
        download_tfrecords(os.path.join(BASE_URL, RELEASE_TASKS, 'tf3d'), os.path.join(out_dir_tasks, 'tf3d'))
    elif args.id: # download single scan
        scan_id = args.id
        is_test_scan = scan_id in release_test_scans
        if scan_id not in release_scans and (not is_test_scan or args.v1):
            print('ERROR: Invalid scan id: ' + scan_id)
        else:
            out_dir = os.path.join(out_dir_scans, scan_id) if not is_test_scan else os.path.join(out_dir_test_scans, scan_id)
            scan_file_types = file_types if not is_test_scan else file_types_test
            use_v1_sens = not is_test_scan
            if not is_test_scan and not args.v1 and '.sens' in scan_file_types:
                print('Note: ScanNet v2 uses the same .sens files as ScanNet v1: Press \'n\' to exclude downloading .sens files for each scan')
                key = input('')
                if key.strip().lower() == 'n':
                    scan_file_types.remove('.sens')
            download_scan(scan_id, out_dir, scan_file_types, use_v1_sens)
    else: # download entire release
        if len(file_types) == len(FILETYPES):
            print('WARNING: You are downloading the entire ScanNet ' + RELEASE_NAME + ' release which requires ' + RELEASE_SIZE + ' of space.')
        else:
            print('WARNING: You are downloading all ScanNet ' + RELEASE_NAME + ' scans of type ' + file_types[0])
        print('Note that existing scan directories will be skipped. Delete partially downloaded directories to re-download.')
        print('***')
        print('Press any key to continue, or CTRL-C to exit.')
        key = input('')
        if not args.v1 and '.sens' in file_types:
            print('Note: ScanNet v2 uses the same .sens files as ScanNet v1: Press \'n\' to exclude downloading .sens files for each scan')
            key = input('')
            if key.strip().lower() == 'n':
                file_types.remove('.sens')
        download_release(release_scans, out_dir_scans, file_types, use_v1_sens=True)
        if not args.v1:
            download_label_map(args.out_dir)
            download_release(release_test_scans, out_dir_test_scans, file_types_test, use_v1_sens=False)
            download_file(os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]), os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
            for k in range(len(DATA_EFFICIENT_FILES)-1):
                download_file(os.path.join(BASE_URL, RELEASE_TASKS, DATA_EFFICIENT_FILES[k]), os.path.join(out_dir_tasks, DATA_EFFICIENT_FILES[k]))
if __name__ == "__main__": main() | 12,182 | 51.064103 | 318 | py |
pointnerf | pointnerf-master/data/dtu_ft_dataset.py | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import itertools
import random
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from . import data_utils
from utils import util
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
# import torch.nn.functional as F
from .data_utils import get_dtu_raydir
# Axis-flip matrix that negates the z axis (right-handed <-> left-handed
# camera convention) when multiplied against a rotation.
FLIP_Z = np.asarray([
    [1,0,0],
    [0,1,0],
    [0,0,-1],
    ], dtype=np.float32)
def colorjitter(img, factor):
    """Apply saturation and hue jitter to a PIL image / tensor.

    *factor* is (brightness, contrast, saturation, hue); only the saturation
    (factor[2]) and hue (factor[3] - 1.0) components are applied here — the
    brightness/contrast adjustments were deliberately disabled.
    """
    jittered = F.adjust_saturation(img, factor[2])
    return F.adjust_hue(jittered, factor[3] - 1.0)
def get_rays(directions, c2w):
    """Transform per-pixel camera-space ray directions into world space.

    Args:
        directions: (H, W, 3) ray directions in camera coordinates.
        c2w: (3, 4) camera-to-world matrix (rotation | translation).

    Returns:
        rays_o: (H*W, 3) ray origins — the camera position, repeated per pixel.
        rays_d: (H*W, 3) world-space ray directions (not normalized).
    """
    c2w = torch.FloatTensor(c2w)
    rotation, translation = c2w[:3, :3], c2w[:3, 3]
    # rotate camera-frame directions into the world frame
    world_dirs = directions @ rotation.T
    # every ray starts at the camera center
    origins = translation.expand(world_dirs.shape)
    return origins.reshape(-1, 3), world_dirs.reshape(-1, 3)
def get_ray_directions(H, W, focal, center=None):
    """Compute per-pixel ray directions in camera coordinates.

    Args:
        H, W: image height and width in pixels.
        focal: (fx, fy) focal lengths.
        center: optional (cx, cy) principal point; defaults to the image center.

    Returns:
        (H, W, 3) tensor of ray directions; the z component is always 1.
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    i, j = grid.unbind(-1)
    # deliberately no +0.5 pixel-center offset: calibration is not accurate
    # enough for it to matter (see https://github.com/bmild/nerf/issues/24)
    cx, cy = center if center is not None else [W / 2, H / 2]
    return torch.stack(
        [(i - cx) / focal[0], (j - cy) / focal[1], torch.ones_like(i)], -1)
class DtuFtDataset(BaseDataset):
def initialize(self, opt, n_views=3, img_wh=[640,512], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None):
self.opt = opt
self.data_dir = opt.data_root
self.scan = opt.scan
self.split = opt.split
assert int(640 * downSample) % 32 == 0, \
f'image width is {int(640 * downsample)}, it should be divisible by 32, you may need to modify the imgScale'
self.img_wh = (int(640 * downSample), int(512 * downSample))
self.downSample = downSample
self.scale_factor = 1.0 / 200
self.max_len = max_len
self.n_views = n_views
self.define_transforms()
self.pair_idx = torch.load('../data/dtu_configs/pairs.th')
self.pair_idx = [self.pair_idx['dtu_train'],self.pair_idx['dtu_test']]
print("dtu_ft train id", self.pair_idx[0])
print("dtu_ft test id", self.pair_idx[1])
self.bbox_3d = torch.tensor([[-1.0, -1.0, 2.2], [1.0, 1.0, 4.2]])
# self.near_far = np.asarray([2.125, 4.525])
if img_wh is not None:
assert img_wh[0] % 32 == 0 and img_wh[1] % 32 == 0, \
'img_wh must both be multiples of 32!'
self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0])
if not self.opt.bg_color or self.opt.bg_color == 'black':
self.bg_color = (0, 0, 0)
elif self.opt.bg_color == 'white':
self.bg_color = (1, 1, 1)
elif self.opt.bg_color == 'random':
self.bg_color = 'random'
else:
self.bg_color = [float(one) for one in self.opt.bg_color.split(",")]
self.build_init_metas()
self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32)
if opt.normview > 0:
_, _ , w2cs, c2ws = self.build_proj_mats(list=self.pair_idx[1])
norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws)
if opt.normview == 2:
self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32)
norm_w2c, norm_c2w = None, None
self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(norm_w2c=norm_w2c, norm_c2w=norm_c2w)
self.build_view_lst()
if opt.split != "render":
self.read_meta()
self.total = len(self.id_list)
else:
self.get_render_poses()
self.total = len(self.render_poses)
print("dataset total:", self.split, self.total)
def get_render_poses(self):
self.render_poses = util.gen_render_path(self.cam2worlds[:3,...], N_views=60)
# cam_xyz_lst = [c2w[:3, 3] for c2w in self.cam2worlds]
# cam_xyz = np.stack(cam_xyz_lst, axis=0)
# triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=None, full_comb=False)
# self.render_poses = util.gen_render_path_contour(triangles, self.cam2worlds, N_views=200)
# def gen_render_path(c2ws, N_views=30):
# N = len(c2ws)
# rotvec, positions = [], []
# rotvec_inteplat, positions_inteplat = [], []
# weight = np.linspace(1.0, .0, N_views // 3, endpoint=False).reshape(-1, 1)
# for i in range(N):
# r = R.from_matrix(c2ws[i, :3, :3])
# euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3)
# if i:
# mask = np.abs(euler_ange - rotvec[0]) > 180
# euler_ange[mask] += 360.0
# rotvec.append(euler_ange)
# positions.append(c2ws[i, :3, 3:].reshape(1, 3))
#
# if i:
# rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i])
# positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i])
#
# rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0])
# positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0])
#
# c2ws_render = []
# angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat)
# for rotvec, position in zip(angles_inteplat, positions_inteplat):
# c2w = np.eye(4)
# c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix()
# c2w[:3, 3:] = position.reshape(3, 1)
# c2ws_render.append(c2w.copy())
# c2ws_render = np.stack(c2ws_render)
# return c2ws_render
@staticmethod
def modify_commandline_options(parser, is_train):
# ['random', 'random2', 'patch'], default: no random sample
parser.add_argument('--random_sample',
type=str,
default='none',
help='random sample pixels')
parser.add_argument('--random_sample_size',
type=int,
default=1024,
help='number of random samples')
parser.add_argument('--init_view_num',
type=int,
default=3,
help='number of random samples')
parser.add_argument('--shape_id', type=int, default=0, help='shape id')
parser.add_argument('--trgt_id', type=int, default=0, help='shape id')
parser.add_argument('--num_nn',
type=int,
default=1,
help='number of nearest views in a batch')
parser.add_argument(
'--near_plane',
type=float,
default=2, #2.125,
help=
'Near clipping plane, by default it is computed according to the distance of the camera '
)
parser.add_argument(
'--far_plane',
type=float,
default=6, #4.525,
help=
'Far clipping plane, by default it is computed according to the distance of the camera '
)
parser.add_argument(
'--bg_color',
type=str,
default="white",
help=
'background color, white|black(None)|random|rgb (float, float, float)'
)
parser.add_argument(
'--scan',
type=str,
default="scan1",
help=''
)
parser.add_argument('--inverse_gamma_image',
type=int,
default=-1,
help='de-gamma correct the input image')
parser.add_argument('--pin_data_in_memory',
type=int,
default=-1,
help='load whole data in memory')
parser.add_argument('--normview',
type=int,
default=0,
help='load whole data in memory')
parser.add_argument(
'--id_range',
type=int,
nargs=3,
default=(0, 385, 1),
help=
'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.'
)
parser.add_argument(
'--id_list',
type=int,
nargs='+',
default=None,
help=
'the list of data ids selected in the original dataset. The default is range(0, 385).'
)
parser.add_argument(
'--full_comb',
type=int,
default=0,
help=''
)
parser.add_argument(
'--split',
type=str,
default="train",
help=
'train, val, test'
)
parser.add_argument("--half_res", action='store_true',
help='load blender synthetic data at 400x400 instead of 800x800')
parser.add_argument("--testskip", type=int, default=8,
help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
parser.add_argument('--dir_norm',
type=int,
default=0,
help='normalize the ray_dir to unit length or not, default not')
parser.add_argument('--train_load_num',
type=int,
default=0,
help='normalize the ray_dir to unit length or not, default not')
parser.add_argument('--uni_depth',
type=int,
default=0,
help='normalize the ray_dir to unit length or not, default not')
return parser
def define_transforms(self):
self.transform = T.ToTensor()
def read_cam_file(self, filename):
with open(filename) as f:
lines = [line.rstrip() for line in f.readlines()]
# extrinsics: line [1,5), 4x4 matrix
extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ')
extrinsics = extrinsics.reshape((4, 4))
# intrinsics: line [7-10), 3x3 matrix
intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ')
intrinsics = intrinsics.reshape((3, 3))
# depth_min & depth_interval: line 11
depth_min = self.opt.near_plane if self.opt.uni_depth > 0 else float(lines[11].split()[0]) * self.scale_factor
depth_max = self.opt.far_plane if self.opt.uni_depth > 0 else depth_min + float(lines[11].split()[1]) * 192 * 1.06 * self.scale_factor
self.depth_interval = float(lines[11].split()[1])
return intrinsics, extrinsics, [depth_min, depth_max]
def read_depth(self, filename):
depth_h = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800)
depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_NEAREST) # (600, 800)
depth_h = depth_h[44:556, 80:720] # (512, 640)
depth_h = cv2.resize(depth_h, None, fx=self.downSample, fy=self.downSample,
interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!!
return depth_h
def load_poses_all(self):
c2ws = []
List = sorted(os.listdir(os.path.join(self.data_dir, f'Cameras/train/')))
for item in List:
proj_mat_filename = os.path.join(self.data_dir, f'Cameras/train/{item}')
intrinsic, w2c, near_far = self.read_cam_file(proj_mat_filename)
intrinsic[:2] *= 4
c2ws.append(np.linalg.inv(w2c))
self.focal = [intrinsic[0, 0], intrinsic[1, 1]]
return np.stack(c2ws)
    def build_view_lst(self):
        """Build self.view_id_list (triplets of view indices) per opt.full_comb.

        full_comb == 1: all 3-combinations of id_list, shuffled.
        full_comb == 2: triangles from ball-pivoting triangulation of camera
            positions. full_comb in (3, 4): 1- or 2-hop neighbor triplets
            derived from those triangles. Triplets are finally remapped from
            local indices to the real ids in self.id_list.
        """
        cam_xyz_lst = [c2w[:3, 3] for c2w in self.cam2worlds]
        if self.opt.full_comb == 1:
            # pass
            triangles = list(itertools.combinations(self.id_list, 3))
            self.view_id_list = []
            for tris in triangles:
                tris = list(tris)
                random.shuffle(tris)
                self.view_id_list.append(tris)
        elif self.opt.full_comb > 1:
            # pass
            cam_xyz = np.stack(cam_xyz_lst, axis=0)
            # triangulate camera positions; each triangle gives a view triplet
            triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=None, full_comb=True)
            if self.opt.full_comb == 2:
                self.view_id_list = [triangles[i] for i in range(len(triangles))]
            elif self.opt.full_comb in [3, 4]: # 1 jump
                triplets = []
                # first_dict: triangle "root" vertex -> its direct neighbors
                first_dict = {}
                for tris in triangles:
                    if tris[0] not in first_dict.keys():
                        first_dict[tris[0]] = []
                    first_dict[tris[0]] += [tris[1], tris[2]]
                for key, val in first_dict.items():
                    # NOTE(review): `unique_lst` is not defined/imported in this
                    # file's visible imports — confirm it exists at module scope.
                    first_dict[key] = list(unique_lst(val))
                if self.opt.full_comb == 3:
                    for key, val in first_dict.items():
                        pairs = list(itertools.combinations(first_dict[key], 2))
                        triplets += [[key] + list(pair) for pair in pairs]
                    self.view_id_list = [triplets[i] for i in range(len(triplets))]
                elif self.opt.full_comb == 4:
                    # NOTE(review): `copy` is not in this file's visible imports —
                    # verify `import copy` exists at the top of the module.
                    second_dict = copy.deepcopy(first_dict)
                    # extend neighborhoods by one more hop, excluding self and 1-hop
                    for key, val in first_dict.items():
                        for second in val:
                            second_dict[key] += first_dict[second]
                        second_dict[key] = list(unique_lst(second_dict[key]))
                        second_dict[key] = [val for val in second_dict[key] if
                                            val != key and val not in first_dict[key]]
                    for key, val in second_dict.items():
                        pairs = list(itertools.combinations(second_dict[key], 2))
                        print("key val", key, pairs)
                        triplets += [[key] + list(pair) for pair in pairs]
                    print("len()", len(triplets))
                    self.view_id_list = [triplets[i] for i in range(len(triplets))]
            # remap triangulation indices to the actual ids in id_list
            for i in range(len(self.view_id_list)):
                triplets = self.view_id_list[i]
                real_trip = [self.id_list[j] for j in triplets]
                self.view_id_list[i] = real_trip
    def build_init_metas(self):
        """Populate self.id_list and self.view_id_list from the config files.

        Non-test splits read reference/source view pairings from
        dtu_finetune_init_pairs.txt; the test split uses the precomputed
        test ids and looks up this scan's ground-plane index.
        """
        self.view_id_list = []
        self.id_list = []
        if self.split != "test":
            with open(f'../data/dtu_configs/dtu_finetune_init_pairs.txt') as f:
                num_viewpoint = int(f.readline())
                # each viewpoint: one ref-view line, then a comma-separated src-view line
                for _ in range(num_viewpoint):
                    ref_view = int(f.readline().rstrip())
                    str_lst=f.readline().rstrip().split(',')
                    src_views = [int(x) for x in str_lst]
                    self.view_id_list.append([ref_view] + src_views)
                    self.id_list.append(ref_view)
        else:
            self.id_list = self.pair_idx[1]
            # NOTE(review): this branch only runs when split == "test", so the
            # split == "comb" check below can never be true — dead code.
            if self.split == "comb":
                self.id_list += self.pair_idx[1]
            # look up the ground-plane index registered for this scan
            with open(f'../data/dtu_configs/lists/dtu_test_ground.txt') as f:
                lines = f.readlines()
                for line in lines:
                    info = line.strip().split()
                    if self.scan == info[0]:
                        self.plane_ind = int(info[1])
                        print("self.plane_ind", self.plane_ind)
                        break
        if self.opt.full_comb < 0:
            # negative full_comb: load view triplets from the lego360 pairing file
            with open(f'../data/nerf_synth_configs/list/lego360_init_pairs.txt') as f:
                for line in f:
                    str_lst = line.rstrip().split(',')
                    src_views = [int(x) for x in str_lst]
                    self.view_id_list.append(src_views)
    def build_proj_mats(self, list=None, norm_w2c=None, norm_c2w=None):
        """Load per-view camera matrices for the given ids (default: self.id_list).

        Returns (proj_mats, intrinsics, world2cams, cam2worlds), stacked over
        views. Each proj_mats entry pairs a 4x4 projection matrix (built at
        1/4 resolution for the MVS network) with that view's [near, far].

        NOTE(review): the `list` parameter shadows the builtin; keeping the
        name since callers pass it by keyword (list=...).
        """
        list = self.id_list if list is None else list
        proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], []
        for vid in list:
            proj_mat_filename = os.path.join(self.data_dir,
                                             f'Cameras/train/{vid:08d}_cam.txt')
            intrinsic, extrinsic, near_far = self.read_cam_file(proj_mat_filename)
            intrinsic[:2] *= 4  # camera files store intrinsics at 1/4 resolution
            extrinsic[:3, 3] *= self.scale_factor  # rescale translation to world units
            if norm_c2w is not None:
                # bake the normalization pose into the extrinsics
                extrinsic = extrinsic @ norm_c2w
            intrinsic[:2] = intrinsic[:2] * self.downSample
            intrinsics += [intrinsic.copy()]
            # multiply intrinsics and extrinsics to get projection matrix
            proj_mat_l = np.eye(4)
            intrinsic[:2] = intrinsic[:2] / 4
            proj_mat_l[:3, :4] = intrinsic @ extrinsic[:3, :4]
            proj_mats += [(proj_mat_l, near_far)]
            world2cams += [extrinsic]
            cam2worlds += [np.linalg.inv(extrinsic)]
        proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics)
        world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds)
        return proj_mats, intrinsics, world2cams, cam2worlds
def bcart2sphere(self, xyz):
r = np.linalg.norm(xyz, axis=1)
xyn = np.linalg.norm(xyz[...,:2], axis=1)
th = np.arctan2(xyn, xyz[...,2])
ph = np.arctan2(xyz[...,1], xyz[...,0])
print("r", r.shape, r, xyn.shape, th.shape, ph.shape)
return np.stack([r,th,ph], axis=-1)
def sphere2cart(self, rtp):
r, th, ph = rtp[0], rtp[1], rtp[2]
x = r * np.sin(th) * np.cos(ph)
y = r * np.sin(th) * np.sin(ph)
z = r * np.cos(th)
return np.asarray([x,y,z])
def matrix2euler(self, M):
x = np.arctan2(-M[1][2], M[2][2])
cosY = np.sqrt(1 - M[0][2])
y = np.arctan2(M[0][2], cosY)
sinZ = np.cos(x) * M[1][0] + np.sin(x) * M[2][0]
cosZ = np.cos(x) * M[1][1] + np.sin(x) * M[2][1]
z = np.arctan2(sinZ, cosZ)
return np.asarray([x,y,z])
def euler2matrix(self, xyz):
Cxyz = np.cos(xyz)
Sxyz = np.sin(xyz)
Cx, Cy, Cz = Cxyz[0], Cxyz[1], Cxyz[2]
Sx, Sy, Sz = Sxyz[0], Sxyz[1], Sxyz[2]
M = [[Cy*Cz, -Cy*Sz, Sy],
[Sx*Sy*Cz + Cx*Sz, -Sx*Sy*Sz + Cx*Cz, -Sx*Cy],
[-Cx*Sy*Cz + Sx*Sz, Cx*Sy*Sz + Sx*Cz, Cx*Cy]
]
return np.array(M)
def normalize_cam(self, w2cs, c2ws):
# cam_xyz = c2ws[..., :3, 3]
# rtp = self.bcart2sphere(cam_xyz)
# print(rtp.shape)
# rtp = np.mean(rtp, axis=0)
# avg_xyz = self.sphere2cart(rtp)
# euler_lst = []
# for i in range(len(c2ws)):
# euler_angles = self.matrix2euler(c2ws[i][:3,:3])
# print("euler_angles", euler_angles)
# euler_lst += [euler_angles]
# euler = np.mean(np.stack(euler_lst, axis=0), axis=0)
# print("euler mean ",euler)
# M = self.euler2matrix(euler)
# norm_c2w = np.eye(4)
# norm_c2w[:3,:3] = M
# norm_c2w[:3,3] = avg_xyz
# norm_w2c = np.linalg.inv(norm_c2w)
# return norm_w2c, norm_c2w
index=0
return w2cs[index], c2ws[index]
    def read_meta(self):
        """Load all RGB images (and depths where available) for self.id_list.

        Fills self.image_paths, self.imgs, self.all_rgbs, self.depths,
        self.all_depth, self.view_id_dict (id -> local index) and
        self.poses / self.focal.
        """
        w, h = self.img_wh
        self.image_paths = []
        self.poses = []
        self.all_rays = []
        self.imgs = []
        self.depths = []
        self.all_rgbs = []
        self.all_depth = []
        self.view_id_dict = {}
        # NOTE(review): `count` is never incremented (the increment is
        # commented out below), so intrinsics[0] is used for every view —
        # confirm all views share the same intrinsics.
        count = 0
        for i, idx in enumerate(self.id_list):
            image_path = os.path.join(self.data_dir,
                                      f'Rectified/{self.scan}_train/rect_{idx + 1:03d}_3_r5000.png')
            depth_filename = os.path.join(self.data_dir,
                                          f'Depths_raw/{self.scan}/depth_map_{idx:04d}.pfm')
            self.image_paths += [image_path]
            img = Image.open(image_path)
            img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img)  # (3, h, w)
            self.imgs += [img]
            self.all_rgbs += [img.reshape(3, -1).permute(1, 0)]  # (h*w, 3)
            # depth maps only exist for some scans/views
            if os.path.exists(depth_filename):
                depth = self.read_depth(depth_filename)
                depth *= self.scale_factor
                self.depths += [depth]
                self.all_depth += [torch.from_numpy(depth).float().view(-1,1)]
            # ray directions for all pixels, same for all images (same H, W, focal)
            intrinsic = self.intrinsics[count]
            self.focal = [intrinsic[0,0], intrinsic[1,1]]
            self.view_id_dict[idx] = i
        self.poses = self.cam2worlds
def __len__(self):
return self.total
def name(self):
return 'DtuDataset'
def __del__(self):
print("end loading")
    def get_campos_ray(self):
        """Return (camera positions, center-pixel ray directions) as cuda tensors.

        For every view in id_list, computes the ray through the image center
        using the shared intrinsics (index 0) and that view's rotation.
        """
        centerpixel = np.asarray(self.img_wh).astype(np.float32)[None, :] // 2
        camposes = []
        centerdirs = []
        for i, idx in enumerate(self.id_list):
            c2w = self.cam2worlds[i].astype(np.float32)
            campos = c2w[:3, 3]
            camrot = c2w[:3, :3]
            # NOTE(review): intrinsics[0] is used for every view — assumes
            # shared intrinsics across views, as in read_meta.
            raydir = get_dtu_raydir(centerpixel, self.intrinsics[0].astype(np.float32), camrot, True)
            camposes.append(campos)
            centerdirs.append(raydir)
        camposes = np.stack(camposes, axis=0)  # (num_views, 3)
        centerdirs = np.concatenate(centerdirs, axis=0)  # (num_views, 3)
        return torch.as_tensor(camposes, device="cuda", dtype=torch.float32), torch.as_tensor(centerdirs, device="cuda",
                                                                                              dtype=torch.float32)
    def get_init_item(self, idx, crop=False):
        """Assemble the multi-view sample used for point-cloud initialization.

        Gathers images, depths, cameras and relative projection matrices for
        the view triplet view_id_list[idx], and returns them as a dict of
        tensors, each with a leading batch dimension of 1.
        """
        sample = {}
        init_view_num = self.opt.init_view_num
        view_ids = self.view_id_list[idx]
        if self.split == 'train':
            view_ids = view_ids[:init_view_num]
        affine_mat, affine_mat_inv = [], []
        imgs, depths_h = [], []
        proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], []  # record proj mats between views
        for i in view_ids:
            vid = self.view_id_dict[i]
            imgs += [self.imgs[vid]]
            proj_mat_ls, near_far = self.proj_mats[vid]
            intrinsics.append(self.intrinsics[vid])
            w2cs.append(self.world2cams[vid])
            c2ws.append(self.cam2worlds[vid])
            affine_mat.append(proj_mat_ls)
            affine_mat_inv.append(np.linalg.inv(proj_mat_ls))
            depths_h.append(self.depths[vid])
            near_fars.append(near_far)
        # pairwise relative projections: src_proj @ inv(ref_proj); identity on the diagonal
        for i in range(len(affine_mat)):
            view_proj_mats = []
            ref_proj_inv = affine_mat_inv[i]
            for j in range(len(affine_mat)):
                if i == j:  # reference view
                    view_proj_mats += [np.eye(4)]
                else:
                    view_proj_mats += [affine_mat[j] @ ref_proj_inv]
            # view_proj_mats: (V, 4, 4)
            view_proj_mats = np.stack(view_proj_mats)
            proj_mats.append(view_proj_mats[:, :3])
        # (V, V, 3, 4)
        proj_mats = np.stack(proj_mats)
        imgs = np.stack(imgs).astype(np.float32)
        depths_h = np.stack(depths_h)
        affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv)
        intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars)
        sample['images'] = imgs  # (V, 3, H, W)
        sample['mvs_images'] = imgs #self.normalize_rgb(imgs) # (V, 3, H, W)
        sample['depths_h'] = depths_h.astype(np.float32)  # (V, H, W)
        sample['w2cs'] = w2cs.astype(np.float32)  # (V, 4, 4)
        sample['c2ws'] = c2ws.astype(np.float32)  # (V, 4, 4)
        # reference view's near/far reused for all views
        sample['near_fars_depth'] = near_fars.astype(np.float32)[0]
        sample['near_fars'] = np.tile(sample['near_fars_depth'][None,...],(len(imgs),1))
        sample['proj_mats'] = proj_mats.astype(np.float32)
        sample['intrinsics'] = intrinsics.astype(np.float32)  # (V, 3, 3)
        sample['view_ids'] = np.array(view_ids)
        sample['affine_mat'] = affine_mat
        sample['affine_mat_inv'] = affine_mat_inv
        # convert everything (except strings) to tensors with a batch dim of 1
        for key, value in sample.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                sample[key] = value.unsqueeze(0)
        return sample
def normalize_rgb(self, data):
# to unnormalize image for visualization
# data V, C, H, W
V, C, H, W = data.shape
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1)
return (data - mean) / std
    def __getitem__(self, id, crop=False, full_img=False):
        """Assemble a ray-sampling training item for view index `id`.

        Returns a dict with camera pose/intrinsics, near/far bounds, the
        hard-coded plane parameters, world-space ray directions for a
        (possibly random) subset of pixels, and the matching ground-truth
        colors. `crop` is accepted but unused in this body; `full_img=True`
        additionally stores the whole image under 'images' with a leading
        view axis.
        """
        item = {}
        img = self.imgs[id]
        if full_img:
            # Keep the full image (1, 3, H, W) alongside the sampled rays.
            item['images'] = img[None,...]
        w2c = self.world2cams[id]
        c2w = self.cam2worlds[id]
        intrinsic = self.intrinsics[id]
        proj_mat_ls, near_far = self.proj_mats[id]
        # CHW -> HWC so pixel coordinates can index the image directly.
        gt_image = np.transpose(img, (1,2,0))
        # print("gt_image", gt_image.shape)
        width, height = gt_image.shape[1], gt_image.shape[0]
        # Camera-to-world rotation and camera position in world space.
        camrot = (c2w[0:3, 0:3])
        campos = c2w[0:3, 3]
        # print("camrot", camrot, campos)
        item["c2w"] = torch.from_numpy(c2w).float()
        item["intrinsic"] = intrinsic
        # item["intrinsic"] = sample['intrinsics'][0, ...]
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z
        # Light source is assumed co-located with the camera.
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)
        # Heuristic mid-depth: camera distance to the origin plus a fixed
        # 0.7 offset — tuned constant, rationale not evident from this file.
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1)
        item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1)
        item['h'] = height
        item['w'] = width
        plane_pnt, plane_normal, plane_color = self.get_plane_param(self.plane_ind)
        item['plane_pnt'] = torch.FloatTensor(plane_pnt)
        item['plane_normal'] = torch.FloatTensor(plane_normal)
        item['plane_color'] = torch.FloatTensor(plane_color)
        item['depths_h'] = self.depths[id]
        # bounding box
        # Select which pixels to cast rays through, per the sampling mode.
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            # One contiguous square patch at a random location.
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            # Independent integer pixel coordinates (with replacement).
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            # Continuous sub-pixel coordinates; note these are truncated to
            # ints when gathering ground-truth colors below.
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            # Fall back to the full deterministic pixel grid.
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        # World-space ray directions for the selected pixels (external helper).
        raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0)
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)]
        # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :]
        gt_image = np.reshape(gt_image, (-1, 3))
        item['gt_image'] = gt_image
        item['id'] = id
        item['vid'] = self.id_list[id]
        if self.bg_color:
            if self.bg_color == 'random':
                # 50/50 white or black background.
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        return item
def get_item(self, idx, crop=False, full_img=False):
item = self.__getitem__(idx, crop=crop, full_img=full_img)
for key, value in item.items():
if not isinstance(value, str):
if not torch.is_tensor(value):
value = torch.as_tensor(value)
item[key] = value.unsqueeze(0)
return item
    def get_dummyrot_item(self, idx, crop=False):
        """Build a batched (leading dim 1) item for a synthetic render pose.

        Like __getitem__, but the camera pose comes from
        self.render_poses[idx] and no ground-truth image is produced.
        `crop` is accepted but unused in this body. All entries are
        tensorized and unsqueezed before returning.
        """
        item = {}
        width, height = self.width, self.height
        transform_matrix = self.render_poses[idx]
        camrot = transform_matrix[0:3, 0:3]
        campos = transform_matrix[0:3, 3]
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()
        # Light source is assumed co-located with the camera.
        item['lightpos'] = item["campos"]
        # Intrinsics of view 0 are reused for every rendered pose.
        item['intrinsic'] = self.intrinsics[0]
        # near far
        # Fixed near/far planes from options (no per-view depth range here).
        item['far'] = torch.FloatTensor([self.opt.far_plane]).view(1, 1)
        item['near'] = torch.FloatTensor([self.opt.near_plane]).view(1, 1)
        item['h'] = self.height
        item['w'] = self.width
        # Pixel-selection logic mirrors __getitem__.
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            # One contiguous square patch at a random location.
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            # Independent integer pixel coordinates (with replacement).
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            # Continuous sub-pixel coordinates.
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            # Fall back to the full deterministic pixel grid.
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        # World-space ray directions for the selected pixels (external helper).
        raydir = get_dtu_raydir(pixelcoords, self.intrinsics[0], camrot, self.opt.dir_norm > 0)
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        item['id'] = idx
        plane_pnt, plane_normal, plane_color = self.get_plane_param(self.plane_ind)
        item['plane_pnt'] = torch.FloatTensor(plane_pnt)
        item['plane_normal'] = torch.FloatTensor(plane_normal)
        item['plane_color'] = torch.FloatTensor(plane_color)
        if self.bg_color:
            if self.bg_color == 'random':
                # 50/50 white or black background.
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        # Batch every entry. NOTE(review): unlike get_item there is no
        # string guard here — all values are assumed tensor-convertible.
        for key, value in item.items():
            if not torch.is_tensor(value):
                value = torch.as_tensor(value)
            item[key] = value.unsqueeze(0)
        return item
def get_plane_param(self, ind):
plane_pnt = [[-0.49666997, 0.52160616, 3.6239593], [0.20770223, -0.74818161, 3.98697683], [-0.04889537, -0.84123057, 4.03164617]][ind]
plane_normal = [[-0.11364093, 0.38778102, 0.91471942], [-0.11165793, 0.3806543, 0.91795142], [-0.11154823, 0.3783277, 0.91892608]][ind]
plane_color = [[1.0, 1.0, 1.0], [150.72447808/255, 99.68367002/255, 63.40976961/255], [80.28243032/255, 54.3915082/255, 35.07029825/255]][ind]
return plane_pnt, plane_normal, plane_color
def get_plane_param_points(self):
r, amount = 10, int(8e3)
plane_pnt, plane_normal, _ = self.get_plane_param(self.plane_ind)
a,b,c = plane_normal[0], plane_normal[1], plane_normal[2]
x0,y0,z0=plane_pnt[0],plane_pnt[1],plane_pnt[2],
x = r * (np.random.rand(amount, 1) - 0.7)
y = r * (np.random.rand(amount, 1) - 0.6)
xy = np.concatenate([x,y], axis=-1)
z = (a*(xy[...,0]-x0) + b*(xy[...,1]-y0))/(-c) + z0
gen_pnts = torch.as_tensor(np.stack([xy[...,0], xy[...,1], z], axis=-1), device="cuda", dtype=torch.float32)
featuredim=self.opt.point_features_dim
if "0" in list(self.opt.point_dir_mode):
featuredim -= 3
if "0" in list(self.opt.point_conf_mode):
featuredim -= 1
if "0" in list(self.opt.point_color_mode):
featuredim -= 3
gen_embedding = torch.rand(1, len(gen_pnts), featuredim, device="cuda", dtype=torch.float32)
gen_dir = torch.rand(1, len(gen_pnts), 3, device="cuda", dtype=torch.float32)
gen_dir = gen_dir / torch.clamp(torch.norm(gen_dir, dim=-1, keepdim=True), min=1e-6)
gen_color = torch.zeros([1, len(gen_pnts), 3], device="cuda", dtype=torch.float32)
gen_conf = torch.full([1, len(gen_pnts), 1], 0.3, device="cuda", dtype=torch.float32)
return gen_pnts, gen_embedding, gen_dir, gen_color, gen_conf
def filter_plane(self, add_xyz):
thresh = 0.2
plane_pnt, plane_normal, _ = self.get_plane_param(self.plane_ind)
a, b, c = plane_normal[0], plane_normal[1], plane_normal[2]
x0, y0, z0 = plane_pnt[0], plane_pnt[1], plane_pnt[2]
d = -a * x0 - b * y0 - c * z0
dist = torch.abs(add_xyz[...,0] * a + add_xyz[...,1] * b + add_xyz[...,2] * c + d)
return dist < thresh
| 40,122 | 41.912299 | 174 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.