repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/utils/init.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import six
import ast
import copy
import logging
import numpy as np
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def init_checkpoint(exe, init_checkpoint_path, main_program):
    """Restore all persistable variables of ``main_program`` from a checkpoint.

    Args:
        exe: fluid ``Executor`` that runs the load ops.
        init_checkpoint_path: directory that holds one saved file per variable.
        main_program: fluid ``Program`` whose persistable variables to restore.

    Raises:
        AssertionError: if ``init_checkpoint_path`` does not exist.
    """
    assert os.path.exists(
        init_checkpoint_path), "[%s] cannot be found." % init_checkpoint_path

    def existed_persistables(var):
        # Restore only persistable variables that actually have a saved file;
        # report (rather than fail on) variables missing from the checkpoint.
        if not fluid.io.is_persistable(var):
            return False
        var_path = os.path.join(init_checkpoint_path, var.name)
        exists = os.path.exists(var_path)
        if not exists:
            # Use the module logger instead of print for consistency.
            log.warning("Var not exists: [%s]\t%s" % (var.name, var_path))
        return exists

    fluid.io.load_vars(
        exe,
        init_checkpoint_path,
        main_program=main_program,
        predicate=existed_persistables)
    log.info("Load model from {}".format(init_checkpoint_path))
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program):
    """Initialize ``main_program``'s Parameters from pretrained weights.

    Unlike ``init_checkpoint`` this only loads ``fluid.framework.Parameter``
    variables (model weights), not optimizer or other persistable state.

    Args:
        exe: fluid ``Executor`` that runs the load ops.
        pretraining_params_path: directory with one saved file per parameter.
        main_program: fluid ``Program`` whose parameters to initialize.

    Raises:
        AssertionError: if ``pretraining_params_path`` does not exist.
    """
    assert os.path.exists(pretraining_params_path
                          ), "[%s] cannot be found." % pretraining_params_path

    def existed_params(var):
        # Load only parameters that have a matching file; report missing ones.
        if not isinstance(var, fluid.framework.Parameter):
            return False
        var_path = os.path.join(pretraining_params_path, var.name)
        exists = os.path.exists(var_path)
        if not exists:
            # Use the module logger instead of print for consistency.
            log.warning("Var not exists: [%s]\t%s" % (var.name, var_path))
        return exists

    fluid.io.load_vars(
        exe,
        pretraining_params_path,
        main_program=main_program,
        predicate=existed_params)
    log.info("Load pretraining parameters from {}.".format(
        pretraining_params_path))
| 2,695 | 34.946667 | 108 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/utils/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/model/transformer_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def multi_head_attention(queries,
                         keys,
                         values,
                         attn_bias,
                         d_key,
                         d_value,
                         d_model,
                         n_head=1,
                         dropout_rate=0.,
                         cache=None,
                         param_initializer=None,
                         name='multi_head_att'):
    """
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing the softmax activation, to mask certain selected positions so
    that they will not be considered in attention weights.

    queries/keys/values are 3-D tensors [batch, seq_len, hidden]; passing
    keys=None (and values=None) yields self-attention over `queries`.
    `cache` is an optional dict with "k"/"v" entries used for incremental
    decoding; when given, the new keys/values are concatenated onto it.
    """
    # Self-attention shortcut: missing keys/values default to the queries.
    keys = queries if keys is None else keys
    values = keys if values is None else values

    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: quries, keys and values should all be 3-D tensors.")

    def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
        """
        Add linear projection to queries, keys, and values.
        """
        q = layers.fc(input=queries,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_query_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_query_fc.b_0')
        k = layers.fc(input=keys,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_key_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_key_fc.b_0')
        v = layers.fc(input=values,
                      size=d_value * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_value_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_value_fc.b_0')
        return q, k, v

    def __split_heads(x, n_head):
        """
        Reshape the last dimension of input tensor x so that it becomes two
        dimensions and then transpose. Specifically, input a tensor with shape
        [bs, max_sequence_length, n_head * hidden_dim] then output a tensor
        with shape [bs, n_head, max_sequence_length, hidden_dim].
        """
        hidden_size = x.shape[-1]
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        reshaped = layers.reshape(
            x=x, shape=[0, 0, n_head, hidden_size // n_head], inplace=True)
        # permute the dimensions into:
        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
        return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])

    def __combine_heads(x):
        """
        Transpose and then reshape the last two dimensions of input tensor x
        so that it becomes one dimension, which is reverse to __split_heads.
        """
        if len(x.shape) == 3: return x
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")
        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        return layers.reshape(
            x=trans_x,
            shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
            inplace=True)

    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention: softmax(q.k^T / sqrt(d_key) + bias).v
        """
        scaled_q = layers.scale(x=q, scale=d_key**-0.5)
        product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if dropout_rate:
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                dropout_implementation="upscale_in_train",
                is_test=False)
        out = layers.matmul(weights, v)
        return out

    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)

    if cache is not None:  # use cache and concat time steps
        # Since the inplace reshape in __split_heads changes the shape of k and
        # v, which is the cache input for next time step, reshape the cache
        # input from the previous time step first.
        k = cache["k"] = layers.concat(
            [layers.reshape(
                cache["k"], shape=[0, 0, d_model]), k], axis=1)
        v = cache["v"] = layers.concat(
            [layers.reshape(
                cache["v"], shape=[0, 0, d_model]), v], axis=1)

    q = __split_heads(q, n_head)
    k = __split_heads(k, n_head)
    v = __split_heads(v, n_head)

    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
                                                  dropout_rate)

    out = __combine_heads(ctx_multiheads)

    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         num_flatten_dims=2,
                         param_attr=fluid.ParamAttr(
                             name=name + '_output_fc.w_0',
                             initializer=param_initializer),
                         bias_attr=name + '_output_fc.b_0')
    return proj_out
def positionwise_feed_forward(x,
                              d_inner_hid,
                              d_hid,
                              dropout_rate,
                              hidden_act,
                              param_initializer=None,
                              name='ffn'):
    """
    Position-wise Feed-Forward Networks.

    Applies two linear transformations with an activation in between,
    independently and identically at every sequence position. Optional
    dropout is inserted after the first (expanding) projection.
    """
    # Expand to the inner hidden size with the configured activation.
    inner = layers.fc(input=x,
                      size=d_inner_hid,
                      num_flatten_dims=2,
                      act=hidden_act,
                      param_attr=fluid.ParamAttr(
                          name=name + '_fc_0.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_fc_0.b_0')
    if dropout_rate:
        inner = layers.dropout(
            inner,
            dropout_prob=dropout_rate,
            dropout_implementation="upscale_in_train",
            is_test=False)
    # Project back down to the model width; no activation here.
    projected = layers.fc(input=inner,
                          size=d_hid,
                          num_flatten_dims=2,
                          param_attr=fluid.ParamAttr(
                              name=name + '_fc_1.w_0', initializer=param_initializer),
                          bias_attr=name + '_fc_1.b_0')
    return projected
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.,
                           name=''):
    """
    Add residual connection, layer normalization and dropout to the out tensor
    optionally according to the value of process_cmd.

    This will be used before or after multi-head attention and position-wise
    feed-forward networks. `process_cmd` is a string of one-letter commands
    applied in order: "a" (residual add), "n" (layer norm), "d" (dropout).
    """
    for cmd in process_cmd:
        if cmd == "a":  # add residual connection
            # Explicit identity check: `prev_out` is either None (no residual
            # source, as in pre_process_layer) or a graph Variable; relying on
            # the Variable's truthiness here would be fragile.
            out = out + prev_out if prev_out is not None else out
        elif cmd == "n":  # add layer normalization
            # layer_norm is computed in fp32 even for fp16 inputs, then the
            # result is cast back, to keep the normalization numerically stable.
            out_dtype = out.dtype
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float32")
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_scale',
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_bias',
                    initializer=fluid.initializer.Constant(0.)))
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float16")
        elif cmd == "d":  # add dropout
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=False)
    return out
# `pre_process_layer` fixes prev_out=None (there is no residual source before
# a sub-layer); `post_process_layer` is the same function with prev_out
# supplied explicitly by the caller.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  prepostprocess_dropout,
                  attention_dropout,
                  relu_dropout,
                  hidden_act,
                  preprocess_cmd="n",
                  postprocess_cmd="da",
                  param_initializer=None,
                  name=''):
    """The encoder layers that can be stacked to form a deep encoder.

    This module consists of a multi-head (self) attention followed by
    position-wise feed-forward networks, both components accompanied
    by the post_process_layer to add residual connection, layer normalization
    and dropout.

    Returns a tuple of (layer output, ffd_output); the second element is
    collected by `encoder` as a recompute checkpoint.
    """
    # Self-attention sub-layer: preprocess -> attention -> postprocess.
    attn_output = multi_head_attention(
        pre_process_layer(
            enc_input,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_att'),
        None,
        None,
        attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        attention_dropout,
        param_initializer=param_initializer,
        name=name + '_multi_head_att')
    # Residual comes from the raw `enc_input`, not the preprocessed tensor.
    attn_output = post_process_layer(
        enc_input,
        attn_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_att')
    # Feed-forward sub-layer with the same pre/post processing pattern.
    ffd_output = positionwise_feed_forward(
        pre_process_layer(
            attn_output,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_ffn'),
        d_inner_hid,
        d_model,
        relu_dropout,
        hidden_act,
        param_initializer=param_initializer,
        name=name + '_ffn')
    return post_process_layer(
        attn_output,
        ffd_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_ffn'), ffd_output
def encoder(enc_input,
            attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd="n",
            postprocess_cmd="da",
            param_initializer=None,
            model_name='',
            name=''):
    """
    The encoder is composed of a stack of identical layers returned by calling
    encoder_layer.

    Returns the final (post-processed) encoder output and the list of
    per-layer recompute checkpoints.
    """
    checkpoints = []
    layer_input = enc_input
    for layer_idx in range(n_layer):
        # Each layer consumes the previous layer's output.
        layer_output, ckpt = encoder_layer(
            layer_input,
            attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd,
            postprocess_cmd,
            param_initializer=param_initializer,
            name=name + '_layer_' + str(layer_idx))
        checkpoints.append(ckpt)
        layer_input = layer_output
    # A final pre-process pass (e.g. layer norm) on the last layer's output.
    final_output = pre_process_layer(
        layer_output, preprocess_cmd, prepostprocess_dropout, name=model_name+"post_encoder")
    return final_output, checkpoints
| 12,649 | 35.666667 | 91 | py |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/model/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/research/DuReader-Retrieval-Baseline/src/model/ernie.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import six
import logging
import paddle.fluid as fluid
from io import open
from model.transformer_encoder import encoder, pre_process_layer
log = logging.getLogger(__name__)
class ErnieConfig(object):
    """Dict-like wrapper around an ERNIE model's JSON configuration file."""

    def __init__(self, config_path):
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        # Any failure (missing file, bad JSON) surfaces as a single IOError
        # naming the offending path.
        try:
            with open(config_path, 'r', encoding='utf8') as json_file:
                parsed = json.load(json_file)
        except Exception:
            raise IOError("Error in parsing Ernie model config file '%s'" %
                          config_path)
        return parsed

    def __getitem__(self, key):
        # Missing keys yield None instead of raising KeyError.
        return self._config_dict.get(key)

    def print_config(self):
        """Log every config entry in sorted key order."""
        for arg, value in sorted(six.iteritems(self._config_dict)):
            log.info('%s: %s' % (arg, value))
        log.info('------------------------------------------------')
class ErnieModel(object):
    """ERNIE transformer encoder built as a fluid static graph.

    Sums word, position and sentence (and optionally task) embeddings, runs
    them through a stacked transformer `encoder`, and exposes helpers for
    sequence output, pooled/CLS output, masked-LM loss and task heads.
    """

    def __init__(self,
                 src_ids,
                 position_ids,
                 sentence_ids,
                 task_ids,
                 input_mask,
                 config,
                 weight_sharing=True,
                 model_name='',
                 is_noise=False):
        # All hyperparameters come from an ErnieConfig-like mapping.
        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        # Some configs name the sentence-type vocab differently.
        if config['sent_type_vocab_size']:
            self._sent_types = config['sent_type_vocab_size']
        else:
            self._sent_types = config['type_vocab_size']
        self._use_task_id = config['use_task_id']
        if self._use_task_id:
            self._task_types = config['task_type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        # `is_noise` disables all dropout (used for a deterministic pass).
        if is_noise:
            self._prepostprocess_dropout = 0
            self._attention_dropout = 0
        self._weight_sharing = weight_sharing
        self.checkpoints = []
        # Embedding parameter names; `model_name` is prepended to allow two
        # towers (e.g. query/passage) with separate parameters in one program.
        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._task_emb_name = "task_embedding"
        self._emb_dtype = "float32"
        # Initialize all weigths by truncated normal initializer, and all biases
        # will be initialized by constant zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])
        self._build_model(model_name, src_ids, position_ids, sentence_ids, task_ids,
                          input_mask)

    def _build_model(self, model_name, src_ids, position_ids, sentence_ids, task_ids,
                     input_mask):
        """Assemble embeddings + attention mask and run the encoder stack."""
        # padding id in vocabulary must be set to 0
        emb_out = fluid.layers.embedding(
            input=src_ids,
            size=[self._voc_size, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._word_emb_name, initializer=self._param_initializer),
            is_sparse=False)
        position_emb_out = fluid.layers.embedding(
            input=position_ids,
            size=[self._max_position_seq_len, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._pos_emb_name, initializer=self._param_initializer))
        sent_emb_out = fluid.layers.embedding(
            sentence_ids,
            size=[self._sent_types, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._sent_emb_name, initializer=self._param_initializer))
        # Sum of the three (or four) embedding streams.
        emb_out = emb_out + position_emb_out
        emb_out = emb_out + sent_emb_out
        if self._use_task_id:
            task_emb_out = fluid.layers.embedding(
                task_ids,
                size=[self._task_types, self._emb_size],
                dtype=self._emb_dtype,
                param_attr=fluid.ParamAttr(
                    name=model_name + self._task_emb_name,
                    initializer=self._param_initializer))
            emb_out = emb_out + task_emb_out
        # 'nd' = layer norm + dropout on the summed embeddings.
        emb_out = pre_process_layer(
            emb_out, 'nd', self._prepostprocess_dropout, name=model_name + 'pre_encoder')
        # Build an additive attention bias from the padding mask:
        # valid pairs -> 0, masked pairs -> -10000 (softmax ~0).
        self_attn_mask = fluid.layers.matmul(
            x=input_mask, y=input_mask, transpose_y=True)
        self_attn_mask = fluid.layers.scale(
            x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
        # Replicate the mask across attention heads.
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True
        self._enc_out, self.checkpoints = encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            hidden_act=self._hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            model_name=model_name,
            name=model_name+'encoder')

    def get_sequence_output(self):
        """Return the full [batch, seq_len, hidden] encoder output."""
        return self._enc_out

    def get_cls_output(self):
        """Get the first feature of each sequence for classification"""
        cls_output = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        cls_output = fluid.layers.squeeze(cls_output, axes=[1])
        return cls_output

    def get_pooled_output(self):
        """Get the first feature of each sequence for classification"""
        next_sent_feat = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        # Pooled output: tanh FC over the [CLS] token feature.
        next_sent_feat = fluid.layers.fc(
            input=next_sent_feat,
            size=self._emb_size,
            act="tanh",
            param_attr=fluid.ParamAttr(
                name="pooled_fc.w_0", initializer=self._param_initializer),
            bias_attr="pooled_fc.b_0")
        return next_sent_feat

    def get_lm_output(self, mask_label, mask_pos):
        """Get the loss & accuracy for pretraining"""
        mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
        # extract the first token feature in each sentence
        self.next_sent_feat = self.get_pooled_output()
        reshaped_emb_out = fluid.layers.reshape(
            x=self._enc_out, shape=[-1, self._emb_size])
        # extract masked tokens' feature
        mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
        # transform: fc
        mask_trans_feat = fluid.layers.fc(
            input=mask_feat,
            size=self._emb_size,
            act=self._hidden_act,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_fc.w_0',
                initializer=self._param_initializer),
            bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
        # transform: layer norm
        # NOTE(review): the layer-norm bias is initialized to 1.0 here, which
        # is unusual (bias is conventionally 0) — confirm this is intentional.
        mask_trans_feat = fluid.layers.layer_norm(
            mask_trans_feat,
            begin_norm_axis=len(mask_trans_feat.shape) - 1,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_scale',
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_bias',
                initializer=fluid.initializer.Constant(1.)))
        # transform: layer norm
        #mask_trans_feat = pre_process_layer(
        #    mask_trans_feat, 'n', name='mask_lm_trans')
        mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))
        if self._weight_sharing:
            # Tie the output projection to the input word embedding matrix.
            fc_out = fluid.layers.matmul(
                x=mask_trans_feat,
                y=fluid.default_main_program().global_block().var(
                    self._word_emb_name),
                transpose_y=True)
            fc_out += fluid.layers.create_parameter(
                shape=[self._voc_size],
                dtype=self._emb_dtype,
                attr=mask_lm_out_bias_attr,
                is_bias=True)
        else:
            fc_out = fluid.layers.fc(input=mask_trans_feat,
                                     size=self._voc_size,
                                     param_attr=fluid.ParamAttr(
                                         name="mask_lm_out_fc.w_0",
                                         initializer=self._param_initializer),
                                     bias_attr=mask_lm_out_bias_attr)
        mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
            logits=fc_out, label=mask_label)
        mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
        return mean_mask_lm_loss

    def get_task_output(self, task, task_labels):
        """Build a classification head for `task` over the pooled output.

        `task` is a dict with "num_labels" and "task_name"; returns the mean
        cross-entropy loss and the accuracy on `task_labels`.
        """
        task_fc_out = fluid.layers.fc(
            input=self.next_sent_feat,
            size=task["num_labels"],
            param_attr=fluid.ParamAttr(
                name=task["task_name"] + "_fc.w_0",
                initializer=self._param_initializer),
            bias_attr=task["task_name"] + "_fc.b_0")
        task_loss, task_softmax = fluid.layers.softmax_with_cross_entropy(
            logits=task_fc_out, label=task_labels, return_softmax=True)
        task_acc = fluid.layers.accuracy(input=task_softmax, label=task_labels)
        mean_task_loss = fluid.layers.mean(task_loss)
        return mean_task_loss, task_acc
| 10,858 | 38.631387 | 92 | py |
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/model.py | from typing import Sequence
import gym
import numpy as np
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.modules.noisy_layer import NoisyLayer
from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import ModelConfigDict
from ray.rllib.utils.annotations import override
from torch import nn
from ray.rllib.models.preprocessors import DictFlatteningPreprocessor, get_preprocessor
torch, nn = try_import_torch()
class PovBaselineModel(TorchModelV2, nn.Module):
    """RLlib torch model for the visual (POV) observation space.

    Encodes the 64x64 RGB `pov` with a small conv net, the concatenated
    `inventory` + `compass` vector with an MLP, and maps the joint embedding
    to `num_outputs` logits.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        nn.Module.__init__(self)
        super().__init__(obs_space, action_space, num_outputs,
                         model_config, name)
        if num_outputs is None:
            # required by rllib's lstm wrapper
            num_outputs = int(np.product(self.obs_space.shape))
        pov_embed_size = 256
        inv_emded_size = 256
        embed_size = 512
        # 64x64x3 image -> three stride-4 convs -> pov_embed_size channels.
        self.pov_embed = nn.Sequential(
            nn.Conv2d(3, 64, 4, 4),
            nn.ReLU(),
            nn.Conv2d(64, 128, 4, 4),
            nn.ReLU(),
            nn.Conv2d(128, pov_embed_size, 4, 4),
            nn.ReLU(),
        )
        # 7 = 6 inventory slots + 1 compass angle (see forward's concat).
        self.inventory_compass_emb = nn.Sequential(
            nn.Linear(7, inv_emded_size),
            nn.ReLU(),
            nn.Linear(inv_emded_size, inv_emded_size),
            nn.ReLU(),
        )
        self.head = nn.Sequential(
            nn.Linear(pov_embed_size + inv_emded_size, embed_size),
            nn.ReLU(),
            nn.Linear(embed_size, embed_size),
            nn.ReLU(),
            nn.Linear(embed_size, num_outputs),
        )

    def forward(self, input_dict, state, seq_lens):
        """Return (logits, state) for a batch of dict observations."""
        obs = input_dict['obs']
        # Normalize pixels to [-0.5, 0.5] and move to NCHW layout.
        pov = obs['pov'] / 255. - 0.5
        pov = pov.transpose(2, 3).transpose(1, 2).contiguous()
        pov_embed = self.pov_embed(pov)
        pov_embed = pov_embed.reshape(pov_embed.shape[0], -1)
        inventory_compass = torch.cat([obs['inventory'], obs['compass']], 1)
        inv_comp_emb = self.inventory_compass_emb(inventory_compass)
        head_input = torch.cat([pov_embed, inv_comp_emb], 1)
        return self.head(head_input), state
class GridBaselineModel(TorchModelV2, nn.Module):
    """RLlib torch model for the symbolic (grid) observation space.

    Encodes the current `grid` and the `target_grid` with a shared MLP,
    the `agentPos` + `inventory` vector with a second MLP, and maps the
    concatenated embeddings to `num_outputs` logits.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        # flat_obs = {o: obs_space[o] for o in ['agentPos', 'inventory']}
        nn.Module.__init__(self)
        super().__init__(obs_space, action_space, num_outputs,
                         model_config, name)
        if num_outputs is None:
            # required by rllib's lstm wrapper
            num_outputs = int(np.product(self.obs_space.shape))
        hidden_grid = 300
        hidden_vec = 300
        hidden = 300
        # 9*11*11 = flattened grid volume; shared between current and target.
        self.encode_grid = nn.Sequential(
            nn.Linear(9*11*11, hidden_grid),
            nn.ReLU(),
            nn.Linear(hidden_grid, hidden_grid),
            nn.ReLU(),
            nn.Linear(hidden_grid, hidden_grid),
            nn.ReLU(),
            nn.Linear(hidden_grid, hidden_grid),
        )
        # 11 = 5 agentPos values + 6 inventory slots (see forward's concat).
        self.encode_pos_inventory = nn.Sequential(
            nn.Linear(11, hidden_vec),
            nn.ReLU(),
            nn.Linear(hidden_vec, hidden_vec),
            nn.ReLU()
        )
        self.head = nn.Sequential(
            nn.Linear(2*hidden_grid + hidden_vec, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, num_outputs)
        )

    def forward(self, input_dict, state, seq_lens):
        """Return (logits, state) for a batch of dict observations."""
        grid = input_dict['obs']['grid']
        target_grid = input_dict['obs']['target_grid']
        # Flatten both grids to [batch, 9*11*11] before the shared encoder.
        grid = grid.reshape(grid.shape[0], -1)
        target_grid = target_grid.reshape(target_grid.shape[0], -1)
        vector_input = torch.cat([input_dict['obs']['agentPos'], input_dict['obs']['inventory']], -1)
        grid_embed = self.encode_grid(grid)
        target_grid_embed = self.encode_grid(target_grid)
        vec_embed = self.encode_pos_inventory(vector_input)
        head_input = torch.cat([grid_embed, target_grid_embed, vec_embed], -1)
        return self.head(head_input), state
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/custom_agent.py | import tensorflow as tf
import torch
import gym
from copy import deepcopy as copy
from gym import spaces
import ray
import numpy as np
from torch._C import Value
import yaml
from wrappers import FakeIglu
from train import build_env, register_models
from ray.rllib.agents.registry import get_trainer_class
# Path to the rllib experiment config used to rebuild the trained agent.
CONFIG_FILE = './apex_c32/apex_c32.yml'
class CustomAgent:
    """Evaluation-time agent that restores a trained rllib policy and
    replays its (possibly multi-step) wrapped actions one env step at a time.
    """

    def __init__(self, action_space):
        ray.init(local_mode=True)
        self.action_space = action_space
        with open(CONFIG_FILE, 'r') as f:
            config = yaml.safe_load(f)['iglu-baseline']
        with open('metadata', 'r') as f:
            meta = yaml.safe_load(f)
        # The evaluation metadata must agree with the action space the model
        # was trained with; fail early otherwise.
        if meta['action_space'] != config['config']['env_config']['action_space']:
            metadata_as = meta['action_space']
            model_as = config['config']['env_config']['action_space']
            raise ValueError(
                'requested action space in metadata file differs '
                'from the one selected by the model. '
                f'Metadata action space: {metadata_as}; Model action space {model_as}')
        register_models()
        Trainer = get_trainer_class(config['run'])
        self.config = config
        config['config']['in_evaluation'] = True
        # FakeIglu mirrors the training wrappers so that observations and
        # actions can be translated without a real environment.
        self.fake_env = build_env(
            env_config=config['config']['env_config'],
            env_factory=lambda: FakeIglu(config['config']['env_config'], wrap_actions=False)
        )
        self.visual = config['config']['env_config']['visual']
        agent = Trainer(config=config['config'], env=FakeIglu)
        agent.restore('./apex_c32/apex_c32')
        self.agent = agent
        # Queue of low-level actions produced by the last policy decision.
        self.actions = iter([])
        self.state = None

    def policy(self, obs, reward, done, info, state):
        """Query the restored policy; returns (action, new_rnn_state)."""
        # Lazily create a zeroed LSTM state on the first call when needed.
        if self.agent.config['model'].get('use_lstm', False) and state is None:
            cell_size = self.agent.config['model'].get('lstm_cell_size')
            state = [
                torch.zeros((cell_size,)).float(),
                torch.zeros((cell_size,)).float(),
            ]
        output = self.agent.compute_single_action(
            obs, explore=False, state=state
        )
        # Stateless policies return just the action; recurrent ones a tuple.
        if not isinstance(output, tuple):
            action = output
        else:
            action, state, _ = output
        return action, state

    def act(self, obs, reward, done, info):
        """Return the next low-level action (or None at episode end)."""
        if done:
            # Episode boundary: drop queued actions and recurrent state.
            self.actions = iter([])
            self.state = None
            return
        try:
            action = next(self.actions)
        except StopIteration:
            # Queue exhausted: ask the policy for a new high-level action and
            # expand it into the wrapped low-level action sequence.
            obs = self.fake_env.wrap_observation(obs, reward, done, info)
            agent_action, self.state = self.policy(obs, reward, done, info, self.state)
            self.actions = iter(self.fake_env.stack_actions()(agent_action))
            action = next(self.actions)
        # Copy so callers can mutate the action without corrupting the queue.
        return copy(action)
| 2,851 | 35.101266 | 92 | py |
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/wrappers.py | from threading import stack_size
import gym
import os
import cv2
import shutil
import datetime
import pickle
import json
import uuid
import logging
from gym.core import ActionWrapper
import numpy as np
from collections import defaultdict
from typing import Generator
from minerl_patched.herobraine.hero import spaces
logger = logging.getLogger(__file__)
# Opt-in logging flag read from the environment (empty string = disabled).
IGLU_ENABLE_LOG = os.environ.get('IGLU_ENABLE_LOG', '')
class Wrapper(gym.Wrapper):
    """Base wrapper that forwards action-stacking and observation-wrapping
    requests down through a chain of nested wrappers."""

    def stack_actions(self):
        # Walk inward; yields None once the wrapper chain ends.
        inner = self.env
        if isinstance(inner, Wrapper):
            return inner.stack_actions()
        return None

    def wrap_observation(self, obs, reward, done, info):
        # Pass the observation through unchanged when the inner env does not
        # implement wrap_observation.
        inner = self.env
        if not hasattr(inner, 'wrap_observation'):
            return obs
        return inner.wrap_observation(obs, reward, done, info)
class ActionsWrapper(Wrapper):
    """Wrapper that expands one high-level action into a sequence of
    lower-level actions via `wrap_action`."""

    def wrap_action(self, action) -> Generator:
        # Subclasses yield one or more env-level actions for `action`.
        raise NotImplementedError

    def stack_actions(self):
        # Returns a generator function that composes this wrapper's expansion
        # with any inner wrappers' expansions, outermost first.
        def gen_actions(action):
            for action in self.wrap_action(action):
                wrapped = None
                if hasattr(self.env, 'stack_actions'):
                    wrapped = self.env.stack_actions()
                if wrapped is not None:
                    yield from wrapped(action)
                else:
                    yield action
        return gen_actions

    def step(self, action):
        # Execute every expanded action, accumulating reward; stop early on
        # episode end.
        total_reward = 0
        for a in self.wrap_action(action):
            obs, reward, done, info = super().step(a)
            total_reward += reward
            if done:
                return obs, total_reward, done, info
        return obs, total_reward, done, info
class ObsWrapper(Wrapper):
    """Wrapper that transforms observations via an `observation` hook."""

    def observation(self, obs, reward=None, done=None, info=None):
        # Subclasses return the transformed observation.
        raise NotImplementedError

    def wrap_observation(self, obs, reward, done, info):
        transformed = self.observation(obs, reward, done, info)
        return self.env.wrap_observation(transformed, reward, done, info)

    def reset(self):
        initial = super().reset()
        return self.observation(initial)

    def step(self, action):
        raw_obs, reward, done, info = super().step(action)
        wrapped = self.observation(raw_obs, reward, done, info)
        return wrapped, reward, done, info
class TimeLimit(Wrapper):
    """Forces episodes to terminate after `limit` steps."""

    def __init__(self, env, limit):
        super().__init__(env)
        self.limit = limit
        self.step_no = 0

    def reset(self):
        self.step_no = 0
        return super().reset()

    def step(self, action):
        self.step_no += 1
        obs, reward, done, info = super().step(action)
        # Force termination once the step budget is spent.
        done = done or self.step_no >= self.limit
        return obs, reward, done, info
class SizeReward(Wrapper):
    """Reward shaping: emit +1 for each new unit of maximal target
    intersection reached, tracked monotonically across the episode."""

    def __init__(self, env):
        super().__init__(env)
        # Best intersection size seen so far this episode.
        self.size = 0

    def reset(self):
        self.size = 0
        return super().reset()

    def step(self, action):
        obs, reward, done, info = super().step(action)
        # NOTE(review): reads the task monitor's max intersection directly
        # from the unwrapped env — assumes IGLU's task API; confirm.
        intersection = self.env.unwrapped.task.task_monitor.max_int
        # Reward is the increase of the running maximum (never negative).
        reward = max(intersection, self.size) - self.size
        self.size = max(intersection, self.size)
        return obs, reward, done, info
class SelectAndPlace(ActionsWrapper):
    """Expands hotbar-select and use/attack actions into short sequences."""

    def wrap_action(self, action):
        if action['hotbar'] != 0:
            # First emit the hotbar selection itself, then follow up with a
            # 'use' action (place the newly selected block).
            yield action
            action = self.env.action_space.noop()
            action['use'] = 1
        if action['use'] == 1 or action['attack'] == 1:
            # Repeat the click for 3 ticks — presumably so the place/break
            # registers reliably in the engine; TODO confirm.
            for _ in range(3):
                yield action
            action = self.env.action_space.noop()
        yield action
def flat_action_space(action_space):
    """Return the flattening function for a named action space.

    Args:
        action_space: one of 'human-level' or 'discrete'.

    Returns:
        A callable ``(env, camera_delta) -> list`` of discrete action dicts.

    Raises:
        ValueError: if ``action_space`` is not a supported name.
    """
    if action_space == 'human-level':
        return flat_human_level
    if action_space == 'discrete':
        return flat_discrete
    # Previously an unknown name silently returned None; fail loudly instead.
    raise ValueError('Unknown action space: %s' % action_space)
def flat_human_level(env, camera_delta=5):
    """Build the discrete action list for the 'human-level' action space.

    The list is: noop, one action per binary control, four camera turns
    (+/- `camera_delta` on each axis), six hotbar selections, and a final
    noop.
    """
    noop = env.action_space.no_op
    actions = [noop()]
    for control in ('attack', 'forward', 'back', 'left', 'right', 'jump'):
        pressed = noop()
        pressed[control] = 1
        actions.append(pressed)
    for axis in (0, 1):
        for delta in (camera_delta, -camera_delta):
            turn = noop()
            turn['camera'][axis] = delta
            actions.append(turn)
    for slot in range(1, 7):
        select = noop()
        select['hotbar'] = slot
        actions.append(select)
    actions.append(noop())
    return actions
def flat_discrete(env, camera_delta=5):
    """Build the discrete action list for the 'discrete' action space.

    The list is: noop, four movement actions, the same four with jump, an
    attack, four camera turns (+/- `camera_delta` on each axis), and six
    hotbar selections.
    """
    noop = env.action_space.no_op

    def make(**settings):
        act = noop()
        for field, value in settings.items():
            act[field] = value
        return act

    actions = [noop()]
    # Movement (forward/back/strafe-left/strafe-right), then jumping variants.
    for jump in (0, 1):
        for field, value in (('forward', 2), ('forward', 1),
                             ('strafe', 1), ('strafe', 2)):
            moved = make(**{field: value})
            if jump:
                moved['jump'] = 1
            actions.append(moved)
    actions.append(make(attack=1))
    for axis in (0, 1):
        for delta in (camera_delta, -camera_delta):
            turn = noop()
            turn['camera'][axis] = delta
            actions.append(turn)
    for slot in range(1, 7):
        actions.append(make(hotbar=slot))
    return actions
class Discretization(ActionsWrapper):
    """Exposes a Discrete action space backed by a fixed list of raw actions
    produced by a flattening function."""

    def __init__(self, env, flatten):
        super().__init__(env)
        delta = 5
        # Enumerate the raw action dicts before replacing action_space.
        self.discretes = flatten(env, delta)
        self.action_space = gym.spaces.Discrete(len(self.discretes))
        self.old_action_space = env.action_space
        self.last_action = None

    def wrap_action(self, action=None, raw_action=None):
        # A discrete index takes precedence over a raw action dict.
        chosen = raw_action
        if action is not None:
            chosen = self.discretes[action]
        yield chosen
class FakeIglu(gym.Env):
    """A stand-in for the real IGLU env that returns random samples from the
    observation space. Useful for smoke-testing training code without
    launching the actual environment.

    BUGFIX: the original stored the episode step counter in `self.step`,
    which shadowed the `step()` method — after construction,
    `env.step(action)` raised ``TypeError: 'int' object is not callable``.
    The counter now lives in `self.step_count`.
    """
    def __init__(self, config, wrap_actions=True):
        # `config` keys: 'action_space' (one of the three variants below)
        # and 'visual' (bool selecting the observation layout).
        action_space = config.get('action_space')
        visual = config.get('visual')
        if action_space == 'human-level':
            self.action_space = spaces.Dict({
                'forward': spaces.Discrete(2),
                'back': spaces.Discrete(2),
                'left': spaces.Discrete(2),
                'right': spaces.Discrete(2),
                'jump': spaces.Discrete(2),
                'camera': spaces.Box(low=-180.0, high=180.0, shape=(2,)),
                'attack': spaces.Discrete(2),
                'use': spaces.Discrete(2),
                'hotbar': spaces.Discrete(7),
            })
        elif action_space == 'discrete':
            self.action_space = spaces.Dict({
                'move': spaces.Discrete(3),
                'strafe': spaces.Discrete(3),
                'jump': spaces.Discrete(2),
                'camera': spaces.Box(low=-180.0, high=180.0, shape=(2,)),
                'attack': spaces.Discrete(2),
                'use': spaces.Discrete(2),
                'hotbar': spaces.Discrete(7),
            })
        elif action_space == 'continuous':
            self.action_space = spaces.Dict({
                'move_x': spaces.Box(low=-1., high=1., shape=(), dtype=np.float32),
                'move_y': spaces.Box(low=-1., high=1., shape=(), dtype=np.float32),
                'move_z': spaces.Box(low=-1., high=1., shape=(), dtype=np.float32),
                'camera': spaces.Box(low=-180.0, high=180.0, shape=(2,)),
                'attack': spaces.Discrete(2),
                'use': spaces.Discrete(2),
                'hotbar': spaces.Discrete(7),
            })
        if wrap_actions:
            # NOTE(review): the flattening helpers call
            # env.action_space.no_op(); confirm the dict spaces above
            # actually provide a no_op() method in this code path.
            flatten_actions = flat_action_space(action_space)
            self.discrete = flatten_actions(self, camera_delta=5)
            self.full_action_space = self.action_space
            self.action_space = spaces.Discrete(len(self.discrete))
        if visual:
            self.observation_space = spaces.Dict({
                'pov': spaces.Box(0, 255, (64, 64, 3), dtype=np.float32),
                'inventory': spaces.Box(low=0, high=20, shape=(6,), dtype=np.float32),
                'compass': spaces.Box(low=-180.0, high=180.0, shape=(1,), dtype=np.float32),
            })
        else:
            self.observation_space = spaces.Dict({
                'agentPos': gym.spaces.Box(low=-5000.0, high=5000.0, shape=(5,)),
                'grid': gym.spaces.Box(low=0.0, high=6.0, shape=(9, 11, 11)),
                'inventory': gym.spaces.Box(low=0.0, high=20.0, shape=(6,)),
                'target_grid': gym.spaces.Box(low=0.0, high=6.0, shape=(9, 11, 11))
            })
        # Episode step counter (was `self.step`, which shadowed the method).
        self.step_count = 0

    def reset(self):
        self.step_count = 0
        return self.observation_space.sample()

    def step(self, action):
        self.step_count += 1
        # Fixed-length fake episodes of 1000 steps, no reward signal.
        done = self.step_count >= 1000
        reward = 0
        info = {}
        return self.observation_space.sample(), reward, done, info

    def update_taskset(self, *args, **kwargs):
        # The fake env has no tasks; accept and ignore the call.
        pass

    def set_task(self, *args, **kwargs):
        pass
class VideoLogger(Wrapper):
    """Records, per episode: the action sequence (json), the non-pov
    observations plus rewards (pickle) and the pov frames (mp4), under
    ``action_logs/run-<timestamp>/episode-<timestamp>-<uuid>*``.

    NOTE(review): apart from the unused `every` parameter this class is an
    exact copy of `Logger` below — consider deduplicating.
    """
    def __init__(self, env, every=50):
        super().__init__(env)
        # One output directory per wrapper instance, stamped at creation.
        runtime = timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
        self.dirname = f'action_logs/run-{runtime}'
        self.every = every  # NOTE(review): never read anywhere in this class
        self.filename = None
        self.running_reward = 0
        self.actions = []
        self.flushed = False
        os.makedirs(self.dirname, exist_ok=True)
    def flush(self):
        """Dump the finished episode (if any), then open files for a new one."""
        if self.filename is not None:
            # Episode reward is encoded into the json filename.
            with open(f'{self.filename}-r{self.running_reward}.json', 'w') as f:
                json.dump(self.actions, f)
            self.out.release()
            with open(f'{self.filename}-obs.pkl', 'wb') as f:
                pickle.dump(self.obs, f)
            self.obs = []
        timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
        uid = str(uuid.uuid4().hex)
        name = f'episode-{timestamp}-{uid}'
        self.filename = os.path.join(self.dirname, name)
        self.running_reward = 0
        self.flushed = True
        self.actions = []
        self.frames = []
        self.obs = []
        # 20 fps video at the 64x64 pov resolution used elsewhere in the file.
        self.out = cv2.VideoWriter(f'{self.filename}.mp4', cv2.VideoWriter_fourcc(*'mp4v'),
            20, (64, 64))
    def reset(self):
        # Flush pending episode data before starting the next episode.
        if not self.flushed:
            self.flush()
        return super().reset()
    def close(self):
        if not self.flushed:
            self.flush()
        return super().close()
    def step(self, action):
        # assuming dict
        self.flushed = False
        new_action = {}
        for key in action:
            new_action[key] = action[key]
            if isinstance(new_action[key], np.ndarray):
                # json can't serialize ndarrays; store plain lists instead.
                new_action[key] = new_action[key].tolist()
        obs, reward, done, info = super().step(action)
        self.actions.append(new_action)
        # Channel flip: cv2 writes BGR, the pov frame is stored RGB.
        self.out.write(obs['pov'][..., ::-1])
        # Keep everything except the (large) pov frame in the pickle log.
        self.obs.append({k: v for k, v in obs.items() if k != 'pov'})
        self.obs[-1]['reward'] = reward
        self.running_reward += reward
        return obs, reward, done, info
class Logger(Wrapper):
    """Records, per episode: the action sequence (json), the non-pov
    observations plus rewards (pickle) and the pov frames (mp4), under
    ``action_logs/run-<timestamp>/episode-<timestamp>-<uuid>*``.

    NOTE(review): duplicate of `VideoLogger` minus the unused `every`
    parameter — consider keeping only one of the two.
    """
    def __init__(self, env):
        super().__init__(env)
        # One output directory per wrapper instance, stamped at creation.
        runtime = timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
        self.dirname = f'action_logs/run-{runtime}'
        self.filename = None
        self.running_reward = 0
        self.actions = []
        self.flushed = False
        os.makedirs(self.dirname, exist_ok=True)
    def flush(self):
        """Dump the finished episode (if any), then open files for a new one."""
        if self.filename is not None:
            # Episode reward is encoded into the json filename.
            with open(f'{self.filename}-r{self.running_reward}.json', 'w') as f:
                json.dump(self.actions, f)
            self.out.release()
            with open(f'{self.filename}-obs.pkl', 'wb') as f:
                pickle.dump(self.obs, f)
            self.obs = []
        timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
        uid = str(uuid.uuid4().hex)
        name = f'episode-{timestamp}-{uid}'
        self.filename = os.path.join(self.dirname, name)
        self.running_reward = 0
        self.flushed = True
        self.actions = []
        self.frames = []
        self.obs = []
        # 20 fps video at the 64x64 pov resolution used elsewhere in the file.
        self.out = cv2.VideoWriter(f'{self.filename}.mp4', cv2.VideoWriter_fourcc(*'mp4v'),
            20, (64, 64))
    def reset(self):
        # Flush pending episode data before starting the next episode.
        if not self.flushed:
            self.flush()
        return super().reset()
    def close(self):
        if not self.flushed:
            self.flush()
        return super().close()
    def step(self, action):
        # assuming dict
        self.flushed = False
        new_action = {}
        for key in action:
            new_action[key] = action[key]
            if isinstance(new_action[key], np.ndarray):
                # json can't serialize ndarrays; store plain lists instead.
                new_action[key] = new_action[key].tolist()
        obs, reward, done, info = super().step(action)
        self.actions.append(new_action)
        # Channel flip: cv2 writes BGR, the pov frame is stored RGB.
        self.out.write(obs['pov'][..., ::-1])
        # Keep everything except the (large) pov frame in the pickle log.
        self.obs.append({k: v for k, v in obs.items() if k != 'pov'})
        self.obs[-1]['reward'] = reward
        self.running_reward += reward
        return obs, reward, done, info
class VisualObservationWrapper(ObsWrapper):
    """Exposes pov + inventory + compass (and optionally the target grid).

    BUGFIX: the original declared 'target_grid' in the observation space
    when ``include_target=True`` but never put it into the returned
    observation, so observations did not match their own space and the
    resolved target grid was silently discarded. The flag is now stored
    and the target grid is returned when requested. Behaviour with the
    default ``include_target=False`` is unchanged.
    """
    def __init__(self, env, include_target=False):
        super().__init__(env)
        self.include_target = include_target
        self.observation_space = {
            'pov': gym.spaces.Box(low=0, high=255, shape=(64, 64, 3)),
            'inventory': gym.spaces.Box(low=0.0, high=20.0, shape=(6,)),
            'compass': gym.spaces.Box(low=-180.0, high=180.0, shape=(1,))
        }
        if include_target:
            self.observation_space['target_grid'] = \
                gym.spaces.Box(low=0, high=6, shape=(9, 11, 11))
        self.observation_space = gym.spaces.Dict(self.observation_space)

    def observation(self, obs, reward=None, done=None, info=None):
        # Resolve the target grid: prefer the one delivered via `info`,
        # otherwise fall back to the env's current task.
        if info is not None:
            if 'target_grid' in info:
                target_grid = info['target_grid']
                del info['target_grid']
            else:
                logger.error(f'info: {info}')
                # Missing target grid means the env state is suspect;
                # request a (fake) reset if the env supports it.
                if hasattr(self.unwrapped, 'should_reset'):
                    self.unwrapped.should_reset(True)
                target_grid = self.env.unwrapped.tasks.current.target_grid
        else:
            target_grid = self.env.unwrapped.tasks.current.target_grid
        result = {
            'pov': obs['pov'].astype(np.float32),
            'inventory': obs['inventory'],
            # obs['compass'] carries an 'angle' entry; wrap as a 1-element array.
            'compass': np.array([obs['compass']['angle'].item()])
        }
        if self.include_target:
            result['target_grid'] = target_grid
        return result
class VectorObservationWrapper(ObsWrapper):
    """Exposes the vector observation: agent position, voxel grid,
    inventory, and the target grid of the current task."""
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = gym.spaces.Dict({
            'agentPos': gym.spaces.Box(low=-5000.0, high=5000.0, shape=(5,)),
            'grid': gym.spaces.Box(low=0.0, high=6.0, shape=(9, 11, 11)),
            'inventory': gym.spaces.Box(low=0.0, high=20.0, shape=(6,)),
            'target_grid': gym.spaces.Box(low=0.0, high=6.0, shape=(9, 11, 11))
        })

    def observation(self, obs, reward=None, done=None, info=None):
        if IGLU_ENABLE_LOG == '1':
            # Sanity-check each component against its declared bounds.
            for component in ('agentPos', 'inventory', 'grid'):
                space = self.observation_space[component]
                self.check_component(obs[component], component, space.low, space.high)
        # Resolve the target grid: prefer the one delivered via `info`,
        # otherwise fall back to the env's current task.
        if info is None:
            target_grid = self.env.unwrapped.tasks.current.target_grid
        elif 'target_grid' in info:
            target_grid = info['target_grid']
            del info['target_grid']
        else:
            logger.error(f'info: {info}')
            if hasattr(self.unwrapped, 'should_reset'):
                self.unwrapped.should_reset(True)
            target_grid = self.env.unwrapped.tasks.current.target_grid
        return {
            'agentPos': obs['agentPos'],
            'inventory': obs['inventory'],
            'grid': obs['grid'],
            'target_grid': target_grid,
        }

    def check_component(self, arr, name, low, hi):
        """Log the positions and values of out-of-bounds entries in `arr`."""
        below = arr < low
        if below.any():
            logger.info(f'{name} is below level {low}:')
            logger.info(below.nonzero())
            logger.info(arr[below])
        above = arr > hi
        if above.any():
            logger.info(f'{name} is above level {hi}:')
            logger.info(above.nonzero())
            logger.info(arr[above])
| 17,626 | 33.767258 | 92 | py |
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/test_submission.py | import os
from collections import defaultdict
import yaml
import gym
import numpy as np
NB_EPISODES = 3
MAX_EPISODE_STEPS = 1000
VISUAL = False
def check_action(action, action_space):
    """Validate that `action` is a dict and every key belongs to
    `action_space.spaces`; raise ValueError on the first violation."""
    if not isinstance(action, dict):
        raise ValueError('action should be a dict')
    known = action_space.spaces
    missing = object()  # sentinel so even a None key is reported
    unexpected = next((key for key in action if key not in known), missing)
    if unexpected is not missing:
        raise ValueError('unexpected action key: {}'.format(unexpected))
def _play_game(agent_class, env_spec, env=None):
    """
    Run NB_EPISODES evaluation episodes of `agent_class` on the IGLU env
    and collect per-episode reward / success / completion statistics.

    Args:
        agent_class: agent type; must accept ``action_space=`` and expose
            ``act(obs, reward, done, info)``.
        env_specs (path to file or dict): path to yaml or dict with environment.
        env: optional pre-built environment; created from `env_spec` if None.

    Returns:
        dict: task id -> dict with 'reward', 'success_rate' and
        'completion_rate' lists plus the 'action_space' used.
    """
    # To make things faster, set this to '0'
    os.environ['IGLU_DISABLE_FAKE_RESET'] = '0'
    import iglu
    from iglu.tasks import TaskSet, Task, CustomTasks
    stats = defaultdict(lambda: defaultdict(list))
    # `env_spec` may be a yaml file path; load it into a dict if the file exists.
    if isinstance(env_spec, str) and os.path.exists(env_spec):
        with open(env_spec, 'r') as f:
            data = yaml.safe_load(f)
        env_spec = data
    if env is None:
        requested_action_space = env_spec['action_space']
        name = f'IGLUSilentBuilder{"Visual" if VISUAL else ""}-v0'
        print(f'Running {name} using {requested_action_space} action space...')
        env = gym.make(
            name,
            max_steps=MAX_EPISODE_STEPS,
            action_space=requested_action_space
        )
    agent = agent_class(action_space=env.action_space)
    # here we set the current structure as the task of the current environment
    custom_grid = np.zeros((9, 11, 11)) # (y, x, z)
    # NOTE(review): the comments call block id 1 "blue" while the dialog
    # below asks for red blocks — confirm which colour id 1 maps to.
    custom_grid[:3, 5, 5] = 1 # blue color
    custom_grid[0, 4, 5] = 1 # blue color
    custom_grid[0, 3, 5] = 1 # blue color
    env.update_taskset(CustomTasks([
        ('<Architect> Please, build a stack of three red blocks somewhere.\n'
         '<Builder> Sure.',
         custom_grid)
    ]))
    task = '<fake_task_id>'
    for episode in range(NB_EPISODES):
        obs = env.reset()
        # Number of non-empty target cells — the value "success" must reach.
        target_grid_size = len(env.tasks.current.target_grid.nonzero()[0])
        done = False
        reward = 0
        total_reward = 0
        info = {}
        if VISUAL:
            # remove the grid key which was needed only for reward computation
            del obs['grid']
        else:
            # expose the target grid after reset
            info['target_grid'] = env.tasks.current.target_grid.copy()
        maximal_intersection = 0
        while not done:
            action = agent.act(obs, reward, done, info)
            check_action(action, env.action_space)
            obs, reward, done, info = env.step(action)
            total_reward += reward
            # Track the best grid/target intersection seen in the episode.
            maximal_intersection = max(env.task.task_monitor.max_int, maximal_intersection)
        # just for sanity check
        if maximal_intersection > target_grid_size:
            raise ValueError('intersetion cannot be bigger than a part of it.'
                             'Probably, the task inside the env is wrong')
        # Let the agent know the game is done.
        agent.act(obs, reward, done, info)
        stats[task]['reward'].append(total_reward)
        sr = float(maximal_intersection == target_grid_size)
        stats[task]['success_rate'].append(sr)
        cr = maximal_intersection / target_grid_size
        stats[task]['completion_rate'].append(cr)
        print(f'Episode {episode}/{NB_EPISODES} of task {task}: '
              f'reward={total_reward}; succ_rate={sr}; compl_rate={cr}')
    # NOTE(review): `requested_action_space` is only assigned when env is
    # None above; passing a pre-built env would raise NameError here.
    stats[task]['action_space'] = requested_action_space
    env.close()
    return stats
if __name__ == '__main__':
    from custom_agent import CustomAgent
    # NOTE(review): 'metadata' is passed as `env_spec`; _play_game only
    # parses it as a yaml file if a file with that name exists, otherwise
    # it is used as a dict — confirm this is the intended invocation.
    _play_game(CustomAgent, 'metadata')
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/train.py | import yaml
import ray
import os
import gym
import iglu
import sys
import wandb
import logging
from collections import defaultdict
from filelock import FileLock
from iglu.tasks import RandomTasks, TaskSet
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.tune.logger import DEFAULT_LOGGERS
from ray.tune.integration.wandb import WandbLogger
from argparse import ArgumentParser
from wrappers import \
SelectAndPlace, \
Discretization, \
flat_action_space, \
SizeReward, \
TimeLimit, \
VectorObservationWrapper, \
VisualObservationWrapper, \
Logger
from ray.rllib.evaluation.metrics import collect_episodes, summarize_episodes
from model import GridBaselineModel, PovBaselineModel
logging.basicConfig(stream=sys.stdout)
def evaluate_separately(trainer, eval_workers):
    """Custom rllib evaluation: run every preset task once and report the
    aggregate metrics plus a per-task episode reward.

    Fixes over the original:
    * the task id is bound by value before the lambda is shipped to the
      workers — the original closed over the loop variable `i`, which is a
      late-binding hazard and could also index past the end of `env_ids`
      when there are more workers than remaining tasks;
    * the summary is computed over *all* collected episodes instead of
      only the last sampling batch.
    """
    w = next(iter(eval_workers.remote_workers()))
    # Ask one worker for the list of preset task ids.
    env_ids = ray.get(w.foreach_env.remote(lambda env: list(env.tasks.preset.keys())))[0]
    print(f'env id: {env_ids}')
    i = 0
    all_episodes = []
    while i < len(env_ids):
        # Assign one task per worker, then sample them all in parallel.
        for w in eval_workers.remote_workers():
            if i >= len(env_ids):
                break
            task_id = env_ids[i]
            # Default-arg binding freezes task_id for this remote call.
            w.foreach_env.remote(lambda env, task_id=task_id: env.set_task(task_id))
            i += 1
        ray.get([w.sample.remote() for w in eval_workers.remote_workers()])
        episodes, _ = collect_episodes(
            remote_workers=eval_workers.remote_workers(), timeout_seconds=99999)
        all_episodes += episodes
    metrics = summarize_episodes(all_episodes)
    for eid, ep in zip(env_ids, all_episodes):
        metrics[f'env_{eid}_reward'] = ep.episode_reward
    return metrics
def build_env(env_config=None, env_factory=None):
    """
    Build the IGLU training environment with the wrapper stack applied.

    Args:
        env_config (dict): a dictionary with following keys:
          * action_space :: human-level | discrete | continuous
          * visual :: (bool) whether to expose only visual observation
          * size_reward :: (bool) whether to use reward for increasing size, otherwise default
          * time_limit :: (int) episode step limit enforced by TimeLimit
          * task_mode :: possible values are: 'one_task', 'many_tasks', 'random_tasks'
                if task_mode is one_task -> string with task id
                if task_mode is many_tasks -> list of task ids
                if task_mode is random_tasks -> ignored
          * task_id :: (str or list[str]) task id list of task ids
          * random_tasks :: specification for the random tasks generator. for details,
                see the documentation of iglu.tasks.RandomTasks
        env_factory (callable, optional): function that returns a env instance
    """
    # Imported here (not at module top) so workers register the IGLU gym
    # envs in their own process before gym.make is called.
    import iglu
    from iglu.tasks import TaskSet
    if env_config is None:
        # Nested defaultdicts so missing keys resolve to empty dicts instead
        # of raising. NOTE(review): a caller-provided plain dict does NOT get
        # this behaviour — e.g. env_config['time_limit'] below would raise
        # KeyError if absent.
        env_config = defaultdict(lambda: defaultdict(dict))
    if env_factory is None:
        env = gym.make('IGLUSilentBuilder-v0', max_steps=5000)
        # Select the task regime; an unrecognized task_mode is silently ignored.
        if env_config['task_mode'] == 'one_task':
            env.update_taskset(TaskSet(preset=[env_config['task_id']]))
            env.set_task(env_config['task_id'])
        elif env_config['task_mode'] == 'many_tasks':
            env.update_taskset(TaskSet(preset=env_config['task_id']))
        elif env_config['task_mode'] == 'random_tasks':
            env.update_taskset(RandomTasks(
                max_blocks=env_config['random_tasks'].get('max_blocks', 3),
                height_levels=env_config['random_tasks'].get('height_levels', 1),
                allow_float=env_config['random_tasks'].get('allow_float', False),
                max_dist=env_config['random_tasks'].get('max_dist', 2),
                num_colors=env_config['random_tasks'].get('num_colors', 1),
                max_cache=env_config['random_tasks'].get('max_cache', 0),
            ))
    else:
        env = env_factory()
    #env = Logger(env)
    env = SelectAndPlace(env)
    env = Discretization(env, flat_action_space(env_config['action_space']))
    # visual - pov + inventory + compass + target grid;
    # vector: grid + position + inventory + target grid
    if env_config['visual']:
        env = VisualObservationWrapper(env)
    else:
        env = VectorObservationWrapper(env)
    if env_config.get('size_reward', False):
        env = SizeReward(env)
    env = TimeLimit(env, limit=env_config['time_limit'])
    return env
def register_models():
    """Register the two custom rllib models under their catalog names."""
    models = (
        ("grid_baseline_model", GridBaselineModel),
        ("pov_baseline_model", PovBaselineModel),
    )
    for catalog_name, model_cls in models:
        ModelCatalog.register_custom_model(catalog_name, model_cls)
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-f', type=str, help='file')
    parser.add_argument('--local', action='store_true', default=False)
    parser.add_argument('--wdb', action='store_true', default=False)
    args = parser.parse_args()
    if args.local:
        # Local mode: everything runs in one process for easier debugging.
        ray.init(local_mode=True)
    tune.register_env('IGLUSilentBuilder-v0', build_env)
    register_models()
    with open(args.f) as f:
        # BUGFIX: yaml.load(f) without a Loader is deprecated and raises
        # TypeError on PyYAML >= 6.0; safe_load also avoids arbitrary
        # object construction from the config file.
        config = yaml.safe_load(f)
    for key in config:
        if args.wdb:
            # Wire up Weights & Biases logging for this experiment.
            config[key]['config']['logger_config'] = {}
            config[key]['config']['logger_config']['wandb'] = {
                "api_key": os.environ.get('WANDB_APIKEY'),
                "project": key,
                "log_config": False
            }
        config[key]['config']['env'] = config[key]['env']
        run = config[key]['run']
        print(config)
        # 'env' and 'run' are consumed above; tune.run must not see them.
        del config[key]['env'], config[key]['run']
        config[key]['config']['custom_eval_function'] = evaluate_separately
        if args.local:
            # Shrink the experiment so a local smoke test finishes quickly.
            config[key]['config']['num_workers'] = 1
            config[key]['stop']['timesteps_total'] = 3000
            config[key]['config']['timesteps_per_iteration'] = 100
            # config[key]['config']['learning_starts'] = 0
            # if args.wdb:
            #     del config[key]['config']['logger_config']['wandb']
        if args.wdb:
            loggers = DEFAULT_LOGGERS + (WandbLogger, )
        else:
            loggers = DEFAULT_LOGGERS
        tune.run(run, **config[key], loggers=loggers)
honeypot-camera | honeypot-camera-master/camera.py | # Copyright (c) 2014 Alexander Bredo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import tornado.ioloop
import tornado.web
import os, time
import datetime, math
from PIL import Image, ImageDraw, ImageEnhance
class CameraImageProcessor:
    """Turns a static source image into a plausible "live" camera frame:
    scales it down, dims it according to the simulated time of day and
    stamps a watermark line containing the current timestamp.
    """
    def __init__(self, in_filename, out_filename, width=640, height=480):
        # Target bounding box for the thumbnail (aspect ratio is preserved
        # by PIL's thumbnail()).
        self.size = (width, height)
        self.in_filename = in_filename
        self.out_filename = out_filename

    def process(self, prefix, postfix):
        """Render the fake camera frame to `out_filename` as JPEG.

        `prefix` and `postfix` are placed around the timestamp in the
        watermark text.
        """
        now = datetime.datetime.now()
        original = Image.open(self.in_filename)
        # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
        # same filter (available since Pillow 2.7). Fall back for very old
        # Pillow versions that only know ANTIALIAS.
        resample = getattr(Image, 'LANCZOS', None)
        if resample is None:
            resample = Image.ANTIALIAS
        original.thumbnail(self.size, resample)
        # Dim the frame to fake day/night lighting.
        original = ImageEnhance.Brightness(original).enhance(self.getDaylightIntensity(now.hour)) # overwrite original
        watermark = Image.new("RGBA", original.size)
        waterdraw = ImageDraw.ImageDraw(watermark, "RGBA")
        waterdraw.text((4, 2), "%s @ %s -- %s" % (prefix, now, postfix))
        original.paste(watermark, None, watermark)
        original.save(self.out_filename, "JPEG")

    def getDaylightIntensity(self, hour):
        """Brightness factor for `hour` in [0, 24], following a sine curve:
        roughly 0.05 at night, peaking near 0.95 in the early afternoon."""
        # D = [0; 24] and W = [0; 1]
        return 0.45 * math.sin(0.25 * hour + 4.5) + 0.5
class CameraHandler(tornado.web.RequestHandler):
    """Serves the fake camera frame at /camera.jpg, regenerating it with a
    fresh timestamp and daylight level on every request."""
    BOUNDARY = '--boundarydonotcross'
    HEADERS = {
        'Cache-Control': 'no-store, no-cache, must-revalidate, pre-check=0, post-check=0, max-age=0',
        'Connection': 'close',
        'Expires': 'Mon, 3 Jan 2000 12:34:56 GMT',
        'Pragma': 'no-cache'
    }

    def get(self):
        # Anti-caching headers first, so clients always re-request the frame.
        self._apply_headers(CameraHandler.HEADERS)
        # TODO: Do not process if current
        processor = CameraImageProcessor("img/Lighthouse.jpg", "img/camera.jpg")
        processor.process("CAM3: COMPANY Facility Management",
                          "(c) 2014 by COMPANY Engineering AG")
        img_filename = "img/camera.jpg"
        self._apply_headers(self.image_headers(img_filename))
        with open(img_filename, "rb") as image_file:
            self.write(image_file.read())

    def _apply_headers(self, headers):
        # Push every entry of a header dict onto the response.
        for header_name, header_value in headers.items():
            self.set_header(header_name, header_value)

    def image_headers(self, filename):
        """Response headers describing the freshly rendered JPEG."""
        return {
            'X-Timestamp': int(time.time()),
            'Content-Length': os.path.getsize(filename),
            'Content-Type': 'image/jpeg',
        }
class RootHandler(tornado.web.RequestHandler):
    """Renders the landing page template with title and refresh interval."""
    settings = {
        'title': 'COMPANY Facility Management',
        'refresh': 5,
    }

    def get(self):
        page_settings = RootHandler.settings
        return self.render("templates/index.html", page=page_settings)
# URL routing: the fake camera image, the landing page, and static assets.
application = tornado.web.Application([
    (r'/camera.jpg', CameraHandler),
    (r'/', RootHandler),
    (r'/(favicon\.ico)', tornado.web.StaticFileHandler, {'path': 'static/'}),
    (r'/static/(.*)', tornado.web.StaticFileHandler, {'path': 'static/'}),
])
if __name__ == "__main__":
    # Port 80 typically requires elevated privileges.
    application.listen(80)
    tornado.ioloop.IOLoop.instance().start()
| 3,836 | 33.567568 | 112 | py |
gdelt-doc-api | gdelt-doc-api-main/setup.py | import setuptools
with open("requirements.txt", "r") as f:
requirements = [line.replace("\n", "") for line in f.readlines()]
with open("README.md", "r") as fh:
long_description = fh.read()
with open("gdeltdoc/_version.py", "r") as g:
version = "1.0.0"
for line in g.readlines():
if "version" in line:
version = line.split("=")[1].replace("\n", "").replace('"', "").replace(" ", "")
setuptools.setup(
name="gdeltdoc",
version=version,
author="Alex Smith",
author_email="alex@alexsmith.dev",
description="A client for the GDELT 2.0 Doc API",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alex9smith/gdelt-doc-api",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=requirements,
) | 1,019 | 30.875 | 92 | py |
gdelt-doc-api | gdelt-doc-api-main/tests/test_filters.py | from gdeltdoc import Filters, near, repeat, multi_repeat, VALID_TIMESPAN_UNITS
import unittest
class FiltersTestCase(unittest.TestCase):
    """
    Test that the correct query strings are generated from
    various filters.

    Expected strings are kept verbatim; note that single keywords/phrases
    are quoted while multiple values are wrapped in a boolean OR.
    """
    def test_single_keyword_filter(self):
        f = Filters(keyword="airline", start_date="2020-03-01", end_date="2020-03-02")
        self.assertEqual(f.query_string,
            '"airline" &startdatetime=20200301000000&enddatetime=20200302000000&maxrecords=250')
    def test_single_keyphrase_filter(self):
        # A multi-word phrase is still a single quoted keyword.
        f = Filters(keyword="climate change", start_date="2020-03-01", end_date="2020-03-02")
        self.assertEqual(f.query_string,
            '"climate change" &startdatetime=20200301000000&enddatetime=20200302000000&maxrecords=250')
    def test_multiple_keywords(self):
        # A list of keywords becomes an unquoted OR group.
        f = Filters(keyword=["airline", "climate"], start_date = "2020-05-13", end_date = "2020-05-14")
        self.assertEqual(f.query_string,
            '(airline OR climate) &startdatetime=20200513000000&'
            'enddatetime=20200514000000&maxrecords=250')
    def test_multiple_themes(self):
        f = Filters(theme=["ENV_CLIMATECHANGE", "LEADER"], start_date="2020-05-13", end_date="2020-05-14")
        self.assertEqual(f.query_string,
            '(theme:ENV_CLIMATECHANGE OR theme:LEADER) &startdatetime=20200513000000&'
            'enddatetime=20200514000000&maxrecords=250')
    def test_theme_and_keyword(self):
        # Multiple filter kinds are concatenated (implicit AND in the API).
        f = Filters(keyword="airline", theme="ENV_CLIMATECHANGE", start_date="2020-05-13", end_date="2020-05-14")
        self.assertEqual(f.query_string,
            '"airline" theme:ENV_CLIMATECHANGE &startdatetime=20200513000000&'
            'enddatetime=20200514000000&maxrecords=250')
class NearTestCast(unittest.TestCase):
    """
    Tests for the `near()` filter helper.
    """
    def test_two_words(self):
        result = near(5, "airline", "crisis")
        self.assertEqual(result, 'near5:"airline crisis" ')
    def test_three_words(self):
        result = near(10, "airline", "climate", "change")
        self.assertEqual(result, 'near10:"airline climate change" ')
    def test_one_word(self):
        # Fewer than two words must be rejected.
        with self.assertRaisesRegex(ValueError, "At least two words"):
            near(5, "airline")
class RepeatTestCase(unittest.TestCase):
    """
    Tests for the `repeat()` filter helper.
    """
    def test_repeat(self):
        result = repeat(3, "environment")
        self.assertEqual(result, 'repeat3:"environment" ')
    def test_repeat_phrase(self):
        # Multi-word phrases must be rejected.
        with self.assertRaisesRegex(ValueError, "single word"):
            repeat(5, "climate change ")
class MultiRepeatTestCase(unittest.TestCase):
    """
    Test that `multi_repeat()` generates the correct filters and errors.
    """
    def test_multi_repeat(self):
        # AND joins the repeat clauses inline, without parentheses.
        self.assertEqual(multi_repeat([(2, "airline"), (3, "airport")], "AND"), 'repeat2:"airline" AND repeat3:"airport" ')
    def test_multi_repeat_or(self):
        # OR additionally wraps the whole group in parentheses.
        self.assertEqual(multi_repeat([(2, "airline"), (3, "airport")], "OR"), '(repeat2:"airline" OR repeat3:"airport" )')
    def test_multi_repeat_checks_method(self):
        self.assertRaisesRegex(ValueError, "method must be one of AND or OR")
        with self.assertRaisesRegex(ValueError, "method must be one of AND or OR"):
            multi_repeat([(2, "airline"), (3, "airport")], "NOT_A_METHOD")
class TimespanTestCase(unittest.TestCase):
    """
    Tests for `Filters._validate_timespan`.
    """
    def test_allows_valid_units(self):
        # None of the supported units should raise for a 60-unit period.
        for unit in VALID_TIMESPAN_UNITS:
            try:
                Filters._validate_timespan("60" + unit)
            except ValueError:
                self.fail()
    def test_forbids_invalid_units(self):
        with self.assertRaisesRegex(ValueError, "is not a supported unit"):
            Filters._validate_timespan("60milliseconds")
    def test_forbids_invalid_values(self):
        # Non-integer or malformed magnitudes must be rejected.
        for timespan in ("12.5min", "40days0", "2/3weeks"):
            with self.assertRaises(ValueError):
                Filters._validate_timespan(timespan)
    def test_forbids_incorrectly_formatted_timespans(self):
        # Unit before value parses as an unknown unit.
        with self.assertRaisesRegex(ValueError, "is not a supported unit"):
            Filters._validate_timespan("min15")
    def test_timespan_greater_than_60_mins(self):
        with self.assertRaisesRegex(ValueError, "Period must be at least 60 minutes"):
            Filters._validate_timespan("15min")
gdelt-doc-api | gdelt-doc-api-main/tests/test_client.py | import pandas as pd
import unittest
from gdeltdoc import GdeltDoc, Filters
from datetime import datetime, timedelta
class ArticleSearchTestCast(unittest.TestCase):
    """
    Test that the API client behaves correctly when doing an article search query.

    NOTE: these tests hit the live GDELT API.
    """
    def setUp(self):
        # Query a one-day window ending six days ago so results exist but
        # stay within the API's supported range.
        self.start_date = (datetime.today() - timedelta(days=7)).strftime("%Y-%m-%d")
        self.end_date = (datetime.today() - timedelta(days=6)).strftime("%Y-%m-%d")
        f = Filters(
            keyword="environment",
            start_date=self.start_date,
            end_date=self.end_date
        )
        self.articles = GdeltDoc().article_search(f)
    def tearDown(self):
        pass
    def test_articles_is_a_df(self):
        self.assertEqual(type(self.articles), pd.DataFrame)
    def test_correct_columns(self):
        self.assertEqual(
            list(self.articles.columns),
            [
                "url",
                "url_mobile",
                "title",
                "seendate",
                "socialimage",
                "domain",
                "language",
                "sourcecountry",
            ],
        )
    def test_rows_returned(self):
        # This test could fail if there really are no articles
        # that match the filter, but given the query used for
        # testing that's very unlikely.
        self.assertGreaterEqual(self.articles.shape[0], 1)
class TimelineSearchTestCase(unittest.TestCase):
    """
    Test that the various modes of timeline search behave correctly.

    NOTE: these tests hit the live GDELT API.
    """
    # Make one set of API calls per tests suite run, not one per test
    @classmethod
    def setUpClass(cls):
        cls.start_date = (datetime.today() - timedelta(days = 7)).strftime("%Y-%m-%d")
        cls.end_date = (datetime.today() - timedelta(days = 6)).strftime("%Y-%m-%d")
        f = Filters(
            keyword="environment",
            start_date=cls.start_date,
            end_date=cls.end_date
        )
        gd = GdeltDoc()
        # One result DataFrame per supported timeline mode, in this order.
        cls.all_results = [
            gd.timeline_search(mode, f) for mode in
            ["timelinevol", "timelinevolraw", "timelinelang", "timelinetone", "timelinesourcecountry"]
        ]
    def test_all_modes_return_a_df(self):
        self.assertTrue(
            all([type(result) == pd.DataFrame for result in self.all_results])
        )
    def test_all_modes_return_data(self):
        self.assertTrue(
            all([result.shape[0] >= 1 for result in self.all_results])
        )
    def test_unsupported_mode(self):
        with self.assertRaisesRegex(ValueError, "not in supported API modes"):
            GdeltDoc().timeline_search(
                "unsupported",
                Filters(
                    keyword="environment",
                    start_date=self.start_date,
                    end_date=self.end_date
                )
            )
    def test_vol_has_two_columns(self):
        # timelinevol: datetime + volume intensity.
        self.assertEqual(self.all_results[0].shape[1], 2)
    def test_vol_raw_has_three_columns(self):
        # timelinevolraw: datetime + article count + total monitored.
        self.assertEqual(self.all_results[1].shape[1], 3)
class QueryTestCase(unittest.TestCase):
    """Test low-level `_query` error handling against the live API."""
    def test_handles_invalid_query_string(self):
        # BUGFIX: the query string previously read "environment×pan=mins15":
        # the "×" is a mojibake of the "&times" prefix of "&timespan" being
        # decoded as the HTML entity. Restored to the intended query, which
        # is still invalid ("mins15" is not a valid timespan) so the API
        # error path is exercised.
        with self.assertRaisesRegex(ValueError, "The query was not valid. The API error message was"):
            GdeltDoc()._query("artlist", "environment&timespan=mins15")
gdelt-doc-api | gdelt-doc-api-main/tests/__init__.py | 0 | 0 | 0 | py | |
gdelt-doc-api | gdelt-doc-api-main/gdeltdoc/errors.py | 0 | 0 | 0 | py | |
gdelt-doc-api | gdelt-doc-api-main/gdeltdoc/_version.py | version = "1.5.0" | 17 | 17 | 17 | py |
gdelt-doc-api | gdelt-doc-api-main/gdeltdoc/__init__.py | from gdeltdoc.api_client import GdeltDoc
from gdeltdoc.filters import Filters, near, repeat, multi_repeat, VALID_TIMESPAN_UNITS
from gdeltdoc._version import version
__version__ = version
| 189 | 30.666667 | 86 | py |
gdelt-doc-api | gdelt-doc-api-main/gdeltdoc/filters.py | from typing import Optional, List, Union, Tuple
from string import ascii_lowercase, digits
Filter = Union[List[str], str]
VALID_TIMESPAN_UNITS = ["min", "h", "hours", "d", "days", "w", "weeks", "m", "months"]
def near(n: int, *args) -> str:
    """
    Build the filter to find articles containing words that occur within
    `n` words of each other.

    eg. near(5, "airline", "climate") finds articles containing both
    "airline" and "climate" within 5 words.

    Raises ValueError if fewer than two words are given.
    """
    if len(args) < 2:
        raise ValueError("At least two words must be provided")
    words = " ".join(args)
    return f'near{n}:"{words}" '
def repeat(n: int, keyword: str) -> str:
    """
    Build the filter to find articles containing `keyword` at least `n` times.

    eg. repeat(2, "environment") finds articles containing the word
    "environment" at least twice.

    Only single word repetitions are allowed; a keyword containing a space
    raises ValueError.
    """
    if " " in keyword:
        raise ValueError("Only single words can be repeated")
    return f'repeat{n}:"{keyword}" '
def multi_repeat(repeats: List[Tuple[int, str]], method: str) -> str:
    """
    Build the filter to find articles containing multiple repeated words.

    eg. multi_repeat([(2, "airline"), (3, "airport")], "AND") finds articles
    that contain the word "airline" at least twice and "airport" at least
    3 times.

    Params
    ------
    repeats: A list of (count, word) tuples. Eg. [(2, "airline"), (3, "airport")]
    method: How to combine the restrictions. Must be one of "AND" or "OR"
    """
    if method not in ["AND", "OR"]:
        raise ValueError(f"method must be one of AND or OR, not {method}")
    clauses = []
    for count, word in repeats:
        # Same validation as repeat(): only single words may be repeated.
        if " " in word:
            raise ValueError("Only single words can be repeated")
        clauses.append(f'repeat{count}:"{word}" ')
    joined = f"{method} ".join(clauses)
    # OR groups are additionally wrapped in parentheses.
    return joined if method == "AND" else "(" + joined + ")"
class Filters:
    def __init__(
        self,
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
        timespan: Optional[str] = None,
        num_records: int = 250,
        keyword: Optional[Filter] = None,
        domain: Optional[Filter] = None,
        domain_exact: Optional[Filter] = None,
        near: Optional[str] = None,
        repeat: Optional[str] = None,
        country: Optional[Filter] = None,
        theme: Optional[Filter] = None,
    ) -> None:
        """
        Construct filters for the GDELT API.
        Filters for `keyword`, `domain`, `domain_exact`, `country` and `theme`
        can be passed either as a single string or as a list of strings. If a list is
        passed, the values in the list are wrapped in a boolean OR.
        Params
        ------
        start_date
            The start date for the filter in YYYY-MM-DD format. The API officially only supports the
            most recent 3 months of articles. Making a request for an earlier date range may still
            return data, but it's not guaranteed.
            Must provide either `start_date` and `end_date` or `timespan`
        end_date
            The end date for the filter in YYYY-MM-DD format.
        timespan
            A timespan to search for, relative to the time of the request. Must match one of the API's timespan
            formats - https://blog.gdeltproject.org/gdelt-doc-2-0-api-debuts/
            Must provide either `start_date` and `end_date` or `timespan`
        num_records
            The number of records to return. Only used in article list mode and can be up to 250.
        keyword
            Return articles containing the exact phrase `keyword` within the article text.
        domain
            Return articles from the specified domain. Does not require an exact match so
            passing "cnn.com" will match articles from "cnn.com", "subdomain.cnn.com" and "notactuallycnn.com".
        domain_exact
            Similar to `domain`, but requires an exact match.
        near
            Return articles containing words close to each other in the text. Use `near()` to construct.
            eg. near = near(5, "airline", "climate").
        repeat
            Return articles containing a single word repeated at least a number of times. Use `repeat()`
            to construct. eg. repeat = repeat(3, "environment").
            If you want to construct a filter with multiple repeated words, construct with `multi_repeat()`
            instead. eg. repeat = multi_repeat([(2, "airline"), (3, "airport")], "AND")
        country
            Return articles published in a country, formatted as the FIPS 2 letter country code.
        theme
            Return articles that cover one of GDELT's GKG Themes. A full list of themes can be
            found here: http://data.gdeltproject.org/api/v2/guides/LOOKUP-GKGTHEMES.TXT

        Raises
        ------
        ValueError
            If the date/timespan combination is invalid, the timespan format
            is not recognised, or `num_records` exceeds 250.
        """
        self.query_params: List[str] = []
        self._valid_countries: List[str] = []
        self._valid_themes: List[str] = []

        # Check we have either start/end date or timespan, but not both
        if not start_date and not end_date and not timespan:
            raise ValueError("Must provide either start_date and end_date, or timespan")
        if timespan and (start_date or end_date):
            raise ValueError(
                "Can only provide either start_date and end_date, or timespan"
            )
        # Fix: a lone start_date or end_date previously slipped through the checks
        # above and crashed later with an AttributeError on None. Fail fast instead.
        if bool(start_date) != bool(end_date):
            raise ValueError("Must provide both start_date and end_date")

        if keyword:
            self.query_params.append(self._keyword_to_string(keyword))
        if domain:
            self.query_params.append(self._filter_to_string("domain", domain))
        if domain_exact:
            self.query_params.append(self._filter_to_string("domainis", domain_exact))
        if country:
            self.query_params.append(self._filter_to_string("sourcecountry", country))
        if theme:
            self.query_params.append(self._filter_to_string("theme", theme))
        if near:
            self.query_params.append(near)
        if repeat:
            self.query_params.append(repeat)

        if start_date:
            # Dates are sent as YYYYMMDDHHMMSS; the time component is fixed to midnight.
            self.query_params.append(
                f'&startdatetime={start_date.replace("-", "")}000000'
            )
            self.query_params.append(f'&enddatetime={end_date.replace("-", "")}000000')
        else:
            # Use timespan
            self._validate_timespan(timespan)
            self.query_params.append(f"&timespan={timespan}")

        if num_records > 250:
            raise ValueError(f"num_records must 250 or less, not {num_records}")
        self.query_params.append(f"&maxrecords={str(num_records)}")

    @property
    def query_string(self) -> str:
        """The concatenated query parameters, ready to append to the API URL."""
        return "".join(self.query_params)

    @staticmethod
    def _filter_to_string(name: str, f: Filter) -> str:
        """
        Convert a Filter into the string representation needed for the API.
        Params
        ------
        name
            The filter name as required by the API
        f
            The Filter to convert
        Returns
        -------
        str
            The converted filter. Eg. "domain:cnn.com"
        """
        if isinstance(f, str):
            return f"{name}:{f} "
        else:
            # Build an OR statement
            return "(" + " OR ".join([f"{name}:{clause}" for clause in f]) + ") "

    @staticmethod
    def _keyword_to_string(keywords: Filter) -> str:
        """
        Convert a Filter for keywords into the string for the API.
        The keyword argument is different to all the others in that there's
        no parameter name needed and if a key phrase is passed it
        must be wrapped in quotes.
        Params
        ------
        keyword
            The keyword Filter
        Returns
        -------
        str
            The converted filter eg. "(airline OR shipping)"
        """
        if isinstance(keywords, str):
            return f'"{keywords}" '
        else:
            # Multi-word phrases must be quoted; single words are left bare.
            return (
                "("
                + " OR ".join(
                    [f'"{word}"' if " " in word else word for word in keywords]
                )
                + ") "
            )

    @staticmethod
    def _validate_timespan(timespan: str) -> None:
        """
        Validate that the supplied timespan is in a format recognised by the API.
        Raises a `ValueError` if the timespan is not recognised.
        Supported timespan units are:
        - minutes - 15min
        - hours - 24h or 24hours
        - days - 30d or 30days
        - months - 2m or 2months
        Params
        ------
        timespan
            The timespan filter to be checked
        Returns
        -------
        None
        """
        # Split "24hours" into value="24" and unit="hours".
        value = timespan.rstrip(ascii_lowercase)
        unit = timespan[len(value):]

        if unit not in VALID_TIMESPAN_UNITS:
            raise ValueError(f"Timespan {timespan} is invalid. {unit} is not a supported unit, must be one of {' '.join(VALID_TIMESPAN_UNITS)}")

        if not all(d in digits for d in value):
            raise ValueError(f"Timespan {timespan} is invalid. {value} could not be converted into an integer")

        if unit == "min" and int(value) < 60:
            raise ValueError(f"Timespan {timespan} is invalid. Period must be at least 60 minutes")
| 9,253 | 33.401487 | 144 | py |
gdelt-doc-api | gdelt-doc-api-main/gdeltdoc/helpers.py | import json
def load_json(json_message, max_recursion_depth: int = 100, recursion_depth: int = 0):
    """
    Tries to load a json formatted string and removes offending characters if present.
    https://stackoverflow.com/questions/37805751/simplejson-scanner-jsondecodeerror-invalid-x-escape-sequence-us-line-1-colu

    :param json_message: JSON payload as ``str`` or ``bytes``.
    :param max_recursion_depth: maximum number of offending characters removed
        before giving up with a ``ValueError``.
    :param recursion_depth: internal counter, do not set from the outside.
    :return: the parsed JSON object.
    """
    try:
        return json.loads(json_message)
    # Only parse errors carry a usable ``.pos``; anything else should propagate.
    except json.JSONDecodeError as e:
        if recursion_depth >= max_recursion_depth:
            raise ValueError("Max Recursion depth is reached. JSON can´t be parsed!")
        if isinstance(json_message, bytes):
            # Fix: the decoded result was previously discarded, so a failing
            # bytes payload was iterated as ints and mangled. Rebind it.
            json_message = json_message.decode("utf-8")
        # Blank out the offending character and retry.
        chars = list(json_message)
        chars[e.pos] = ' '
        return load_json(json_message=''.join(chars), max_recursion_depth=max_recursion_depth,
                         recursion_depth=recursion_depth + 1)
| 1,166 | 37.9 | 124 | py |
gdelt-doc-api | gdelt-doc-api-main/gdeltdoc/api_client.py | import requests
import pandas as pd
from gdeltdoc.filters import Filters
from typing import Dict
from gdeltdoc.helpers import load_json
from gdeltdoc._version import version
class GdeltDoc:
    """
    API client for the GDELT 2.0 Doc API
    ```
    from gdeltdoc import GdeltDoc, Filters
    f = Filters(
        keyword = "climate change",
        start_date = "2020-05-10",
        end_date = "2020-05-11"
    )
    gd = GdeltDoc()
    # Search for articles matching the filters
    articles = gd.article_search(f)
    # Get a timeline of the number of articles matching the filters
    timeline = gd.timeline_search("timelinevol", f)
    ```
    ### Article List
    The article list mode of the API generates a list of news articles that match the filters.
    The client returns this as a pandas DataFrame with columns `url`, `url_mobile`, `title`,
    `seendate`, `socialimage`, `domain`, `language`, `sourcecountry`.
    ### Timeline Search
    There are 5 available modes when making a timeline search:
    * `timelinevol` - a timeline of the volume of news coverage matching the filters,
    represented as a percentage of the total news articles monitored by GDELT.
    * `timelinevolraw` - similar to `timelinevol`, but has the actual number of articles
    and a total rather than a percentage
    * `timelinelang` - similar to `timelinevol` but breaks the total articles down by published language.
    Each language is returned as a separate column in the DataFrame.
    * `timelinesourcecountry` - similar to `timelinevol` but breaks the total articles down by the country
    they were published in. Each country is returned as a separate column in the DataFrame.
    * `timelinetone` - a timeline of the average tone of the news coverage matching the filters.
    See [GDELT's documentation](https://blog.gdeltproject.org/gdelt-doc-2-0-api-debuts/)
    for more information about the tone metric.
    """
    def __init__(self, json_parsing_max_depth: int = 100) -> None:
        """
        Params
        ------
        json_parsing_max_depth
            A parameter for the json parsing function that removes illegal character. If 100 it will remove at max
            100 characters before exiting with an exception
        """
        self.max_depth_json_parsing = json_parsing_max_depth
    def article_search(self, filters: Filters) -> pd.DataFrame:
        """
        Make a query against the `ArtList` API to return a DataFrame of news articles that
        match the supplied filters.
        Params
        ------
        filters
            A `gdelt-doc.Filters` object containing the filter parameters for this query.
        Returns
        -------
        pd.DataFrame
            A pandas DataFrame of the articles returned from the API.
        """
        articles = self._query("artlist", filters.query_string)
        # A response with no matches carries no "articles" key; return an empty frame then.
        if "articles" in articles:
            return pd.DataFrame(articles["articles"])
        else:
            return pd.DataFrame()
    def timeline_search(self, mode: str, filters: Filters) -> pd.DataFrame:
        """
        Make a query using one of the API's timeline modes.
        Params
        ------
        mode
            The API mode to call. Must be one of "timelinevol", "timelinevolraw",
            "timelinetone", "timelinelang", "timelinesourcecountry".
            See https://blog.gdeltproject.org/gdelt-doc-2-0-api-debuts/ for a
            longer description of each mode.
        filters
            A `gdelt-doc.Filters` object containing the filter parameters for this query.
        Returns
        -------
        pd.DataFrame
            A pandas DataFrame of the articles returned from the API.
        """
        timeline = self._query(mode, filters.query_string)
        # The first series supplies the datetime axis; it is shared by all series.
        results = {"datetime": [entry["date"] for entry in timeline["timeline"][0]["data"]]}
        # One column per returned series (language, country, volume, ... depending on mode).
        for series in timeline["timeline"]:
            results[series["series"]] = [entry["value"] for entry in series["data"]]
        # The raw-volume mode also reports the total monitored article count ("norm").
        if mode == "timelinevolraw":
            results["All Articles"] = [
                entry["norm"] for entry in timeline["timeline"][0]["data"]
            ]
        formatted = pd.DataFrame(results)
        formatted["datetime"] = pd.to_datetime(formatted["datetime"])
        return formatted
    def _query(self, mode: str, query_string: str) -> Dict:
        """
        Submit a query to the GDELT API and return the results as a parsed JSON object.
        Params
        ------
        mode
            The API mode to call. Must be one of "artlist", "timelinevol",
            "timelinevolraw", "timelinetone", "timelinelang", "timelinesourcecountry".
        query_string
            The query parameters and date range to call the API with.
        Returns
        -------
        Dict
            The parsed JSON response from the API.
        """
        if mode not in [
            "artlist",
            "timelinevol",
            "timelinevolraw",
            "timelinetone",
            "timelinelang",
            "timelinesourcecountry",
        ]:
            raise ValueError(f"Mode {mode} not in supported API modes")
        # Identify the client so API-side issues can be traced back to this library.
        headers = {
            "User-Agent": f"GDELT DOC Python API client {version} - https://github.com/alex9smith/gdelt-doc-api"
        }
        response = requests.get(
            f"https://api.gdeltproject.org/api/v2/doc/doc?query={query_string}&mode={mode}&format=json",
            headers=headers
        )
        if response.status_code not in [200, 202]:
            raise ValueError("The gdelt api returned a non-successful statuscode. This is the response message: {}".
                             format(response.text))
        # Response is text/html if it's an error and application/json if it's ok
        if "text/html" in response.headers["content-type"]:
            raise ValueError(f"The query was not valid. The API error message was: {response.text.strip()}")
        # load_json tolerates stray illegal characters in the API payload.
        return load_json(response.content, self.max_depth_json_parsing)
| 6,042 | 34.757396 | 116 | py |
SPIGA | SPIGA-main/spiga/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/eval/results_gen.py | import pkg_resources
import json
import copy
import torch
import spiga.data.loaders.dl_config as dl_cfg
import spiga.data.loaders.dataloader as dl
import spiga.inference.pretreatment as pretreat
from spiga.inference.framework import SPIGAFramework
from spiga.inference.config import ModelConfig
def main():
    """Command-line entry point: run a SPIGA model over a dataset split and dump results."""
    import argparse
    parser = argparse.ArgumentParser(description='Experiment results generator')
    parser.add_argument('database', type=str, help='Database name',
                        choices=['wflw', '300wpublic', '300wprivate', "merlrav", "cofw68"])
    parser.add_argument('-a', '--anns', type=str, default='test', help='Annotations type: test, valid or train')
    parser.add_argument('--gpus', type=int, default=0, help='GPU Id')
    args = parser.parse_args()

    # Build the inference framework for the requested database and GPU.
    framework = SPIGAFramework(ModelConfig(args.database), gpus=[args.gpus])

    # Run the full evaluation without autograd bookkeeping.
    runner = Tester(framework, args.database, anns_type=args.anns)
    with torch.no_grad():
        runner.generate_results()
class Tester:
    """Runs a SPIGA model over an evaluation split and saves per-image predictions as JSON.

    Output is written to ``spiga/eval/results/results_<database>_<anns_type>.json``.
    """
    def __init__(self, model_framework, database, anns_type='test'):
        # model_framework: initialized SPIGAFramework used for inference.
        # database: dataset name (e.g. 'wflw', '300wpublic').
        # anns_type: annotation split to evaluate ('test', 'valid' or 'train').
        # Parameters
        self.anns_type = anns_type
        self.database = database
        # Model initialization
        self.model_framework = model_framework
        # Dataloader
        self.dl_eval = dl_cfg.AlignConfig(self.database, mode=self.anns_type)
        self.dl_eval.aug_names = []  # no augmentation at evaluation time
        self.dl_eval.shuffle = False  # keep dataset order so results align with annotations
        # Mirror the model's geometry settings so inputs match what it was trained on.
        self.dl_eval.target_dist = self.model_framework.model_cfg.target_dist
        self.dl_eval.image_size = self.model_framework.model_cfg.image_size
        self.dl_eval.ftmap_size = self.model_framework.model_cfg.ftmap_size
        self.batch_size = 1
        self.test_data, _ = dl.get_dataloader(self.batch_size, self.dl_eval,
                                              pretreat=pretreat.NormalizeAndPermute(), debug=True)
        # Results
        # Template record written per image; deep-copied for each sample.
        self.data_struc = {'imgpath': str, 'bbox': None, 'headpose': None, 'ids': None, 'landmarks': None, 'visible': None}
        self.result_path = pkg_resources.resource_filename('spiga', 'eval/results')
        self.result_file = '/results_%s_%s.json' % (self.database, self.anns_type)
        self.file_out = self.result_path + self.result_file
    def generate_results(self):
        """Run inference over the whole split and dump one record per image to ``self.file_out``."""
        data = []
        for step, batch in enumerate(self.test_data):
            print('Step: ', step)
            inputs = self.model_framework.select_inputs(batch)
            outputs_raw = self.model_framework.net_forward(inputs)
            # Postprocessing
            # Map network outputs back to the original (uncropped) image coordinates.
            outputs = self.model_framework.postreatment(outputs_raw, batch['bbox'], batch['bbox_raw'])
            # Data
            data_dict = copy.deepcopy(self.data_struc)
            # Batch size is 1, so index 0 selects the single sample.
            data_dict['imgpath'] = batch['imgpath_local'][0]
            data_dict['bbox'] = batch['bbox_raw'][0].numpy().tolist()
            data_dict['visible'] = batch['visible'][0].numpy().tolist()
            data_dict['ids'] = self.dl_eval.database.ldm_ids
            data_dict['landmarks'] = outputs['landmarks'][0]
            data_dict['headpose'] = outputs['headpose'][0]
            data.append(data_dict)
        # Save outputs
        with open(self.file_out, 'w') as outfile:
            json.dump(data, outfile)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 3,346 | 37.034091 | 123 | py |
SPIGA | SPIGA-main/spiga/eval/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/eval/benchmark/evaluator.py | import json
import pkg_resources
from collections import OrderedDict
# Paths
# Root directory of the packaged ground-truth annotation files (resolved at import time).
data_path = pkg_resources.resource_filename('spiga', 'data/annotations')
def main():
    """Command-line entry point: evaluate one or more prediction files against their annotations."""
    import argparse
    parser = argparse.ArgumentParser(description='Benchmark alignments evaluator')
    parser.add_argument('pred_file', nargs='+', type=str, help='Absolute path to the prediction json file (Multi file)')
    parser.add_argument('--eval', nargs='+', type=str, default=['lnd'],
                        choices=['lnd', 'pose'], help='Evaluation modes')
    parser.add_argument('-s', '--save', action='store_true', help='Save results')
    args = parser.parse_args()

    # Each prediction file is evaluated independently.
    for prediction in args.pred_file:
        evaluator = get_evaluator(prediction, args.eval, args.save)
        evaluator.metrics()
class Evaluator:
    """Loads predictions plus matching ground-truth annotations and runs metric objects on them.

    The database name and split are parsed from the prediction file name, which
    must follow the ``results_<database>_<type>.json`` convention.
    """
    def __init__(self, data_file, evals=(), save=True, process_err=True):
        # data_file: path to a predictions file named results_<database>_<type>.json.
        # evals: iterable of metric objects (e.g. MetricsLandmarks, MetricsHeadpose).
        # save: if True, metrics() also writes a metrics_*.txt report next to data_file.
        # process_err: if True, per-image errors are computed immediately.
        # Inputs
        self.data_file = data_file
        self.evals = evals
        self.save = save
        # Paths
        data_name = data_file.split('/')[-1]
        self.data_dir = data_file.split(data_name)[0]
        # Information from name
        data_name = data_name.split('.')[0]
        data_name = data_name.split('_')
        self.data_type = data_name[-1]
        self.database = data_name[-2]
        # Load predictions and annotations
        # Ground truth lives in the packaged annotations tree (see module-level data_path).
        anns_file = data_path + '/%s/%s.json' % (self.database, self.data_type)
        self.anns = self.load_files(anns_file)
        self.pred = self.load_files(data_file)
        # Compute errors
        self.error = OrderedDict()
        self.error_pimg = OrderedDict()
        self.metrics_log = OrderedDict()
        if process_err:
            self.compute_error(self.anns, self.pred)
    def compute_error(self, anns, pred, select_ids=None):
        """Run every metric's error computation and return the accumulated error dict."""
        database_ref = [self.database, self.data_type]
        for eval in self.evals:
            self.error[eval.name] = eval.compute_error(anns, pred, database_ref, select_ids)
            # Metrics also contribute their per-image errors to a shared dict.
            self.error_pimg = eval.get_pimg_err(self.error_pimg)
        return self.error
    def metrics(self):
        """Compute summary metrics for every metric object; optionally save a text report."""
        for eval in self.evals:
            self.metrics_log[eval.name] = eval.metrics()
        if self.save:
            file_name = self.data_dir + '/metrics_%s_%s.txt' % (self.database, self.data_type)
            with open(file_name, 'w') as file:
                file.write(str(self))
        return self.metrics_log
    def load_files(self, input_file):
        """Load and return the parsed contents of a JSON file."""
        with open(input_file) as jsonfile:
            data = json.load(jsonfile)
        return data
    def _dict2text(self, name, dictionary, num_tab=1):
        """Recursively render a (possibly nested) dict as an indented text block."""
        prev_tabs = '\t'*num_tab
        text = '%s {\n' % name
        for k, v in dictionary.items():
            if isinstance(v, OrderedDict) or isinstance(v, dict):
                text += '{}{}'.format(prev_tabs, self._dict2text(k, v, num_tab=num_tab+1))
            else:
                text += '{}{}: {}\n'.format(prev_tabs, k, v)
        text += (prev_tabs + '}\n')
        return text
    def __str__(self):
        state_dict = self.metrics_log
        text = self._dict2text('Metrics', state_dict)
        return text
def get_evaluator(pred_file, evaluate=('lnd', 'pose'), save=False, process_err=True):
    """Build an Evaluator wired with the metric objects named in ``evaluate``.

    ``evaluate`` may contain 'lnd' (landmarks) and/or 'pose' (headpose).
    """
    selected_metrics = []
    if "lnd" in evaluate:
        # Metric modules are imported lazily so unused ones are never loaded.
        import spiga.eval.benchmark.metrics.landmarks as mlnd
        selected_metrics.append(mlnd.MetricsLandmarks())
    if "pose" in evaluate:
        import spiga.eval.benchmark.metrics.pose as mpose
        selected_metrics.append(mpose.MetricsHeadpose())
    return Evaluator(pred_file, evals=selected_metrics, save=save, process_err=process_err)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 3,631 | 32.321101 | 118 | py |
SPIGA | SPIGA-main/spiga/eval/benchmark/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/eval/benchmark/metrics/landmarks.py | import os
import numpy as np
import json
from collections import OrderedDict
from scipy.integrate import simps
from spiga.data.loaders.dl_config import db_anns_path
from spiga.eval.benchmark.metrics.metrics import Metrics
class MetricsLandmarks(Metrics):
    """Landmark alignment metrics: NME, percentile NME, AUC and failure rate.

    Errors are normalized per image by a reference distance (inter-ocular
    corners by default, configurable interactively via ``_update_lnd_param``)
    and expressed as percentages.
    """
    def __init__(self, name='landmarks'):
        super().__init__(name)
        # Database specifics loaded from db_info.json (norms, subsets).
        self.db_info = None
        # Normalization reference and failure threshold (may be overridden interactively).
        self.nme_norm = "corners"
        self.nme_thr = 8
        # Percentiles reported alongside the mean NME.
        self.percentile = [90, 95, 99]
        # Cumulative plot axis length
        self.bins = 10000
    def compute_error(self, data_anns, data_pred, database, select_ids=None):
        """Compute normalized errors per landmark and per image across the dataset.

        Fills ``self.error`` with 'ne_per_img', 'ne_per_ldm', 'nme_per_img'
        and 'cumulative_nme'. ``select_ids`` optionally restricts the
        evaluated landmark ids.
        """
        # Initialize global logs and variables of Computer Error function
        self.init_ce(data_anns, data_pred, database)
        self._update_lnd_param()
        # Order data and compute nme
        self.error['nme_per_img'] = []
        self.error['ne_per_img'] = OrderedDict()
        self.error['ne_per_ldm'] = OrderedDict()
        for img_id, anns in enumerate(data_anns):
            # Init variables per img
            pred = data_pred[img_id]
            # Get select ids to compute
            if select_ids is None:
                selected_ldm = anns['ids']
            else:
                # Only evaluate landmarks both requested and annotated for this image.
                selected_ldm = list(set(select_ids) & set(anns['ids']))
            norm = self._get_img_norm(anns)
            for ldm_id in selected_ldm:
                # Compute Normalize Error
                anns_ldm = self._get_lnd_from_id(anns, ldm_id)
                pred_ldm = self._get_lnd_from_id(pred, ldm_id)
                ne = self._dist_l2(anns_ldm, pred_ldm)/norm * 100
                self.error['ne_per_img'].setdefault(img_id, []).append(ne)
                self.error['ne_per_ldm'].setdefault(ldm_id, []).append(ne)
            # NME per image
            if self.database in ['merlrav']:
                # LUVLI at MERLRAV divide by 68 despite the annotated landmarks in the image.
                self.error['nme_per_img'].append(np.sum(self.error['ne_per_img'][img_id])/68)
            else:
                self.error['nme_per_img'].append(np.mean(self.error['ne_per_img'][img_id]))
        # Cumulative NME
        self.error['cumulative_nme'] = self._cumulative_error(self.error['nme_per_img'], bins=self.bins)
        return self.error
    def metrics(self):
        """Summarise the computed errors: NME, percentile NMEs, AUC and FR,
        overall, per test subset (if defined) and per landmark."""
        # Initialize global logs and variables of Metrics function
        self.init_metrics()
        # Basic metrics (NME/NMPE/AUC/FR) for full dataset
        nme, nmpe, auc, fr, _, _ = self._basic_metrics()
        print('NME: %.3f' % nme)
        self.metrics_log['nme'] = nme
        for percent_id, percentile in enumerate(self.percentile):
            print('NME_P%i: %.3f' % (percentile, nmpe[percent_id]))
            self.metrics_log['nme_p%i' % percentile] = nmpe[percent_id]
        self.metrics_log['nme_thr'] = self.nme_thr
        self.metrics_log['nme_norm'] = self.nme_norm
        print('AUC_%i: %.3f' % (self.nme_thr, auc))
        self.metrics_log['auc'] = auc
        print('FR_%i: %.3f' % (self.nme_thr, fr))
        self.metrics_log['fr'] = fr
        # Subset basic metrics
        subsets = self.db_info['test_subsets']
        if self.data_type == 'test' and len(subsets) > 0:
            self.metrics_log['subset'] = OrderedDict()
            for subset, img_filter in subsets.items():
                self.metrics_log['subset'][subset] = OrderedDict()
                nme, nmpe, auc, fr, _, _ = self._basic_metrics(img_select=img_filter)
                print('> Landmarks subset: %s' % subset.upper())
                print('NME: %.3f' % nme)
                self.metrics_log['subset'][subset]['nme'] = nme
                for percent_id, percentile in enumerate(self.percentile):
                    print('NME_P%i: %.3f' % (percentile, nmpe[percent_id]))
                    self.metrics_log['subset'][subset]['nme_p%i' % percentile] = nmpe[percent_id]
                print('AUC_%i: %.3f' % (self.nme_thr, auc))
                self.metrics_log['subset'][subset]['auc'] = auc
                print('FR_%i: %.3f' % (self.nme_thr, fr))
                self.metrics_log['subset'][subset]['fr'] = fr
        # NME/NPE per landmark
        self.metrics_log['nme_per_ldm'] = OrderedDict()
        for percentile in self.percentile:
            self.metrics_log['npe%i_per_ldm' % percentile] = OrderedDict()
        for k, v in self.error['ne_per_ldm'].items():
            self.metrics_log['nme_per_ldm'][k] = np.mean(v)
            for percentile in self.percentile:
                self.metrics_log['npe%i_per_ldm' % percentile][k] = np.percentile(v, percentile)
        return self.metrics_log
    def get_pimg_err(self, data_dict=None, img_select=None):
        """Export the per-image NME list (optionally filtered to ``img_select``)
        under the '<name>/nme' key of ``data_dict``."""
        data = self.error['nme_per_img']
        if img_select is not None:
            data = [data[img_id] for img_id in img_select]
        name_dict = self.name + '/nme'
        if data_dict is not None:
            data_dict[name_dict] = data
        else:
            data_dict = data
        return data_dict
    def _update_lnd_param(self):
        """Load database-specific norm settings from db_info.json.

        NOTE: interactive — prompts on stdin to optionally override the
        normalization reference and failure threshold.
        """
        db_info_file = db_anns_path.format(database=self.database, file_name='db_info')
        if os.path.exists(db_info_file):
            with open(db_info_file) as jsonfile:
                self.db_info = json.load(jsonfile)
            norm_dict = self.db_info['norm']
            # The first entry of the norm dict is the database default.
            nme_norm, nme_thr = next(iter(norm_dict.items()))
            print('Default landmarks configuration: \n %s: %i' % (nme_norm, nme_thr))
            answer = input("Change default config? (N/Y) >>> ")
            if answer.lower() in ['yes', 'y']:
                answer = input("Normalization options: %s >>> " % str(list(norm_dict.keys())))
                if answer in norm_dict.keys():
                    nme_norm = answer
                    nme_thr = norm_dict[nme_norm]
                else:
                    print("Option %s not available keep in default one: %s" % (answer, nme_norm))
                answer = input("Change threshold ->%s:%i ? (N/Y) >>> " % (nme_norm, nme_thr))
                if answer.lower() in ['yes', 'y']:
                    answer = input('NME threshold: >>> ')
                    nme_thr = float(answer)
                else:
                    print("Keeping default threshold: %i" % nme_thr)
            self.nme_norm = nme_norm
            self.nme_thr = nme_thr
        else:
            raise ValueError('Database %s specifics not defined. Missing db_info.json' % self.database)
    def _dist_l2(self, pointA, pointB):
        # Euclidean distance between two 2D points (numpy arrays).
        return float(((pointA - pointB) ** 2).sum() ** 0.5)
    def _get_lnd_from_id(self, anns, ids):
        # Look up the (x, y) coordinates of landmark id ``ids`` in an annotation record.
        idx = anns['ids'].index(ids)
        ref = np.array(anns['landmarks'][idx])
        return ref
    def _get_img_norm(self, anns):
        """Return the per-image normalization distance for ``self.nme_norm``.

        Supported modes: pupils, corners, diagonal, height, lnd_bbox, bbox.
        """
        if self.nme_norm == 'pupils':
            print('WARNING: Pupils norm only implemented for 68 landmark configuration')
            # Pupil centers approximated as the mean of the six eye-contour landmarks.
            left_eye = [7, 138, 139, 8, 141, 142]
            right_eye = [11, 144, 145, 12, 147, 148]
            refA = np.zeros(2)
            refB = np.zeros(2)
            for i in range(len(left_eye)):
                refA += self._get_lnd_from_id(anns, left_eye[i])
                refB += self._get_lnd_from_id(anns, right_eye[i])
            refA = refA/len(left_eye)   # Left
            refB = refB/len(right_eye)  # Right
        elif self.nme_norm == 'corners':
            refA = self._get_lnd_from_id(anns, 12)  # Left
            refB = self._get_lnd_from_id(anns, 7)   # Right
        elif self.nme_norm == 'diagonal':
            # Bounding-box diagonal: corner-to-corner distance.
            refA = anns['bbox'][0:2]
            refB = refA + anns['bbox'][2:4]
        elif self.nme_norm == 'height':
            return anns['bbox'][3]
        elif self.nme_norm == 'lnd_bbox':
            # Geometric mean of the tight landmark bounding-box sides.
            lnd = np.array(anns['landmarks'])
            lnd_max = np.max(lnd, axis=0)
            lnd_min = np.min(lnd, axis=0)
            lnd_wh = lnd_max - lnd_min
            return (lnd_wh[0]*lnd_wh[1])**0.5
        elif self.nme_norm == 'bbox':
            # Geometric mean of the annotated bounding-box sides.
            return (anns['bbox'][2] * anns['bbox'][3]) ** 0.5
        else:
            raise ValueError('Normalization %s not implemented' % self.nme_norm)
        return self._dist_l2(refA, refB)
    def _cumulative_error(self, error, bins=10000):
        # Build the cumulative error curve (fraction of images under each NME value).
        num_imgs, base = np.histogram(error, bins=bins)
        cumulative = [x / float(len(error)) for x in np.cumsum(num_imgs)]
        base = base[:bins]
        cumulative, base = self._filter_cumulative(cumulative, base)
        return [cumulative, base]
    def _filter_cumulative(self, cumulative, base):
        # Crop the curve at the failure threshold.
        base = [x for x in base if (x < self.nme_thr)]
        cumulative = cumulative[:len(base)]
        return cumulative, base
    def _basic_metrics(self, img_select=None):
        """Compute NME, percentile NMEs, AUC and FR, optionally on an image subset."""
        data = self.error['nme_per_img']
        if img_select is not None:
            data = [data[img_id] for img_id in img_select]
            [cumulative, base] = self._cumulative_error(data, bins=self.bins)
        else:
            # Full-dataset curve was already computed in compute_error().
            [cumulative, base] = self.error['cumulative_nme']
        # Normalize Mean Error across img
        nme = np.mean(data)
        # Normalize Mean Percentile Error across img
        nmpe = []
        for percentile in self.percentile:
            nmpe.append(np.percentile(data, percentile))
        # Area Under Curve and Failure Rate
        auc, fr = self._auc_fr_metrics(cumulative, base)
        return nme, nmpe, auc, fr, cumulative, base
    def _auc_fr_metrics(self, cumulative, base):
        """Integrate the cumulative curve (AUC, %) and read off the failure rate (%)."""
        if not base:
            # Every image is above the threshold: worst-case AUC/FR.
            auc = 0.
            fr = 100.
        else:
            auc = (simps(cumulative, x=base) / self.nme_thr) * 100.0
            # If the curve saturates before the threshold, credit the flat tail.
            if base[-1] < self.nme_thr and cumulative[-1] == 1:
                auc += ((self.nme_thr - base[-1]) / self.nme_thr) * 100
            fr = (1 - cumulative[-1]) * 100.0
        return auc, fr
| 9,779 | 40.265823 | 104 | py |
SPIGA | SPIGA-main/spiga/eval/benchmark/metrics/pose.py | import numpy as np
from sklearn.metrics import confusion_matrix
from spiga.eval.benchmark.metrics.metrics import Metrics
class MetricsHeadpose(Metrics):
    """Head pose metrics: per-angle MAE, quantized confusion matrices,
    label accuracy and success rates over yaw/pitch/roll (degrees)."""
    def __init__(self, name='headpose'):
        super().__init__(name)
        # Angles
        self.angles = ['yaw', 'pitch', 'roll']
        # Confusion matrix intervals
        self.pose_labels = [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]
        # Percentile reference angles
        self.error_labels = [2.5, 5, 10, 15, 30]
        # Cumulative plot axis length
        self.bins = 1000
    def compute_error(self, data_anns, data_pred, database, select_ids=None):
        """Compute absolute angular errors between predicted and annotated poses.

        If the dataset ships no pose annotations, they are generated with POSIT
        from the landmark annotations (see ``_posit_anns``).
        """
        # Initialize global logs and variables of Computer Error function
        self.init_ce(data_anns, data_pred, database)
        # Generate annotations if needed
        if data_anns[0]['headpose'] is None:
            print('Database anns generated by posit...')
            data_anns = self._posit_anns()
            print('Posit generation done...')
        # Dictionary variables
        self.error['data_pred'] = []
        self.error['data_anns'] = []
        self.error['data_pred_trl'] = []
        self.error['data_anns_trl'] = []
        self.error['mae_ypr'] = []
        self.error['mae_mean'] = []
        # Order data
        for img_id, img_anns in enumerate(data_anns):
            # First three pose components are the yaw/pitch/roll angles.
            pose_anns = img_anns['headpose'][0:3]
            self.error['data_anns'].append(pose_anns)
            pose_pred = data_pred[img_id]['headpose'][0:3]
            self.error['data_pred'].append(pose_pred)
        # Compute MAE error
        anns_array = np.array(self.error['data_anns'])
        pred_array = np.array(self.error['data_pred'])
        mae_ypr = np.abs((anns_array-pred_array))
        self.error['mae_ypr'] = mae_ypr.tolist()
        self.error['mae_mean'] = np.mean(mae_ypr, axis=-1).tolist()
        # Quantize labeled data
        # Angles are snapped to the nearest 15-degree bin for the confusion matrices.
        label_anns = self._nearest_label(anns_array)
        label_pred = self._nearest_label(pred_array)
        self.error['label_anns'] = label_anns
        self.error['label_pred'] = label_pred
        for angle_id, angle in enumerate(self.angles):
            # Confusion matrix
            self.error['cm_%s' % angle] = confusion_matrix(label_anns[:, angle_id], label_pred[:, angle_id])
            # Cumulative error
            self.error['cumulative_%s' % angle] = self._cumulative_error(mae_ypr[:, angle_id], bins=self.bins)
        return self.error
    def metrics(self):
        """Summarise pose errors: MAE, per-label accuracies and success rates."""
        # Initialize global logs and variables of Metrics function
        self.init_metrics()
        # Mean Absolute Error
        mae_ypr = np.array(self.error['mae_ypr'])
        mae_ypr_mean = np.mean(mae_ypr, axis=0)
        self.metrics_log['mae_ypr'] = mae_ypr_mean.tolist()
        self.metrics_log['mae_mean'] = np.mean(mae_ypr_mean)
        print('MAE [yaw, pitch, roll]: [%.3f, %.3f, %.3f]' % (mae_ypr_mean[0], mae_ypr_mean[1], mae_ypr_mean[2]))
        print('MAE mean: %.3f' % self.metrics_log['mae_mean'])
        # Per angle measurements
        self.metrics_log['acc_label'] = []
        self.metrics_log['acc_adj_label'] = []
        for angle_id, angle in enumerate(self.angles):
            # Accuracy per label
            cm = self.error['cm_%s' % angle]
            diagonal = np.diagonal(cm, offset=0).sum()
            acc_main = diagonal / cm.sum().astype('float')
            self.metrics_log['acc_label'].append(acc_main)
            # Permissive accuracy
            # Counts predictions falling in the adjacent bin as true positives.
            diagonal_adj = diagonal.sum() + np.diagonal(cm, offset=-1).sum() + np.diagonal(cm, offset=1).sum()
            acc_adj = diagonal_adj / cm.sum().astype('float')
            self.metrics_log['acc_adj_label'].append(acc_adj)
            # Percentile of relevant angles
            # Success rate: fraction of images whose error stays under each reference angle.
            self.metrics_log['sr_%s' % angle] = {}
            for angle_num in self.error_labels:
                if max(mae_ypr[:, angle_id]) > angle_num:
                    [cumulative, base] = self.error['cumulative_%s' % angle]
                    perc = [cumulative[x[0] - 1] for x in enumerate(base) if x[1] > angle_num][0]
                else:
                    perc = 1.
                self.metrics_log['sr_%s' % angle][angle_num] = perc
        print('Accuracy [yaw, pitch, roll]: ', self.metrics_log['acc_label'])
        print('Accuracy [yaw, pitch, roll] (adjacency as TP): ', self.metrics_log['acc_adj_label'])
        for angle in self.angles:
            print('Success Rate %s: ' % angle, self.metrics_log['sr_%s' % angle])
        return self.metrics_log
    def get_pimg_err(self, data_dict, img_select=None):
        """Export per-image MAE values (mean and per-angle) into ``data_dict``."""
        mae_mean = self.error['mae_mean']
        mae_ypr = self.error['mae_ypr']
        if img_select is not None:
            mae_mean = [mae_mean[img_id] for img_id in img_select]
            mae_ypr = [mae_ypr[img_id] for img_id in img_select]
        name_dict = self.name + '/%s'
        data_dict[name_dict % 'mae'] = mae_mean
        mae_ypr = np.array(mae_ypr)
        data_dict[name_dict % 'mae_yaw'] = mae_ypr[:, 0].tolist()
        data_dict[name_dict % 'mae_pitch'] = mae_ypr[:, 1].tolist()
        data_dict[name_dict % 'mae_roll'] = mae_ypr[:, 2].tolist()
        return data_dict
    def _posit_anns(self):
        """Generate pose pseudo-annotations with the dataloader's POSIT pipeline."""
        # Imported lazily: only needed when the database lacks pose annotations.
        import spiga.data.loaders.dl_config as dl_config
        import spiga.data.loaders.dataloader as dl
        # Load configuration
        data_config = dl_config.AlignConfig(self.database, self.data_type)
        data_config.image_size = (256, 256)
        data_config.generate_pose = True
        data_config.aug_names = []
        data_config.shuffle = False
        dataloader, _ = dl.get_dataloader(1, data_config, debug=True)
        data_anns = []
        for num_batch, batch_dict in enumerate(dataloader):
            pose = batch_dict['pose'].numpy()
            data_anns.append({'headpose': pose[0].tolist()})
        return data_anns
    def _nearest_label(self, data):
        # Map each angle (N x 3) to the index of the closest value in pose_labels.
        data_tile = data[:, :, np.newaxis]
        data_tile = np.tile(data_tile, len(self.pose_labels))
        diff_tile = np.abs(data_tile - self.pose_labels)
        label_idx = diff_tile.argmin(axis=-1)
        return label_idx
    def _cumulative_error(self, error, bins=1000):
        # Cumulative fraction of images under each error value.
        num_imgs, base = np.histogram(error, bins=bins)
        cumulative = [x / float(len(error)) for x in np.cumsum(num_imgs)]
        return [cumulative[:bins], base[:bins]]
| 6,373 | 38.8375 | 113 | py |
SPIGA | SPIGA-main/spiga/eval/benchmark/metrics/metrics.py | from collections import OrderedDict
class Metrics:
    """Common scaffolding for benchmark metrics.

    Subclasses fill ``self.error`` in ``compute_error`` and summarise it in
    ``metrics``; this base class only provides shared state and sanity checks.
    """
    def __init__(self, name='metrics'):
        # Shared state: raw per-sample errors and the summarised metric log.
        self.error = OrderedDict()
        self.metrics_log = OrderedDict()
        self.name = name
        # Database name and split, filled in by init_ce().
        self.database = None
        self.data_type = None

    def compute_error(self, data_anns, data_pred, database, select_ids=None):
        # The base class only validates inputs; subclasses must override.
        self.init_ce(data_anns, data_pred, database)
        raise ValueError('Computer error has to be implemented by inheritance')

    def init_ce(self, data_anns, data_pred, database):
        # Update database info
        self.database, self.data_type = database
        # Logs and checks
        print('Computing %s error...' % self.name)
        if len(data_anns) == 0:
            raise ValueError('Annotations miss for computing error in %s' % self.name)
        if len(data_pred) == 0:
            raise ValueError('Predictions miss for computing error in %s' % self.name)
        if len(data_pred) != len(data_anns):
            raise Warning('Prediction vs annotations length mismatch')

    def metrics(self):
        # The base class only validates state; subclasses must override.
        self.init_metrics()
        raise ValueError('Metrics has to be implemented by inheritance')

    def init_metrics(self):
        # Logs and checks
        print('> Metrics %s:' % self.name)
        if not len(self.error):
            raise ValueError('Error must be compute first in %s' % self.name)

    def get_pimg_err(self, data_dict):
        # Default: contribute nothing; subclasses add their per-image errors.
        return data_dict
| 1,458 | 32.930233 | 86 | py |
SPIGA | SPIGA-main/spiga/eval/benchmark/metrics/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/models/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/models/spiga.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import spiga.models.gnn.pose_proj as pproj
from spiga.models.cnn.cnn_multitask import MultitaskCNN
from spiga.models.gnn.step_regressor import StepRegressor, RelativePositionEncoder
class SPIGA(nn.Module):
    """SPIGA face-alignment network: multitask CNN backbone + cascaded GAT regressors.

    The backbone estimates head pose, which is used to project a rigid 3D face
    model as the initial landmark estimate; each cascade step then refines the
    landmarks with graph attention over visual (cropped window) and shape
    (relative position) features.
    """

    def __init__(self, num_landmarks=98, num_edges=15, steps=3, **kwargs):
        super(SPIGA, self).__init__()
        # Model parameters
        self.steps = steps # Cascaded regressors
        self.embedded_dim = 512 # GAT input channel
        self.nstack = 4 # Number of stacked GATs per step
        self.kwindow = 7 # Output cropped window dimension (kernel)
        self.swindow = 0.25 # Scale of the cropped window at first step (Dft. 25% w.r.t the input featuremap)
        # Max landmark displacement per step; window (and offset range) halves each step.
        self.offset_ratio = [self.swindow/(2**step)/2 for step in range(self.steps)]
        # CNN parameters
        self.num_landmarks = num_landmarks
        self.num_edges = num_edges
        # Initialize backbone
        self.visual_cnn = MultitaskCNN(num_landmarks=self.num_landmarks, num_edges=self.num_edges)
        # Features dimensions
        self.img_res = self.visual_cnn.img_res
        self.visual_res = self.visual_cnn.out_res
        self.visual_dim = self.visual_cnn.ch_dim
        # Initialize Pose head (6 outputs: presumably 3 Euler angles + 3 translations,
        # per the euler/trl split in backbone_forward)
        self.channels_pose = 6
        self.pose_fc = nn.Linear(self.visual_cnn.ch_dim, self.channels_pose)
        # Initialize feature extractors:
        # Relative positional encoder (input: distances to the other L-1 landmarks, x/y)
        shape_dim = 2 * (self.num_landmarks - 1)
        shape_encoder = []
        for step in range(self.steps):
            shape_encoder.append(RelativePositionEncoder(shape_dim, self.embedded_dim, [256, 256]))
        self.shape_encoder = nn.ModuleList(shape_encoder)
        # Diagonal mask used to compute relative positions (excludes self-distances)
        diagonal_mask = (torch.ones(self.num_landmarks, self.num_landmarks) - torch.eye(self.num_landmarks)).type(torch.bool)
        self.diagonal_mask = nn.parameter.Parameter(diagonal_mask, requires_grad=False)
        # Visual feature extractor
        conv_window = []
        theta_S = []
        for step in range(self.steps):
            # S matrix per step
            WH = self.visual_res # Width/height of ftmap
            Wout = self.swindow / (2 ** step) * WH # Width/height of the window
            K = self.kwindow # Kernel or resolution of the window
            scale = K / WH * (Wout - 1) / (K - 1) # Scale of the affine transformation
            # Rescale matrix S (fixed, non-trainable affine scaling per step)
            theta_S_stp = torch.tensor([[scale, 0], [0, scale]])
            theta_S.append(nn.parameter.Parameter(theta_S_stp, requires_grad=False))
            # Convolutional to embedded to BxLxCx1x1
            conv_window.append(nn.Conv2d(self.visual_dim, self.embedded_dim, self.kwindow))
        self.theta_S = nn.ParameterList(theta_S)
        self.conv_window = nn.ModuleList(conv_window)
        # Initialize GAT modules (one StepRegressor per cascade step)
        self.gcn = nn.ModuleList([StepRegressor(self.embedded_dim, 256, self.nstack) for i in range(self.steps)])

    def forward(self, data):
        """Run the full cascade.

        Args:
            data: indexable of (images, model3d, cam_matrix); see backbone_forward.

        Returns:
            Backbone feature dict extended with 'Landmarks' (list of per-step
            BxLx2 coordinates, normalized by the feature-map resolution) and
            'GATProb' (attention maps from every GAT layer of every step).
        """
        # Inputs: Visual features and points projections
        pts_proj, features = self.backbone_forward(data)
        # Visual field
        visual_field = features['VisualField'][-1]
        # Params compute only once
        gat_prob = []
        features['Landmarks'] = []
        for step in range(self.steps):
            # Features generation
            embedded_ft = self.extract_embedded(pts_proj, visual_field, step)
            # GAT inference
            offset, gat_prob = self.gcn[step](embedded_ft, gat_prob)
            offset = F.hardtanh(offset)  # clamp raw offsets to [-1, 1] before scaling
            # Update coordinates
            pts_proj = pts_proj + self.offset_ratio[step] * offset
            features['Landmarks'].append(pts_proj.clone())
        features['GATProb'] = gat_prob
        return features

    def backbone_forward(self, data):
        """Run the CNN backbone, regress head pose and project the 3D model.

        Returns:
            (pts_proj, features): initial landmark projections normalized to
            the feature-map resolution, and the backbone feature dict
            (with 'Pose' added).
        """
        # Inputs: Image and model3D
        imgs = data[0]
        model3d = data[1]
        cam_matrix = data[2]
        # HourGlass Forward
        features = self.visual_cnn(imgs)
        # Head pose estimation from the deepest hourglass core features
        pose_raw = features['HGcore'][-1]
        B, L, _, _ = pose_raw.shape
        pose = pose_raw.reshape(B, L)
        pose = self.pose_fc(pose)
        features['Pose'] = pose.clone()
        # Project model 3D (first 3 outputs: Euler angles; last 3: translation)
        euler = pose[:, 0:3]
        trl = pose[:, 3:]
        rot = pproj.euler_to_rotation_matrix(euler)
        pts_proj = pproj.projectPoints(model3d, rot, trl, cam_matrix)
        # Normalize projections by the feature-map resolution (nominal [0,1] range)
        pts_proj = pts_proj / self.visual_res
        return pts_proj, features

    def extract_embedded(self, pts_proj, receptive_field, step):
        """Fuse visual (cropped window) and shape (relative position) features per landmark."""
        # Visual features
        visual_ft = self.extract_visual_embedded(pts_proj, receptive_field, step)
        # Shape features
        shape_ft = self.calculate_distances(pts_proj)
        shape_ft = self.shape_encoder[step](shape_ft)
        # Addition
        embedded_ft = visual_ft + shape_ft
        return embedded_ft

    def extract_visual_embedded(self, pts_proj, receptive_field, step):
        """Crop a KxK window around each landmark via an affine grid and embed it.

        Returns a BxLxC tensor (C = embedded_dim), one feature vector per landmark.
        """
        # Affine matrix generation
        B, L, _ = pts_proj.shape # Pts_proj range:[0,1]
        centers = pts_proj + 0.5 / self.visual_res # BxLx2
        centers = centers.reshape(B * L, 2) # B*Lx2
        # Map [0,1] centers to the [-1,1] range expected by affine_grid.
        theta_trl = (-1 + centers * 2).unsqueeze(-1) # BxLx2x1
        theta_s = self.theta_S[step] # 2x2
        theta_s = theta_s.repeat(B * L, 1, 1) # B*Lx2x2
        theta = torch.cat((theta_s, theta_trl), -1) # B*Lx2x3
        # Generate crop grid (note: B is rebound here to the feature-map batch size)
        B, C, _, _ = receptive_field.shape
        grid = torch.nn.functional.affine_grid(theta, (B * L, C, self.kwindow, self.kwindow))
        grid = grid.reshape(B, L, self.kwindow, self.kwindow, 2)
        grid = grid.reshape(B, L, self.kwindow * self.kwindow, 2)
        # Crop windows (border padding avoids zeros when windows fall off the map)
        crops = torch.nn.functional.grid_sample(receptive_field, grid, padding_mode="border") # BxCxLxK*K
        crops = crops.transpose(1, 2) # BxLxCxK*K
        crops = crops.reshape(B * L, C, self.kwindow, self.kwindow)
        # Flatten features (KxK conv collapses each window to a single vector)
        visual_ft = self.conv_window[step](crops)
        _, Cout, _, _ = visual_ft.shape
        visual_ft = visual_ft.reshape(B, L, Cout)
        return visual_ft

    def calculate_distances(self, pts_proj):
        """Return per-landmark relative positions to every other landmark (BxLx2(L-1))."""
        B, L, _ = pts_proj.shape # BxLx2
        pts_a = pts_proj.unsqueeze(-2).repeat(1, 1, L, 1)
        pts_b = pts_a.transpose(1, 2)
        dist = pts_a - pts_b
        # Drop self-distances (always zero) via the precomputed diagonal mask.
        dist_wo_self = dist[:, self.diagonal_mask, :].reshape(B, L, -1)
        return dist_wo_self
| 6,704 | 37.982558 | 125 | py |
SPIGA | SPIGA-main/spiga/models/cnn/layers.py | from torch import nn
class Conv(nn.Module):
    """'Same'-padded Conv2d (no bias) with optional BatchNorm and ReLU."""

    def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True):
        super(Conv, self).__init__()
        self.inp_dim = inp_dim
        pad = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=pad, bias=False)
        self.relu = nn.ReLU() if relu else None
        self.bn = nn.BatchNorm2d(out_dim) if bn else None

    def forward(self, x):
        # Guard against channel mismatches early with an informative message.
        assert x.size()[1] == self.inp_dim, "{} {}".format(x.size()[1], self.inp_dim)
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Deconv(nn.Module):
    """ConvTranspose2d upsampling block (no bias) with optional BatchNorm and ReLU."""

    def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True):
        super(Deconv, self).__init__()
        self.inp_dim = inp_dim
        self.deconv = nn.ConvTranspose2d(inp_dim, out_dim, kernel_size=kernel_size, stride=stride, bias=False)
        self.relu = nn.ReLU() if relu else None
        self.bn = nn.BatchNorm2d(out_dim) if bn else None

    def forward(self, x):
        # Guard against channel mismatches early with an informative message.
        assert x.size()[1] == self.inp_dim, "{} {}".format(x.size()[1], self.inp_dim)
        out = self.deconv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Residual(nn.Module):
    """Pre-activation bottleneck residual block (BN-ReLU-Conv x3) with optional 1x1 skip."""

    def __init__(self, inp_dim, out_dim, kernel=3):
        super(Residual, self).__init__()
        half = int(out_dim / 2)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm2d(inp_dim)
        self.conv1 = Conv(inp_dim, half, 1, relu=False)
        self.bn2 = nn.BatchNorm2d(half)
        self.conv2 = Conv(half, half, kernel, relu=False)
        self.bn3 = nn.BatchNorm2d(half)
        self.conv3 = Conv(half, out_dim, 1, relu=False)
        self.skip_layer = Conv(inp_dim, out_dim, 1, relu=False)
        # The 1x1 skip projection is only applied when channel counts differ.
        self.need_skip = inp_dim != out_dim

    def forward(self, x):
        shortcut = self.skip_layer(x) if self.need_skip else x
        y = self.conv1(self.relu(self.bn1(x)))
        y = self.conv2(self.relu(self.bn2(y)))
        y = self.conv3(self.relu(self.bn3(y)))
        return y + shortcut
| 2,619 | 31.75 | 112 | py |
SPIGA | SPIGA-main/spiga/models/cnn/coord_conv.py | import torch
import torch.nn as nn
class AddCoordsTh(nn.Module):
    """CoordConv helper: appends normalized x/y (and optional radius) coordinate maps."""

    def __init__(self, x_dim=64, y_dim=64, with_r=False):
        super(AddCoordsTh, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.with_r = with_r
        xx, yy = self._prepare_coords()
        # Non-trainable parameters so the grids follow the module's device/dtype moves.
        self.xx_channel = nn.parameter.Parameter(xx, requires_grad=False)
        self.yy_channel = nn.parameter.Parameter(yy, requires_grad=False)

    def _prepare_coords(self):
        """Build the two 1x1xHxW coordinate grids, normalized to [-1, 1]."""
        ones_x = torch.ones([1, self.y_dim], dtype=torch.int32).unsqueeze(-1)
        range_x = torch.arange(self.x_dim, dtype=torch.int32).unsqueeze(0).unsqueeze(1)
        xx = torch.matmul(ones_x, range_x).unsqueeze(-1)
        ones_y = torch.ones([1, self.x_dim], dtype=torch.int32).unsqueeze(1)
        range_y = torch.arange(self.y_dim, dtype=torch.int32).unsqueeze(0).unsqueeze(-1)
        yy = torch.matmul(range_y, ones_y).unsqueeze(-1)
        # Reorder to NCHW, normalize to [0, 1], then rescale to [-1, 1].
        xx = xx.permute(0, 3, 2, 1).float() / (self.x_dim - 1)
        yy = yy.permute(0, 3, 2, 1).float() / (self.y_dim - 1)
        return xx * 2 - 1, yy * 2 - 1

    def forward(self, input_tensor):
        """
        input_tensor: (batch, c, x_dim, y_dim)
        """
        batch = input_tensor.shape[0]
        xx = self.xx_channel.repeat(batch, 1, 1, 1)
        yy = self.yy_channel.repeat(batch, 1, 1, 1)
        out = torch.cat([input_tensor, xx, yy], dim=1)
        if self.with_r:
            # Radial distance measured from the (0.5, 0.5) offset of the grid.
            rr = torch.sqrt(torch.pow(xx - 0.5, 2) + torch.pow(yy - 0.5, 2))
            out = torch.cat([out, rr], dim=1)
        return out
| 2,053 | 33.813559 | 92 | py |
SPIGA | SPIGA-main/spiga/models/cnn/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/models/cnn/hourglass.py | import torch.nn as nn
from spiga.models.cnn.layers import Conv, Deconv, Residual
class Hourglass(nn.Module):
    """Recursive hourglass: a full-resolution skip branch plus a down/process/up branch."""

    def __init__(self, n, f, bn=None, increase=0):
        super(Hourglass, self).__init__()
        nf = f + increase
        # Skip branch at full resolution.
        self.up1 = Residual(f, f)
        # Lower branch: strided conv halves the resolution.
        self.pool1 = Conv(f, f, 2, 2, bn=True, relu=True)
        self.low1 = Residual(f, nf)
        self.n = n
        # Recurse until depth 1, where the innermost stage is a plain Residual.
        self.low2 = Hourglass(n - 1, nf, bn=bn) if n > 1 else Residual(nf, nf)
        self.low3 = Residual(nf, f)
        self.up2 = Deconv(f, f, 2, 2, bn=True, relu=True)

    def forward(self, x):
        skip = self.up1(x)
        down = self.low3(self.low2(self.low1(self.pool1(x))))
        return skip + self.up2(down)
class HourglassCore(Hourglass):
    """Hourglass variant that also collects the low-resolution "core" features.

    The innermost bottleneck map and every post-`low3` map on the way back up
    are appended to ``core``, so callers can tap the deepest activations
    (e.g. for the pose head) without a second pass.
    """

    def __init__(self, n, f, bn=None, increase=0):
        super(HourglassCore, self).__init__(n, f, bn=bn, increase=increase)
        nf = f + increase
        # Replace the recursive stage with the Core variant so every level
        # forwards the core list.
        if self.n > 1:
            self.low2 = HourglassCore(n - 1, nf, bn=bn)

    def forward(self, x, core=None):
        # BUGFIX: the default used to be the mutable `core=[]`, a single shared
        # list that silently accumulated feature maps across every call made
        # without an explicit `core` argument.
        if core is None:
            core = []
        up1 = self.up1(x)
        pool1 = self.pool1(x)
        low1 = self.low1(pool1)
        if self.n > 1:
            low2, core = self.low2(low1, core=core)
        else:
            # Innermost level: store the bottleneck features.
            low2 = self.low2(low1)
            core.append(low2)
        low3 = self.low3(low2)
        if self.n > 1:
            core.append(low3)
        up2 = self.up2(low3)
        return up1 + up2, core
| 1,575 | 28.185185 | 75 | py |
SPIGA | SPIGA-main/spiga/models/cnn/transform_e2p.py | import torch
from torch import nn
class E2Ptransform(nn.Module):
    """Edge-to-points transformation: broadcast per-edge maps to their member landmarks."""

    def __init__(self, points, edges, out_dim=64):
        super(E2Ptransform, self).__init__()
        # Constant all-ones map appended as an extra "bias" edge channel.
        self.ones = nn.parameter.Parameter(torch.ones((1, out_dim, out_dim)), requires_grad=False)
        # Binary Npoints x (Nedges+1) assignment of landmarks to edge subsets.
        edge_matrix = self._select_matrix(points, edges)
        self.edge2point = nn.parameter.Parameter(edge_matrix, requires_grad=False) # Npoint X Nedges+1

    def forward(self, edges):
        B, L, H, W = edges.shape
        # Append the constant channel and move channels last for the matmul.
        stacked = torch.cat((edges, self.ones.repeat(B, 1, 1, 1)), 1)
        per_pixel = stacked.permute(0, 2, 3, 1).reshape(B, H, W, 1, L + 1)
        mapped = torch.matmul(per_pixel, self.edge2point.transpose(-1, -2))
        mapped = mapped.reshape(B, H, W, -1).permute(0, 3, 1, 2)
        # Saturate: a landmark covered by several active edges still maps to 1.
        mapped[mapped > 1] = 1.
        return mapped

    def _select_matrix(self, points, edges):
        """Pick the hard-coded landmark/edge table matching a known database layout."""
        if (points, edges) == (98, 15):
            return WFLW_98x15
        if (points, edges) == (68, 13):
            return W300_68x13
        if (points, edges) == (29, 13):
            return COFW_29x13
        if (points, edges) == (19, 6):
            return AFLW19_19x6
        raise ValueError("E2P matrix not implemented")
# Database matrixE2P
WFLW_98x15 = torch.Tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
W300_68x13 = torch.Tensor([ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]])
AFLW19_19x6 = torch.Tensor([[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1]])
COFW_29x13 = torch.Tensor([ [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
| 16,877 | 64.418605 | 107 | py |
SPIGA | SPIGA-main/spiga/models/cnn/cnn_multitask.py | from torch import nn
from spiga.models.cnn.layers import Conv, Residual
from spiga.models.cnn.hourglass import HourglassCore
from spiga.models.cnn.coord_conv import AddCoordsTh
from spiga.models.cnn.transform_e2p import E2Ptransform
class MultitaskCNN(nn.Module):
    """Stacked-hourglass backbone with attention-gated landmark/edge heads and a pose core.

    Outputs a dict with the per-stack visual feature maps ('VisualField') and,
    when pose_req is set, the downsampled bottleneck features ('HGcore') used
    by the pose head downstream.
    """

    def __init__(self, nstack=4, num_landmarks=98, num_edges=15, pose_req=True, **kwargs):
        super(MultitaskCNN, self).__init__()
        # Parameters
        self.img_res = 256 # WxH input resolution
        self.ch_dim = 256 # Default channel dimension
        self.out_res = 64 # WxH output resolution
        self.nstack = nstack # Hourglass modules stacked
        self.num_landmarks = num_landmarks # Number of landmarks
        self.num_edges = num_edges # Number of edges subsets (eyeR, eyeL, nose, etc)
        self.pose_required = pose_req # Multitask flag
        # Image preprocessing: CoordConv (3 img + 3 coord channels -> Conv(6, ...)),
        # then stride-2 stages reduce 256x256 to the 64x64 working resolution.
        self.pre = nn.Sequential(
            AddCoordsTh(x_dim=self.img_res, y_dim=self.img_res, with_r=True),
            Conv(6, 64, 7, 2, bn=True, relu=True),
            Residual(64, 128),
            Conv(128, 128, 2, 2, bn=True, relu=True),
            Residual(128, 128),
            Residual(128, self.ch_dim)
        )
        # Hourglass modules (Core variant exposes bottleneck features per stack)
        self.hgs = nn.ModuleList([HourglassCore(4, self.ch_dim) for i in range(self.nstack)])
        self.hgs_out = nn.ModuleList([
            nn.Sequential(
                Residual(self.ch_dim, self.ch_dim),
                Conv(self.ch_dim, self.ch_dim, 1, bn=True, relu=True)
            ) for i in range(nstack)])
        if self.pose_required:
            # Pose core head: two stride-2 stages downsample the bottleneck features.
            self.hgs_core = nn.ModuleList([
                nn.Sequential(
                    Residual(self.ch_dim, self.ch_dim),
                    Conv(self.ch_dim, self.ch_dim, 2, 2, bn=True, relu=True),
                    Residual(self.ch_dim, self.ch_dim),
                    Conv(self.ch_dim, self.ch_dim, 2, 2, bn=True, relu=True)
                ) for i in range(nstack)])
        # Attention module (ADnet style)
        self.outs_points = nn.ModuleList([nn.Sequential(Conv(self.ch_dim, self.num_landmarks, 1, relu=False, bn=False),
                                                        nn.Sigmoid()) for i in range(self.nstack - 1)])
        self.outs_edges = nn.ModuleList([nn.Sequential(Conv(self.ch_dim, self.num_edges, 1, relu=False, bn=False),
                                                       nn.Sigmoid()) for i in range(self.nstack - 1)])
        # Broadcast per-edge attention maps to their member landmarks.
        self.E2Ptransform = E2Ptransform(self.num_landmarks, self.num_edges, out_dim=self.out_res)
        self.outs_features = nn.ModuleList([Conv(self.ch_dim, self.num_landmarks, 1, relu=False, bn=False)for i in range(self.nstack - 1)])
        # Stacked Hourglass inputs (nstack > 1): 1x1 convs merge predictions and
        # features back into the next stack's input.
        self.merge_preds = nn.ModuleList([Conv(self.num_landmarks, self.ch_dim, 1, relu=False, bn=False) for i in range(self.nstack - 1)])
        self.merge_features = nn.ModuleList([Conv(self.ch_dim, self.ch_dim, 1, relu=False, bn=False) for i in range(self.nstack - 1)])

    def forward(self, imgs):
        """Run all stacks; returns {'VisualField': [per-stack BxCxHxW], 'HGcore': [...]}."""
        x = self.pre(imgs)
        outputs = {'VisualField': [],
                   'HGcore': []}
        core_raw = []
        for i in range(self.nstack):
            # Hourglass
            hg, core_raw = self.hgs[i](x, core=core_raw)
            if self.pose_required:
                # Take the bottleneck map of this stack and downsample it for the pose head.
                core = self.hgs_core[i](core_raw[-self.hgs[i].n])
                outputs['HGcore'].append(core)
            hg = self.hgs_out[i](hg)
            # Visual features
            outputs['VisualField'].append(hg)
            # Prepare next stacked input
            if i < self.nstack - 1:
                # Attentional modules: point attention gated by edge attention.
                points = self.outs_points[i](hg)
                edges = self.outs_edges[i](hg)
                edges_ext = self.E2Ptransform(edges)
                point_edges = points * edges_ext
                # Landmarks
                maps = self.outs_features[i](hg)
                preds = maps * point_edges
                # Outputs: residual merge of predictions and features into the next stack.
                x = x + self.merge_preds[i](preds) + self.merge_features[i](hg)
        return outputs
| 4,196 | 43.178947 | 139 | py |
SPIGA | SPIGA-main/spiga/models/gnn/step_regressor.py | import torch.nn as nn
from spiga.models.gnn.layers import MLP
from spiga.models.gnn.gat import GAT
class StepRegressor(nn.Module):
    """One cascaded regression step: stacked GAT layers followed by an offset decoder.

    Args:
        input_dim: Channel dimension of the embedded landmark features.
        feature_dim: Internal GAT/decoder channel dimension.
        nstack: Number of stacked GAT layers (must be > 0).
        decoding: Hidden-layer widths of the offset decoder MLP.
    """

    def __init__(self, input_dim: int, feature_dim: int, nstack=4, decoding=(256, 128, 64, 32)):
        # BUGFIX: `decoding` used to default to a mutable list; an immutable
        # tuple default is safer and backward-compatible (lists still accepted).
        super(StepRegressor, self).__init__()
        assert nstack > 0
        self.nstack = nstack
        # First GAT adapts input_dim -> feature_dim; the rest keep feature_dim.
        self.gat = nn.ModuleList([GAT(input_dim, feature_dim, 4)])
        for _ in range(nstack-1):
            self.gat.append(GAT(feature_dim, feature_dim, 4))
        self.decoder = OffsetDecoder(feature_dim, list(decoding))

    def forward(self, embedded, prob_list=None):
        """Return (per-landmark 2D offsets BxLx2, attention maps per GAT layer).

        BUGFIX: `prob_list` used to default to a shared mutable `[]`, which
        accumulated attention maps across calls made without an explicit list.
        """
        if prob_list is None:
            prob_list = []
        # GAT layers operate channel-first: BxLxC -> BxCxL.
        embedded = embedded.transpose(-1, -2)
        for i in range(self.nstack):
            embedded, prob = self.gat[i](embedded)
            prob_list.append(prob)
        offset = self.decoder(embedded)
        return offset.transpose(-1, -2), prob_list
class OffsetDecoder(nn.Module):
    """Decode GAT features into per-landmark 2D offsets with a small pointwise MLP."""

    def __init__(self, feature_dim, layers):
        super().__init__()
        # Channel pipeline: feature_dim -> hidden layers -> 2 (x, y offset).
        self.decoder = MLP([feature_dim] + layers + [2])

    def forward(self, embedded):
        return self.decoder(embedded)
class RelativePositionEncoder(nn.Module):
    """Encode relative landmark positions into the GAT embedding space via an MLP."""

    def __init__(self, input_dim, feature_dim, layers):
        super().__init__()
        self.encoder = MLP([input_dim] + layers + [feature_dim])

    def forward(self, feature):
        # The MLP is Conv1d-based (channel-first), so swap the last two axes around it.
        transposed = feature.transpose(-1, -2)
        encoded = self.encoder(transposed)
        return encoded.transpose(-1, -2)
| 1,423 | 31.363636 | 96 | py |
SPIGA | SPIGA-main/spiga/models/gnn/layers.py | from torch import nn
def MLP(channels: list):
    """Build a pointwise (kernel-size-1 Conv1d) MLP over the given channel widths.

    Every layer except the last is followed by BatchNorm1d + ReLU; the final
    projection is left linear.
    """
    modules = []
    last = len(channels) - 1
    for idx, (c_in, c_out) in enumerate(zip(channels[:-1], channels[1:]), start=1):
        modules.append(nn.Conv1d(c_in, c_out, kernel_size=1, bias=True))
        if idx < last:
            modules.extend([nn.BatchNorm1d(c_out), nn.ReLU()])
    return nn.Sequential(*modules)
| 349 | 25.923077 | 88 | py |
SPIGA | SPIGA-main/spiga/models/gnn/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/models/gnn/gat.py | from copy import deepcopy
import torch
from torch import nn
import torch.nn.functional as F
from spiga.models.gnn.layers import MLP
class GAT(nn.Module):
    """Graph attention layer: residual connection around multi-head message passing."""

    def __init__(self, input_dim: int, output_dim: int, num_heads=4):
        super().__init__()
        self.reshape = None
        heads = num_heads
        if input_dim != output_dim:
            # Fall back to the largest head count <= num_heads that divides input_dim.
            while heads > 1 and input_dim % heads != 0:
                heads -= 1
            # Project the residual path so it matches the output dimension.
            self.reshape = MLP([input_dim, output_dim])
        self.attention = MessagePassing(input_dim, heads, out_dim=output_dim)

    def forward(self, features):
        message, prob = self.attention(features)
        residual = self.reshape(features) if self.reshape else features
        return residual + message, prob
class MessagePassing(nn.Module):
    """Self-attention message computation followed by an MLP merge with the input."""

    def __init__(self, feature_dim: int, num_heads: int, out_dim=None):
        super().__init__()
        self.attn = Attention(num_heads, feature_dim)
        # The concatenated [features, message] pair is reduced down to out_dim.
        self.mlp = MLP([feature_dim*2, feature_dim*2, out_dim])

    def forward(self, features):
        # Self-attention: queries, keys and values all come from the same features.
        message, prob = self.attn(features, features, features)
        merged = torch.cat([features, message], dim=1)
        return self.mlp(merged), prob
class Attention(nn.Module):
    """Multi-head scaled dot-product attention over 1x1 Conv1d projections."""

    def __init__(self, num_heads: int, feature_dim: int):
        super().__init__()
        assert feature_dim % num_heads == 0
        self.dim = feature_dim // num_heads
        self.num_heads = num_heads
        # Output merge projection; the q/k/v projections start as copies of it.
        self.merge = nn.Conv1d(feature_dim, feature_dim, kernel_size=1)
        self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)])

    def forward(self, query, key, value):
        batch = query.size(0)
        # Project each input and split channels into (head_dim, num_heads).
        projected = [proj(t).view(batch, self.dim, self.num_heads, -1)
                     for proj, t in zip(self.proj, (query, key, value))]
        mixed, prob = self.attention(*projected)
        flat = mixed.contiguous().view(batch, self.dim * self.num_heads, -1)
        return self.merge(flat), prob

    def attention(self, query, key, value):
        # Scaled dot-product: scores are (batch, heads, n_query, n_key).
        scale = query.shape[1] ** .5
        scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / scale
        prob = F.softmax(scores, dim=-1)
        return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob
| 2,284 | 35.269841 | 92 | py |
SPIGA | SPIGA-main/spiga/models/gnn/pose_proj.py | import torch
import math
def euler_to_rotation_matrix(euler):
    """Convert Bx3 Euler angles (degrees) into Bx3x3 rotation matrices.

    NOTE: `euler` is modified in place by the coordinate-system change below,
    matching the original behavior callers may rely on.
    """
    # http://euclideanspace.com/maths/geometry/rotations/conversions/eulerToMatrix/index.htm
    # Change coordinates system (in-place on the caller's tensor).
    euler[:, 0] = -(euler[:, 0]-90)
    euler[:, 1] = -euler[:, 1]
    euler[:, 2] = -(euler[:, 2]+90)
    # Convert to radians
    rad = euler*(math.pi/180.0)
    cos_y, sin_y = torch.cos(rad[:, 0]), torch.sin(rad[:, 0])
    cos_p, sin_p = torch.cos(rad[:, 1]), torch.sin(rad[:, 1])
    cos_r, sin_r = torch.cos(rad[:, 2]), torch.sin(rad[:, 2])
    # Allocate the per-angle rotation matrices on the input's device.
    device = euler.device if euler.is_cuda else None
    batch = euler.shape[0]
    Ry = torch.zeros((batch, 3, 3), device=device)
    Rp = torch.zeros((batch, 3, 3), device=device)
    Rr = torch.zeros((batch, 3, 3), device=device)
    # Yaw
    Ry[:, 0, 0] = cos_y
    Ry[:, 0, 2] = sin_y
    Ry[:, 1, 1] = 1.
    Ry[:, 2, 0] = -sin_y
    Ry[:, 2, 2] = cos_y
    # Pitch
    Rp[:, 0, 0] = cos_p
    Rp[:, 0, 1] = -sin_p
    Rp[:, 1, 0] = sin_p
    Rp[:, 1, 1] = cos_p
    Rp[:, 2, 2] = 1.
    # Roll
    Rr[:, 0, 0] = 1.
    Rr[:, 1, 1] = cos_r
    Rr[:, 1, 2] = -sin_r
    Rr[:, 2, 1] = sin_r
    Rr[:, 2, 2] = cos_r
    # Compose: R = Ry @ Rp @ Rr
    return torch.matmul(torch.matmul(Ry, Rp), Rr)
def projectPoints(pts, rot, trl, cam_matrix):
    """Project BxNx3 3D points to BxNx2 image coordinates (perspective model)."""
    # Get working device
    device = pts.device if pts.is_cuda else None
    # Build the 3x4 projection matrix: K @ [R | t].
    extrinsics = torch.cat((rot, trl.unsqueeze(2)), 2)
    proj_matrix = torch.matmul(cam_matrix, extrinsics)
    # Homogeneous landmarks: append a column of ones.
    ones = torch.ones(pts.shape[:2], device=device, requires_grad=trl.requires_grad)
    pts_hom = torch.cat((pts, ones.unsqueeze(2)), 2)
    # Project and normalize by the depth coordinate (Lambda = 1).
    projected = torch.matmul(proj_matrix, pts_hom.permute((0, 2, 1)))
    projected = projected.permute((0, 2, 1))
    projected = projected / projected[:, :, 2].unsqueeze(2)
    return projected[:, :, :-1]
| 2,046 | 25.24359 | 92 | py |
SPIGA | SPIGA-main/spiga/demo/app.py | import os
import cv2
import pkg_resources
# My libs
import spiga.demo.analyze.track.get_tracker as tr
import spiga.demo.analyze.extract.spiga_processor as pr_spiga
from spiga.demo.analyze.analyzer import VideoAnalyzer
from spiga.demo.visualize.viewer import Viewer
# Paths
# Default directory for recorded demo videos; created at import time if missing.
video_out_path_dft = pkg_resources.resource_filename('spiga', 'demo/outputs')
if not os.path.exists(video_out_path_dft):
    os.makedirs(video_out_path_dft)
def main():
    """Parse CLI arguments and launch the demo video application."""
    import argparse
    parser = argparse.ArgumentParser(description='Face App')
    parser.add_argument('-i', '--input', type=str, default='0', help='Video input')
    parser.add_argument('-d', '--dataset', type=str, default='wflw',
                        choices=['wflw', '300wpublic', '300wprivate', 'merlrav'],
                        help='SPIGA pretrained weights per dataset')
    parser.add_argument('-t', '--tracker', type=str, default='RetinaSort',
                        choices=['RetinaSort', 'RetinaSort_Res50'], help='Tracker name')
    parser.add_argument('-sh', '--show', nargs='+', type=str, default=['fps', 'face_id', 'landmarks', 'headpose'],
                        choices=['fps', 'bbox', 'face_id', 'landmarks', 'headpose'],
                        help='Select the attributes of the face to be displayed ')
    parser.add_argument('-s', '--save', action='store_true', help='Save record')
    parser.add_argument('-nv', '--noview', action='store_false', help='Do not visualize the window')
    parser.add_argument('--outpath', type=str, default=video_out_path_dft, help='Video output directory')
    parser.add_argument('--fps', type=int, default=30, help='Frames per second')
    parser.add_argument('--shape', nargs='+', type=int, help='Visualizer shape (W,H)')
    args = parser.parse_args()

    # Normalize the optional --shape argument into a (W, H) tuple or None.
    video_shape = None
    if args.shape:
        if len(args.shape) != 2:
            raise ValueError('--shape requires two values: width and height. Ej: --shape 256 256')
        video_shape = tuple(args.shape)

    # Running headless without saving would produce no output at all.
    if not args.noview and not args.save:
        raise ValueError('No results will be saved neither shown')

    video_app(args.input, spiga_dataset=args.dataset, tracker=args.tracker, fps=args.fps,
              save=args.save, output_path=args.outpath, video_shape=video_shape,
              visualize=args.noview, plot=args.show)
def video_app(input_name, spiga_dataset=None, tracker=None, fps=30, save=False,
              output_path=video_out_path_dft, video_shape=None, visualize=True, plot=()):
    """Run the face tracking + SPIGA pipeline on a webcam or video file.

    :param input_name: webcam index as a digit string (e.g. '0') or a video file path.
    :param spiga_dataset: SPIGA pretrained-weights dataset name.
    :param tracker: tracker name resolved by tr.get_tracker().
    :param fps: playback/recording frames per second.
    :param save: if True, record the rendered output to output_path.
    :param output_path: directory where the recording is written.
    :param video_shape: optional (width, height) of the display canvas.
    :param visualize: if True, show the viewer window (forced True for webcams).
    :param plot: face attributes to draw (e.g. 'landmarks', 'headpose').
    """
    # Load video
    try:
        # Digit-like input selects the webcam with that index
        capture = cv2.VideoCapture(int(input_name))
        video_name = None
        if not visualize:
            print('WARNING: Webcam must be visualized in order to close the app')
            visualize = True
    except:
        try:
            # Otherwise treat the input as a file path; name = basename minus extension
            capture = cv2.VideoCapture(input_name)
            video_name = input_name.split('/')[-1][:-4]
        except:
            raise ValueError('Input video path %s not valid' % input_name)

    if capture is not None:
        # Initialize viewer
        if video_shape is not None:
            vid_w, vid_h = video_shape
        else:
            # cv2 capture properties 3/4 are frame width/height
            vid_w, vid_h = capture.get(3), capture.get(4)
        viewer = Viewer('face_app', width=vid_w, height=vid_h, fps=fps)
        if save:
            viewer.record_video(output_path, video_name)

        # Initialize face tracker (detector expects (height, width))
        faces_tracker = tr.get_tracker(tracker)
        faces_tracker.detector.set_input_shape(capture.get(4), capture.get(3))
        # Initialize processors
        processor = pr_spiga.SPIGAProcessor(dataset=spiga_dataset)
        # Initialize Analyzer
        faces_analyzer = VideoAnalyzer(faces_tracker, processor=processor)
        # Convert FPS to the amount of milliseconds that each frame will be displayed
        if visualize:
            viewer.start_view()
        # Main loop: read -> analyze -> render/record until EOF or 'q'
        while capture.isOpened():
            ret, frame = capture.read()
            if ret:
                # Process frame
                faces_analyzer.process_frame(frame)
                # Show results
                key = viewer.process_image(frame, drawers=[faces_analyzer], show_attributes=plot)
                if key:
                    break
            else:
                break

        capture.release()
        viewer.close()
# Run the demo when executed as a script
if __name__ == '__main__':
    main()
| 4,280 | 38.638889 | 119 | py |
SPIGA | SPIGA-main/spiga/demo/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/visualize/viewer.py | import os
import cv2
import copy
import time
import numpy as np
# Demo libs
import spiga.demo.visualize.plotter as plt
class Viewer:
    """OpenCV-based canvas: displays processed frames, overlays drawer output,
    tracks FPS statistics and optionally records the result to an MP4 file."""

    def __init__(self, window_title, width=None, height=None, fps=30):
        """
        Initialization of the viewer canvas using width and height in pixels
        :param window_title: The string with the window title to display.
        :param width: The given width in pixels of the window canvas.
        :param height: The given height in pixels of the window canvas.
        :param fps: Frames per second
        """
        # Visualizer parameters
        self.canvas = None
        self.width = width
        self.height = height
        self.window_title = window_title
        self.visualize = False
        # Time variables
        self.fps = fps
        self.fps_inference = 0  # instantaneous FPS of the last processed frame
        self.fps_mean = 0       # FPS averaged over the last `fps` frames
        self.fps_lifo = np.zeros(self.fps)  # ring buffer of recent FPS samples
        self.timer = time.time()
        self.frame_cnt = -1
        # Video/Image writer
        self.write = False
        self.video_name = window_title # Initial name
        self.video_path = None
        self.video_writer = None
        # Plots
        self.plotter = plt.Plotter()
        # cv2.putText parameters used to render the FPS counter
        self.fps_draw_params = {'text_size': 0.75,
                                'text_thick': 2,
                                'coord': (10, 50),
                                'font': cv2.FONT_HERSHEY_SIMPLEX,
                                'color': (255, 255, 255)}

    def start_view(self):
        """Open (or reopen) the named OpenCV window and enable visualization."""
        self._kill_window()
        cv2.namedWindow(self.window_title)
        self.visualize = True

    def record_video(self, video_path, video_name=None):
        """Enable recording of every processed frame to <video_path>/<video_name>.mp4."""
        self.write = True
        if video_name is not None:
            self.video_name = video_name
        self.video_path = video_path
        if not os.path.exists(video_path):
            os.makedirs(video_path)
        file_name = os.path.join(self.video_path, self.video_name + '.mp4')
        self.video_writer = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc(*'MP4V'),
                                            self.fps, (int(self.width), int(self.height)))

    def save_canvas(self, file_path=None):
        """Save the current canvas as a JPG next to the video output.

        NOTE(review): os.path.join discards `file_path` because the second
        argument starts with '/', and the result is concatenated with
        `file_path` again below. The net path works on POSIX but the handling
        is fragile -- verify before reusing on other platforms.
        """
        if file_path is None:
            if self.video_path is None:
                raise ValueError('Path not defined neither video_path is available')
            else:
                file_path = self.video_path
        file_name = os.path.join(file_path, '/%s_%i.jpg' % (self.video_name, self.frame_cnt))
        cv2.imwrite(file_path + file_name, self.canvas)

    def reset_params(self, width, height, window_title, fps=30):
        """Reconfigure canvas shape, window title and FPS, closing any open window."""
        self.width = width
        self.height = height
        self._kill_window()
        # Keep video name in sync when it was never explicitly set
        if self.video_name == self.window_title:
            self.video_name = window_title
        self.window_title = window_title
        self.fps = fps

    def close(self):
        """Release the video writer (if recording) and close the window."""
        if self.write:
            self.video_writer.release()
        self._kill_window()

    def process_image(self, input_img, drawers=(), show_attributes=('fps')):
        """Draw, display and/or record one frame; returns True when 'q' is pressed.

        NOTE(review): the default `show_attributes=('fps')` is the string
        'fps', not a tuple; the membership checks below still succeed via
        substring matching.
        """
        # Variables
        image = copy.copy(input_img)
        img_h, img_w, img_ch = image.shape
        # Convert gray scale image to color if needed
        if img_ch == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        # Draw features on image
        image = self._draw_over_canvas(image, drawers, show_attributes)
        # Resize image if needed to canvas shape
        if img_w != self.width or img_h != self.height:
            image = cv2.resize(image, (self.width, self.height))
        # Update canvas
        self.canvas = image
        # Visualize FPS
        if 'fps' in show_attributes:
            self._plot_fps()
        # Write the resulting frame
        if self.write:
            self.video_writer.write(self.canvas)
        # Timing loop variables
        loop_time = self._update_timers()
        break_flag = False
        # Visualization
        if self.visualize:
            cv2.imshow(self.window_title, self.canvas)
            # Wait the remainder of the frame period (at least 1 ms)
            sleep_time = int(1000 * (1 / self.fps - loop_time))
            if sleep_time <= 0:
                sleep_time = 1
            if cv2.waitKey(sleep_time) & 0xFF == ord('q'):
                break_flag = True
        self.timer = time.time()
        return break_flag

    def _plot_fps(self):
        """Render the mean FPS counter on the canvas."""
        # Plot algorithm time
        params = self.fps_draw_params
        cv2.putText(self.canvas, ('FPS: %.2f' % self.fps_mean), params['coord'], params['font'], params['text_size'],
                    params['color'], params['text_thick'], cv2.LINE_AA)

    def _draw_over_canvas(self, image, drawers, show_attributes):
        """Apply every drawer's plot_features to the image in order."""
        for drawer in drawers:
            image = drawer.plot_features(image, self.plotter, show_attributes)
        return image

    def _kill_window(self):
        """Close the OpenCV window, ignoring errors if it never existed."""
        self.visualize = False
        try:
            cv2.destroyWindow(self.window_title)
        except:
            pass

    def _update_timers(self):
        """Update frame counter and FPS statistics; returns the loop duration."""
        self.frame_cnt += 1
        loop_time = time.time() - self.timer
        self.fps_inference = 1/loop_time
        lifo_idx = self.frame_cnt % self.fps
        self.fps_lifo[lifo_idx] = self.fps_inference
        # Refresh the mean once per full ring-buffer pass
        if lifo_idx == 0:
            self.fps_mean = np.mean(self.fps_lifo)
        return loop_time
| 5,277 | 31.182927 | 117 | py |
SPIGA | SPIGA-main/spiga/demo/visualize/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/visualize/plotter.py | # Demo libs
import spiga.demo.visualize.layouts.plot_basics as pl_basic
import spiga.demo.visualize.layouts.plot_bbox as pl_bbox
import spiga.demo.visualize.layouts.plot_landmarks as pl_lnd
import spiga.demo.visualize.layouts.plot_headpose as pl_hpose
class Plotter:
    """Bundles every drawing layout used by the demo visualizer."""

    def __init__(self):
        self.basic = pl_basic.BasicLayout()        # generic shapes (circles)
        self.bbox = pl_bbox.BboxLayout()           # bounding boxes and scores
        self.landmarks = pl_lnd.LandmarkLayout()   # facial landmarks
        self.hpose = pl_hpose.HeadposeLayout()     # head-pose axes
SPIGA | SPIGA-main/spiga/demo/visualize/layouts/plot_bbox.py | import cv2
# Demo libs
from spiga.demo.visualize.layouts.plot_basics import BasicLayout
class BboxLayout(BasicLayout):
    """Drawing helpers for detection bounding boxes and their scores."""

    # Register the default box line thickness in the shared table
    BasicLayout.thickness_dft['bbox'] = 2

    def __init__(self):
        super().__init__()

    def draw_bbox(self, canvas, bbox, score_thr=0, show_score=True, thick=None, color=BasicLayout.colors['blue']):
        """Draw a rectangle (and optionally its score) when bbox[4] > score_thr."""
        thickness = self.thickness['bbox'] if thick is None else thick
        if bbox[4] > score_thr:
            corners = [int(v) for v in bbox]
            cv2.rectangle(canvas, (corners[0], corners[1]), (corners[2], corners[3]), color, thickness)
            if show_score:
                self.draw_bbox_text(canvas, corners, "{:.4f}".format(bbox[4]), offset=(0, 12), color=color)
        return canvas

    def draw_bbox_line(self, canvas, bbox, score_thr=0, show_score=True, thick=None, color=BasicLayout.colors['blue']):
        """Draw a corner marker (short vertical + horizontal line) at the top-left."""
        thickness = self.thickness['bbox'] if thick is None else thick
        if bbox[4] > score_thr:
            corners = [int(v) for v in bbox]
            cv2.line(canvas, (corners[0], corners[1]), (corners[0], corners[1] + 15), color, thickness)
            cv2.line(canvas, (corners[0], corners[1]), (corners[0] + 100, corners[1]), color, thickness)
            if show_score:
                self.draw_bbox_text(canvas, corners, "{:.4f}".format(bbox[4]), offset=(0, 12), color=color)
        return canvas

    def draw_bbox_text(self, canvas, bbox, text, offset=(0, 0), color=BasicLayout.colors['white']):
        """Write text anchored at the bbox top-left corner plus an (x, y) offset."""
        corners = [int(v) for v in bbox]
        anchor = (corners[0] + offset[0], corners[1] + offset[1])
        cv2.putText(canvas, text, anchor, cv2.FONT_HERSHEY_DUPLEX, 0.5, color)
        return canvas

    def draw_bboxes(self, canvas, dets, score_thr=0, show_score=True, thick=None, colors=(BasicLayout.colors['blue'])):
        """Draw every detection, cycling through the given colors."""
        for det_idx, det in enumerate(dets):
            canvas = self.draw_bbox(canvas, det, score_thr=score_thr, show_score=show_score,
                                    thick=thick, color=colors[det_idx % len(colors)])
        return canvas
| 1,993 | 35.925926 | 119 | py |
SPIGA | SPIGA-main/spiga/demo/visualize/layouts/plot_basics.py | import numpy as np
import cv2
class BasicLayout:
    """Base drawing layout: shared color palette and per-feature thickness."""

    # Variables
    # Shared BGR color palette
    colors = {'green': (0, 255, 0),
              'red': (0, 0, 255),
              'blue': (255, 0, 0),
              'purple': (128, 0, 128),
              'white': (255, 255, 255),
              'black': (0, 0, 0)}

    # Default thickness per feature; subclasses register their own keys here
    # at class-definition time (e.g. 'bbox', 'lnd', 'hpose').
    thickness_dft = {'circle': 2}

    def __init__(self):
        # Bug fix: copy the defaults instead of aliasing the class-level dict.
        # The previous `self.thickness = self.thickness_dft` made
        # update_thickness() mutate the defaults shared by every instance,
        # which also turned reset_thickness() into a no-op.
        self.thickness = dict(self.thickness_dft)

    def draw_circles(self, canvas, coord_list, color=colors['red'], thick=None):
        """Draw a filled circle at each (x, y) coordinate of coord_list."""
        if thick is None:
            thick = self.thickness['circle']
        for xy in coord_list:
            # Round to the nearest pixel (assumes xy is a numeric array)
            xy = np.array(xy + 0.5, dtype=int)
            canvas = cv2.circle(canvas, (xy[0], xy[1]), thick, color, -1)
        return canvas

    def update_thickness(self, thick_dict):
        """Override thickness values for this instance only."""
        for k, v in thick_dict.items():
            self.thickness[k] = v

    def reset_thickness(self):
        """Restore the class-level default thickness table."""
        self.thickness = dict(self.thickness_dft)

    def update_thick_byratio(self, ratio_dict):
        """Scale the default thickness of the given keys by a ratio (rounded)."""
        for key, ratio in ratio_dict.items():
            self.thickness[key] = int(self.thickness_dft[key] * ratio + 0.5)
| 1,086 | 25.512195 | 80 | py |
SPIGA | SPIGA-main/spiga/demo/visualize/layouts/plot_landmarks.py | import numpy as np
# Demo libs
from spiga.demo.visualize.layouts.plot_basics import BasicLayout
class LandmarkLayout(BasicLayout):
    """Drawing helpers for facial landmarks with visibility handling."""

    # Register the default landmark radius in the shared table
    BasicLayout.thickness_dft['lnd'] = 3

    def __init__(self):
        super().__init__()

    def draw_landmarks(self, image, landmarks, visible=None, mask=None,
                       thick=None, colors=(BasicLayout.colors['green'], BasicLayout.colors['red'])):
        """Draw landmarks: visible ones in colors[0], hidden in colors[1];
        masked-out landmarks are skipped entirely."""
        num_ldm = len(landmarks)
        # Default: every landmark visible and annotated
        if visible is None:
            visible = np.ones(num_ldm)
        if mask is None:
            mask = np.ones(num_ldm)
        if thick is None:
            thick = self.thickness['lnd']
        # Normalize list/tuple inputs to numpy arrays
        if isinstance(landmarks, (list, tuple)):
            landmarks = np.array(landmarks)
        if isinstance(visible, (list, tuple)):
            visible = np.array(visible)
        if isinstance(mask, (list, tuple)):
            mask = np.array(mask)

        # Keep only annotated landmarks, split by visibility
        ldm_vis, ldm_notvis = self._split_lnd_by_vis(landmarks, visible, mask)

        # Channel-first (PIL/tensor) images -> channel-last for OpenCV
        if image.shape[0] == 3:
            image = image.transpose(1, 2, 0)

        canvas = self.draw_circles(image, ldm_vis, color=colors[0], thick=thick)
        return self.draw_circles(canvas, ldm_notvis, color=colors[1], thick=thick)

    @staticmethod
    def _split_lnd_by_vis(landmarks, visible, mask):
        """Drop masked landmarks and split the rest into visible / not-visible."""
        keep = np.array(mask, dtype=bool)
        vis_flags = np.array(visible, dtype=bool)[keep]
        kept_ldm = landmarks[keep]
        return kept_ldm[vis_flags], kept_ldm[np.logical_not(vis_flags)]
| 1,746 | 31.351852 | 100 | py |
SPIGA | SPIGA-main/spiga/demo/visualize/layouts/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/visualize/layouts/plot_headpose.py | import numpy as np
import cv2
# Demo libs
from spiga.demo.visualize.layouts.plot_basics import BasicLayout
class HeadposeLayout(BasicLayout):
    """Draws the head pose as a 3-axis frame projected onto the image."""

    # Register the default axis line thickness in the shared table
    BasicLayout.thickness_dft['hpose'] = 2

    def __init__(self):
        super().__init__()
        self.hpose_axe_length = 2  # axis length in head-model units
        self.focal_ratio = 1       # focal length as a ratio of the bbox size

    def draw_headpose(self, canvas, bbox, rot, trl, euler=False, len_axe=None, thick=None,
                      colors=(BasicLayout.colors['blue'], BasicLayout.colors['green'], BasicLayout.colors['red'])):
        """Project the pose axes with intrinsics derived from the bbox and draw them.

        :param bbox: [x1, y1, x2, y2, ...] face box used to build the camera matrix.
        :param rot: rotation matrix, or Euler angles when euler=True.
        :param trl: translation vector.
        """
        trl = np.float32(trl)
        rot = np.float32(rot)
        # Pinhole intrinsics approximated from the face bounding box
        K = self._camera_matrix(bbox)
        # Init variables if need it
        if len_axe is None:
            len_axe = self.hpose_axe_length
        if thick is None:
            thick = self.thickness['hpose']
        if euler:
            rot = self._euler_to_rotation_matrix(rot)
        # Rodrigues vector required by cv2.projectPoints
        rotV, _ = cv2.Rodrigues(rot)
        # Three axis endpoints plus the origin (last) in model coordinates
        points = np.float32([[len_axe, 0, 0], [0, -len_axe, 0], [0, 0, -len_axe], [0, 0, 0]]).reshape(-1, 3)
        axisPoints, _ = cv2.projectPoints(points, rotV, trl, K, (0, 0, 0, 0))
        # One line per axis, drawn from the projected origin (index 3)
        canvas = cv2.line(canvas, tuple(axisPoints[3].ravel().astype(int)), tuple(axisPoints[2].ravel().astype(int)), colors[0], thick)
        canvas = cv2.line(canvas, tuple(axisPoints[3].ravel().astype(int)), tuple(axisPoints[1].ravel().astype(int)), colors[1], thick)
        canvas = cv2.line(canvas, tuple(axisPoints[3].ravel().astype(int)), tuple(axisPoints[0].ravel().astype(int)), colors[2], thick)
        return canvas

    @staticmethod
    def _euler_to_rotation_matrix(headpose):
        """Convert Euler angles (degrees) to a rotation matrix R = Ry @ Rp @ Rr."""
        # http://euclideanspace.com/maths/geometry/rotations/conversions/eulerToMatrix/index.htm
        # Change coordinates system
        euler = np.array([-(headpose[0] - 90), -headpose[1], -(headpose[2] + 90)])
        # Convert to radians
        rad = euler * (np.pi / 180.0)
        cy = np.cos(rad[0])
        sy = np.sin(rad[0])
        cp = np.cos(rad[1])
        sp = np.sin(rad[1])
        cr = np.cos(rad[2])
        sr = np.sin(rad[2])
        Ry = np.array([[cy, 0.0, sy], [0.0, 1.0, 0.0], [-sy, 0.0, cy]])  # yaw
        Rp = np.array([[cp, -sp, 0.0], [sp, cp, 0.0], [0.0, 0.0, 1.0]])  # pitch
        Rr = np.array([[1.0, 0.0, 0.0], [0.0, cr, -sr], [0.0, sr, cr]])  # roll
        return np.matmul(np.matmul(Ry, Rp), Rr)

    def _camera_matrix(self, bbox):
        """Pinhole intrinsics: focal length proportional to the bbox size,
        principal point at the bbox center."""
        x1, y1, x2, y2 = bbox[:4]
        w = x2-x1
        h = y2-y1
        focal_length_x = w * self.focal_ratio
        focal_length_y = h * self.focal_ratio
        face_center = (x1 + (w * 0.5)), (y1 + (h * 0.5))
        cam_matrix = np.array([[focal_length_x, 0, face_center[0]],
                               [0, focal_length_y, face_center[1]],
                               [0, 0, 1]], dtype=np.float32)
        return cam_matrix
SPIGA | SPIGA-main/spiga/demo/analyze/analyzer.py | import copy
# Demo libs
import spiga.demo.analyze.extract.processor as pr
class VideoAnalyzer:
    """Runs an object tracker plus an optional feature processor per frame."""

    def __init__(self, tracker, processor=pr.EmptyProcessor()):
        # NOTE: the default processor instance is shared across analyzers
        # (evaluated once at def time); EmptyProcessor holds no state.
        self.tracker = tracker
        self.processor = processor
        self.tracked_obj = []

    def process_frame(self, image):
        """Track objects in the frame, extract their features and return them."""
        frame = copy.copy(image)
        self.tracked_obj = self.tracker.process_frame(frame, self.tracked_obj)
        if len(self.tracked_obj) > 0:
            self.tracked_obj = self.processor.process_frame(frame, self.tracked_obj)
        self.tracked_obj = self._add_attributes()
        return self.tracked_obj

    def plot_features(self, image, plotter, show_attributes):
        """Draw every tracked object's selected features on the image."""
        for tracked in self.tracked_obj:
            image = tracked.plot_features(image, plotter, show_attributes)
        return image

    def get_attributes(self, names):
        """Collect attribute(s) from every tracked object.

        A single string returns a flat list of values; a list of names
        returns a dict mapping each name to its list of values.
        """
        if isinstance(names, str):
            return [tracked.get_attributes(names) for tracked in self.tracked_obj]
        attributes = {}
        for name in names:
            attributes[name] = [tracked.get_attributes(name) for tracked in self.tracked_obj]
        return attributes

    def _add_attributes(self):
        """Register processor/tracker attribute names and drawers on new objects."""
        for tracked in self.tracked_obj:
            if not tracked.has_processor():
                tracked.attributes += self.processor.attributes
                tracked.attributes += self.tracker.attributes
                tracked.drawers.append(self.processor.plot_features)
                tracked.drawers.append(self.tracker.plot_features)
        return self.tracked_obj
| 1,697 | 30.444444 | 84 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/analyze/features/face.py | import numpy as np
# Demo libs
from spiga.demo.analyze.features.basic import ObjectAnalyzed
class Face(ObjectAnalyzed):
    """Tracked face: geometry filled in by the tracker (bbox, key landmarks,
    identity) and by processors (dense landmarks, head pose, ...)."""

    def __init__(self):
        super().__init__()
        self.bbox = np.zeros(5)                 # [x1, y1, x2, y2, score]
        self.key_landmarks = - np.ones((5, 2))  # detector keypoints; -1 until detected
        self.landmarks = None                   # dense landmarks, set by a processor
        self.face_id = -1                       # tracker identity; -1 = unassigned
        self.past_states = []                   # presumably a state-history buffer -- TODO confirm usage
        self.num_past_states = 5                # intended history length -- not enforced here
| 377 | 17 | 60 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/features/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/analyze/features/basic.py |
class ObjectAnalyzed:
    """Base container for objects enriched by processors and trackers."""

    def __init__(self):
        # Processor addons: attribute names registered by processors/trackers
        # and the drawer callbacks that plot them.
        self.attributes = []
        self.drawers = []

    def has_processor(self):
        """True once at least one processor has registered attributes."""
        return len(self.attributes) > 0

    def plot_features(self, image, plotter, show_attributes):
        """Apply every registered drawer to the image, in registration order."""
        for draw in self.drawers:
            image = draw(image, self, plotter, show_attributes)
        return image

    def get_attributes(self, names=None):
        """Read registered attributes by name.

        A single string returns the value directly ([] when absent);
        None or a list of names returns a dict of the attributes found.
        """
        single_name = isinstance(names, str)
        if names is None:
            names = self.attributes
        elif single_name:
            names = [names]
        attributes = {}
        attribute = []
        for name in names:
            # Only expose names that are both registered and actually set
            if name in self.attributes and name in self.__dict__.keys():
                attribute = getattr(self, name)
                attributes[name] = attribute
        return attribute if single_name else attributes
SPIGA | SPIGA-main/spiga/demo/analyze/track/tracker.py |
class Tracker:
    """
    Object detection and tracking interface in a video stream
    """

    def __init__(self):
        # Attribute names the tracker fills in on tracked objects
        self.attributes = []

    def process_frame(self, image, tracked_obj):
        """
        Detect and track objects in the input image.
        :param image: OpenCV image.
        :param tracked_obj: List with the objects found.
        :return: the updated list of tracked objects.
        """
        raise NotImplementedError()

    def plot_features(self, image, features, plotter, show_attributes):
        """
        Visualize objects detected in the input image.
        :param image: OpenCV image.
        :param features: List of object features detect after processing the frame.
        :param plotter: Plotter interface.
        :param show_attributes: Selected object attributes to be displayed.
        :return: the annotated image.
        """
        raise NotImplementedError()
| 831 | 31 | 83 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/track/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/analyze/track/get_tracker.py | # Demo libs
import spiga.demo.analyze.track.retinasort.zoo as zoo_rs
zoos = [zoo_rs]
def get_tracker(model_name):
    """Resolve a tracker by name across every registered zoo.

    :raises NotImplementedError: when no zoo knows the requested name.
    """
    for zoo in zoos:
        candidate = zoo.get_tracker(model_name)
        if candidate is not None:
            return candidate
    raise NotImplementedError('Tracker name not available')
| 298 | 20.357143 | 59 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/track/retinasort/face_tracker.py | import numpy as np
# Third party algorithms. Implementation maintained by SPIGA authors.
import sort_tracker
import retinaface
# My libs
import spiga.demo.analyze.track.retinasort.config as cfg
import spiga.demo.analyze.track.tracker as tracker
import spiga.demo.analyze.features.face as ft_face
class RetinaSortTracker(tracker.Tracker):
    """Face tracker combining a RetinaFace detector with SORT data association."""

    def __init__(self, config=cfg.cfg_retinasort):
        super().__init__()
        # RetinaFace detector configured from the 'retina' section
        self.detector = retinaface.RetinaFaceDetector(model=config['retina']['model_name'],
                                                      extra_features=config['retina']['extra_features'],
                                                      cfg_postreat=config['retina']['postreat'])
        # SORT associator configured from the 'sort' section
        self.associator = sort_tracker.Sort(max_age=config['sort']['max_age'],
                                            min_hits=config['sort']['min_hits'],
                                            iou_threshold=config['sort']['iou_threshold'])
        self.obj_type = ft_face.Face
        # Attributes this tracker fills in on each tracked face
        self.attributes += ['bbox', 'face_id', 'key_landmarks']

    def process_frame(self, image, tracked_obj):
        """Detect faces, associate them with existing tracks and update features."""
        features = self.detector.inference(image)
        bboxes = features['bbox']
        # Encode each detection's index in its score column so SORT preserves
        # the detection -> track mapping through association.
        bboxes = self._code_bbox_idx(bboxes)
        bboxes_id = self.associator.update(bboxes)
        bboxes_id, bbox_idx = self._decode_bbox_idx(bboxes_id)
        final_tracked_obj = []
        for idx, bbox in enumerate(bboxes_id):
            founded_flag = False
            # Reuse the existing object whose track id matches
            for past_obj in tracked_obj:
                if past_obj.face_id == bbox[-1]:
                    past_obj.bbox = bbox[:5]
                    past_obj = self._update_extra_features(past_obj, features, bbox_idx[idx])
                    final_tracked_obj.append(past_obj)
                    tracked_obj.remove(past_obj)
                    founded_flag = True
                    break
            if not founded_flag:
                # Unseen track id: create a fresh Face object
                new_obj = self.obj_type()
                new_obj.bbox = bbox[:5]
                new_obj.face_id = bbox[5]
                new_obj = self._update_extra_features(new_obj, features, bbox_idx[idx])
                final_tracked_obj.append(new_obj)
        return final_tracked_obj

    def plot_features(self, image, features, plotter, show_attributes):
        """Draw the bbox and/or the face id label of a single tracked face."""
        if 'bbox' in show_attributes:
            image = plotter.bbox.draw_bbox(image, features.bbox)
        if 'face_id' in show_attributes:
            text_id = 'Face Id: %i' % features.face_id
            image = plotter.bbox.draw_bbox_text(image, features.bbox, text_id, offset=(0, -10), color=plotter.basic.colors['blue'])
            image = plotter.bbox.draw_bbox_line(image, features.bbox)
        return image

    def _code_bbox_idx(self, bboxes):
        # Stash each detection index in the integer part of the score column:
        # score + (idx - 0.001). Assumes scores lie in (0, 1] -- the -0.001
        # keeps a score of exactly 1.0 from spilling into the next index.
        bboxes = np.array(bboxes)
        bboxes[:, 4] += (np.arange(len(bboxes)) - 0.001)
        return bboxes

    def _decode_bbox_idx(self, bboxes):
        # Inverse of _code_bbox_idx: integer part -> detection index,
        # fractional part (+0.001) -> original score.
        bboxes = np.array(bboxes)
        idx = bboxes[:, 4].astype(int)
        bboxes[:, 4] = bboxes[:, 4] % 1 + 0.001
        return bboxes, idx

    def _update_extra_features(self, obj, features, idx):
        # Copy the detector's extra outputs (5-point landmarks) onto the object
        obj.key_landmarks = features['landmarks'][idx]
        return obj
| 3,202 | 37.590361 | 131 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/track/retinasort/zoo.py | # My libs
import spiga.demo.analyze.track.retinasort.face_tracker as tr
import spiga.demo.analyze.track.retinasort.config as cfg_tr
def get_tracker(model_name):
    """Instantiate a RetinaSort tracker variant by name; None when unknown.

    Known names: 'RetinaSort' (MobileNet backbone, default config),
    'RetinaSort_Res50' (ResNet50), plus the CAV3D / AV16 dataset configs
    (https://ict.fbk.eu/units/speechtek/cav3d/).
    """
    # Default configuration (MobileNet backbone)
    if model_name == 'RetinaSort':
        return tr.RetinaSortTracker()
    # Named configurations sharing the ResNet50 backbone
    named_cfgs = {
        'RetinaSort_Res50': cfg_tr.cfg_retinasort_res50,
        'RetinaSort_cav3d': cfg_tr.cfg_retinasort_cav3d,
        'RetinaSort_av16': cfg_tr.cfg_retinasort_av16,
    }
    if model_name in named_cfgs:
        return tr.RetinaSortTracker(named_cfgs[model_name])
    return None
| 744 | 30.041667 | 64 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/track/retinasort/config.py |
# RetinaSort tracker configurations: RetinaFace detector settings ('retina')
# plus SORT data-association parameters ('sort').

# Default: lightweight MobileNet-0.25 backbone, short-lived tracks (max_age 1).
cfg_retinasort = {
    'retina': {
        'model_name': 'mobile0.25',
        'extra_features': ['landmarks'],
        'postreat': {
            'resize': 1.,
            'score_thr': 0.75,
            'top_k': 5000,
            'nms_thr': 0.4,
            'keep_top_k': 50}
    },
    'sort': {
        'max_age': 1,
        'min_hits': 3,
        'iou_threshold': 0.3,
    }
}

# Same as the default but with the heavier ResNet50 backbone.
cfg_retinasort_res50 = {
    'retina': {
        'model_name': 'resnet50',
        'extra_features': ['landmarks'],
        'postreat': {
            'resize': 1.,
            'score_thr': 0.75,
            'top_k': 5000,
            'nms_thr': 0.4,
            'keep_top_k': 50}
    },
    'sort': {
        'max_age': 1,
        'min_hits': 3,
        'iou_threshold': 0.3,
    }
}

# Config CAV3D (https://ict.fbk.eu/units/speechtek/cav3d/): stricter score
# threshold, looser NMS and long-lived tracks (max_age 90).
cfg_retinasort_cav3d = {
    'retina': {
        'model_name': 'resnet50',
        'extra_features': ['landmarks'],
        'postreat': {
            'resize': 1.,
            'score_thr': 0.95,
            'top_k': 5000,
            'nms_thr': 0.8,
            'keep_top_k': 50}
    },
    'sort': {
        'max_age': 90,
        'min_hits': 3,
        'iou_threshold': 0.3,
    }
}

# Config AV16 (https://ict.fbk.eu/units/speechtek/cav3d/): like CAV3D but
# with the default score threshold.
cfg_retinasort_av16 = {
    'retina': {
        'model_name': 'resnet50',
        'extra_features': ['landmarks'],
        'postreat': {
            'resize': 1.,
            'score_thr': 0.75,
            'top_k': 5000,
            'nms_thr': 0.8,
            'keep_top_k': 50}
    },
    'sort': {
        'max_age': 90,
        'min_hits': 3,
        'iou_threshold': 0.3,
    }
}
| 1,562 | 18.060976 | 40 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/track/retinasort/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/analyze/extract/spiga_processor.py | # SPIGA library
import spiga.inference.config as model_cfg
from spiga.inference.framework import SPIGAFramework
# Demo modules
import spiga.demo.analyze.extract.processor as pr
class SPIGAProcessor(pr.Processor):
    """Processor wrapping the SPIGA network: dense landmarks and head pose."""

    def __init__(self,
                 dataset='wflw',
                 features=('lnd', 'pose'),
                 gpus=[0]):
        super().__init__()
        # Configure and load the SPIGA framework
        self.processor_cfg = model_cfg.ModelConfig(dataset)
        self.processor = SPIGAFramework(self.processor_cfg, gpus=gpus)
        # Register the attributes this processor provides
        if 'lnd' in features:
            self.attributes += ['landmarks', 'landmarks_ids']
        if 'pose' in features:
            self.attributes.append('headpose')

    def process_frame(self, frame, tracked_obj):
        """Run SPIGA on every tracked face and attach landmarks / head pose."""
        # Convert tracked bboxes from (x1, y1, x2, y2) to (x, y, w, h)
        bboxes = [[obj.bbox[0], obj.bbox[1],
                   obj.bbox[2] - obj.bbox[0], obj.bbox[3] - obj.bbox[1]]
                  for obj in tracked_obj]
        features = self.processor.inference(frame, bboxes)
        for obj_idx, obj in enumerate(tracked_obj):
            # Landmarks output
            if 'landmarks' in self.attributes:
                obj.landmarks = features['landmarks'][obj_idx]
                obj.landmarks_ids = self.processor_cfg.dataset.ldm_ids
            # Headpose output
            if 'headpose' in self.attributes:
                obj.headpose = features['headpose'][obj_idx]
        return tracked_obj

    def plot_features(self, image, features, plotter, show_attributes):
        """Draw the requested SPIGA outputs for a single face."""
        if 'landmarks' in self.attributes and 'landmarks' in show_attributes:
            x1, y1, x2, y2 = features.bbox[:4]
            # Scale landmark radius with the face width (minimum 1 px)
            thick = int(plotter.landmarks.thickness['lnd'] * (x2 - x1) / 200 + 0.5)
            if thick == 0:
                thick = 1
            image = plotter.landmarks.draw_landmarks(image, features.landmarks, thick=thick)
        if 'headpose' in self.attributes and 'headpose' in show_attributes:
            image = plotter.hpose.draw_headpose(image, features.bbox[:5],
                                                features.headpose[:3], features.headpose[3:], euler=True)
        return image
| 2,243 | 35.786885 | 105 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/extract/processor.py |
class Processor:
    """Interface for feature extractors that enrich tracked objects."""

    def __init__(self):
        # Attribute names this processor fills in on tracked objects
        self.attributes = []

    def process_frame(self, frame, tracked_obj):
        """
        Process tracked objects to extract interesting features.
        :param frame: OpenCV image.
        :param tracked_obj: List with the objects to be processed.
        :return: the updated list of tracked objects.
        """
        raise NotImplementedError()

    def plot_features(self, image, features, plotter, show_attributes):
        """
        Visualize objects detected in the input image.
        :param image: OpenCV image.
        :param features: List of object features detect after processing the frame.
        :param plotter: Plotter interface.
        :param show_attributes: Selected object attributes to be displayed.
        :return: the annotated image.
        """
        raise NotImplementedError()
class EmptyProcessor(Processor):
    """No-op processor: passes objects and images through untouched."""

    def __init__(self):
        super().__init__()

    def process_frame(self, frame, tracked_obj):
        # Nothing to extract
        return tracked_obj

    def plot_features(self, image, features, plotter, show_attributes):
        # Nothing to draw
        return image
class ProcessorsGroup(Processor):
    """Composite processor: chains several processors over the same objects."""

    def __init__(self):
        super().__init__()
        self.group = []

    def process_frame(self, frame, tracked_obj):
        """Run every processor of the group in registration order."""
        for processor in self.group:
            tracked_obj = processor.process_frame(frame, tracked_obj)
        return tracked_obj

    def plot_features(self, image, features, plotter, show_attributes):
        """Let every processor of the group draw its features."""
        for processor in self.group:
            image = processor.plot_features(image, features, plotter, show_attributes)
        return image

    def add_processor(self, processor):
        """Append a processor and expose its attributes at group level."""
        self.group.append(processor)
        self.attributes += processor.attributes

    def get_number_of_processors(self):
        """Number of processors currently in the group."""
        return len(self.group)
| 1,726 | 28.271186 | 83 | py |
SPIGA | SPIGA-main/spiga/demo/analyze/extract/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/demo/utils/frames2video.py | import os
import cv2
def main():
    """Command-line entry point: convert a directory of frames into a video."""
    import argparse
    parser = argparse.ArgumentParser(description='Frames to video converter')
    parser.add_argument('frames', type=str, help='Frames directory')
    parser.add_argument('vidname', type=str, help='Output video name')
    parser.add_argument('-o', '--outpath', type=str, default=None, help='Save record')
    parser.add_argument('--fps', type=int, default=30, help='Frames per second')
    parser.add_argument('--shape', nargs='+', type=int, help='Visualizer shape (W,H)')
    args = parser.parse_args()

    # Validate the optional output shape (must be exactly two ints)
    video_shape = None
    if args.shape:
        if len(args.shape) != 2:
            raise ValueError('--shape requires two values: width and height. Ej: --shape 256 256')
        video_shape = tuple(args.shape)

    frames2video(args.frames, args.vidname, video_path=args.outpath, video_shape=video_shape, fps=args.fps)
def frames2video(frames_path, video_name, video_path=None, video_shape=None, fps=30):
    """Encode the image frames found in a directory into an MP4 video.

    :param frames_path: directory containing the frame images (encoded in name order).
    :param video_name: output file name without extension.
    :param video_path: output directory; defaults to '<frames_path>/vid_out/'.
    :param video_shape: (width, height) of the video; defaults to the first frame's shape.
    :param fps: frames per second of the output video.
    """
    frames_names = sorted(os.listdir(frames_path))

    # Default output directory next to the frames
    if video_path is None:
        video_path = frames_path + '/vid_out/'
    if not os.path.exists(video_path):
        os.makedirs(video_path)
    video_file = os.path.join(video_path, video_name + '.mp4')

    # When no shape is given, the writer is created lazily from the first frame
    if video_shape is None:
        video_writer = None
    else:
        vid_w, vid_h = video_shape
        video_writer = cv2.VideoWriter(video_file, cv2.VideoWriter_fourcc(*'MP4V'), fps, (vid_w, vid_h))

    for frame_name in frames_names:
        # Skip files without a known image extension
        if frame_name.split('.')[-1] not in ['jpg', 'jpeg', 'png', 'tif', 'tiff', 'eps', 'bmp', 'gif']:
            print('File %s format doesnt match with an image ' % frame_name)
            continue
        frame_file = os.path.join(frames_path, frame_name)
        frame = cv2.imread(frame_file)
        if video_writer is None:
            vid_h, vid_w = frame.shape[:2]
            video_writer = cv2.VideoWriter(video_file, cv2.VideoWriter_fourcc(*'MP4V'), fps, (vid_w, vid_h))
        # Resize frames that do not match the video shape
        if frame.shape[:2] != (vid_h, vid_w):
            frame = cv2.resize(frame, (vid_w, vid_h))
        video_writer.write(frame)

    # Bug fix: with no valid frame (and no explicit shape) the writer was
    # never created and the unconditional .release() crashed on None.
    if video_writer is not None:
        video_writer.release()
# Run the converter when executed as a script
if __name__ == '__main__':
    main()
| 2,184 | 33.68254 | 108 | py |
SPIGA | SPIGA-main/spiga/demo/utils/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/data/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/data/loaders/alignments.py | import os
import json
import cv2
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from spiga.data.loaders.transforms import get_transformers
class AlignmentsDataset(Dataset):
    '''Loads datasets of images with landmarks and bounding boxes.
    '''

    def __init__(self,
                 database,
                 json_file,
                 images_dir,
                 image_size=(128, 128),
                 transform=None,
                 indices=None,
                 debug=False):
        """
        :param database: class DatabaseStruct containing all the specifics of the database
        :param json_file: path to the json file which contains the names of the images, landmarks, bounding boxes, etc
        :param images_dir: path of the directory containing the images.
        :param image_size: tuple like e.g. (128, 128)
        :param transform: composition of transformations that will be applied to the samples.
        :param debug: bool if True, samples keep the raw annotations for debugging purposes.
        :param indices: If it is a list of indices, allows to work with the subset of
                        items specified by the list. If it is None, the whole set is used.
        """
        self.database = database
        self.images_dir = images_dir
        self.transform = transform
        self.image_size = image_size
        self.indices = indices
        self._imgs_dict = None  # optional in-memory image cache keyed by sample index
        self.debug = debug
        with open(json_file) as jsonfile:
            self.data = json.load(jsonfile)

    def __len__(self):
        '''Returns the length of the dataset
        '''
        if self.indices is None:
            return len(self.data)
        else:
            return len(self.indices)

    def __getitem__(self, sample_idx):
        '''Returns sample of the dataset of index idx'''
        # To allow work with a subset
        if self.indices is not None:
            sample_idx = self.indices[sample_idx]

        # Load sample image (from the cache when available)
        img_name = os.path.join(self.images_dir, self.data[sample_idx]['imgpath'])
        if not self._imgs_dict:
            image_cv = cv2.imread(img_name)
        else:
            image_cv = self._imgs_dict[sample_idx]

        # Some images are B&W. We make sure that any image has three channels.
        if len(image_cv.shape) == 2:
            image_cv = np.repeat(image_cv[:, :, np.newaxis], 3, axis=-1)
        # Some images have alpha channel
        image_cv = image_cv[:, :, :3]
        image_cv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image_cv)

        # Load sample anns
        ids = np.array(self.data[sample_idx]['ids'])
        landmarks = np.array(self.data[sample_idx]['landmarks'])
        vis = np.array(self.data[sample_idx]['visible'])
        headpose = self.data[sample_idx]['headpose']
        # Bug fix: keep a real None when the annotation has no bbox.
        # The previous eager np.array(...) turned None into a 0-d object
        # array, making the 'bbox is None' fallback below unreachable.
        bbox_ann = self.data[sample_idx]['bbox']
        bbox = np.array(bbox_ann) if bbox_ann is not None else None

        # Generate bbox if need it
        if bbox is None:
            # Compute bbox using the visible landmarks: [x, y, w, h]
            aux = landmarks[vis == 1.0]
            bbox = np.zeros(4)
            bbox[0] = min(aux[:, 0])
            bbox[1] = min(aux[:, 1])
            bbox[2] = max(aux[:, 0]) - bbox[0]
            bbox[3] = max(aux[:, 1]) - bbox[1]

        # Remap annotated landmarks onto the database landmark-id layout,
        # masking out ids that have no annotation in this sample.
        mask_ldm = np.ones(self.database.num_landmarks)
        if not self.database.ldm_ids == ids.tolist():
            new_ldm = np.zeros((self.database.num_landmarks, 2))
            new_vis = np.zeros(self.database.num_landmarks)
            xyv = np.hstack((landmarks, vis[np.newaxis,:].T))
            ids_dict = dict(zip(ids.astype(int).astype(str), xyv))
            for pos, identifier in enumerate(self.database.ldm_ids):
                if str(identifier) in ids_dict:
                    x, y, v = ids_dict[str(identifier)]
                    new_ldm[pos] = [x,y]
                    new_vis[pos] = v
                else:
                    mask_ldm[pos] = 0
            landmarks = new_ldm
            vis = new_vis

        sample = {'image': image,
                  'sample_idx': sample_idx,
                  'imgpath': img_name,
                  'ids_ldm': np.array(self.database.ldm_ids),
                  'bbox': bbox,
                  'bbox_raw': bbox,
                  'landmarks': landmarks,
                  'visible': vis.astype(np.float64),
                  'mask_ldm': mask_ldm,
                  'imgpath_local': self.data[sample_idx]['imgpath'],
                  }

        # Keep the pre-transform annotations when debugging
        if self.debug:
            sample['landmarks_ori'] = landmarks
            sample['visible_ori'] = vis.astype(np.float64)
            sample['mask_ldm_ori'] = mask_ldm
            if headpose is not None:
                sample['headpose_ori'] = np.array(headpose)

        if self.transform:
            sample = self.transform(sample)
        return sample
def get_dataset(data_config, pretreat=None, debug=False):
    """Build an AlignmentsDataset from a dataloader configuration.

    Args:
        data_config: configuration object (see dl_config.AlignConfig).
        pretreat: optional extra transform appended after the standard ones.
        debug: forwarded to the dataset to keep extra annotation fields.

    Returns:
        The configured AlignmentsDataset.
    """
    transform_list = get_transformers(data_config)
    if pretreat is not None:
        transform_list.append(pretreat)
    return AlignmentsDataset(data_config.database,
                             data_config.anns_file,
                             data_config.image_dir,
                             image_size=data_config.image_size,
                             transform=transforms.Compose(transform_list),
                             indices=data_config.ids,
                             debug=debug)
| 5,540 | 33.849057 | 118 | py |
SPIGA | SPIGA-main/spiga/data/loaders/dataloader.py | from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import spiga.data.loaders.alignments as zoo_alignments
zoos = [zoo_alignments]
def get_dataset(data_config, pretreat=None, debug=False):
    """Query each registered dataset zoo and return the first dataset built.

    Raises:
        NotImplementedError: if no zoo can handle the given configuration.
    """
    for dataset_zoo in zoos:
        candidate = dataset_zoo.get_dataset(data_config, pretreat=pretreat, debug=debug)
        if candidate is not None:
            return candidate
    raise NotImplementedError('Dataset not available')
def get_dataloader(batch_size, data_config, pretreat=None, sampler_cfg=None, debug=False):
    """Create a DataLoader (and its dataset) from a dataloader configuration.

    Args:
        batch_size: samples per batch.
        data_config: configuration object (see dl_config.AlignConfig).
        pretreat: optional extra transform appended to the augmentor list.
        sampler_cfg: optional distributed config with attributes ``world_size``
            and ``rank``. When given, shuffling is delegated to the
            DistributedSampler and disabled in the DataLoader itself.
        debug: forwarded to the dataset.

    Returns:
        Tuple ``(dataloader, dataset)``.
    """
    dataset = get_dataset(data_config, pretreat=pretreat, debug=debug)

    # Drop a trailing single-sample batch when shuffling is enabled
    # (presumably to avoid batch-size-1 issues such as BatchNorm -- NOTE(review): confirm).
    drop_last_batch = (len(dataset) % batch_size) == 1 and bool(data_config.shuffle)

    shuffle = data_config.shuffle
    sampler = None
    if sampler_cfg is not None:
        sampler = DistributedSampler(dataset, num_replicas=sampler_cfg.world_size, rank=sampler_cfg.rank)
        shuffle = False  # The sampler owns the shuffling in distributed mode.

    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=shuffle,
                            num_workers=data_config.num_workers,
                            pin_memory=True,
                            drop_last=drop_last_batch,
                            sampler=sampler)
    return dataloader, dataset
| 1,360 | 31.404762 | 105 | py |
SPIGA | SPIGA-main/spiga/data/loaders/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/data/loaders/dl_config.py | import os
import json
import pkg_resources
from collections import OrderedDict
# Default data paths
db_img_path = pkg_resources.resource_filename('spiga', 'data/databases')
db_anns_path = pkg_resources.resource_filename('spiga', 'data/annotations') + "/{database}/{file_name}.json"
class AlignConfig:
    """Configuration holder for the facial-alignment dataloaders.

    Groups dataset selection, augmentation hyper-parameters, POSIT pose
    options and ground-truth map settings. The ``database``, ``anns_file``
    and ``image_dir`` fields are derived from ``database_name`` and
    ``working_mode`` by ``_update_database()``.
    """

    def __init__(self, database_name, mode='train'):
        """
        Args:
            database_name: dataset identifier ('wflw', '300wpublic', ...).
            mode: annotation split ('train', 'test', ...). Any mode other
                than 'train' disables augmentation and shuffling.
        """
        # Dataset
        self.database_name = database_name
        self.working_mode = mode
        self.database = None    # Set at self._update_database()
        self.anns_file = None   # Set at self._update_database()
        self.image_dir = None   # Set at self._update_database()
        self._update_database()
        self.image_size = (256, 256)
        self.ftmap_size = (256, 256)

        # Dataloaders
        self.ids = None         # List of a subset if need it
        self.shuffle = True     # Shuffle samples
        self.num_workers = 4    # Threads

        # Posit
        self.generate_pose = True   # Generate pose parameters from landmarks
        self.focal_ratio = 1.5      # Camera matrix focal length ratio
        self.posit_max_iter = 100   # Refinement iterations
        # Subset of robust ids in the 3D model to use in posit.
        # 'None' to use all the available model landmarks.
        self.posit_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                          14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]

        # Data augmentation
        # Control augmentations with the following list, crop to self.img_size is mandatory, check target_dist param.
        if mode == 'train':
            self.aug_names = ['flip', 'rotate_scale', 'occlusion', 'lighting', 'blur']
        else:
            self.aug_names = []
            self.shuffle = False
        # Flip
        self.hflip_prob = 0.5
        # Rotation
        self.angle_range = 45.
        # Scale
        self.scale_max = 0.15
        self.scale_min = -0.15
        # Translation
        self.trl_ratio = 0.05   # Translation augmentation
        # Crop target rescale
        self.target_dist = 1.6  # Target distance zoom in/out around face. Default: 1.
        # Occlusion
        self.occluded_max_len = 0.4
        self.occluded_min_len = 0.1
        self.occluded_covar_ratio = 2.25**0.5
        # Lighting
        self.hsv_range_min = [-0.5, -0.5, -0.5]
        self.hsv_range_max = [0.5, 0.5, 0.5]
        # Blur
        self.blur_prob = 0.5
        self.blur_kernel_range = [0, 2]

        # Heatmaps 2D
        self.sigma2D = 1.5
        self.heatmap2D_norm = False

        # Boundaries
        self.sigmaBD = 1

    def update(self, params_dict):
        """Overwrite known configuration fields from ``params_dict``.

        Unknown keys are reported via ``warnings.warn`` (the previous code
        built a ``Warning`` instance without raising or emitting it, which
        was a silent no-op). Derived database fields are refreshed afterwards.
        """
        import warnings  # local import: only needed on the unknown-key path
        state_dict = self.state_dict()
        for k, v in params_dict.items():
            if k in state_dict or hasattr(self, k):
                setattr(self, k, v)
            else:
                warnings.warn('Unknown option: {}: {}'.format(k, v))
        self._update_database()

    def state_dict(self, tojson=False):
        """Return the public (non-underscore) attributes as an OrderedDict.

        Args:
            tojson: when True, skip fields that are not JSON-serializable
                (the DatabaseStruct held in ``database``).
        """
        state_dict = OrderedDict()
        for k in self.__dict__.keys():
            if not k.startswith('_'):
                if tojson and k in ['database']:
                    continue
                state_dict[k] = getattr(self, k)
        return state_dict

    def _update_database(self):
        # Rebuild the derived dataset fields from name + working mode.
        self.database = DatabaseStruct(self.database_name)
        self.anns_file = db_anns_path.format(database=self.database_name, file_name=self.working_mode)
        self.image_dir = self._get_imgdb_path()

    def _get_imgdb_path(self):
        """Map the database name to its image directory (None if unknown)."""
        img_dir = None
        if self.database_name in ['300wpublic', '300wprivate']:
            img_dir = db_img_path + '/300w/'
        elif self.database_name in ['aflw19', 'merlrav']:
            img_dir = db_img_path + '/aflw/data/'
        elif self.database_name in ['cofw', 'cofw68']:
            img_dir = db_img_path + '/cofw/'
        elif self.database_name in ['wflw']:
            img_dir = db_img_path + '/wflw/'
        return img_dir

    def __str__(self):
        state_dict = self.state_dict()
        text = 'Dataloader {\n'
        for k, v in state_dict.items():
            if isinstance(v, DatabaseStruct):
                text += '\t{}: {}'.format(k, str(v).expandtabs(12))
            else:
                text += '\t{}: {}\n'.format(k, v)
        text += '\t}\n'
        return text
class DatabaseStruct:
    """Static description of a landmark database.

    Loads landmark ids, the horizontal-flip reordering and (optionally) the
    edge matrix from the database's ``db_info.json`` annotation file.
    """

    def __init__(self, database_name):
        """
        Args:
            database_name: dataset identifier matching an annotations folder.

        Raises:
            ValueError: if the database has no ``db_info.json`` file.
        """
        self.name = database_name
        self.ldm_ids, self.ldm_flip_order, self.ldm_edges_matrix = self._get_database_specifics()
        self.num_landmarks = len(self.ldm_ids)
        # Some databases define no edge matrix; report zero edges instead of
        # crashing on ``None[0]`` as the previous code did.
        if self.ldm_edges_matrix is not None:
            self.num_edges = len(self.ldm_edges_matrix[0]) - 1
        else:
            self.num_edges = 0
        self.fields = ['imgpath', 'bbox', 'headpose', 'ids', 'landmarks', 'visible']

    def _get_database_specifics(self):
        """Return (ldm_ids, ldm_flip_order, ldm_edges_matrix) from db_info.json.

        ``ldm_edges_matrix`` is None when the file does not define it.
        """
        database_name = self.name
        db_info_file = db_anns_path.format(database=database_name, file_name='db_info')
        ldm_edges_matrix = None

        if os.path.exists(db_info_file):
            with open(db_info_file) as jsonfile:
                db_info = json.load(jsonfile)

            ldm_ids = db_info['ldm_ids']
            ldm_flip_order = db_info['ldm_flip_order']
            if 'ldm_edges_matrix' in db_info.keys():
                ldm_edges_matrix = db_info['ldm_edges_matrix']
        else:
            # (fixed missing space after the database name in the message)
            raise ValueError('Database ' + database_name + ' specifics not defined. Missing db_info.json')

        return ldm_ids, ldm_flip_order, ldm_edges_matrix

    def state_dict(self):
        """Return the public (non-underscore) attributes as an OrderedDict."""
        state_dict = OrderedDict()
        for k in self.__dict__.keys():
            if not k.startswith('_'):
                state_dict[k] = getattr(self, k)
        return state_dict

    def __str__(self):
        state_dict = self.state_dict()
        text = 'Database {\n'
        for k, v in state_dict.items():
            text += '\t{}: {}\n'.format(k, v)
        text += '\t}\n'
        return text
| 5,944 | 33.766082 | 117 | py |
SPIGA | SPIGA-main/spiga/data/loaders/transforms.py | import cv2
import numpy as np
import torch
from spiga.data.loaders.augmentors.modern_posit import PositPose
from spiga.data.loaders.augmentors.heatmaps import Heatmaps
from spiga.data.loaders.augmentors.boundary import AddBoundary
from spiga.data.loaders.augmentors.landmarks import HorizontalFlipAug, RSTAug, OcclusionAug, \
LightingAug, BlurAug, TargetCropAug
def get_transformers(data_config):
    """Build the ordered list of sample transforms for a configuration.

    Order matters: geometric/photometric augmentations first, then the
    mandatory crop to the network input size, OpenCV layout conversion,
    optional ground-truth maps and finally the POSIT pose generator.
    """
    enabled = data_config.aug_names
    pipeline = []

    # Optional data augmentation
    if 'flip' in enabled:
        pipeline.append(HorizontalFlipAug(data_config.database.ldm_flip_order, data_config.hflip_prob))
    if 'rotate_scale' in enabled:
        pipeline.append(RSTAug(data_config.angle_range, data_config.scale_min,
                               data_config.scale_max, data_config.trl_ratio))
    if 'occlusion' in enabled:
        pipeline.append(OcclusionAug(data_config.occluded_min_len,
                                     data_config.occluded_max_len,
                                     data_config.database.num_landmarks))
    if 'lighting' in enabled:
        pipeline.append(LightingAug(data_config.hsv_range_min, data_config.hsv_range_max))
    if 'blur' in enabled:
        pipeline.append(BlurAug(data_config.blur_prob, data_config.blur_kernel_range))

    # Mandatory crop to the input size, then OpenCV-style image layout.
    pipeline.append(TargetCropAug(data_config.image_size, data_config.ftmap_size, data_config.target_dist))
    pipeline.append(ToOpencv())

    # Optional ground-truth maps
    if 'heatmaps2D' in enabled:
        pipeline.append(Heatmaps(data_config.database.num_landmarks, data_config.ftmap_size,
                                 data_config.sigma2D, norm=data_config.heatmap2D_norm))
    if 'boundaries' in enabled:
        pipeline.append(AddBoundary(num_landmarks=data_config.database.num_landmarks,
                                    map_size=data_config.ftmap_size,
                                    sigma=data_config.sigmaBD))

    # Pose generator
    if data_config.generate_pose:
        pipeline.append(PositPose(data_config.database.ldm_ids,
                                  focal_ratio=data_config.focal_ratio,
                                  selected_ids=data_config.posit_ids,
                                  max_iter=data_config.posit_max_iter))
    return pipeline
class ToOpencv:
    """Convert the sample's PIL image to an OpenCV-style BGR numpy array."""

    def __call__(self, sample):
        rgb_array = np.array(sample['image'])
        sample['image'] = cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
        return sample
class TargetCrop(TargetCropAug):
    # Convenience subclass: square crop where image size and map size coincide.
    def __init__(self, crop_size=256, target_dist=1.6):
        """Crop around the face bbox to a square ``crop_size`` output.

        Args:
            crop_size: output side length in pixels (width == height).
            target_dist: zoom factor applied around the face bbox.
        """
        super(TargetCrop, self).__init__(crop_size, crop_size, target_dist)
class AddModel3D(PositPose):
    """Transform that attaches the camera intrinsics and the rigid 3D face
    model to every sample (no pose fitting is performed here)."""

    def __init__(self, ldm_ids, ftmap_size=(256, 256), focal_ratio=1.5, totensor=False):
        """
        Args:
            ldm_ids: landmark ids of the database (selects the 3D model).
            ftmap_size: feature-map shape given as (y, x).
            focal_ratio: camera matrix focal length ratio.
            totensor: convert the matrices to torch float tensors.
        """
        super(AddModel3D, self).__init__(ldm_ids, focal_ratio=focal_ratio)
        img_bbox = [0, 0, ftmap_size[1], ftmap_size[0]]  # Shapes given are inverted (y,x)
        self.cam_matrix = self._camera_matrix(img_bbox)
        if totensor:
            self.cam_matrix = torch.tensor(self.cam_matrix, dtype=torch.float)
            self.model3d_world = torch.tensor(self.model3d_world, dtype=torch.float)

    def __call__(self, sample=None):
        """Add 'cam_matrix' and 'model3d' entries to ``sample``.

        The previous signature used a mutable default (``sample={}``) that
        shared one dict across calls; ``None`` keeps the call syntax
        identical while avoiding that aliasing.
        """
        if sample is None:
            sample = {}
        # Save intrinsic matrix and 3D model landmarks
        sample['cam_matrix'] = self.cam_matrix
        sample['model3d'] = self.model3d_world
        return sample
| 3,558 | 40.870588 | 109 | py |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/modern_posit.py | import os
import pkg_resources
import numpy as np
import cv2
# My libs
from spiga.data.loaders.augmentors.utils import rotation_matrix_to_euler
# Model file nomenclature
model_file_dft = pkg_resources.resource_filename('spiga', 'data/models3D') + '/mean_face_3D_{num_ldm}.txt'
class PositPose:
    """Sample transform that estimates head pose from 2D landmarks via POSIT.

    Fits the rigid 3D mean-face model to the labelled 2D landmarks and adds
    to the sample: 'cam_matrix' (intrinsics), 'model3d' (3D model points),
    'pose' (yaw, pitch, roll, tx, ty, tz) and 'model3d_proj' (model points
    projected with the estimated pose, divided by the image shape).
    """

    def __init__(self, ldm_ids, focal_ratio=1, selected_ids=None, max_iter=100,
                 fix_bbox=True, model_file=model_file_dft):
        """
        Args:
            ldm_ids: landmark ids of the database; their count selects the
                3D model file.
            focal_ratio: focal length ratio used to build the camera matrix.
            selected_ids: subset of model ids allowed in the POSIT fit
                (None -> use every available model landmark).
            max_iter: maximum POSIT refinement iterations.
            fix_bbox: center the camera matrix on the image when True,
                otherwise on the face bbox.
            model_file: template path of the 3D mean-face model file.
        """
        # Load 3D face model
        model3d_world, model3d_ids = self._load_world_shape(ldm_ids, model_file)

        # Generate id mask to pick only the robust landmarks for posit
        if selected_ids is None:
            model3d_mask = np.ones(len(ldm_ids))
        else:
            model3d_mask = np.zeros(len(ldm_ids))
            for index, posit_id in enumerate(model3d_ids):
                if posit_id in selected_ids:
                    model3d_mask[index] = 1

        self.ldm_ids = ldm_ids              # Ids from the database
        self.model3d_world = model3d_world  # Model data
        self.model3d_ids = model3d_ids      # Model ids
        self.model3d_mask = model3d_mask    # Model mask ids
        self.max_iter = max_iter            # Refinement iterations
        self.focal_ratio = focal_ratio      # Camera matrix focal length ratio
        self.fix_bbox = fix_bbox            # Camera matrix centered on image (False to centered on bbox)

    def __call__(self, sample):
        """Estimate the pose for one sample.

        Expects sample['image'] to expose ``.shape`` (i.e. already a numpy
        array, after ToOpencv) and uses 'landmarks' + 'mask_ldm' for the
        2D-3D correspondences.
        """
        landmarks = sample['landmarks']
        mask = sample['mask_ldm']

        # Camera matrix
        img_shape = np.array(sample['image'].shape)[0:2]
        if 'img2map_scale' in sample.keys():
            # NOTE(review): img_shape is (h, w) while 'img2map_scale' stores
            # (x, y) scales; equivalent only when both scales match -- verify.
            img_shape = img_shape * sample['img2map_scale']
        if self.fix_bbox:
            img_bbox = [0, 0, img_shape[1], img_shape[0]]  # Shapes given are inverted (y,x)
            cam_matrix = self._camera_matrix(img_bbox)
        else:
            bbox = sample['bbox']   # Scale error when ftshape and img_shape mismatch
            cam_matrix = self._camera_matrix(bbox)

        # Save intrinsic matrix and 3D model landmarks
        sample['cam_matrix'] = cam_matrix
        sample['model3d'] = self.model3d_world

        world_pts, image_pts = self._set_correspondences(landmarks, mask)
        if image_pts.shape[0] < 4:
            # POSIT needs at least four correspondences; fall back to identity pose.
            print('POSIT does not work without landmarks')
            rot_matrix, trl_matrix = np.eye(3, dtype=float), np.array([0, 0, 0])
        else:
            rot_matrix, trl_matrix = self._modern_posit(world_pts, image_pts, cam_matrix)

        # Pack (yaw, pitch, roll) + translation into a single pose vector.
        euler = rotation_matrix_to_euler(rot_matrix)
        sample['pose'] = np.array([euler[0], euler[1], euler[2], trl_matrix[0], trl_matrix[1], trl_matrix[2]])
        sample['model3d_proj'] = self._project_points(rot_matrix, trl_matrix, cam_matrix, norm=img_shape)
        return sample

    def _load_world_shape(self, ldm_ids, model_file):
        """Load the 3D mean-face model matching the database landmark ids."""
        return load_world_shape(ldm_ids, model_file=model_file)

    def _camera_matrix(self, bbox):
        """Build pinhole intrinsics centered on ``bbox`` with focal length
        proportional to the bbox size (scaled by ``focal_ratio``)."""
        focal_length_x = bbox[2] * self.focal_ratio
        focal_length_y = bbox[3] * self.focal_ratio
        face_center = (bbox[0] + (bbox[2] * 0.5)), (bbox[1] + (bbox[3] * 0.5))

        cam_matrix = np.array([[focal_length_x, 0, face_center[0]],
                               [0, focal_length_y, face_center[1]],
                               [0, 0, 1]])
        return cam_matrix

    def _set_correspondences(self, landmarks, mask):
        """Select the 2D/3D point pairs that are both labelled and robust."""
        # Correspondences using labelled and robust landmarks
        img_mask = np.logical_and(mask, self.model3d_mask)
        img_mask = img_mask.astype(bool)
        image_pts = landmarks[img_mask]
        world_pts = self.model3d_world[img_mask]
        return world_pts, image_pts

    def _modern_posit(self, world_pts, image_pts, cam_matrix):
        """Run the POSIT pose estimation (see module-level ``modern_posit``)."""
        return modern_posit(world_pts, image_pts, cam_matrix, self.max_iter)

    def _project_points(self, rot, trl, cam_matrix, norm=None):
        """Project the 3D model with pose (rot, trl); optionally divide the
        resulting pixel coordinates by ``norm``."""
        # Perspective projection model
        trl = np.expand_dims(trl, 1)
        extrinsics = np.concatenate((rot, trl), 1)
        proj_matrix = np.matmul(cam_matrix, extrinsics)

        # Homogeneous landmarks
        pts = self.model3d_world
        ones = np.ones(pts.shape[0])
        ones = np.expand_dims(ones, 1)
        pts_hom = np.concatenate((pts, ones), 1)

        # Project landmarks
        pts_proj = np.matmul(proj_matrix, pts_hom.T).T
        pts_proj = pts_proj / np.expand_dims(pts_proj[:, 2], 1)  # Lambda = 1

        if norm is not None:
            # NOTE(review): norm arrives as (h, w) while the columns are
            # (x, y) -- confirm the intended axis order.
            pts_proj[:, 0] /= norm[0]
            pts_proj[:, 1] /= norm[1]
        return pts_proj[:, :-1]
def load_world_shape(db_landmarks, model_file=model_file_dft):
    """Load the 3D mean-face model matching the database landmark ids.

    Args:
        db_landmarks: list of database landmark ids; its length selects the
            model file (e.g. 68 -> mean_face_3D_68.txt).
        model_file: template path with a ``{num_ldm}`` placeholder.

    Returns:
        Tuple ``(world_all, index_all)``: the (N, 3) model coordinates
        reordered to match ``db_landmarks`` and the model id of each row.

    Raises:
        ValueError: if no model file exists for that number of landmarks.
    """
    # Load 3D mean face coordinates
    num_ldm = len(db_landmarks)
    filename = model_file.format(num_ldm=num_ldm)
    if not os.path.exists(filename):
        raise ValueError('No 3D model find for %i landmarks' % num_ldm)

    # File rows are "id|x|y|z" (pipe-separated); read ids and coordinates separately.
    posit_landmarks = np.genfromtxt(filename, delimiter='|', dtype=int, usecols=0).tolist()
    mean_face_3D = np.genfromtxt(filename, delimiter='|', dtype=(float, float, float), usecols=(1, 2, 3)).tolist()
    world_all = len(mean_face_3D)*[None]
    index_all = len(mean_face_3D)*[None]

    for cont, elem in enumerate(mean_face_3D):
        # Axis permutation into the POSIT world frame: (x, y, z) -> (z, -x, -y).
        pt3d = [elem[2], -elem[0], -elem[1]]
        lnd_idx = db_landmarks.index(posit_landmarks[cont])
        world_all[lnd_idx] = pt3d
        index_all[lnd_idx] = posit_landmarks[cont]

    return np.array(world_all), np.array(index_all)
def modern_posit(world_pts, image_pts, cam_matrix, max_iters):
    """Estimate the rigid pose aligning a 3D model with 2D image points (POSIT).

    Iterative POSIT: alternates a scaled-orthographic pose solve with a
    perspective correction until the reprojection update converges.

    Args:
        world_pts: (N, 3) model points, N >= 4 and non-coplanar.
        image_pts: (N, 2) pixel coordinates matching ``world_pts`` row-wise.
        cam_matrix: 3x3 intrinsics; only fx (= [0, 0]) and the principal
            point are used for normalization, so fx == fy is assumed.
        max_iters: maximum refinement iterations (>= 1).

    Returns:
        Tuple ``(rot_matrix, trl_matrix)``: 3x3 orthogonal rotation and the
        translation (Tx, Ty, Tz) of the world origin in the camera frame.
    """
    # Homogeneous world points and the object matrix (pseudo-inverse)
    num_landmarks = image_pts.shape[0]
    one = np.ones((num_landmarks, 1))
    A = np.concatenate((world_pts, one), axis=1)
    B = np.linalg.pinv(A)

    # Normalize image points onto the unit-focal image plane
    focal_length = cam_matrix[0, 0]
    img_center = (cam_matrix[0, 2], cam_matrix[1, 2])
    centered_pts = np.zeros((num_landmarks, 2))
    centered_pts[:, 0] = (image_pts[:, 0] - img_center[0]) / focal_length
    centered_pts[:, 1] = (image_pts[:, 1] - img_center[1]) / focal_length
    Ui = centered_pts[:, 0]
    Vi = centered_pts[:, 1]

    # POSIT loop
    Tx, Ty, Tz = 0.0, 0.0, 0.0
    r1, r2, r3 = np.zeros(3), np.zeros(3), np.zeros(3)
    for it in range(max_iters):  # 'it' avoids shadowing the builtin 'iter'
        I = np.dot(B, Ui)
        J = np.dot(B, Vi)
        # Estimate translation vector and rotation matrix
        normI = 1.0 / np.sqrt(I[0] * I[0] + I[1] * I[1] + I[2] * I[2])
        normJ = 1.0 / np.sqrt(J[0] * J[0] + J[1] * J[1] + J[2] * J[2])
        Tz = np.sqrt(normI * normJ)  # geometric average instead of arithmetic average of classicPosit
        r1N = I * Tz
        r2N = J * Tz
        r1 = np.clip(r1N[0:3], -1, 1)
        r2 = np.clip(r2N[0:3], -1, 1)
        r3 = np.cross(r1, r2)
        r3T = np.concatenate((r3, [Tz]), axis=0)
        Tx = r1N[3]
        Ty = r2N[3]
        # Compute epsilon, update Ui and Vi and check convergence
        eps = np.dot(A, r3T) / Tz
        oldUi = Ui
        oldVi = Vi
        Ui = np.multiply(eps, centered_pts[:, 0])
        Vi = np.multiply(eps, centered_pts[:, 1])
        deltaUi = Ui - oldUi
        deltaVi = Vi - oldVi
        delta = focal_length * focal_length * (np.dot(deltaUi, deltaUi) + np.dot(deltaVi, deltaVi))
        if it > 0 and delta < 0.01:  # converged
            break
    rot_matrix = np.array([r1, r2, r3])
    trl_matrix = np.array([Tx, Ty, Tz])

    # Project onto the nearest orthogonal matrix via SVD: R = U * Vt.
    # (numpy replaces the previous cv2.SVDecomp call and the pointless
    # multiplication by the identity; no new dependency is introduced.)
    u, _, vt = np.linalg.svd(rot_matrix)
    rot_matrix = np.matmul(u, vt)
    return rot_matrix, trl_matrix
| 7,711 | 37.949495 | 127 | py |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/landmarks.py | import random
import cv2
import numpy as np
from PIL import Image
from torchvision import transforms
# My libs
import spiga.data.loaders.augmentors.utils as dlu
class HorizontalFlipAug:
    """Randomly mirror the sample (image, landmarks, mask, visibility, bbox)
    around the vertical axis."""

    def __init__(self, ldm_flip_order, prob=0.5):
        """
        Args:
            ldm_flip_order: landmark reindexing that maps each landmark to
                its horizontally mirrored counterpart.
            prob: probability of applying the flip.
        """
        self.prob = prob
        self.ldm_flip_order = ldm_flip_order

    def __call__(self, sample):
        if random.random() >= self.prob:
            return sample

        image = sample['image']
        width = image.size[0]
        reorder = self.ldm_flip_order

        # Relabel the landmarks to their mirrored ids, then mirror x coords.
        flipped_ldm = sample['landmarks'][reorder]
        sample['landmarks'] = (flipped_ldm - (width, 0)) * (-1, 1)
        sample['mask_ldm'] = sample['mask_ldm'][reorder]
        sample['visible'] = sample['visible'][reorder]

        # Mirror the bbox origin and the image itself.
        x, y, w, h = sample['bbox']
        sample['bbox'] = np.array((width - x - w, y, w, h))
        sample['image'] = transforms.functional.hflip(image)
        return sample
class GeometryBaseAug:
    """Base class for geometric augmentations: shared helpers to apply a
    2x3 affine transformation to a sample's image, bbox and landmarks."""

    def __call__(self, sample):
        raise NotImplementedError('Inheritance __call__ not defined')

    def map_affine_transformation(self, sample, affine_transf, new_size=None):
        """Apply the affine map to image, bbox and (if present) landmarks."""
        sample['image'] = self._image_affine_trans(sample['image'], affine_transf, new_size)
        sample['bbox'] = self._bbox_affine_trans(sample['bbox'], affine_transf)
        if 'landmarks' in sample.keys():
            sample['landmarks'] = self._landmarks_affine_trans(sample['landmarks'], affine_transf)
        return sample

    def clean_outbbox_landmarks(self, shape, landmarks, mask):
        """Zero-out landmarks falling outside the (x, y, w, h) region.

        Returns the updated mask and the masked, integer-rounded landmarks.
        """
        x0, y0, width, height = shape[0], shape[1], shape[2], shape[3]
        inside_x = np.logical_and(landmarks[:, 0] >= x0, landmarks[:, 0] < x0 + width)
        inside_y = np.logical_and(landmarks[:, 1] >= y0, landmarks[:, 1] < y0 + height)
        new_mask = mask * np.logical_and(inside_x, inside_y)
        new_landmarks = (landmarks.T * new_mask).T
        new_landmarks = new_landmarks.astype(int).astype(float)
        return new_mask, new_landmarks

    def _image_affine_trans(self, image, affine_transf, new_size=None):
        """Warp a PIL image; PIL expects the inverse map (output -> input)."""
        target_size = new_size if new_size else image.size
        inv_affine_transf = dlu.get_inverse_transf(affine_transf)
        return image.transform(target_size, Image.AFFINE, inv_affine_transf.flatten())

    def _bbox_affine_trans(self, bbox, affine_transf):
        """Map the four bbox corners and return their axis-aligned hull."""
        x, y, w, h = bbox
        corners = np.array([affine_transf.dot(pt) for pt in
                            ([x, y, 1], [x + w, y, 1],
                             [x, y + h, 1], [x + w, y + h, 1])])
        top_left = np.min(corners, axis=0)
        bottom_right = np.max(corners, axis=0)
        return np.array((top_left[0], top_left[1], *(bottom_right - top_left)))

    def _landmarks_affine_trans(self, landmarks, affine_transf):
        """Apply the affine map to an (N, 2) landmark array."""
        homog_landmarks = dlu.affine2homogeneous(landmarks)
        return affine_transf.dot(homog_landmarks.T).T
class RSTAug(GeometryBaseAug):
    """Random rotation/scale augmentation around the face center, plus a
    random translation jitter of the bbox."""

    def __init__(self, angle_range=45., scale_min=-0.15, scale_max=0.15, trl_ratio=0.05):
        self.scale_max = scale_max
        self.scale_min = scale_min
        self.angle_range = angle_range
        self.trl_ratio = trl_ratio

    def __call__(self, sample):
        x, y, w, h = sample['bbox']
        pivot = (x + w / 2, y + h / 2)  # face center: rotation/scale pivot

        # Jitter the bbox position before the similarity transform
        # (draw order kept: tx, ty, scale, angle).
        sample['bbox'][0] += np.random.uniform(-self.trl_ratio, self.trl_ratio) * w
        sample['bbox'][1] += np.random.uniform(-self.trl_ratio, self.trl_ratio) * h

        rnd_scale = 1 + np.random.uniform(self.scale_min, self.scale_max)
        rnd_angle = np.random.uniform(-self.angle_range, self.angle_range)
        similarity = dlu.get_similarity_matrix(rnd_angle, rnd_scale, center=pivot)
        return self.map_affine_transformation(sample, similarity)
class TargetCropAug(GeometryBaseAug):
    """Mandatory crop: zooms around the face bbox and resizes the sample to
    the network input size, optionally rescaling landmarks to a different
    feature-map resolution."""

    def __init__(self, img_new_size=128, map_new_size=128, target_dist=1.3):
        """
        Args:
            img_new_size: output image size, int (square) or (x, y) pair.
            map_new_size: ground-truth map size, int (square) or (x, y) pair.
            target_dist: zoom factor applied around the face bbox.
        """
        self.target_dist = target_dist
        self.new_size_x, self.new_size_y = self._convert_shapes(img_new_size)
        self.map_size_x, self.map_size_y = self._convert_shapes(map_new_size)
        self.img2map_scale = False

        # Mismatch between img shape and featuremap shape
        if self.map_size_x != self.new_size_x or self.map_size_y != self.new_size_y:
            self.img2map_scale = True
            self.map_scale_x = self.map_size_x / self.new_size_x
            self.map_scale_y = self.map_size_y / self.new_size_y
            self.map_scale_xx = self.map_scale_x * self.map_scale_x
            self.map_scale_xy = self.map_scale_x * self.map_scale_y
            self.map_scale_yy = self.map_scale_y * self.map_scale_y

    def _convert_shapes(self, new_size):
        # Accept an int (square output) or an (x, y) pair.
        if isinstance(new_size, (tuple, list)):
            new_size_x = new_size[0]
            new_size_y = new_size[1]
        else:
            new_size_x = new_size
            new_size_y = new_size
        return new_size_x, new_size_y

    def __call__(self, sample):
        x, y, w, h = sample['bbox']
        # we enlarge the area taken around the bounding box
        # it is neccesary to change the botton left point of the bounding box
        # according to the previous enlargement. Note this will NOT be the new
        # bounding box!
        # We return square images, which is neccesary since
        # all the images must have the same size in order to form batches
        side = max(w, h) * self.target_dist
        x -= (side - w) / 2
        y -= (side - h) / 2

        # center of the enlarged bounding box
        x0, y0 = x + side/2, y + side/2
        # homothety factor, chosen so the new horizontal dimension will
        # coincide with new_size
        mu_x = self.new_size_x / side
        mu_y = self.new_size_y / side

        # new_w, new_h = new_size, int(h * mu)
        new_w = self.new_size_x
        new_h = self.new_size_y
        new_x0, new_y0 = new_w / 2, new_h / 2

        # dilatation + translation
        affine_transf = np.array([[mu_x, 0, new_x0 - mu_x * x0],
                                  [0, mu_y, new_y0 - mu_y * y0]])
        sample = self.map_affine_transformation(sample, affine_transf,(new_w, new_h))

        if 'landmarks' in sample.keys():
            # Keep the float landmarks and build integer ones masked to the crop.
            img_shape = np.array([0, 0, self.new_size_x, self.new_size_y])
            sample['landmarks_float'] = sample['landmarks']
            sample['mask_ldm_float'] = sample['mask_ldm']
            sample['landmarks'] = np.round(sample['landmarks'])
            sample['mask_ldm'], sample['landmarks'] = self.clean_outbbox_landmarks(img_shape, sample['landmarks'],
                                                                                   sample['mask_ldm'])
            if self.img2map_scale:
                sample = self._rescale_map(sample)
        return sample

    def _rescale_map(self, sample):
        """Rescale landmarks from image resolution to feature-map resolution."""
        # Rescale
        lnd_float = sample['landmarks_float']
        lnd_float[:, 0] = self.map_scale_x * lnd_float[:, 0]
        lnd_float[:, 1] = self.map_scale_y * lnd_float[:, 1]

        # Filter landmarks
        lnd = np.round(lnd_float)
        filter_x = lnd[:, 0] >= self.map_size_x
        filter_y = lnd[:, 1] >= self.map_size_y
        # NOTE(review): this assigns a scalar to the entire (x, y) row for
        # out-of-range landmarks, not just the offending coordinate -- confirm intended.
        lnd[filter_x] = self.map_size_x - 1
        lnd[filter_y] = self.map_size_y - 1
        new_lnd = (lnd.T * sample['mask_ldm']).T
        new_lnd = new_lnd.astype(int).astype(float)

        sample['landmarks_float'] = lnd_float
        sample['landmarks'] = new_lnd
        sample['img2map_scale'] = [self.map_scale_x, self.map_scale_y]
        return sample
class OcclusionAug:
    """Paste a random uniform-color rectangle over the face area and mark
    the covered landmarks as not visible."""

    def __init__(self, min_length=0.1, max_length=0.4, num_maps=1):
        self.min_length = min_length
        self.max_length = max_length
        self.num_maps = num_maps

    def __call__(self, sample):
        x, y, w, h = sample['bbox']

        # Rectangle size drawn as a random fraction of the bbox size
        # (draw order kept: width, height, x offset, y offset, color).
        occ_w = np.random.randint(int(w * self.min_length), int(w * self.max_length))
        occ_h = np.random.randint(int(h * self.min_length), int(h * self.max_length))

        # (xi, yi) lower-left and (xf, yf) upper-right corners, inside the bbox.
        xi = int(x + np.random.randint(0, w - occ_w))
        xf = int(xi + occ_w)
        yi = int(y + np.random.randint(0, h - occ_h))
        yf = int(yi + occ_h)

        # Paint the occluder with a single random RGB color.
        pixels = np.array(sample['image'])
        pixels[yi:yf, xi:xf, :] = np.random.uniform(0, 255, size=3)
        sample['image'] = Image.fromarray(pixels)

        # Landmarks inside the rectangle become not visible.
        landmarks = sample['landmarks']
        covered_x = np.logical_and(landmarks[:, 0] >= xi, landmarks[:, 0] < xf)
        covered_y = np.logical_and(landmarks[:, 1] >= yi, landmarks[:, 1] < yf)
        covered = np.logical_and(covered_x, covered_y)
        sample['visible'] = sample['visible'] * np.logical_not(covered)
        return sample
class LightingAug:
    """Random photometric jitter: one multiplicative gain per HSV channel."""

    def __init__(self, hsv_range_min=(-0.5, -0.5, -0.5), hsv_range_max=(0.5, 0.5, 0.5)):
        self.hsv_range_min = hsv_range_min
        self.hsv_range_max = hsv_range_max

    def __call__(self, sample):
        # Work in HSV space (draw order kept: H gain, S gain, V gain).
        hsv = cv2.cvtColor(np.array(sample['image']), cv2.COLOR_RGB2HSV)
        gains = [1 + np.random.uniform(low, high)
                 for low, high in zip(self.hsv_range_min, self.hsv_range_max)]
        hsv[:, :, 0] = np.clip(gains[0] * hsv[:, :, 0], 0, 179)  # OpenCV hue range is [0, 179]
        hsv[:, :, 1] = np.clip(gains[1] * hsv[:, :, 1], 0, 255)
        hsv[:, :, 2] = np.clip(gains[2] * hsv[:, :, 2], 0, 255)

        # Back to RGB and to a PIL image.
        sample['image'] = Image.fromarray(cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB))
        return sample
class BlurAug:
    """Randomly smooth the image with a Gaussian kernel of random odd size."""

    def __init__(self, blur_prob=0.5, blur_kernel_range=(0, 2)):
        """
        Args:
            blur_prob: probability of applying the blur.
            blur_kernel_range: inclusive (lo, hi) range; the kernel side is
                drawn as ``2 * randint + 1`` so it is always odd.
        """
        self.blur_prob = blur_prob
        self.kernel_range = blur_kernel_range

    def __call__(self, sample):
        image = np.array(sample['image'])
        if np.random.uniform(0.0, 1.0) < self.blur_prob:
            # np.random.randint has an exclusive upper bound, so +1 keeps the
            # inclusive-range behavior of np.random.random_integers, which was
            # deprecated and removed from NumPy.
            kernel = np.random.randint(self.kernel_range[0], self.kernel_range[1] + 1) * 2 + 1
            image = cv2.GaussianBlur(image, (kernel, kernel), 0, 0)
        sample['image'] = Image.fromarray(image)
        return sample
| 11,374 | 35.931818 | 114 | py |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/utils.py | import numpy as np
def affine2homogeneous(points):
'''Returns the points completed with a new last coordinate
equal to 1
Arguments
---------
points: np.array of shape (num_points, dim)
Returns
-------
hpoints: np.array of shape (num_points, dim + 1),
of the points completed with ones'''
num_points = points.shape[0]
hpoints = np.hstack(
(points, np.repeat(1, num_points).reshape(num_points, 1)))
return hpoints
def get_similarity_matrix(deg_angle, scale, center):
'''Similarity matrix.
Arguments:
---------
deg_angle: rotation angle in degrees
scale: factor scale
center: coordinates of the rotation center
Returns:
-------
matrix: (2, 3) numpy array representing the
similarity matrix.
'''
x0, y0 = center
angle = np.radians(deg_angle)
matrix = np.zeros((2, 3))
matrix[0:2, 0:2] = [[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]]
matrix[0: 2, 0: 2] *= scale
matrix[:, 2] = [(1 - scale * np.cos(angle)) * x0 +
scale * np.sin(angle) * y0,
-scale * np.sin(angle) * x0 +
(1 - scale * np.cos(angle)) * y0]
return matrix
def get_inverse_similarity_matrix(deg_angle, scale, center):
'''Returns the inverse of the affine similarity
Arguments
---------
deg_angle: angle in degrees of the rotation
center: iterable of two components (x0, y0),
center of the rotation
scale: float, scale factor
Returns
-------
matrix: np.array of shape (2, 3) with the coordinates of
the inverse of the similarity'''
x0, y0 = center
angle = np.radians(deg_angle)
inv_scale = 1 / scale
matrix = np.zeros((2, 3))
matrix[0:2, 0:2] = [[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]]
matrix[0:2, 0:2] *= inv_scale
matrix[:, 2] = [(1 - inv_scale * np.cos(angle)) * x0 -
inv_scale * np.sin(angle) * y0,
inv_scale * np.sin(angle) * x0 +
(1 - inv_scale * np.cos(angle)) * y0]
return matrix
def get_inverse_transf(affine_transf):
A = affine_transf[0:2, 0:2]
b = affine_transf[:, 2]
inv_A = np.linalg.inv(A) # we assume A invertible!
inv_affine = np.zeros((2, 3))
inv_affine[0:2, 0:2] = inv_A
inv_affine[:, 2] = -inv_A.dot(b)
return inv_affine
def image2vect(image):
'''
Input:
image[batch_size, num_channels, im_size_x, im_size_y]
Output:
vect[batch_size, num_channels, im_size_x*im_size_y]
'''
vect = image.reshape(*image.shape[0:-2], -1)
return vect
def rotation_matrix_to_euler(rot_matrix):
# http://euclideanspace.com/maths/geometry/rotations/conversions/matrixToEuler/index.htm
a00, a01, a02 = rot_matrix[0, 0], rot_matrix[0, 1], rot_matrix[0, 2]
a10, a11, a12 = rot_matrix[1, 0], rot_matrix[1, 1], rot_matrix[1, 2]
a20, a21, a22 = rot_matrix[2, 0], rot_matrix[2, 1], rot_matrix[2, 2]
if abs(1.0 - a10) <= np.finfo(float).eps: # singularity at north pole / special case a10 == 1
yaw = np.arctan2(a02, a22)
pitch = np.pi/2.0
roll = 0
elif abs(-1.0 - a10) <= np.finfo(float).eps: # singularity at south pole / special case a10 == -1
yaw = np.arctan2(a02, a22)
pitch = -np.pi/2.0
roll = 0
else: # standard case
yaw = np.arctan2(-a20, a00)
pitch = np.arcsin(a10)
roll = np.arctan2(-a12, a11)
# Convert to degrees
euler = np.array([yaw, pitch, roll])*(180.0/np.pi)
# Change coordinates system
euler = np.array([(-euler[0])+90, -euler[1], (-euler[2])-90])
if euler[0] > 180: euler[0] -= 360
elif euler[0] < -180: euler[0] += 360
if euler[1] > 180: euler[1] -= 360
elif euler[1] < -180: euler[1] += 360
if euler[2] > 180: euler[2] -= 360
elif euler[2] < -180: euler[2] += 360
return euler
def euler_to_rotation_matrix(headpose):
# http://euclideanspace.com/maths/geometry/rotations/conversions/eulerToMatrix/index.htm
# Change coordinates system
euler = np.array([-(headpose[0]-90), -headpose[1], -(headpose[2]+90)])
# Convert to radians
rad = euler*(np.pi/180.0)
cy = np.cos(rad[0])
sy = np.sin(rad[0])
cp = np.cos(rad[1])
sp = np.sin(rad[1])
cr = np.cos(rad[2])
sr = np.sin(rad[2])
Ry = np.array([[cy, 0.0, sy], [0.0, 1.0, 0.0], [-sy, 0.0, cy]]) # yaw
Rp = np.array([[cp, -sp, 0.0], [sp, cp, 0.0], [0.0, 0.0, 1.0]]) # pitch
Rr = np.array([[1.0, 0.0, 0.0], [0.0, cr, -sr], [0.0, sr, cr]]) # roll
return np.matmul(np.matmul(Ry, Rp), Rr)
| 4,716 | 30.871622 | 102 | py |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/boundary.py | import numpy as np
from scipy import interpolate
import cv2
class AddBoundary(object):
def __init__(self, num_landmarks=68, map_size=64, sigma=1, min_dpi=64):
self.num_landmarks = num_landmarks
self.sigma = sigma
if isinstance(map_size, (tuple, list)):
self.width = map_size[0]
self.height = map_size[1]
else:
self.width = map_size
self.height = map_size
if max(map_size) > min_dpi:
self.dpi = max(map_size)
else:
self.dpi = min_dpi
self.fig_size =[self.height/self.dpi, self.width/self.dpi]
    def __call__(self, sample):
        """Render one Gaussian boundary heatmap per facial-boundary group.

        Uses the float landmarks + mask, fits a spline through each boundary
        group, rasterizes it and converts it to a soft map via a distance
        transform. Stores the result in sample['boundary'].
        """
        landmarks = sample['landmarks_float']
        mask_lnd = sample['mask_ldm_float']
        boundaries = self.get_dataset_boundaries(landmarks, mask_lnd)

        functions = {}
        for key, points in boundaries.items():
            if len(points) != 0:
                # Drop consecutive duplicate points (splprep rejects them).
                temp = points[0]
                new_points = points[0:1, :]
                for point in points[1:]:
                    if point[0] == temp[0] and point[1] == temp[1]:
                        continue
                    else:
                        new_points = np.concatenate((new_points, np.expand_dims(point, 0)), axis=0)
                        temp = point
                points = new_points
                if points.shape[0] == 1:
                    # A single point cannot define a spline; duplicate it slightly offset.
                    points = np.concatenate((points, points+0.001), axis=0)
                # Spline degree limited by the number of points (max cubic, k=3).
                k = min(4, points.shape[0])
                functions[key] = interpolate.splprep([points[:, 0], points[:, 1]], k=k-1,s=0)

        boundary_maps = np.zeros((len(boundaries), self.height, self.width))
        for i_map, key in enumerate(functions.keys()):
            boundary_map = np.zeros((self.height, self.width))
            # Sample the spline densely and rasterize it into the map.
            xnew = np.arange(0, 1, 1/self.dpi)
            out = interpolate.splev(xnew, functions[key][0], der=0)
            out = np.round(out).astype(int).transpose()
            # NOTE(review): out[:, 0] (x) is bounds-checked against height and
            # out[:, 1] (y) against width, yet indexed as [y, x] below --
            # only consistent for square maps; verify for non-square sizes.
            out = out[out[:, 0] < self.height]
            out = out[out[:, 1] < self.width]
            boundary_map[out[:,1], out[:,0]]= 255
            # Smooth: distance transform of the inverted line image, then a
            # Gaussian falloff truncated at 3*sigma.
            sigma = self.sigma
            temp = 255 - boundary_map.astype(np.uint8)
            temp = cv2.distanceTransform(temp, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
            temp = temp.astype(np.float32)
            temp = np.where(temp < 3*sigma, np.exp(-(temp*temp)/(2*sigma*sigma)), 0 )
            boundary_maps[i_map] = temp
        sample['boundary'] = boundary_maps
        return sample
def get_dataset_boundaries(self, landmarks, mask_lnd):
boundaries = {}
if self.num_landmarks == 68:
cheek = landmarks[0:17]
boundaries['cheek'] = cheek[mask_lnd[0:17] > 0]
left_eyebrow = landmarks[17:22]
boundaries['left_eyebrow'] = left_eyebrow[mask_lnd[17:22] > 0]
right_eyebrow = landmarks[22:27]
boundaries['right_eyebrow'] = right_eyebrow[mask_lnd[22:27] > 0]
nose = landmarks[27:31]
boundaries['nose'] = nose[mask_lnd[27:31] > 0]
nose_bot = landmarks[31:36]
boundaries['nose_bot'] = nose_bot[mask_lnd[31:36] > 0]
uper_left_eyelid = landmarks[36:40]
boundaries['upper_left_eyelid'] = uper_left_eyelid[mask_lnd[36:40] > 0]
lower_left_eyelid = np.array([landmarks[i] for i in [36, 41, 40, 39]])
lower_left_eyelid_mask = np.array([mask_lnd[i] for i in [36, 41, 40, 39]])
boundaries['lower_left_eyelid'] = lower_left_eyelid[lower_left_eyelid_mask > 0]
upper_right_eyelid = landmarks[42:46]
boundaries['upper_right_eyelid'] = upper_right_eyelid[mask_lnd[42:46] > 0]
lower_right_eyelid = np.array([landmarks[i] for i in [42, 47, 46, 45]])
lower_right_eyelid_mask = np.array([mask_lnd[i] for i in [42, 47, 46, 45]])
boundaries['lower_right_eyelid'] = lower_right_eyelid[lower_right_eyelid_mask > 0]
upper_outer_lip = landmarks[48:55]
boundaries['upper_outer_lip'] = upper_outer_lip[mask_lnd[48:55] > 0]
lower_outer_lip = np.array([landmarks[i] for i in [48, 59, 58, 57, 56, 55, 54]])
lower_outer_lip_mask = np.array([mask_lnd[i] for i in [48, 59, 58, 57, 56, 55, 54]])
boundaries['lower_outer_lip'] = lower_outer_lip[lower_outer_lip_mask > 0]
upper_inner_lip = np.array([landmarks[i] for i in [60, 61, 62, 63, 64]])
upper_inner_lip_mask = np.array([mask_lnd[i] for i in [60, 61, 62, 63, 64]])
boundaries['upper_inner_lip'] = upper_inner_lip[upper_inner_lip_mask > 0]
lower_inner_lip = np.array([landmarks[i] for i in [60, 67, 66, 65, 64]])
lower_inner_lip_mask = np.array([mask_lnd[i] for i in [60, 67, 66, 65, 64]])
boundaries['lower_inner_lip'] = lower_inner_lip[lower_inner_lip_mask > 0]
elif self.num_landmarks == 98:
boundaries['cheek'] = landmarks[0:33]
boundaries['upper_left_eyebrow'] = landmarks[33:38]
boundaries['lower_left_eyebrow'] = np.array([landmarks[i] for i in [33, 41, 40, 39, 38]])
boundaries['upper_right_eyebrow'] = landmarks[42:47]
boundaries['lower_right_eyebrow'] = landmarks[46:51]
boundaries['nose'] = landmarks[51:55]
boundaries['nose_bot'] = landmarks[55:60]
boundaries['upper_left_eyelid'] = landmarks[60:65]
boundaries['lower_left_eyelid'] = np.array([landmarks[i] for i in [60, 67, 66, 65, 64]])
boundaries['upper_right_eyelid'] = landmarks[68:73]
boundaries['lower_right_eyelid'] = np.array([landmarks[i] for i in [68, 75, 74, 73, 72]])
boundaries['upper_outer_lip'] = landmarks[76:83]
boundaries['lower_outer_lip'] = np.array([landmarks[i] for i in [76, 87, 86, 85, 84, 83, 82]])
boundaries['upper_inner_lip'] = np.array([landmarks[i] for i in [88, 89, 90, 91, 92]])
boundaries['lower_inner_lip'] = np.array([landmarks[i] for i in [88, 95, 94, 93, 92]])
return boundaries
| 6,156 | 49.056911 | 106 | py |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/heatmaps.py | import numpy as np
class Heatmaps:
    """Transform that renders one 2D Gaussian heatmap per landmark.

    Writes an array of shape (num_maps, height, width) into
    ``sample['heatmap2D']``; when *norm* is set, each map integrates the
    Gaussian with its 1/(2*pi*sigma^2) normalization constant.
    """

    def __init__(self, num_maps, map_size, sigma, stride=1, norm=True):
        self.num_maps = num_maps
        self.sigma = sigma
        # Precomputed Gaussian constants: 2*sigma^2 and 2*pi*sigma^2.
        self.double_sigma_pw2 = 2 * sigma * sigma
        self.doublepi_sigma_pw2 = np.pi * self.double_sigma_pw2
        self.stride = stride
        self.norm = norm
        if isinstance(map_size, (tuple, list)):
            self.width, self.height = map_size[0], map_size[1]
        else:
            self.width = self.height = map_size
        # Pixel-center coordinate grids, duplicated once per landmark map.
        xs = np.arange(self.width) * stride + stride / 2 - 0.5
        ys = np.arange(self.height) * stride + stride / 2 - 0.5
        self.grid_x = np.tile(xs, (num_maps, 1))
        self.grid_y = np.tile(ys, (num_maps, 1))

    def __call__(self, sample):
        """Render heatmaps for the last *num_maps* landmarks of the sample."""
        coords = sample['landmarks'][-self.num_maps:]
        # The 2D Gaussian is separable: evaluate the x and y profiles
        # independently, then combine them via an outer product.
        gauss_x = np.exp(-((self.grid_x - coords[:, 0:1]) ** 2) / self.double_sigma_pw2)
        gauss_y = np.exp(-((self.grid_y - coords[:, 1:2]) ** 2) / self.double_sigma_pw2)
        maps = gauss_y[:, :, np.newaxis] * gauss_x[:, np.newaxis, :]
        if self.norm:
            maps = maps / self.doublepi_sigma_pw2
        sample['heatmap2D'] = maps
        return sample
| 1,463 | 35.6 | 119 | py |
SPIGA | SPIGA-main/spiga/data/visualize/plotting.py | import matplotlib.pyplot as plt
import numpy as np
import cv2
import spiga.data.loaders.augmentors.utils as dlu
# Color tuples in OpenCV's BGR channel order, used by the drawing helpers below.
BLUE = (255, 0, 0)
GREEN = (0, 255, 0)
RED = (0, 0, 255)
PURPLE = (128, 0, 128)
def draw_landmarks(image, landmarks, visible=None, mask=None, thick_scale=1, colors=(GREEN, RED)):
    """Draw landmarks as filled circles on a copy of *image*.

    Visible landmarks use colors[0], occluded ones colors[1]; *mask* selects
    which landmarks are annotated at all. Channel-first (3, H, W) images are
    transposed to HWC before drawing.
    """
    radius = int(2 * thick_scale + 0.5)
    # Default to "all annotated, all visible" when flags are missing.
    if visible is None:
        visible = np.ones(len(landmarks))
    if mask is None:
        mask = np.ones(len(landmarks))
    mask = np.array(mask, dtype=bool)
    visible = np.array(visible, dtype=bool)
    # Keep only annotated landmarks, then separate them by visibility.
    kept = landmarks[mask]
    kept_vis = visible[mask]
    shown = kept[kept_vis]
    hidden = kept[np.logical_not(kept_vis)]
    if image.shape[0] == 3:
        image = image.transpose(1, 2, 0)
    canvas = image.copy()
    canvas = _write_circles(canvas, shown, color=colors[0], thick=radius)
    canvas = _write_circles(canvas, hidden, color=colors[1], thick=radius)
    return canvas
def _write_circles(canvas, landmarks, color=RED, thick=2):
    """Stamp a filled circle of radius *thick* at each (x, y) landmark."""
    for point in landmarks:
        # Round to the nearest integer pixel before drawing.
        rounded = np.array(point + 0.5, dtype=int)
        canvas = cv2.circle(canvas, (rounded[0], rounded[1]), thick, color, -1)
    return canvas
def plot_landmarks_pil(image, landmarks, visible=None, mask=None):
    """Show *image* with its landmarks via matplotlib (green=visible, red=occluded).

    Args:
        image: HWC or channel-first (3, H, W) image with values in [0, 255].
        landmarks: (N, 2) array of (x, y) landmark coordinates.
        visible: Optional per-landmark visibility flags (truthy = visible).
        mask: Optional per-landmark annotation mask (truthy = annotated).
    """
    # Initialize variables if need it
    if visible is None:
        visible = np.ones(len(landmarks))
    if mask is None:
        mask = np.ones(len(landmarks))
    mask = np.array(mask, dtype=bool)
    visible = np.array(visible, dtype=bool)
    # Clean and split landmarks.
    # BUGFIX: the visibility flags must be masked together with the landmarks
    # (as done in draw_landmarks); previously the masked landmark array was
    # indexed with the full-length visibility array, which breaks whenever
    # *mask* drops any landmark.
    landmarks = landmarks[mask]
    visible = visible[mask]
    not_visible = np.logical_not(visible)
    ldm_vis = landmarks[visible]
    ldm_notvis = landmarks[not_visible]
    # Plot landmarks
    if image.shape[0] == 3:
        image = image.transpose(1, 2, 0)
    plt.imshow(image / 255)
    plt.scatter(ldm_vis[:, 0], ldm_vis[:, 1], s=10, marker='.', c='g')
    plt.scatter(ldm_notvis[:, 0], ldm_notvis[:, 1], s=10, marker='.', c='r')
    plt.show()
def draw_pose(img, rot, trl, K, euler=False, size=0.5, colors=(BLUE, GREEN, RED)):
    """Project the pose axes with intrinsics *K* and draw them on a copy of *img*.

    *rot* may be a rotation matrix or, when *euler* is set, Euler angles that
    are converted first. Axes of length *size* are drawn from the origin in
    colors[0]/colors[1]/colors[2].
    """
    if euler:
        rot = dlu.euler_to_rotation_matrix(rot)
    canvas = img.copy()
    rot_vec, _ = cv2.Rodrigues(rot)
    axes_3d = np.float32([[size, 0, 0], [0, -size, 0], [0, 0, -size], [0, 0, 0]]).reshape(-1, 3)
    pts, _ = cv2.projectPoints(axes_3d, rot_vec, trl, K, (0, 0, 0, 0))
    pts = pts.astype(int)
    origin = tuple(pts[3].ravel())
    # Draw z, y, x axis tips in colors[0], colors[1], colors[2] respectively.
    for tip_idx, color in zip((2, 1, 0), colors):
        canvas = cv2.line(canvas, origin, tuple(pts[tip_idx].ravel()), color, 3)
    return canvas
def enhance_heatmap(heatmap):
    """Min-max normalize *heatmap* and render it with OpenCV's BONE colormap.

    Returns a uint8 BGR image of the same spatial size as *heatmap*.
    """
    shifted = heatmap - heatmap.min()
    peak = shifted.max()
    # BUGFIX: guard against a constant heatmap, where the previous
    # unconditional division by max() produced NaNs (0/0).
    if peak > 0:
        shifted = shifted / peak
    map_img = cv2.applyColorMap((shifted * 255).astype(np.uint8), cv2.COLORMAP_BONE)
    return map_img
| 3,051 | 30.142857 | 103 | py |
SPIGA | SPIGA-main/spiga/data/visualize/inspect_heatmaps.py | import cv2
import numpy as np
from spiga.data.visualize.inspect_dataset import DatasetInspector, inspect_parser
class HeatmapInspector(DatasetInspector):
    """Interactive viewer for 2D landmark heatmaps and boundary maps.

    Extends DatasetInspector by enabling the 'heatmaps2D' and 'boundaries'
    augmentations on the data config and rendering them alongside the
    landmark crops.
    """
    def __init__(self, database, anns_type, data_aug=True, image_shape=(256,256)):
        # Pose generation is disabled; only heatmap/boundary features are shown.
        super().__init__(database, anns_type, data_aug=data_aug, pose=False, image_shape=image_shape)
        self.data_config.aug_names.append('heatmaps2D')
        self.data_config.heatmap2D_norm = False
        self.data_config.aug_names.append('boundaries')
        self.data_config.shuffle = False
        # Rebuild the dataloader so the extra augmentations take effect.
        self.reload_dataset()
    def show_dataset(self, ids_list=None):
        """Loop over images showing crops, boundaries and per-landmark heatmaps.

        Keys: 'q' quits entirely, 'n' skips to the next image, any other key
        advances to the next landmark's heatmap.
        """
        if ids_list is None:
            ids = self.get_idx(shuffle=self.data_config.shuffle)
        else:
            ids = ids_list
        for img_id in ids:
            data_dict = self.dataset[img_id]
            crop_imgs, _ = self.plot_features(data_dict)
            # Plot landmark crop
            cv2.imshow('crop', crop_imgs['lnd'])
            # Plot landmarks 2D (group)
            crop_allheats = self._plot_heatmaps2D(data_dict)
            # Plot boundaries shape
            cv2.imshow('boundary', np.max(data_dict['boundary'], axis=0))
            for lnd_idx in range(self.data_config.database.num_landmarks):
                # Heatmaps 2D
                crop_heats = self._plot_heatmaps2D(data_dict, lnd_idx)
                # Show the all-landmark map next to the single-landmark one.
                maps = cv2.hconcat([crop_allheats['heatmaps2D'], crop_heats['heatmaps2D']])
                cv2.imshow('heatmaps', maps)
                key = cv2.waitKey()
                if key == ord('q'):
                    break
                if key == ord('n'):
                    break
            # Propagate 'q' from the inner loop to stop the whole session.
            if key == ord('q'):
                break
    def _plot_heatmaps2D(self, data_dict, heatmap_id=None):
        """Overlay one heatmap (or the max over all of them) on the crop image."""
        # Variables
        heatmaps = {}
        image = data_dict['image']
        if heatmap_id is None:
            heatmaps2D = data_dict['heatmap2D']
            heatmaps2D = np.max(heatmaps2D, axis=0)
        else:
            heatmaps2D = data_dict['heatmap2D'][heatmap_id]
        # Plot maps
        heatmaps['heatmaps2D'] = self._merge_imgmap(image, heatmaps2D)
        return heatmaps
    def _merge_imgmap(self, image, maps):
        """Blend a JET-colormapped heatmap (30%) over the image (70%)."""
        crop_maps = cv2.applyColorMap(np.uint8(255 * maps), cv2.COLORMAP_JET)
        return cv2.addWeighted(image, 0.7, crop_maps, 0.3, 0)
if __name__ == '__main__':
    # Parse CLI options and launch the interactive heatmap viewer.
    args = inspect_parser()
    data_aug = not args.clean
    if len(args.shape) != 2:
        raise ValueError('--shape requires two values: width and height. Ej: --shape 256 256')
    img_shape = tuple(args.shape)
    visualizer = HeatmapInspector(args.database, args.anns, data_aug, image_shape=img_shape)
    visualizer.show_dataset(ids_list=args.img)
| 2,871 | 29.88172 | 101 | py |
SPIGA | SPIGA-main/spiga/data/visualize/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/data/visualize/inspect_dataset.py | import cv2
import random
import numpy as np
import spiga.data.loaders.dl_config as dl_cfg
import spiga.data.loaders.dataloader as dl
import spiga.data.visualize.plotting as plot
def inspect_parser():
    """Build and parse the command-line arguments of the dataset inspectors."""
    import argparse
    parser = argparse.ArgumentParser(description='Data augmentation and dataset visualization. '
                                                 'Press Q to quit,'
                                                 'N to visualize the next image'
                                                 ' and any other key to visualize the next default data.')
    parser.add_argument('database', type=str,
                        choices=['wflw', '300wpublic', '300wprivate', 'cofw68', 'merlrav'], help='Database name')
    parser.add_argument('-a', '--anns', type=str, default='train', help='Annotation type: test, train or valid')
    parser.add_argument('-np', '--nopose', action='store_false', default=True, help='Avoid pose generation')
    parser.add_argument('-c', '--clean', action='store_true', help='Process without data augmentation for train')
    parser.add_argument('--shape', nargs='+', type=int, default=[256, 256], help='Image cropped shape (W,H)')
    parser.add_argument('--img', nargs='+', type=int, default=None, help='Select specific image ids')
    return parser.parse_args()
class DatasetInspector:
    """Interactive viewer that renders landmark (and optionally pose) annotations
    for a given alignment database, both on the cropped and the original image.
    """
    def __init__(self, database, anns_type, data_aug=True, pose=True, image_shape=(256,256)):
        # Build the data config; disabling data_aug clears all augmentations.
        data_config = dl_cfg.AlignConfig(database, anns_type)
        data_config.image_size = image_shape
        data_config.ftmap_size = image_shape
        data_config.generate_pose = pose
        if not data_aug:
            data_config.aug_names = []
        self.data_config = data_config
        dataloader, dataset = dl.get_dataloader(1, data_config, debug=True)
        self.dataset = dataset
        self.dataloader = dataloader
        # Default color schemes: landmarks (visible, occluded), pose axes (x, y, z).
        self.colors_dft = {'lnd': (plot.GREEN, plot.RED), 'pose': (plot.BLUE, plot.GREEN, plot.RED)}
    def show_dataset(self, ids_list=None):
        """Display each sample's crop and full image; press 'q' to quit."""
        if ids_list is None:
            ids = self.get_idx(shuffle=self.data_config.shuffle)
        else:
            ids = ids_list
        for img_id in ids:
            data_dict = self.dataset[img_id]
            crop_imgs, full_img = self.plot_features(data_dict)
            # Plot crop
            if 'merge' in crop_imgs.keys():
                crop = crop_imgs['merge']
            else:
                crop = crop_imgs['lnd']
            cv2.imshow('crop', crop)
            # Plot full
            cv2.imshow('image', full_img['lnd'])
            key = cv2.waitKey()
            if key == ord('q'):
                break
    def plot_features(self, data_dict, colors=None):
        """Render landmarks (and pose, if enabled) onto the crop and full image.

        Returns (crop_imgs, full_imgs) dicts keyed by feature name
        ('lnd', and additionally 'pose'/'merge' when pose generation is on).
        """
        # Init variables
        crop_imgs = {}
        full_imgs = {}
        if colors is None:
            colors = self.colors_dft
        # Cropped image
        image = data_dict['image']
        landmarks = data_dict['landmarks']
        visible = data_dict['visible']
        # NaN visibility means the dataset carries no visibility annotation.
        if np.any(np.isnan(visible)):
            visible = None
        mask = data_dict['mask_ldm']
        # Full image
        if 'image_ori' in data_dict.keys():
            image_ori = data_dict['image_ori']
        else:
            image_ori = cv2.imread(data_dict['imgpath'])
        landmarks_ori = data_dict['landmarks_ori']
        visible_ori = data_dict['visible_ori']
        if np.any(np.isnan(visible_ori)):
            visible_ori = None
        mask_ori = data_dict['mask_ldm_ori']
        # Plot landmarks
        crop_imgs['lnd'] = self._plot_lnd(image, landmarks, visible, mask, colors=colors['lnd'])
        full_imgs['lnd'] = self._plot_lnd(image_ori, landmarks_ori, visible_ori, mask_ori, colors=colors['lnd'])
        if self.data_config.generate_pose:
            rot, trl, cam_matrix = self._extract_pose(data_dict)
            # Plot pose
            crop_imgs['pose'] = plot.draw_pose(image, rot, trl, cam_matrix, euler=True, colors=colors['pose'])
            # Plot merge features
            crop_imgs['merge'] = plot.draw_pose(crop_imgs['lnd'], rot, trl, cam_matrix, euler=True, colors=colors['pose'])
        return crop_imgs, full_imgs
    def get_idx(self, shuffle=False):
        """Return the dataset indices, optionally shuffled in place."""
        ids = list(range(len(self.dataset)))
        if shuffle:
            random.shuffle(ids)
        return ids
    def reload_dataset(self, data_config=None):
        """Rebuild dataloader/dataset, e.g. after mutating self.data_config."""
        if data_config is None:
            data_config = self.data_config
        dataloader, dataset = dl.get_dataloader(1, data_config, debug=True)
        self.dataset = dataset
        self.dataloader = dataloader
    def _extract_pose(self, data_dict):
        """Split the 6-DoF pose into rotation/translation plus the camera matrix."""
        # Rotation and translation matrix
        pose = data_dict['pose']
        rot = pose[:3]
        trl = pose[3:]
        # Camera matrix
        cam_matrix = data_dict['cam_matrix']
        # Check for ground truth anns
        # When no augmentation runs, prefer the ground-truth headpose if provided.
        if 'headpose_ori' in data_dict.keys():
            if len(self.data_config.aug_names) == 0:
                print('Image headpose generated by ground truth data')
                pose_ori = data_dict['headpose_ori']
                rot = pose_ori
        return rot, trl, cam_matrix
    def _plot_lnd(self, image, landmarks, visible, mask, max_shape_thr=720, colors=None):
        """Draw landmarks; downscale the result if it exceeds max_shape_thr px."""
        if colors is None:
            colors = self.colors_dft['lnd']
        # Full image plots
        W, H, C = image.shape
        # Original image resize if need it
        if W > max_shape_thr or H > max_shape_thr:
            max_shape = max(W, H)
            scale_factor = max_shape_thr / max_shape
            resize_shape = (int(H * scale_factor), int(W * scale_factor))
            # Thicker circles before downscaling keep the dots visible.
            image_out = plot.draw_landmarks(image, landmarks, visible=visible, mask=mask,
                                            thick_scale=1 / scale_factor, colors=colors)
            image_out = cv2.resize(image_out, resize_shape)
        else:
            image_out = plot.draw_landmarks(image, landmarks, visible=visible, mask=mask, colors=colors)
        return image_out
if __name__ == '__main__':
    # Parse CLI options and launch the interactive dataset viewer.
    args = inspect_parser()
    data_aug = not args.clean
    if len(args.shape) != 2:
        raise ValueError('--shape requires two values: width and height. Ej: --shape 256 256')
    img_shape = tuple(args.shape)
    visualizer = DatasetInspector(args.database, args.anns, data_aug=data_aug, pose=args.nopose, image_shape=img_shape)
    visualizer.show_dataset(ids_list=args.img)
| 6,554 | 35.016484 | 122 | py |
SPIGA | SPIGA-main/spiga/data/models3D/visualization.py | import argparse
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Parse the CLI arguments and display the given 3D landmark model file."""
    parser = argparse.ArgumentParser(description='3D model visualization')
    parser.add_argument('file', type=str, help='File txt path')
    cli_args = parser.parse_args()
    visualize_3Dmodel(cli_args.file)
def visualize_3Dmodel(input_file):
    """Load a '|'-separated 3D model file and show its points in a 3D scatter plot.

    Each non-empty line is expected to hold float fields ``id|x|y|z``; the
    first column (landmark id) is dropped before plotting.
    """
    with open(input_file) as f:
        lines = f.readlines()
    model = []
    for line in lines:
        # BUGFIX: was `line[:-1]`, which chops the last data character when the
        # final line has no trailing newline and leaves '\r' on CRLF files.
        line = line.rstrip('\r\n')
        if not line:
            continue  # tolerate blank/trailing lines instead of crashing on float('')
        line_split = line.split('|')
        values = np.array(line_split, dtype=float)
        model.append(values)
    model = np.array(model)
    model_xyz = model[:, 1:]
    # Show model
    # NOTE(review): the +0.8 z-offset is kept from the original — presumably a
    # display adjustment; confirm before changing.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(model_xyz[:, 0], model_xyz[:, 1], model_xyz[:, 2]+0.8)
    plt.show()
# Script entry point: `python visualization.py <model.txt>`.
if __name__ == '__main__':
    main()
| 875 | 22.052632 | 72 | py |
SPIGA | SPIGA-main/spiga/data/models3D/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/spiga/inference/pretreatment.py | from torchvision import transforms
import numpy as np
from PIL import Image
import cv2
from spiga.data.loaders.transforms import TargetCrop, ToOpencv, AddModel3D
def get_transformers(data_config):
    """Compose the inference pretreatment pipeline.

    Steps: OpenCV BGR -> PIL RGB, crop around the target bbox, back to
    OpenCV, then normalize to [0, 1] and permute to CHW.
    """
    pipeline = transforms.Compose([
        Opencv2Pil(),
        TargetCrop(data_config.image_size, data_config.target_dist),
        ToOpencv(),
        NormalizeAndPermute(),
    ])
    return pipeline
class NormalizeAndPermute:
    """Scale image values from [0, 255] to [0, 1] and reorder axes HWC -> CHW."""

    def __call__(self, sample):
        chw = np.array(sample['image'], dtype=float).transpose(2, 0, 1)
        sample['image'] = chw / 255
        return sample
class Opencv2Pil:
    """Convert a BGR OpenCV image in the sample into an RGB PIL image."""

    def __call__(self, sample):
        rgb = cv2.cvtColor(sample['image'], cv2.COLOR_BGR2RGB)
        sample['image'] = Image.fromarray(rgb)
        return sample
| 825 | 24.8125 | 74 | py |
SPIGA | SPIGA-main/spiga/inference/framework.py | import os
import pkg_resources
import copy
import torch
import numpy as np
# Paths
# Default directory where pretrained weights are stored/downloaded.
weights_path_dft = pkg_resources.resource_filename('spiga', 'models/weights')
import spiga.inference.pretreatment as pretreat
from spiga.models.spiga import SPIGA
from spiga.inference.config import ModelConfig
class SPIGAFramework:
    """End-to-end SPIGA inference wrapper: pretreatment, forward pass, postreatment."""

    # NOTE: the annotation is a string on purpose — the previous
    # `model_cfg: ModelConfig()` instantiated a ModelConfig at def time.
    def __init__(self, model_cfg: "ModelConfig", gpus=[0], load3DM=True):
        """Build the SPIGA model, load its weights and (optionally) the 3D prior.

        Args:
            model_cfg: Inference configuration (dataset, weight location, sizes).
            gpus: GPU ids; only gpus[0] is used. (List default kept for
                backward compatibility — it is never mutated.)
            load3DM: Whether to preload the 3D face model and camera matrix.
        """
        # Parameters
        self.model_cfg = model_cfg
        self.gpus = gpus

        # Pretreatment initialization
        self.transforms = pretreat.get_transformers(self.model_cfg)

        # SPIGA model
        self.model_inputs = ['image', "model3d", "cam_matrix"]
        self.model = SPIGA(num_landmarks=model_cfg.dataset.num_landmarks,
                           num_edges=model_cfg.dataset.num_edges)

        # Load weights and set model
        weights_path = self.model_cfg.model_weights_path
        if weights_path is None:
            weights_path = weights_path_dft

        if self.model_cfg.load_model_url:
            model_state_dict = torch.hub.load_state_dict_from_url(self.model_cfg.model_weights_url,
                                                                  model_dir=weights_path,
                                                                  file_name=self.model_cfg.model_weights)
        else:
            weights_file = os.path.join(weights_path, self.model_cfg.model_weights)
            model_state_dict = torch.load(weights_file)

        self.model.load_state_dict(model_state_dict)
        self.model = self.model.cuda(gpus[0])
        self.model.eval()
        print('SPIGA model loaded!')

        # Load 3D model and camera intrinsic matrix
        if load3DM:
            loader_3DM = pretreat.AddModel3D(model_cfg.dataset.ldm_ids,
                                             ftmap_size=model_cfg.ftmap_size,
                                             focal_ratio=model_cfg.focal_ratio,
                                             totensor=True)
            params_3DM = self._data2device(loader_3DM())
            self.model3d = params_3DM['model3d']
            self.cam_matrix = params_3DM['cam_matrix']

    def inference(self, image, bboxes):
        """
        @param self:
        @param image: Raw image
        @param bboxes: List of bounding box founded on the image [[x,y,w,h],...]
        @return: features dict {'landmarks': list with shape (num_bbox, num_landmarks, 2) and x,y referred to image size
                                'headpose': list with shape (num_bbox, 6) euler->[:3], trl->[3:]
        """
        batch_crops, crop_bboxes = self.pretreat(image, bboxes)
        outputs = self.net_forward(batch_crops)
        features = self.postreatment(outputs, crop_bboxes, bboxes)
        return features

    def pretreat(self, image, bboxes):
        """Crop each bbox from *image*, normalize, and batch the network inputs."""
        crop_bboxes = []
        crop_images = []
        for bbox in bboxes:
            # Deep-copy so the shared raw image is not mutated by the transforms.
            sample = {'image': copy.deepcopy(image),
                      'bbox': copy.deepcopy(bbox)}
            sample_crop = self.transforms(sample)
            crop_bboxes.append(sample_crop['bbox'])
            crop_images.append(sample_crop['image'])

        # Images to tensor and device
        batch_images = torch.tensor(np.array(crop_images), dtype=torch.float)
        batch_images = self._data2device(batch_images)

        # Batch 3D model and camera intrinsic matrix
        batch_model3D = self.model3d.unsqueeze(0).repeat(len(bboxes), 1, 1)
        batch_cam_matrix = self.cam_matrix.unsqueeze(0).repeat(len(bboxes), 1, 1)

        # SPIGA inputs
        model_inputs = [batch_images, batch_model3D, batch_cam_matrix]
        return model_inputs, crop_bboxes

    def net_forward(self, inputs):
        """Run the SPIGA network on the batched inputs."""
        outputs = self.model(inputs)
        return outputs

    def postreatment(self, output, crop_bboxes, bboxes):
        """Map network outputs from crop coordinates back to original-image coordinates."""
        features = {}
        crop_bboxes = np.array(crop_bboxes)
        bboxes = np.array(bboxes)

        if 'Landmarks' in output.keys():
            landmarks = output['Landmarks'][-1].cpu().detach().numpy()
            landmarks = landmarks.transpose((1, 0, 2))
            landmarks = landmarks*self.model_cfg.image_size
            # Undo the crop normalization: crop frame -> original bbox frame.
            landmarks_norm = (landmarks - crop_bboxes[:, 0:2]) / crop_bboxes[:, 2:4]
            landmarks_out = (landmarks_norm * bboxes[:, 2:4]) + bboxes[:, 0:2]
            landmarks_out = landmarks_out.transpose((1, 0, 2))
            features['landmarks'] = landmarks_out.tolist()

        # Pose output
        if 'Pose' in output.keys():
            pose = output['Pose'].cpu().detach().numpy()
            features['headpose'] = pose.tolist()

        return features

    def select_inputs(self, batch):
        """Pick the configured input tensors from a dataloader batch, moved to GPU."""
        inputs = []
        for ft_name in self.model_inputs:
            data = batch[ft_name]
            inputs.append(self._data2device(data.type(torch.float)))
        return inputs

    def _data2device(self, data):
        """Recursively move tensors (optionally nested in lists/dicts) to gpus[0]."""
        if isinstance(data, list):
            data_var = data
            for data_id, v_data in enumerate(data):
                data_var[data_id] = self._data2device(v_data)
        # BUGFIX: this branch was a second independent `if`, so list inputs fell
        # through to the tensor branch below and crashed on `list.cuda`.
        elif isinstance(data, dict):
            data_var = data
            for k, v in data.items():
                data[k] = self._data2device(v)
        else:
            with torch.no_grad():
                data_var = data.cuda(device=self.gpus[0], non_blocking=True)
        return data_var
| 5,368 | 37.905797 | 120 | py |
SPIGA | SPIGA-main/spiga/inference/config.py | from collections import OrderedDict
from spiga.data.loaders.dl_config import DatabaseStruct
# Download links for the pretrained SPIGA weights, keyed by training dataset.
# Note: 'cofw68' reuses the 300W-private weights (see ModelConfig.update_with_dataset).
MODELS_URL = {'wflw': 'https://drive.google.com/uc?export=download&confirm=yes&id=1h0qA5ysKorpeDNRXe9oYkVcVe8UYyzP7',
              '300wpublic': 'https://drive.google.com/uc?export=download&confirm=yes&id=1YrbScfMzrAAWMJQYgxdLZ9l57nmTdpQC',
              '300wprivate': 'https://drive.google.com/uc?export=download&confirm=yes&id=1fYv-Ie7n14eTD0ROxJYcn6SXZY5QU9SM',
              'merlrav': 'https://drive.google.com/uc?export=download&confirm=yes&id=1GKS1x0tpsTVivPZUk_yrSiMhwEAcAkg6',
              'cofw68': 'https://drive.google.com/uc?export=download&confirm=yes&id=1fYv-Ie7n14eTD0ROxJYcn6SXZY5QU9SM'}
class ModelConfig(object):
    """Container for SPIGA inference settings (weights, pretreatment and dataset)."""

    def __init__(self, dataset_name=None, load_model_url=True):
        # Model configuration
        self.model_weights = None
        self.model_weights_path = None
        self.load_model_url = load_model_url
        self.model_weights_url = None
        # Pretreatment
        self.focal_ratio = 1.5  # Camera matrix focal length ratio.
        self.target_dist = 1.6  # Target distance zoom in/out around face.
        self.image_size = (256, 256)
        # Outputs
        self.ftmap_size = (64, 64)
        # Dataset
        self.dataset = None
        if dataset_name is not None:
            self.update_with_dataset(dataset_name)

    def update_with_dataset(self, dataset_name):
        """Fill the dataset-dependent fields (weights file/URL) for *dataset_name*."""
        new_params = {'dataset': DatabaseStruct(dataset_name),
                      'model_weights': 'spiga_%s.pt' % dataset_name}
        if dataset_name == 'cofw68':  # Test only
            new_params['model_weights'] = 'spiga_300wprivate.pt'
        if self.load_model_url:
            new_params['model_weights_url'] = MODELS_URL[dataset_name]
        self.update(new_params)

    def update(self, params_dict):
        """Set known attributes from *params_dict*; unknown keys raise a Warning."""
        known = self.state_dict()
        for key, value in params_dict.items():
            if key in known or hasattr(self, key):
                setattr(self, key, value)
            else:
                raise Warning('Unknown option: {}: {}'.format(key, value))

    def state_dict(self):
        """Return an OrderedDict of every public (non-underscore) attribute."""
        public = OrderedDict()
        for attr in self.__dict__.keys():
            if not attr.startswith('_'):
                public[attr] = getattr(self, attr)
        return public
| 2,338 | 38.644068 | 124 | py |
SPIGA | SPIGA-main/spiga/inference/__init__.py | 0 | 0 | 0 | py | |
SPIGA | SPIGA-main/colab_tutorials/video_tools/record.py | from IPython.display import display, Javascript, HTML
from google.colab.output import eval_js
from base64 import b64decode, b64encode
def record_video(filename):
  """Record a webcam clip in the Colab front-end and save it to *filename*.

  Injects a JS recorder into the notebook output, waits for the user to start
  and stop recording, then base64-decodes the webm payload and writes it out.
  """
  js = Javascript("""
    async function recordVideo() {
      const options = { mimeType: "video/webm; codecs=vp9" };
      const div = document.createElement('div');
      const capture = document.createElement('button');
      const stopCapture = document.createElement("button");
      capture.textContent = "Start Recording";
      capture.style.background = "orange";
      capture.style.color = "white";
      stopCapture.textContent = "Stop Recording";
      stopCapture.style.background = "red";
      stopCapture.style.color = "white";
      div.appendChild(capture);
      const video = document.createElement('video');
      const recordingVid = document.createElement("video");
      video.style.display = 'block';
      const stream = await navigator.mediaDevices.getUserMedia({audio:true, video: true});
      let recorder = new MediaRecorder(stream, options);
      document.body.appendChild(div);
      div.appendChild(video);
      video.srcObject = stream;
      video.muted = true;
      await video.play();
      google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
      await new Promise((resolve) => {
        capture.onclick = resolve;
      });
      recorder.start();
      capture.replaceWith(stopCapture);
      await new Promise((resolve) => stopCapture.onclick = resolve);
      recorder.stop();
      let recData = await new Promise((resolve) => recorder.ondataavailable = resolve);
      let arrBuff = await recData.data.arrayBuffer();
      // stop the stream and remove the video element
      stream.getVideoTracks()[0].stop();
      div.remove();
      let binaryString = "";
      let bytes = new Uint8Array(arrBuff);
      bytes.forEach((byte) => {
        binaryString += String.fromCharCode(byte);
      })
      return btoa(binaryString);
    }
    """)
  try:
    display(js)
    data = eval_js('recordVideo({})')
    binary = b64decode(data)
    with open(filename, "wb") as video_file:
      video_file.write(binary)
    # BUGFIX: the message was an f-string with no placeholder and never
    # reported the actual path; interpolate the filename.
    print(f"Finished recording video at: {filename}")
  except Exception as err:
    print(str(err))
def show_video(video_path, video_width=600):
  # Return an inline HTML5 <video> player with the file embedded as a base64 data URL.
  # NOTE(review): the file is only read, so "rb" would suffice instead of "r+b";
  # the handle is also never closed explicitly — relies on GC. Confirm before changing.
  video_file = open(video_path, "r+b").read()
  video_url = f"data:video/mp4;base64,{b64encode(video_file).decode()}"
  return HTML(f"""<video width={video_width} controls><source src="{video_url}"></video>""") | 2,563 | 31.871795 | 94 | py |
SPIGA | SPIGA-main/colab_tutorials/video_tools/utils.py | import numpy as np
import PIL
import io
import cv2
from base64 import b64decode, b64encode
def js_to_image(js_reply):
    """
    Convert the JavaScript object into an OpenCV image.
    @param js_reply: JavaScript object containing image from webcam
    @return img: OpenCV BGR image
    """
    # The reply is a data URL: "data:image/...;base64,<payload>".
    payload = js_reply.split(',')[1]
    raw_bytes = b64decode(payload)
    # View the bytes as a uint8 buffer and let OpenCV decode the JPEG/PNG.
    encoded = np.frombuffer(raw_bytes, dtype=np.uint8)
    img = cv2.imdecode(encoded, flags=1)
    return img
def bbox_to_bytes(bbox_array):
    """
    Convert OpenCV Rectangle bounding box image into base64 byte string to be overlayed on video stream.
    @param bbox_array: Numpy array (pixels) containing rectangle to overlay on video stream.
    @return bbox_bytes: Base64 image byte string
    """
    # Render the RGBA overlay to an in-memory PNG.
    overlay = PIL.Image.fromarray(bbox_array, 'RGBA')
    png_buffer = io.BytesIO()
    overlay.save(png_buffer, format='png')
    # Wrap the base64 payload in a data URL for the browser.
    encoded = str(b64encode(png_buffer.getvalue()), 'utf-8')
    bbox_bytes = 'data:image/png;base64,{}'.format(encoded)
    return bbox_bytes
def image_to_bytes(image):
    """
    Convert OpenCV image into base64 byte string to be overlayed on video stream.
    @param image: Input image.
    @return img_bytes: Base64 image byte string.
    """
    # JPEG-encode in memory, then wrap the base64 payload in a data URL.
    # NOTE(review): the encode success flag `ret` is ignored — a failed encode
    # would silently produce garbage; confirm inputs are always valid images.
    ret, buffer = cv2.imencode('.jpg', image)
    jpg_as_text = b64encode(buffer).decode('utf-8')
    img_bytes = f'data:image/jpeg;base64,{jpg_as_text}'
    return img_bytes | 1,589 | 29.576923 | 104 | py |
ReconVAT | ReconVAT-master/train_baseline_Thickstun.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)  # downsampling kernel/stride (also redefined inside train())
mode = 'imagewise'
sparsity = 1
output_channel = 2
logging_freq = 10  # epochs between tensorboard logging (see tensorboard_log_without_VAT call)
saving_freq = 10   # epochs between checkpoint saves in train()
# Sacred config scope: every local variable defined below becomes an
# experiment config entry (do not rename them — names are the config keys).
@ex.config
def config():
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True
    w_size = 31
    spec = 'Mel'
    resume_iteration = None
    train_on = 'String'
    n_heads=4
    position=True
    iteration = 10
    VAT_start = 0
    alpha = 1
    VAT=True
    XI= 1e-6
    eps=1.3
    small = True
    KL_Div = False
    reconstruction = False
    batch_size = 1
    train_batch_size = 1
    sequence_length = 327680
    # Halve batch/sequence on GPUs with less than ~10GB memory.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 0.0001
    # base_lr = learning_rate
    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    refresh = False
    logdir = f'{root}/baseline_ThickStun-lr={learning_rate}'+ datetime.now().strftime('%y%m%d-%H%M%S')
    ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction):
    """Train the Thickstun baseline model and evaluate it on the full test split.

    All parameters are injected by sacred from the config() scope above.
    """
    print_config(ex.current_run)
    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
        sequence_length=sequence_length,
        validation_length=sequence_length,
        refresh=refresh,
        device=device,
        small=small,
        supersmall=True,
        dataset=train_on)
    # Cap the validation batch size at 4.
    if len(validation_dataset)>4:
        val_batch_size=4
    else:
        val_batch_size = len(validation_dataset)
    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization
    ds_ksize, ds_stride = (2,2),(2,2)
    model = Thickstun()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), learning_rate)
    summary(model)
    # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=step_size_up,cycle_momentum=False)
    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)
    # loop = tqdm(range(resume_iteration + 1, iterations + 1))
    print(f'supervised_loader')
    for ep in range(1, epoches+1):
        predictions, losses, optimizer = train_model(model, ep, supervised_loader,
                                                     optimizer, scheduler, clip_gradient_norm)
        loss = sum(losses.values())
        # Logging results to tensorboard
        # The writer is created lazily on the first epoch and reused afterwards.
        if ep == 1:
            writer = SummaryWriter(logdir) # create tensorboard logger
        tensorboard_log_without_VAT(batch_visualize, model, validation_dataset, supervised_loader,
                                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                                    False, VAT_start, reconstruction)
        # Saving model
        if (ep)%saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
        for key, value in {**losses}.items():
            writer.add_scalar(key, value.item(), global_step=ep)
    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir,'./MIDI_results'))
    # Print mean ± std for every metric/* entry and pickle the raw dict.
    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
    export_path = os.path.join(logdir, 'result_dict')
    pickle.dump(metrics, open(export_path, 'wb'))
| 5,952 | 36.677215 | 142 | py |
ReconVAT | ReconVAT-master/evaluate.py | import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch
from tqdm import tqdm
from model import *
from evaluate import *
import pickle
import shutil
import os
# Sacred experiment entry point for standalone model evaluation.
ex = Experiment('evaluate')
log = True
@ex.config
def config():
spec = 'Mel'
attention_mode = 'onset'
mode = 'imagewise'
weight_file = None
output_folder = 'results'
inference=True
LSTM = True
onset = True
device = 'cuda:0'
refresh=False
cat_feat = False
Simple_attention=True
logdir = os.path.join('results', weight_file)
@ex.automain
def train(spec, inference, refresh, device, logdir, weight_file, mode, LSTM, onset, Simple_attention, cat_feat):
    """Evaluate a trained checkpoint on the MAPS test split (full songs).

    All arguments are injected by Sacred from the config scope.  The model
    architecture and its ablation variants are inferred from the weight file
    name, expected to look like ``<Type>-<..>-<..>-<attention>[-<modifier>]``.
    Metrics are printed and pickled into ``logdir``.
    """
    # Tag output folders/files with whether onset inference was used.
    inference_state = 'infer' if inference else 'no_infer'
    print_config(ex.current_run)

    # MAPS test split: the two acoustic-piano subsets, full-length songs.
    validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=None, device=device, refresh=refresh)

    weight_path = os.path.join('trained_weights', weight_file)
    # The weight-file name encodes the architecture and the attention variant;
    # split once instead of re-splitting for every field.
    name_fields = os.path.basename(weight_path).split('-')
    model_type = name_fields[0]
    attention_mode = name_fields[3]
    if attention_mode == 'feat':
        attention_mode = 'activation'  # change the flag to match the weight name

    # Optional 5th field encodes ablations (no_biLSTM / no_onset / window size).
    try:
        modifier = name_fields[4]
        if modifier == 'no_biLSTM':
            LSTM = False
        elif modifier == 'no_onset':
            onset = False
    except IndexError:  # no modifier field present in the file name
        modifier = 'Null'

    if model_type == 'Original':
        model = OnsetsAndFrames(N_BINS, MAX_MIDI - MIN_MIDI + 1, log=log, mode=mode,
                                spec=spec, LSTM=LSTM, onset_stack=onset)
    elif model_type == 'Attention':
        print('run me')
        model = OnsetsAndFrames_with_fast_local_attn(N_BINS, MAX_MIDI - MIN_MIDI + 1,
                                                     log=log, mode=mode, spec=spec,
                                                     LSTM=LSTM, onset_stack=onset,
                                                     attention_mode=attention_mode)
    elif model_type == 'Simple':
        # modifier is assumed to look like e.g. 'ws31' here — TODO confirm naming scheme
        model = SimpleModel(N_BINS, MAX_MIDI - MIN_MIDI + 1, log=log, mode=mode, spec=spec,
                            device=device, w_size=int(modifier[2:]), attention=Simple_attention, layers=1,
                            cat_feat=False, onset=False)
    else:
        # Fail fast instead of hitting a NameError on `model` below.
        raise ValueError(f'Unknown model type in weight file name: {model_type!r}')

    model.to(device)
    model.load_my_state_dict(torch.load(weight_path + '.pt'))

    with torch.no_grad():
        model.eval()
        metrics = evaluate_wo_velocity(tqdm(validation_dataset), model, reconstruction=False,
                                       save_path=os.path.join(logdir, f'./MIDI_results-{inference_state}-{modifier}'),
                                       onset=inference)

    # Report mean ± std for every note/frame metric, in percent.
    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values)*100:.3f} ± {np.std(values)*100:.3f}')

    export_path = os.path.join(logdir, f'result_dict_{inference_state}-{modifier}')
    # Context manager closes the handle; the original leaked the file object.
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
ReconVAT | ReconVAT-master/train_UNet_Onset_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)  # U-Net down-sampling kernel size and stride
mode = 'imagewise'  # spectrogram normalisation mode
sparsity = 1
output_channel = 2
logging_freq = 100  # epoch interval for the heavier TensorBoard logging
saving_freq = 200  # epoch interval for saving model/optimizer checkpoints
@ex.config
def config():
    # Sacred config scope: every assignment below becomes a config entry that
    # can be overridden from the command line.
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True  # use log-compressed spectrograms
    w_size = 31  # attention window size
    spec = 'Mel'
    resume_iteration = None  # if set, resume from trained_MAPS/<iteration>.pt
    train_on = 'MAPS'  # dataset selector for prepare_VAT_dataset
    n_heads=4
    position=True
    iteration = 10  # inner iterations per epoch in train_VAT_model
    VAT_start = 0  # epoch at which VAT-specific logging/training kicks in
    alpha = 1  # weight of the VAT loss term
    VAT=True  # enable Virtual Adversarial Training
    XI= 1e-6  # VAT finite-difference step size
    eps=2  # VAT perturbation radius
    small = False  # use the reduced training subset
    supersmall = False  # use the even smaller training subset
    KL_Div = False
    reconstruction = False  # train the reconstruction branch
    batch_size = 8  # batch size for the unsupervised loader
    train_batch_size = 8  # batch size for the supervised loader
    sequence_length = 327680
    # Halve memory consumption on GPUs with less than ~10GB of memory.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 1e-3
    # base_lr = learning_rate
    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    refresh = False  # if True, rebuild cached dataset tensors
    # Run directory name encodes the key hyper-parameters plus a timestamp.
    logdir = f'{root}/Unet_Onset-recons={reconstruction}-XI={XI}-eps={eps}-alpha={alpha}-train_on=small_{small}_{train_on}-w_size={w_size}-n_heads={n_heads}-lr={learning_rate}-'+ datetime.now().strftime('%y%m%d-%H%M%S')
    ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction, supersmall):
    """Train a UNet_Onset model, optionally with Virtual Adversarial Training.

    All arguments are injected by Sacred from the config scope.  After
    training, the model is evaluated on the full-length MAPS test songs and
    the metrics are printed and pickled into ``logdir``.
    """
    print_config(ex.current_run)

    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
        sequence_length=sequence_length,
        validation_length=sequence_length,
        refresh=refresh,
        device=device,
        small=small,
        supersmall=supersmall,
        dataset=train_on)

    # The unsupervised loader is only needed when VAT is on; train_VAT_model
    # accepts None in the supervised-only case (the original duplicated the
    # whole training call just to pass None here).
    unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True) if VAT else None

    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, 4, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader))  # one fixed batch, reused for visualization

    ds_ksize, ds_stride = (2,2),(2,2)
    if resume_iteration is None:
        model = UNet_Onset(ds_ksize, ds_stride, log=log, reconstruction=reconstruction,
                           mode=mode, spec=spec, device=device, XI=XI, eps=eps)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        resume_iteration = 0
    else:  # Loading checkpoints and continue training
        trained_dir = 'trained_MAPS'  # Assume that the checkpoint is in this folder
        model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
        model = torch.load(model_path)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))
    summary(model)

    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)

    for ep in range(1, epoches + 1):
        # unsupervised_loader is None when VAT is disabled.
        predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
                                                         optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir)  # create tensorboard logger
        # VAT-specific visualizations are only produced once VAT has started.
        tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                        ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                        ep >= VAT_start, VAT_start, reconstruction)

        # Saving model
        if ep % saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))

        for key, value in losses.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir, './MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    # Context manager closes the handle; the original leaked the file object.
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
ReconVAT | ReconVAT-master/transcribe_files.py | import pickle
import os
import numpy as np
from model import *
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
ex = Experiment('transcription')
def transcribe2midi(data, model, model_type, onset_threshold=0.5, frame_threshold=0.5, save_path=None, reconstruction=True, onset=True, pseudo_onset=False, rule='rule2', VAT=False):
    """Transcribe every item in ``data`` with ``model`` and write one MIDI file each.

    Output files are named ``<model_type>-<audio basename>.mid`` inside
    ``save_path``.  All notes are written with a fixed velocity of 127.
    (``reconstruction``/``onset``/``pseudo_onset``/``VAT`` are kept for
    interface compatibility; they are not used here.)
    """
    if save_path is not None:
        os.makedirs(save_path, exist_ok=True)  # make sure the output folder exists
    for i in data:
        pred = model.transcribe(i)
        for key, value in pred.items():
            if key in ['frame','onset', 'frame2', 'onset2']:
                value.squeeze_(0).relu_()  # remove batch dim and make sure no negative values
        p_est, i_est = extract_notes_wo_velocity(pred['onset'], pred['frame'], onset_threshold, frame_threshold, rule=rule)
        # NOTE: the original also derived frame-level (t_est, f_est) via
        # notes_to_frames but never used them; that dead computation is dropped
        # (notes_to_frames is presumably a pure conversion — confirm if in doubt).

        # Converting time steps to seconds and midi number to frequency
        scaling = HOP_LENGTH / SAMPLE_RATE
        i_est = (i_est * scaling).reshape(-1, 2)
        p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])

        # Fixed: the original used `basename[:-4] + 'mid'`, which only yields
        # '<name>.mid' for 4-character extensions such as '.flac' (a '.wav'
        # input produced '<name>mid'); splitext handles any extension.
        base = os.path.splitext(os.path.basename(i['path']))[0]
        midi_path = os.path.join(save_path, f'{model_type}-{base}.mid')
        print(f'midi_path = {midi_path}')
        save_midi(midi_path, p_est, i_est, [127] * len(p_est))
log=True  # models operate on log-compressed spectrograms
mode='imagewise'  # spectrogram normalisation mode
spec='Mel'  # spectrogram type
root = 'Application'
input_path = os.path.join(root, 'Input')  # audio files to transcribe are read from here
output_path = os.path.join(root, 'Output')  # transcribed MIDI files are written here
@ex.config
def config():
    # Sacred config scope: override from the CLI, e.g. `with model_type=...`.
    device='cuda:0'
    model_type='ReconVAT'  # 'ReconVAT' or 'baseline_Multi_Inst'
    # instrument='string'
@ex.automain
def main(device, model_type):
    """Transcribe every audio file in ``Application/Input`` into MIDI files.

    Loads the pretrained weights matching ``model_type`` and writes the
    transcriptions into ``Application/Output``.
    """
    # Load audios from the Input files
    application_dataset = Application_Dataset(input_path, device=device)

    # Choose models
    if model_type == 'ReconVAT':
        model = UNet((2,2), (2,2), log=log, reconstruction=True, mode=mode, spec=spec, device=device)
        weight_path = 'Weight/String_MusicNet/Unet_R_VAT-XI=1e-06-eps=1.3-String_MusicNet-lr=0.001/weight.pt'
    elif model_type == 'baseline_Multi_Inst':
        model = Semantic_Segmentation(torch.empty(1,1,640,N_BINS), 1, device=device)
        weight_path = 'Weight/String_MusicNet/baseline_Multi_Inst/weight.pt'
    else:
        # Fail fast instead of hitting a NameError on `model` below.
        raise ValueError(f'Unknown model_type: {model_type!r}')

    # Load weights
    print('Loading model weight')
    model.load_state_dict(torch.load(weight_path, map_location=device))
    model.to(device)
    print('Loading done')

    print('Transcribing Music')
    transcribe2midi(tqdm(application_dataset), model, model_type, reconstruction=False,
                    save_path=output_path)
ReconVAT | ReconVAT-master/train_baseline_Multi_Inst.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)  # down-sampling kernel size and stride
mode = 'imagewise'  # spectrogram normalisation mode
sparsity = 1
output_channel = 2
logging_freq = 100  # epoch interval for the heavier TensorBoard logging
saving_freq = 200  # epoch interval for saving model/optimizer checkpoints
@ex.config
def config():
    # Sacred config scope: every assignment below becomes a config entry that
    # can be overridden from the command line.
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True  # use log-compressed spectrograms
    w_size = 31  # attention window size
    spec = 'Mel'
    resume_iteration = None  # if set, resume from trained_MAPS/<iteration>.pt
    train_on = 'String'  # dataset selector for prepare_VAT_dataset
    n_heads=4
    position=True
    iteration = 10  # inner iterations per epoch in train_VAT_model
    VAT_start = 0  # epoch at which VAT-specific logging/training kicks in
    alpha = 1  # weight of the VAT loss term
    VAT=False  # this baseline trains without Virtual Adversarial Training by default
    XI= 1e-6
    eps=2
    small = False  # use the reduced training subset
    supersmall = False  # use the even smaller training subset
    KL_Div = False
    reconstruction = False
    batch_size = 8  # batch size for the unsupervised loader
    # With the super-small subset there is only enough data for batch size 1.
    if small==True and supersmall==True:
        train_batch_size=1
    else:
        train_batch_size = 8
    sequence_length = 327680
    # Halve memory consumption on GPUs with less than ~10GB of memory.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 1e-3
    # base_lr = learning_rate
    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    refresh = False  # if True, rebuild cached dataset tensors
    # Run directory name encodes the key hyper-parameters plus a timestamp.
    logdir = f'{root}/VAT_Segmentation={reconstruction}-KL={KL_Div}-XI={XI}-eps={eps}-alpha={alpha}-train_on=small_{small}_{train_on}-w_size={w_size}-n_heads={n_heads}-lr={learning_rate}-'+ datetime.now().strftime('%y%m%d-%H%M%S')
    ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size, supersmall,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction):
    """Train the semantic-segmentation multi-instrument baseline.

    All arguments are injected by Sacred from the config scope.  After
    training, the model is evaluated on the full-length test songs and the
    metrics are printed and pickled into ``logdir``.
    """
    print_config(ex.current_run)

    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
        sequence_length=sequence_length,
        validation_length=sequence_length,
        refresh=refresh,
        device=device,
        small=small,
        supersmall=supersmall,
        dataset=train_on)

    # The unsupervised loader is only needed when VAT is on; train_VAT_model
    # accepts None in the supervised-only case (the original duplicated the
    # whole training call just to pass None here).
    unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True) if VAT else None

    # MAPS has enough validation songs for a fixed batch of 4; the other
    # datasets are small enough to validate in a single batch.
    if train_on == 'MAPS':
        val_batch_size = 4
    else:
        val_batch_size = len(validation_dataset)

    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader))  # one fixed batch, reused for visualization

    ds_ksize, ds_stride = (2,2),(2,2)
    if resume_iteration is None:
        # Need a dummy input to inference the model size
        model = Semantic_Segmentation(torch.empty(1,1,640,N_BINS), 1, device=device)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        resume_iteration = 0
    else:  # Loading checkpoints and continue training
        trained_dir = 'trained_MAPS'  # Assume that the checkpoint is in this folder
        model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
        model = torch.load(model_path)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))
    summary(model)

    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)

    for ep in range(1, epoches + 1):
        # unsupervised_loader is None when VAT is disabled.
        predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
                                                         optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir)  # create tensorboard logger
        # VAT-specific visualizations only once VAT is enabled and has started.
        tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                        ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                        VAT and ep >= VAT_start, VAT_start, reconstruction)

        # Saving model
        if ep % saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))

        for key, value in losses.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir, './MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    # Context manager closes the handle; the original leaked the file object.
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
ReconVAT | ReconVAT-master/train_baseline_Prestack.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)  # down-sampling kernel size and stride
mode = 'imagewise'  # spectrogram normalisation mode
sparsity = 1
output_channel = 2
logging_freq = 100  # epoch interval for the heavier TensorBoard logging
saving_freq = 100  # epoch interval for saving model/optimizer checkpoints
# file_path = 'Retrain_Prestack-lr=0.0001210325-141510'
@ex.config
def config():
    # Sacred config scope: every assignment below becomes a config entry that
    # can be overridden from the command line.
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True  # use log-compressed spectrograms
    w_size = 31
    spec = 'Mel'
    resume_iteration = None
    train_on = 'String'  # dataset selector for prepare_VAT_dataset
    n_heads=4
    position=True
    iteration = 10
    VAT_start = 0
    alpha = 1
    VAT=True
    XI= 1e-6
    eps=1.3
    small = True  # this baseline trains on the reduced subset
    KL_Div = False
    reconstruction = False
    batch_size = 1
    train_batch_size = 1
    sequence_length = 327680//8  # shorter sequences than the other baselines
    # Halve memory consumption on GPUs with less than ~10GB of memory.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 1e-5
    # base_lr = learning_rate
    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    refresh = False
    logdir = f'{root}/baseline_Prestack-'+ datetime.now().strftime('%y%m%d-%H%M%S')
    ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction, root):
    """Train the Prestack baseline model (fully supervised, no VAT).

    All arguments are injected by Sacred from the config scope.  After
    training, the model is evaluated on the full-length test songs and the
    metrics are printed and pickled into ``logdir``.
    """
    print_config(ex.current_run)

    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
        sequence_length=sequence_length,
        validation_length=sequence_length,
        refresh=refresh,
        device=device,
        small=small,
        supersmall=True,
        dataset=train_on)

    # Validate with at most 4 items per batch.
    val_batch_size = min(4, len(validation_dataset))

    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader))  # one fixed batch, reused for visualization

    ds_ksize, ds_stride = (2,2),(2,2)
    model = Prestack_Model()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), learning_rate)
    # NOTE(review): this model reportedly crashes during long runs; if that
    # happens, reload the latest 'model-<ep>.pt' checkpoint from logdir into
    # `model` here before continuing.
    summary(model)

    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)

    for ep in range(1, epoches + 1):
        predictions, losses, optimizer = train_model(model, ep, supervised_loader,
                                                     optimizer, scheduler, clip_gradient_norm)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir)  # create tensorboard logger
        tensorboard_log_without_VAT(batch_visualize, model, validation_dataset, supervised_loader,
                                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                                    False, VAT_start, reconstruction)

        # Saving model
        if ep % saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))

        for key, value in losses.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir, './MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    # Context manager closes the handle; the original leaked the file object.
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.