repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/flows.py | import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
def create_masks(
    input_size, hidden_size, n_hidden, input_order="sequential", input_degrees=None
):
    """Build the binary connectivity masks for a MADE network.

    Returns ``(masks, input_degrees)``: one mask per weight matrix
    (``n_hidden + 2`` in total, including the output layer) plus the degree
    assignment of the input layer, so a stack of MADEs can flip and reuse the
    variable ordering.
    """
    # MADE paper sec 4:
    # degrees of connections between layers -- ensure at most in_degree - 1 connections
    degrees = []
    # set input degrees to what is provided in args (the flipped order of the previous layer in a stack of mades);
    # else init input degrees based on strategy in input_order (sequential or random)
    if input_order == "sequential":
        degrees += (
            [torch.arange(input_size)] if input_degrees is None else [input_degrees]
        )
        # hidden units cycle over degrees 0 .. input_size-2
        # NOTE(review): assumes input_size > 1 — input_size == 1 divides by zero here
        for _ in range(n_hidden + 1):
            degrees += [torch.arange(hidden_size) % (input_size - 1)]
        # output degrees: `%` binds tighter than `-`, and `% input_size` is a
        # no-op on values already < input_size, so both branches evaluate to
        # (degrees - 1); output unit d then connects only to hidden units of
        # degree <= d - 1, which enforces the strict autoregressive property
        degrees += (
            [torch.arange(input_size) % input_size - 1]
            if input_degrees is None
            else [input_degrees % input_size - 1]
        )
    elif input_order == "random":
        degrees += (
            [torch.randperm(input_size)] if input_degrees is None else [input_degrees]
        )
        # each layer samples degrees no lower than the previous layer's
        # minimum, so every unit keeps at least one incoming connection
        for _ in range(n_hidden + 1):
            min_prev_degree = min(degrees[-1].min().item(), input_size - 1)
            degrees += [torch.randint(min_prev_degree, input_size, (hidden_size,))]
        min_prev_degree = min(degrees[-1].min().item(), input_size - 1)
        degrees += (
            [torch.randint(min_prev_degree, input_size, (input_size,)) - 1]
            if input_degrees is None
            else [input_degrees - 1]
        )
    # construct masks: unit of degree d1 may receive input from units of degree <= d1
    masks = []
    for (d0, d1) in zip(degrees[:-1], degrees[1:]):
        masks += [(d1.unsqueeze(-1) >= d0.unsqueeze(0)).float()]
    return masks, degrees[0]
class FlowSequential(nn.Sequential):
    """Chain of normalizing-flow layers.

    Each child maps ``(x, y) -> (x', log|det J|)``; this container threads the
    sample through every layer (in reverse for :meth:`inverse`) while summing
    the log-absolute-determinants of the Jacobians.
    """

    def forward(self, x, y):
        total_logdet = 0
        for layer in self:
            x, logdet = layer(x, y)
            total_logdet = total_logdet + logdet
        return x, total_logdet

    def inverse(self, u, y):
        total_logdet = 0
        for layer in reversed(self):
            u, logdet = layer.inverse(u, y)
            total_logdet = total_logdet + logdet
        return u, total_logdet
class BatchNorm(nn.Module):
    """Invertible batch normalization as used by RealNVP/MAF.

    Normalizes with batch statistics during training (updating running
    estimates via an exponential moving average) and with the running
    statistics at evaluation time; both directions also return the
    log-abs-determinant of the transform's Jacobian.
    """

    def __init__(self, input_size, momentum=0.9, eps=1e-5):
        super().__init__()
        self.momentum = momentum
        self.eps = eps
        # learnable affine parameters: y = exp(log_gamma) * x_hat + beta
        self.log_gamma = nn.Parameter(torch.zeros(input_size))
        self.beta = nn.Parameter(torch.zeros(input_size))
        # running estimates used at evaluation time
        self.register_buffer("running_mean", torch.zeros(input_size))
        self.register_buffer("running_var", torch.ones(input_size))

    def _statistics(self):
        # Training relies on the batch statistics stashed by forward();
        # evaluation uses the running estimates.
        if self.training:
            return self.batch_mean, self.batch_var
        return self.running_mean, self.running_var

    def forward(self, x, cond_y=None):
        if self.training:
            flat = x.view(-1, x.shape[-1])
            self.batch_mean = flat.mean(0)
            # note MAF paper uses the biased variance estimate,
            # i.e. var(0, unbiased=False); this keeps the unbiased one
            self.batch_var = flat.var(0)
            # exponential moving average of the batch statistics
            self.running_mean.mul_(self.momentum).add_(
                self.batch_mean.data * (1 - self.momentum)
            )
            self.running_var.mul_(self.momentum).add_(
                self.batch_var.data * (1 - self.momentum)
            )
        mean, var = self._statistics()
        # normalized input (cf. original batch norm paper, algorithm 1)
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        y = self.log_gamma.exp() * x_hat + self.beta
        # log_abs_det_jacobian per dimension (cf. RealNVP paper)
        log_abs_det_jacobian = self.log_gamma - 0.5 * torch.log(var + self.eps)
        return y, log_abs_det_jacobian.expand_as(x)

    def inverse(self, y, cond_y=None):
        mean, var = self._statistics()
        x_hat = (y - self.beta) * torch.exp(-self.log_gamma)
        x = x_hat * torch.sqrt(var + self.eps) + mean
        log_abs_det_jacobian = 0.5 * torch.log(var + self.eps) - self.log_gamma
        return x, log_abs_det_jacobian.expand_as(x)
class LinearMaskedCoupling(nn.Module):
    """RealNVP-style affine coupling layer (modified per the MAF paper).

    The binary ``mask`` selects the conditioning coordinates, which pass
    through unchanged; the remaining coordinates are transformed by an affine
    map whose log-scale (tanh-squashed for stability) and shift are produced
    by small MLPs.
    """

    def __init__(self, input_size, hidden_size, n_hidden, mask, cond_label_size=None):
        super().__init__()

        self.register_buffer("mask", mask)

        # scale network: Linear -> (Tanh -> Linear) * (n_hidden + 1)
        cond_size = cond_label_size if cond_label_size is not None else 0
        layers = [nn.Linear(input_size + cond_size, hidden_size)]
        for _ in range(n_hidden):
            layers.extend([nn.Tanh(), nn.Linear(hidden_size, hidden_size)])
        layers.extend([nn.Tanh(), nn.Linear(hidden_size, input_size)])
        self.s_net = nn.Sequential(*layers)

        # translation network: same topology, Tanh replaced by ReLU (MAF paper)
        self.t_net = copy.deepcopy(self.s_net)
        for idx, layer in enumerate(self.t_net):
            if not isinstance(layer, nn.Linear):
                self.t_net[idx] = nn.ReLU()

    def _scale_and_shift(self, masked, y):
        # both nets see the masked coordinates (plus optional conditioning y);
        # their outputs are zeroed on the pass-through coordinates
        net_in = masked if y is None else torch.cat([y, masked], dim=-1)
        log_s = torch.tanh(self.s_net(net_in)) * (1 - self.mask)
        t = self.t_net(net_in) * (1 - self.mask)
        return log_s, t

    def forward(self, x, y=None):
        # cf. RealNVP eq 8: u = x * exp(log_s) + t on the unmasked coordinates
        log_s, t = self._scale_and_shift(x * self.mask, y)
        u = x * torch.exp(log_s) + t
        # log det du/dx; the sum over input_size happens in the model's log_prob
        return u, log_s

    def inverse(self, u, y=None):
        log_s, t = self._scale_and_shift(u * self.mask, y)
        x = (u - t) * torch.exp(-log_s)
        # log det dx/du
        return x, -log_s
class MaskedLinear(nn.Linear):
    """MADE building block: a linear layer whose weight matrix is gated
    elementwise by a fixed binary mask, with an optional additive
    conditioning input."""

    def __init__(self, input_size, n_outputs, mask, cond_label_size=None):
        super().__init__(input_size, n_outputs)

        self.register_buffer("mask", mask)

        self.cond_label_size = cond_label_size
        if cond_label_size is not None:
            # unmasked weight for the conditioning features, scaled ~ 1/sqrt(d)
            self.cond_weight = nn.Parameter(
                torch.rand(n_outputs, cond_label_size) / math.sqrt(cond_label_size)
            )

    def forward(self, x, y=None):
        out = F.linear(x, self.weight * self.mask, self.bias)
        return out if y is None else out + F.linear(y, self.cond_weight)
class MADE(nn.Module):
    """Masked Autoencoder for Distribution Estimation (Germain et al., 2015).

    A single autoregressive layer that outputs, per input dimension, the mean
    and log-scale of an affine transform conditioned only on the preceding
    dimensions (under ``input_order``).  Used as the building block of MAF.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        n_hidden,
        cond_label_size=None,
        activation="ReLU",
        input_order="sequential",
        input_degrees=None,
    ):
        """
        Args:
            input_size -- scalar; dim of inputs
            hidden_size -- scalar; dim of hidden layers
            n_hidden -- scalar; number of hidden layers
            cond_label_size -- scalar or None; dim of the conditioning input
            activation -- str; activation function to use ("ReLU" or "Tanh")
            input_order -- str or tensor; variable order for creating the
                autoregressive masks (sequential|random), or the order flipped
                from the previous layer in a stack of MADEs
            input_degrees -- tensor or None; degrees handed down by the
                previous layer in a stack of MADEs
        """
        super().__init__()
        # standard-normal base distribution for log_prob under the model
        self.register_buffer("base_dist_mean", torch.zeros(input_size))
        self.register_buffer("base_dist_var", torch.ones(input_size))

        # autoregressive connectivity masks
        masks, self.input_degrees = create_masks(
            input_size, hidden_size, n_hidden, input_order, input_degrees
        )

        if activation == "ReLU":
            act = nn.ReLU()
        elif activation == "Tanh":
            act = nn.Tanh()
        else:
            raise ValueError("Check activation function.")

        # input layer carries the optional conditioning input
        self.net_input = MaskedLinear(
            input_size, hidden_size, masks[0], cond_label_size
        )
        # hidden stack; the final layer emits (mean, log-scale) per dimension,
        # hence the doubled output width and the repeated output mask
        hidden = []
        for m in masks[1:-1]:
            hidden += [act, MaskedLinear(hidden_size, hidden_size, m)]
        hidden += [
            act,
            MaskedLinear(hidden_size, 2 * input_size, masks[-1].repeat(2, 1)),
        ]
        self.net = nn.Sequential(*hidden)

    @property
    def base_dist(self):
        return Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        # MAF eq 4: u = (x - mu) * exp(-log_sigma)
        mu, log_sigma = self.net(self.net_input(x, y)).chunk(chunks=2, dim=-1)
        u = (x - mu) * torch.exp(-log_sigma)
        # MAF eq 5: per-dimension log|det du/dx|
        return u, -log_sigma

    def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
        # MAF eq 3: decode one dimension per pass, in autoregressive order
        x = torch.zeros_like(u)
        for i in self.input_degrees:
            mu, log_sigma = self.net(self.net_input(x, y)).chunk(chunks=2, dim=-1)
            x[..., i] = u[..., i] * torch.exp(log_sigma[..., i]) + mu[..., i]
        return x, log_sigma

    def log_prob(self, x, y=None):
        u, log_abs_det_jacobian = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=-1)
class Flow(nn.Module):
    """Base class for normalizing-flow density models.

    Subclasses populate ``self.net`` with an invertible module (e.g. a
    ``FlowSequential``).  An optional ``scale`` rescales the data before the
    flow and folds the corresponding |det J| term into the log-density.
    """

    def __init__(self, input_size):
        super().__init__()
        self.__scale = None
        self.net = None
        # standard-normal base distribution for log_prob / sampling
        self.register_buffer("base_dist_mean", torch.zeros(input_size))
        self.register_buffer("base_dist_var", torch.ones(input_size))

    @property
    def base_dist(self):
        return Normal(self.base_dist_mean, self.base_dist_var)

    @property
    def scale(self):
        return self.__scale

    @scale.setter
    def scale(self, scale):
        self.__scale = scale

    def forward(self, x, cond):
        """Map data x -> latent u, returning (u, log|det du/dx|)."""
        if self.scale is not None:
            # BUG FIX: was `x /= self.scale`, which mutated the caller's
            # tensor in place (and fails on leaf tensors requiring grad);
            # rescale out-of-place instead.
            x = x / self.scale
        u, log_abs_det_jacobian = self.net(x, cond)
        if self.scale is not None:
            log_abs_det_jacobian = log_abs_det_jacobian - torch.log(
                torch.abs(self.scale)
            )
        return u, log_abs_det_jacobian

    def inverse(self, u, cond):
        """Map latent u -> data x, returning (x, log|det dx/du|)."""
        x, log_abs_det_jacobian = self.net.inverse(u, cond)
        if self.scale is not None:
            # out-of-place for the same reason as forward()
            x = x * self.scale
            log_abs_det_jacobian = log_abs_det_jacobian + torch.log(
                torch.abs(self.scale)
            )
        return x, log_abs_det_jacobian

    def log_prob(self, x, cond):
        u, sum_log_abs_det_jacobians = self.forward(x, cond)
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=-1)

    def sample(self, sample_shape=torch.Size(), cond=None):
        # when conditioning is given, draw one sample per conditioning row
        shape = cond.shape[:-1] if cond is not None else sample_shape
        u = self.base_dist.sample(shape)
        sample, _ = self.inverse(u, cond)
        return sample
class RealNVP(Flow):
    """RealNVP density model: a stack of masked affine coupling layers with
    an alternating binary mask, optionally interleaved with invertible
    batch-norm layers."""

    def __init__(
        self,
        n_blocks,
        input_size,
        hidden_size,
        n_hidden,
        cond_label_size=None,
        batch_norm=True,
    ):
        super().__init__(input_size)

        layers = []
        # alternating mask 0,1,0,1,... flipped after every coupling block
        mask = torch.arange(input_size).float() % 2
        for _ in range(n_blocks):
            layers.append(
                LinearMaskedCoupling(
                    input_size, hidden_size, n_hidden, mask, cond_label_size
                )
            )
            mask = 1 - mask
            if batch_norm:
                layers.append(BatchNorm(input_size))

        self.net = FlowSequential(*layers)
class MAF(Flow):
    """Masked Autoregressive Flow (Papamakarios et al., 2017): a stack of
    MADE layers whose variable order is flipped between consecutive blocks,
    optionally interleaved with invertible batch-norm layers."""

    def __init__(
        self,
        n_blocks,
        input_size,
        hidden_size,
        n_hidden,
        cond_label_size=None,
        activation="ReLU",
        input_order="sequential",
        batch_norm=True,
    ):
        super().__init__(input_size)

        layers = []
        self.input_degrees = None
        for _ in range(n_blocks):
            made = MADE(
                input_size,
                hidden_size,
                n_hidden,
                cond_label_size,
                activation,
                input_order,
                self.input_degrees,
            )
            layers.append(made)
            # flip the autoregressive order for the next block
            self.input_degrees = made.input_degrees.flip(0)
            if batch_norm:
                layers.append(BatchNorm(input_size))

        self.net = FlowSequential(*layers)
| 13,996 | 32.646635 | 177 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/feature.py | from typing import List, Optional
import torch
import torch.nn as nn
class FeatureEmbedder(nn.Module):
    """Embeds several categorical features — one ``nn.Embedding`` per feature
    — and concatenates the embeddings along the last dimension."""

    def __init__(self, cardinalities: List[int], embedding_dims: List[int],) -> None:
        super().__init__()

        assert len(cardinalities) == len(embedding_dims), 'the length of two variables should match'

        self.__num_features = len(cardinalities)
        self.__embedders = nn.ModuleList(
            [nn.Embedding(card, dim) for card, dim in zip(cardinalities, embedding_dims)]
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.__num_features > 1:
            # slice the last dimension into self.__num_features pieces,
            # each of shape (N, T, 1) or (N, 1)
            feature_slices = torch.chunk(features, self.__num_features, dim=-1)
        else:
            feature_slices = [features]

        embedded = [
            embedder(feature_slice.squeeze(-1))
            for embedder, feature_slice in zip(self.__embedders, feature_slices)
        ]
        return torch.cat(embedded, dim=-1)
class FeatureAssembler(nn.Module):
    """Combines static/dynamic, categorical/real features into one
    ``(N, T, C)`` tensor: static features are tiled along the time axis, and
    categorical features are optionally embedded first."""

    def __init__(
        self,
        T: int,
        embed_static: Optional[FeatureEmbedder] = None,
        embed_dynamic: Optional[FeatureEmbedder] = None,
    ) -> None:
        super().__init__()

        self.T = T
        self.embeddings = nn.ModuleDict(
            {"embed_static": embed_static, "embed_dynamic": embed_dynamic}
        )

    def forward(
        self,
        feat_static_cat: torch.Tensor,
        feat_static_real: torch.Tensor,
        feat_dynamic_cat: torch.Tensor,
        feat_dynamic_real: torch.Tensor,
    ) -> torch.Tensor:
        parts = [
            self.process_static_cat(feat_static_cat),
            self.process_static_real(feat_static_real),
            self.process_dynamic_cat(feat_dynamic_cat),
            self.process_dynamic_real(feat_dynamic_real),
        ]
        return torch.cat(parts, dim=-1)

    def process_static_cat(self, feature: torch.Tensor) -> torch.Tensor:
        # embed (if configured), then tile along the time axis: (N, C) -> (N, T, C)
        if self.embeddings["embed_static"] is not None:
            feature = self.embeddings["embed_static"](feature)
        return feature.unsqueeze(1).expand(-1, self.T, -1).float()

    def process_dynamic_cat(self, feature: torch.Tensor) -> torch.Tensor:
        embedder = self.embeddings["embed_dynamic"]
        return feature.float() if embedder is None else embedder(feature)

    def process_static_real(self, feature: torch.Tensor) -> torch.Tensor:
        # tile along the time axis: (N, C) -> (N, T, C)
        return feature.unsqueeze(1).expand(-1, self.T, -1)

    def process_dynamic_real(self, feature: torch.Tensor) -> torch.Tensor:
        # already (N, T, C); passed through unchanged
        return feature
| 2,938 | 32.397727 | 100 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/activation.py | from typing import Optional, Union, List, Tuple
# Third-party imports
import torch.nn as nn
from torch import Tensor
class Activation(nn.Module):
    """
    Activation function wrapper that maps a string name to an ``nn`` module.
    Parameters
    ----------
    activation
        Activation function to use; one of 'identity', 'relu', 'sigmoid',
        'tanh'.  Any other name raises ``KeyError`` at construction time.
    """
    def __init__(
        self,
        activation: Optional[str] = "identity",
    ):
        super(Activation, self).__init__()
        # lookup table of supported activations; an unknown name fails here
        # (KeyError) rather than at forward time
        activations = {
            'identity': nn.Identity(),
            'relu': nn.ReLU(),
            'sigmoid': nn.Sigmoid(),
            'tanh': nn.Tanh()
        }
        self.activation = activations[activation]
    def forward(self, data: Tensor) -> Tensor:
        """
        Apply the configured activation function.
        Parameters
        ----------
        data
            Shape : any shape of tensor
        Returns
        -------
        Tensor
            activation(data). Shape is the same as the input.
        """
        return self.activation(data) | 979 | 19.416667 | 55 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/cnn.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Optional, Union, List, Tuple
# Third-party imports
import torch
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.modules.block.activation import Activation
class CausalConv1D(nn.Module):
    """
    1D causal temporal convolution, where the term causal means that output[t]
    does not depend on input[t+1:].
    This is the basic structure used in Wavenet [ODZ+16]_ and Temporal
    Convolution Network [BKK18]_.
    The output has the same length as the input: the input is padded with
    ``dilation * (kernel_size - 1)`` zeros and the surplus right-hand outputs
    are trimmed off.
    Parameters
    ----------
    in_channels
        The number of input channels.
    out_channels
        The dimensionality of the output space, i.e. the number of output
        channels (filters) in the convolution.
    kernel_size
        Specifies the dimensions of the convolution window.
    dilation
        Specifies the dilation rate to use for dilated convolution.
    activation
        Activation function to use.
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        dilation: int = 1,
        activation: Optional[str] = "identity",
        **kwargs,
    ):
        super(CausalConv1D, self).__init__(**kwargs)
        self.dilation = dilation
        self.kernel_size = kernel_size
        # nn.Conv1d pads symmetrically, so pad by the full left-context on
        # both sides and trim the excess from the right in forward()
        self.padding = dilation * (kernel_size - 1)
        self.conv1d = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            dilation=dilation,
            padding=self.padding,
            **kwargs,
        )
        self.activation = Activation(activation)
    # noinspection PyMethodOverriding
    def forward(self, data: Tensor) -> Tensor:
        """
        Apply the causal convolution (channels-first layout, NCW).
        Parameters
        ----------
        data
            Shape (batch_size, num_features, sequence_length)
        Returns
        -------
        Tensor
            causal conv1d output. Shape (batch_size, num_features, sequence_length)
        """
        ct = self.conv1d(data)
        ct = self.activation(ct)
        # BUG FIX: the guard used to be `if self.kernel_size > 0`, which is
        # always true, so when padding == 0 (e.g. kernel_size == 1) the slice
        # `ct[:, :, :-0]` produced an EMPTY tensor.  Only trim when there is
        # actually right-padding to remove.
        if self.padding > 0:
            ct = ct[:, :, : -self.padding]
        return ct
class DilatedCausalGated(nn.Module):
    """
    1D causal convolution with the WaveNet gating mechanism:
    ``output = conv_1x1(tanh_branch(x) * sigmoid_branch(x))``.
    Parameters
    ----------
    in_channels
        The number of input channels.
    inner_channels
        The dimensionality of the intermediate space
    out_channels
        The dimensionality of the output space
    kernel_size
        Specifies the dimensions of the convolution window.
    dilation
        Specifies the dilation rate to use for dilated convolution.
    """

    def __init__(
        self,
        in_channels: int,
        inner_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int], List[int]],
        dilation: Union[int, Tuple[int], List[int]],
        **kwargs,
    ) -> None:
        super(DilatedCausalGated, self).__init__(**kwargs)

        def _branch(act: str) -> CausalConv1D:
            # the two gate branches share the same topology and differ
            # only in their activation
            return CausalConv1D(
                in_channels=in_channels,
                out_channels=inner_channels,
                kernel_size=kernel_size,
                dilation=dilation,
                activation=act,
            )

        self.conv1 = _branch("tanh")
        self.conv2 = _branch("sigmoid")
        self.output_conv = nn.Conv1d(
            in_channels=inner_channels, out_channels=out_channels, kernel_size=1
        )

    # noinspection PyMethodOverriding
    def forward(self, x: Tensor) -> Tensor:
        """
        Compute the gated causal convolution.
        Parameters
        ----------
        x
            input features, shape (batch_size, num_features, sequence_length)
        Returns
        -------
        Tensor
            output, shape (batch_size, num_features, sequence_length)
        """
        gated = self.conv1(x) * self.conv2(x)
        return self.output_conv(gated)
class ResidualSequential(nn.Sequential):
    """
    Sequential container with residual connections: each block after the
    first receives the running residual sum of the previous blocks' outputs,
    and the module returns the sum of every block's output.
    """

    def __init__(self, *args, **kwargs):
        # accept child modules positionally, exactly like nn.Sequential
        # (the original **kwargs-only signature could never receive modules)
        super(ResidualSequential, self).__init__(*args, **kwargs)

    # noinspection PyMethodOverriding
    def forward(self, x: Tensor) -> Tensor:
        """
        Parameters
        ----------
        x
            input tensor
        Returns
        -------
        Tensor
            sum of the outputs of all blocks
        """
        outs = []
        # BUG FIX: the original iterated `self._children.values()`, which is a
        # Gluon/MXNet attribute that does not exist on torch.nn.Module and
        # raised AttributeError; nn.Sequential is itself iterable.
        for i, block in enumerate(self):
            out = block(x)
            outs.append(out)
            # feed the running residual sum into the next block
            x = out if i == 0 else x + out
        return sum(outs)
| 5,557 | 26.37931 | 83 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/mlp.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
from pts.modules.block.activation import Activation
class MLP(nn.Module):
    """
    Defines an MLP block.
    Parameters
    ----------
    input_size
        dimensionality of the input.
    layer_sizes
        number of hidden units per layer.
    activation
        activation function of the MLP, default is relu.
    """

    @validated()
    def __init__(
        self, input_size, layer_sizes: List[int], activation="relu"
    ) -> None:
        super().__init__()
        self.layer_sizes = layer_sizes
        self.layers = nn.Sequential()
        prev_dim = input_size
        # each stage is Linear followed by the chosen activation
        for idx, dim in enumerate(layer_sizes):
            self.layers.add_module('linear_%02d' % (idx), nn.Linear(prev_dim, dim))
            self.layers.add_module('%s_%02d' % (activation, idx), Activation(activation))
            prev_dim = dim

    # noinspection PyMethodOverriding
    def forward(self, x: Tensor) -> Tensor:
        """
        Parameters
        ----------
        x
            Input tensor
        Returns
        -------
        Tensor
            Output of the MLP given the input tensor.
        """
        return self.layers(x)
| 2,023 | 26.726027 | 94 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Tuple
# Third-party imports
import torch
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
from pts.modules.block.cnn import CausalConv1D
#from pts.modules.block.mlp import MLP
#from pts.modules.block.rnn import RNN
class Seq2SeqEncoder(nn.Module):
    """
    Abstract class for the encoder. An encoder takes a `target` sequence with
    corresponding covariates and maps it into a static latent and
    a dynamic latent code with the same length as the `target` sequence.
    """

    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # noinspection PyMethodOverriding
    def forward(
        self,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)
        Returns
        -------
        Tensor
            static code,
            shape (batch_size, num_static_features)
        Tensor
            dynamic code,
            shape (batch_size, sequence_length, num_dynamic_features)
        """
        raise NotImplementedError

    @staticmethod
    def _assemble_inputs(
        target: Tensor, static_features: Tensor, dynamic_features: Tensor
    ) -> Tensor:
        """
        Concatenate the target, the (time-tiled) static features and the
        dynamic features along the channel axis.
        Parameters
        ----------
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)
        Returns
        -------
        Tensor
            combined features,
            shape (batch_size, sequence_length,
                   num_static_features + num_dynamic_features + 1)
        """
        target = target.unsqueeze(dim=-1)  # (N, T, 1)

        # tile the static features along the time axis via a batched outer
        # product with a ones column: (N, T, 1) x (N, 1, C) -> (N, T, C)
        helper_ones = torch.ones_like(target)
        tiled_static = torch.einsum(
            'bnm, bmk -> bnk', helper_ones, static_features.unsqueeze(1)
        )

        return torch.cat([target, tiled_static, dynamic_features], dim=2)

    @property
    def out_channels(self) -> int:
        """
        the size of output channel
        """
        raise NotImplementedError
class HierarchicalCausalConv1DEncoder(Seq2SeqEncoder):
    """
    Defines a stack of dilated causal convolutions as the encoder.
    See the following paper for details:
    1. Van Den Oord, A., Dieleman, S., Zen, H., Simonyan, K., Vinyals, O., Graves, A., Kalchbrenner,
    N., Senior, A.W. and Kavukcuoglu, K., 2016, September. WaveNet: A generative model for raw audio. In SSW (p. 125).
    Parameters
    ----------
    input_size
        number of input channels seen by the first convolution.
    dilation_seq
        dilation for each convolution in the stack.
    kernel_size_seq
        kernel size for each convolution in the stack.
    channels_seq
        number of channels for each convolution in the stack.
    use_residual
        flag to toggle concatenating the raw target to the conv output.
    use_covariates
        flag to toggle whether to use coveriates as input to the encoder
    """

    @validated()
    def __init__(
        self,
        input_size: int,
        dilation_seq: List[int],
        kernel_size_seq: List[int],
        channels_seq: List[int],
        use_residual: bool = False,
        use_covariates: bool = False,
        **kwargs,
    ) -> None:
        assert all(
            [x > 0 for x in dilation_seq]
        ), "`dilation_seq` values must be greater than zero"
        assert all(
            [x > 0 for x in kernel_size_seq]
        ), "`kernel_size_seq` values must be greater than zero"
        assert all(
            [x > 0 for x in channels_seq]
        ), "`channel_dim_seq` values must be greater than zero"

        super().__init__(**kwargs)

        self.use_residual = use_residual
        self.use_covariates = use_covariates
        self.CNNs = nn.Sequential()
        self.last_out_channel = channels_seq[-1]

        prev_channels = input_size
        for idx, (ch, ks, dil) in enumerate(
            zip(channels_seq, kernel_size_seq, dilation_seq)
        ):
            self.CNNs.add_module(
                'conv_%02d' % (idx),
                CausalConv1D(
                    in_channels=prev_channels,
                    out_channels=ch,
                    kernel_size=ks,
                    dilation=dil,
                    activation="relu",
                ),
            )
            prev_channels = ch

    def forward(
        self,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)
        Returns
        -------
        Tensor
            static code,
            shape (batch_size, num_static_features)
        Tensor
            dynamic code,
            shape (batch_size, sequence_length, num_dynamic_features)
        """
        if self.use_covariates:
            inputs = Seq2SeqEncoder._assemble_inputs(
                target=target,
                static_features=static_features,
                dynamic_features=dynamic_features,
            )
        else:
            inputs = target

        # convolve in NCT (channels-first) layout, then return to NTC
        ct = self.CNNs(inputs.transpose(1, 2)).transpose(1, 2)

        if self.use_residual:
            ct = torch.cat([ct, target.unsqueeze(-1)], dim=2)

        # the last time step serves as the static code
        static_code = torch.squeeze(ct[:, -1:, ...], dim=1)
        return static_code, ct

    @property
    def out_channels(self) -> int:
        """
        the size of output channel
        """
        return self.last_out_channel + 1
'''
class RNNEncoder(Seq2SeqEncoder):
"""
Defines an RNN as the encoder.
Parameters
----------
mode
type of the RNN. Can be either: rnn_relu (RNN with relu activation),
rnn_tanh, (RNN with tanh activation), lstm or gru.
hidden_size
number of units per hidden layer.
num_layers
number of hidden layers.
bidirectional
toggle use of bi-directional RNN as encoder.
"""
@validated()
def __init__(
self,
mode: str,
hidden_size: int,
num_layers: int,
bidirectional: bool,
**kwargs,
) -> None:
assert num_layers > 0, "`num_layers` value must be greater than zero"
assert hidden_size > 0, "`hidden_size` value must be greater than zero"
super().__init__(**kwargs)
self.rnn = RNN(mode, hidden_size, num_layers, bidirectional)
def forward(
self,
target: Tensor,
static_features: Tensor,
dynamic_features: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Parameters
----------
target
target time series,
shape (batch_size, sequence_length)
static_features
static features,
shape (batch_size, num_static_features)
dynamic_features
dynamic_features,
shape (batch_size, sequence_length, num_dynamic_features)
Returns
-------
Tensor
static code,
shape (batch_size, num_static_features)
Tensor
dynamic code,
shape (batch_size, sequence_length, num_dynamic_features)
"""
dynamic_code = self.rnn(target)
static_code = dynamic_code[:, -1:, ...]
return static_code, dynamic_code
class MLPEncoder(Seq2SeqEncoder):
"""
Defines a multilayer perceptron used as an encoder.
Parameters
----------
layer_sizes
number of hidden units per layer.
kwargs
"""
@validated()
def __init__(self, layer_sizes: List[int], **kwargs) -> None:
super().__init__(**kwargs)
self.model = MLP(layer_sizes, flatten=True)
def forward(
self,
target: Tensor,
static_features: Tensor,
dynamic_features: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Parameters
----------
target
target time series,
shape (batch_size, sequence_length)
static_features
static features,
shape (batch_size, num_static_features)
dynamic_features
dynamic_features,
shape (batch_size, sequence_length, num_dynamic_features)
Returns
-------
Tensor
static code,
shape (batch_size, num_static_features)
Tensor
dynamic code,
shape (batch_size, sequence_length, num_dynamic_features)
"""
inputs = Seq2SeqEncoder._assemble_inputs(
target, static_features, dynamic_features
)
static_code = self.model(inputs)
dynamic_code = torch.zeros_like(target).unsqueeze(2)
return static_code, dynamic_code
class RNNCovariateEncoder(Seq2SeqEncoder):
"""
Defines RNN encoder that uses covariates and target as input to the RNN.
Parameters
----------
mode
type of the RNN. Can be either: rnn_relu (RNN with relu activation),
rnn_tanh, (RNN with tanh activation), lstm or gru.
hidden_size
number of units per hidden layer.
num_layers
number of hidden layers.
bidirectional
toggle use of bi-directional RNN as encoder.
"""
@validated()
def __init__(
self,
mode: str,
hidden_size: int,
num_layers: int,
bidirectional: bool,
**kwargs,
) -> None:
assert num_layers > 0, "`num_layers` value must be greater than zero"
assert hidden_size > 0, "`hidden_size` value must be greater than zero"
super().__init__(**kwargs)
self.rnn = RNN(mode, hidden_size, num_layers, bidirectional)
def forward(
self,
target: Tensor,
static_features: Tensor,
dynamic_features: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Parameters
----------
target
target time series,
shape (batch_size, sequence_length)
static_features
static features,
shape (batch_size, num_static_features)
dynamic_features
dynamic_features,
shape (batch_size, sequence_length, num_dynamic_features)
Returns
-------
Tensor
static code,
shape (batch_size, num_static_features)
Tensor
dynamic code,
shape (batch_size, sequence_length, num_dynamic_features)
"""
inputs = Seq2SeqEncoder._assemble_inputs(
target, static_features, dynamic_features
)
dynamic_code = self.rnn(inputs)
# using the last state as the static code,
# but not working as well as the concat of all the previous states
static_code = torch.squeeze(dynamic_code[:, -1:, ...], dim=1)
return static_code, dynamic_code
''' | 12,696 | 26.188437 | 118 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/enc2dec.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple
# Third-party imports
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
class Seq2SeqEnc2Dec(nn.Module):
    """
    Abstract class for any module that pass encoder to decoder, such as
    attention network.
    """

    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # noinspection PyMethodOverriding
    def forward(
        self,
        encoder_output_static: Tensor,
        encoder_output_dynamic: Tensor,
        future_features: Tensor,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Parameters
        ----------
        encoder_output_static
            shape (batch_size, num_features) or (N, C)
        encoder_output_dynamic
            shape (batch_size, context_length, num_features) or (N, T, C)
        future_features
            shape (batch_size, prediction_length, num_features) or (N, T, C)
        Returns
        -------
        Tensor
            shape (batch_size, num_features) or (N, C)
        Tensor
            shape (batch_size, prediction_length, num_features) or (N, T, C)
        Tensor
            shape (batch_size, sequence_length, num_features) or (N, T, C)
        """
        # Consistent with Seq2SeqEncoder.forward: fail loudly instead of
        # silently returning None (the original body was `pass`, which made
        # callers crash later when unpacking the result tuple).
        raise NotImplementedError
class PassThroughEnc2Dec(Seq2SeqEnc2Dec):
    """
    Identity encoder-to-decoder bridge: hands the encoder outputs and the
    known future features to the decoder completely unchanged.
    """

    def forward(
        self,
        encoder_output_static: Tensor,
        encoder_output_dynamic: Tensor,
        future_features: Tensor,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Return all three inputs untouched, in the same order.

        Parameters
        ----------
        encoder_output_static
            shape (batch_size, num_features) or (N, C)
        encoder_output_dynamic
            shape (batch_size, context_length, num_features) or (N, T, C)
        future_features
            shape (batch_size, prediction_length, num_features) or (N, T, C)

        Returns
        -------
        Tuple[Tensor, Tensor, Tensor]
            the unmodified ``(static, dynamic, future)`` triple.
        """
        passthrough = (
            encoder_output_static,
            encoder_output_dynamic,
            future_features,
        )
        return passthrough
| 2,929 | 25.636364 | 77 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import torch
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
from pts.modules.block.mlp import MLP
class Seq2SeqDecoder(nn.Module):
    """
    Abstract base class for the Decoder block in sequence-to-sequence models.
    Subclasses implement `forward`; this base only wires up `nn.Module`.
    """
    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    # noinspection PyMethodOverriding
    # NOTE(review): annotated ``-> None`` but concrete decoders (e.g.
    # OneShotDecoder below) return a Tensor — presumably should be Tensor;
    # confirm before relying on the annotation.
    def forward(
        self, dynamic_input: Tensor, static_input: Tensor
    ) -> None:
        """
        Abstract function definition of the forward.
        Parameters
        ----------
        dynamic_input
            dynamic_features, shape (batch_size, sequence_length, num_features)
            or (N, T, C)
        static_input
            static features, shape (batch_size, num_features) or (N, C)
        """
        # Abstract: subclasses override; the base implementation is a no-op.
        pass
'''
class ForkingMLPDecoder(Seq2SeqDecoder):
"""
Multilayer perceptron decoder for sequence-to-sequence models.
See [WTN+17]_ for details.
Parameters
----------
dec_len
length of the decoder (usually the number of forecasted time steps).
final_dim
dimensionality of the output per time step (number of predicted
quantiles).
hidden_dimension_sequence
number of hidden units for each MLP layer.
"""
@validated()
def __init__(
self,
dec_len: int,
final_dim: int,
hidden_dimension_sequence: List[int] = list([]),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dec_len = dec_len
self.final_dims = final_dim
self.model = nn.Sequential()
for layer_no, layer_dim in enumerate(hidden_dimension_sequence):
layer = nn.Linear(
dec_len * layer_dim,
flatten=False,
activation="relu",
prefix=f"mlp_{layer_no:#02d}'_",
)
self.model.add(layer)
layer = nn.Linear(
dec_len * final_dim,
flatten=False,
activation="softrelu",
prefix=f"mlp_{len(hidden_dimension_sequence):#02d}'_",
)
self.model.add(layer)
def forward(
self, dynamic_input: Tensor, static_input: Tensor = None
) -> Tensor:
"""
ForkingMLPDecoder forward call.
Parameters
----------
dynamic_input
dynamic_features, shape (batch_size, sequence_length, num_features)
or (N, T, C).
static_input
not used in this decoder.
Returns
-------
Tensor
mlp output, shape (0, 0, dec_len, final_dims).
"""
mlp_output = self.model(dynamic_input)
mlp_output = mlp_output.reshape(
shape=(0, 0, self.dec_len, self.final_dims)
)
return mlp_output
'''
class OneShotDecoder(Seq2SeqDecoder):
    """
    OneShotDecoder: expands the static code into one vector per predicted
    time step, concatenates it with the dynamic (future) features, and maps
    the result through an MLP — producing all decoder steps in one shot.

    Parameters
    ----------
    input_size
        dimension of the static input code
    decoder_length
        length of the decoder (number of time steps)
    layer_sizes
        dimensions of the hidden layers
    static_outputs_per_time_step
        number of static outputs per time step
    num_future_features
        number of dynamic (future) covariates per time step. Defaults to 4,
        which this class previously hard-coded (the old "TODO: fix hard
        coded dimension for covariates"); passing a different value
        generalizes the decoder while keeping old call sites working.
    """
    @validated()
    def __init__(
        self,
        input_size: int,
        decoder_length: int,
        layer_sizes: List[int],
        static_outputs_per_time_step: int,
        num_future_features: int = 4,
    ) -> None:
        super().__init__()
        self.decoder_length = decoder_length
        self.static_outputs_per_time_step = static_outputs_per_time_step
        # Projects the static code to one vector per decoder time step.
        self.expander = nn.Linear(
            input_size,
            decoder_length * static_outputs_per_time_step
        )
        # Per-step MLP input = dynamic covariates + expanded static outputs.
        mlp_input_size = num_future_features + static_outputs_per_time_step
        self.mlp = MLP(mlp_input_size, layer_sizes)
    def forward(
        self,
        static_input: Tensor,  # (batch_size, static_input_dim)
        dynamic_input: Tensor,  # (batch_size, decoder_length, num_future_features)
    ) -> Tensor:
        """
        OneShotDecoder forward call.
        Parameters
        ----------
        static_input
            static features, shape (batch_size, num_features) or (N, C)
        dynamic_input
            dynamic_features, shape (batch_size, sequence_length, num_features)
            or (N, T, C)
        Returns
        -------
        Tensor
            mlp output, shape (batch_size, dec_len, size of last layer)
        """
        # Expand the static code into a (N, T, static_outputs) tile.
        static_input_tile = self.expander(static_input).reshape(
            (-1, self.decoder_length, self.static_outputs_per_time_step)
        )
        # Concatenate along the feature axis and run the shared MLP.
        combined_input = torch.cat([dynamic_input, static_input_tile], dim=2)
        out = self.mlp(combined_input)  # (N, T, layer_sizes[-1])
        return out
| 5,368 | 25.979899 | 100 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/quantile_output.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
# Third-party imports
# First-party imports
from pts.core.component import validated
class QuantileLoss(nn.Module):
    @validated()
    def __init__(
        self,
        quantiles: List[float],
        quantile_weights: List[float] = None,
        **kwargs,
    ) -> None:
        """
        Represents the quantile loss used to fit decoders that learn quantiles.
        Parameters
        ----------
        quantiles
            list of quantiles to compute loss over.
        quantile_weights
            weights of the quantiles.
        """
        super().__init__(**kwargs)
        self.quantiles = quantiles
        self.num_quantiles = len(quantiles)
        # FIX: always store the weights as a tensor. The original kept the
        # user-supplied ``List[float]`` as-is, and the later
        # ``self.quantile_weights[i].detach()`` call in ``forward`` raised
        # AttributeError because Python floats have no ``.detach()``.
        self.quantile_weights = (
            torch.ones(self.num_quantiles) / self.num_quantiles
            if not quantile_weights
            else torch.tensor(quantile_weights)
        )
    # noinspection PyMethodOverriding
    def forward(
        self, y_true: Tensor, y_pred: Tensor, sample_weight=None
    ):
        """
        Compute the weighted sum of quantile losses.
        Parameters
        ----------
        y_true
            true target, shape (N1 x N2 x ... x Nk x dimension of time series
            (normally 1))
        y_pred
            predicted target, shape (N1 x N2 x ... x Nk x num_quantiles)
        sample_weight
            sample weights
        Returns
        -------
        Tensor
            weighted sum of the quantile losses, shape N1 x N1 x ... Nk
        """
        # One chunk per quantile along the last axis.
        y_pred_all = y_pred.chunk(self.num_quantiles, dim=-1)
        qt_loss = []
        for i, y_pred_q in enumerate(y_pred_all):
            q = self.quantiles[i]
            # detach() keeps the weights out of the autograd graph.
            weighted_qt = (
                self.compute_quantile_loss(y_true, y_pred_q.squeeze(-1), q)
                * self.quantile_weights[i].detach()
            )
            qt_loss.append(weighted_qt)
        stacked_qt_losses = torch.stack(qt_loss, dim=-1)
        sum_qt_loss = torch.mean(
            stacked_qt_losses, dim=-1
        )  # avg across quantiles
        if sample_weight is not None:
            return sample_weight * sum_qt_loss
        else:
            return sum_qt_loss
    @staticmethod
    def compute_quantile_loss(
        y_true: Tensor, y_pred_p: Tensor, p: float
    ) -> Tensor:
        """
        Compute the quantile (pinball) loss for a single quantile level.
        Parameters
        ----------
        y_true
            true target, shape (N1 x N2 x ... x Nk x dimension of time series
            (normally 1)).
        y_pred_p
            predicted target quantile, shape (N1 x N2 x ... x Nk x 1).
        p
            quantile error to compute the loss.
        Returns
        -------
        Tensor
            quantile loss, shape: (N1 x N2 x ... x Nk x 1)
        """
        # Under-prediction is penalized with weight p, over-prediction
        # with weight (1 - p); the factor 2 matches the M4 convention.
        under_bias = p * F.relu(y_true - y_pred_p)
        over_bias = (1 - p) * F.relu(y_pred_p - y_true)
        qt_loss = 2 * (under_bias + over_bias)
        return qt_loss
class ProjectParams(nn.Module):
    """
    Dense layer that projects network features into the quantile space.

    Parameters
    ----------
    input_size
        size of the incoming feature vector.
    num_quantiles
        number of quantiles to project onto.
    """

    @validated()
    def __init__(self, input_size, num_quantiles, **kwargs):
        super().__init__(**kwargs)
        self.projection = nn.Linear(input_size, num_quantiles)

    # noinspection PyMethodOverriding,PyPep8Naming
    def forward(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Project ``x`` into the quantile space.

        Parameters
        ----------
        x
            input tensor

        Returns
        -------
        Tensor
            output of the projection layer
        """
        projected = self.projection(x)
        return projected
class QuantileOutput:
    """
    Output layer using a quantile loss and projection layer to connect the
    quantile output to the network.

    Parameters
    ----------
    input_size
        size of the feature vector fed to the projection layer.
    quantiles
        list of quantiles to compute loss over.
    quantile_weights
        weights of the quantiles.
    """

    @validated()
    def __init__(
        self,
        input_size: int,
        quantiles: List[float],
        quantile_weights: Optional[List[float]] = None,
    ) -> None:
        self.input_size = input_size
        self.quantiles = quantiles
        self.quantile_weights = quantile_weights

    def get_loss(self) -> Tensor:
        """Construct the quantile-loss module for the configured quantiles."""
        return QuantileLoss(
            quantiles=self.quantiles, quantile_weights=self.quantile_weights
        )

    def get_quantile_proj(self, **kwargs) -> nn.Module:
        """Construct the projection module mapping features to the quantiles."""
        num_quantiles = len(self.quantiles)
        return ProjectParams(self.input_size, num_quantiles, **kwargs)
| 5,592 | 25.258216 | 79 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/__init__.py | 0 | 0 | 0 | py | |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/distribution/constant.py | import torch
from torch.distributions.distribution import Distribution
class ConstantDistribution(Distribution):
    r"""
    Degenerate (point-mass) distribution: all probability mass sits at ``mu``,
    so Var(x) = 0.

    Args:
        loss_type: ``"L1"`` or ``"L2"``; selects the penalty whose negative
            plays the role of the log-probability.
        mu (Tensor): the constant value (mean) of the distribution.
    """

    def __init__(self, loss_type, mu, validate_args=None):
        self.loss_type = loss_type
        self.mu = mu
        super().__init__(mu.size(), validate_args=validate_args)

    @property
    def mean(self):
        # The point mass is the mean.
        return self.mu

    @property
    def variance(self):
        # A constant has no spread.
        return torch.zeros_like(self.mu)

    def sample(self, sample_shape=torch.Size()):
        # Every draw equals mu.
        return torch.ones_like(self.mu) * self.mu

    def log_prob(self, y_true):
        # Negative penalty stands in for a log-probability.
        residual = y_true - self.mu
        if self.loss_type == "L1":
            penalty = residual.abs()
        elif self.loss_type == "L2":
            penalty = residual ** 2
        else:
            raise NotImplementedError
        return -penalty
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/distribution/tweedie.py | import torch
import numpy as np
from torch.distributions.distribution import Distribution
def est_lambda(mu, p):
    """Poisson rate implied by a Tweedie mean ``mu`` and variance power ``p``."""
    exponent = 2 - p
    return mu ** exponent / exponent
def est_alpha(p):
    """Gamma shape parameter implied by Tweedie variance power ``p``."""
    numerator = 2 - p
    denominator = p - 1
    return numerator / denominator
def est_beta(mu, p):
    """Gamma rate parameter implied by Tweedie mean ``mu`` and power ``p``."""
    mean_exponent = 1 - p
    return (mu ** mean_exponent) / (p - 1)
class Tweedie(Distribution):
    r"""
    Tweedie compound Poisson-gamma distribution, parameterized by the log of
    the mean and the variance power.

    Args:
        log_mu (Tensor): log(mean)
        rho (Tensor): tweedie_variance_power (1 ~ 2)
    """
    def __init__(self, log_mu, rho, validate_args=None):
        self.log_mu = log_mu
        self.rho = rho
        batch_shape = log_mu.size()
        super(Tweedie, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        return torch.exp(self.log_mu)

    @property
    def variance(self):
        # FIX: the original called the non-existent ``torch.ones_line``,
        # so accessing ``.variance`` always raised AttributeError.
        # TODO: still a placeholder — the true variance is phi * mu ** rho.
        return torch.ones_like(self.log_mu)

    def sample(self, sample_shape=torch.Size()):
        # NOTE: ``sample_shape`` is currently ignored; one draw is produced
        # per batch element (the original computed-but-unused
        # ``_extended_shape`` call has been removed).
        mu = self.mean
        p = self.rho
        phi = 1  # TODO: dispersion is fixed to 1 for now
        rate = est_lambda(mu, p) / phi  # rate for poisson
        alpha = est_alpha(p)  # alpha for Gamma distribution
        beta = est_beta(mu, p) / phi  # beta for Gamma distribution
        # A Tweedie draw is a Poisson-many sum of Gamma draws; N == 0
        # corresponds to an exact zero.
        N = torch.poisson(rate)
        gamma = torch.distributions.gamma.Gamma(N * alpha, beta)
        samples = gamma.sample()
        samples[N == 0] = 0
        return samples

    def log_prob(self, y_true):
        # Quasi-log-likelihood of the Tweedie model (normalizing constant
        # omitted, as it does not depend on log_mu).
        rho = self.rho
        y_pred = self.log_mu
        a = y_true * torch.exp((1 - rho) * y_pred) / (1 - rho)
        b = torch.exp((2 - rho) * y_pred) / (2 - rho)
        return a - b
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/distribution/__init__.py | from .constant import ConstantDistribution
from .tweedie import Tweedie
| 72 | 23.333333 | 42 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/evaluation/evaluator.py | from itertools import chain, tee
import os
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
    Union,
    Callable,
)

# Third-party imports
import numpy as np
import pandas as pd
from tqdm import tqdm

from pts.feature import get_seasonality
from pts.model import Quantile, Forecast
class Evaluator:
    """
    Evaluator class, to compute accuracy metrics by comparing observations
    to forecasts.
    Parameters
    ----------
    quantiles
        list of strings of the form 'p10' or floats in [0, 1] with
        the quantile levels
    seasonality
        seasonality to use for seasonal_error, if nothing is passed
        uses the default seasonality
        for the given series frequency as returned by `get_seasonality`
    alpha
        parameter of the MSIS metric from M4 competition that
        defines the confidence interval
        for alpha=0.05 the 95% confidence interval is considered in the metric,
        see https://www.m4.unic.ac.cy/wp-content/uploads/2018/03/M4
        -Competitors-Guide.pdf for more detail on MSIS
    """
    default_quantiles = 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9
    def __init__(
        self,
        quantiles: Iterable[Union[float, str]] = default_quantiles,
        seasonality: Optional[int] = None,
        alpha: float = 0.05,
    ) -> None:
        # Quantile.parse accepts both float levels and 'p10'-style strings.
        self.quantiles = tuple(map(Quantile.parse, quantiles))
        self.seasonality = seasonality
        self.alpha = alpha
    def __call__(
        self,
        ts_iterator: Iterable[Union[pd.DataFrame, pd.Series]],
        fcst_iterator: Iterable[Forecast],
        num_series: Optional[int] = None,
    ) -> Tuple[Dict[str, float], pd.DataFrame]:
        """
        Compute accuracy metrics by comparing actual data to the forecasts.
        Parameters
        ----------
        ts_iterator
            iterator containing true target on the predicted range
        fcst_iterator
            iterator of forecasts on the predicted range
        num_series
            number of series of the iterator
            (optional, only used for displaying progress)
        Returns
        -------
        dict
            Dictionary of aggregated metrics
        pd.DataFrame
            DataFrame containing per-time-series metrics
        """
        ts_iterator = iter(ts_iterator)
        fcst_iterator = iter(fcst_iterator)
        rows = []
        # NOTE(review): ``os`` is used below but this module never imports
        # it — ``import os`` must be present at the top of the file.
        with tqdm(
            zip(ts_iterator, fcst_iterator),
            total=num_series,
            desc="Running evaluation",
            disable=os.environ.get("DISABLE_TQDM", False),
        ) as it, np.errstate(invalid="ignore"):
            for ts, forecast in it:
                rows.append(self.get_metrics_per_ts(ts, forecast))
        # zip() stops at the shorter iterator; make sure both were fully
        # consumed so no (target, forecast) pair was silently dropped.
        assert not any(
            True for _ in ts_iterator
        ), "ts_iterator has more elements than fcst_iterator"
        assert not any(
            True for _ in fcst_iterator
        ), "fcst_iterator has more elements than ts_iterator"
        if num_series is not None:
            assert (
                len(rows) == num_series
            ), f"num_series={num_series} did not match number of elements={len(rows)}"
        # If all entries of a target array are NaNs, the resulting metric will have value "masked". Pandas does not
        # handle masked values correctly. Thus we set dtype=np.float64 to convert masked values back to NaNs which
        # are handled correctly by pandas Dataframes during aggregation.
        metrics_per_ts = pd.DataFrame(rows, dtype=np.float64)
        return self.get_aggregate_metrics(metrics_per_ts)
    @staticmethod
    def extract_pred_target(
        time_series: Union[pd.Series, pd.DataFrame], forecast: Forecast
    ) -> np.ndarray:
        """
        Slice the ground truth down to the dates covered by the forecast.
        Parameters
        ----------
        time_series
        forecast
        Returns
        -------
        Union[pandas.Series, pandas.DataFrame]
            time series cut in the Forecast object dates
        """
        assert forecast.index.intersection(time_series.index).equals(forecast.index), (
            "Cannot extract prediction target since the index of forecast is outside the index of target\n"
            f"Index of forecast: {forecast.index}\n Index of target: {time_series.index}"
        )
        # cut the time series using the dates of the forecast object
        return np.atleast_1d(np.squeeze(time_series.loc[forecast.index].transpose()))
    def seasonal_error(
        self, time_series: Union[pd.Series, pd.DataFrame], forecast: Forecast
    ) -> float:
        r"""
        .. math::
            seasonal_error = mean(|Y[t] - Y[t-m]|)
        where m is the seasonal frequency
        https://www.m4.unic.ac.cy/wp-content/uploads/2018/03/M4-Competitors-Guide.pdf
        """
        # Remove the prediction range
        # If the prediction range is not in the end of the time series,
        # everything after the prediction range is truncated
        forecast_date = pd.Timestamp(forecast.start_date, freq=forecast.freq)
        date_before_forecast = forecast_date - 1 * forecast_date.freq
        ts = time_series[:date_before_forecast]
        # Check if the length of the time series is larger than the seasonal frequency
        seasonality = (
            self.seasonality if self.seasonality else get_seasonality(forecast.freq)
        )
        if seasonality < len(ts):
            forecast_freq = seasonality
        else:
            # edge case: the seasonal freq is larger than the length of ts
            # revert to freq=1
            # logging.info('The seasonal frequency is larger than the length of the time series. Reverting to freq=1.')
            forecast_freq = 1
        # Masked arrays make np.mean skip NaN entries.
        y_t = np.ma.masked_invalid(ts.values[:-forecast_freq])
        y_tm = np.ma.masked_invalid(ts.values[forecast_freq:])
        seasonal_mae = np.mean(abs(y_t - y_tm))
        # np.ma.masked means every lagged pair contained a NaN; report NaN.
        return seasonal_mae if seasonal_mae is not np.ma.masked else np.nan
    def get_metrics_per_ts(
        self, time_series: Union[pd.Series, pd.DataFrame], forecast: Forecast
    ) -> Dict[str, Union[float, str, None]]:
        """Compute all configured metrics for one (target, forecast) pair."""
        pred_target = np.array(self.extract_pred_target(time_series, forecast))
        pred_target = np.ma.masked_invalid(pred_target)
        # Not every Forecast type exposes a mean; fall back to None and
        # report MSE as None in that case.
        try:
            mean_fcst = forecast.mean
        except:
            mean_fcst = None
        median_fcst = forecast.quantile(0.5)
        seasonal_error = self.seasonal_error(time_series, forecast)
        # For MSIS: alpha/2 quantile may not exist. Find the closest.
        lower_q = min(self.quantiles, key=lambda q: abs(q.value - self.alpha / 2))
        upper_q = min(
            reversed(self.quantiles), key=lambda q: abs(q.value - (1 - self.alpha / 2)),
        )
        metrics = {
            "item_id": forecast.item_id,
            "MSE": self.mse(pred_target, mean_fcst) if mean_fcst is not None else None,
            "abs_error": self.abs_error(pred_target, median_fcst),
            "abs_target_sum": self.abs_target_sum(pred_target),
            "abs_target_mean": self.abs_target_mean(pred_target),
            "seasonal_error": seasonal_error,
            "relative_bias": self.relative_bias(pred_target, median_fcst),
            "MASE": self.mase(pred_target, median_fcst, seasonal_error),
            "MAPE": self.mape(pred_target, median_fcst),
            "sMAPE": self.smape(pred_target, median_fcst),
            "MSIS": self.msis(
                pred_target,
                forecast.quantile(lower_q.value),
                forecast.quantile(upper_q.value),
                seasonal_error,
                self.alpha,
            ),
        }
        for quantile in self.quantiles:
            forecast_quantile = forecast.quantile(quantile.value)
            metrics[quantile.loss_name] = self.quantile_loss(
                pred_target, forecast_quantile, quantile.value
            )
            metrics[quantile.coverage_name] = self.coverage(
                pred_target, forecast_quantile
            )
        return metrics
    def get_aggregate_metrics(
        self, metric_per_ts: pd.DataFrame
    ) -> Tuple[Dict[str, float], pd.DataFrame]:
        """Aggregate per-series metrics into dataset-level metrics."""
        agg_funs = {
            "MSE": "mean",
            "abs_error": "sum",
            "abs_target_sum": "sum",
            "abs_target_mean": "mean",
            "seasonal_error": "mean",
            "relative_bias": "mean",
            "MASE": "mean",
            "MAPE": "mean",
            "sMAPE": "mean",
            "MSIS": "mean",
        }
        for quantile in self.quantiles:
            agg_funs[quantile.loss_name] = "sum"
            agg_funs[quantile.coverage_name] = "mean"
        assert (
            set(metric_per_ts.columns) >= agg_funs.keys()
        ), "The some of the requested item metrics are missing."
        totals = {key: metric_per_ts[key].agg(agg) for key, agg in agg_funs.items()}
        # derived metrics based on previous aggregate metrics
        totals["RMSE"] = np.sqrt(totals["MSE"])
        # ``flag`` guards against division by zero: when the denominator is
        # 0, the numerator is zeroed and the denominator forced to 1.
        flag = totals["abs_target_mean"] == 0
        totals["NRMSE"] = np.divide(
            totals["RMSE"] * (1 - flag), totals["abs_target_mean"] + flag
        )
        flag = totals["abs_target_sum"] == 0
        totals["ND"] = np.divide(
            totals["abs_error"] * (1 - flag), totals["abs_target_sum"] + flag
        )
        all_qLoss_names = [quantile.weighted_loss_name for quantile in self.quantiles]
        for quantile in self.quantiles:
            totals[quantile.weighted_loss_name] = np.divide(
                totals[quantile.loss_name], totals["abs_target_sum"]
            )
        totals["mean_wQuantileLoss"] = np.array(
            [totals[ql] for ql in all_qLoss_names]
        ).mean()
        totals["MAE_Coverage"] = np.mean(
            [
                np.abs(totals[q.coverage_name] - np.array([q.value]))
                for q in self.quantiles
            ]
        )
        return totals, metric_per_ts
    @staticmethod
    def mse(target, forecast):
        """Mean squared error."""
        return np.mean(np.square(target - forecast))
    @staticmethod
    def abs_error(target, forecast):
        """Sum of absolute errors."""
        return np.sum(np.abs(target - forecast))
    @staticmethod
    def quantile_loss(target, quantile_forecast, q):
        """Pinball (quantile) loss at level ``q``, scaled by 2."""
        return 2.0 * np.sum(
            np.abs((quantile_forecast - target) * ((target <= quantile_forecast) - q))
        )
    @staticmethod
    def coverage(target, quantile_forecast):
        """Fraction of observations strictly below the quantile forecast."""
        return np.mean((target < quantile_forecast))
    @staticmethod
    def relative_bias(target, forecast):
        """Mean forecast error divided by the mean target (signed)."""
        return np.mean(forecast - target) / np.mean(target)
    @staticmethod
    def mase(target, forecast, seasonal_error):
        r"""
        .. math::
            mase = mean(|Y - Y_hat|) / seasonal_error
        https://www.m4.unic.ac.cy/wp-content/uploads/2018/03/M4-Competitors-Guide.pdf
        """
        # ``flag`` avoids division by zero when seasonal_error == 0.
        flag = seasonal_error == 0
        return (np.mean(np.abs(target - forecast)) * (1 - flag)) / (
            seasonal_error + flag
        )
    @staticmethod
    def mape(target, forecast):
        r"""
        .. math::
            mape = mean(|Y - Y_hat| / |Y|))
        """
        denominator = np.abs(target)
        flag = denominator == 0
        mape = np.mean((np.abs(target - forecast) * (1 - flag)) / (denominator + flag))
        return mape
    @staticmethod
    def smape(target, forecast):
        r"""
        .. math::
            smape = mean(2 * |Y - Y_hat| / (|Y| + |Y_hat|))
        https://www.m4.unic.ac.cy/wp-content/uploads/2018/03/M4-Competitors-Guide.pdf
        """
        denominator = np.abs(target) + np.abs(forecast)
        flag = denominator == 0
        smape = 2 * np.mean(
            (np.abs(target - forecast) * (1 - flag)) / (denominator + flag)
        )
        return smape
    @staticmethod
    def msis(target, lower_quantile, upper_quantile, seasonal_error, alpha):
        r"""
        :math:
            msis = mean(U - L + 2/alpha * (L-Y) * I[Y<L] + 2/alpha * (Y-U) * I[Y>U]) /seasonal_error
        https://www.m4.unic.ac.cy/wp-content/uploads/2018/03/M4-Competitors-Guide.pdf
        """
        numerator = np.mean(
            upper_quantile
            - lower_quantile
            + 2.0 / alpha * (lower_quantile - target) * (target < lower_quantile)
            + 2.0 / alpha * (target - upper_quantile) * (target > upper_quantile)
        )
        flag = seasonal_error == 0
        return (numerator * (1 - flag)) / (seasonal_error + flag)
    @staticmethod
    def abs_target_sum(target):
        """Sum of absolute target values (normalization constant)."""
        return np.sum(np.abs(target))
    @staticmethod
    def abs_target_mean(target):
        """Mean of absolute target values (normalization constant)."""
        return np.mean(np.abs(target))
class MultivariateEvaluator(Evaluator):
    """
    The MultivariateEvaluator class owns functionality for evaluating
    multidimensional target arrays of shape
    (target_dimensionality, prediction_length).
    Evaluations of individual dimensions will be stored with the corresponding
    dimension prefix and contain the metrics calculated by only this dimension.
    Metrics with the plain metric name correspond to metrics calculated over
    all dimensions.
    Additionally, the user can provide additional aggregation functions that
    first aggregate the target and forecast over dimensions and then calculate
    the metric. These metrics will be prefixed with m_<aggregation_fun_name>_
    The evaluation dimensions can be set by the user.
    Example:
        {'0_MSE': 0.004307240342677687, # MSE of dimension 0
        '0_abs_error': 1.6246897801756859,
        '1_MSE': 0.003949341769475723, # MSE of dimension 1
        '1_abs_error': 1.5052175521850586,
        'MSE': 0.004128291056076705, # MSE of all dimensions
        'abs_error': 3.1299073323607445,
        'm_sum_MSE': 0.02 # MSE of aggregated target and aggregated forecast
        (if target_agg_funcs is set).
        'm_sum_abs_error': 4.2}
    """
    def __init__(
        self,
        quantiles: Iterable[Union[float, str]] = np.linspace(0.1, 0.9, 9),
        seasonality: Optional[int] = None,
        alpha: float = 0.05,
        eval_dims: List[int] = None,
        target_agg_funcs: Dict[str, Callable] = {},
    ) -> None:
        """
        Parameters
        ----------
        quantiles
            list of strings of the form 'p10' or floats in [0, 1] with the
            quantile levels
        seasonality
            seasonality to use for seasonal_error, if nothing is passed uses
            the default seasonality for the given series frequency as
            returned by `get_seasonality`
        alpha
            parameter of the MSIS metric that defines the CI,
            e.g., for alpha=0.05 the 95% CI is considered in the metric.
        eval_dims
            dimensions of the target that will be evaluated.
        target_agg_funcs
            pass key-value pairs that define aggregation functions over the
            dimension axis. Useful to compute metrics over aggregated target
            and forecast (typically sum or mean).
        """
        # NOTE(review): the mutable defaults (ndarray, dict) are shared
        # across instances; they are only read here, never mutated, so this
        # is harmless — but worth keeping in mind when editing.
        super().__init__(quantiles=quantiles, seasonality=seasonality, alpha=alpha)
        self._eval_dims = eval_dims
        self.target_agg_funcs = target_agg_funcs
    @staticmethod
    def extract_target_by_dim(
        it_iterator: Iterator[pd.DataFrame], dim: int
    ) -> Iterator[pd.DataFrame]:
        # Select a single dimension (column) of every multivariate target.
        for i in it_iterator:
            yield (i[dim])
    @staticmethod
    def extract_forecast_by_dim(
        forecast_iterator: Iterator[Forecast], dim: int
    ) -> Iterator[Forecast]:
        # Select a single dimension of every multivariate forecast.
        for forecast in forecast_iterator:
            yield forecast.copy_dim(dim)
    @staticmethod
    def extract_aggregate_target(
        it_iterator: Iterator[pd.DataFrame], agg_fun: Callable
    ) -> Iterator[pd.DataFrame]:
        # Collapse the dimension axis of every target with ``agg_fun``.
        for i in it_iterator:
            yield i.agg(agg_fun, axis=1)
    @staticmethod
    def extract_aggregate_forecast(
        forecast_iterator: Iterator[Forecast], agg_fun: Callable
    ) -> Iterator[Forecast]:
        # Collapse the dimension axis of every forecast with ``agg_fun``.
        for forecast in forecast_iterator:
            yield forecast.copy_aggregate(agg_fun)
    @staticmethod
    def peek(iterator: Iterator[Any]) -> Tuple[Any, Iterator[Any]]:
        # Look at the first element without consuming it: chain it back on.
        peeked_object = iterator.__next__()
        iterator = chain([peeked_object], iterator)
        return peeked_object, iterator
    @staticmethod
    def get_target_dimensionality(forecast: Forecast) -> int:
        target_dim = forecast.dim()
        assert target_dim > 1, (
            f"the dimensionality of the forecast should be larger than 1, "
            f"but got {target_dim}. "
            f"Please use the Evaluator to evaluate 1D forecasts."
        )
        return target_dim
    def get_eval_dims(self, target_dimensionality: int) -> List[int]:
        # Default to evaluating every dimension when none were requested.
        eval_dims = (
            self._eval_dims
            if self._eval_dims is not None
            else list(range(0, target_dimensionality))
        )
        assert max(eval_dims) < target_dimensionality, (
            f"eval dims should range from 0 to target_dimensionality - 1, "
            f"but got max eval_dim {max(eval_dims)}"
        )
        return eval_dims
    def calculate_aggregate_multivariate_metrics(
        self,
        ts_iterator: Iterator[pd.DataFrame],
        forecast_iterator: Iterator[Forecast],
        agg_fun: Callable,
    ) -> Dict[str, float]:
        """
        Parameters
        ----------
        ts_iterator
            Iterator over time series
        forecast_iterator
            Iterator over forecasts
        agg_fun
            aggregation function
        Returns
        -------
        Dict[str, float]
            dictionary with aggregate datasets metrics
        """
        agg_metrics, _ = super(MultivariateEvaluator, self).__call__(
            self.extract_aggregate_target(ts_iterator, agg_fun),
            self.extract_aggregate_forecast(forecast_iterator, agg_fun),
        )
        return agg_metrics
    def calculate_aggregate_vector_metrics(
        self, all_agg_metrics: Dict[str, float], all_metrics_per_ts: pd.DataFrame,
    ) -> Dict[str, float]:
        """
        Parameters
        ----------
        all_agg_metrics
            dictionary with aggregate metrics of individual dimensions
        all_metrics_per_ts
            DataFrame containing metrics for all time series of all evaluated
            dimensions
        Returns
        -------
        Dict[str, float]
            dictionary with aggregate metrics (of individual (evaluated)
            dimensions and the entire vector)
        """
        vector_aggregate_metrics, _ = self.get_aggregate_metrics(all_metrics_per_ts)
        for key, value in vector_aggregate_metrics.items():
            all_agg_metrics[key] = value
        return all_agg_metrics
    def __call__(
        self,
        ts_iterator: Iterable[pd.DataFrame],
        fcst_iterator: Iterable[Forecast],
        num_series=None,
    ) -> Tuple[Dict[str, float], pd.DataFrame]:
        ts_iterator = iter(ts_iterator)
        fcst_iterator = iter(fcst_iterator)
        all_agg_metrics = dict()
        all_metrics_per_ts = list()
        peeked_forecast, fcst_iterator = self.peek(fcst_iterator)
        target_dimensionality = self.get_target_dimensionality(peeked_forecast)
        eval_dims = self.get_eval_dims(target_dimensionality)
        # One tee'd iterator copy per dimension, plus one per aggregation
        # function (consumed from the end of the tuple below).
        ts_iterator_set = tee(
            ts_iterator, target_dimensionality + len(self.target_agg_funcs)
        )
        fcst_iterator_set = tee(
            fcst_iterator, target_dimensionality + len(self.target_agg_funcs)
        )
        # NOTE(review): ``os`` is used below but this module never imports
        # it — ``import os`` must be present at the top of the file.
        for dim in tqdm(eval_dims,disable=os.environ.get("DISABLE_TQDM", False)):
            agg_metrics, metrics_per_ts = super(MultivariateEvaluator, self).__call__(
                self.extract_target_by_dim(ts_iterator_set[dim], dim),
                self.extract_forecast_by_dim(fcst_iterator_set[dim], dim),
            )
            all_metrics_per_ts.append(metrics_per_ts)
            for metric, value in agg_metrics.items():
                all_agg_metrics[f"{dim}_{metric}"] = value
        all_metrics_per_ts = pd.concat(all_metrics_per_ts)
        all_agg_metrics = self.calculate_aggregate_vector_metrics(
            all_agg_metrics, all_metrics_per_ts
        )
        if self.target_agg_funcs:
            multivariate_metrics = {
                agg_fun_name: self.calculate_aggregate_multivariate_metrics(
                    ts_iterator_set[-(index + 1)],
                    fcst_iterator_set[-(index + 1)],
                    agg_fun,
                )
                for index, (agg_fun_name, agg_fun) in enumerate(
                    self.target_agg_funcs.items()
                )
            }
            for key, metric_dict in multivariate_metrics.items():
                prefix = f"m_{key}_"
                for metric, value in metric_dict.items():
                    all_agg_metrics[prefix + metric] = value
        return all_agg_metrics, all_metrics_per_ts
| 20,928 | 33.708126 | 119 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/evaluation/__init__.py | from .backtest import make_evaluation_predictions, backtest_metrics
from .evaluator import Evaluator, MultivariateEvaluator
| 124 | 40.666667 | 67 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/evaluation/backtest.py | # Standard library imports
import logging
from typing import Dict, Iterator, NamedTuple, Optional, Tuple, Union
# Third-party imports
import pandas as pd
from pts.dataset import (
DataEntry,
Dataset,
DatasetStatistics,
calculate_dataset_statistics,
)
from pts.model import Estimator, Predictor, Forecast
from pts.dataset import FieldName
# First-party imports
from pts.transform import AdhocTransform, TransformedDataset
from .evaluator import Evaluator
def make_validation_data(dataset: Dataset, val_start=None, val_start_final=1914):
    """
    Make a validation dataset by adding an "is_validation" field to each entry.

    Parameters
    ----------
    dataset
        Base dataset where the validation will happen. Only the portion excluding
        the prediction_length portion is used when making prediction.
        TODO: Only valid if use InstanceSampler
    val_start
        Start index of the validation split; when ``None`` the offset is 0.
    val_start_final
        Reference end index used to compute the offset. The default of 1914
        presumably marks the M5 validation boundary — TODO confirm.
    """
    def add_validation_field(data):
        # Work on a copy so the source entry is left untouched.
        data = data.copy()
        # FIX (idiom): compare to None with ``is``, not ``==``.
        if val_start is None:
            data["is_validation"] = 0
        else:
            data["is_validation"] = val_start_final - val_start
        # assert data["is_validation"] >= 0
        return data
    return TransformedDataset(
        dataset, transformations=[AdhocTransform(add_validation_field)]
    )
def make_evaluation_predictions(
    dataset: Dataset, predictor: Predictor, num_samples: int
) -> Tuple[Iterator[Forecast], Iterator[pd.Series]]:
    """
    Return predictions on the last portion of predict_length time units of the
    target. Such portion is cut before making predictions, such a function can
    be used in evaluations where accuracy is evaluated on the last portion of
    the target.
    Parameters
    ----------
    dataset
        Dataset where the evaluation will happen. Only the portion excluding
        the prediction_length portion is used when making prediction.
    predictor
        Model used to draw predictions.
    num_samples
        Number of samples to draw on the model when evaluating.
    Returns
    -------
    Tuple[Iterator[Forecast], Iterator[pd.Series]]
        forecasts over the truncated dataset, and the full ground-truth
        series for comparison.
    """
    prediction_length = predictor.prediction_length
    freq = predictor.freq
    def add_ts_dataframe(data_iterator: Iterator[DataEntry]) -> Iterator[DataEntry]:
        # Attach a date-indexed "ts" DataFrame to every entry.
        for data_entry in data_iterator:
            data = data_entry.copy()
            index = pd.date_range(
                start=data["start"], freq=freq, periods=data["target"].shape[-1],
            )
            data["ts"] = pd.DataFrame(index=index, data=data["target"].transpose())
            yield data
    def ts_iter(dataset: Dataset) -> pd.DataFrame:
        # Yield only the per-entry ground-truth DataFrames.
        for data_entry in add_ts_dataframe(iter(dataset)):
            yield data_entry["ts"]
    def truncate_target(data):
        # Drop the last prediction_length steps from the target and the
        # aligned past-dynamic features so the model cannot see them.
        data = data.copy()
        target = data["target"]
        acc_target = data[FieldName.ACC_TARGET_SUM]
        feat_dynamic_past = data[FieldName.FEAT_DYNAMIC_PAST]
        assert target.shape[-1] >= prediction_length  # handles multivariate case (target_dim, history_length)
        assert feat_dynamic_past.shape[-1] >= prediction_length
        data["target"] = target[..., :-prediction_length]
        data[FieldName.ACC_TARGET_SUM] = acc_target[..., :-prediction_length]
        data[FieldName.FEAT_DYNAMIC_PAST] = feat_dynamic_past[..., :-prediction_length]
        return data
    # TODO filter out time series with target shorter than prediction length
    # TODO or fix the evaluator so it supports missing values instead (all
    # TODO the test set may be gone otherwise with such a filtering)
    dataset_trunc = TransformedDataset(
        dataset, transformations=[AdhocTransform(truncate_target)]
    )
    return (
        predictor.predict(dataset_trunc, num_samples=num_samples),  # [Note] prediction is performed on the truncated dataset
        ts_iter(dataset),
    )
# Tags under which `backtest_metrics` logs structured information via
# `serialize_message`, so the values can be located/parsed in log files.
train_dataset_stats_key = "train_dataset_stats"
test_dataset_stats_key = "test_dataset_stats"
estimator_key = "estimator"
agg_metrics_key = "agg_metrics"
def serialize_message(logger, message: str, variable):
    """Log *variable* at INFO level under a ``pts[<message>]`` tag so it can
    be recognized and parsed back out of log files later."""
    logger.info("pts[%s]: %s" % (message, variable))
def backtest_metrics(
    train_dataset: Optional[Dataset],
    test_dataset: Dataset,
    forecaster: Union[Estimator, Predictor],
    # NOTE(review): this default Evaluator is constructed once at import time
    # and shared across calls — confirm Evaluator is stateless.
    evaluator=Evaluator(quantiles=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)),
    num_samples: int = 100,
    logging_file: Optional[str] = None,
):
    """
    Train (when given an Estimator) and evaluate a forecaster on a test set.
    Parameters
    ----------
    train_dataset
        Dataset to use for training.
    test_dataset
        Dataset to use for testing.
    forecaster
        An estimator or a predictor to use for generating predictions.
    evaluator
        Evaluator to use.
    num_samples
        Number of samples to use when generating sample-based forecasts.
    logging_file
        If specified, information of the backtest is redirected to this file.
    Returns
    -------
    tuple
        A tuple of aggregate metrics and per-time-series metrics obtained by
        training `forecaster` on `train_dataset` and evaluating the resulting
        `evaluator` provided on the `test_dataset`.
    """
    if logging_file is not None:
        log_formatter = logging.Formatter(
            "[%(asctime)s %(levelname)s %(thread)d] %(message)s",
            datefmt="%m/%d/%Y %H:%M:%S",
        )
        logger = logging.getLogger(__name__)
        handler = logging.FileHandler(logging_file)
        handler.setFormatter(log_formatter)
        logger.addHandler(handler)
    else:
        logger = logging.getLogger(__name__)
    if train_dataset is not None:
        # log dataset statistics in the parseable `pts[...]` format
        train_statistics = calculate_dataset_statistics(train_dataset)
        serialize_message(logger, train_dataset_stats_key, train_statistics)
    test_statistics = calculate_dataset_statistics(test_dataset)
    serialize_message(logger, test_dataset_stats_key, test_statistics)
    if isinstance(forecaster, Estimator):
        # an Estimator must be trained first; a Predictor is used as-is
        serialize_message(logger, estimator_key, forecaster)
        assert train_dataset is not None
        predictor = forecaster.train(train_dataset)
    else:
        predictor = forecaster
    forecast_it, ts_it = make_evaluation_predictions(
        test_dataset, predictor=predictor, num_samples=num_samples
    )
    agg_metrics, item_metrics = evaluator(
        ts_it, forecast_it, num_series=len(test_dataset)
    )
    # we only log aggregate metrics for now as item metrics may be very large
    for name, value in agg_metrics.items():
        serialize_message(logger, f"metric-{name}", value)
    if logging_file is not None:
        # Close the file handler to avoid letting the file open.
        # https://stackoverflow.com/questions/24816456/python-logging-wont-shutdown
        logger.removeHandler(handler)
        del logger, handler
    return agg_metrics, item_metrics
class BacktestInformation(NamedTuple):
    """Summary of one backtest run: statistics of the datasets involved, the
    estimator used, and the aggregate metrics it achieved."""
    train_dataset_stats: DatasetStatistics  # statistics of the training data
    test_dataset_stats: DatasetStatistics  # statistics of the test data
    estimator: Estimator  # the estimator that was backtested
    agg_metrics: Dict[str, float]  # aggregate metrics keyed by metric name
    # Log-parsing constructors kept (disabled) from the original GluonTS
    # implementation; they reconstruct a BacktestInformation from the
    # `pts[...]` messages emitted by `serialize_message`.
    # @staticmethod
    # def make_from_log(log_file):
    #     with open(log_file, "r") as f:
    #         return BacktestInformation.make_from_log_contents(
    #             "\n".join(f.readlines())
    #         )
    # @staticmethod
    # def make_from_log_contents(log_contents):
    #     messages = dict(re.findall(r"pts\[(.*)\]: (.*)", log_contents))
    #     # avoid to fail if a key is missing for instance in the case a run did
    #     # not finish so that we can still get partial information
    #     try:
    #         return BacktestInformation(
    #             train_dataset_stats=eval(
    #                 messages[train_dataset_stats_key]
    #             ),  # TODO: use load
    #             test_dataset_stats=eval(
    #                 messages[test_dataset_stats_key]
    #             ),  # TODO: use load
    #             estimator=load_code(messages[estimator_key]),
    #             agg_metrics={
    #                 k: load_code(v)
    #                 for k, v in messages.items()
    #                 if k.startswith("metric-") and v != "nan"
    #             },
    #         )
    #     except Exception as error:
    #         logging.error(error)
    #         return None
| 8,129 | 33.449153 | 112 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/core/serde.py | import itertools
import json
import math
import textwrap
from functools import singledispatch
from pydoc import locate
from typing import Any, Optional, cast, NamedTuple
import numpy as np
import torch
from pts.core import fqname_for
bad_type_msg = textwrap.dedent(
"""
Cannot serialize type {}. See the documentation of the `encode` and
`validate` functions at
http://gluon-ts.mxnet.io/api/gluonts/gluonts.html
and the Python documentation of the `__getnewargs_ex__` magic method at
https://docs.python.org/3/library/pickle.html#object.__getnewargs_ex__
for more information how to make this type serializable.
"""
).lstrip()
def dump_code(o: Any) -> str:
    """
    Serializes an object to a Python code string.

    Parameters
    ----------
    o
        The object to serialize.

    Returns
    -------
    str
        A string representing the object as Python code.

    See Also
    --------
    load_code
        Inverse function.
    """

    def _dump_code(x: Any) -> str:
        # x is already the *encoded* intermediate representation, i.e. one of:
        # { '__kind__': 'instance', 'class': ..., 'args'/'kwargs': ... }
        # { '__kind__': 'type', 'class': ... }
        # or a plain container / scalar.
        if type(x) == dict and x.get("__kind__") == kind_inst:
            args = x.get("args", [])
            kwargs = x.get("kwargs", {})
            fqname = x["class"]
            bindings = ", ".join(
                itertools.chain(
                    map(_dump_code, args),
                    [f"{k}={_dump_code(v)}" for k, v in kwargs.items()],
                )
            )
            return f"{fqname}({bindings})"
        if type(x) == dict and x.get("__kind__") == kind_type:
            return x["class"]
        if isinstance(x, dict):
            inner = ", ".join(
                f"{_dump_code(k)}: {_dump_code(v)}" for k, v in x.items()
            )
            return f"{{{inner}}}"
        if isinstance(x, list):
            # elements are already encoded, so recurse with _dump_code
            # (recursing through dump_code would re-encode them needlessly)
            inner = ", ".join(list(map(_dump_code, x)))
            return f"[{inner}]"
        if isinstance(x, tuple):
            inner = ", ".join(list(map(_dump_code, x)))
            # account for the extra `,` in `(x,)`
            if len(x) == 1:
                inner += ","
            return f"({inner})"
        if isinstance(x, str):
            # json.dumps escapes the string
            return json.dumps(x)
        if isinstance(x, float) or np.issubdtype(type(x), np.inexact):
            if math.isfinite(x):
                return str(x)
            else:
                # e.g. `nan` needs to be encoded as `float("nan")`
                # BUGFIX: was a plain string literal missing the f-prefix,
                # which emitted the text `float("{x}")` verbatim
                return f'float("{x}")'
        if isinstance(x, int) or np.issubdtype(type(x), np.integer):
            return str(x)
        if x is None:
            return str(x)
        raise RuntimeError(
            f"Unexpected element type {fqname_for(x.__class__)}"
        )

    return _dump_code(encode(o))
# JSON Serialization/Deserialization
# ----------------------------------
# The canonical way to do this is to define and `default` and `object_hook`
# parameters to the json.dumps and json.loads methods. Unfortunately, due
# to https://bugs.python.org/issue12657 this is not possible at the moment,
# as support for custom NamedTuple serialization is broken.
#
# To circumvent the issue, we pass the input value through custom encode
# and decode functions that map nested object terms to JSON-serializable
# data structures with explicit recursion.
def dump_json(o: Any, indent: Optional[int] = None) -> str:
    """
    Serialize ``o`` to a JSON string.

    Parameters
    ----------
    o
        Object to serialize.
    indent
        Optional indentation width forwarded to :func:`json.dumps`.

    Returns
    -------
    str
        JSON representation of ``o``.

    See Also
    --------
    load_json
        Inverse function.
    """
    intermediate = encode(o)
    return json.dumps(intermediate, sort_keys=True, indent=indent)
def load_json(s: str) -> Any:
    """
    Deserialize an object from a JSON string produced by :func:`dump_json`.

    Parameters
    ----------
    s
        JSON representation of the object.

    Returns
    -------
    Any
        The deserialized object.

    See Also
    --------
    dump_json
        Inverse function.
    """
    intermediate = json.loads(s)
    return decode(intermediate)
# Structural encoding/decoding
# ----------------------------
# Markers stored under the "__kind__" key of the intermediate representation,
# distinguishing encoded *types* from encoded *instances*.
kind_type = "type"
kind_inst = "instance"
@singledispatch
def encode(v: Any) -> Any:
    """
    Transforms a value `v` as a serializable intermediate representation (for
    example, named tuples are encoded as dictionaries). The intermediate
    representation is then recursively traversed and serialized either as
    Python code or as JSON string.
    This function is decorated with :func:`~functools.singledispatch` and can
    be specialized by clients for families of types that are not supported by
    the basic implementation (explained below).
    Examples
    --------
    The conversion logic implemented by the basic implementation is used
    as a fallback and is best explained by a series of examples.
    Lists (as lists).
    >>> encode([1, 2.0, '3'])
    [1, 2.0, '3']
    Tuples (as lists).
    >>> encode((1, 2.0, '3'))
    [1, 2.0, '3']
    Dictionaries (as dictionaries).
    >>> encode({'a': 1, 'b': 2.0, 'c': '3'})
    {'a': 1, 'b': 2.0, 'c': '3'}
    Named tuples (as dictionaries with a ``'__kind__': 'instance'`` member).
    >>> from pprint import pprint
    >>> from typing import NamedTuple
    >>> class ComplexNumber(NamedTuple):
    ...     x: float = 0.0
    ...     y: float = 0.0
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}
    Classes with a :func:`~gluonts.core.component.validated` initializer (as
    dictionaries with a ``'__kind__': 'instance'`` member).
    >>> from gluonts.core.component import validated
    >>> class ComplexNumber:
    ...     @validated()
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'args': [],
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}
    Classes with a ``__getnewargs_ex__`` magic method (as dictionaries with a
    ``'__kind__': 'instance'`` member).
    >>> from gluonts.core.component import validated
    >>> class ComplexNumber:
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    ...     def __getnewargs_ex__(self):
    ...         return [], {'x': self.x, 'y': self.y}
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'args': [],
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}
    Types (as dictionaries with a ``'__kind__': 'type' member``).
    >>> encode(ComplexNumber)
    {'__kind__': 'type', 'class': 'gluonts.core.serde.ComplexNumber'}
    Parameters
    ----------
    v
        The value to be encoded.
    Returns
    -------
    Any
        An encoding of ``v`` that can be serialized to Python code or
        JSON string.
    See Also
    --------
    decode
        Inverse function.
    dump_json
        Serializes an object to a JSON string.
    dump_code
        Serializes an object to a Python code string.
    """
    # NOTE: the order of these checks is significant — e.g. namedtuples must
    # be handled before the generic tuple branch below.
    if isinstance(v, type(None)):
        return None
    if isinstance(v, (float, int, str)):
        return v
    # numpy scalars are converted to plain Python numbers
    if np.issubdtype(type(v), np.inexact):
        return float(v)
    if np.issubdtype(type(v), np.integer):
        return int(v)
    # we have to check for namedtuples first, to encode them not as plain
    # tuples (which would become lists)
    if isinstance(v, tuple) and hasattr(v, "_asdict"):
        v = cast(NamedTuple, v)
        return {
            "__kind__": kind_inst,
            "class": fqname_for(v.__class__),
            "kwargs": encode(v._asdict()),
        }
    # sets and tuples lose their type here: both come back as lists on decode
    if isinstance(v, (list, set, tuple)):
        return list(map(encode, v))
    if isinstance(v, dict):
        return {k: encode(v) for k, v in v.items()}
    if isinstance(v, type):
        return {"__kind__": kind_type, "class": fqname_for(v)}
    if hasattr(v, "__getnewargs_ex__"):
        args, kwargs = v.__getnewargs_ex__()  # mypy: ignore
        return {
            "__kind__": kind_inst,
            "class": fqname_for(v.__class__),
            "args": encode(args),
            "kwargs": encode(kwargs),
        }
    # NOTE(review): this branch is only reached when torch.device lacks
    # __getnewargs_ex__ (checked above) — confirm; devices encode as None.
    if isinstance(v, torch.device):
        return None
    raise RuntimeError(bad_type_msg.format(fqname_for(v.__class__)))
def decode(r: Any) -> Any:
    """
    Reconstruct a Python object from an intermediate representation *r*.

    Inverse of :func:`encode`: instance/type records (dicts carrying a
    ``__kind__`` marker) are re-instantiated via :func:`pydoc.locate`;
    plain containers are decoded element-wise; every other value is
    returned unchanged.
    """
    # Instance record: {'__kind__': 'instance', 'class': ..., 'args'/'kwargs': ...}
    if type(r) == dict and r.get("__kind__") == kind_inst:
        cls = cast(Any, locate(r["class"]))
        return cls(
            *(decode(r["args"]) if "args" in r else []),
            **(decode(r["kwargs"]) if "kwargs" in r else {}),
        )
    # Type record: {'__kind__': 'type', 'class': ...}
    if type(r) == dict and r.get("__kind__") == kind_type:
        return locate(r["class"])
    # Containers are decoded element-wise. Exact type() checks (rather than
    # isinstance) mirror the encoder and leave subclasses untouched.
    if type(r) == dict:
        return {key: decode(val) for key, val in r.items()}
    if type(r) == tuple:
        return tuple(decode(el) for el in r)
    if type(r) == list:
        return [decode(el) for el in r]
    if type(r) == set:
        return {decode(el) for el in r}
    # Scalars and anything unrecognized pass through untouched.
    return r
| 10,036 | 26.49863 | 78 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/core/logging.py | import os
import socket
from datetime import datetime
import logging
from pathlib import Path
def get_log_path(log_dir, log_comment='temp', trial='t0', mkdir=True):
    """
    Build (and optionally create) the log directory path ``logs/<log_dir>/<log_comment>/<trial>``.

    Parameters
    ----------
    log_dir
        Top-level log sub-directory under ``logs/``.
    log_comment
        Run description; an empty string falls back to ``'temp'``.
    trial
        Trial identifier appended as the final path component.
    mkdir
        When truthy, create the directory (including parents) if missing.

    Returns
    -------
    tuple
        ``(full_log_path, base_path, trial_path)``.
    """
    if log_comment == '':
        log_comment = 'temp'
    base_path = os.path.join('logs', log_dir, log_comment)
    trial_path = trial
    full_log_path = f"{base_path}/{trial_path}"
    if mkdir:  # idiomatic truthiness check (was `mkdir == True`)
        # exist_ok avoids the TOCTOU race of exists()-then-mkdir()
        Path(full_log_path).mkdir(parents=True, exist_ok=True)
    return full_log_path, base_path, trial_path
def set_logger(text_log_path, text_log_file='log.txt', level=logging.INFO):
    """
    Configure the "mofl" logger to write both to the console and to a file.

    Parameters
    ----------
    text_log_path
        Directory in which the log file is created (must already exist).
    text_log_file
        Name of the log file inside ``text_log_path``.
    level
        Logging level applied to the logger.

    Returns
    -------
    logging.Logger
        The configured "mofl" logger (also retrievable via
        ``logging.getLogger("mofl")``).
    """
    # NOTE(review): logging.getLogger("mofl") returns a process-wide singleton,
    # so calling set_logger twice stacks duplicate handlers — TODO confirm
    # callers invoke this only once per process.
    logger = logging.getLogger("mofl")
    logger.setLevel(level)
    formatter = logging.Formatter(
        '%(asctime)s%(name)18s%(levelname)10s\t%(message)s'
    )
    stream_handler = logging.StreamHandler()  # fixed typo: was `stream_hander`
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    log_file = f"{text_log_path}/{text_log_file}"
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # keep messages out of the root logger to avoid duplicate console output
    logger.propagate = False
    logger.info(f"Logging to {log_file}...")
    return logger
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/core/component.py | import functools
import inspect
from collections import OrderedDict
from typing import Any
import torch
from pydantic import BaseConfig, BaseModel, create_model
from pts.core.serde import dump_code
class BaseValidatedInitializerModel(BaseModel):
    """
    Base Pydantic model for components with :func:`validated` initializers.
    See Also
    --------
    validated
        Decorates an initializer methods with argument validation logic.
    """
    class Config(BaseConfig):
        """
        `Config <https://pydantic-docs.helpmanual.io/#model-config>`_ for the
        Pydantic model inherited by all :func:`validated` initializers.
        Allows the use of arbitrary type annotations in initializer parameters.
        """
        # needed so initializer parameters may be annotated with
        # non-Pydantic types (e.g. torch / project classes)
        arbitrary_types_allowed = True
def validated(base_model=None):
    """
    Decorates an ``__init__`` method with typed parameters with validation
    and auto-conversion logic.
    >>> class ComplexNumber:
    ...     @validated()
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    Classes with decorated initializers can be instantiated using arguments of
    another type (e.g. an ``y`` argument of type ``str`` ). The decorator
    handles the type conversion logic.
    >>> c = ComplexNumber(y='42')
    >>> (c.x, c.y)
    (0.0, 42.0)
    If the bound argument cannot be converted, the decorator throws an error.
    >>> c = ComplexNumber(y=None)
    Traceback (most recent call last):
        ...
    pydantic.error_wrappers.ValidationError: 1 validation error for ComplexNumberModel
    y
        none is not an allowed value (type=type_error.none.not_allowed)
    Internally, the decorator delegates all validation and conversion logic to
    `a Pydantic model <https://pydantic-docs.helpmanual.io/>`_, which can be
    accessed through the ``Model`` attribute of the decorated initializer.
    >>> ComplexNumber.__init__.Model
    <class 'ComplexNumberModel'>
    The Pydantic model is synthesized automatically from the parameter
    names and types of the decorated initializer. In the ``ComplexNumber``
    example, the synthesized Pydantic model corresponds to the following
    definition.
    >>> class ComplexNumberModel(BaseValidatedInitializerModel):
    ...     x: float = 0.0
    ...     y: float = 0.0
    Clients can optionally customize the base class of the synthesized
    Pydantic model using the ``base_model`` decorator parameter. The default
    behavior uses :class:`BaseValidatedInitializerModel` and its
    `model config <https://pydantic-docs.helpmanual.io/#config>`_.
    See Also
    --------
    BaseValidatedInitializerModel
        Default base class for all synthesized Pydantic models.
    """
    def validator(init):
        init_qualname = dict(inspect.getmembers(init))["__qualname__"]
        init_clsnme = init_qualname.split(".")[0]
        init_params = inspect.signature(init).parameters
        # one Pydantic field per positional-or-keyword parameter (self excluded);
        # missing annotations fall back to Any, missing defaults to "required"
        init_fields = {
            param.name: (
                param.annotation
                if param.annotation != inspect.Parameter.empty
                else Any,
                param.default
                if param.default != inspect.Parameter.empty
                else ...,
            )
            for param in init_params.values()
            if param.name != "self"
            and param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        }
        if base_model is None:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __config__=BaseValidatedInitializerModel.Config,
                **init_fields,
            )
        else:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __base__=base_model,
                **init_fields,
            )
        def validated_repr(self) -> str:
            # serialize the instance back to constructor-call source code
            return dump_code(self)
        def validated_getnewargs_ex(self):
            # pickle support: re-create via keyword args captured at init time
            return (), self.__init_args__
        @functools.wraps(init)
        def init_wrapper(*args, **kwargs):
            self, *args = args
            # map positional args back to their parameter names
            nmargs = {
                name: arg
                for (name, param), arg in zip(
                    list(init_params.items()), [self] + args
                )
                if name != "self"
            }
            model = PydanticModel(**{**nmargs, **kwargs})
            # merge nmargs, kwargs, and the model fields into a single dict
            all_args = {**nmargs, **kwargs, **model.__dict__}
            # save the merged dictionary for Representable use, but only if the
            # __init_args__ is not already set in order to avoid overriding a
            # value set by a subclass initializer in super().__init__ calls
            if not getattr(self, "__init_args__", {}):
                self.__init_args__ = OrderedDict(
                    {
                        name: arg
                        for name, arg in sorted(all_args.items())
                        if type(arg) != torch.nn.ParameterDict
                    }
                )
                self.__class__.__getnewargs_ex__ = validated_getnewargs_ex
                self.__class__.__repr__ = validated_repr
            return init(self, **all_args)
        # attach the Pydantic model as the attribute of the initializer wrapper
        setattr(init_wrapper, "Model", PydanticModel)
        return init_wrapper
    return validator
def fqname_for(cls: type) -> str:
    """
    Return the fully qualified name of ``cls``.

    Parameters
    ----------
    cls
        The class we are interested in.

    Returns
    -------
    str
        The fully qualified name of ``cls`` (``module.qualname``).
    """
    return ".".join((cls.__module__, cls.__qualname__))
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/core/__init__.py | # Relative imports
from ._base import fqname_for
__all__ = ["fqname_for"]
# fix Sphinx issues, see https://bit.ly/2K2eptM
# NOTE(review): `item` is a *string* taken from __all__, and strings never
# have a __module__ attribute, so this loop is currently a no-op — it likely
# meant to look the objects up (e.g. via globals()) first; confirm intent
# before changing, since rewriting __module__ would alter fqname_for output.
for item in __all__:
    if hasattr(item, "__module__"):
        setattr(item, "__module__", __name__)
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/artificial.py | import math
import os
import random
from typing import Callable, List, NamedTuple, Optional, Tuple, Union
import numpy as np
import pandas as pd
import rapidjson as json
from .common import (
MetaData,
CategoricalFeatureInfo,
BasicFeatureInfo,
FieldName,
Dataset,
TrainDatasets,
DataEntry,
)
from .list_dataset import ListDataset
from .recipe import (
BinaryHolidays,
BinaryMarkovChain,
Constant,
ForEachCat,
Lag,
LinearTrend,
RandomCat,
RandomGaussian,
Stack,
generate,
take_as_list,
)
from .stat import DatasetStatistics, calculate_dataset_statistics
class DatasetInfo(NamedTuple):
    """
    Information stored on a dataset. When downloading from the repository, the
    dataset repository checks that the obtained version matches the one
    declared in dataset_info/dataset_name.json.
    """
    name: str  # dataset identifier
    metadata: MetaData  # frequency / feature metadata of the dataset
    prediction_length: int  # forecast horizon
    train_statistics: DatasetStatistics  # statistics of the training split
    test_statistics: DatasetStatistics  # statistics of the test split
class ArtificialDataset:
    """
    Parent class of a dataset that can be generated from code.
    Subclasses provide `metadata`, `train` and `test`; `generate` packs them
    into a TrainDatasets container of ListDatasets.
    """
    def __init__(self, freq) -> None:
        # pandas-style frequency string shared by the train and test splits
        self.freq = freq
    @property
    def metadata(self) -> MetaData:
        # to be overridden by subclasses
        pass
    @property
    def train(self) -> List[DataEntry]:
        # to be overridden by subclasses
        pass
    @property
    def test(self) -> List[DataEntry]:
        # to be overridden by subclasses
        pass
    # todo return the same type as dataset repo for better usability
    def generate(self) -> TrainDatasets:
        """Materialize the train/test entries as ListDatasets."""
        return TrainDatasets(
            metadata=self.metadata,
            train=ListDataset(self.train, self.freq),
            test=ListDataset(self.test, self.freq),
        )
class ConstantDataset(ArtificialDataset):
    """Synthetic dataset of (mostly) constant time series, with optional
    perturbations — NaNs, noise, trends, promotions, holidays, varying
    lengths — controlled by the constructor flags."""
    def __init__(
        self,
        num_timeseries: int = 10,
        num_steps: int = 30,
        freq: str = "1H",
        start: str = "2000-01-01 00:00:00",
        is_nan: bool = False, # Generates constant dataset of 0s with explicit NaN missing values
        is_random_constant: bool = False, # Inserts random constant value for each time series
        is_different_scales: bool = False, # Generates constants on various scales
        is_piecewise: bool = False, # Determines whether the time series in the test
        # and train set should have different constant values
        is_noise: bool = False, # Determines whether to add Gaussian noise to the constant dataset
        is_long: bool = False, # Determines whether some time series will have very long lengths
        is_short: bool = False, # Determines whether some time series will have very short lengths
        is_trend: bool = False, # Determines whether to add linear trends
        num_missing_middle: int = 0, # Number of missing values in the middle of the time series
        is_promotions: bool = False, # Determines whether to add promotions to the target time series
        # and to store in metadata
        holidays: Optional[
            List[pd.Timestamp]
        ] = None, # Determines whether to add holidays to the target time series
        # and to store in metadata
    ) -> None:
        super(ConstantDataset, self).__init__(freq)
        self.num_timeseries = num_timeseries
        self.num_steps = num_steps
        # 80/20 train/prediction split of the requested number of steps
        self.num_training_steps = self.num_steps // 10 * 8
        self.prediction_length = self.num_steps - self.num_training_steps
        self.start = start
        self.is_nan = is_nan
        self.is_random_constant = is_random_constant
        self.is_different_scales = is_different_scales
        self.is_piecewise = is_piecewise
        self.is_noise = is_noise
        self.is_long = is_long
        self.is_short = is_short
        self.is_trend = is_trend
        self.num_missing_middle = num_missing_middle
        self.is_promotions = is_promotions
        self.holidays = holidays
    @property
    def metadata(self) -> MetaData:
        """Dataset metadata; a dynamic real feature is declared only when
        promotions or holidays are generated."""
        metadata = MetaData(
            freq=self.freq,
            feat_static_cat=[
                {
                    "name": "feat_static_cat_000",
                    "cardinality": str(self.num_timeseries),
                }
            ],
            feat_static_real=[{"name": "feat_static_real_000"}],
            prediction_length=self.prediction_length,
        )
        if self.is_promotions or self.holidays:
            metadata = MetaData(
                freq=self.freq,
                feat_static_cat=[
                    {
                        "name": "feat_static_cat_000",
                        "cardinality": str(self.num_timeseries),
                    }
                ],
                feat_static_real=[{"name": "feat_static_real_000"}],
                feat_dynamic_real=[BasicFeatureInfo(name=FieldName.FEAT_DYNAMIC_REAL)],
                prediction_length=self.prediction_length,
            )
        return metadata
    def determine_constant(
        self, index: int, constant: Optional[float] = None, seed: int = 1
    ) -> Optional[float]:
        """Pick the constant level of series `index` according to the
        is_random_constant / is_different_scales flags."""
        if self.is_random_constant:
            my_random = random.Random(seed)
            constant = (index + 1) * my_random.random()
        elif self.is_different_scales:
            if index == 0:
                constant = 1e-8
            elif constant is not None:
                constant *= 100
        else:
            constant = float(index)
        return constant
    def compute_data_from_recipe(
        self,
        num_steps: int,
        constant: Optional[float] = None,
        one_to_zero: float = 0.1,
        zero_to_one: float = 0.1,
        scale_features: float = 200,
    ) -> TrainDatasets:
        """Build one series via the recipe machinery, layering noise, trend,
        promotion and holiday effects on top of the constant level."""
        recipe = []
        recipe_type = Constant(constant)
        if self.is_noise:
            recipe_type += RandomGaussian() # Use default stddev = 1.0
        if self.is_trend:
            recipe_type += LinearTrend()
        if self.is_promotions:
            recipe.append(
                ("binary_causal", BinaryMarkovChain(one_to_zero, zero_to_one))
            )
            recipe.append((FieldName.FEAT_DYNAMIC_REAL, Stack(["binary_causal"])))
            recipe_type += scale_features * Lag("binary_causal", lag=0)
        if self.holidays:
            timestamp = self.init_date()
            # Compute dates array
            dates = []
            for i in range(num_steps):
                dates.append(timestamp)
                timestamp += 1
            recipe.append(("binary_holidays", BinaryHolidays(dates, self.holidays)))
            recipe.append((FieldName.FEAT_DYNAMIC_REAL, Stack(["binary_holidays"])))
            recipe_type += scale_features * Lag("binary_holidays", lag=0)
        recipe.append((FieldName.TARGET, recipe_type))
        max_train_length = num_steps - self.prediction_length
        # NOTE(review): RecipeDataset is not among this module's visible
        # imports — confirm it is defined/imported elsewhere in the file.
        data = RecipeDataset(
            recipe=recipe,
            metadata=self.metadata,
            max_train_length=max_train_length,
            prediction_length=self.prediction_length,
            num_timeseries=1, # Add 1 time series at a time in the loop for different constant values per time series
        )
        generated = data.generate()
        return generated
    def piecewise_constant(self, index: int, num_steps: int) -> List:
        """Constant level that switches value at the train/test boundary
        (different RNG seed after num_training_steps)."""
        target = []
        for j in range(num_steps):
            if j < self.num_training_steps:
                constant = self.determine_constant(index=index)
            else:
                constant = self.determine_constant(index=index, seed=2)
            target.append(constant)
        return target
    def get_num_steps(
        self,
        index: int,
        num_steps_max: int = 10000,
        long_freq: int = 4,
        num_steps_min: int = 2,
        short_freq: int = 4,
    ) -> int:
        # every long_freq-th (resp. short_freq-th) series is stretched or
        # shortened when is_long / is_short is set
        num_steps = self.num_steps
        if self.is_long and index % long_freq == 0:
            num_steps = num_steps_max
        elif self.is_short and index % short_freq == 0:
            num_steps = num_steps_min
        return num_steps
    def init_date(self) -> pd.Timestamp:
        """Start timestamp; a bare weekly frequency is anchored on the
        weekday of `self.start` (e.g. "W-MON")."""
        week_dict = {
            0: "MON",
            1: "TUE",
            2: "WED",
            3: "THU",
            4: "FRI",
            5: "SAT",
            6: "SUN",
        }
        timestamp = pd.Timestamp(self.start)
        freq_week_start = self.freq
        if freq_week_start == "W":
            freq_week_start = f"W-{week_dict[timestamp.weekday()]}"
        return pd.Timestamp(self.start, freq=freq_week_start)
    @staticmethod
    def insert_nans_and_zeros(ts_len: int) -> List:
        """Alternate 0.0 (odd indices) and NaN (even indices > 0)."""
        target = []
        for j in range(ts_len):
            # Place NaNs at even indices. Use convention no NaNs before start date.
            if j != 0 and j % 2 == 0:
                target.append(np.nan)
            # Place zeros at odd indices
            else:
                target.append(0.0)
        return target
    def insert_missing_vals_middle(
        self, ts_len: int, constant: Optional[float]
    ) -> List:
        """Constant series with a solid NaN gap of `num_missing_middle` steps
        in the middle of the training range, plus sparse NaNs on both sides."""
        target = []
        lower_bound = (self.num_training_steps - self.num_missing_middle) // 2
        upper_bound = (self.num_training_steps + self.num_missing_middle) // 2
        # NOTE(review): floor(0.1 * num_missing_middle) is 0 whenever
        # 0 < num_missing_middle < 10, making the `%` below divide by zero —
        # confirm callers always pass num_missing_middle >= 10 (or 0).
        num_missing_endpts = math.floor(0.1 * self.num_missing_middle)
        for j in range(ts_len):
            if (
                (0 < j < lower_bound and j % (2 * num_missing_endpts) == 0)
                or (lower_bound <= j < upper_bound)
                or (j >= upper_bound and j % (2 * num_missing_endpts) == 0)
            ):
                val = np.nan
            else:
                val = constant
            target.append(val)
        return target
    def generate_ts(self, num_ts_steps: int, is_train: bool = False) -> List[DataEntry]:
        """Generate `num_timeseries` entries of length `num_ts_steps`,
        dispatching on the constructor flags for the target shape."""
        res = []
        constant = None
        for i in range(self.num_timeseries):
            if self.is_nan:
                target = self.insert_nans_and_zeros(num_ts_steps)
            elif self.is_piecewise:
                target = self.piecewise_constant(i, num_ts_steps)
            else:
                # `constant` carries over between iterations so
                # is_different_scales can multiply it by 100 per series
                constant = self.determine_constant(i, constant)
                if self.num_missing_middle > 0:
                    target = self.insert_missing_vals_middle(num_ts_steps, constant)
                elif (
                    self.is_noise
                    or self.is_trend
                    or self.is_promotions
                    or self.holidays
                ):
                    num_steps = self.get_num_steps(i)
                    generated = self.compute_data_from_recipe(num_steps, constant)
                    if is_train:
                        time_series = generated.train
                    else:
                        assert generated.test is not None
                        time_series = generated.test
                    # returns np array convert to list for consistency
                    target = list(time_series)[0][FieldName.TARGET].tolist()
                else:
                    target = [constant] * num_ts_steps
            ts_data = dict(
                start=self.start,
                target=target,
                item_id=str(i),
                feat_static_cat=[i],
                feat_static_real=[i],
            )
            if self.is_promotions or self.holidays:
                ts_data[FieldName.FEAT_DYNAMIC_REAL] = list(time_series)[0][
                    FieldName.FEAT_DYNAMIC_REAL
                ].tolist()
            res.append(ts_data)
        return res
    @property
    def train(self) -> List[DataEntry]:
        """Training split: first num_training_steps steps of each series."""
        return self.generate_ts(num_ts_steps=self.num_training_steps, is_train=True)
    @property
    def test(self) -> List[DataEntry]:
        """Test split: full num_steps steps of each series."""
        return self.generate_ts(num_ts_steps=self.num_steps)
class ComplexSeasonalTimeSeries(ArtificialDataset):
"""
Generate sinus time series that ramp up and reach a certain amplitude, and
level and have additional spikes on each sunday.
TODO: This could be converted to a RecipeDataset to avoid code duplication.
"""
def __init__(
self,
num_series: int = 100,
prediction_length: int = 20,
freq_str: str = "D",
length_low: int = 30,
length_high: int = 200,
min_val: float = -10000,
max_val: float = 10000,
is_integer: bool = False,
proportion_missing_values: float = 0,
is_noise: bool = True,
is_scale: bool = True,
percentage_unique_timestamps: float = 0.07,
is_out_of_bounds_date: bool = False,
seasonality: Optional[int] = None,
clip_values: bool = False,
) -> None:
"""
:param num_series: number of time series generated in the train and
test set
:param prediction_length:
:param freq_str:
:param length_low: minimum length of a time-series, must be larger than
prediction_length
:param length_high: maximum length of a time-series
:param min_val: min value of a time-series
:param max_val: max value of a time-series
:param is_integer: whether the dataset has integers or not
:param proportion_missing_values:
:param is_noise: whether to add noise
:param is_scale: whether to add scale
:param percentage_unique_timestamps: percentage of random start dates bounded between 0 and 1
:param is_out_of_bounds_date: determines whether to use very old start dates and start dates far in the future
:param seasonality: Seasonality of the generated data. If not given uses default seasonality for frequency
:param clip_values: if True the values will be clipped to [min_val, max_val], otherwise linearly scales them
"""
assert length_low > prediction_length
super(ComplexSeasonalTimeSeries, self).__init__(freq_str)
self.num_series = num_series
self.prediction_length = prediction_length
self.length_low = length_low
self.length_high = length_high
self.freq_str = freq_str
self.min_val = min_val
self.max_val = max_val
self.is_integer = is_integer
self.proportion_missing_values = proportion_missing_values
self.is_noise = is_noise
self.is_scale = is_scale
self.percentage_unique_timestamps = percentage_unique_timestamps
self.is_out_of_bounds_date = is_out_of_bounds_date
self.seasonality = seasonality
self.clip_values = clip_values
@property
def metadata(self) -> MetaData:
return MetaData(freq=self.freq, prediction_length=self.prediction_length)
def _get_period(self) -> int:
if self.seasonality is not None:
return self.seasonality
if self.freq_str == "M":
return 24
elif self.freq_str == "W":
return 52
elif self.freq_str == "D":
return 14
elif self.freq_str == "H":
return 24
elif self.freq_str == "min":
return 60
else:
raise RuntimeError()
def _get_start(self, index: int, my_random: random.Random) -> str:
if (
self.is_out_of_bounds_date and index == 0
): # Add edge case of dates out of normal bounds past date
start_y, start_m, start_d = (
1690,
2,
7,
) # Pandas doesn't allot before 1650
start_h, start_min = 18, 36
elif (
self.is_out_of_bounds_date and index == self.num_series - 1
): # Add edge case of dates out of normal bounds future date
start_y, start_m, start_d = (
2030,
6,
3,
) # Pandas doesn't allot before 1650
start_h, start_min = 18, 36
# assume that only 100 * percentage_unique_timestamps of timestamps are unique
elif my_random.random() < self.percentage_unique_timestamps:
start_y = my_random.randint(2000, 2018)
start_m = my_random.randint(1, 12)
start_d = my_random.randint(1, 28)
start_h = my_random.randint(0, 23)
start_min = my_random.randint(0, 59)
else:
start_y, start_m, start_d = 2013, 11, 28
start_h, start_min = 18, 36
if self.freq_str == "M":
return "%04.d-%02.d" % (start_y, start_m)
elif self.freq_str in ["W", "D"]:
return "%04.d-%02.d-%02.d" % (start_y, start_m, start_d)
elif self.freq_str == "H":
return "%04.d-%02.d-%02.d %02.d:00:00" % (
start_y,
start_m,
start_d,
start_h,
)
else:
return "%04.d-%02.d-%02.d %02.d:%02.d:00" % (
start_y,
start_m,
start_d,
start_h,
start_min,
)
def _special_time_point_indicator(self, index) -> bool:
    """Boolean mask marking "special" time points of the given
    DatetimeIndex (e.g. week starts for daily data), per frequency."""
    if self.freq_str == "M":
        return index.month == 1
    elif self.freq_str == "W":
        return index.month % 2 == 0
    elif self.freq_str == "D":
        return index.dayofweek == 0
    elif self.freq_str == "H":
        return index.hour == 0
    elif self.freq_str == "min":
        return index.minute % 30 == 0
    else:
        # BUG FIX: the original interpolated `index` (the whole
        # DatetimeIndex) into the message instead of the bad freq string.
        raise RuntimeError(f'Bad freq_str value "{self.freq_str}"')
@property
def train(self) -> List[DataEntry]:
    """Training split: each generated series with the last
    ``prediction_length`` points removed from the target."""
    entries = []
    for series in self.make_timeseries():
        entries.append(
            dict(
                start=series[FieldName.START],
                target=series[FieldName.TARGET][: -self.prediction_length],
                item_id=series[FieldName.ITEM_ID],
            )
        )
    return entries
@property
def test(self) -> List[DataEntry]:
    """Test split: the full generated series, including the prediction range."""
    full_series = self.make_timeseries()
    return full_series
def make_timeseries(self, seed: int = 1) -> List[DataEntry]:
    """Generate the full list of synthetic series (including the prediction
    range).

    The RNGs are seeded with ``seed`` so that two independent calls (one
    for the train split, one for the test split) produce identical series.
    """
    res = []

    # Fix seed so that the training set is the same
    # as the test set from 0:self.prediction_length for the two independent calls
    def sigmoid(x: np.ndarray) -> np.ndarray:
        return 1.0 / (1.0 + np.exp(-x))

    # Ensure same start dates in test and training set
    my_random = random.Random(seed)
    state = np.random.RandomState(seed)
    for i in range(self.num_series):
        val_range = self.max_val - self.min_val
        # Random series length within the configured bounds.
        length = state.randint(low=self.length_low, high=self.length_high)
        start = self._get_start(i, my_random)
        # Smooth ramp-up envelope so each series starts near zero.
        envelope = sigmoid((np.arange(length) - 20.0) / 10.0)
        # Random constant level and random phase for the seasonal sinusoid.
        level = 0.3 * val_range * (state.random_sample() - 0.5)
        phi = 2 * np.pi * state.random_sample()
        period = self._get_period()
        w = 2 * np.pi / period
        t = np.arange(length)
        idx = pd.date_range(start=start, freq=self.freq_str, periods=length)
        special_tp_indicator = self._special_time_point_indicator(idx)
        # Additive random bump applied only on "special" time points.
        sunday_effect = state.random_sample() * special_tp_indicator
        v = np.sin(w * t + phi) + sunday_effect
        if self.is_scale:
            scale = 0.1 * val_range * state.random_sample()
            v *= scale
        v += level
        if self.is_noise:
            noise_range = 0.02 * val_range * state.random_sample()
            noise = noise_range * state.normal(size=length)
            v += noise
        v = envelope * v
        if self.clip_values:
            np.clip(v, a_min=self.min_val, a_max=self.max_val, out=v)
        else:
            """
            Rather than mapping [v_min, v_max] to [self.min_val, self.max_val] which would lead to
            all the time series having the same min and max, we want to keep the same interval length
            (v_max - v_min). We thus shift the interval [v_min, v_max] in [self.min_val, self.max_val]
            and clip it if needed.
            """
            v_min, v_max = v.min(), v.max()
            p_min, p_max = (
                max(self.min_val, v_min),
                min(self.max_val, v_max),
            )
            shifted_min = np.clip(
                p_min + (p_max - v_max), a_min=self.min_val, a_max=self.max_val,
            )
            shifted_max = np.clip(
                p_max + (p_min - v_min), a_min=self.min_val, a_max=self.max_val,
            )
            v = shifted_min + (shifted_max - shifted_min) * (v - v_min) / (
                v_max - v_min
            )
        if self.is_integer:
            # Clip to integral bounds first so rounding stays inside range.
            np.clip(
                v, a_min=np.ceil(self.min_val), a_max=np.floor(self.max_val), out=v,
            )
            v = np.round(v).astype(int)
        v = list(v.tolist())
        if self.proportion_missing_values > 0:
            assert (
                self.proportion_missing_values < 1.0
            ), "Please chose a number 0 < x < 1.0"
            # Pick random positions to blank out as missing values.
            idx = np.arange(len(v))
            state.shuffle(idx)
            num_missing_values = (
                int(len(v) * self.proportion_missing_values) + 1
            )  # Add one in case this gets zero
            missing_idx = idx[:num_missing_values]
            for j in missing_idx:
                # Using convention that there are no missing values before the start date.
                if j != 0:
                    # Missing points are randomly encoded as None or "NaN".
                    v[j] = None if state.rand() < 0.5 else "NaN"
        res.append(
            dict(
                start=pd.Timestamp(start, freq=self.freq_str),
                target=np.array(v),
                item_id=i,
            )
        )
    return res
class RecipeDataset(ArtificialDataset):
    """Synthetic data set generated by providing a recipe.

    A recipe is either a (non-deterministic) function

        f(length: int, global_state: dict) -> dict

    or list of (field, function) tuples of the form

        (field: str, f(data: dict, length: int, global_state: dict) -> dict)

    which is processed sequentially, with data initially set to {},
    and each entry updating data[field] to the output of the function
    call.
    """

    def __init__(
        self,
        recipe: Union[Callable, List[Tuple[str, Callable]]],
        metadata: MetaData,
        max_train_length: int,
        prediction_length: int,
        num_timeseries: int,
        trim_length_fun=lambda x, **kwargs: 0,
        data_start=pd.Timestamp("2014-01-01"),
    ) -> None:
        """
        :param recipe: The recipe to generate from (see class docstring)
        :param metadata: The metadata to be included in the dataset
        :param max_train_length: The maximum length of a training time series.
        :param prediction_length: The length of the prediction range
        :param num_timeseries: Number of time series to generate
        :param trim_length_fun: Callable f(x: int) -> int returning the
            (shortened) training length
        :param data_start: Start date for the data set
        """
        super().__init__(freq=metadata.freq)
        self.recipe = recipe
        self._metadata = metadata
        self.max_train_length = max_train_length
        self.prediction_length = prediction_length
        self.trim_length_fun = trim_length_fun
        self.num_timeseries = num_timeseries
        self.data_start = pd.Timestamp(data_start, freq=self._metadata.freq)

    @property
    def metadata(self) -> MetaData:
        # Metadata is supplied at construction time, not derived.
        return self._metadata

    def dataset_info(self, train_ds: Dataset, test_ds: Dataset) -> DatasetInfo:
        """Summarize the generated splits: name, metadata, and statistics
        computed over the train and test datasets."""
        return DatasetInfo(
            name=f"RecipeDataset({repr(self.recipe)})",
            metadata=self.metadata,
            prediction_length=self.prediction_length,
            train_statistics=calculate_dataset_statistics(train_ds),
            test_statistics=calculate_dataset_statistics(test_ds),
        )

    @staticmethod
    def trim_ts_item_end(x: DataEntry, length: int) -> DataEntry:
        """Trim a TimeSeriesItem into a training range, by removing
        the last `length` time points from the target and dynamic
        features."""
        y = dict(
            item_id=x[FieldName.ITEM_ID],
            start=x[FieldName.START],
            target=x[FieldName.TARGET][:-length],
        )

        # Dynamic features are (num_features, time) arrays; trim along time.
        if FieldName.FEAT_DYNAMIC_CAT in x:
            y[FieldName.FEAT_DYNAMIC_CAT] = x[FieldName.FEAT_DYNAMIC_CAT][:, :-length]
        if FieldName.FEAT_DYNAMIC_REAL in x:
            y[FieldName.FEAT_DYNAMIC_REAL] = x[FieldName.FEAT_DYNAMIC_REAL][:, :-length]
        return y

    @staticmethod
    def trim_ts_item_front(x: DataEntry, length: int) -> DataEntry:
        """Trim a TimeSeriesItem into a training range, by removing
        the first `length` time points from the target and dynamic
        features."""
        assert length <= len(x[FieldName.TARGET])

        y = dict(
            item_id=x[FieldName.ITEM_ID],
            # Advance the start timestamp by the number of removed steps.
            start=x[FieldName.START] + length * x[FieldName.START].freq,
            target=x[FieldName.TARGET][length:],
        )
        if FieldName.FEAT_DYNAMIC_CAT in x:
            y[FieldName.FEAT_DYNAMIC_CAT] = x[FieldName.FEAT_DYNAMIC_CAT][:, length:]
        if FieldName.FEAT_DYNAMIC_REAL in x:
            y[FieldName.FEAT_DYNAMIC_REAL] = x[FieldName.FEAT_DYNAMIC_REAL][:, length:]
        return y

    def generate(self) -> TrainDatasets:
        """Generate the dataset.

        Full-length series are drawn from the recipe, trimmed at the front
        by ``trim_length_fun`` (test split); the train split additionally
        drops the prediction range at the end.
        """
        metadata = self.metadata
        data_it = generate(
            length=self.max_train_length + self.prediction_length,
            recipe=self.recipe,
            start=self.data_start,
        )
        full_length_data = take_as_list(data_it, self.num_timeseries)
        test_data = [
            RecipeDataset.trim_ts_item_front(
                x, self.trim_length_fun(x, train_length=self.max_train_length)
            )
            for x in full_length_data
        ]
        train_data = [
            RecipeDataset.trim_ts_item_end(x, self.prediction_length) for x in test_data
        ]
        return TrainDatasets(
            metadata=metadata,
            train=ListDataset(train_data, metadata.freq),
            test=ListDataset(test_data, metadata.freq),
        )
def default_synthetic() -> Tuple[DatasetInfo, Dataset, Dataset]:
    """Build a small default synthetic dataset from a fixed recipe and
    return its info together with the train and test splits."""
    synthetic_recipe = [
        (FieldName.TARGET, LinearTrend() + RandomGaussian()),
        (FieldName.FEAT_STATIC_CAT, RandomCat([10])),
        (
            FieldName.FEAT_STATIC_REAL,
            ForEachCat(RandomGaussian(1, (10,)), FieldName.FEAT_STATIC_CAT)
            + RandomGaussian(0.1, (10,)),
        ),
    ]
    meta = MetaData(
        freq="D",
        feat_static_real=[BasicFeatureInfo(name=FieldName.FEAT_STATIC_REAL)],
        feat_static_cat=[
            CategoricalFeatureInfo(name=FieldName.FEAT_STATIC_CAT, cardinality=10)
        ],
        feat_dynamic_real=[BasicFeatureInfo(name=FieldName.FEAT_DYNAMIC_REAL)],
    )

    def _trim_length(x, **kwargs):
        # Geometrically distributed trim length, capped at the full train length.
        return np.minimum(
            int(np.random.geometric(1 / (kwargs["train_length"] / 2))),
            kwargs["train_length"],
        )

    dataset = RecipeDataset(
        recipe=synthetic_recipe,
        metadata=meta,
        max_train_length=20,
        prediction_length=10,
        num_timeseries=10,
        trim_length_fun=_trim_length,
    )
    generated = dataset.generate()
    assert generated.test is not None
    info = dataset.dataset_info(generated.train, generated.test)
    return info, generated.train, generated.test
def constant_dataset() -> Tuple[DatasetInfo, Dataset, Dataset]:
    """Tiny deterministic dataset: ten constant hourly series (series ``i``
    has constant value ``float(i)``), 24 points for train and 30 for test."""
    metadata = MetaData(
        freq="1H",
        feat_static_cat=[
            CategoricalFeatureInfo(name="feat_static_cat_000", cardinality="10")
        ],
        feat_static_real=[BasicFeatureInfo(name="feat_static_real_000")],
    )
    start_date = "2000-01-01 00:00:00"

    def _constant_entries(num_steps):
        # One constant series per category value i, num_steps points long.
        return [
            {
                FieldName.ITEM_ID: str(i),
                FieldName.START: start_date,
                FieldName.TARGET: [float(i)] * num_steps,
                FieldName.FEAT_STATIC_CAT: [i],
                FieldName.FEAT_STATIC_REAL: [float(i)],
            }
            for i in range(10)
        ]

    train_ds = ListDataset(data_iter=_constant_entries(24), freq=metadata.freq)
    test_ds = ListDataset(data_iter=_constant_entries(30), freq=metadata.freq)
    info = DatasetInfo(
        name="constant_dataset",
        metadata=metadata,
        prediction_length=6,
        train_statistics=calculate_dataset_statistics(train_ds),
        test_statistics=calculate_dataset_statistics(test_ds),
    )
    return info, train_ds, test_ds
def generate_sf2(
    filename: str, time_series: List, is_missing: bool, num_missing: int
) -> None:
    """Write ``time_series`` to ``filename`` as JSON Lines (to be converted
    to the SF2 csv format later).

    Optionally punches missing values (``None``) into every
    ``num_missing``-th target position; static features are always dropped
    (Forecast does not accept them), and dynamic real features are chopped
    to the target length for training files.
    """
    out_dir = os.path.dirname(filename)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    with open(filename, "w") as json_file:
        for ts in time_series:
            if is_missing:
                # Replace every num_missing-th value (never the first one)
                # with None to simulate missing observations.
                ts[FieldName.TARGET] = [
                    None if j != 0 and j % num_missing == 0 else val
                    for j, val in enumerate(ts[FieldName.TARGET])
                ]
            # For Forecast don't output feat_static_cat and feat_static_real
            ts.pop(FieldName.FEAT_STATIC_CAT, None)
            ts.pop(FieldName.FEAT_STATIC_REAL, None)
            # Chop features in training set
            if FieldName.FEAT_DYNAMIC_REAL in ts.keys() and "train" in filename:
                # TODO: Fix for missing values
                target_len = len(ts[FieldName.TARGET])
                ts[FieldName.FEAT_DYNAMIC_REAL] = [
                    feat[:target_len] for feat in ts[FieldName.FEAT_DYNAMIC_REAL]
                ]
            json.dump(ts, json_file)
            json_file.write("\n")
| 30,342 | 35.958587 | 118 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/utils.py | import shutil
from pathlib import Path
import numpy as np
import pandas as pd
import rapidjson as json
from .common import TrainDatasets, MetaData
from .file_dataset import FileDataset
def frequency_add(ts: pd.Timestamp, amount: int) -> pd.Timestamp:
    """Shift ``ts`` forward by ``amount`` steps of its own frequency."""
    step = ts.freq * amount
    return ts + step
def forecast_start(entry):
    """Timestamp of the first point after the target, i.e. where the
    forecast range begins."""
    target_length = len(entry["target"])
    return frequency_add(entry["start"], target_length)
def to_pandas(instance: dict, freq: str = None) -> pd.Series:
    """
    Transform a dictionary into a pandas.Series object, using its
    "start" and "target" fields.

    Parameters
    ----------
    instance
        Dictionary containing the time series data.
    freq
        Frequency to use in the pandas.Series index; when omitted, the
        frequency of the "start" timestamp is used.

    Returns
    -------
    pandas.Series
        Pandas time series object.
    """
    target, start = instance["target"], instance["start"]
    effective_freq = freq if freq else start.freqstr
    index = pd.date_range(start=start, periods=len(target), freq=effective_freq)
    return pd.Series(target, index=index)
def load_datasets(metadata, train, test, shuffle: bool = False) -> TrainDatasets:
    """
    Loads a dataset given metadata, train and test path.

    Parameters
    ----------
    metadata
        Path to the metadata file.
    train
        Path to the training dataset files.
    test
        Path to the test dataset files; a falsy value skips the test set.
    shuffle
        Whether the training data is shuffled while reading.

    Returns
    -------
    TrainDatasets
        An object collecting metadata, training data, test data.
    """
    meta = MetaData.parse_file(metadata)
    train_ds = FileDataset(train, meta.freq, shuffle=shuffle)
    if test:
        test_ds = FileDataset(test, meta.freq)
    else:
        test_ds = None
    return TrainDatasets(metadata=meta, train=train_ds, test=test_ds)
def save_datasets(dataset: TrainDatasets, path_str: str, overwrite=True) -> None:
    """
    Saves a TrainDatasets object to JSON Lines files under ``path_str``.

    Parameters
    ----------
    dataset
        The training datasets.
    path_str
        Where to save the dataset (metadata/, train/, and optionally test/
        subdirectories are created).
    overwrite
        Whether to delete a previous version in this folder first.
    """
    root = Path(path_str)
    if overwrite:
        shutil.rmtree(root, ignore_errors=True)

    def write_json_line(fp, payload):
        # One JSON document per line, UTF-8 encoded.
        fp.write(json.dumps(payload).encode("utf-8"))
        fp.write("\n".encode("utf-8"))

    (root / "metadata").mkdir(parents=True)
    with open(root / "metadata/metadata.json", "wb") as fp:
        write_json_line(fp, dataset.metadata.dict())

    (root / "train").mkdir(parents=True)
    with open(root / "train/data.json", "wb") as fp:
        for entry in dataset.train:
            write_json_line(fp, serialize_data_entry(entry))

    if dataset.test is not None:
        (root / "test").mkdir(parents=True)
        with open(root / "test/data.json", "wb") as fp:
            for entry in dataset.test:
                write_json_line(fp, serialize_data_entry(entry))
def serialize_data_entry(data):
    """
    Encode the numpy values in a DataEntry dictionary into lists so the
    dictionary can be JSON serialized.

    Parameters
    ----------
    data
        The dictionary to be transformed.

    Returns
    -------
    Dict
        The transformed dictionary: numpy arrays become (nested) lists with
        NaNs replaced by the string "NaN"; every other value becomes its
        string representation. Entries whose value is ``None`` are dropped.
    """

    def encode(value):
        if not isinstance(value, np.ndarray):
            return str(value)
        # circumvent https://github.com/micropython/micropython/issues/3511
        nan_mask = np.isnan(value)
        as_objects = value.astype(np.object_)
        as_objects[nan_mask] = "NaN"
        return as_objects.tolist()

    return {key: encode(value) for key, value in data.items() if value is not None}
| 3,603 | 26.097744 | 81 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/transformed_iterable_dataset.py | import itertools
from typing import Dict, Iterable, Iterator, Optional
import numpy as np
import torch
from pts.transform.transform import Transformation
from .common import DataEntry, Dataset
class TransformedIterableDataset(torch.utils.data.IterableDataset):
    """Iterable dataset that lazily applies a ``Transformation`` to a
    ``Dataset``.

    When ``is_forever`` is True (the default, used for training) the
    underlying dataset is cycled endlessly and the transformed iterator is
    created once and reused; otherwise a fresh single pass is made on every
    iteration (used for validation/inference).
    """

    def __init__(
        self, dataset: "Dataset", is_train: bool, transform: "Transformation", is_forever: bool = True,
    ) -> None:
        super().__init__()
        self.dataset = dataset
        self.transform = transform
        self.is_train = is_train
        # Lazily created transformed iterator; reused across epochs when
        # iterating forever.
        self._cur_iter: Optional[Iterator] = None
        self.is_forever = is_forever

    def _iterate_forever(self, collection: "Iterable[DataEntry]") -> "Iterator[DataEntry]":
        # Iterate forever over the collection; the collection must be
        # non-empty (checked at the start of every cycle).
        while True:
            try:
                # FIX: the original bound this probe to an unused `first`
                # variable; the value is only used as an emptiness check.
                next(iter(collection))
            except StopIteration:
                raise Exception("empty dataset")
            yield from collection

    def __iter__(self) -> Iterator[Dict[str, np.ndarray]]:
        if self.is_forever:
            if self._cur_iter is None:
                # only set once
                self._cur_iter = self.transform(
                    self._iterate_forever(self.dataset), is_train=self.is_train
                )
        else:
            # reset at start
            self._cur_iter = self.transform(self.dataset, is_train=self.is_train)
        assert self._cur_iter is not None
        for data_entry in self._cur_iter:
            # Keep only array-valued fields; cast floating arrays to
            # float32.  (FIX: dropped the redundant `== True` comparison.)
            yield {
                k: (v.astype(np.float32) if v.dtype.kind == "f" else v)
                for k, v in data_entry.items()
                if isinstance(v, np.ndarray)
            }
class TransformedListDataset(torch.utils.data.Dataset):
    """Map-style dataset applying a ``Transformation`` to one list item at
    a time on ``__getitem__``."""

    def __init__(
        self, dataset: list, is_train: bool, transform: "Transformation",
    ) -> None:
        super().__init__()
        self.dataset = dataset
        self.transform = transform
        self.is_train = is_train

    def __getitem__(self, idx):
        # Apply the transformation to a single-element view of the dataset
        # and unwrap the first (and only) transformed entry.
        transformed = self.transform([self.dataset[idx]], is_train=self.is_train)
        data_entry = next(transformed)
        # Keep only array-valued fields; cast floating arrays to float32.
        # (FIX: dropped the redundant `== True` comparison.)
        return {
            k: (v.astype(np.float32) if v.dtype.kind == "f" else v)
            for k, v in data_entry.items()
            if isinstance(v, np.ndarray)
        }

    def __len__(self) -> int:
        return len(self.dataset)
| 2,707 | 30.488372 | 99 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/common.py | from typing import Any, Dict, Iterable, NamedTuple, List, Optional
import pandas as pd
from pydantic import BaseModel
# Dictionary used for data flowing through the transformations.
DataEntry = Dict[str, Any]
# A Dataset is an iterable of DataEntry.
Dataset = Iterable[DataEntry]
class SourceContext(NamedTuple):
    """Provenance of a data entry: source file/path and row number."""

    source: str
    row: int
class FieldName:
    """
    A bundle of default field names to be used by clients when instantiating
    transformer instances.
    """

    ITEM_ID = "item_id"
    START = "start"
    TARGET = "target"
    ACC_TARGET_SUM = "accumulated_target"
    FEAT_STATIC_CAT = "feat_static_cat"
    FEAT_STATIC_REAL = "feat_static_real"
    FEAT_DYNAMIC_CAT = "feat_dynamic_cat"
    FEAT_DYNAMIC_REAL = "feat_dynamic_real"
    FEAT_DYNAMIC_PAST = "feat_dynamic_past"  # feature observable only in the past (no future information)
    FEAT_TIME = "time_feat"
    FEAT_CONST = "feat_dynamic_const"
    FEAT_AGE = "feat_dynamic_age"
    OBSERVED_VALUES = "observed_values"
    IS_PAD = "is_pad"
    FORECAST_START = "forecast_start"
class CategoricalFeatureInfo(BaseModel):
    """Metadata for a categorical feature.

    NOTE(review): ``cardinality`` is typed as a string — apparently a
    legacy schema convention; confirm before tightening to int.
    """

    name: str
    cardinality: str
class BasicFeatureInfo(BaseModel):
    """Metadata for a real-valued (non-categorical) feature."""

    name: str
class MetaData(BaseModel):
    """Dataset-level metadata: frequency, target, and feature descriptions."""

    # NOTE(review): non-Optional annotation with default None relies on
    # pydantic v1's implicit-Optional behavior.
    freq: str = None
    target: Optional[BasicFeatureInfo] = None
    feat_static_cat: List[CategoricalFeatureInfo] = []
    feat_static_real: List[BasicFeatureInfo] = []
    feat_dynamic_real: List[BasicFeatureInfo] = []
    feat_dynamic_cat: List[CategoricalFeatureInfo] = []
    prediction_length: Optional[int] = None
class TrainDatasets(NamedTuple):
    """
    A dataset containing two subsets, one to be used for training purposes,
    and the other for testing purposes, as well as metadata.
    """

    metadata: MetaData
    train: Dataset
    test: Optional[Dataset] = None  # the test split is optional
class DateConstants:
    """
    Default constants for specific dates.
    """

    # Bounds on timestamps the pipeline is willing to handle.
    OLDEST_SUPPORTED_TIMESTAMP = pd.Timestamp(1800, 1, 1, 12)
    LATEST_SUPPORTED_TIMESTAMP = pd.Timestamp(2200, 1, 1, 12)
| 1,997 | 22.785714 | 76 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/stat.py | import math
from collections import defaultdict
from typing import Any, List, NamedTuple, Optional, Set
import numpy as np
from tqdm import tqdm
from pts.exception import assert_pts
from .common import FieldName
class ScaleHistogram:
    """
    Scale histogram of a timeseries dataset.

    This counts the number of timeseries whose mean of absolute values is in
    the `[base ** i, base ** (i+1)]` range for all possible `i`.
    The number of entries with empty target is counted separately.

    Parameters
    ----------
    base
        Log-width of the histogram's buckets.
    bin_counts
        Initial per-bucket counts.
    empty_target_count
        Initial count of empty-target entries.
    """

    def __init__(
        self,
        base: float = 2.0,
        bin_counts: Optional[dict] = None,
        empty_target_count: int = 0,
    ) -> None:
        self._base = base
        self.bin_counts = defaultdict(int, {} if bin_counts is None else bin_counts)
        self.empty_target_count = empty_target_count
        self.__init_args__ = dict(
            base=self._base,
            bin_counts=self.bin_counts,
            empty_target_count=empty_target_count,
        )

    def bucket_index(self, target_values):
        """Bucket index for a non-empty array of target values."""
        assert len(target_values) > 0
        scale = np.mean(np.abs(target_values))
        return int(math.log(scale + 1.0, self._base))

    def add(self, target_values):
        """Record one series into the histogram."""
        if len(target_values) == 0:
            self.empty_target_count += 1
        else:
            self.bin_counts[self.bucket_index(target_values)] += 1

    def count(self, target):
        """Number of recorded series falling in the same bucket as ``target``."""
        if len(target) == 0:
            return self.empty_target_count
        return self.bin_counts[self.bucket_index(target)]

    def __len__(self):
        return sum(self.bin_counts.values()) + self.empty_target_count

    def __eq__(self, other):
        if not isinstance(other, ScaleHistogram):
            return False
        return (
            self.bin_counts == other.bin_counts
            and self.empty_target_count == other.empty_target_count
            and self._base == other._base
        )

    def __str__(self):
        lines = [
            "count of scales in {min}-{max}:{count}".format(
                min=self._base ** bucket - 1,
                max=self._base ** (bucket + 1) - 1,
                count=n,
            )
            for bucket, n in sorted(self.bin_counts.items(), key=lambda kv: kv[0])
        ]
        return "\n".join(lines)
class DatasetStatistics(NamedTuple):
    """
    A NamedTuple to store the statistics of a Dataset.
    """

    integer_dataset: bool
    max_target: float
    mean_abs_target: float
    mean_target: float
    mean_target_length: float
    min_target: float
    feat_static_real: List[Set[float]]
    feat_static_cat: List[Set[int]]
    num_feat_dynamic_real: Optional[int]
    num_feat_dynamic_cat: Optional[int]
    num_missing_values: int
    num_time_observations: int
    num_time_series: int
    scale_histogram: ScaleHistogram

    # DO NOT override the __str__ method, since we rely that we can load
    # DatasetStatistics again; i.e. stats == eval(str(stats))
    def __eq__(self, other):
        # Field-wise comparison; float fields are compared with a relative
        # tolerance of 0.01% instead of exact equality.
        for x, y in zip(self._asdict().values(), other._asdict().values()):
            if isinstance(x, float):
                if abs(x - y) > abs(0.0001 * x):
                    return False
            elif x != y:
                return False
        return True
# TODO: reorganize modules to avoid circular dependency
# TODO: and substitute Any with Dataset
def calculate_dataset_statistics(ts_dataset: Any) -> DatasetStatistics:
    """
    Computes the statistics of a given Dataset.

    Parameters
    ----------
    ts_dataset
        Dataset of which to compute the statistics.

    Returns
    -------
    DatasetStatistics
        NamedTuple containing the statistics.
    """
    # Running aggregates over all series.
    num_time_observations = 0
    num_time_series = 0
    min_target = 1e20
    max_target = -1e20
    sum_target = 0.0
    sum_abs_target = 0.0
    integer_dataset = True
    # Feature bookkeeping is initialized lazily on the first series so the
    # feature counts can be validated to be consistent across all series.
    observed_feat_static_cat: Optional[List[Set[int]]] = None
    observed_feat_static_real: Optional[List[Set[float]]] = None
    num_feat_static_real: Optional[int] = None
    num_feat_static_cat: Optional[int] = None
    num_feat_dynamic_real: Optional[int] = None
    num_feat_dynamic_cat: Optional[int] = None
    num_missing_values = 0
    scale_histogram = ScaleHistogram()
    with tqdm(enumerate(ts_dataset, start=1), total=len(ts_dataset)) as it:
        for num_time_series, ts in it:
            # TARGET
            target = ts[FieldName.TARGET]
            observed_target = target[~np.isnan(target)]
            num_observations = len(observed_target)
            if num_observations > 0:
                # 'nan' is handled in observed_target definition
                assert_pts(
                    np.all(np.isfinite(observed_target)),
                    "Target values have to be finite (e.g., not inf, -inf, "
                    "or None) and cannot exceed single precision floating "
                    "point range.",
                )
                num_time_observations += num_observations
                min_target = float(min(min_target, observed_target.min()))
                max_target = float(max(max_target, observed_target.max()))
                num_missing_values += int(np.isnan(target).sum())
                sum_target += float(observed_target.sum())
                sum_abs_target += float(np.abs(observed_target).sum())
                integer_dataset = integer_dataset and bool(
                    np.all(np.mod(observed_target, 1) == 0)
                )
            scale_histogram.add(observed_target)  # after checks for inf and None
            # FEAT_STATIC_CAT
            feat_static_cat = (
                ts[FieldName.FEAT_STATIC_CAT] if FieldName.FEAT_STATIC_CAT in ts else []
            )
            if num_feat_static_cat is None:
                num_feat_static_cat = len(feat_static_cat)
                observed_feat_static_cat = [set() for _ in range(num_feat_static_cat)]
            # needed to type check
            assert num_feat_static_cat is not None
            assert observed_feat_static_cat is not None
            assert_pts(
                num_feat_static_cat == len(feat_static_cat),
                "Not all feat_static_cat vectors have the same length {} != {}.",
                num_feat_static_cat,
                len(feat_static_cat),
            )
            for i, c in enumerate(feat_static_cat):
                observed_feat_static_cat[i].add(c)
            # FEAT_STATIC_REAL
            feat_static_real = (
                ts[FieldName.FEAT_STATIC_REAL]
                if FieldName.FEAT_STATIC_REAL in ts
                else []
            )
            if num_feat_static_real is None:
                num_feat_static_real = len(feat_static_real)
                observed_feat_static_real = [set() for _ in range(num_feat_static_real)]
            # needed to type check
            assert num_feat_static_real is not None
            assert observed_feat_static_real is not None
            assert_pts(
                num_feat_static_real == len(feat_static_real),
                "Not all feat_static_real vectors have the same length {} != {}.",
                num_feat_static_real,
                len(feat_static_real),
            )
            for i, c in enumerate(feat_static_real):
                observed_feat_static_real[i].add(c)
            # FEAT_DYNAMIC_CAT
            feat_dynamic_cat = (
                ts[FieldName.FEAT_DYNAMIC_CAT]
                if FieldName.FEAT_DYNAMIC_CAT in ts
                else None
            )
            if feat_dynamic_cat is None:
                # feat_dynamic_cat not found, check it was the first ts we encounter or
                # that feat_dynamic_cat were seen before
                assert_pts(
                    num_feat_dynamic_cat is None or num_feat_dynamic_cat == 0,
                    "feat_dynamic_cat was found for some instances but not others.",
                )
                num_feat_dynamic_cat = 0
            else:
                if num_feat_dynamic_cat is None:
                    # first num_feat_dynamic_cat found
                    num_feat_dynamic_cat = feat_dynamic_cat.shape[0]
                else:
                    assert_pts(
                        num_feat_dynamic_cat == feat_dynamic_cat.shape[0],
                        "Found instances with different number of features in "
                        "feat_dynamic_cat, found one with {} and another with {}.",
                        num_feat_dynamic_cat,
                        feat_dynamic_cat.shape[0],
                    )
                assert_pts(
                    np.all(np.isfinite(feat_dynamic_cat)),
                    "Features values have to be finite and cannot exceed single "
                    "precision floating point range.",
                )
                num_feat_dynamic_cat_time_steps = feat_dynamic_cat.shape[1]
                assert_pts(
                    num_feat_dynamic_cat_time_steps == len(target),
                    "Each feature in feat_dynamic_cat has to have the same length as "
                    "the target. Found an instance with feat_dynamic_cat of length {} "
                    "and a target of length {}.",
                    num_feat_dynamic_cat_time_steps,
                    len(target),
                )
            # FEAT_DYNAMIC_REAL (mirrors the feat_dynamic_cat checks above)
            feat_dynamic_real = (
                ts[FieldName.FEAT_DYNAMIC_REAL]
                if FieldName.FEAT_DYNAMIC_REAL in ts
                else None
            )
            if feat_dynamic_real is None:
                # feat_dynamic_real not found, check it was the first ts we encounter or
                # that feat_dynamic_real were seen before
                assert_pts(
                    num_feat_dynamic_real is None or num_feat_dynamic_real == 0,
                    "feat_dynamic_real was found for some instances but not others.",
                )
                num_feat_dynamic_real = 0
            else:
                if num_feat_dynamic_real is None:
                    # first num_feat_dynamic_real found
                    num_feat_dynamic_real = feat_dynamic_real.shape[0]
                else:
                    assert_pts(
                        num_feat_dynamic_real == feat_dynamic_real.shape[0],
                        "Found instances with different number of features in "
                        "feat_dynamic_real, found one with {} and another with {}.",
                        num_feat_dynamic_real,
                        feat_dynamic_real.shape[0],
                    )
                assert_pts(
                    np.all(np.isfinite(feat_dynamic_real)),
                    "Features values have to be finite and cannot exceed single "
                    "precision floating point range.",
                )
                num_feat_dynamic_real_time_steps = feat_dynamic_real.shape[1]
                assert_pts(
                    num_feat_dynamic_real_time_steps == len(target),
                    "Each feature in feat_dynamic_real has to have the same length as "
                    "the target. Found an instance with feat_dynamic_real of length {} "
                    "and a target of length {}.",
                    num_feat_dynamic_real_time_steps,
                    len(target),
                )
    assert_pts(num_time_series > 0, "Time series dataset is empty!")
    assert_pts(
        num_time_observations > 0, "Only empty time series found in the dataset!",
    )
    # note this require the above assumption to avoid a division by zero
    # runtime error
    mean_target_length = num_time_observations / num_time_series
    # note this require the above assumption to avoid a division by zero
    # runtime error
    mean_target = sum_target / num_time_observations
    mean_abs_target = sum_abs_target / num_time_observations
    integer_dataset = integer_dataset and min_target >= 0.0
    # Every series (including empty-target ones) was added to the histogram.
    assert len(scale_histogram) == num_time_series
    return DatasetStatistics(
        integer_dataset=integer_dataset,
        max_target=max_target,
        mean_abs_target=mean_abs_target,
        mean_target=mean_target,
        mean_target_length=mean_target_length,
        min_target=min_target,
        num_missing_values=num_missing_values,
        feat_static_real=observed_feat_static_real if observed_feat_static_real else [],
        feat_static_cat=observed_feat_static_cat if observed_feat_static_cat else [],
        num_feat_dynamic_real=num_feat_dynamic_real,
        num_feat_dynamic_cat=num_feat_dynamic_cat,
        num_time_observations=num_time_observations,
        num_time_series=num_time_series,
        scale_histogram=scale_histogram,
    )
| 12,942 | 36.625 | 88 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/file_dataset.py | import functools
import glob
import random
from pathlib import Path
from typing import Iterator, List
from typing import NamedTuple
import rapidjson as json
from .common import Dataset, DataEntry, SourceContext
from .process import ProcessDataEntry
def load(file_obj):
    """Yield one decoded JSON document per line of ``file_obj``."""
    for raw_line in file_obj:
        yield json.loads(raw_line)
class Span(NamedTuple):
    """Location of a line within a source file."""

    path: Path
    line: int
class Line(NamedTuple):
    """A parsed JSON line together with its source location."""

    content: object
    span: Span
class JsonLinesFile:
    """
    An iterable type that draws from a JSON Lines file.

    Parameters
    ----------
    path
        Path of the file to load data from. This should be a valid
        JSON Lines file.
    shuffle
        Whether lines are yielded in random order instead of file order.
    """

    def __init__(self, path: Path, shuffle: bool = True) -> None:
        self.path = path
        self.shuffle = shuffle

    def __iter__(self):
        with open(self.path) as jsonl_file:
            lines = jsonl_file.read().splitlines()
        if self.shuffle:
            random.shuffle(lines)
        for line_number, raw in enumerate(lines, start=1):
            span = Span(path=self.path, line=line_number)
            try:
                parsed = json.loads(raw)
            except ValueError:
                raise Exception(f"Could not read json line {line_number}, {raw}")
            yield Line(parsed, span=span)

    def __len__(self):
        # Count newline characters in 1MB chunks instead of parsing lines.
        BUF_SIZE = 1024 ** 2
        with open(self.path) as file_obj:
            read_chunk = functools.partial(file_obj.read, BUF_SIZE)
            return sum(chunk.count("\n") for chunk in iter(read_chunk, ""))
class FileDataset(Dataset):
    """
    Dataset that loads JSON Lines files contained in a path.

    Parameters
    ----------
    path
        Glob pattern of the files to load. Each matched file must be a
        valid JSON Lines file; a valid line looks like
        {"start": "2014-09-07", "target": [0.1, 0.2]}.
    freq
        Frequency of the observation in the time series.
        Must be a valid Pandas frequency.
    one_dim_target
        Whether to accept only univariate target time series.
    shuffle
        Whether to shuffle the files and their lines while iterating.
    """

    def __init__(
        self, path: Path, freq: str, one_dim_target: bool = True, shuffle: bool = False
    ) -> None:
        self.shuffle = shuffle
        self.path = path
        self.process = ProcessDataEntry(freq, one_dim_target=one_dim_target)
        if not self.files():
            raise OSError(f"no valid file found via {path}")

    def __iter__(self) -> Iterator[DataEntry]:
        for file_path in self.files():
            for line in JsonLinesFile(file_path, self.shuffle):
                entry = self.process(line.content)
                # Attach provenance so bad entries can be traced back.
                entry["source"] = SourceContext(
                    source=line.span.path, row=line.span.line
                )
                yield entry

    def __len__(self):
        return sum(len(JsonLinesFile(file_path)) for file_path in self.files())

    def files(self) -> List[Path]:
        """
        List the files that compose the dataset.

        Returns
        -------
        List[Path]
            List of the paths of all files composing the dataset.
        """
        matched = glob.glob(str(self.path))
        if self.shuffle:
            random.shuffle(matched)
        return matched
| 3,319 | 26.666667 | 87 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/__init__.py | from .artificial import (
ArtificialDataset,
ConstantDataset,
ComplexSeasonalTimeSeries,
RecipeDataset,
constant_dataset,
default_synthetic,
generate_sf2,
)
from .common import (
DataEntry,
FieldName,
Dataset,
MetaData,
TrainDatasets,
DateConstants,
)
from .file_dataset import FileDataset
from .list_dataset import ListDataset
from .loader import TrainDataLoader, InferenceDataLoader
from .multivariate_grouper import MultivariateGrouper
from .process import ProcessStartField, ProcessDataEntry
from .stat import DatasetStatistics, ScaleHistogram, calculate_dataset_statistics
from .transformed_iterable_dataset import TransformedIterableDataset, TransformedListDataset
from .utils import (
to_pandas,
load_datasets,
save_datasets,
serialize_data_entry,
frequency_add,
forecast_start,
)
| 864 | 25.212121 | 92 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/process.py | from functools import lru_cache
from typing import Callable, List, cast
import numpy as np
import pandas as pd
from pandas.tseries.offsets import Tick
from .common import DataEntry
class ProcessStartField:
    """Callable that converts the ``start`` field of a data entry into a
    frequency-aligned ``pd.Timestamp``.

    NOTE(review): relies on ``Timestamp.freq`` semantics, which were
    removed in pandas >= 2.0 — this code appears to require pandas 1.x.
    """

    def __init__(self, name: str, freq: str) -> None:
        # name: key of the field to process; freq: pandas frequency string.
        self.name = name
        self.freq = freq

    def __call__(self, data: DataEntry) -> DataEntry:
        # Wrap parse failures with the field name for easier debugging.
        try:
            value = ProcessStartField.process(data[self.name], self.freq)
        except (TypeError, ValueError) as e:
            raise Exception(f'Error "{e}" occurred when reading field "{self.name}"')
        data[self.name] = value
        return data

    @staticmethod
    @lru_cache(maxsize=10000)
    def process(string: str, freq: str) -> pd.Timestamp:
        """Parse ``string`` and align it to ``freq``; results are memoized
        since start timestamps repeat heavily across entries."""
        timestamp = pd.Timestamp(string, freq=freq)
        # operate on time information (days, hours, minute, second)
        if isinstance(timestamp.freq, Tick):
            return pd.Timestamp(timestamp.floor(timestamp.freq), timestamp.freq)
        # since we are only interested in the data piece, we normalize the
        # time information
        timestamp = timestamp.replace(
            hour=0, minute=0, second=0, microsecond=0, nanosecond=0
        )
        return timestamp.freq.rollforward(timestamp)
class ProcessTimeSeriesField:
    """Callable that converts a (possibly missing) time-series field of a
    data entry into a numpy array of the expected rank and dtype.

    Parameters
    ----------
    name
        Key of the field to process.
    is_required
        If True, a missing field raises an exception.
    is_static
        Static fields are 1-D; dynamic fields are 2-D (a 1-D value is
        promoted by prepending an axis).
    is_cat
        Categorical fields become int64; real-valued fields float32.
    """

    def __init__(self, name, is_required: bool, is_static: bool, is_cat: bool) -> None:
        self.name = name
        self.is_required = is_required
        self.req_ndim = 1 if is_static else 2
        self.dtype = np.int64 if is_cat else np.float32

    def __call__(self, data: "DataEntry") -> "DataEntry":
        value = data.get(self.name, None)
        if value is None:
            if self.is_required:
                raise Exception(f"JSON object is missing a required field `{self.name}`")
            # Optional field absent: pass the entry through unchanged.
            return data
        value = np.asarray(value, dtype=self.dtype)
        dim_diff = self.req_ndim - value.ndim
        if dim_diff == 1:
            # Promote a 1-D value to the required 2-D shape.
            value = np.expand_dims(a=value, axis=0)
        elif dim_diff != 0:
            # BUG FIX: the original message interpolated the dimension
            # *difference* instead of the actual number of dimensions.
            raise Exception(
                f"JSON array has bad shape - expected {self.req_ndim} "
                f"dimensions got {value.ndim}"
            )
        data[self.name] = value
        return data
class ProcessDataEntry:
def __init__(self, freq: str, one_dim_target: bool = True) -> None:
self.trans = cast(
List[Callable[[DataEntry], DataEntry]],
[
ProcessStartField("start", freq=freq),
ProcessTimeSeriesField(
"target", is_required=True, is_cat=False, is_static=one_dim_target
),
ProcessTimeSeriesField(
"accumulated_target", is_required=True, is_cat=False, is_static=one_dim_target
),
ProcessTimeSeriesField(
"feat_dynamic_cat", is_required=False, is_cat=True, is_static=False
),
ProcessTimeSeriesField(
"feat_dynamic_real",
is_required=False,
is_cat=False,
is_static=False,
),
ProcessTimeSeriesField(
"feat_dynamic_past",
is_required=False,
is_cat=False,
is_static=False,
),
ProcessTimeSeriesField(
"feat_static_cat", is_required=False, is_cat=True, is_static=True
),
ProcessTimeSeriesField(
"feat_static_real", is_required=False, is_cat=False, is_static=True
),
],
)
def __call__(self, data: DataEntry) -> DataEntry:
for t in self.trans:
data = t(data)
return data
| 3,843 | 33.321429 | 100 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/loader.py | import itertools
from collections import defaultdict
from typing import Any, Dict, Iterable, Iterator, List, Optional # noqa: F401
import numpy as np
# Third-party imports
import torch
from pts.transform.transform import Transformation
# First-party imports
from .common import DataEntry, Dataset
DataBatch = Dict[str, Any]
class BatchBuffer:
def __init__(
self, batch_size: int, device: torch.device, dtype: np.dtype = np.float32
) -> None:
self._buffers: Dict[Any, List[Any]] = defaultdict(list)
self.batch_size = batch_size
self._size = 0
self.device = device
self.dtype = dtype
def add(self, d: Dict[str, List[np.ndarray]]):
if self._buffers:
assert self._buffers.keys() == d.keys()
for k, v in d.items():
self._buffers[k].append(v)
self._size += 1
def __len__(self):
return self._size
def next_batch(self) -> DataBatch:
assert self._size > 0
n = min(self._size, self.batch_size)
batch = {k: self.stack(v[:n]) for k, v in self._buffers.items()}
for key in self._buffers.keys():
self._buffers[key] = self._buffers[key][n:]
self._size -= n
return batch
def stack(self, xs):
if isinstance(xs[0], np.ndarray):
data = np.asarray(xs)
if data.dtype.kind == "f":
data = data.astype(self.dtype)
return torch.from_numpy(data).to(device=self.device, non_blocking=True)
elif isinstance(xs[0], torch.Tensor):
return torch.stack(*xs)
else:
return xs # stack all other types as list
def shuffle(self):
perm = np.random.permutation(self._size)
for key in self._buffers.keys():
li = self._buffers[key]
self._buffers[key] = [li[i] for i in perm]
class DataLoader(Iterable[DataEntry]):
"""
An abstract Iterable type for iterating and transforming a dataset,
in batches of a prescribed size.
Parameters
----------
dataset
The dataset from which to load data.
transform
A transformation to apply to each entry in the dataset.
batch_size
The size of the batches to emit.
device
device to use to store data on.
dtype
Floating point type to use.
"""
def __init__(
self,
dataset: Dataset,
transform: Transformation,
batch_size: int,
device: torch.device,
dtype: np.dtype = np.float32,
) -> None:
self.dataset = dataset
self.transform = transform
self.batch_size = batch_size
self.device = device
self.dtype = dtype
class TrainDataLoader(DataLoader):
"""
An Iterable type for iterating and transforming a dataset, in batches of a
prescribed size, until a given number of batches is reached.
The transformation are applied with in training mode, i.e. with the flag
`is_train = True`.
Parameters
----------
dataset
The dataset from which to load data.
transform
A transformation to apply to each entry in the dataset.
batch_size
The size of the batches to emit.
device
device to use to store data on.
num_batches_per_epoch
Number of batches to return in one complete iteration over this object.
dtype
Floating point type to use.
"""
def __init__(
self,
dataset: Dataset,
transform: Transformation,
batch_size: int,
device: torch.device,
num_batches_per_epoch: int,
dtype: np.dtype = np.float32,
shuffle_for_training: bool = True,
num_batches_for_shuffling: int = 10,
) -> None:
super().__init__(dataset, transform, batch_size, device, dtype)
self.num_batches_per_epoch = num_batches_per_epoch
self.shuffle_for_training = shuffle_for_training
self._num_buffered_batches = (
num_batches_for_shuffling if shuffle_for_training else 1
)
self._cur_iter: Optional[Iterator] = None
self._buffer = BatchBuffer(self.batch_size, device, dtype)
def _emit_batches_while_buffer_larger_than(self, thresh) -> Iterator[DataBatch]:
if self.shuffle_for_training:
self._buffer.shuffle()
while len(self._buffer) > thresh:
yield self._buffer.next_batch()
def _iterate_forever(self, collection: Iterable[DataEntry]) -> Iterator[DataEntry]:
# iterate forever over the collection, the collection must be non empty
while True:
try:
first = next(iter(collection))
except StopIteration:
raise Exception("empty dataset")
else:
for x in itertools.chain([first], collection):
yield x
def __len__(self) -> int:
return self.num_batches_per_epoch
def __iter__(self) -> Iterator[DataBatch]:
batch_count = 0
if self._cur_iter is None:
self._cur_iter = self.transform(
self._iterate_forever(self.dataset), is_train=True
)
assert self._cur_iter is not None
while True:
data_entry = next(self._cur_iter)
self._buffer.add(data_entry)
if len(self._buffer) >= self._num_buffered_batches * self.batch_size:
for batch in self._emit_batches_while_buffer_larger_than(
self.batch_size - 1
):
yield batch
batch_count += 1
if batch_count >= self.num_batches_per_epoch:
return
class ValidationDataLoader(DataLoader):
"""
An Iterable type for iterating and transforming a dataset just once, in
batches of a prescribed size.
The transformation are applied with in training mode, i.e. with the flag
`is_train = True`.
Parameters
----------
dataset
The dataset from which to load data.
transform
A transformation to apply to each entry in the dataset.
batch_size
The size of the batches to emit.
device
device to use to store data on.
dtype
Floating point type to use.
"""
def __iter__(self) -> Iterator[DataBatch]:
buffer = BatchBuffer(self.batch_size, self.device, self.dtype)
for data_entry in self.transform(iter(self.dataset), is_train=True):
buffer.add(data_entry)
if len(buffer) >= self.batch_size:
yield buffer.next_batch()
if len(buffer) > 0:
yield buffer.next_batch()
class InferenceDataLoader(DataLoader):
"""
An Iterable type for iterating and transforming a dataset just once, in
batches of a prescribed size.
The transformation are applied with in inference mode, i.e. with the flag
`is_train = False`.
Parameters
----------
dataset
The dataset from which to load data.
transform
A transformation to apply to each entry in the dataset.
batch_size
The size of the batches to emit.
device
device to use to store data on.
dtype
Floating point type to use.
"""
def __iter__(self) -> Iterator[DataBatch]:
buffer = BatchBuffer(self.batch_size, self.device, self.dtype)
for data_entry in self.transform(iter(self.dataset), is_train=False):
buffer.add(data_entry)
if len(buffer) >= self.batch_size:
yield buffer.next_batch()
if len(buffer) > 0:
yield buffer.next_batch()
| 7,684 | 30.756198 | 87 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/recipe.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import itertools
import operator
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
)
# Third-party imports
import numpy as np
import pandas as pd
# First-party imports
from .common import DataEntry
ValueOrCallable = Union[Any, Callable]
Recipe = List[Tuple[str, Callable]]
Env = Dict[str, Any]
def resolve(val_or_callable: ValueOrCallable, context: Env, *args, **kwargs):
if callable(val_or_callable):
return val_or_callable(context, *args, **kwargs)
elif isinstance(val_or_callable, str):
return context[val_or_callable]
else:
return val_or_callable
def generate(
length: int,
recipe: Union[Callable, Recipe],
start: pd.Timestamp,
global_state: Optional[dict] = None,
seed: int = 0,
item_id_prefix: str = "",
) -> Iterator[DataEntry]:
np.random.seed(seed)
if global_state is None:
global_state = {}
if isinstance(recipe, list):
for x in itertools.count():
data: DataEntry = {}
for k, f in recipe:
data[k] = resolve(
f, data, length=length, field_name=k, global_state=global_state,
)
yield dict(**data, item_id=item_id_prefix + str(x), start=start)
else:
assert callable(recipe)
for x in itertools.count():
data = recipe(length=length, global_state=global_state)
yield dict(**data, item_id=item_id_prefix + str(x), start=start)
def evaluate(
funcs: Recipe, length: int, *args, global_state: dict = None, **kwargs
) -> Env:
if global_state is None:
global_state = {}
if "length" in kwargs:
del kwargs["length"]
if "field_name" in kwargs:
del kwargs["field_name"]
if "global_state" in kwargs:
del kwargs["global_state"]
data: DataEntry = {}
for k, f in funcs:
try:
data[k] = resolve(
f,
data,
length=length,
field_name=k,
global_state=global_state,
*args,
**kwargs
)
except ValueError as e:
raise ValueError('Error while evaluating key "{}"'.format(k), e)
return data
def make_func(
length: int, funcs: Recipe, global_state=None
) -> Callable[[int, Env], DataEntry]:
if global_state is None:
global_state = {}
def f(length=length, global_state=global_state, *args, **kwargs):
data = {}
for k, f in funcs:
data[k] = resolve(
f,
data,
length=length,
field_name=k,
global_state=global_state,
*args,
**kwargs
)
return data
return f
def take_as_list(iterator, num):
return list(itertools.islice(iterator, num))
class Debug:
def __init__(self, print_global=False) -> None:
self.print_global = print_global
def __call__(self, x: Env, global_state, **kwargs):
print(x)
if self.print_global:
print(global_state)
return 0
class Lifted:
def __add__(self, other):
return LiftedAdd(self, other)
def __radd__(self, other):
return LiftedAdd(other, self)
def __sub__(self, other):
return LiftedSub(self, other)
def __rsub__(self, other):
return LiftedSub(other, self)
def __mul__(self, other):
return LiftedMul(self, other, operator.mul)
def __rmul__(self, other):
return LiftedMul(other, self, operator.mul)
def __truediv__(self, other):
return LiftedTruediv(self, other, operator.truediv)
def __rtruediv__(self, other):
return LiftedTruediv(other, self, operator.truediv)
def __call__(
self, x: Env, length: int, field_name: str, global_state: Dict, *args, **kwargs
):
pass
class LiftedBinaryOp(Lifted):
def __init__(self, left, right, op) -> None:
self.left = left
self.right = right
self.op = op
def __call__(self, *args, **kwargs):
left = resolve(self.left, *args, **kwargs)
right = resolve(self.right, *args, **kwargs)
return self.op(left, right)
class LiftedAdd(LiftedBinaryOp):
def __init__(self, left, right) -> None:
super().__init__(left, right, operator.add)
class LiftedSub(LiftedBinaryOp):
def __init__(self, left, right) -> None:
super().__init__(left, right, operator.sub)
class LiftedMul(LiftedBinaryOp):
def __init__(self, left, right) -> None:
super().__init__(left, right, operator.mul)
class LiftedTruediv(LiftedBinaryOp):
def __init__(self, left, right) -> None:
super().__init__(left, right, operator.truediv)
class RandomGaussian(Lifted):
def __init__(
self, stddev: ValueOrCallable = 1.0, shape: Sequence[int] = (0,)
) -> None:
self.stddev = stddev
self.shape = shape
def __call__(self, x: Env, length: int, *args, **kwargs):
stddev = resolve(self.stddev, x, length, *args, **kwargs)
s = np.array(self.shape)
s[s == 0] = length
return stddev * np.random.randn(*s)
# Binary recipe that returns 1 if date is in holidays list and 0 otherwise
class BinaryHolidays(Lifted):
# TODO: holidays is type List[datetime.date]
def __init__(self, dates: List[pd.Timestamp], holidays: List[Any]) -> None:
self.dates = dates
self.holidays = holidays
def __call__(self, *args, **kwargs):
length = len(self.dates)
out = np.ones(length)
for i, date in enumerate(self.dates):
# Convert to string to check if inside of holidays datatime.date
if date.date() in self.holidays:
out[i] = 1.0
else:
out[i] = 0.0
return out
class RandomBinary(Lifted):
def __init__(self, prob: ValueOrCallable = 0.1) -> None:
self.prob = prob
def __call__(self, x: Env, length: int, *args, **kwargs):
prob = resolve(self.prob, x, length, *args, **kwargs)
return 1.0 * (np.random.rand(length) < prob)
class RandomSymmetricDirichlet(Lifted):
def __init__(
self, alpha: ValueOrCallable = 1.0, shape: Sequence[int] = (0,)
) -> None:
self.alpha = alpha
self.shape = shape
def __call__(self, x, length, *args, **kwargs):
alpha = resolve(self.alpha, x, length, *args, **kwargs)
s = np.array(self.shape)
s[s == 0] = length
return np.random.dirichlet(alpha * np.ones(s))
class BinaryMarkovChain(Lifted):
def __init__(
self, one_to_zero: ValueOrCallable, zero_to_one: ValueOrCallable
) -> None:
self.one_to_zero = one_to_zero
self.zero_to_one = zero_to_one
def __call__(self, x: Env, length: int, *args, **kwargs):
probs = np.zeros(2)
probs[0] = resolve(self.zero_to_one, x, length, *args, **kwargs)
probs[1] = resolve(self.one_to_zero, x, length, *args, **kwargs)
out = np.ones(length, dtype=np.int) # initial state is 1
uu = np.random.rand(length)
for i in range(1, length):
if uu[i] < probs[out[i - 1]]:
out[i] = 1 - out[i - 1]
else:
out[i] = out[i - 1]
return out
class Constant(Lifted):
def __init__(self, constant) -> None:
self.constant = constant
def __call__(self, *args, **kwargs):
return self.constant
class ConstantVec(Lifted):
def __init__(self, constant: ValueOrCallable) -> None:
self.constant = constant
def __call__(self, x: Env, length: int, *args, **kwargs):
constant = resolve(self.constant, x, length, *args, **kwargs)
return constant * np.ones(length)
class NormalizeMax(Lifted):
def __init__(self, input) -> None:
self.input = input
def __call__(self, x: Env, *args, **kwargs):
inp = resolve(self.input, x, *args, kwargs)
return inp / np.max(inp)
class OnesLike(Lifted):
def __init__(self, other) -> None:
self.other = other
def __call__(self, x, length, *args, **kwargs):
other = resolve(self.other, x, length, **kwargs)
return np.ones_like(other)
class LinearTrend(Lifted):
def __init__(self, slope: ValueOrCallable = 1.0) -> None:
self.slope = slope
def __call__(self, x, length, *args, **kwargs):
slope = resolve(self.slope, x, length, *args, **kwargs)
return slope * np.arange(length) / length
class RandomCat:
def __init__(
self,
cardinalities: List[int],
prob_fun: Callable = RandomSymmetricDirichlet(alpha=1.0, shape=(0,)),
) -> None:
self.cardinalities = cardinalities
self.prob_fun = prob_fun
def __call__(self, x, field_name, global_state, **kwargs):
if field_name not in global_state:
probs = [self.prob_fun(x, length=c) for c in self.cardinalities]
global_state[field_name] = probs
probs = global_state[field_name]
cats = np.array(
[
np.random.choice(np.arange(len(probs[i])), p=probs[i])
for i in range(len(probs))
]
)
return cats
class Lag(Lifted):
def __init__(
self, input: ValueOrCallable, lag: ValueOrCallable = 0, pad_const: int = 0,
) -> None:
self.input = input
self.lag = lag
self.pad_const = pad_const
def __call__(self, x, *args, **kwargs):
feat = resolve(self.input, x, *args, **kwargs)
lag = resolve(self.lag, x, *args, **kwargs)
if lag > 0:
lagged_feat = np.concatenate((self.pad_const * np.ones(lag), feat[:-lag]))
elif lag < 0:
lagged_feat = np.concatenate((feat[-lag:], self.pad_const * np.ones(-lag)))
else:
lagged_feat = feat
return lagged_feat
class ForEachCat(Lifted):
def __init__(self, fun, cat_field="cat", cat_idx=0) -> None:
self.fun = fun
self.cat_field = cat_field
self.cat_idx = cat_idx
def __call__(
self, x: Env, length: int, field_name: str, global_state: Dict, *args, **kwargs
):
c = x[self.cat_field][self.cat_idx]
if field_name not in global_state:
global_state[field_name] = np.empty(
len(global_state[self.cat_field][self.cat_idx]), dtype=np.object,
)
if global_state[field_name][c] is None:
global_state[field_name][c] = self.fun(
x, length=length, field_name=field_name, *args, **kwargs
)
return global_state[field_name][c]
class Eval(Lifted):
def __init__(self, expr: str) -> None:
self.expr = expr
def __call__(self, x: Env, length: int, *args, **kwargs):
return eval(self.expr, globals(), dict(x=x, length=length, **kwargs))
class SmoothSeasonality(Lifted):
def __init__(self, period: ValueOrCallable, phase: ValueOrCallable) -> None:
self.period = period
self.phase = phase
def __call__(self, x: Env, length: int, *args, **kwargs):
period = resolve(self.period, x, length, *args, **kwargs)
phase = resolve(self.phase, x, length, *args, **kwargs)
return (np.sin(2.0 / period * np.pi * (np.arange(length) + phase)) + 1) / 2.0
class Add(Lifted):
def __init__(self, inputs: List[ValueOrCallable]) -> None:
self.inputs = inputs
def __call__(self, x: Env, length: int, *args, **kwargs):
return sum([resolve(k, x, length, *args, **kwargs) for k in self.inputs])
class Mul(Lifted):
def __init__(self, inputs) -> None:
self.inputs = inputs
def __call__(self, x: Env, length: int, *args, **kwargs):
return functools.reduce(
operator.mul, [resolve(k, x, length, *args, **kwargs) for k in self.inputs],
)
class NanWhere(Lifted):
def __init__(self, source: ValueOrCallable, nan_indicator: ValueOrCallable) -> None:
self.source = source
self.nan_indicator = nan_indicator
def __call__(self, x: Env, length: int, *args, **kwargs):
source = resolve(self.source, x, length, *args, **kwargs)
nan_indicator = resolve(self.nan_indicator, x, length, *args, **kwargs)
out = source.copy()
out[nan_indicator == 1] = np.nan
return out
class OneMinus(Lifted):
def __init__(self, source: ValueOrCallable) -> None:
self.source = source
def __call__(self, x: Env, length: int, *args, **kwargs):
value = resolve(self.source, x, length, *args, **kwargs)
return 1 - value
class Concatenate(Lifted):
def __init__(self, inputs: List[ValueOrCallable], axis: int = 0) -> None:
self.inputs = inputs
self.axis = axis
def __call__(self, x: Env, length: int, *args, **kwargs):
inputs = [resolve(z, x, length, **kwargs) for z in self.inputs]
return np.concatenate(inputs, self.axis)
class Stack(Lifted):
def __init__(self, inputs: List[ValueOrCallable]) -> None:
self.inputs = inputs
def __call__(self, x: Env, length: int, *args, **kwargs):
inputs = [resolve(z, x, length, **kwargs) for z in self.inputs]
return np.stack(inputs, axis=0)
class StackPrefix(Lifted):
def __init__(self, prefix: str) -> None:
self.prefix = prefix
def __call__(self, x: Env, length: int, *args, **kwargs):
inputs = [v for k, v in x.items() if k.startswith(self.prefix)]
return np.stack(inputs, axis=0)
class Ref(Lifted):
def __init__(self, field_name: str) -> None:
self.field_name = field_name
def __call__(self, x: Env, length: int, *args, **kwargs):
return x[self.field_name]
class RandomUniform(Lifted):
def __init__(
self, low: ValueOrCallable = 0.0, high: ValueOrCallable = 1.0, shape=(0,),
) -> None:
self.low = low
self.high = high
self.shape = shape
def __call__(self, x: Env, length: int, *args, **kwargs):
low = resolve(self.low, x, length, *args, **kwargs)
high = resolve(self.high, x, length, *args, **kwargs)
s = np.array(self.shape)
s[s == 0] = length
return np.random.uniform(low, high, s)
class RandomInteger(Lifted):
def __init__(
self,
low: ValueOrCallable,
high: ValueOrCallable,
shape: Optional[Sequence[int]] = (0,),
) -> None:
self.low = low
self.high = high
self.shape = shape
def __call__(self, x: Env, length: int, *args, **kwargs):
low = resolve(self.low, x, length, *args, **kwargs)
high = resolve(self.high, x, length, *args, **kwargs)
if self.shape is not None:
s = np.array(self.shape)
s[s == 0] = length
return np.random.randint(low, high, s)
else:
return np.random.randint(low, high)
class RandomChangepoints(Lifted):
def __init__(self, max_num_changepoints: ValueOrCallable) -> None:
self.max_num_changepoints = max_num_changepoints
def __call__(self, x: Env, length: int, *args, **kwargs):
max_num_changepoints = resolve(
self.max_num_changepoints, x, length, *args, **kwargs
)
num_changepoints = np.random.randint(0, max_num_changepoints + 1)
change_idx = np.sort(
np.random.randint(low=1, high=length - 1, size=(num_changepoints,))
)
change_ranges = np.concatenate([change_idx, [length]])
out = np.zeros(length, dtype=np.int)
for i in range(0, num_changepoints):
out[change_ranges[i] : change_ranges[i + 1]] = i + 1
return out
class Repeated(Lifted):
def __init__(self, pattern: ValueOrCallable) -> None:
self.pattern = pattern
def __call__(self, x: Env, length: int, *args, **kwargs):
pattern = resolve(self.pattern, x, length, **kwargs)
repeats = length // len(pattern) + 1
out = np.tile(pattern, (repeats,))
return out[:length]
class Convolve(Lifted):
def __init__(self, input: ValueOrCallable, filter: ValueOrCallable) -> None:
self.filter = filter
self.input = input
def __call__(self, x: Env, length: int, *args, **kwargs):
fil = resolve(self.filter, x, length, **kwargs)
inp = resolve(self.input, x, length, **kwargs)
out = np.convolve(inp, fil, mode="same")
return out
class Dilated(Lifted):
def __init__(self, source: Callable, dilation: int) -> None:
self.source = source
self.dilation = dilation
def __call__(self, x: Env, length: int, *args, **kwargs):
inner = self.source(x, length // self.dilation + 1, **kwargs)
out = np.repeat(inner, self.dilation)
return out[:length]
class Choose(Lifted):
def __init__(self, options: ValueOrCallable, selector: ValueOrCallable) -> None:
self.options = options
self.selector = selector
def __call__(self, x, length, **kwargs):
options = resolve(self.options, x, length, **kwargs)
selector = resolve(self.selector, x, length, **kwargs)
e = np.eye(options.shape[0])
out = np.sum(e[selector] * options.T, axis=1)
return out
class EvalRecipe(Lifted):
def __init__(self, recipe: Recipe, op: ValueOrCallable) -> None:
self.recipe = recipe
self.op = op
def __call__(self, x: Env, *args, **kwargs):
xx = evaluate(self.recipe, *args, **kwargs)
return resolve(self.op, xx, *args, **kwargs)
| 18,316 | 29.276033 | 88 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/multivariate_grouper.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import logging
from typing import Callable, Optional
import numpy as np
import pandas as pd
# First-party imports
from .common import DataEntry, Dataset, FieldName, DateConstants
from .list_dataset import ListDataset
class MultivariateGrouper:
"""
The MultivariateGrouper takes a univariate dataset and groups it into a
single multivariate time series. Therefore, this class allows the user
to convert a univariate dataset into a multivariate dataset without making
a separate copy of the dataset.
The Multivariate Grouper has two different modes:
Training: For training data, the univariate time series get aligned to the
earliest time stamp in the dataset. Time series will be left and right
padded to produce an array of shape (dim, num_time_steps)
Test: The test dataset might have multiple start dates (usually because
the test dataset mimics a rolling evaluation scenario). In this case,
the univariate dataset will be split into n multivariate time series,
where n is the number of evaluation dates. Again, the
time series will be grouped but only left padded. Note that the
padded value will influence the prediction if the context length is
longer than the length of the time series.
Rules for padding for training and test datasets can be specified by the
user.
Parameters
----------
max_target_dim
Set maximum dimensionality (for faster testing or when hitting
constraints of multivariate model). Takes the last max_target_dim
time series and groups them to multivariate time series.
num_test_dates
Number of test dates in the test set. This can be more than one if
the test set contains more than one forecast start date (often the
case in a rolling evaluation scenario). Must be set to convert test
data.
train_fill_rule
Implements the rule that fills missing data after alignment of the
time series for the training dataset.
test_fill_rule
Implements the rule that fills missing data after alignment of the
time series for the test dataset.
"""
def __init__(
self,
max_target_dim: Optional[int] = None,
num_test_dates: Optional[int] = None,
train_fill_rule: Callable = np.mean,
test_fill_rule: Callable = lambda x: 0.0,
) -> None:
self.num_test_dates = num_test_dates
self.max_target_dimension = max_target_dim
self.train_fill_function = train_fill_rule
self.test_fill_rule = test_fill_rule
self.first_timestamp = DateConstants.LATEST_SUPPORTED_TIMESTAMP
self.last_timestamp = DateConstants.OLDEST_SUPPORTED_TIMESTAMP
self.frequency = ""
def __call__(self, dataset: Dataset) -> Dataset:
self._preprocess(dataset)
return self._group_all(dataset)
def _preprocess(self, dataset: Dataset) -> None:
"""
The preprocess function iterates over the dataset to gather data that
is necessary for alignment.
This includes
1) Storing first/last timestamp in the dataset
2) Storing the frequency of the dataset
"""
for data in dataset:
timestamp = data[FieldName.START]
self.first_timestamp = min(self.first_timestamp, timestamp)
self.last_timestamp = max(
self.last_timestamp,
timestamp + (len(data[FieldName.TARGET]) - 1) * timestamp.freq,
)
self.frequency = timestamp.freq
logging.info(
f"first/last timestamp found: "
f"{self.first_timestamp}/{self.last_timestamp}"
)
def _group_all(self, dataset: Dataset) -> Dataset:
if self.num_test_dates is None:
grouped_dataset = self._prepare_train_data(dataset)
else:
grouped_dataset = self._prepare_test_data(dataset)
return grouped_dataset
def _prepare_train_data(self, dataset: Dataset) -> ListDataset:
logging.info("group training time-series to datasets")
grouped_data = self._transform_target(self._align_data_entry, dataset)
grouped_data = self._restrict_max_dimensionality(grouped_data)
grouped_data[FieldName.START] = self.first_timestamp
grouped_data[FieldName.FEAT_STATIC_CAT] = [0] # TODO: feat_static_cat도 multivariate로 변환
return ListDataset([grouped_data], freq=self.frequency, one_dim_target=False)
def _prepare_test_data(self, dataset: Dataset) -> ListDataset:
logging.info("group test time-series to datasets")
grouped_data = self._transform_target(self._left_pad_data, dataset)
# splits test dataset with rolling date into N R^d time series where
# N is the number of rolling evaluation dates
split_dataset = np.split(grouped_data[FieldName.TARGET], self.num_test_dates)
all_entries = list()
for dataset_at_test_date in split_dataset:
grouped_data = dict()
grouped_data[FieldName.TARGET] = np.array(
list(dataset_at_test_date), dtype=np.float32
)
grouped_data = self._restrict_max_dimensionality(grouped_data)
grouped_data[FieldName.START] = self.first_timestamp
grouped_data[FieldName.FEAT_STATIC_CAT] = [0]
all_entries.append(grouped_data)
return ListDataset(
all_entries, freq=self.frequency, one_dim_target=False
)
def _align_data_entry(self, data: DataEntry) -> np.array:
ts = self.to_ts(data)
return ts.reindex(
pd.date_range(
start=self.first_timestamp,
end=self.last_timestamp,
freq=data[FieldName.START].freq,
),
fill_value=self.train_fill_function(ts),
).values
def _left_pad_data(self, data: DataEntry) -> np.array:
ts = self.to_ts(data)
return ts.reindex(
pd.date_range(
start=self.first_timestamp,
end=ts.index[-1],
freq=data[FieldName.START].freq,
),
fill_value=self.test_fill_rule(ts),
).values
@staticmethod
def _transform_target(funcs, dataset: Dataset) -> DataEntry:
return {FieldName.TARGET: np.array([funcs(data) for data in dataset])}
def _restrict_max_dimensionality(self, data: DataEntry) -> DataEntry:
"""
Takes the last max_target_dimension dimensions from a multivariate
dataentry.
Parameters
----------
data
multivariate data entry with (dim, num_timesteps) target field
Returns
-------
DataEntry
data multivariate data entry with
(max_target_dimension, num_timesteps) target field
"""
if self.max_target_dimension is not None:
# restrict maximum dimensionality (for faster testing)
data[FieldName.TARGET] = data[FieldName.TARGET][
-self.max_target_dimension :, :
]
return data
@staticmethod
def to_ts(data: DataEntry) -> pd.Series:
return pd.Series(
data[FieldName.TARGET],
index=pd.date_range(
start=data[FieldName.START],
periods=len(data[FieldName.TARGET]),
freq=data[FieldName.START].freq,
),
)
| 8,108 | 37.25 | 95 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/list_dataset.py | import random
import torch
from typing import Iterable
from .common import DataEntry, Dataset, SourceContext
from .process import ProcessDataEntry
class ListDataset(Dataset):
def __init__(
self,
data_iter: Iterable[DataEntry],
freq: str,
one_dim_target: bool = True,
shuffle: bool = False,
) -> None:
process = ProcessDataEntry(freq, one_dim_target)
self.list_data = [process(data) for data in data_iter]
self.shuffle = shuffle
if self.shuffle:
random.shuffle(self.list_data)
def __iter__(self):
source_name = "list_data"
for row_number, data in enumerate(self.list_data, start=1):
data["source"] = SourceContext(source=source_name, row=row_number)
yield data
if self.shuffle:
random.shuffle(self.list_data)
def __len__(self):
return len(self.list_data)
| 945 | 26.028571 | 78 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/_m4.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import os
from pathlib import Path
import numpy as np
import pandas as pd
from ._util import save_to_file, to_dict, metadata
def generate_m4_dataset(
    dataset_path: Path, m4_freq: str, pandas_freq: str, prediction_length: int
):
    """
    Download one M4-competition frequency split and materialize it under
    ``dataset_path`` as metadata.json plus train/ and test/ json-lines files.

    Parameters
    ----------
    dataset_path
        directory to write the dataset into (created if missing)
    m4_freq
        M4 frequency name used in the remote file names, e.g. "Hourly"
    pandas_freq
        pandas frequency string recorded in the metadata
    prediction_length
        forecast horizon recorded in the metadata
    """
    base_url = "https://github.com/M4Competition/M4-methods/raw/master/Dataset"
    train_df = pd.read_csv(f"{base_url}/Train/{m4_freq}-train.csv", index_col=0)
    test_df = pd.read_csv(f"{base_url}/Test/{m4_freq}-test.csv", index_col=0)

    os.makedirs(dataset_path, exist_ok=True)

    meta = metadata(
        cardinality=len(train_df),
        freq=pandas_freq,
        prediction_length=prediction_length,
    )
    with open(dataset_path / "metadata.json", "w") as f:
        json.dump(meta, f)

    # training series: drop the NaN padding at the end of each CSV row
    train_targets = [row[~np.isnan(row)] for row in train_df.values]
    # test series: full history = training portion followed by held-out portion
    test_targets = [
        np.hstack([train_part, test_part])
        for train_part, test_part in zip(train_targets, test_df.values)
    ]

    if m4_freq == "Yearly":
        # some time series have more than 300 years which can not be represented
        # in pandas; this is probably due to a misclassification of those time
        # series as Yearly. We simply use only the last 300 years for training.
        # Note this does not affect test time as prediction length is less than
        # 300 years.
        train_targets = [row[-300:] for row in train_targets]
        test_targets = [row[-300:] for row in test_targets]

    # the original dataset did not include time stamps, so we use a mock start
    # date for each time series: the earliest point available in pandas
    mock_start = "1750-01-01 00:00:00"

    for filename, targets in [
        (dataset_path / "train" / "data.json", train_targets),
        (dataset_path / "test" / "data.json", test_targets),
    ]:
        save_to_file(
            filename,
            [
                to_dict(target_values=ts, start=mock_start, cat=[cat], item_id=cat)
                for cat, ts in enumerate(targets)
            ],
        )
| 2,966 | 33.5 | 104 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/_gp_copula_2019.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Loads the datasets used in Salinas et al. 2019 (https://tinyurl.com/woyhhqy).
This wrapper downloads and unpacks them so they don'thave to be attached as
large files in GluonTS master.
"""
import json
import os
import shutil
import tarfile
from pathlib import Path
from typing import NamedTuple, Optional
from urllib import request
from pts.dataset import FileDataset, FieldName
from ._util import save_to_file, to_dict, metadata
class GPCopulaDataset(NamedTuple):
    """
    Static description of one of the datasets used in Salinas et al. 2019.

    Attributes
    ----------
    name
        dataset identifier (also used for the downloaded archive file name)
    url
        download location of the ``.tar.gz`` archive
    num_series
        number of time series in the dataset
    prediction_length
        forecast horizon
    freq
        pandas frequency string of the observations
    rolling_evaluations
        number of rolling evaluation windows contained in the test split
    max_target_dim
        optional cap on the number of series to keep (``None`` keeps all)
    """

    name: str
    url: str
    num_series: int
    prediction_length: int
    freq: str
    rolling_evaluations: int
    max_target_dim: Optional[int] = None
# Base URL of the pre-processed archives published with the paper's code.
root = (
    "https://raw.githubusercontent.com/mbohlkeschneider/gluon-ts/mv_release/datasets/"
)

# Registry of the multivariate benchmark datasets, keyed by their public name.
datasets_info = {
    "exchange_rate_nips": GPCopulaDataset(
        name="exchange_rate_nips",
        url=root + "exchange_rate_nips.tar.gz",
        num_series=8,
        prediction_length=30,
        freq="B",
        rolling_evaluations=5,
        max_target_dim=None,
    ),
    "electricity_nips": GPCopulaDataset(
        name="electricity_nips",
        url=root + "electricity_nips.tar.gz",
        # original dataset can be found at https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014#
        num_series=370,
        prediction_length=24,
        freq="H",
        rolling_evaluations=7,
        max_target_dim=None,
    ),
    "traffic_nips": GPCopulaDataset(
        name="traffic_nips",
        url=root + "traffic_nips.tar.gz",
        # note there are 963 in the original dataset from https://archive.ics.uci.edu/ml/datasets/PEMS-SF
        num_series=963,
        prediction_length=24,
        freq="H",
        rolling_evaluations=7,
        max_target_dim=None,
    ),
    "solar_nips": GPCopulaDataset(
        # NOTE(review): `name` is "solar-energy" while the registry key is
        # "solar_nips"; the archive is fetched via `url` either way — confirm
        # the mismatch is intentional.
        name="solar-energy",
        url=root + "solar_nips.tar.gz",
        num_series=137,
        prediction_length=24,
        freq="H",
        rolling_evaluations=7,
        max_target_dim=None,
    ),
    "wiki-rolling_nips": GPCopulaDataset(
        name="wiki-rolling_nips",
        # That file lives on GitHub Large file storage (lfs). We need to use
        # the exact link, otherwise it will only open the lfs pointer file.
        url="https://github.com/mbohlkeschneider/gluon-ts/raw/650ad5ffe92d20e89d491966b6d8b4459e219be8/datasets/wiki-rolling_nips.tar.gz",
        num_series=9535,
        prediction_length=30,
        freq="D",
        rolling_evaluations=5,
        max_target_dim=2000,
    ),
    "taxi_30min": GPCopulaDataset(
        name="taxi_30min",
        url=root + "taxi_30min.tar.gz",
        num_series=1214,
        prediction_length=24,
        freq="30min",
        rolling_evaluations=56,
        max_target_dim=None,
    ),
}
def generate_gp_copula_dataset(dataset_path: Path, dataset_name: str):
    """Download, unpack and re-serialize one of the GP-Copula datasets
    into ``dataset_path`` (metadata.json plus train/ and test/ splits)."""
    ds_info = datasets_info[dataset_name]

    os.makedirs(dataset_path, exist_ok=True)
    download_dataset(dataset_path.parent, ds_info)
    save_metadata(dataset_path, ds_info)
    for split in ("train", "test"):
        save_dataset(dataset_path / split, ds_info)
    clean_up_dataset(dataset_path, ds_info)
def download_dataset(dataset_path: Path, ds_info: GPCopulaDataset):
    """Fetch the dataset archive and extract it into ``dataset_path``."""
    archive = dataset_path / f"{ds_info.name}.tar.gz"
    request.urlretrieve(ds_info.url, archive)
    # NOTE(review): extractall trusts the member paths inside the downloaded
    # archive; consider tarfile's `filter="data"` (Python 3.12+) to guard
    # against path traversal.
    with tarfile.open(archive) as tar:
        tar.extractall(path=dataset_path)
def save_metadata(dataset_path: Path, ds_info: GPCopulaDataset):
    """Write the dataset-level metadata.json file for ``ds_info``."""
    meta = metadata(
        cardinality=ds_info.num_series,
        freq=ds_info.freq,
        prediction_length=ds_info.prediction_length,
    )
    with open(dataset_path / "metadata.json", "w") as f:
        f.write(json.dumps(meta))
def save_dataset(dataset_path: Path, ds_info: GPCopulaDataset):
    """Re-serialize the extracted json files of one split into a single
    ``data.json`` file, replacing the extracted directory."""
    dataset = list(FileDataset(dataset_path / "*.json", freq=ds_info.freq))
    shutil.rmtree(dataset_path)

    entries = []
    for cat, data_entry in enumerate(dataset):
        entries.append(
            to_dict(
                target_values=data_entry[FieldName.TARGET],
                start=data_entry[FieldName.START],
                # Rolling-evaluation copies of a series share one categorical
                # id: reduce `cat` modulo the number of distinct series.
                cat=[cat - ds_info.num_series * (cat // ds_info.num_series)],
                item_id=cat,
            )
        )
    save_to_file(dataset_path / "data.json", entries)
def clean_up_dataset(dataset_path: Path, ds_info: GPCopulaDataset):
    """Remove the downloaded archive and the unpacked metadata directory."""
    archive = dataset_path.parent / f"{ds_info.name}.tar.gz"
    os.remove(archive)
    shutil.rmtree(dataset_path / "metadata")
| 5,230 | 31.490683 | 138 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/_lstnet.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Here we reuse the datasets used by LSTNet as the processed url of the datasets
are available on GitHub.
"""
import json
import os
from pathlib import Path
from typing import List, NamedTuple, Optional
import pandas as pd
from pts.dataset import frequency_add
from ._util import save_to_file, to_dict, metadata
def load_from_pandas(
    df: pd.DataFrame, time_index: pd.DatetimeIndex, agg_freq: Optional[str] = None,
) -> List[pd.Series]:
    """
    Split a (time x series) DataFrame into one pandas Series per column.

    Parameters
    ----------
    df
        DataFrame with one row per time step and one column per series.
    time_index
        Index assigned to the rows of ``df``.
    agg_freq
        Optional resampling frequency; when given, each series is aggregated
        by summing within each resampling bucket.

    Returns
    -------
    List[pd.Series]
        One series per column of ``df``, trimmed to the span between its
        first and last non-null values.
    """
    df = df.set_index(time_index)
    # transpose so that each row of pivot_df is one time series
    # (fix: the original also called pivot_df.head() and discarded the result)
    pivot_df = df.transpose()

    timeseries = []
    for _, row in pivot_df.iterrows():
        ts = pd.Series(row.values, index=time_index)
        if agg_freq is not None:
            ts = ts.resample(agg_freq).sum()
        # trim leading/trailing NaNs; an all-NaN series is left untrimmed
        # (first/last_valid_index return None, and ts[None:None] is ts)
        first_valid = ts.first_valid_index()
        last_valid = ts.last_valid_index()
        ts = ts[first_valid:last_valid]
        timeseries.append(ts)

    return timeseries
class LstnetDataset(NamedTuple):
    """
    Static description of one of the LSTNet benchmark datasets.

    Attributes
    ----------
    name
        dataset identifier
    url
        download location of the gzipped text file
    num_series
        number of time series in the dataset
    num_time_steps
        number of rows (time steps) in the raw file
    prediction_length
        forecast horizon
    rolling_evaluations
        number of rolling evaluation windows in the test split
    freq
        pandas frequency string of the raw observations
    start_date
        mock start date assigned to the (timestamp-free) raw data
    agg_freq
        optional resampling frequency applied on load (``None`` = no resampling)
    """

    name: str
    url: str
    num_series: int
    num_time_steps: int
    prediction_length: int
    rolling_evaluations: int
    freq: str
    start_date: str
    agg_freq: Optional[str] = None
# Base URL of the LSTNet pre-processed datasets repository.
root = (
    "https://raw.githubusercontent.com/laiguokun/multivariate-time-series-data/master/"
)

# Registry of the LSTNet benchmark datasets, keyed by dataset name.
datasets_info = {
    "exchange_rate": LstnetDataset(
        name="exchange_rate",
        url=root + "exchange_rate/exchange_rate.txt.gz",
        num_series=8,
        num_time_steps=7588,
        prediction_length=30,
        rolling_evaluations=5,
        start_date="1990-01-01",
        freq="1B",
        agg_freq=None,
    ),
    "electricity": LstnetDataset(
        name="electricity",
        url=root + "electricity/electricity.txt.gz",
        # original dataset can be found at https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014#
        # the aggregated ones that is used from LSTNet filters out from the initial 370 series the one with no data
        # in 2011
        num_series=321,
        num_time_steps=26304,
        prediction_length=24,
        rolling_evaluations=7,
        start_date="2012-01-01",
        freq="1H",
        agg_freq=None,
    ),
    "traffic": LstnetDataset(
        name="traffic",
        url=root + "traffic/traffic.txt.gz",
        # note there are 963 in the original dataset from https://archive.ics.uci.edu/ml/datasets/PEMS-SF
        # but only 862 in LSTNet
        num_series=862,
        num_time_steps=17544,
        prediction_length=24,
        rolling_evaluations=7,
        start_date="2015-01-01",
        freq="H",
        agg_freq=None,
    ),
    "solar-energy": LstnetDataset(
        name="solar-energy",
        url=root + "solar-energy/solar_AL.txt.gz",
        num_series=137,
        num_time_steps=52560,
        prediction_length=24,
        rolling_evaluations=7,
        start_date="2006-01-01",
        freq="10min",
        # 10-minute measurements are aggregated to hourly sums on load
        agg_freq="1H",
    ),
}
def generate_lstnet_dataset(dataset_path: Path, dataset_name: str):
    """
    Download one of the LSTNet benchmark datasets and write it to
    ``dataset_path`` in the json-lines layout used by this package
    (metadata.json plus train/ and test/ splits).
    """
    ds_info = datasets_info[dataset_name]

    os.makedirs(dataset_path, exist_ok=True)

    # dataset-level metadata
    with open(dataset_path / "metadata.json", "w") as f:
        f.write(
            json.dumps(
                metadata(
                    cardinality=ds_info.num_series,
                    freq=ds_info.freq,
                    prediction_length=ds_info.prediction_length,
                )
            )
        )

    train_file = dataset_path / "train" / "data.json"
    test_file = dataset_path / "test" / "data.json"

    time_index = pd.date_range(
        start=ds_info.start_date, freq=ds_info.freq, periods=ds_info.num_time_steps,
    )

    df = pd.read_csv(ds_info.url, header=None)
    assert df.shape == (
        ds_info.num_time_steps,
        ds_info.num_series,
    ), f"expected num_time_steps/num_series {(ds_info.num_time_steps, ds_info.num_series)} but got {df.shape}"

    timeseries = load_from_pandas(
        df=df, time_index=time_index, agg_freq=ds_info.agg_freq
    )

    # the last date seen during training: the first 80% of the time axis
    ts_index = timeseries[0].index
    training_end = ts_index[int(len(ts_index) * (8 / 10))]

    train_ts = []
    for cat, ts in enumerate(timeseries):
        sliced_ts = ts[:training_end]
        if len(sliced_ts) > 0:
            train_ts.append(
                to_dict(
                    target_values=sliced_ts.values,
                    start=sliced_ts.index[0],
                    cat=[cat],
                    item_id=cat,
                )
            )

    assert len(train_ts) == ds_info.num_series
    save_to_file(train_file, train_ts)

    # start time of each rolling-evaluation window
    prediction_dates = [
        frequency_add(training_end, i * ds_info.prediction_length)
        for i in range(ds_info.rolling_evaluations)
    ]

    test_ts = []
    for prediction_start_date in prediction_dates:
        for cat, ts in enumerate(timeseries):
            # each test entry contains the full history up to the end of
            # its evaluation window
            prediction_end_date = frequency_add(
                prediction_start_date, ds_info.prediction_length
            )
            sliced_ts = ts[:prediction_end_date]
            test_ts.append(
                to_dict(
                    target_values=sliced_ts.values, start=sliced_ts.index[0], cat=[cat],
                )
            )

    assert len(test_ts) == ds_info.num_series * ds_info.rolling_evaluations
    save_to_file(test_file, test_ts)
| 5,944 | 29.025253 | 115 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/_util.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Any
import numpy as np
def to_dict(
    target_values: np.ndarray,
    start: str,
    cat: Optional[List[int]] = None,
    item_id: Optional[Any] = None,
):
    """
    Build a JSON-serializable dict describing one time series.

    Parameters
    ----------
    target_values
        array of target values; NaNs are encoded as the string "NaN"
    start
        start timestamp (stringified into the output)
    cat
        optional static categorical features ("feat_static_cat")
    item_id
        optional item identifier
    """

    def _encode(value):
        value = float(value)
        # NaN is not valid JSON; emit the string "NaN" instead
        if np.isnan(value):
            return "NaN"
        # round to six decimals to keep the serialized files small
        return float("{0:.6f}".format(value))

    entry = {
        "start": str(start),
        "target": [_encode(v) for v in target_values],
    }
    if cat is not None:
        entry["feat_static_cat"] = cat
    if item_id is not None:
        entry["item_id"] = item_id
    return entry
def save_to_file(path: Path, data: List[Dict]):
    """Write ``data`` as UTF-8 JSON-lines to ``path``, creating any
    missing parent directories first."""
    print(f"saving time-series into {path}")
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "wb") as fp:
        for record in data:
            line = json.dumps(record) + "\n"
            fp.write(line.encode("utf-8"))
def get_download_path() -> Path:
    """
    Returns
    -------
    Path
        default path to download datasets:
        ``/home/username/.pytorch/pytorch-ts/``
    """
    home = Path.home()
    return Path(str(home / ".pytorch" / "pytorch-ts"))
def metadata(cardinality: int, freq: str, prediction_length: int):
    """Build the dataset-level metadata dict (frequency, horizon and the
    cardinality of the single static categorical feature)."""
    static_cat = {"name": "feat_static_cat", "cardinality": str(cardinality)}
    return {
        "freq": freq,
        "prediction_length": prediction_length,
        "feat_static_cat": [static_cat],
    }
| 2,063 | 25.126582 | 75 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/datasets.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from collections import OrderedDict
from functools import partial
from pathlib import Path
from pts.dataset import ConstantDataset, TrainDatasets, load_datasets
from ._artificial import generate_artificial_dataset
from ._gp_copula_2019 import generate_gp_copula_dataset
from ._lstnet import generate_lstnet_dataset
from ._m4 import generate_m4_dataset
from ._util import get_download_path
m4_freq = "Hourly"
pandas_freq = "H"
dataset_path = Path(f"m4-{m4_freq}")
prediction_length = 48
# Maps each dataset name to a recipe callable; each recipe generates a
# dataset given a `dataset_path`.
dataset_recipes = OrderedDict(
    {
        "constant": partial(generate_artificial_dataset, dataset=ConstantDataset()),
        "exchange_rate": partial(generate_lstnet_dataset, dataset_name="exchange_rate"),
        "solar-energy": partial(generate_lstnet_dataset, dataset_name="solar-energy"),
        "electricity": partial(generate_lstnet_dataset, dataset_name="electricity"),
        "traffic": partial(generate_lstnet_dataset, dataset_name="traffic"),
        "exchange_rate_nips": partial(
            generate_gp_copula_dataset, dataset_name="exchange_rate_nips"
        ),
        "electricity_nips": partial(
            generate_gp_copula_dataset, dataset_name="electricity_nips"
        ),
        "traffic_nips": partial(
            generate_gp_copula_dataset, dataset_name="traffic_nips"
        ),
        "solar_nips": partial(generate_gp_copula_dataset, dataset_name="solar_nips"),
        "wiki-rolling_nips": partial(
            generate_gp_copula_dataset, dataset_name="wiki-rolling_nips"
        ),
        "taxi_30min": partial(generate_gp_copula_dataset, dataset_name="taxi_30min"),
        "m4_hourly": partial(
            generate_m4_dataset,
            m4_freq="Hourly",
            pandas_freq="H",
            prediction_length=48,
        ),
        "m4_daily": partial(
            generate_m4_dataset, m4_freq="Daily", pandas_freq="D", prediction_length=14,
        ),
        "m4_weekly": partial(
            generate_m4_dataset,
            m4_freq="Weekly",
            pandas_freq="W",
            prediction_length=13,
        ),
        "m4_monthly": partial(
            generate_m4_dataset,
            m4_freq="Monthly",
            pandas_freq="M",
            prediction_length=18,
        ),
        "m4_quarterly": partial(
            generate_m4_dataset,
            m4_freq="Quarterly",
            pandas_freq="3M",
            prediction_length=8,
        ),
        "m4_yearly": partial(
            generate_m4_dataset,
            m4_freq="Yearly",
            pandas_freq="12M",
            prediction_length=6,
        ),
    }
)

# All dataset names accepted by `get_dataset` / `materialize_dataset`.
dataset_names = list(dataset_recipes.keys())

# Default root directory for materialized datasets.
default_dataset_path = get_download_path() / "datasets"
def materialize_dataset(
    dataset_name: str, path: Path = default_dataset_path, regenerate: bool = False,
) -> Path:
    """
    Ensures that the dataset is materialized under the `path / dataset_name`
    path.

    Parameters
    ----------
    dataset_name
        name of the dataset, for instance "m4_hourly"
    path
        where the dataset should be saved
    regenerate
        whether to regenerate the dataset even if a local file is present.
        If this flag is False and the file is present, the dataset will not
        be downloaded again.

    Returns
    -------
    the path where the dataset is materialized
    """
    assert dataset_name in dataset_recipes.keys(), (
        f"{dataset_name} is not present, please choose one from "
        f"{dataset_recipes.keys()}."
    )
    path.mkdir(parents=True, exist_ok=True)

    dataset_dir = path / dataset_name
    if dataset_dir.exists() and not regenerate:
        logging.info(f"using dataset already processed in path {dataset_dir}.")
    else:
        logging.info(f"downloading and processing {dataset_name}")
        dataset_recipes[dataset_name](dataset_path=dataset_dir)
    return dataset_dir
def get_dataset(
    dataset_name: str, path: Path = default_dataset_path, regenerate: bool = False, shuffle: bool = False
) -> TrainDatasets:
    """
    Get a repository dataset.

    The datasets that can be obtained through this function have been used
    with different processing over time by several papers (e.g., [SFG17]_,
    [LCY+18]_, and [YRD15]_).

    Parameters
    ----------
    dataset_name
        name of the dataset, for instance "m4_hourly"
    path
        where the dataset should be saved
    regenerate
        whether to regenerate the dataset even if a local file is present.
        If this flag is False and the file is present, the dataset will not
        be downloaded again.
    shuffle
        whether to shuffle the training data when it is loaded

    Returns
    -------
    dataset obtained by either downloading or reloading from local file.
    """
    dataset_path = materialize_dataset(dataset_name, path, regenerate)

    return load_datasets(
        metadata=dataset_path / "metadata.json",
        train=dataset_path / "train" / "*.json",
        test=dataset_path / "test" / "*.json",
        shuffle=shuffle,
    )
if __name__ == "__main__":
for dataset in dataset_names:
print(f"generate {dataset}")
ds = get_dataset(dataset, regenerate=True)
print(ds.metadata)
print(sum(1 for _ in list(iter(ds.train))))
| 5,882 | 32.617143 | 105 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/_artificial.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import json
from pathlib import Path
# First-party imports
from pts.dataset import ArtificialDataset, generate_sf2, serialize_data_entry
def generate_artificial_dataset(dataset_path: Path, dataset: ArtificialDataset) -> None:
    """Materialize an artificial dataset under ``dataset_path``
    (metadata.json plus train/ and test/ sf2 files)."""
    train_dir = dataset_path / "train"
    test_dir = dataset_path / "test"

    dataset_path.mkdir(exist_ok=True)
    # splits must not pre-exist: fail loudly instead of overwriting
    train_dir.mkdir(exist_ok=False)
    test_dir.mkdir(exist_ok=False)

    ds = dataset.generate()
    assert ds.test is not None

    with (dataset_path / "metadata.json").open("w") as fp:
        json.dump(ds.metadata.dict(), fp, indent=2, sort_keys=True)

    for filename, split in [
        (train_dir / "train.json", ds.train),
        (test_dir / "test.json", ds.test),
    ]:
        generate_sf2(
            filename=str(filename),
            time_series=[serialize_data_entry(entry) for entry in split],
            is_missing=False,
            num_missing=0,
        )
| 1,627 | 32.22449 | 88 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/__init__.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from .datasets import get_dataset, dataset_recipes | 626 | 43.785714 | 75 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/split.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from functools import lru_cache
from typing import Iterator, List, Optional
import numpy as np
import pandas as pd
from pts.core.component import validated
from pts.dataset import DataEntry, FieldName
from .sampler import InstanceSampler, ContinuousTimePointSampler
from .transform import FlatMapTransformation
def shift_timestamp(ts: pd.Timestamp, offset: int) -> pd.Timestamp:
    """
    Computes a shifted timestamp.

    Basic wrapping around pandas ``ts + offset`` with caching and exception
    handling.

    NOTE(review): relies on ``ts.freq``, an attribute removed from
    ``pd.Timestamp`` in pandas 2.0 — confirm the pinned pandas version.
    """
    return _shift_timestamp_helper(ts, ts.freq, offset)
@lru_cache(maxsize=10000)
def _shift_timestamp_helper(ts: pd.Timestamp, freq: str, offset: int) -> pd.Timestamp:
    """
    We are using this helper function which explicitly uses the frequency as a
    parameter, because the frequency is not included in the hash of a time
    stamp.

    I.e.
        pd.Timestamp(x, freq='1D') and pd.Timestamp(x, freq='1min')
    hash to the same value.

    Parameters
    ----------
    ts
        timestamp to shift
    freq
        frequency string or pandas offset object giving the step size
    offset
        number of steps of size ``freq`` to shift by

    Raises
    ------
    Exception
        if the shifted date falls outside the range representable by pandas
    """
    try:
        # Fix: use the `freq` argument instead of `ts.freq` — the attribute
        # was removed from Timestamp in pandas 2.0, and the parameter exists
        # precisely to carry the frequency. `to_offset` accepts both strings
        # and offset objects, so existing callers passing `ts.freq` still work.
        # This line looks innocent, but can create a date which is out of
        # bounds: values over year 9999 raise a ValueError, values over
        # 2262-04-11 raise a pandas OutOfBoundsDatetime.
        return ts + offset * pd.tseries.frequencies.to_offset(freq)
    except (ValueError, pd.errors.OutOfBoundsDatetime) as ex:
        # Preserve the original exception as the cause for easier debugging.
        raise Exception(ex) from ex
class InstanceSplitter(FlatMapTransformation):
    """
    Selects training instances, by slicing the target and other time series
    like arrays at random points in training mode or at the last time point in
    prediction mode. Assumption is that all time like arrays start at the same
    time point.

    The target and each time_series_field is removed and instead two
    corresponding fields with prefix `past_` and `future_` are included. E.g.

    If the target array is one-dimensional, the resulting instance has shape
    (len_target). In the multi-dimensional case, the instance has shape (dim,
    len_target).

        target -> past_target and future_target

    The transformation also adds a field 'past_is_pad' that indicates whether
    values where padded or not.

    Convention: time axis is always the last axis.

    Parameters
    ----------
    target_field
        field containing the target
    is_pad_field
        output field indicating whether padding happened
    start_field
        field containing the start date of the time series
    forecast_start_field
        output field that will contain the time point where the forecast starts
    train_sampler
        instance sampler that provides sampling indices given a time-series
    past_length
        length of the target seen before making prediction
    future_length
        length of the target that must be predicted
    batch_first
        whether to have time series output in (time, dimension) or in
        (dimension, time) layout
    time_series_fields
        fields that contains time-series, they are split in the same interval
        as the target
    pick_incomplete
        whether training examples can be sampled with only a part of
        past_length time-units present for the time series. This is useful to
        train models for cold-start. In such case, is_pad_out contains an
        indicator whether data is padded or not.
    """

    @validated()
    def __init__(
        self,
        target_field: str,
        is_pad_field: str,
        start_field: str,
        forecast_start_field: str,
        train_sampler: InstanceSampler,
        past_length: int,
        future_length: int,
        batch_first: bool = True,
        time_series_fields: Optional[List[str]] = None,
        pick_incomplete: bool = True,
    ) -> None:
        assert future_length > 0

        self.train_sampler = train_sampler
        self.past_length = past_length
        self.future_length = future_length
        self.batch_first = batch_first
        self.ts_fields = time_series_fields if time_series_fields is not None else []
        self.target_field = target_field
        self.is_pad_field = is_pad_field
        self.start_field = start_field
        self.forecast_start_field = forecast_start_field
        self.pick_incomplete = pick_incomplete

    def _past(self, col_name: str) -> str:
        # name of the output field holding the context part of `col_name`
        return f"past_{col_name}"

    def _future(self, col_name: str) -> str:
        # name of the output field holding the prediction part of `col_name`
        return f"future_{col_name}"

    def flatmap_transform(self, data: DataEntry, is_train: bool) -> Iterator[DataEntry]:
        pl = self.future_length
        slice_cols = self.ts_fields + [self.target_field]
        target = data[self.target_field]

        len_target = target.shape[-1]

        # shortest series length for which an instance can be cut at all
        minimum_length = (
            self.future_length
            if self.pick_incomplete
            else self.past_length + self.future_length
        )

        if 'is_validation' in data.keys():
            # Validation mode: a single deterministic split point anchored at
            # the end of the series, shifted back by the value stored under
            # "is_validation" (presumably an integer offset — confirm with
            # the pipeline that produces this field).
            idx = len_target - self.future_length - data["is_validation"]
            assert self.pick_incomplete or idx >= self.past_length
            sampled_indices = np.array([idx], dtype=int)
        elif is_train:
            sampling_bounds = (
                (0, len_target - self.future_length)
                if self.pick_incomplete
                else (self.past_length, len_target - self.future_length)
            )

            # We currently cannot handle time series that are
            # too short during training, so we just skip these.
            # If we want to include them we would need to pad and to
            # mask the loss.
            sampled_indices = (
                np.array([], dtype=int)
                if len_target < minimum_length
                else self.train_sampler(target, *sampling_bounds)
            )
        else:
            assert self.pick_incomplete or len_target >= self.past_length
            # [Note] in the test case, return the last index (the forecast
            # starts right after the observed range)
            sampled_indices = np.array([len_target], dtype=int)

        for i in sampled_indices:
            pad_length = max(self.past_length - i, 0)
            if not self.pick_incomplete:
                assert pad_length == 0, f"pad_length should be zero, got {pad_length}"
            d = data.copy()
            for ts_field in slice_cols:
                if i > self.past_length:
                    # truncate to past_length
                    past_piece = d[ts_field][..., i - self.past_length : i]
                elif i < self.past_length:
                    # left-pad with zeros up to past_length
                    pad_block = np.zeros(
                        d[ts_field].shape[:-1] + (pad_length,), dtype=d[ts_field].dtype,
                    )
                    past_piece = np.concatenate(
                        [pad_block, d[ts_field][..., :i]], axis=-1
                    )
                else:
                    past_piece = d[ts_field][..., :i]
                d[self._past(ts_field)] = past_piece
                # [Note] time features also exist in the future range, so the
                # future slice is taken for every field, not just the target
                d[self._future(ts_field)] = d[ts_field][..., i : i + pl]
                del d[ts_field]

            pad_indicator = np.zeros(self.past_length)
            if pad_length > 0:
                pad_indicator[:pad_length] = 1

            if self.batch_first:
                # switch from (dimension, time) to (time, dimension) layout
                for ts_field in slice_cols:
                    d[self._past(ts_field)] = d[self._past(ts_field)].transpose()
                    d[self._future(ts_field)] = d[self._future(ts_field)].transpose()

            d[self._past(self.is_pad_field)] = pad_indicator
            d[self.forecast_start_field] = shift_timestamp(d[self.start_field], i)
            yield d
class CanonicalInstanceSplitter(FlatMapTransformation):
    """
    Selects instances, by slicing the target and other time series
    like arrays at random points in training mode or at the last time point in
    prediction mode. Assumption is that all time like arrays start at the same
    time point.

    In training mode, the returned instances contain past_`target_field`
    as well as past_`time_series_fields`.

    In prediction mode, one can set `use_prediction_features` to get
    future_`time_series_fields`.

    If the target array is one-dimensional, the `target_field` in the resulting
    instance has shape (`instance_length`). In the multi-dimensional case, the
    instance has shape (`dim`, `instance_length`), where `dim` can also take a
    value of 1.

    In the case of insufficient number of time series values, the
    transformation also adds a field 'past_is_pad' that indicates whether
    values where padded or not, and the value is padded with
    `default_pad_value` with a default value 0.
    This is done only if `allow_target_padding` is `True`,
    and the length of `target` is smaller than `instance_length`.

    Parameters
    ----------
    target_field
        fields that contains time-series
    is_pad_field
        output field indicating whether padding happened
    start_field
        field containing the start date of the time series
    forecast_start_field
        field containing the forecast start date
    instance_sampler
        instance sampler that provides sampling indices given a time-series
    instance_length
        length of the target seen before making prediction
    batch_first
        whether to have time series output in (time, dimension) or in
        (dimension, time) layout
    time_series_fields
        fields that contains time-series, they are split in the same interval
        as the target
    allow_target_padding
        flag to allow padding
    pad_value
        value to be used for padding
    use_prediction_features
        flag to indicate if prediction range features should be returned
    prediction_length
        length of the prediction range, must be set if
        use_prediction_features is True
    """

    def __init__(
        self,
        target_field: str,
        is_pad_field: str,
        start_field: str,
        forecast_start_field: str,
        instance_sampler: InstanceSampler,
        instance_length: int,
        batch_first: bool = True,
        # Fix: None sentinel instead of a mutable `[]` default argument.
        time_series_fields: Optional[List[str]] = None,
        allow_target_padding: bool = False,
        pad_value: float = 0.0,
        use_prediction_features: bool = False,
        prediction_length: Optional[int] = None,
    ) -> None:
        self.instance_sampler = instance_sampler
        self.instance_length = instance_length
        self.batch_first = batch_first
        self.dynamic_feature_fields = (
            time_series_fields if time_series_fields is not None else []
        )
        self.target_field = target_field
        self.allow_target_padding = allow_target_padding
        self.pad_value = pad_value
        self.is_pad_field = is_pad_field
        self.start_field = start_field
        self.forecast_start_field = forecast_start_field

        assert (
            not use_prediction_features or prediction_length is not None
        ), "You must specify `prediction_length` if `use_prediction_features`"

        self.use_prediction_features = use_prediction_features
        self.prediction_length = prediction_length

    def _past(self, col_name: str) -> str:
        # name of the output field holding the context part of `col_name`
        return f"past_{col_name}"

    def _future(self, col_name: str) -> str:
        # name of the output field holding the prediction part of `col_name`
        return f"future_{col_name}"

    def flatmap_transform(self, data: DataEntry, is_train: bool) -> Iterator[DataEntry]:
        ts_fields = self.dynamic_feature_fields + [self.target_field]
        ts_target = data[self.target_field]

        len_target = ts_target.shape[-1]

        if is_train:
            if len_target < self.instance_length:
                sampling_indices = (
                    # Returning [] for all time series will cause this to be in loop forever!
                    [len_target]
                    if self.allow_target_padding
                    else []
                )
            else:
                sampling_indices = self.instance_sampler(
                    ts_target, self.instance_length, len_target
                )
        else:
            # prediction mode: always cut at the end of the observed range
            sampling_indices = [len_target]

        for i in sampling_indices:
            d = data.copy()

            pad_length = max(self.instance_length - i, 0)

            # update start field to the beginning of the sliced instance
            d[self.start_field] = shift_timestamp(
                data[self.start_field], i - self.instance_length
            )

            # set is_pad field: 1 marks positions filled by padding
            is_pad = np.zeros(self.instance_length)
            if pad_length > 0:
                is_pad[:pad_length] = 1
            d[self.is_pad_field] = is_pad

            # update time series fields
            for ts_field in ts_fields:
                full_ts = data[ts_field]
                if pad_length > 0:
                    # left-pad with `pad_value` up to instance_length
                    pad_pre = self.pad_value * np.ones(
                        shape=full_ts.shape[:-1] + (pad_length,)
                    )
                    past_ts = np.concatenate([pad_pre, full_ts[..., :i]], axis=-1)
                else:
                    past_ts = full_ts[..., (i - self.instance_length) : i]

                past_ts = past_ts.transpose() if self.batch_first else past_ts
                d[self._past(ts_field)] = past_ts

                if self.use_prediction_features and not is_train:
                    # only non-target features are available in the future
                    if not ts_field == self.target_field:
                        future_ts = full_ts[..., i : i + self.prediction_length]
                        future_ts = (
                            future_ts.transpose() if self.batch_first else future_ts
                        )
                        d[self._future(ts_field)] = future_ts

                del d[ts_field]

            d[self.forecast_start_field] = shift_timestamp(
                d[self.start_field], self.instance_length
            )

            yield d
class ContinuousTimeInstanceSplitter(FlatMapTransformation):
    """
    Selects training instances by slicing "intervals" from a continuos-time
    process instantiation. Concretely, the input data is expected to describe an
    instantiation from a point (or jump) process, with the "target"
    identifying inter-arrival times and other features (marks), as described
    in detail below.
    The splitter will then take random points in continuous time from each
    given observation, and return a (variable-length) array of points in
    the past (context) and the future (prediction) intervals.
    The transformation is analogous to its discrete counterpart
    `InstanceSplitter` except that
    - It does not allow "incomplete" records. That is, the past and future
      intervals sampled are always complete
    - Outputs a (T, C) layout.
    - Does not accept `time_series_fields` (i.e., only accepts target fields) as these
      would typically not be available in TPP data.
    The target arrays are expected to have (2, T) layout where the first axis
    corresponds to the (i) interarrival times between consecutive points, in
    order and (ii) integer identifiers of marks (from {0, 1, ..., :code:`num_marks`}).
    The returned arrays will have (T, 2) layout.
    For example, the array below corresponds to a target array where points with timestamps
    0.5, 1.1, and 1.5 were observed belonging to categories (marks) 3, 1 and 0
    respectively: :code:`[[0.5, 0.6, 0.4], [3, 1, 0]]`.
    Parameters
    ----------
    past_interval_length
        length of the interval seen before making prediction
    future_interval_length
        length of the interval that must be predicted
    train_sampler
        instance sampler that provides sampling indices given a time-series
    target_field
        field containing the target
    start_field
        field containing the start date of the of the point process observation
    end_field
        field containing the end date of the point process observation
    forecast_start_field
        output field that will contain the time point where the forecast starts
    """
    def __init__(
        self,
        past_interval_length: float,
        future_interval_length: float,
        train_sampler: ContinuousTimePointSampler,
        target_field: str = FieldName.TARGET,
        start_field: str = FieldName.START,
        end_field: str = "end",
        forecast_start_field: str = FieldName.FORECAST_START,
    ) -> None:
        assert (
            future_interval_length > 0
        ), "Prediction interval must have length greater than 0."
        self.train_sampler = train_sampler
        self.past_interval_length = past_interval_length
        self.future_interval_length = future_interval_length
        self.target_field = target_field
        self.start_field = start_field
        self.end_field = end_field
        self.forecast_start_field = forecast_start_field
    # noinspection PyMethodMayBeStatic
    def _mask_sorted(self, a: np.ndarray, lb: float, ub: float):
        # indices of the (sorted) timestamps falling in [lb, ub);
        # both bounds use searchsorted's default side="left"
        start = np.searchsorted(a, lb)
        end = np.searchsorted(a, ub)
        return np.arange(start, end)
    def flatmap_transform(self, data: DataEntry, is_train: bool) -> Iterator[DataEntry]:
        """
        Yield one instance per sampled forecast start time, with the context
        interval's points in ``past_<target>`` (and, at training time, the
        prediction interval's points in ``future_<target>``).
        """
        assert data[self.start_field].freq == data[self.end_field].freq
        # observation window length measured in units of the series frequency
        total_interval_length = (data[self.end_field] - data[self.start_field]) / data[
            self.start_field
        ].freq.delta
        # sample forecast start times in continuous time
        if is_train:
            if total_interval_length < (
                self.future_interval_length + self.past_interval_length
            ):
                # window too short for one complete past+future instance
                sampling_times: np.ndarray = np.array([])
            else:
                sampling_times = self.train_sampler(
                    self.past_interval_length,
                    total_interval_length - self.future_interval_length,
                )
        else:
            sampling_times = np.array([total_interval_length])
        ia_times = data[self.target_field][0, :]
        marks = data[self.target_field][1:, :]
        # absolute arrival times, recovered from the inter-arrival times
        ts = np.cumsum(ia_times)
        assert ts[-1] < total_interval_length, (
            "Target interarrival times provided are inconsistent with "
            "start and end timestamps."
        )
        # select field names that will be included in outputs
        keep_cols = {
            k: v
            for k, v in data.items()
            if k not in [self.target_field, self.start_field, self.end_field]
        }
        for future_start in sampling_times:
            r: DataEntry = dict()
            past_start = future_start - self.past_interval_length
            future_end = future_start + self.future_interval_length
            assert past_start >= 0
            past_mask = self._mask_sorted(ts, past_start, future_start)
            # inter-arrival times re-based so the first one is measured from
            # the start of the context interval
            past_ia_times = np.diff(np.r_[0, ts[past_mask] - past_start])[np.newaxis]
            # (2, T) -> (T, 2) output layout
            r[f"past_{self.target_field}"] = np.concatenate(
                [past_ia_times, marks[:, past_mask]], axis=0
            ).transpose()
            r["past_valid_length"] = np.array([len(past_mask)])
            r[self.forecast_start_field] = (
                data[self.start_field]
                + data[self.start_field].freq.delta * future_start
            )
            if is_train:  # include the future only if is_train
                assert future_end <= total_interval_length
                future_mask = self._mask_sorted(ts, future_start, future_end)
                future_ia_times = np.diff(np.r_[0, ts[future_mask] - future_start])[
                    np.newaxis
                ]
                r[f"future_{self.target_field}"] = np.concatenate(
                    [future_ia_times, marks[:, future_mask]], axis=0
                ).transpose()
                r["future_valid_length"] = np.array([len(future_mask)])
            # include other fields
            r.update(keep_cols.copy())
            yield r
| 20,103 | 36.64794 | 106 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/field.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from collections import Counter
from typing import Any, Dict, List
from pts.core.component import validated
from pts.dataset import DataEntry
from .transform import SimpleTransformation, MapTransformation
class RenameFields(SimpleTransformation):
    """
    Rename fields of each data entry according to a mapping.

    Parameters
    ----------
    mapping
        Name mapping `input_name -> output_name`; keys missing from an
        entry are silently skipped.
    """
    @validated()
    def __init__(self, mapping: Dict[str, str]) -> None:
        self.mapping = mapping
        # every output name may be used at most once, otherwise two source
        # fields would collide on the same destination
        for new_key, count in Counter(mapping.values()).items():
            assert count == 1, f"Mapped key {new_key} occurs multiple time"
    def transform(self, data: DataEntry):
        for old_key, new_key in self.mapping.items():
            if old_key not in data:
                continue
            assert new_key not in data
            data[new_key] = data.pop(old_key)
        return data
class RemoveFields(SimpleTransformation):
    """Delete the listed fields from each entry, ignoring missing ones."""
    @validated()
    def __init__(self, field_names: List[str]) -> None:
        self.field_names = field_names
    def transform(self, data: DataEntry) -> DataEntry:
        for name in self.field_names:
            # pop with default: no error when the field is absent
            data.pop(name, None)
        return data
class SetField(SimpleTransformation):
    """
    Unconditionally set a field of every entry to a fixed value.

    Parameters
    ----------
    output_field
        Name of the field that will be set
    value
        Value to be set
    """
    @validated()
    def __init__(self, output_field: str, value: Any) -> None:
        self.output_field = output_field
        self.value = value
    def transform(self, data: DataEntry) -> DataEntry:
        data.update({self.output_field: self.value})
        return data
class SetFieldIfNotPresent(SimpleTransformation):
    """
    Set a field of every entry to a fixed value, but only when the entry
    does not already contain it.

    Parameters
    ----------
    field
        Name of the field that will be set
    value
        Value to be set
    """
    @validated()
    def __init__(self, field: str, value: Any) -> None:
        self.output_field = field
        self.value = value
    def transform(self, data: DataEntry) -> DataEntry:
        # setdefault only assigns when the key is absent
        data.setdefault(self.output_field, self.value)
        return data
class SelectFields(MapTransformation):
    """
    Project each entry down to the listed fields only.

    Parameters
    ----------
    input_fields
        List of fields to keep; all of them must be present in the entry.
    """
    @validated()
    def __init__(self, input_fields: List[str]) -> None:
        self.input_fields = input_fields
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        return {name: data[name] for name in self.input_fields}
| 3,366 | 27.294118 | 79 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/sampler.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from abc import ABC, abstractmethod
import numpy as np
from pts.core.component import validated
from pts.dataset.stat import ScaleHistogram
class InstanceSampler(ABC):
    """
    Base class for samplers of training-instance split points.

    An ``InstanceSampler`` is called with a time series and the valid index
    bounds ``a`` and ``b``, and returns a set of indices ``a <= i <= b``
    at which training instances will be generated.

    Parameters
    ----------
    ts
        target that should be sampled with shape (dim, seq_len)
    a
        first index of the target that can be sampled
    b
        last index of the target that can be sampled

    Returns
    -------
    np.ndarray
        Selected points to sample
    """
    @abstractmethod
    def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
        pass
class UniformSplitSampler(InstanceSampler):
    """
    Select every admissible index independently with fixed probability.

    Parameters
    ----------
    p
        Probability of selecting a time point
    """
    def __init__(self, p: float) -> None:
        self.p = p
    def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
        assert a <= b, "First index must be less than or equal to the last index."
        draws = np.random.random_sample(b - a + 1)
        # positions where the draw falls under p, shifted back to [a, b]
        return a + np.where(draws < self.p)[0]
class TestSplitSampler(InstanceSampler):
    """
    Sampler used for prediction: always selects the last admissible time
    point (the forecast start) for splitting.
    """
    def __init__(self) -> None:
        pass
    def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
        return np.asarray([b])
class ExpectedNumInstanceSampler(InstanceSampler):
    """
    Tracks the running average time-series length and chooses a per-point
    probability such that, on average, ``num_instances`` training examples
    are produced per series.

    Parameters
    ----------
    num_instances
        number of training examples generated per time series on average
    """
    @validated()
    def __init__(self, num_instances: float) -> None:
        self.num_instances = num_instances
        self.total_length = 0
        self.n = 0
    def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
        window_size = b - a + 1
        self.n += 1
        self.total_length += window_size  # TODO: prevent overflow (max 9223372036854775807)
        avg_length = self.total_length / self.n
        assert self.total_length >= 0
        # delegate the actual draw to a uniform sampler with adapted p
        return UniformSplitSampler(self.num_instances / avg_length)(ts, a, b)
class ExactNumInstanceSampler(InstanceSampler):
    """
    Draw exactly ``num_instances`` indices uniformly at random from the
    interval [a, b] of each time series.

    Parameters
    ----------
    num_instances
        number of indices drawn per call
    update_iter
        if positive, the drawn indices are cached and reused for
        ``update_iter`` consecutive calls before being resampled;
        if non-positive (default -1), fresh indices are drawn every call
    """
    @validated()
    def __init__(self, num_instances: int, update_iter: int = -1) -> None:
        # update_iter : update period
        self.num_instances = int(num_instances)
        self.update_iter = update_iter
        self.update_idx = 0  # position within the current cache period
        self.cache = None
    def __call__(self, ts: np.ndarray, a: float, b: float) -> np.ndarray:
        assert a <= b, "Interval start time must be before interval end time."
        if self.update_idx == 0:
            # vectorized draw instead of a per-element Python loop; also
            # returns an ndarray, matching the declared return type
            # (np.random.randint's `high` bound is exclusive, hence b + 1)
            self.cache = np.random.randint(a, b + 1, size=self.num_instances)
        if self.update_iter > 0:
            self.update_idx = (self.update_idx + 1) % self.update_iter
        return self.cache
class BucketInstanceSampler(InstanceSampler):
    """
    This sample can be used when working with a set of time series that have a
    skewed distributions. For instance, if the dataset contains many time series
    with small values and few with large values.
    The probability of sampling from bucket i is the inverse of its number of elements.
    Parameters
    ----------
    scale_histogram
        The histogram of scale for the time series. Here scale is the mean abs
        value of the time series.
    """
    def __init__(self, scale_histogram: ScaleHistogram) -> None:
        # probability of sampling a bucket i is the inverse of its number of
        # elements
        self.scale_histogram = scale_histogram
        # index lookup table, grown lazily to cover the longest series seen
        self.lookup = np.arange(2 ** 13)
    def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
        # NOTE: return annotation corrected from `None` — the method returns
        # the sampled indices.
        while ts.shape[-1] >= len(self.lookup):
            self.lookup = np.arange(2 * len(self.lookup))
        # inverse-frequency probability of this series' scale bucket
        p = 1.0 / self.scale_histogram.count(ts)
        mask = np.random.uniform(low=0.0, high=1.0, size=b - a + 1) < p
        indices = self.lookup[a : a + len(mask)][mask]
        return indices
class ContinuousTimePointSampler(ABC):
    """
    Base class for "continuous time" samplers: given a lower and an upper
    bound, subclasses sample "points" (events) in continuous time from the
    specified interval.
    """
    def __init__(self, num_instances: int) -> None:
        self.num_instances = num_instances
    @abstractmethod
    def __call__(self, a: float, b: float) -> np.ndarray:
        """
        Return random points in the real interval between :code:`a` and
        :code:`b`.

        Parameters
        ----------
        a
            The lower bound (minimum time value that a sampled point can take)
        b
            Upper bound. Must be greater than a.
        """
        pass
class ContinuousTimeUniformSampler(ContinuousTimePointSampler):
    """
    Draws ``num_instances`` points uniformly at random from the continuous
    interval between :code:`a` and :code:`b`.
    """
    def __call__(self, a: float, b: float) -> np.ndarray:
        assert a <= b, "Interval start time must be before interval end time."
        # rescale uniform draws on [0, 1) into [a, b)
        return a + (b - a) * np.random.rand(self.num_instances)
| 6,374 | 30.25 | 91 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/transform.py | from abc import ABC, abstractmethod
from functools import reduce
from typing import Callable, Iterator, Iterable, List
from pts.core.component import validated
from pts.dataset import DataEntry
MAX_IDLE_TRANSFORMS = 100
class Transformation(ABC):
    """
    Base class for transformations over streams of ``DataEntry`` dicts.

    Subclasses implement ``__call__`` to turn an iterable of entries into an
    iterator of (possibly modified) entries.
    """
    @abstractmethod
    def __call__(
        self, data_it: Iterable[DataEntry], is_train: bool
    ) -> Iterator[DataEntry]:
        pass
    def estimate(self, data_it: Iterable[DataEntry]) -> Iterator[DataEntry]:
        return data_it  # default is to pass through without estimation
    def chain(self, other: "Transformation") -> "Chain":
        # BUG FIX: Chain.__init__ takes a single list of transformations;
        # the previous `Chain(self, other)` raised TypeError at runtime.
        return Chain([self, other])
    def __add__(self, other: "Transformation") -> "Chain":
        return self.chain(other)
class Chain(Transformation):
    """
    Compose several transformations into one, applied in order.
    """
    @validated()
    def __init__(self, trans: List[Transformation]) -> None:
        self.transformations = []
        for t in trans:
            # nested chains are flattened into a single flat list
            if isinstance(t, Chain):
                self.transformations.extend(t.transformations)
            else:
                self.transformations.append(t)
    def __call__(
        self, data_it: Iterable[DataEntry], is_train: bool
    ) -> Iterator[DataEntry]:
        stream = data_it
        for transformation in self.transformations:
            stream = transformation(stream, is_train)
        return stream
    def estimate(self, data_it: Iterator[DataEntry]) -> Iterator[DataEntry]:
        # thread the stream through each transformation's estimate step
        return reduce(lambda acc, t: t.estimate(acc), self.transformations, data_it)
class Identity(Transformation):
    """Transformation that passes the input stream through unchanged."""
    def __call__(
        self, data_it: Iterable[DataEntry], is_train: bool
    ) -> Iterator[DataEntry]:
        return data_it
class MapTransformation(Transformation):
    """
    Base class for transformations that emit exactly one output entry per
    input entry.
    """
    def __call__(self, data_it: Iterable[DataEntry], is_train: bool) -> Iterator:
        for entry in data_it:
            # copy so that map_transform may mutate freely
            yield self.map_transform(entry.copy(), is_train)
    @abstractmethod
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        pass
class SimpleTransformation(MapTransformation):
    """
    Element-wise transformation that behaves identically in train and test
    mode; subclasses only implement :meth:`transform`.
    """
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        return self.transform(data)
    @abstractmethod
    def transform(self, data: DataEntry) -> DataEntry:
        pass
class AdhocTransform(SimpleTransformation):
    """
    Wrap an arbitrary function as a transformation.

    Called "ad-hoc" because it is not serializable; fine for experiments
    and outside of model pipelines that need serialization.
    """
    def __init__(self, func: Callable[[DataEntry], DataEntry]) -> None:
        self.func = func
    def transform(self, data: DataEntry) -> DataEntry:
        # hand the function its own copy so the original entry is untouched
        entry_copy = data.copy()
        return self.func(entry_copy)
class FlatMapTransformation(Transformation):
    """
    Base class for transformations that yield zero or more results per input
    entry without combining elements across the input stream.

    Raises if MAX_IDLE_TRANSFORMS consecutive inputs produce no output,
    which would otherwise make a cycling training loader spin forever.
    """
    def __call__(self, data_it: Iterable[DataEntry], is_train: bool) -> Iterator:
        idle_count = 0
        for entry in data_it:
            idle_count += 1
            for result in self.flatmap_transform(entry.copy(), is_train):
                idle_count = 0
                yield result
            if idle_count > MAX_IDLE_TRANSFORMS:
                raise Exception(
                    f"Reached maximum number of idle transformation calls.\n"
                    f"This means the transformation looped over "
                    f"MAX_IDLE_TRANSFORMS={MAX_IDLE_TRANSFORMS} "
                    f"inputs without returning any output.\n"
                    f"This occurred in the following transformation:\n{self}"
                )
    @abstractmethod
    def flatmap_transform(self, data: DataEntry, is_train: bool) -> Iterator[DataEntry]:
        pass
class FilterTransformation(FlatMapTransformation):
    """Keep only the entries for which ``condition`` holds."""
    def __init__(self, condition: Callable[[DataEntry], bool]) -> None:
        self.condition = condition
    def flatmap_transform(self, data: DataEntry, is_train: bool) -> Iterator[DataEntry]:
        if not self.condition(data):
            return
        yield data
| 4,564 | 30.923077 | 91 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/dataset.py | from typing import Iterator, List
from pts.dataset import DataEntry, Dataset
from .transform import Chain, Transformation
class TransformedDataset(Dataset):
    """
    Dataset obtained by applying a list of transformations to every element
    of a base dataset.

    Only supports SimpleTransformations, which behave the same at prediction
    and training time.

    Parameters
    ----------
    base_dataset
        Dataset to transform
    transformations
        List of transformations to apply
    """
    def __init__(
        self, base_dataset: Dataset, transformations: List[Transformation]
    ) -> None:
        self.base_dataset = base_dataset
        self.transformations = Chain(transformations)
    def __iter__(self) -> Iterator[DataEntry]:
        return iter(self.transformations(self.base_dataset, is_train=True))
    def __len__(self):
        # length requires a full pass over the transformed stream
        count = 0
        for _ in self:
            count += 1
        return count
| 918 | 26.029412 | 76 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/convert.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Iterator, List, Tuple, Optional
import numpy as np
import torch
from scipy.special import erf, erfinv
from pts.core.component import validated
from pts.dataset import DataEntry
from pts.exception import assert_pts
from .transform import (
SimpleTransformation,
MapTransformation,
FlatMapTransformation,
)
class AsNumpyArray(SimpleTransformation):
    """
    Converts the value of a field into a numpy array.
    Parameters
    ----------
    field
        Name of the field to convert.
    expected_ndim
        Expected number of dimensions. Throws an exception if the number of
        dimensions does not match.
    dtype
        numpy dtype to use.
    """
    @validated()
    def __init__(
        self, field: str, expected_ndim: int, dtype: np.dtype = np.float32
    ) -> None:
        self.field = field
        self.expected_ndim = expected_ndim
        self.dtype = dtype
    def transform(self, data: DataEntry) -> DataEntry:
        value = data[self.field]
        if not isinstance(value, float):
            # this lines produces "ValueError: setting an array element with a
            # sequence" on our test
            # value = np.asarray(value, dtype=np.float32)
            # see https://stackoverflow.com/questions/43863748/
            value = np.asarray(list(value), dtype=self.dtype)
        else:
            # ugly: required as list conversion will fail in the case of a
            # float
            value = np.asarray(value, dtype=self.dtype)
        # NOTE(review): the message below is not an f-string — presumably
        # assert_pts fills the {self.*}/{value.*} placeholders from the
        # `value=`/`self=` kwargs; verify against pts.exception.assert_pts
        assert_pts(
            value.ndim >= self.expected_ndim,
            'Input for field "{self.field}" does not have the required'
            "dimension (field: {self.field}, ndim observed: {value.ndim}, "
            "expected ndim: {self.expected_ndim})",
            value=value,
            self=self,
        )
        data[self.field] = value
        return data
class ExpandDimArray(SimpleTransformation):
    """
    Expand a dimension of the given field via ``np.expand_dims``; a no-op
    when no axis is specified.

    Parameters
    ----------
    field
        Field in dictionary to use
    axis
        Axis to expand (see np.expand_dims for details)
    """
    @validated()
    def __init__(self, field: str, axis: Optional[int] = None) -> None:
        self.field = field
        self.axis = axis
    def transform(self, data: DataEntry) -> DataEntry:
        # guard clause: nothing to do without an axis
        if self.axis is None:
            return data
        data[self.field] = np.expand_dims(data[self.field], axis=self.axis)
        return data
class VstackFeatures(SimpleTransformation):
    """
    Stack fields together using ``np.vstack``; fields whose value is
    ``None`` are skipped.

    Parameters
    ----------
    output_field
        Field name to use for the output
    input_fields
        Fields to stack together
    drop_inputs
        If set to true the input fields will be dropped.
    """
    @validated()
    def __init__(
        self, output_field: str, input_fields: List[str], drop_inputs: bool = True,
    ) -> None:
        self.output_field = output_field
        self.input_fields = input_fields
        if drop_inputs:
            # never drop the output field itself, even when it is an input
            self.cols_to_drop = [f for f in input_fields if f != output_field]
        else:
            self.cols_to_drop = []
    def transform(self, data: DataEntry) -> DataEntry:
        stacked = np.vstack(
            [data[name] for name in self.input_fields if data[name] is not None]
        )
        data[self.output_field] = stacked
        for name in self.cols_to_drop:
            del data[name]
        return data
class ConcatFeatures(SimpleTransformation):
    """
    Concatenate fields together using ``np.concatenate``; fields whose
    value is ``None`` are skipped.

    Parameters
    ----------
    output_field
        Field name to use for the output
    input_fields
        Fields to stack together
    drop_inputs
        If set to true the input fields will be dropped.
    """
    @validated()
    def __init__(
        self, output_field: str, input_fields: List[str], drop_inputs: bool = True,
    ) -> None:
        self.output_field = output_field
        self.input_fields = input_fields
        if drop_inputs:
            # never drop the output field itself, even when it is an input
            self.cols_to_drop = [f for f in input_fields if f != output_field]
        else:
            self.cols_to_drop = []
    def transform(self, data: DataEntry) -> DataEntry:
        joined = np.concatenate(
            [data[name] for name in self.input_fields if data[name] is not None]
        )
        data[self.output_field] = joined
        for name in self.cols_to_drop:
            del data[name]
        return data
class SwapAxes(SimpleTransformation):
    """
    Apply `np.swapaxes` to the given fields, recursing into lists of arrays.

    Parameters
    ----------
    input_fields
        Field to apply to
    axes
        Axes to use
    """
    @validated()
    def __init__(self, input_fields: List[str], axes: Tuple[int, int]) -> None:
        self.input_fields = input_fields
        self.axis1, self.axis2 = axes
    def transform(self, data: DataEntry) -> DataEntry:
        for name in self.input_fields:
            data[name] = self.swap(data[name])
        return data
    def swap(self, v):
        if isinstance(v, np.ndarray):
            return np.swapaxes(v, self.axis1, self.axis2)
        if isinstance(v, list):
            # recurse element-wise into lists of arrays
            return [self.swap(item) for item in v]
        raise ValueError(
            f"Unexpected field type {type(v).__name__}, expected "
            f"np.ndarray or list[np.ndarray]"
        )
class ListFeatures(SimpleTransformation):
    """
    Create a new field holding the listed input fields as a Python list.

    Parameters
    ----------
    output_field
        Field name for output
    input_fields
        Fields to combine into list
    drop_inputs
        If true the input fields will be removed from the result.
    """
    @validated()
    def __init__(
        self, output_field: str, input_fields: List[str], drop_inputs: bool = True,
    ) -> None:
        self.output_field = output_field
        self.input_fields = input_fields
        if drop_inputs:
            # never drop the output field itself, even when it is an input
            self.cols_to_drop = [f for f in input_fields if f != output_field]
        else:
            self.cols_to_drop = []
    def transform(self, data: DataEntry) -> DataEntry:
        data[self.output_field] = [data[name] for name in self.input_fields]
        for name in self.cols_to_drop:
            del data[name]
        return data
class TargetDimIndicator(SimpleTransformation):
    """
    Adds a field with the integer label of each target dimension
    (0 .. target_dim - 1).
    """
    @validated()
    def __init__(self, field_name: str, target_field: str) -> None:
        self.field_name = field_name
        self.target_field = target_field
    def transform(self, data: DataEntry) -> DataEntry:
        target_dim = data[self.target_field].shape[0]
        data[self.field_name] = np.arange(0, target_dim)
        return data
class SampleTargetDim(FlatMapTransformation):
    """
    Samples random dimensions from the target at training time.
    At inference time entries pass through unchanged.
    """
    @validated()
    def __init__(
        self,
        field_name: str,
        target_field: str,
        observed_values_field: str,
        num_samples: int,
        shuffle: bool = True,
    ) -> None:
        self.field_name = field_name
        self.target_field = target_field
        self.observed_values_field = observed_values_field
        self.num_samples = num_samples
        self.shuffle = shuffle
    def flatmap_transform(
        self, data: DataEntry, is_train: bool, slice_future_target: bool = True
    ) -> Iterator[DataEntry]:
        if not is_train:
            yield data
        else:
            # (target_dim,)
            target_dimensions = data[self.field_name]
            if self.shuffle:
                # NOTE: np.random.shuffle mutates the array stored in
                # data[self.field_name] in place
                np.random.shuffle(target_dimensions)
            target_dimensions = target_dimensions[: self.num_samples]
            data[self.field_name] = target_dimensions
            # (seq_len, target_dim) -> (seq_len, num_samples)
            for field in [
                f"past_{self.target_field}",
                f"future_{self.target_field}",
                f"past_{self.observed_values_field}",
                f"future_{self.observed_values_field}",
            ]:
                data[field] = data[field][:, target_dimensions]
            yield data
class CDFtoGaussianTransform(MapTransformation):
"""
Marginal transformation that transforms the target via an empirical CDF
to a standard gaussian as described here: https://arxiv.org/abs/1910.03002
To be used in conjunction with a multivariate gaussian to from a copula.
Note that this transformation is currently intended for multivariate
targets only.
"""
@validated()
def __init__(
self,
target_dim: int,
target_field: str,
observed_values_field: str,
cdf_suffix="_cdf",
max_context_length: Optional[int] = None,
) -> None:
"""
Constructor for CDFtoGaussianTransform.
Parameters
----------
target_dim
Dimensionality of the target.
target_field
Field that will be transformed.
observed_values_field
Field that indicates observed values.
cdf_suffix
Suffix to mark the field with the transformed target.
max_context_length
Sets the maximum context length for the empirical CDF.
"""
self.target_field = target_field
self.past_target_field = "past_" + self.target_field
self.future_target_field = "future_" + self.target_field
self.past_observed_field = f"past_{observed_values_field}"
self.sort_target_field = f"past_{target_field}_sorted"
self.slopes_field = "slopes"
self.intercepts_field = "intercepts"
self.cdf_suffix = cdf_suffix
self.max_context_length = max_context_length
self.target_dim = target_dim
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        """
        Transform past and future targets through the empirical CDF and the
        standard-Gaussian PPF, writing the result to ``<field><cdf_suffix>``.
        """
        # 1) normalize/sort the past target, 2) fit the piece-wise linear CDF
        self._preprocess_data(data, is_train=is_train)
        self._calc_pw_linear_params(data)
        for target_field in [self.past_target_field, self.future_target_field]:
            # empirical CDF -> [0, 1] -> standard Gaussian via inverse CDF
            data[target_field + self.cdf_suffix] = self.standard_gaussian_ppf(
                self._empirical_cdf_forward_transform(
                    data[self.sort_target_field],
                    data[target_field],
                    data[self.slopes_field],
                    data[self.intercepts_field],
                )
            )
        return data
    def _preprocess_data(self, data: DataEntry, is_train: bool):
        """
        Performs several preprocess operations for computing the empirical CDF.
        1) Reshaping the data.
        2) Normalizing the target length.
        3) Adding noise to avoid zero slopes (training only)
        4) Sorting the target to compute the empirical CDF
        Parameters
        ----------
        data
            DataEntry with input data.
        is_train
            if is_train is True, this function adds noise to the target to
            avoid zero slopes in the piece-wise linear function.
        Returns
        -------
        """
        # (target_length, target_dim)
        past_target_vec = data[self.past_target_field].copy()
        # pick only observed values
        target_length, target_dim = past_target_vec.shape
        # (target_length, target_dim)
        # observed AND not padding
        past_observed = (data[self.past_observed_field] > 0) * (
            data["past_is_pad"].reshape((-1, 1)) == 0
        )
        assert past_observed.ndim == 2
        assert target_dim == self.target_dim
        # keep only rows where every dimension is observed
        past_target_vec = past_target_vec[past_observed.min(axis=1)]
        assert past_target_vec.ndim == 2
        assert past_target_vec.shape[1] == self.target_dim
        expected_length = (
            target_length
            if self.max_context_length is None
            else self.max_context_length
        )
        if target_length != expected_length:
            # Fills values in the case where past_target_vec.shape[-1] <
            # target_length
            # as dataset.loader.BatchBuffer does not support varying shapes
            past_target_vec = CDFtoGaussianTransform._fill(
                past_target_vec, expected_length
            )
        # sorts along the time dimension to compute empirical CDF of each
        # dimension
        if is_train:
            past_target_vec = self._add_noise(past_target_vec)
        past_target_vec.sort(axis=0)
        assert past_target_vec.shape == (expected_length, self.target_dim)
        data[self.sort_target_field] = past_target_vec
    def _calc_pw_linear_params(self, data: DataEntry):
        """
        Calculates the piece-wise linear parameters to interpolate between
        the observed values in the empirical CDF.
        Once current limitation is that we use a zero slope line as the last
        piece. Thus, we cannot forecast anything higher than the highest
        observed value.
        Parameters
        ----------
        data
            Input data entry containing a sorted target field.
        Returns
        -------
        """
        sorted_target = data[self.sort_target_field]
        sorted_target_length, target_dim = sorted_target.shape
        # quantile levels k/n for each dimension, shape (n, target_dim)
        quantiles = np.stack(
            [np.arange(sorted_target_length) for _ in range(target_dim)], axis=1,
        ) / float(sorted_target_length)
        x_diff = np.diff(sorted_target, axis=0)
        y_diff = np.diff(quantiles, axis=0)
        # Calculate slopes of the pw-linear pieces.
        # zero slope where consecutive sorted values coincide (avoids 0/0)
        slopes = np.where(x_diff == 0.0, np.zeros_like(x_diff), y_diff / x_diff)
        # append a final zero-slope piece beyond the largest observation
        zeroes = np.zeros_like(np.expand_dims(slopes[0, :], axis=0))
        slopes = np.append(slopes, zeroes, axis=0)
        # Calculate intercepts of the pw-linear pieces.
        intercepts = quantiles - slopes * sorted_target
        # Populate new fields with the piece-wise linear parameters.
        data[self.slopes_field] = slopes
        data[self.intercepts_field] = intercepts
    def _empirical_cdf_forward_transform(
        self,
        sorted_values: np.ndarray,
        values: np.ndarray,
        slopes: np.ndarray,
        intercepts: np.ndarray,
    ) -> np.ndarray:
        """
        Applies the empirical CDF forward transformation.
        Parameters
        ----------
        sorted_values
            Sorted target vector.
        values
            Values (real valued) that will be transformed to empirical CDF
            values.
        slopes
            Slopes of the piece-wise linear function.
        intercepts
            Intercepts of the piece-wise linear function.
        Returns
        -------
        quantiles
            Empirical CDF quantiles in [0, 1] interval with winzorized cutoff.
        """
        m = sorted_values.shape[0]
        quantiles = self._forward_transform(sorted_values, values, slopes, intercepts)
        # truncate extreme quantiles to reduce estimator variance
        quantiles = np.clip(
            quantiles, self.winsorized_cutoff(m), 1 - self.winsorized_cutoff(m)
        )
        return quantiles
@staticmethod
def _add_noise(x: np.array) -> np.array:
scale_noise = 0.2
std = np.sqrt(
(np.square(x - x.mean(axis=1, keepdims=True))).mean(axis=1, keepdims=True)
)
noise = np.random.normal(
loc=np.zeros_like(x), scale=np.ones_like(x) * std * scale_noise
)
x = x + noise
return x
    @staticmethod
    def _search_sorted(sorted_vec: np.array, to_insert_vec: np.array) -> np.array:
        """
        Finds the indices of the active piece-wise linear function.
        Parameters
        ----------
        sorted_vec
            Sorted target vector.
        to_insert_vec
            Vector for which the indicies of the active linear functions
            will be computed
        Returns
        -------
        indices
            Indices mapping to the active linear function.
        """
        # midpoint between the left and right insertion points handles runs
        # of equal values in sorted_vec
        indices_left = np.searchsorted(sorted_vec, to_insert_vec, side="left")
        indices_right = np.searchsorted(sorted_vec, to_insert_vec, side="right")
        indices = indices_left + (indices_right - indices_left) // 2
        # shift to the piece left of the insertion point
        indices = indices - 1
        # clamp to the valid piece range [0, len - 1]
        indices = np.minimum(indices, len(sorted_vec) - 1)
        indices[indices < 0] = 0
        return indices
def _forward_transform(
self,
sorted_vec: np.array,
target: np.array,
slopes: np.array,
intercepts: np.array,
) -> np.array:
"""
Applies the forward transformation to the marginals of the multivariate
target. Target (real valued) -> empirical cdf [0, 1]
Parameters
----------
sorted_vec
Sorted (past) target vector.
target
Target that will be transformed.
slopes
Slopes of the piece-wise linear function.
intercepts
Intercepts of the piece-wise linear function
Returns
-------
transformed_target
Transformed target vector.
"""
transformed = list()
for sorted, t, slope, intercept in zip(
sorted_vec.transpose(),
target.transpose(),
slopes.transpose(),
intercepts.transpose(),
):
indices = self._search_sorted(sorted, t)
transformed_value = slope[indices] * t + intercept[indices]
transformed.append(transformed_value)
return np.array(transformed).transpose()
@staticmethod
def standard_gaussian_cdf(x: np.array) -> np.array:
u = x / (np.sqrt(2.0))
return (erf(u) + 1.0) / 2.0
@staticmethod
def standard_gaussian_ppf(y: np.array) -> np.array:
y_clipped = np.clip(y, a_min=1.0e-6, a_max=1.0 - 1.0e-6)
return np.sqrt(2.0) * erfinv(2.0 * y_clipped - 1.0)
@staticmethod
def winsorized_cutoff(m: np.array) -> np.array:
"""
Apply truncation to the empirical CDF estimator to reduce variance as
described here: https://arxiv.org/abs/0903.0649
Parameters
----------
m
Input array with empirical CDF values.
Returns
-------
res
Truncated empirical CDf values.
"""
res = 1 / (4 * m ** 0.25 * np.sqrt(3.14 * np.log(m)))
assert 0 < res < 1
return res
@staticmethod
def _fill(target: np.ndarray, expected_length: int) -> np.ndarray:
"""
Makes sure target has at least expected_length time-units by repeating
it or using zeros.
Parameters
----------
target : shape (seq_len, dim)
expected_length
Returns
-------
array of shape (target_length, dim)
"""
current_length, target_dim = target.shape
if current_length == 0:
# todo handle the case with no observation better,
# we could use dataset statistics but for now we use zeros
filled_target = np.zeros((expected_length, target_dim))
elif current_length < expected_length:
filled_target = np.vstack(
[target for _ in range(expected_length // current_length + 1)]
)
filled_target = filled_target[:expected_length]
elif current_length > expected_length:
filled_target = target[-expected_length:]
else:
filled_target = target
assert filled_target.shape == (expected_length, target_dim)
return filled_target
def cdf_to_gaussian_forward_transform(
    input_batch: DataEntry, outputs: torch.Tensor
) -> np.ndarray:
    """
    Forward transformation of the CDFtoGaussianTransform.
    Maps Gaussian-space predictor samples back to the original target scale
    by inverting the per-marginal empirical CDF, sample by sample (in place).
    Parameters
    ----------
    input_batch
        Input data to the predictor. Must carry the "past_target_sorted",
        "slopes" and "intercepts" fields produced by CDFtoGaussianTransform.
    outputs
        Predictor outputs of shape (batch, num_samples, time, dim).
    Returns
    -------
    outputs
        Forward transformed outputs.
    """
    def _empirical_cdf_inverse_transform(
        batch_target_sorted: torch.Tensor,
        batch_predictions: torch.Tensor,
        slopes: torch.Tensor,
        intercepts: torch.Tensor,
    ) -> np.ndarray:
        """
        Apply the inverse transformation of the empirical CDF
        (quantiles in [0, 1] -> real-valued targets).
        Parameters
        ----------
        batch_target_sorted
            Sorted targets of the input batch.
        batch_predictions
            Predictions of the underlying probability distribution
        slopes
            Slopes of the piece-wise linear function.
        intercepts
            Intercepts of the piece-wise linear function.
        Returns
        -------
        outputs
            Inverse transformed outputs.
        """
        slopes = slopes.cpu().numpy()
        intercepts = intercepts.cpu().numpy()
        batch_target_sorted = batch_target_sorted.cpu().numpy()
        _, num_timesteps, _ = batch_target_sorted.shape
        # map each quantile to the index of its piece-wise linear segment
        indices = np.floor(batch_predictions * num_timesteps)
        # indices = indices - 1
        # for now project into [0, 1]
        indices = np.clip(indices, 0, num_timesteps - 1)
        # `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent
        indices = indices.astype(int)
        # invert the linear piece where its slope is non-zero; otherwise fall
        # back to the sorted target value at that index
        transformed = np.where(
            np.take_along_axis(slopes, indices, axis=1) != 0.0,
            (batch_predictions - np.take_along_axis(intercepts, indices, axis=1))
            / np.take_along_axis(slopes, indices, axis=1),
            np.take_along_axis(batch_target_sorted, indices, axis=1),
        )
        return transformed
    # applies inverse cdf to all outputs, one sample index at a time
    _, samples, _, _ = outputs.shape
    for sample_index in range(0, samples):
        outputs[:, sample_index, :, :] = _empirical_cdf_inverse_transform(
            input_batch["past_target_sorted"],
            CDFtoGaussianTransform.standard_gaussian_cdf(
                outputs[:, sample_index, :, :]
            ),
            input_batch["slopes"],
            input_batch["intercepts"],
        )
    return outputs
| 22,563 | 30.602241 | 86 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/__init__.py | from .convert import (
AsNumpyArray,
ExpandDimArray,
VstackFeatures,
ConcatFeatures,
SwapAxes,
ListFeatures,
TargetDimIndicator,
SampleTargetDim,
CDFtoGaussianTransform,
cdf_to_gaussian_forward_transform,
)
from .dataset import TransformedDataset
from .feature import (
target_transformation_length,
AddObservedValuesIndicator,
AddConstFeature,
AddTimeFeatures,
AddAgeFeature,
)
from .field import (
RemoveFields,
RenameFields,
SetField,
SetFieldIfNotPresent,
SelectFields,
)
from .sampler import (
InstanceSampler,
UniformSplitSampler,
TestSplitSampler,
ExpectedNumInstanceSampler,
ExactNumInstanceSampler,
BucketInstanceSampler,
ContinuousTimePointSampler,
ContinuousTimeUniformSampler,
)
from .split import (
shift_timestamp,
InstanceSplitter,
CanonicalInstanceSplitter,
ContinuousTimeInstanceSplitter,
)
from .transform import (
Transformation,
Chain,
Identity,
MapTransformation,
SimpleTransformation,
AdhocTransform,
FlatMapTransformation,
FilterTransformation,
)
| 1,130 | 19.944444 | 39 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/feature.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List
import numpy as np
import pandas as pd
from pts.core.component import validated
from pts.dataset import DataEntry
from pts.feature import TimeFeature
from .split import shift_timestamp
from .transform import SimpleTransformation, MapTransformation
def target_transformation_length(
    target: np.array, pred_length: int, is_train: bool
) -> int:
    """Length of the feature time axis: the target length, extended by the
    prediction horizon when not training (``is_train=False``)."""
    horizon = 0 if is_train else pred_length
    return target.shape[-1] + horizon
class AddObservedValuesIndicator(SimpleTransformation):
    """
    Replaces missing values in a numpy array (NaNs) with a dummy value and adds
    an "observed"-indicator that is ``1`` when values are observed and ``0``
    when values are missing.
    Parameters
    ----------
    target_field
        Field for which missing values will be replaced
    output_field
        Field name to use for the indicator
    dummy_value
        Value to use for replacing missing values.
    convert_nans
        If set to true (default) missing values will be replaced. Otherwise
        they will not be replaced. In any case the indicator is included in the
        result.
    """
    @validated()
    def __init__(
        self,
        target_field: str,
        output_field: str,
        dummy_value: int = 0,
        convert_nans: bool = True,
        dtype: np.dtype = np.float32,
    ) -> None:
        self.dummy_value = dummy_value
        self.target_field = target_field
        self.output_field = output_field
        self.convert_nans = convert_nans
        self.dtype = dtype
    def transform(self, data: DataEntry) -> DataEntry:
        value = data[self.target_field]
        # compute the NaN mask once and reuse it for both the in-place
        # replacement and the indicator (the original evaluated np.isnan twice)
        nan_entries = np.isnan(value)
        if self.convert_nans:
            value[nan_entries] = self.dummy_value
            data[self.target_field] = value
        # invert the mask so observed values are ones, missing values zeros
        data[self.output_field] = np.invert(nan_entries).astype(self.dtype)
        return data
class AddConstFeature(MapTransformation):
    """
    Expands a `const` value along the time axis as a dynamic feature, where
    the T-dimension is defined as the sum of the `pred_length` parameter and
    the length of a time series specified by the `target_field`.
    If `is_train=True` the feature matrix has the same length as the `target` field.
    If `is_train=False` the feature matrix has length len(target) + pred_length
    Parameters
    ----------
    output_field
        Field name for output.
    target_field
        Field containing the target array. The length of this array will be used.
    pred_length
        Prediction length (this is necessary since
        features have to be available in the future)
    const
        Constant value to use.
    dtype
        Numpy dtype to use for resulting array.
    """
    @validated()
    def __init__(
        self,
        output_field: str,
        target_field: str,
        pred_length: int,
        const: float = 1.0,
        dtype: np.dtype = np.float32,
    ) -> None:
        self.pred_length = pred_length
        self.const = const
        self.dtype = dtype
        self.output_field = output_field
        self.target_field = target_field
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        num_steps = target_transformation_length(
            data[self.target_field], self.pred_length, is_train=is_train
        )
        # a (1, T) feature matrix filled with the constant
        constant_row = np.ones(shape=(1, num_steps), dtype=self.dtype)
        data[self.output_field] = self.const * constant_row
        return data
class AddTimeFeatures(MapTransformation):
    """
    Adds a set of time features.
    If `is_train=True` the feature matrix has the same length as the `target` field.
    If `is_train=False` the feature matrix has length len(target) + pred_length
    Parameters
    ----------
    start_field
        Field with the start time stamp of the time series
    target_field
        Field with the array containing the time series values
    output_field
        Field name for result.
    time_features
        list of time features to use.
    pred_length
        Prediction length
    """
    @validated()
    def __init__(
        self,
        start_field: str,
        target_field: str,
        output_field: str,
        time_features: List[TimeFeature],
        pred_length: int,
    ) -> None:
        self.date_features = time_features
        self.pred_length = pred_length
        self.start_field = start_field
        self.target_field = target_field
        self.output_field = output_field
        # cached date range and pre-computed features, grown lazily as new
        # (start, length) windows are requested across data entries
        self._min_time_point: pd.Timestamp = None
        self._max_time_point: pd.Timestamp = None
        self._full_range_date_features: np.ndarray = None
        self._date_index: pd.DatetimeIndex = None
    def _update_cache(self, start: pd.Timestamp, length: int) -> None:
        """Ensure the cached feature matrix covers [start, start + length]."""
        end = shift_timestamp(start, length)
        if self._min_time_point is not None:
            # cache hit: the requested window is already covered
            if self._min_time_point <= start and end <= self._max_time_point:
                return
        if self._min_time_point is None:
            self._min_time_point = start
            self._max_time_point = end
        # grow the cached window with a 50-step margin on both sides so that
        # nearby requests do not trigger a recomputation each time
        self._min_time_point = min(shift_timestamp(start, -50), self._min_time_point)
        self._max_time_point = max(shift_timestamp(end, 50), self._max_time_point)
        self.full_date_range = pd.date_range(
            self._min_time_point, self._max_time_point, freq=start.freq
        )
        # matrix of shape (num_features, num_dates), or None when no features
        self._full_range_date_features = (
            np.vstack([feat(self.full_date_range) for feat in self.date_features])
            if self.date_features
            else None
        )
        # maps a timestamp to its column index in the cached feature matrix
        self._date_index = pd.Series(
            index=self.full_date_range, data=np.arange(len(self.full_date_range)),
        )
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        start = data[self.start_field]
        length = target_transformation_length(
            data[self.target_field], self.pred_length, is_train=is_train
        )
        self._update_cache(start, length)
        # slice the cached matrix at the entry's start position
        i0 = self._date_index[start]
        features = (
            self._full_range_date_features[..., i0 : i0 + length]
            if self.date_features
            else None
        )
        data[self.output_field] = features
        return data
class AddAgeFeature(MapTransformation):
    """
    Adds an 'age' feature to the data_entry.
    The age feature starts with a small value at the start of the time series
    and grows over time.
    If `is_train=True` the age feature has the same length as the `target`
    field.
    If `is_train=False` the age feature has length len(target) + pred_length
    Parameters
    ----------
    target_field
        Field with target values (array) of time series
    output_field
        Field name to use for the output.
    pred_length
        Prediction length
    log_scale
        If set to true the age feature grows logarithmically otherwise linearly
        over time.
    """
    @validated()
    def __init__(
        self,
        target_field: str,
        output_field: str,
        pred_length: int,
        log_scale: bool = True,
        dtype: np.dtype = np.float32,
    ) -> None:
        self.pred_length = pred_length
        self.target_field = target_field
        self.feature_name = output_field
        self.log_scale = log_scale
        self._age_feature = np.zeros(0)
        self.dtype = dtype
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        num_steps = target_transformation_length(
            data[self.target_field], self.pred_length, is_train=is_train
        )
        steps = np.arange(num_steps, dtype=self.dtype)
        if self.log_scale:
            # logarithmic growth; offset by 2 so the first value is log10(2)
            age = np.log10(2.0 + steps)
        else:
            age = steps
        data[self.feature_name] = age.reshape((1, num_steps))
        return data
| 8,489 | 31.906977 | 85 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/quantile.py | import re
from typing import NamedTuple, Union
class Quantile(NamedTuple):
    """A quantile level paired with its canonical string name."""
    value: float
    name: str
    @property
    def loss_name(self):
        return f"QuantileLoss[{self.name}]"
    @property
    def weighted_loss_name(self):
        return f"wQuantileLoss[{self.name}]"
    @property
    def coverage_name(self):
        return f"Coverage[{self.name}]"
    @classmethod
    def checked(cls, value: float, name: str) -> "Quantile":
        """Construct a Quantile, validating that the level lies in [0, 1]."""
        if not 0 <= value <= 1:
            raise Exception(f"quantile value should be in [0, 1] but found {value}")
        return Quantile(value, name)
    @classmethod
    def from_float(cls, quantile: float) -> "Quantile":
        """Build from a float level; the name is its str() form."""
        assert isinstance(quantile, float)
        return cls.checked(value=quantile, name=str(quantile))
    @classmethod
    def from_str(cls, quantile: str) -> "Quantile":
        """Build from a string level, either '0.1'-style or 'p10'-style."""
        assert isinstance(quantile, str)
        try:
            return cls.checked(value=float(quantile), name=quantile)
        except ValueError:
            # not a plain float: accept the "pNN" percentile notation
            m = re.match(r"^p(\d{2})$", quantile)
            if m is None:
                raise Exception(
                    "Quantile string should be of the form "
                    f'"p10", "p50", ... or "0.1", "0.5", ... but found {quantile}'
                )
            quantile_float: float = int(m.group(1)) / 100
            return cls(value=quantile_float, name=str(quantile_float))
    @classmethod
    def parse(cls, quantile: Union["Quantile", float, str]) -> "Quantile":
        """Produces equivalent float and string representation of a given
        quantile level.
        >>> Quantile.parse(0.1)
        Quantile(value=0.1, name='0.1')
        >>> Quantile.parse('0.2')
        Quantile(value=0.2, name='0.2')
        >>> Quantile.parse('0.20')
        Quantile(value=0.2, name='0.20')
        >>> Quantile.parse('p99')
        Quantile(value=0.99, name='0.99')
        Parameters
        ----------
        quantile
            Quantile, can be a float a str representing a float e.g. '0.1' or a
            quantile string of the form 'p0.1'.
        Returns
        -------
        Quantile
            A tuple containing both a float and a string representation of the
            input quantile level.
        """
        if isinstance(quantile, Quantile):
            return quantile
        if isinstance(quantile, float):
            return cls.from_float(quantile)
        return cls.from_str(quantile)
| 2,496 | 28.376471 | 84 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/predictor.py | import json
from abc import ABC, abstractmethod
from pathlib import Path
from pydoc import locate
from typing import Iterator, Callable, Optional
import numpy as np
import torch
import torch.nn as nn
import pts
from pts.core.serde import dump_json, fqname_for, load_json
from pts.dataset import Dataset, DataEntry, InferenceDataLoader
from pts.transform import Transformation
from .forecast import Forecast
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from .utils import get_module_forward_input_names
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor(ABC):
    """
    Abstract base class for objects that produce forecasts from a dataset,
    with (de)serialization support.
    """
    __version__: str = pts.__version__
    def __init__(self, prediction_length: int, freq: str) -> None:
        self.prediction_length = prediction_length
        self.freq = freq
    @abstractmethod
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        pass
    def serialize(self, path: Path) -> None:
        """Write the concrete Predictor type and version info to ``path``."""
        # record the fully-qualified class name so deserialize() can dispatch
        with (path / "type.txt").open("w") as fp:
            fp.write(fqname_for(self.__class__))
        # record model/library versions for compatibility checks
        with (path / "version.json").open("w") as fp:
            json.dump(
                {"model": self.__version__, "pts": pts.__version__}, fp
            )
    @classmethod
    def deserialize(
        cls, path: Path, device: Optional[torch.device] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path
        Parameters
        ----------
        path
            Path to the serialized files predictor.
        device
            Optional pytorch to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # look up the concrete Predictor subclass that was serialized
        with (path / "type.txt").open("r") as fp:
            tpe = locate(fp.readline())
        if not issubclass(tpe, Predictor):
            raise IOError(
                f"Class {fqname_for(tpe)} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )
        # delegate to the concrete type's own deserialize()
        return tpe.deserialize(path, device)
class PTSPredictor(Predictor):
    """
    Predictor wrapping a trained PyTorch prediction network together with the
    input transformation chain and a forecast generator.
    Parameters
    ----------
    prediction_net
        Trained network whose forward() produces forecasts.
    batch_size
        Batch size used by the inference data loader.
    prediction_length
        Number of time steps to predict.
    freq
        Pandas frequency string of the time series.
    device
        Device the network and input batches live on.
    input_transform
        Transformation applied to raw dataset entries before inference.
    forecast_generator
        Strategy that converts network outputs into Forecast objects.
    output_transform
        Optional post-processing applied to raw network outputs.
    dtype
        Numpy dtype used when batching inputs.
    """
    def __init__(
        self,
        prediction_net: nn.Module,
        batch_size: int,
        prediction_length: int,
        freq: str,
        device: torch.device,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: np.dtype = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        # input names are derived from the network's forward() signature
        self.input_names = get_module_forward_input_names(prediction_net)
        self.prediction_net = prediction_net
        self.batch_size = batch_size
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.device = device
        self.dtype = dtype
    def predict(
        self, dataset: Dataset, num_samples: Optional[int] = None
    ) -> Iterator[Forecast]:
        """Yield one Forecast per dataset entry, running in eval/no-grad mode."""
        inference_data_loader = InferenceDataLoader(
            dataset,
            self.input_transform,
            self.batch_size,
            device=self.device,
            dtype=self.dtype,
        )
        self.prediction_net.eval()
        with torch.no_grad():
            yield from self.forecast_generator(
                inference_data_loader=inference_data_loader,
                prediction_net=self.prediction_net,
                input_names=self.input_names,
                freq=self.freq,
                output_transform=self.output_transform,
                num_samples=num_samples,
            )
    def serialize(self, path: Path) -> None:
        # writes type.txt and version.json via the base class
        super().serialize(path)
        # serialize network: JSON config plus a separate state-dict file
        model_name = 'prediction_net'
        with (path / f"{model_name}-network.json").open("w") as fp:
            print(dump_json(self.prediction_net), file=fp)
        torch.save(self.prediction_net.state_dict(), path / "prediction_net")
        # serialize input transformation chain
        with (path / "input_transform.json").open("w") as fp:
            print(dump_json(self.input_transform), file=fp)
        # serialize output transformation chain
        with (path / "output_transform.json").open("w") as fp:
            print(dump_json(self.output_transform), file=fp)
        # serialize all remaining constructor parameters
        with (path / "parameters.json").open("w") as fp:
            parameters = dict(
                batch_size=self.batch_size,
                prediction_length=self.prediction_length,
                freq=self.freq,
                dtype=self.dtype,
                forecast_generator=self.forecast_generator,
                input_names=self.input_names,
            )
            print(dump_json(parameters), file=fp)
    @classmethod
    def deserialize(
        cls, path: Path, device: Optional[torch.device] = None
    ) -> "PTSPredictor":
        # deserialize constructor parameters
        with (path / "parameters.json").open("r") as fp:
            parameters = load_json(fp.read())
        # deserialize transformation chain
        with (path / "input_transform.json").open("r") as fp:
            transformation = load_json(fp.read())
        # deserialize prediction network, then restore its weights
        model_name = 'prediction_net'
        with (path / f"{model_name}-network.json").open("r") as fp:
            prediction_net = load_json(fp.read())
        prediction_net.load_state_dict(torch.load(path / "prediction_net"))
        # input_names is derived from the prediction_net in __init__,
        # so drop the serialized copy to avoid passing it twice
        if "input_names" in parameters:
            del parameters["input_names"]
        parameters["device"] = device
        return PTSPredictor(
            input_transform=transformation,
            prediction_net=prediction_net.to(device),
            **parameters
        )
| 6,040 | 33.129944 | 81 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/forecast_generator.py | from abc import ABC, abstractmethod
from typing import Any, Callable, Iterator, List, Optional
import numpy as np
import torch
import torch.nn as nn
from pts.core.component import validated
from pts.dataset import InferenceDataLoader, DataEntry, FieldName
from pts.modules import DistributionOutput
from .forecast import Forecast, DistributionForecast, QuantileForecast, SampleForecast
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
def _extract_instances(x: Any) -> Any:
    """
    Yield the individual instances contained in a batched result ``x``.
    For an array/tensor ``a``:
        _extract_instances(a) -> a[0], a[1], ...
    For (nested) tuples/lists of arrays, e.g. ``(a, (b, c))``:
        -> (a[0], (b[0], c[0])), (a[1], (b[1], c[1])), ...
    ``None`` yields ``None`` indefinitely (useful when zipped with others).
    """
    if isinstance(x, (np.ndarray, torch.Tensor)):
        yield from (x[i] for i in range(x.shape[0]))
    elif isinstance(x, tuple):
        for group in zip(*(_extract_instances(elem) for elem in x)):
            yield tuple(group)
    elif isinstance(x, list):
        for group in zip(*(_extract_instances(elem) for elem in x)):
            yield list(group)
    else:
        # anything other than array/tensor/tuple/list/None is unsupported
        assert x is None
        while True:
            yield None
class ForecastGenerator(ABC):
    """
    Classes used to bring the output of a network into a class.
    Concrete subclasses consume batches from the inference data loader, run
    ``prediction_net`` on them, and yield one Forecast per series.
    """
    @abstractmethod
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        pass
class DistributionForecastGenerator(ForecastGenerator):
    """Converts raw network outputs into ``DistributionForecast`` objects via
    the provided ``DistributionOutput``."""
    def __init__(self, distr_output: DistributionOutput) -> None:
        self.distr_output = distr_output
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[DistributionForecast]:
        for batch in inference_data_loader:
            net_args = [batch[name] for name in input_names]
            outputs = prediction_net(*net_args)
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            # one distribution per series in the batch
            distributions = [
                self.distr_output.distribution(*params)
                for params in _extract_instances(outputs)
            ]
            for idx, distr in enumerate(distributions):
                yield DistributionForecast(
                    distr,
                    start_date=batch["forecast_start"][idx],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][idx]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][idx] if "info" in batch else None,
                )
            # every forecast_start in the batch must yield exactly one forecast
            assert len(distributions) == len(batch["forecast_start"])
class QuantileForecastGenerator(ForecastGenerator):
    """Wraps per-quantile network outputs into ``QuantileForecast`` objects."""
    def __init__(self, quantiles: List[str]) -> None:
        self.quantiles = quantiles
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        for batch in inference_data_loader:
            net_args = [batch[name] for name in input_names]
            outputs = prediction_net(*net_args).cpu().numpy()
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            produced = 0
            for idx, output in enumerate(outputs):
                produced += 1
                yield QuantileForecast(
                    output,
                    start_date=batch["forecast_start"][idx],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][idx]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][idx] if "info" in batch else None,
                    forecast_keys=self.quantiles,
                )
            # every forecast_start in the batch must yield exactly one forecast
            assert produced == len(batch["forecast_start"])
class SampleForecastGenerator(ForecastGenerator):
    """
    Converts raw per-sample network outputs into ``SampleForecast`` objects,
    re-running the network until ``num_samples`` samples are collected.
    """
    @validated()
    def __init__(self):
        pass
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        for batch in inference_data_loader:
            inputs = [batch[k] for k in input_names]
            outputs = prediction_net(*inputs).cpu().numpy()
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            if num_samples:
                num_collected_samples = outputs[0].shape[0]
                collected_samples = [outputs]
                while num_collected_samples < num_samples:  # [Note] some models (e.g. DeepAR) emit several samples per forward pass to speed up inference, so keep collecting until enough are gathered
                    outputs = prediction_net(*inputs).cpu().numpy()
                    if output_transform is not None:
                        outputs = output_transform(batch, outputs)
                    collected_samples.append(outputs)
                    num_collected_samples += outputs[0].shape[0]
                # concatenate the collected batches and trim to num_samples
                outputs = [
                    np.concatenate(s)[:num_samples] for s in zip(*collected_samples)
                ]
                assert len(outputs[0]) == num_samples
            i = -1
            for i, output in enumerate(outputs):
                yield SampleForecast(
                    output,
                    start_date=batch["forecast_start"][i],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][i]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][i] if "info" in batch else None,
                )
            # every forecast_start in the batch must yield exactly one forecast
            assert i + 1 == len(batch["forecast_start"])
| 6,330 | 33.785714 | 125 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/utils.py | import inspect
from typing import Optional
import torch
import torch.nn as nn
def get_module_forward_input_names(module: nn.Module):
    """Return the parameter names of ``module.forward`` in declaration order
    (``self`` is excluded because the bound method is inspected)."""
    signature = inspect.signature(module.forward)
    return list(signature.parameters)
def copy_parameters(net_source: nn.Module, net_dest: nn.Module) -> None:
    """Copy every parameter and buffer of ``net_source`` into ``net_dest``
    in place (the two modules must share the same architecture)."""
    source_state = net_source.state_dict()
    net_dest.load_state_dict(source_state)
def weighted_average(
    tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None
):
    """
    Mean of ``tensor``, optionally weighted by ``weights``, over ``dim``
    (or over all elements when ``dim`` is None). The weight sum is clamped
    to at least 1 elementwise, avoiding division by zero.
    """
    if weights is None:
        # unweighted case: plain mean
        return tensor.mean(dim=dim) if dim is not None else tensor.mean()
    weighted = tensor * weights
    if dim is not None:
        total_weight = weights.sum(dim)
        total = weighted.sum(dim)
    else:
        total_weight = weights.sum()
        total = weighted.sum()
    total_weight = torch.max(torch.ones_like(total_weight), total_weight)
    return total / total_weight
| 1,032 | 26.918919 | 74 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/forecast.py | from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, List, Optional, Set, Union, Callable
import numpy as np
import pandas as pd
import torch
from pydantic import BaseModel, Field
from torch.distributions import Distribution
from .quantile import Quantile
class OutputType(str, Enum):
    """Forecast representations that can be emitted by ``as_json_dict``."""
    mean = "mean"
    samples = "samples"
    quantiles = "quantiles"
class Config(BaseModel):
    """Controls which representations ``Forecast.as_json_dict`` produces."""
    # accepts the legacy field name "num_eval_samples" as an alias
    num_samples: int = Field(100, alias="num_eval_samples")
    output_types: Set[OutputType] = {"quantiles", "mean"}
    # FIXME: validate list elements
    quantiles: List[str] = ["0.1", "0.5", "0.9"]
    class Config:
        allow_population_by_field_name = True
        # store additional fields
        extra = "allow"
class Forecast(ABC):
    """
    Abstract base class for forecast objects: a predicted distribution over
    ``prediction_length`` time steps starting at ``start_date``, queried
    through quantiles.
    """
    # attributes each concrete subclass is expected to provide
    start_date: pd.Timestamp
    freq: str
    item_id: Optional[str]
    info: Optional[Dict]
    prediction_length: int
    mean: np.ndarray
    _index = None
    @abstractmethod
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """
        Computes a quantile from the predicted distribution.
        Parameters
        ----------
        q
            Quantile to compute.
        Returns
        -------
        numpy.ndarray
            Value of the quantile across the prediction range.
        """
        pass
    def quantile_ts(self, q: Union[float, str]) -> pd.Series:
        """Quantile as a pandas.Series indexed by the forecast dates."""
        return pd.Series(data=self.quantile(q), index=self.index)
    @property
    def median(self) -> np.ndarray:
        return self.quantile(0.5)
    def plot(
        self,
        prediction_intervals=(50.0, 90.0),
        show_mean=False,
        color="b",
        label=None,
        output_file=None,
        *args,
        **kwargs,
    ):
        """
        Plots the median of the forecast as well as confidence bounds.
        (requires matplotlib and pandas).
        Parameters
        ----------
        prediction_intervals : float or list of floats in [0, 100]
            Confidence interval size(s). If a list, it will stack the error
            plots for each confidence interval. Only relevant for error styles
            with "ci" in the name.
        show_mean : boolean
            Whether to also show the mean of the forecast.
        color : matplotlib color name or dictionary
            The color used for plotting the forecast.
        label : string
            A label (prefix) that is used for the forecast
        output_file : str or None, default None
            Output path for the plot file. If None, plot is not saved to file.
        args :
            Other arguments are passed to main plot() call
        kwargs :
            Other keyword arguments are passed to main plot() call
        """
        # matplotlib==2.0.* gives errors in Brazil builds and has to be
        # imported locally
        import matplotlib.pyplot as plt
        label_prefix = "" if label is None else label + "-"
        for c in prediction_intervals:
            assert 0.0 <= c <= 100.0
        # median plus the lower/upper bound of every requested interval
        ps = [50.0] + [
            50.0 + f * c / 2.0 for c in prediction_intervals for f in [-1.0, +1.0]
        ]
        percentiles_sorted = sorted(set(ps))
        def alpha_for_percentile(p):
            return (p / 100.0) ** 0.5
        ps_data = [self.quantile(p / 100.0) for p in percentiles_sorted]
        i_p50 = len(percentiles_sorted) // 2
        p50_data = ps_data[i_p50]
        p50_series = pd.Series(data=p50_data, index=self.index)
        p50_series.plot(color=color, ls="-", label=f"{label_prefix}median")
        if show_mean:
            # Use the `mean` property rather than `_sorted_samples`, which
            # only SampleForecast defines; this makes show_mean work for
            # every Forecast subclass that provides a mean.
            mean_data = self.mean
            pd.Series(data=mean_data, index=self.index).plot(
                color=color, ls=":", label=f"{label_prefix}mean", *args, **kwargs,
            )
        # shade each interval, widest (most transparent) to narrowest
        for i in range(len(percentiles_sorted) // 2):
            ptile = percentiles_sorted[i]
            alpha = alpha_for_percentile(ptile)
            plt.fill_between(
                self.index,
                ps_data[i],
                ps_data[-i - 1],
                facecolor=color,
                alpha=alpha,
                interpolate=True,
                *args,
                **kwargs,
            )
            # Hack to create labels for the error intervals.
            # Doesn't actually plot anything, because we only pass a single data point
            pd.Series(data=p50_data[:1], index=self.index[:1]).plot(
                color=color,
                alpha=alpha,
                linewidth=10,
                label=f"{label_prefix}{100 - ptile * 2}%",
                *args,
                **kwargs,
            )
        if output_file:
            plt.savefig(output_file)
    @property
    def index(self) -> pd.DatetimeIndex:
        # lazily built and cached on first access
        if self._index is None:
            self._index = pd.date_range(
                self.start_date, periods=self.prediction_length, freq=self.freq
            )
        return self._index
    def as_json_dict(self, config: "Config") -> dict:
        """JSON-serializable dict with the representations chosen in config."""
        result = {}
        if OutputType.mean in config.output_types:
            result["mean"] = self.mean.tolist()
        if OutputType.quantiles in config.output_types:
            quantiles = map(Quantile.parse, config.quantiles)
            result["quantiles"] = {
                quantile.name: self.quantile(quantile.value).tolist()
                for quantile in quantiles
            }
        if OutputType.samples in config.output_types:
            # base class has no samples; subclasses override with real ones
            result["samples"] = []
        return result
class SampleForecast(Forecast):
    """
    A `Forecast` object, where the predicted distribution is represented
    internally as samples.
    Parameters
    ----------
    samples
        Array of size (num_samples, prediction_length)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        samples: Union[torch.Tensor, np.ndarray],
        start_date: pd.Timestamp,
        freq: str,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        assert isinstance(
            samples, (np.ndarray, torch.Tensor)
        ), "samples should be either a numpy array or an torch tensor"
        assert (
            len(np.shape(samples)) == 2 or len(np.shape(samples)) == 3
        ), "samples should be a 2-dimensional or 3-dimensional array. Dimensions found: {}".format(
            len(np.shape(samples))
        )
        # torch tensors are converted to numpy once at construction
        self.samples = (
            samples if (isinstance(samples, np.ndarray)) else samples.cpu().numpy()
        )
        # lazily computed caches (see _sorted_samples / mean)
        self._sorted_samples_value = None
        self._mean = None
        self._dim = None
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
    @property
    def _sorted_samples(self):
        # sorted along the sample axis; computed once and cached because
        # quantile() performs an index lookup into this array
        if self._sorted_samples_value is None:
            self._sorted_samples_value = np.sort(self.samples, axis=0)
        return self._sorted_samples_value
    @property
    def num_samples(self):
        """
        The number of samples representing the forecast.
        """
        return self.samples.shape[0]
    @property
    def prediction_length(self):
        """
        Time length of the forecast.
        """
        return self.samples.shape[1]
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean.
        """
        if self._mean is not None:
            return self._mean
        else:
            return np.mean(self.samples, axis=0)
    @property
    def mean_ts(self) -> pd.Series:
        """
        Forecast mean, as a pandas.Series object.
        """
        return pd.Series(data=self.mean, index=self.index)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        # empirical quantile: pick the sample at the matching sorted rank
        q = Quantile.parse(q).value
        sample_idx = int(np.round((self.num_samples - 1) * q))
        return self._sorted_samples[sample_idx, :]
    def copy_dim(self, dim: int) -> "SampleForecast":
        """
        Returns a new Forecast object with only the selected sub-dimension.
        Parameters
        ----------
        dim
            The returned forecast object will only represent this dimension.
        """
        if len(self.samples.shape) == 2:
            # univariate: nothing to select
            samples = self.samples
        else:
            target_dim = self.samples.shape[2]
            assert dim < target_dim, (
                f"must set 0 <= dim < target_dim, but got dim={dim},"
                f" target_dim={target_dim}"
            )
            samples = self.samples[:, :, dim]
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def copy_aggregate(self, agg_fun: Callable) -> "SampleForecast":
        """
        Returns a new Forecast object with a time series aggregated over the
        dimension axis.
        Parameters
        ----------
        agg_fun
            Aggregation function that defines the aggregation operation
            (typically mean or sum).
        """
        if len(self.samples.shape) == 2:
            # univariate: nothing to aggregate
            samples = self.samples
        else:
            # Aggregate over target dimension axis
            samples = agg_fun(self.samples, axis=2)
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def dim(self) -> int:
        """
        Returns the dimensionality of the forecast object.
        """
        if self._dim is not None:
            return self._dim
        else:
            if len(self.samples.shape) == 2:
                # univariate target
                # shape: (num_samples, prediction_length)
                return 1
            else:
                # multivariate target
                # shape: (num_samples, prediction_length, target_dim)
                return self.samples.shape[2]
    def as_json_dict(self, config: "Config") -> dict:
        # extends the base representation with the raw samples when requested
        result = super().as_json_dict(config)
        if OutputType.samples in config.output_types:
            result["samples"] = self.samples.tolist()
        return result
    def __repr__(self):
        return ", ".join(
            [
                f"SampleForecast({self.samples!r})",
                f"{self.start_date!r}",
                f"{self.freq!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class QuantileForecast(Forecast):
    """
    A Forecast that contains arrays (i.e. time series) for quantiles and mean.

    Parameters
    ----------
    forecast_arrays
        An array of forecasts
    start_date
        start of the forecast
    freq
        forecast frequency
    forecast_keys
        A list of quantiles of the form '0.1', '0.9', etc.,
        and potentially 'mean'. Each entry corresponds to one array in
        forecast_arrays.
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        forecast_arrays: np.ndarray,
        start_date: pd.Timestamp,
        freq: str,
        forecast_keys: List[str],
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        self.forecast_array = forecast_arrays
        self.start_date = pd.Timestamp(start_date, freq=freq)
        self.freq = freq
        # normalize keys: quantile levels become canonical names, 'mean' stays
        self.forecast_keys = [
            Quantile.from_str(key).name if key != "mean" else key
            for key in forecast_keys
        ]
        self.item_id = item_id
        self.info = info
        self._dim = None
        shape = self.forecast_array.shape
        # FIX: the error message previously had an unbalanced parenthesis
        # ("(shape={shape} should" -> "(shape={shape}) should").
        assert shape[0] == len(self.forecast_keys), (
            f"The forecast_array (shape={shape}) should have the same "
            f"length as the forecast_keys (len={len(self.forecast_keys)})."
        )
        self.prediction_length = shape[-1]
        # one row of forecast_array per (normalized) key
        self._forecast_dict = {
            k: self.forecast_array[i] for i, k in enumerate(self.forecast_keys)
        }
        # returned for keys that were not forecast, so evaluation runs through
        self._nan_out = np.array([np.nan] * self.prediction_length)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """Return the stored quantile array for level ``q``, or NaNs if absent."""
        q_str = Quantile.parse(q).name
        # We return nan here such that evaluation runs through
        return self._forecast_dict.get(q_str, self._nan_out)
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean (NaNs when no 'mean' array was provided).
        """
        return self._forecast_dict.get("mean", self._nan_out)
    def dim(self) -> int:
        """
        Returns the dimensionality of the forecast object.
        """
        if self._dim is not None:
            return self._dim
        else:
            if (
                len(self.forecast_array.shape) == 2
            ):  # 1D target. shape: (num_samples, prediction_length)
                return 1
            else:
                return self.forecast_array.shape[
                    1
                ]  # 2D target. shape: (num_samples, target_dim, prediction_length)
    def __repr__(self):
        # FIX: previous version baked a stray ")" into the last field, giving
        # unbalanced parentheses in the rendered representation.
        fields = ", ".join(
            [
                f"{self.forecast_array!r}",
                f"start_date={self.start_date!r}",
                f"freq={self.freq!r}",
                f"forecast_keys={self.forecast_keys!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r}",
            ]
        )
        return f"QuantileForecast({fields})"
class DistributionForecast(Forecast):
    """
    A `Forecast` object that uses a distribution directly.
    This can for instance be used to represent marginal probability
    distributions for each time point -- although joint distributions are
    also possible, e.g. when using MultiVariateGaussian).
    Parameters
    ----------
    distribution
        Distribution object. This should represent the entire prediction
        length, i.e., if we draw `num_samples` samples from the distribution,
        the sample shape should be
        samples = trans_dist.sample(num_samples)
        samples.shape -> (num_samples, prediction_length)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        distribution: Distribution,
        start_date: pd.Timestamp,
        freq: str,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        self.distribution = distribution
        # Combined shape of one joint draw; the leading axis is read as time
        # (see prediction_length below).
        self.shape = self.distribution.batch_shape + self.distribution.event_shape
        self.prediction_length = self.shape[0]
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
        # lazily-computed cache for the `mean` property
        self._mean = None
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean (cached after the first tensor-to-numpy conversion).
        """
        if self._mean is not None:
            return self._mean
        else:
            self._mean = self.distribution.mean.cpu().numpy()
            return self._mean
    @property
    def mean_ts(self) -> pd.Series:
        """
        Forecast mean, as a pandas.Series object.
        """
        # `self.index` is provided by the `Forecast` base class (not shown here).
        return pd.Series(data=self.mean, index=self.index)
    def quantile(self, level: Union[float, str]) -> np.ndarray:
        """Quantile at `level`, computed via the distribution's inverse CDF."""
        level = Quantile.parse(level).value
        q = self.distribution.icdf(torch.tensor([level])).cpu().numpy()
        return q
    def to_sample_forecast(self, num_samples: int = 200) -> SampleForecast:
        """Materialize this forecast as `num_samples` Monte-Carlo sample paths."""
        return SampleForecast(
            samples=self.distribution.sample((num_samples,)),
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
| 16,436 | 29.495362 | 99 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/__init__.py | from .estimator import Estimator, PTSEstimator
from .forecast import Forecast, SampleForecast, QuantileForecast, DistributionForecast
from .predictor import Predictor, PTSPredictor
from .quantile import Quantile
from .utils import get_module_forward_input_names, copy_parameters, weighted_average
| 297 | 48.666667 | 86 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/estimator.py | from abc import ABC, abstractmethod
from typing import NamedTuple, Optional
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from pts.core.component import validated
from pts import Trainer
from pts.dataset import Dataset, TransformedIterableDataset, TransformedListDataset
from pts.transform import Transformation
from .predictor import Predictor
from .utils import get_module_forward_input_names
class Estimator(ABC):
    """
    An abstract class representing a trainable model.
    The underlying model is trained by calling the `train` method with
    a training `Dataset`, producing a `Predictor` object.
    """
    # Expected to be set by concrete subclasses (on the class or instance).
    prediction_length: int
    freq: str
    @abstractmethod
    def train(
        self, training_data: Dataset,
    ) -> Predictor:
        """
        Train the estimator on the given data.
        Parameters
        ----------
        training_data
            Dataset to train the model on.
        Returns
        -------
        Predictor
            The predictor containing the trained model.
        """
        pass
class DummyEstimator(Estimator):
    """
    An `Estimator` that, upon training, simply returns a pre-constructed
    `Predictor`.
    Parameters
    ----------
    predictor_cls
        `Predictor` class to instantiate.
    **kwargs
        Keyword arguments to pass to the predictor constructor.
    """
    @validated()
    def __init__(self, predictor_cls: type, **kwargs) -> None:
        # The predictor is built eagerly here; `train` only hands it back.
        self.predictor = predictor_cls(**kwargs)
    def train(
        self,
        training_data: Dataset,
    ) -> Predictor:
        """Ignore `training_data` and return the pre-built predictor."""
        return self.predictor
class TrainOutput(NamedTuple):
    """Bundle returned by `PTSEstimator.train_model`."""
    transformation: Transformation  # fitted data transformation pipeline
    trained_net: nn.Module  # network holding the learned parameters
    predictor: Predictor  # ready-to-use predictor wrapping the trained network
class PTSEstimator(Estimator):
    """
    Base class for PyTorch-backed estimators: subclasses provide the data
    transformation, the training network, and the predictor factory; this
    class wires them together in `train_model`.
    """
    def __init__(self, trainer: Trainer, dtype: np.dtype = np.float32) -> None:
        self.trainer = trainer
        self.dtype = dtype
    @abstractmethod
    def create_transformation(self) -> Transformation:
        """
        Create and return the transformation needed for training and inference.
        Returns
        -------
        Transformation
            The transformation that will be applied entry-wise to datasets,
            at training and inference time.
        """
        pass
    @abstractmethod
    def create_training_network(self, device: torch.device) -> nn.Module:
        """
        Create and return the network used for training (i.e., computing the
        loss).
        Returns
        -------
        nn.Module
            The network that computes the loss given input data.
        """
        pass
    @abstractmethod
    def create_predictor(
        self,
        transformation: Transformation,
        trained_network: nn.Module,
        device: torch.device,
    ) -> Predictor:
        """
        Create and return a predictor object.
        Returns
        -------
        Predictor
            A predictor wrapping a `nn.Module` used for inference.
        """
        pass
    def train_model(
        self, training_data: Dataset, validation_period: int = 1
    ) -> TrainOutput:
        """
        Fit the transformation on `training_data`, run the trainer over the
        transformed dataset, and return the transformation, trained network,
        and a predictor built from them.
        """
        transformation = self.create_transformation()
        # Fit any data-dependent state of the transformation first.
        transformation.estimate(iter(training_data))
        training_iter_dataset = TransformedListDataset(
            dataset=training_data.list_data,
            is_train=True,
            transform=transformation
        )
        training_data_loader = DataLoader(
            training_iter_dataset,
            batch_size=self.trainer.batch_size,
            num_workers=self.trainer.num_workers,
            pin_memory=self.trainer.pin_memory,
            drop_last=True,
            shuffle=True
        )
        # ensure that the training network is created on the same device
        trained_net = self.create_training_network(self.trainer.device)
        # The trainer is callable and runs the optimization loop in place.
        self.trainer(
            net=trained_net,
            input_names=get_module_forward_input_names(trained_net),
            training_data_loader=training_data_loader,
            validation_period=validation_period,
        )
        return TrainOutput(
            transformation=transformation,
            trained_net=trained_net,
            predictor=self.create_predictor(
                transformation, trained_net, self.trainer.device
            ),
        )
    def train(
        self, training_data: Dataset, validation_period: int = 1
    ) -> Predictor:
        """Convenience wrapper around `train_model` returning only the predictor."""
        return self.train_model(training_data, validation_period).predictor
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/deepar/deepar_network.py | from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Distribution
from pts.core.component import validated
from pts.model import weighted_average
from pts.modules import DistributionOutput, MeanScaler, NOPScaler, FeatureEmbedder
def prod(xs):
    """Multiply together the entries of *xs*; an empty input yields 1."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class DeepARNetwork(nn.Module):
    """
    Shared backbone for the DeepAR training/prediction networks below.

    Embeds static and dynamic categorical features, builds lagged (and
    optionally moving-averaged) views of the target, scales the inputs, and
    unrolls an RNN (LSTM or GRU) over the resulting feature sequence.
    """
    @validated()
    def __init__(
        self,
        input_size: int,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        dc_cardinality: List[int],
        dc_embedding_dimension: List[int],
        lags_seq: List[int],
        moving_avg_windows: List[int],
        scaling: bool = True,
        dtype: np.dtype = np.float32,
    ) -> None:
        super().__init__()
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.dc_cardinality = dc_cardinality
        self.dc_embedding_dimension = dc_embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        self.lags_seq = lags_seq
        self.moving_avg_windows = moving_avg_windows
        self.distr_output = distr_output
        # KeyError for any cell_type other than "LSTM"/"GRU" (intentional).
        rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[self.cell_type]
        self.rnn = rnn(
            input_size=input_size,
            hidden_size=num_cells,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )
        # initialize LSTM forget gate bias to be 1 as recommanded by http://proceedings.mlr.press/v37/jozefowicz15.pdf
        '''for names in self.rnn._all_weights:
            for name in filter(lambda n: "bias" in n, names):
                bias = getattr(self.rnn, name)
                n = bias.size(0)
                start, end = n // 4, n // 2
                bias.data[start:end].fill_(1.)'''
        self.target_shape = distr_output.event_shape
        self.proj_distr_args = distr_output.get_args_proj(num_cells)
        self.embedder = FeatureEmbedder(
            cardinalities=cardinality, embedding_dims=embedding_dimension
        )
        self.dc_embedder = FeatureEmbedder(
            cardinalities=dc_cardinality, embedding_dims=dc_embedding_dimension
        )
        if scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)
    @staticmethod
    def get_lagged_subsequences(
        sequence: torch.Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> torch.Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            # lag 0 means "up to the end", hence the None end index
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(sequence[:, begin_index:end_index, ...])
        return torch.stack(lagged_values, dim=-1)
    @staticmethod
    def get_moving_average(
        accumlated_sum: torch.Tensor,
        moving_avg_windows: List[int],
    ) -> torch.Tensor:
        """
        Returns lagged moving average of a given sequence.
        Parameters
        ----------
        accumlated_sum : Tensor
            the accumulated sum of target sequence.
            Shape: (N, T, C)
        moving_avg_windows: List[int]
            list of window size for averaging
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, T, C, I), where I = len(moving_avg_windows), containing moving_average sequences.
            NOTE: the first `w` steps of each averaged sequence are NaN (no
            full window available yet); callers are expected to only read
            positions where a full window exists.
        """
        averaged_seqs = []
        for w in moving_avg_windows:
            moving_avg = torch.zeros_like(accumlated_sum)
            moving_avg[...] = np.nan
            # windowed sum via difference of cumulative sums
            moving_avg[:, w:, ...] = accumlated_sum[:, w:, ...] - accumlated_sum[:, :-w, ...]
            moving_avg /= w
            averaged_seqs.append(moving_avg)
        return torch.stack(averaged_seqs, dim=-1)
    def unroll_encoder(
        self,
        feat_static_cat: torch.Tensor, # (batch_size, num_features)
        feat_static_real: torch.Tensor, # (batch_size, num_features)
        past_time_feat: torch.Tensor, # (batch_size, history_length, num_features)
        past_target: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_accumulated_target: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_observed_values: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_feat_dynamic_cat: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_feat_dynamic_past: torch.Tensor, # (batch_size, history_length, *target_shape)
        future_feat_dynamic_past: torch.Tensor, # (batch_size, history_length, *target_shape)
        future_feat_dynamic_cat: torch.Tensor, # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            torch.Tensor
        ] = None, # (batch_size, prediction_length, num_features)
        future_target: Optional[
            torch.Tensor
        ] = None, # (batch_size, prediction_length, *target_shape)
        future_accumulated_target: Optional[torch.Tensor] = None, # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[torch.Tensor, Union[torch.Tensor, List], torch.Tensor, torch.Tensor]:
        """
        Assemble the RNN input features (lags, moving averages, time features,
        embeddings, static features) and unroll the RNN over them.

        When `future_time_feat`/`future_target` are None the encoder runs on
        the context window only; otherwise past and future are concatenated.
        Returns (rnn outputs, final state, scale, static features).
        """
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat[
                :, self.history_length - self.context_length :, ...
            ]
            feat_dynamic_cat = past_feat_dynamic_cat[
                :, self.history_length - self.context_length :, ...
            ]
            feat_dynamic_past = past_feat_dynamic_past
            accumlated_sequence = past_accumulated_target
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = torch.cat(
                (
                    past_time_feat[:, self.history_length - self.context_length :, ...],
                    future_time_feat,
                ),
                dim=1
            )
            feat_dynamic_cat = torch.cat(
                (
                    past_feat_dynamic_cat[:, self.history_length - self.context_length :, ...],
                    future_feat_dynamic_cat,
                ),
                dim=1
            )
            feat_dynamic_past = torch.cat((past_feat_dynamic_past, future_feat_dynamic_past), dim=1)
            accumlated_sequence = torch.cat((past_accumulated_target, future_accumulated_target), dim=1)
            sequence = torch.cat((past_target, future_target), dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # apply lag to feat_dynamic_past for aligning with target-lag
        feat_dynamic_past_lags = self.get_lagged_subsequences(
            sequence=feat_dynamic_past,
            sequence_length=sequence_length,
            indices=[min(self.lags_seq)],
            subsequences_length=subsequences_length
        ).squeeze(-1) # (batch_size, subsequences_length, num_features)
        # moving average
        if len(self.moving_avg_windows) == 0:
            merged_sequence = sequence
        else:
            moving_avg = self.get_moving_average(
                accumlated_sum=accumlated_sequence,
                moving_avg_windows=self.moving_avg_windows
            )
            merged_sequence = torch.cat((sequence.unsqueeze(-1) if len(self.target_shape) == 0 else sequence, moving_avg), dim=-1)
        # apply lags
        lags = self.get_lagged_subsequences(
            sequence=merged_sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length
        )
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = lags.reshape(
            (-1, subsequences_length, len(self.lags_seq) * (1 + len(self.moving_avg_windows)) * prod(self.target_shape))
        ) # [Note] builds one flattened feature vector covering every lag
        # embedding dynamic category features
        embedded_dynamic_cat = self.dc_embedder(feat_dynamic_cat)
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target[:, -self.context_length :, ...],
            past_observed_values[:, -self.context_length :, ...],
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # (batch_size, num_features + prod(target_shape))
        static_feat = torch.cat( (embedded_cat, feat_static_real), dim=1)
        if not self.scaling:
            # use the log scale as it can help prediction
            static_feat = torch.cat(
                (
                    static_feat,
                    scale.log() if len(self.target_shape) == 0 else scale.squeeze(1).log(),
                ),
                dim=1,
            )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.unsqueeze(1).expand(
            -1, subsequences_length, -1
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        input_lags = input_lags / scale.unsqueeze(-1)
        # (batch_size, sub_seq_len, input_dim)
        inputs = torch.cat((input_lags, time_feat, embedded_dynamic_cat, repeated_static_feat, feat_dynamic_past_lags), dim=-1)
        # unroll encoder
        self.rnn.flatten_parameters() # resolve warning on multi-gpu training
        outputs, state = self.rnn(inputs) # [Note] layout is (batch, seq_len, input_size) because batch_first=True
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (num_layers, batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class RolledDeepARTrainingNetwork(DeepARNetwork):
    """
    Training network that decodes the prediction range step by step ("rolled"),
    feeding its own samples back as inputs (scheduled-sampling style), and
    accumulates the negative log-likelihood of the scaled future target.
    """
    def forward(
        self,
        feat_static_cat: torch.Tensor,
        feat_static_real: torch.Tensor,
        past_time_feat: torch.Tensor,
        past_target: torch.Tensor,
        past_accumulated_target: torch.Tensor,
        past_observed_values: torch.Tensor,
        past_feat_dynamic_cat: torch.Tensor,
        past_feat_dynamic_past: torch.Tensor,
        future_feat_dynamic_past: torch.Tensor,
        future_feat_dynamic_cat: torch.Tensor,
        future_time_feat: torch.Tensor,
        future_target: torch.Tensor,
        future_accumulated_target: torch.Tensor,
        future_observed_values: torch.Tensor
    ) -> torch.Tensor:
        """
        Returns (weighted_loss, detached prediction error) where the error
        (future_target - mean prediction) is only used for monitoring.
        """
        # Encoder runs on the context window only (future args are None).
        rnn_outputs, state, scale, static_feat = self.unroll_encoder(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_accumulated_target=past_accumulated_target,
            past_observed_values=past_observed_values,
            past_feat_dynamic_cat=past_feat_dynamic_cat,
            past_feat_dynamic_past=past_feat_dynamic_past,
            future_feat_dynamic_past=None,
            future_feat_dynamic_cat=None,
            future_time_feat=None,
            future_target=None,
            future_accumulated_target=None,
        )
        #distr_args = self.proj_distr_args(rnn_outputs)
        #distr = self.distr_output.distribution(distr_args)
        target_scaled = future_target / scale # use normalized target for training
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        # (here "repeated_*" are simple aliases: training uses one path per series)
        repeated_past_target = past_target
        repeated_past_accumulated_target = past_accumulated_target
        repeated_feat_dynamic_past = past_feat_dynamic_past
        embedded_dynamic_cat = self.dc_embedder(future_feat_dynamic_cat)
        repeated_embedded_dynamic_cat = embedded_dynamic_cat
        repeated_time_feat = future_time_feat
        repeated_static_feat = static_feat.unsqueeze(1)
        repeated_scale = scale
        repeated_states = state
        future_samples = []
        losses = []
        self.shifted_lags = [l - 1 for l in self.lags_seq] # [Note] shifted one step forward for decoding (starts from lag 0)
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            feat_dynamic_past_lags = self.get_lagged_subsequences(
                sequence=repeated_feat_dynamic_past,
                sequence_length=self.history_length + k,
                indices=[min(self.shifted_lags)],
                subsequences_length=1
            ).squeeze(-1) # (batch_size, 1, num_features)
            if len(self.moving_avg_windows) == 0:
                merged_repeated_past_target = repeated_past_target
            else:
                # moving average
                repeated_moving_avg = self.get_moving_average(
                    accumlated_sum=repeated_past_accumulated_target,
                    moving_avg_windows=self.moving_avg_windows
                )
                merged_repeated_past_target = torch.cat((repeated_past_target.unsqueeze(-1) if len(self.target_shape) == 0 else repeated_past_target, repeated_moving_avg), dim=-1)
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                sequence=merged_repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = lags.reshape(
                (-1, 1, prod(self.target_shape) * len(self.lags_seq) * (1 + len(self.moving_avg_windows)))
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            input_lags = input_lags / repeated_scale.unsqueeze(-1)
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = torch.cat(
                (input_lags, repeated_time_feat[:, k : k + 1, :], repeated_embedded_dynamic_cat[:, k : k + 1, :], repeated_static_feat, feat_dynamic_past_lags),
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn(decoder_input, repeated_states)
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(distr_args, scale=repeated_scale)
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = torch.cat((repeated_past_target, new_samples), dim=1) # [Note] rolling prediction
            future_samples.append(distr.mean)
            # rolling feat_dynamic_past (zero-sale period)
            future_feat_dynamic_past = repeated_feat_dynamic_past[:,[-1],:] + 1
            future_feat_dynamic_past[new_samples > 0.5] = 0
            repeated_feat_dynamic_past = torch.cat((repeated_feat_dynamic_past, future_feat_dynamic_past), dim=1)
            # rolling accumulated target
            future_accumulated_target = repeated_past_accumulated_target[:,[-1],...] + new_samples
            repeated_past_accumulated_target = torch.cat((repeated_past_accumulated_target, future_accumulated_target), dim=1)
            # loss
            losses.append(-distr.log_prob(target_scaled[:,k:k+1]))
        loss = torch.cat(losses, dim=1)
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            future_observed_values
            if (len(self.target_shape) == 0)
            else future_observed_values.min(dim=-1, keepdim=False)[0]
        )
        weighted_loss = weighted_average(loss, weights=loss_weights)
        # for monitoring
        predicted = torch.cat(future_samples, dim=1)
        true_label = future_target # first true_label (assumed lag-1)
        error = true_label - predicted
        return weighted_loss, error.detach()
class DeepARPredictionNetwork(DeepARNetwork):
    """
    Inference network: encodes the context window, then samples
    `num_parallel_samples` future paths autoregressively via the decoder.
    """
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq] # [Note] shifted one step forward for decoding (starts from lag 0)
    def sampling_decoder(
        self,
        static_feat: torch.Tensor,
        past_target: torch.Tensor,
        past_accumulated_target: torch.Tensor,
        past_feat_dynamic_past: torch.Tensor,
        time_feat: torch.Tensor,
        dynamic_cat_feat: torch.Tensor,
        scale: torch.Tensor,
        begin_states: Union[torch.Tensor, List[torch.Tensor]],
    ) -> torch.Tensor:
        """
        Computes sample paths by unrolling the RNN starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List or Tensor
            list of initial states for the LSTM layers or tensor for GRU.
            the shape of each tensor of the list should be (num_layers, batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_past_accumulated_target = past_accumulated_target.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_feat_dynamic_past = past_feat_dynamic_past.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        embedded_dynamic_cat = self.dc_embedder(dynamic_cat_feat)
        repeated_embedded_dynamic_cat = embedded_dynamic_cat.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_time_feat = time_feat.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_static_feat = static_feat.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        ).unsqueeze(1)
        repeated_scale = scale.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        # LSTM state is an (h, c) pair; GRU state is a single tensor.
        if self.cell_type == "LSTM":
            repeated_states = [
                s.repeat_interleave(repeats=self.num_parallel_samples, dim=1)
                for s in begin_states
            ]
        else:
            repeated_states = begin_states.repeat_interleave(
                repeats=self.num_parallel_samples, dim=1
            )
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            feat_dynamic_past_lags = self.get_lagged_subsequences(
                sequence=repeated_feat_dynamic_past,
                sequence_length=self.history_length + k,
                indices=[min(self.shifted_lags)],
                subsequences_length=1
            ).squeeze(-1) # (batch_size, 1, num_features)
            if len(self.moving_avg_windows) == 0:
                merged_repeated_past_target = repeated_past_target
            else:
                # moving average
                repeated_moving_avg = self.get_moving_average(
                    accumlated_sum=repeated_past_accumulated_target,
                    moving_avg_windows=self.moving_avg_windows
                )
                merged_repeated_past_target = torch.cat((repeated_past_target.unsqueeze(-1) if len(self.target_shape) == 0 else repeated_past_target, repeated_moving_avg), dim=-1)
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                sequence=merged_repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = lags.reshape(
                (-1, 1, prod(self.target_shape) * len(self.lags_seq) * (1 + len(self.moving_avg_windows)))
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            input_lags = input_lags / repeated_scale.unsqueeze(-1)
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = torch.cat(
                (input_lags, repeated_time_feat[:, k : k + 1, :], repeated_embedded_dynamic_cat[:, k : k + 1, :], repeated_static_feat, feat_dynamic_past_lags),
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn(decoder_input, repeated_states)
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(distr_args, scale=repeated_scale)
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample() # [Note] draw one Monte-Carlo sample per path
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = torch.cat((repeated_past_target, new_samples), dim=1) # [Note] rolling prediction
            future_samples.append(new_samples)
            # rolling feat_dynamic_past (zero-sale period)
            future_feat_dynamic_past = repeated_feat_dynamic_past[:,[-1],:] + 1
            future_feat_dynamic_past[new_samples > 0.5] = 0
            repeated_feat_dynamic_past = torch.cat((repeated_feat_dynamic_past, future_feat_dynamic_past), dim=1)
            # rolling accumulated target
            future_accumulated_target = repeated_past_accumulated_target[:,[-1],...] + new_samples
            repeated_past_accumulated_target = torch.cat((repeated_past_accumulated_target, future_accumulated_target), dim=1)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = torch.cat(future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            (
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def forward(
        self,
        feat_static_cat: torch.Tensor, # (batch_size, num_features)
        feat_static_real: torch.Tensor, # (batch_size, num_features)
        past_time_feat: torch.Tensor, # (batch_size, history_length, num_features)
        past_target: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_accumulated_target: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_observed_values: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_feat_dynamic_cat: torch.Tensor, # (batch_size, history_length, *target_shape)
        past_feat_dynamic_past: torch.Tensor, # (batch_size, history_length, *target_shape)
        future_feat_dynamic_cat: torch.Tensor, # (batch_size, history_length, *target_shape)
        future_time_feat: torch.Tensor, # (batch_size, prediction_length, num_features)
    ) -> torch.Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_accumulated_target=past_accumulated_target,
            past_observed_values=past_observed_values,
            past_feat_dynamic_cat=past_feat_dynamic_cat,
            past_feat_dynamic_past=past_feat_dynamic_past,
            future_feat_dynamic_past=None,
            future_feat_dynamic_cat=None,
            future_time_feat=None,
            future_target=None,
            future_accumulated_target=None,
        )
        return self.sampling_decoder(
            past_target=past_target,
            past_accumulated_target=past_accumulated_target,
            past_feat_dynamic_past=past_feat_dynamic_past,
            time_feat=future_time_feat,
            dynamic_cat_feat=future_feat_dynamic_cat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 28,509 | 41.936747 | 179 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/deepar/deepar_estimator.py | from typing import List, Optional
import numpy as np
import torch
import torch.nn as nn
from pts.core.component import validated
from pts import Trainer
from pts.dataset import FieldName
from pts.feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from pts.model import PTSEstimator, Predictor, PTSPredictor, copy_parameters
from pts.modules import DistributionOutput, StudentTOutput
from pts.transform import (
Transformation,
Chain,
RemoveFields,
SetField,
AsNumpyArray,
AddObservedValuesIndicator,
AddTimeFeatures,
AddAgeFeature,
VstackFeatures,
InstanceSplitter,
#ExpectedNumInstanceSampler,
ExactNumInstanceSampler
)
from .deepar_network import RolledDeepARTrainingNetwork, DeepARPredictionNetwork
class DeepAREstimator(PTSEstimator):
    """DeepAR-style autoregressive forecasting estimator.

    Wires together the data-transformation pipeline, the training network
    (``RolledDeepARTrainingNetwork``) and the prediction network
    (``DeepARPredictionNetwork``), keeping their hyper-parameters in sync.
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        input_size: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None, # [Note] length of the conditioning (past) window
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "LSTM",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_dynamic_cat: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        dc_cardinality: Optional[List[int]] = None,
        dc_embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None, # [Note] lags_seq: recurring seasonal lag offsets
        moving_avg_windows: Optional[List[int]] = [], # window sizes for moving-average features
        time_features: Optional[List[TimeFeature]] = None,
        pick_incomplete: bool = True,
        num_parallel_samples: int = 100,
        dtype: np.dtype = np.float32,
    ) -> None:
        # NOTE(review): the mutable default ``[]`` for moving_avg_windows is only
        # read (len/max below), never mutated, so it is safe here.
        super().__init__(trainer=trainer)
        self.freq = freq
        # Fall back to prediction_length when no explicit context window is given.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.input_size = input_size
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_dynamic_cat = use_feat_dynamic_cat
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        self.cardinality = cardinality if cardinality and use_feat_static_cat else [1]
        # Heuristic embedding size per categorical feature: min(50, (card+1)//2).
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.dc_cardinality = dc_cardinality
        self.dc_embedding_dimension = dc_embedding_dimension
        self.scaling = scaling
        self.lags_seq = (
            lags_seq if lags_seq is not None else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        self.moving_avg_windows = moving_avg_windows
        # History must cover the context window plus the largest lag and the
        # largest moving-average window so every feature can be computed.
        self.history_length = self.context_length + max(self.lags_seq) + (max(self.moving_avg_windows) if len(self.moving_avg_windows)>0 else 0)
        self.num_parallel_samples = num_parallel_samples
        self.pick_incomplete = pick_incomplete
        # NOTE(review): self.dtype is read by the methods below but never assigned
        # here — presumably provided by the @validated() decorator; verify.
    def create_transformation(self) -> Transformation:
        """Build the chain of GluonTS-style transformations applied to each series."""
        # Drop the feature fields the model is configured not to use.
        remove_field_names = []
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        if not self.use_feat_dynamic_cat:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_CAT)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_REAL, value=[0.0])]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT, expected_ndim=1, dtype=np.long,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL, expected_ndim=1, dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True, # [Note] age feature is on a log scale
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    )
                ),
                # Split each series into (past, future) windows for training.
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExactNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.ACC_TARGET_SUM,
                        FieldName.FEAT_DYNAMIC_CAT,
                        FieldName.FEAT_DYNAMIC_PAST,
                        FieldName.OBSERVED_VALUES,
                    ],
                    pick_incomplete=self.pick_incomplete,
                ),
            ]
        )
    def create_training_network(self, device: torch.device) -> RolledDeepARTrainingNetwork:
        """Instantiate the training network on ``device`` from this estimator's config."""
        return RolledDeepARTrainingNetwork(
            input_size=self.input_size,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            dc_cardinality=self.dc_cardinality,
            dc_embedding_dimension=self.dc_embedding_dimension,
            lags_seq=self.lags_seq,
            moving_avg_windows=self.moving_avg_windows,
            scaling=self.scaling,
            dtype=self.dtype,
        ).to(device)
    def create_predictor(
        self,
        transformation: Transformation,
        trained_network: nn.Module,
        device: torch.device,
    ) -> Predictor:
        """Build a prediction network with the trained weights and wrap it in a predictor."""
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            input_size=self.input_size,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            dc_cardinality=self.dc_cardinality,
            dc_embedding_dimension=self.dc_embedding_dimension,
            lags_seq=self.lags_seq,
            moving_avg_windows=self.moving_avg_windows,
            scaling=self.scaling,
            dtype=self.dtype,
        ).to(device)
        # Copy the learned parameters from the training net into the prediction net.
        copy_parameters(trained_network, prediction_network)
        return PTSPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            device=device,
            dtype=self.dtype,
            #forecast_generator = SampleForecastGenerator() # [Note] the default forecast generator draws samples
        )
| 9,762 | 38.686992 | 144 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/deepar/__init__.py | from .deepar_estimator import DeepAREstimator
from .deepar_network import DeepARNetwork, RolledDeepARTrainingNetwork
| 117 | 38.333333 | 70 | py |
NM-sparsity | NM-sparsity-main/devkit/__init__.py | 0 | 0 | 0 | py | |
NM-sparsity | NM-sparsity-main/devkit/core/lr_scheduler.py | """Learning Rate Schedulers"""
from __future__ import division
from math import pi, cos
class LRScheduler(object):
    r"""Learning Rate Scheduler

    For mode='step', we multiply lr with `decay_factor` at each epoch in `step`.
    For mode='poly'::
        lr = targetlr + (baselr - targetlr) * (1 - iter / maxiter) ^ power
    For mode='cosine'::
        lr = targetlr + (baselr - targetlr) * (1 + cos(pi * iter / maxiter)) / 2
    If warmup_epochs > 0, a warmup stage is inserted before the main schedule.
    For warmup_mode='linear'::
        lr = warmup_lr + (baselr - warmup_lr) * iter / max_warmup_iter
    For warmup_mode='constant'::
        lr = warmup_lr

    Parameters
    ----------
    optimizer : object with a ``param_groups`` list of dicts (torch optimizer).
    niters : int
        Number of iterations per epoch.
    args : namespace
        Must provide ``lr_mode`` and ``epochs``; the remaining knobs
        (base_lr, step, decay_factor, targetlr, power, warmup_*) are optional
        and fall back to the defaults below.
    """
    def __init__(self, optimizer, niters, args):
        super(LRScheduler, self).__init__()
        self.mode = args.lr_mode
        self.warmup_mode = args.warmup_mode if hasattr(args, 'warmup_mode') else 'linear'
        assert self.mode in ['step', 'poly', 'cosine']
        assert self.warmup_mode in ['linear', 'constant']
        self.optimizer = optimizer
        self.base_lr = args.base_lr if hasattr(args, 'base_lr') else 0.1
        self.learning_rate = self.base_lr
        self.niters = niters
        # ``step`` arrives as a comma-separated string of epoch indices.
        self.step = [int(s) for s in args.step.split(',')] if hasattr(args, 'step') else [30, 60, 90]
        self.decay_factor = args.decay_factor if hasattr(args, 'decay_factor') else 0.1
        self.targetlr = args.targetlr if hasattr(args, 'targetlr') else 0.0
        self.power = args.power if hasattr(args, 'power') else 2.0
        self.warmup_lr = args.warmup_lr if hasattr(args, 'warmup_lr') else 0.0
        self.max_iter = args.epochs * niters
        self.warmup_iters = (args.warmup_epochs if hasattr(args, 'warmup_epochs') else 0) * niters

    def update(self, i, epoch):
        """Recompute the learning rate for iteration ``i`` of ``epoch`` and
        write it into every param group of the optimizer."""
        T = epoch * self.niters + i
        assert T >= 0 and T <= self.max_iter
        if self.warmup_iters > T:
            # Warm-up Stage
            if self.warmup_mode == 'linear':
                self.learning_rate = self.warmup_lr + (self.base_lr - self.warmup_lr) * \
                    T / self.warmup_iters
            elif self.warmup_mode == 'constant':
                self.learning_rate = self.warmup_lr
            else:
                raise NotImplementedError
        else:
            if self.mode == 'step':
                # Decay once for every milestone epoch already passed.
                count = sum([1 for s in self.step if s <= epoch])
                self.learning_rate = self.base_lr * pow(self.decay_factor, count)
            elif self.mode == 'poly':
                self.learning_rate = self.targetlr + (self.base_lr - self.targetlr) * \
                    pow(1 - (T - self.warmup_iters) / (self.max_iter - self.warmup_iters), self.power)
            elif self.mode == 'cosine':
                self.learning_rate = self.targetlr + (self.base_lr - self.targetlr) * \
                    (1 + cos(pi * (T - self.warmup_iters) / (self.max_iter - self.warmup_iters))) / 2
            else:
                raise NotImplementedError
        # BUG FIX (idiom): the old loop ``for i, param_group in enumerate(...)``
        # shadowed the ``i`` parameter with an unused index.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.learning_rate
| 3,992 | 41.478723 | 102 | py |
NM-sparsity | NM-sparsity-main/devkit/core/dist_utils.py | import os
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
__all__ = [
'init_dist', 'broadcast_params','average_gradients']
def init_dist(backend='nccl',
              master_ip='127.0.0.1',
              port=29500):
    """Initialise the default process group and pin this process to one GPU.

    Reads RANK / WORLD_SIZE from the environment (torch.distributed launch
    convention) and returns them as ints.
    """
    # CUDA requires child workers to be spawned rather than forked.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    os.environ['MASTER_ADDR'] = master_ip
    os.environ['MASTER_PORT'] = str(port)
    rank, world_size = int(os.environ['RANK']), int(os.environ['WORLD_SIZE'])
    # Round-robin GPU assignment: one device per local process.
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend)
    return rank, world_size
def average_gradients(model):
    """All-reduce (sum) the gradient of every trainable parameter in place.

    Parameters without a gradient (or with requires_grad=False) are skipped.
    """
    for p in model.parameters():
        if p.requires_grad and p.grad is not None:
            dist.all_reduce(p.grad.data)
def broadcast_params(model):
    """Broadcast every tensor in the model's state_dict from rank 0 to all ranks."""
    for tensor in model.state_dict().values():
        dist.broadcast(tensor, 0)
| 945 | 28.5625 | 60 | py |
NM-sparsity | NM-sparsity-main/devkit/core/utils.py | import torch
import os
import shutil
def save_checkpoint(model_dir, state, is_best):
    """Persist a training checkpoint.

    Saves ``state`` (must contain an ``'epoch'`` key) to
    ``<model_dir>/model.pth-<epoch>``, rewrites the ``checkpoint`` index file to
    point at it, and copies the file to ``model-best.pth`` when ``is_best``.
    """
    epoch = state['epoch']
    path = os.path.join(model_dir, 'model.pth-' + str(epoch))
    torch.save(state, path)
    checkpoint_file = os.path.join(model_dir, 'checkpoint')
    # BUG FIX: the index file was opened without ever being closed; use a
    # context manager so the handle is released deterministically.
    with open(checkpoint_file, 'w') as checkpoint:
        checkpoint.write('model_checkpoint_path:%s\n' % path)
    if is_best:
        shutil.copyfile(path, os.path.join(model_dir, 'model-best.pth'))
def load_state(model_dir, model, optimizer=None):
    """Restore model (and optionally optimizer) state from the newest checkpoint.

    Returns
    -------
    (0, 0) when no checkpoint index exists (train from scratch);
    (best_prec1, start_epoch) when a checkpoint and an optimizer are given;
    None when a checkpoint is loaded without an optimizer (kept for
    backward compatibility with existing callers).
    """
    if not os.path.exists(model_dir + '/checkpoint'):
        print("=> no checkpoint found at '{}', train from scratch".format(model_dir))
        return 0, 0
    else:
        # BUG FIX: the 'checkpoint' index file was previously opened and never
        # closed; idiom fix: 'optimizer != None' replaced with 'is not None'.
        with open(model_dir + '/checkpoint') as ckpt:
            model_path = ckpt.readlines()[0].split(':')[1].strip('\n')
        checkpoint = torch.load(model_path, map_location='cuda:{}'.format(torch.cuda.current_device()))
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        # Report parameters present in the model but missing from the checkpoint.
        ckpt_keys = set(checkpoint['state_dict'].keys())
        own_keys = set(model.state_dict().keys())
        missing_keys = own_keys - ckpt_keys
        for k in missing_keys:
            print('missing keys from checkpoint {}: {}'.format(model_dir, k))
        print("=> loaded model from checkpoint '{}'".format(model_dir))
        if optimizer is not None:
            best_prec1 = 0
            if 'best_prec1' in checkpoint.keys():
                best_prec1 = checkpoint['best_prec1']
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> also loaded optimizer from checkpoint '{}' (epoch {})"
                  .format(model_dir, start_epoch))
            return best_prec1, start_epoch
def load_state_epoch(model_dir, model, epoch):
    """Load the weights saved at a specific epoch into ``model`` (non-strict)."""
    model_path = '{}/model.pth-{}'.format(model_dir, epoch)
    checkpoint = torch.load(model_path,
                            map_location='cuda:{}'.format(torch.cuda.current_device()))
    state = checkpoint['state_dict']
    model.load_state_dict(state, strict=False)
    # Report parameters present in the model but absent from the checkpoint.
    for k in set(model.state_dict().keys()) - set(state.keys()):
        print('missing keys from checkpoint {}: {}'.format(model_dir, k))
    print("=> loaded model from checkpoint '{}'".format(model_dir))
def load_state_ckpt(model_path, model):
    """Load weights from an explicit checkpoint file into ``model`` (non-strict)."""
    device = 'cuda:{}'.format(torch.cuda.current_device())
    checkpoint = torch.load(model_path, map_location=device)
    state = checkpoint['state_dict']
    model.load_state_dict(state, strict=False)
    # Report parameters present in the model but absent from the checkpoint.
    for k in set(model.state_dict().keys()) - set(state.keys()):
        print('missing keys from checkpoint {}: {}'.format(model_path, k))
    print("=> loaded model from checkpoint '{}'".format(model_path))
| 2,861 | 40.478261 | 102 | py |
NM-sparsity | NM-sparsity-main/devkit/core/__init__.py | from .lr_scheduler import *
from .dist_utils import *
from .utils import *
| 75 | 18 | 27 | py |
NM-sparsity | NM-sparsity-main/devkit/dataset/imagenet_dataset.py | from torch.utils.data import Dataset
from PIL import Image
import torch
def pil_loader(filename):
    """Open an image file and return it as an RGB PIL image."""
    with Image.open(filename) as image:
        return image.convert('RGB')
class ImagenetDataset(Dataset):
    """Dataset driven by a meta file of ``<relative-path> <class-id>`` lines.

    Each item is ``(image, label)`` where the image is loaded from
    ``root_dir/<relative-path>`` and optionally transformed.
    """
    def __init__(self, root_dir, meta_file, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        with open(meta_file) as handle:
            lines = handle.readlines()
        print("building dataset from %s" % meta_file)
        self.num = len(lines)
        # One (relative_path, int_label) tuple per meta line.
        self.metas = [(path, int(cls))
                      for path, cls in (line.rstrip().split() for line in lines)]
        print("read meta done")
    def __len__(self):
        return self.num
    def __getitem__(self, idx):
        path, label = self.metas[idx]
        image = pil_loader(self.root_dir + '/' + path)
        if self.transform is not None:
            image = self.transform(image)
        return image, label
class ColorAugmentation(object):
    """PCA ("fancy PCA") color augmentation for a (3, H, W) image tensor.

    Adds a small random linear combination of the RGB principal components;
    the defaults are the standard ImageNet eigenvectors/eigenvalues.
    """
    def __init__(self, eig_vec=None, eig_val=None):
        # BUG FIX: the defaults were tested with ``== None``, which triggers an
        # element-wise comparison when a tensor is actually supplied; use
        # identity checks instead.
        if eig_vec is None:
            eig_vec = torch.Tensor([
                [0.4009, 0.7192, -0.5675],
                [-0.8140, -0.0045, -0.5808],
                [0.4203, -0.6948, -0.5836],
            ])
        if eig_val is None:
            eig_val = torch.Tensor([[0.2175, 0.0188, 0.0045]])
        self.eig_val = eig_val  # 1*3
        self.eig_vec = eig_vec  # 3*3
    def __call__(self, tensor):
        assert tensor.size(0) == 3
        # Random per-component weights, scaled down so the perturbation is mild.
        alpha = torch.normal(mean=torch.zeros_like(self.eig_val)) * 0.1
        quatity = torch.mm(self.eig_val * alpha, self.eig_vec)
        tensor = tensor + quatity.view(3, 1, 1)
        return tensor
| 1,758 | 29.327586 | 71 | py |
NM-sparsity | NM-sparsity-main/devkit/dataset/__init__.py | 0 | 0 | 0 | py | |
NM-sparsity | NM-sparsity-main/devkit/sparse_ops/sparse_ops.py | import torch
from torch import autograd, nn
import torch.nn.functional as F
from itertools import repeat
from torch._six import container_abcs
class Sparse(autograd.Function):
    """N:M magnitude pruning in the forward pass; the backward pass passes the
    gradient straight through to the dense weight with SR-STE regularisation."""
    @staticmethod
    def forward(ctx, weight, N, M, decay = 0.0002):
        ctx.save_for_backward(weight)
        num_groups = weight.numel() // M
        grouped = weight.detach().abs().reshape(num_groups, M)
        # Indices of the M-N smallest-magnitude entries in each group of M.
        prune_idx = torch.argsort(grouped, dim=1)[:, :int(M - N)]
        mask = torch.ones(grouped.shape, device=grouped.device)
        mask = mask.scatter_(dim=1, index=prune_idx, value=0).reshape(weight.shape)
        ctx.mask = mask
        ctx.decay = decay
        return weight.clone() * mask
    @staticmethod
    def backward(ctx, grad_output):
        weight, = ctx.saved_tensors
        # SR-STE: straight-through gradient plus a decay term on pruned weights.
        return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
class Sparse_NHWC(autograd.Function):
    """N:M pruning for 4-D conv weights, grouped along the channel axis by
    viewing the weight in NHWC order; the backward pass uses SR-STE."""
    @staticmethod
    def forward(ctx, weight, N, M, decay = 0.0002):
        ctx.save_for_backward(weight)
        num_groups = weight.numel() // M
        # View the weight as NHWC so consecutive groups of M run over channels.
        nhwc = weight.detach().abs().permute(0, 2, 3, 1)
        grouped = nhwc.reshape(num_groups, M)
        prune_idx = torch.argsort(grouped, dim=1)[:, :int(M - N)]
        mask = torch.ones(grouped.shape, device=grouped.device)
        mask = mask.scatter_(dim=1, index=prune_idx, value=0)
        # Restore the original NCHW layout before masking.
        mask = mask.reshape(nhwc.shape).permute(0, 3, 1, 2)
        ctx.mask = mask
        ctx.decay = decay
        return weight.clone() * mask
    @staticmethod
    def backward(ctx, grad_output):
        weight, = ctx.saved_tensors
        return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
class SparseConv(nn.Conv2d):
    """Conv2d whose weight is N:M-pruned on the fly at every forward pass."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, padding_mode='zeros', N=2, M=4, **kwargs):
        self.N = N
        self.M = M
        super(SparseConv, self).__init__(in_channels, out_channels, kernel_size,
                                         stride, padding, dilation, groups,
                                         bias, padding_mode, **kwargs)
    def get_sparse_weights(self):
        return Sparse_NHWC.apply(self.weight, self.N, self.M)
    def forward(self, x):
        # Prune first, then run a standard convolution with the masked weight.
        sparse_w = self.get_sparse_weights()
        return F.conv2d(x, sparse_w, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class SparseLinear(nn.Linear):
    """Linear layer whose weight is N:M-pruned on the fly at every forward pass."""
    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 N=2, M=2, decay = 0.0002, **kwargs):
        self.N = N
        self.M = M
        # BUG FIX: ``bias`` was previously hard-coded to True (the argument was
        # ignored), and ``decay`` was silently dropped; both now take effect.
        self.decay = decay
        super(SparseLinear, self).__init__(in_features, out_features, bias=bias)
    def get_sparse_weights(self):
        return Sparse.apply(self.weight, self.N, self.M, self.decay)
    def forward(self, x):
        w = self.get_sparse_weights()
        x = F.linear(x, w, self.bias)
        return x
| 3,245 | 26.982759 | 159 | py |
NM-sparsity | NM-sparsity-main/devkit/sparse_ops/__init__.py | from .syncbn_layer import SyncBatchNorm2d
from .sparse_ops import SparseConv
| 77 | 25 | 41 | py |
NM-sparsity | NM-sparsity-main/devkit/sparse_ops/syncbn_layer.py | import torch
from torch.autograd import Function
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.distributed as dist
import torch.nn as nn
class SyncBNFunc(Function):
    """Autograd function implementing batch norm whose statistics are
    all-reduced (averaged) across all distributed workers."""
    @staticmethod
    def forward(ctx, in_data, scale_data, shift_data, running_mean, running_var, eps, momentum, training):
        if in_data.is_cuda:
            ctx.eps =eps
            N, C, H, W = in_data.size()
            in_data = in_data.view(N, C, -1)
            # Per-sample, per-channel mean/variance over the spatial positions.
            mean_in = in_data.mean(-1, keepdim=True)
            var_in = in_data.var(-1, keepdim=True)
            # temp = E[x^2] per sample/channel (used to combine into batch stats).
            temp = var_in + mean_in ** 2
            if training:
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                sum_x = mean_bn ** 2 + var_bn
                # Average mean and second moment across all workers, then
                # recompute the global variance from them.
                dist.all_reduce(mean_bn)
                mean_bn /= dist.get_world_size()
                dist.all_reduce(sum_x)
                sum_x /= dist.get_world_size()
                var_bn = sum_x - mean_bn ** 2
                # Exponential moving average of the running statistics.
                running_mean.mul_(momentum)
                running_mean.add_((1 - momentum) * mean_bn.data)
                running_var.mul_(momentum)
                running_var.add_((1 - momentum) * var_bn.data)
            else:
                # Inference: use the stored running statistics.
                mean_bn = torch.autograd.Variable(running_mean)
                var_bn = torch.autograd.Variable(running_var)
            # Normalise, then apply the affine scale/shift.
            x_hat = (in_data - mean_bn) / (var_bn+ ctx.eps).sqrt()
            x_hat = x_hat.view(N, C, H, W)
            out_data = x_hat * scale_data + shift_data
            ctx.save_for_backward(in_data.data, scale_data.data, x_hat.data, mean_bn.data, var_bn.data)
        else:
            raise RuntimeError('SyncBNFunc only support CUDA computation!')
        return out_data
    @staticmethod
    def backward(ctx, grad_outdata):
        if grad_outdata.is_cuda:
            in_data, scale_data, x_hat, mean_bn, var_bn = ctx.saved_tensors
            N, C, H, W = grad_outdata.size()
            # Gradients w.r.t. the affine parameters, summed over batch/space
            # and then all-reduced so every worker applies the same update.
            scaleDiff = torch.sum(grad_outdata * x_hat,[0,2,3],keepdim=True)
            shiftDiff = torch.sum(grad_outdata,[0,2,3],keepdim=True)
            dist.all_reduce(scaleDiff)
            dist.all_reduce(shiftDiff)
            # Input gradient of batch norm; the 1/(N*H*W*world_size) factor
            # accounts for the statistics being computed over the global batch.
            inDiff = scale_data / (var_bn.view(1,C,1,1) + ctx.eps).sqrt() *(grad_outdata - 1 / (N*H*W*dist.get_world_size()) * (scaleDiff * x_hat + shiftDiff))
        else:
            raise RuntimeError('SyncBNFunc only support CUDA computation!')
        return inDiff, scaleDiff, shiftDiff, None, None, None, None, None
class SyncBatchNorm2d(Module):
    """BatchNorm2d whose statistics are synchronised across distributed workers
    via :class:`SyncBNFunc`.

    ``last_gamma=True`` initialises the scale to 0 (zero-init residual trick).
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.9,last_gamma=False):
        super(SyncBatchNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.last_gamma = last_gamma
        self.weight = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.bias = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
        self.register_buffer('running_var', torch.ones(1, num_features, 1))
        self.reset_parameters()
    def reset_parameters(self):
        self.running_mean.zero_()
        # NOTE(review): running_var is reset to zeros rather than ones; this
        # mirrors the original behaviour (forward adds eps) — confirm intended.
        self.running_var.zero_()
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()
    def __repr__(self):
        # BUG FIX: the old format string referenced a non-existent ``affine``
        # attribute via **self.__dict__, so repr() raised KeyError.
        return ('{name}({num_features}, eps={eps}, momentum={momentum})'
                .format(name=self.__class__.__name__, **self.__dict__))
    def forward(self, in_data):
        return SyncBNFunc.apply(
            in_data, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.momentum, self.training)
| 3,824 | 37.25 | 159 | py |
NM-sparsity | NM-sparsity-main/classification/train_imagenet.py | from __future__ import division
import argparse
import os
import time
import torch.distributed as dist
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import yaml
import sys
from tensorboardX import SummaryWriter
import models
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../')))
from devkit.core import (init_dist, broadcast_params, average_gradients, load_state_ckpt, load_state, save_checkpoint, LRScheduler)
from devkit.dataset.imagenet_dataset import ColorAugmentation, ImagenetDataset
# Command-line interface. Note that args is parsed once here at import time and
# again inside main(); values from the YAML config are merged onto it in main().
parser = argparse.ArgumentParser(
    description='Pytorch Imagenet Training')
parser.add_argument('--config', default='configs/config_resnet50_2:4.yaml')
# Supplied by torch.distributed.launch; unused directly (rank comes from env).
parser.add_argument("--local_rank", type=int)
parser.add_argument(
    '--port', default=29500, type=int, help='port of server')
parser.add_argument('--world-size', default=1, type=int)
parser.add_argument('--rank', default=0, type=int)
parser.add_argument('--model_dir', type=str)
parser.add_argument('--resume_from', default='', help='resume_from')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
args = parser.parse_args()
def main():
    """Entry point for distributed ImageNet training/evaluation.

    Merges the YAML config onto the CLI args, initialises torch.distributed,
    builds the N:M-sparse model, data loaders and optimizer, then either
    evaluates a checkpoint (--evaluate) or runs the epoch loop, checkpointing
    from rank 0.
    """
    global args, best_prec1
    args = parser.parse_args()
    with open(args.config) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted files; prefer yaml.safe_load for config files.
        config = yaml.load(f)
    # Flatten the two-level YAML config onto the args namespace.
    for key in config:
        for k, v in config[key].items():
            setattr(args, k, v)
    print('Enabled distributed training.')
    rank, world_size = init_dist(
        backend='nccl', port=args.port)
    args.rank = rank
    args.world_size = world_size
    # create model
    print("=> creating model '{}'".format(args.model))
    model = models.__dict__[args.model](N = args.N, M = args.M)
    model.cuda()
    # Make all ranks start from identical weights (rank 0's).
    broadcast_params(model)
    print(model)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # auto resume from a checkpoint
    model_dir = args.model_dir
    start_epoch = 0
    if args.rank == 0 and not os.path.exists(model_dir):
        os.makedirs(model_dir)
    if args.evaluate:
        load_state_ckpt(args.checkpoint_path, model)
    else:
        best_prec1, start_epoch = load_state(model_dir, model, optimizer=optimizer)
    # Only rank 0 writes TensorBoard logs and checkpoints.
    if args.rank == 0:
        writer = SummaryWriter(model_dir)
    else:
        writer = None
    cudnn.benchmark = True
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = ImagenetDataset(
        args.train_root,
        args.train_source,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ColorAugmentation(),
            normalize,
        ]))
    val_dataset = ImagenetDataset(
        args.val_root,
        args.val_source,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    # Each rank sees a disjoint shard; the global batch size is divided evenly.
    train_sampler = DistributedSampler(train_dataset)
    val_sampler = DistributedSampler(val_dataset)
    train_loader = DataLoader(
        train_dataset, batch_size=args.batch_size//args.world_size, shuffle=False,
        num_workers=args.workers, pin_memory=False, sampler=train_sampler)
    val_loader = DataLoader(
        val_dataset, batch_size=args.batch_size//args.world_size, shuffle=False,
        num_workers=args.workers, pin_memory=False, sampler=val_sampler)
    if args.evaluate:
        validate(val_loader, model, criterion, 0, writer)
        return
    niters = len(train_loader)
    lr_scheduler = LRScheduler(optimizer, niters, args)
    for epoch in range(start_epoch, args.epochs):
        # Re-seed the sampler so each epoch uses a different shuffling.
        train_sampler.set_epoch(epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, lr_scheduler, epoch, writer)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch, writer)
        if rank == 0:
            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint(model_dir, {
                'epoch': epoch + 1,
                'model': args.model,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
def train(train_loader, model, criterion, optimizer, lr_scheduler, epoch, writer):
    """Run one distributed training epoch.

    The loss is divided by world_size before backward because
    ``average_gradients`` all-reduces (sums, not averages) the gradients.
    Only rank 0 prints progress and writes TensorBoard scalars.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # (removed an unused ``SAD = AverageMeter()`` local)
    # switch to train mode
    model.train()
    world_size = args.world_size
    rank = args.rank
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        lr_scheduler.update(i, epoch)
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input.cuda())
        target_var = torch.autograd.Variable(target)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var) / world_size
        # measure accuracy and record loss; metrics are summed across workers.
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        reduced_loss = loss.data.clone()
        reduced_prec1 = prec1.clone() / world_size
        reduced_prec5 = prec5.clone() / world_size
        dist.all_reduce_multigpu([reduced_loss])
        dist.all_reduce_multigpu([reduced_prec1])
        dist.all_reduce_multigpu([reduced_prec5])
        losses.update(reduced_loss.item(), input.size(0))
        top1.update(reduced_prec1.item(), input.size(0))
        top5.update(reduced_prec5.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        average_gradients(model)
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0 and rank == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
            niter = epoch * len(train_loader) + i
            writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], niter)
            writer.add_scalar('Train/Avg_Loss', losses.avg, niter)
            writer.add_scalar('Train/Avg_Top1', top1.avg / 100.0, niter)
            writer.add_scalar('Train/Avg_Top5', top5.avg / 100.0, niter)
def validate(val_loader, model, criterion, epoch, writer):
    """Evaluate the model on the validation set; returns the average top-1.

    Accuracy and loss are all-reduced across workers so every rank reports
    global statistics.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    world_size = args.world_size
    rank = args.rank
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            # torch.no_grad() already disables autograd; the deprecated
            # ``Variable(..., volatile=True)`` wrappers were removed.
            input_var = input.cuda()
            target_var = target
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var) / world_size
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            reduced_loss = loss.data.clone()
            reduced_prec1 = prec1.clone() / world_size
            reduced_prec5 = prec5.clone() / world_size
            dist.all_reduce_multigpu([reduced_loss])
            dist.all_reduce_multigpu([reduced_prec1])
            dist.all_reduce_multigpu([reduced_prec5])
            losses.update(reduced_loss.item(), input.size(0))
            # BUG FIX: the globally-reduced accuracies were computed but the
            # *local* prec1/prec5 were accumulated; use the reduced values,
            # consistent with train().
            top1.update(reduced_prec1.item(), input.size(0))
            top5.update(reduced_prec5.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0 and rank == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))
    if rank == 0:
        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
        niter = (epoch + 1)
        writer.add_scalar('Eval/Avg_Loss', losses.avg, niter)
        writer.add_scalar('Eval/Avg_Top1', top1.avg / 100.0, niter)
        writer.add_scalar('Eval/Avg_Top5', top5.avg / 100.0, niter)
    return top1.avg
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0
    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Parameters
    ----------
    output : (batch, num_classes) logits/scores tensor.
    target : (batch,) tensor of class indices.
    topk : tuple of k values.

    Returns a list of one-element tensors, one percentage per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: view(-1) fails on the non-contiguous slice of the
        # transposed tensor in recent PyTorch; reshape handles both cases.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
# Entry point: launched once per process by the distributed launcher.
if __name__ == '__main__':
    main()
| 10,642 | 33.003195 | 131 | py |
NM-sparsity | NM-sparsity-main/classification/models/resnet.py | import torch.nn as nn
import math
import sys
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../../../')))
#from devkit.ops import SyncBatchNorm2d
import torch
import torch.nn.functional as F
from torch import autograd
from torch.nn.modules.utils import _pair as pair
from torch.nn import init
from devkit.sparse_ops import SparseConv
__all__ = ['ResNetV1', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def conv3x3(in_planes, out_planes, stride=1, N=2, M=4):
    """3x3 N:M-sparse convolution with padding=1 and no bias."""
    return SparseConv(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
        N=N,
        M=M,
    )
class BasicBlock(nn.Module):
    """Two-3x3-conv residual block (ResNet-18/34 style) with N:M-sparse convs."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, N=2, M=4):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride, N=N, M=M)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, N=N, M=M)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Project the identity path when the shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, all N:M-sparse convs."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, N=2, M=4):
        super(Bottleneck, self).__init__()
        self.conv1 = SparseConv(inplanes, planes, kernel_size=1, bias=False, N=N, M=M)
        self.bn1 = nn.BatchNorm2d(planes)
        # The 3x3 conv carries the stride when the block downsamples.
        self.conv2 = SparseConv(planes, planes, kernel_size=3, stride=stride,
                                padding=1, bias=False, N=N, M=M)
        self.bn2 = nn.BatchNorm2d(planes)
        # Expand back to planes * expansion channels.
        self.conv3 = SparseConv(planes, planes * 4, kernel_size=1, bias=False, N=N, M=M)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity, or 1x1 projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNetV1(nn.Module):
    """ResNet (v1) built entirely from N:M-sparse convolutions.

    `block` is BasicBlock or Bottleneck; `layers` gives the number of blocks
    in each of the four stages. N/M control the N:M sparsity pattern passed
    to every SparseConv.
    """
    def __init__(self, block, layers, num_classes=1000, N=2, M=4):
        super(ResNetV1, self).__init__()
        self.N = N
        self.M = M
        # Channel count entering the next stage; updated by _make_layer.
        self.inplanes = 64
        self.conv1 = SparseConv(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False, N=self.N, M=self.M)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0], N = self.N, M = self.M)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, N = self.N, M = self.M)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, N = self.N, M = self.M)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, N = self.N, M = self.M)
        # 7x7 average pool assumes a 224x224 input (7x7 feature map here).
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Kaiming-style init: std = sqrt(2 / fan), fan from kernel area x out channels.
        for m in self.modules():
            if isinstance(m, SparseConv):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
    def _make_layer(self, block, planes, blocks, stride=1, N = 2, M = 4):
        """Build one stage of `blocks` residual blocks; the first may downsample."""
        downsample = None
        # Projection shortcut when the stage changes resolution or width.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                SparseConv(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False, N=N, M=M),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, N=N, M=M))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, N=N, M=M))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Standard ResNet forward pass: stem, 4 stages, pool, classifier."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet18(**kwargs):
    """Construct a ResNet-18 with N:M-sparse convolutions."""
    return ResNetV1(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """Construct a ResNet-34 with N:M-sparse convolutions."""
    return ResNetV1(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """Construct a ResNet-50 (bottleneck blocks) with N:M-sparse convolutions."""
    return ResNetV1(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """Construct a ResNet-101 (bottleneck blocks) with N:M-sparse convolutions."""
    return ResNetV1(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """Construct a ResNet-152 (bottleneck blocks) with N:M-sparse convolutions."""
    return ResNetV1(Bottleneck, [3, 8, 36, 3], **kwargs)
| 5,446 | 28.603261 | 95 | py |
NM-sparsity | NM-sparsity-main/classification/models/__init__.py | from .resnet import *
| 22 | 10.5 | 21 | py |
NM-sparsity | NM-sparsity-main/RAFT/evaluate.py | import sys
sys.path.append('core')
from PIL import Image
import argparse
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import datasets
from utils import flow_viz
from utils import frame_utils
from raft import RAFT
from utils.utils import InputPadder, forward_interpolate
@torch.no_grad()
def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
    """ Create submission for the Sintel leaderboard.

    Writes one .flo file per frame pair under output_path/<dstype>/<sequence>/.
    With warm_start=True, each prediction is initialized from the previous
    frame's low-resolution flow (forward-interpolated).
    """
    model.eval()
    for dstype in ['clean', 'final']:
        test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)
        flow_prev, sequence_prev = None, None
        for test_id in range(len(test_dataset)):
            image1, image2, (sequence, frame) = test_dataset[test_id]
            if sequence != sequence_prev:
                # New video sequence: discard the warm-start flow.
                flow_prev = None
            # Pad inputs so spatial dims are compatible with the network.
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
            flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
            if warm_start:
                # Seed the next frame with the current low-res flow estimate.
                flow_prev = forward_interpolate(flow_low[0])[None].cuda()
            output_dir = os.path.join(output_path, dstype, sequence)
            output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            frame_utils.writeFlow(output_file, flow)
            sequence_prev = sequence
@torch.no_grad()
def create_kitti_submission(model, iters=24, output_path='kitti_submission'):
    """ Create submission for the KITTI leaderboard.

    Writes one KITTI-format flow file per test pair into output_path.
    """
    model.eval()
    test_dataset = datasets.KITTI(split='testing', aug_params=None)
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    for test_id in range(len(test_dataset)):
        image1, image2, (frame_id, ) = test_dataset[test_id]
        # KITTI padding mode pads only top/left-right as required by the dataset.
        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
        output_filename = os.path.join(output_path, frame_id)
        frame_utils.writeFlowKITTI(output_filename, flow)
@torch.no_grad()
def validate_chairs(model, iters=24):
    """ Perform evaluation on the FlyingChairs (test) split.

    Returns {'chairs': mean end-point error over all pixels}.
    """
    model.eval()
    epe_list = []
    val_dataset = datasets.FlyingChairs(split='validation')
    for val_id in range(len(val_dataset)):
        image1, image2, flow_gt, _ = val_dataset[val_id]
        image1 = image1[None].cuda()
        image2 = image2[None].cuda()
        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        # Per-pixel end-point error of the final prediction.
        epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt()
        epe_list.append(epe.view(-1).numpy())
    epe = np.mean(np.concatenate(epe_list))
    print("Validation Chairs EPE: %f" % epe)
    return {'chairs': epe}
@torch.no_grad()
def validate_sintel(model, iters=32):
    """ Perform validation using the Sintel (train) split.

    Returns {dstype: mean per-pixel end-point error} for 'clean' and 'final'.
    """
    model.eval()
    results = {}
    for dstype in ['clean', 'final']:
        val_dataset = datasets.MpiSintel(split='training', dstype=dstype)
        epe_list = []
        for val_id in range(len(val_dataset)):
            image1, image2, flow_gt, _ = val_dataset[val_id]
            image1 = image1[None].cuda()
            image2 = image2[None].cuda()
            # Pad inputs so spatial dims are compatible with the network.
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)
            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
            flow = padder.unpad(flow_pr[0]).cpu()
            # Per-pixel end-point error for this frame pair.
            epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
            epe_list.append(epe.view(-1).numpy())
        epe_all = np.concatenate(epe_list)
        epe = np.mean(epe_all)
        px1 = np.mean(epe_all<1)
        px3 = np.mean(epe_all<3)
        px5 = np.mean(epe_all<5)
        print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5))
        # Bug fix: the original stored np.mean(epe_list), the mean of a list of
        # per-image arrays. That raises on modern NumPy if array lengths differ
        # (ragged list) and in general disagrees with the flat per-pixel mean
        # that is printed above. Store the same `epe` that was reported.
        results[dstype] = epe
    return results
@torch.no_grad()
def validate_kitti(model, iters=24):
    """ Perform validation using the KITTI-2015 (train) split.

    Returns {'kitti-epe': mean per-image EPE, 'kitti-f1': outlier percentage}.
    """
    model.eval()
    val_dataset = datasets.KITTI(split='training')
    out_list, epe_list = [], []
    for val_id in range(len(val_dataset)):
        image1, image2, flow_gt, valid_gt = val_dataset[val_id]
        image1 = image1[None].cuda()
        image2 = image2[None].cuda()
        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)
        flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).cpu()
        # Per-pixel end-point error and ground-truth flow magnitude.
        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()
        epe = epe.view(-1)
        mag = mag.view(-1)
        # Only pixels with valid ground truth count toward the metrics.
        val = valid_gt.view(-1) >= 0.5
        # Outlier ("Fl") definition: EPE > 3px AND relative error > 5%.
        out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()
        epe_list.append(epe[val].mean().item())
        out_list.append(out[val].cpu().numpy())
    epe_list = np.array(epe_list)
    out_list = np.concatenate(out_list)
    epe = np.mean(epe_list)
    f1 = 100 * np.mean(out_list)
    print("Validation KITTI: %f, %f" % (epe, f1))
    return {'kitti-epe': epe, 'kitti-f1': f1}
# CLI entry point: load a checkpoint and evaluate on the chosen dataset.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help="restore checkpoint")
    parser.add_argument('--dataset', help="dataset for evaluation")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
    args = parser.parse_args()
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))
    model.cuda()
    model.eval()
    # create_sintel_submission(model.module, warm_start=True)
    # create_kitti_submission(model.module)
    # no_grad: evaluation only, no autograd bookkeeping needed.
    with torch.no_grad():
        if args.dataset == 'chairs':
            validate_chairs(model.module)
        elif args.dataset == 'sintel':
            validate_sintel(model.module)
        elif args.dataset == 'kitti':
            validate_kitti(model.module)
NM-sparsity | NM-sparsity-main/RAFT/demo.py | import sys
sys.path.append('core')
import argparse
import os
import cv2
import glob
import numpy as np
import torch
from PIL import Image
from raft import RAFT
from utils import flow_viz
from utils.utils import InputPadder
DEVICE = 'cuda'
def load_image(imfile):
    """Read an image file and return it as a 1xCxHxW float tensor on DEVICE."""
    arr = np.array(Image.open(imfile)).astype(np.uint8)
    tensor = torch.from_numpy(arr).permute(2, 0, 1).float()
    return tensor[None].to(DEVICE)
def viz(img, flo):
    """Show an image and its predicted flow (rendered as RGB) stacked vertically.

    Blocks until a key is pressed in the OpenCV window.
    """
    img = img[0].permute(1,2,0).cpu().numpy()
    flo = flo[0].permute(1,2,0).cpu().numpy()
    # map flow to rgb image
    flo = flow_viz.flow_to_image(flo)
    img_flo = np.concatenate([img, flo], axis=0)
    # import matplotlib.pyplot as plt
    # plt.imshow(img_flo / 255.0)
    # plt.show()
    # OpenCV expects BGR, hence the channel flip; divide by 255 for float display.
    cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0)
    cv2.waitKey()
def demo(args):
    """Run RAFT on consecutive image pairs from args.path and visualize each flow."""
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))
    model = model.module
    model.to(DEVICE)
    model.eval()
    with torch.no_grad():
        images = glob.glob(os.path.join(args.path, '*.png')) + \
                 glob.glob(os.path.join(args.path, '*.jpg'))
        # Sort so that zip pairs each frame with its successor.
        images = sorted(images)
        for imfile1, imfile2 in zip(images[:-1], images[1:]):
            image1 = load_image(imfile1)
            image2 = load_image(imfile2)
            # Pad inputs so spatial dims are compatible with the network.
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)
            flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
            viz(image1, flow_up)
# CLI entry point: parse model/data arguments and run the demo loop.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help="restore checkpoint")
    parser.add_argument('--path', help="dataset for evaluation")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
    args = parser.parse_args()
    demo(args)
| 2,073 | 26.289474 | 112 | py |
NM-sparsity | NM-sparsity-main/RAFT/train.py | from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from raft import RAFT
from sparse_raft import SparseRAFT
import evaluate
import datasets
from torch.utils.tensorboard import SummaryWriter
from lr_scheduler import OneCycleLR
# Use the real AMP GradScaler when available; otherwise fall back to a no-op
# stand-in so the training loop can call scale/unscale_/step/update unchanged.
# NOTE(review): the bare `except:` also swallows KeyboardInterrupt/SystemExit;
# `except ImportError:` would be safer — confirm before changing.
try:
    from torch.cuda.amp import GradScaler
except:
    # dummy GradScaler for PyTorch < 1.6
    class GradScaler:
        #def __init__(self):
        #    pass
        def __init__(self, enabled=None):
            pass
        def scale(self, loss):
            # No-op scaling: return the loss unchanged.
            return loss
        def unscale_(self, optimizer):
            pass
        def step(self, optimizer):
            # Without AMP, stepping the scaler just steps the optimizer.
            optimizer.step()
        def update(self):
            pass
# exclude extremely large displacements from the loss (see sequence_loss)
MAX_FLOW = 400
# print averaged training metrics every SUM_FREQ steps (see Logger)
SUM_FREQ = 100
# run validation / save a checkpoint every VAL_FREQ steps (see train)
VAL_FREQ = 5000
def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW):
    """ Loss function defined over sequence of flow predictions """
    # Mask out invalid pixels and extremely large displacements.
    mag = torch.sum(flow_gt**2, dim=1).sqrt()
    valid = (valid >= 0.5) & (mag < max_flow)

    # Exponentially weighted L1 loss: later (more refined) predictions
    # receive larger weights, gamma**(n-1-i).
    n_predictions = len(flow_preds)
    flow_loss = 0.0
    for idx, prediction in enumerate(flow_preds):
        weight = gamma**(n_predictions - idx - 1)
        abs_err = (prediction - flow_gt).abs()
        flow_loss += weight * (valid[:, None] * abs_err).mean()

    # End-point error of the final prediction, restricted to valid pixels.
    epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
    epe = epe.view(-1)[valid.view(-1)]

    metrics = {
        'epe': epe.mean().item(),
        '1px': (epe < 1).float().mean().item(),
        '3px': (epe < 3).float().mean().item(),
        '5px': (epe < 5).float().mean().item(),
    }
    return flow_loss, metrics
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def fetch_optimizer(args, model):
    """ Create the optimizer and learning rate scheduler """
    # AdamW: Adam with decoupled weight decay.
    optimizer = optim.AdamW(model.parameters(), lr=args.lr,
                            weight_decay=args.wdecay, eps=args.epsilon)
    # One-cycle schedule: ramp up over the first 5% of steps, then anneal
    # linearly. The extra 100 steps keep the scheduler from stepping past
    # its final phase at the end of training.
    scheduler = OneCycleLR(optimizer, args.lr, args.num_steps + 100,
                           pct_start=0.05, cycle_momentum=False,
                           anneal_strategy='linear')
    return optimizer, scheduler
class Logger:
    """Accumulates per-step training metrics and prints their running mean
    every SUM_FREQ steps; can also write validation results to TensorBoard."""
    def __init__(self, model, scheduler):
        self.model = model
        self.scheduler = scheduler
        self.total_steps = 0
        # metric name -> value accumulated since the last print
        self.running_loss = {}
        # SummaryWriter, created lazily in write_dict
        self.writer = None
    def _print_training_status(self):
        # Average each metric over the SUM_FREQ window, in sorted key order
        # so columns are stable across prints.
        metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
        training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
        metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
        # print the training status
        print(training_str+metrics_str)
        #if self.writer is None:
        #    self.writer = SummaryWriter()
        for k in self.running_loss:
            #self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
            self.running_loss[k] = 0.0
    def push(self, metrics):
        """Record one step's metrics; triggers a print every SUM_FREQ steps."""
        self.total_steps += 1
        for key in metrics:
            if key not in self.running_loss:
                self.running_loss[key] = 0.0
            self.running_loss[key] += metrics[key]
        if self.total_steps % SUM_FREQ == SUM_FREQ-1:
            self._print_training_status()
            self.running_loss = {}
    def write_dict(self, results):
        """Write a dict of scalar results to TensorBoard at the current step."""
        if self.writer is None:
            self.writer = SummaryWriter()
        for key in results:
            self.writer.add_scalar(key, results[key], self.total_steps)
    def close(self):
        # NOTE(review): raises AttributeError if write_dict was never called
        # (self.writer is still None) — confirm callers guard this.
        self.writer.close()
def train(args):
    """Main training loop for sparse RAFT.

    Builds the model and data loader, optimizes with AdamW + one-cycle LR and
    (optional) mixed precision, validates and checkpoints every VAL_FREQ steps,
    and saves a final checkpoint. Returns the final checkpoint path.
    """
    model = nn.DataParallel(SparseRAFT(args), device_ids=args.gpus)
    print("Parameter Count: %d" % count_parameters(model))
    if args.restore_ckpt is not None:
        # strict=False tolerates missing/unexpected keys in the checkpoint.
        model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
    model.cuda()
    model.train()
    if args.stage != 'chairs':
        # presumably keeps BatchNorm layers frozen after the first training
        # stage — see SparseRAFT.freeze_bn for the exact behavior.
        model.module.freeze_bn()
    train_loader = datasets.fetch_dataloader(args)
    optimizer, scheduler = fetch_optimizer(args, model)
    total_steps = 0
    scaler = GradScaler(enabled=args.mixed_precision)
    logger = Logger(model, scheduler)
    # NOTE(review): this local VAL_FREQ shadows the module-level constant,
    # and `add_noise` below is unused (args.add_noise is checked instead).
    VAL_FREQ = 5000
    add_noise = True
    should_keep_training = True
    while should_keep_training:
        for i_batch, data_blob in enumerate(train_loader):
            optimizer.zero_grad()
            image1, image2, flow, valid = [x.cuda() for x in data_blob]
            if args.add_noise:
                # Augment with Gaussian noise of random strength, clamped to
                # the valid 8-bit intensity range.
                stdv = np.random.uniform(0.0, 5.0)
                image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
                image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
            flow_predictions = model(image1, image2, iters=args.iters)
            loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma)
            # AMP pattern: scale, backward, unscale before clipping, then step.
            scaler.scale(loss).backward()
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            scaler.step(optimizer)
            scheduler.step()
            scaler.update()
            logger.push(metrics)
            if total_steps % VAL_FREQ == VAL_FREQ - 1:
                # Periodic checkpoint + validation on the requested datasets.
                PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
                torch.save(model.state_dict(), PATH)
                results = {}
                for val_dataset in args.validation:
                    if val_dataset == 'chairs':
                        results.update(evaluate.validate_chairs(model.module))
                    elif val_dataset == 'sintel':
                        results.update(evaluate.validate_sintel(model.module))
                    elif val_dataset == 'kitti':
                        results.update(evaluate.validate_kitti(model.module))
                #logger.write_dict(results)
                # Validation switched the model to eval(); restore train mode.
                model.train()
                if args.stage != 'chairs':
                    model.module.freeze_bn()
            total_steps += 1
            if total_steps > args.num_steps:
                should_keep_training = False
                break
    #logger.close()
    PATH = 'checkpoints/%s.pth' % args.name
    torch.save(model.state_dict(), PATH)
    return PATH
# CLI entry point: parse training hyperparameters, fix the RNG seeds for
# reproducibility, make sure the checkpoint directory exists, then train.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='raft', help="name your experiment")
    parser.add_argument('--stage', help="determines which dataset to use for training")
    parser.add_argument('--restore_ckpt', help="restore checkpoint")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--validation', type=str, nargs='+')
    parser.add_argument('--lr', type=float, default=0.00002)
    parser.add_argument('--num_steps', type=int, default=100000)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
    parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--iters', type=int, default=12)
    parser.add_argument('--wdecay', type=float, default=.00005)
    parser.add_argument('--epsilon', type=float, default=1e-8)
    parser.add_argument('--clip', type=float, default=1.0)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
    parser.add_argument('--add_noise', action='store_true')
    parser.add_argument('--local_rank', default=-1, type=int,
                        help='node rank for distributed training')
    args = parser.parse_args()
    torch.manual_seed(1234)
    np.random.seed(1234)
    if not os.path.isdir('checkpoints'):
        os.mkdir('checkpoints')
    train(args)
| 8,244 | 31.333333 | 103 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/lr_scheduler.py | import types
import math
from torch._six import inf
from functools import wraps
import warnings
import weakref
from collections import Counter
from bisect import bisect_right
#from torch.optim.optimizer import Optimizer
class _LRScheduler(object):
    """Base class for learning-rate schedulers.

    Vendored from torch.optim.lr_scheduler with the
    isinstance(optimizer, Optimizer) check commented out so it accepts any
    optimizer-like object exposing `param_groups` and `step()`.
    """
    def __init__(self, optimizer, last_epoch=-1, verbose=False):
        # Attach optimizer
        #if not isinstance(optimizer, Optimizer):
        #    raise TypeError('{} is not an Optimizer'.format(
        #        type(optimizer).__name__))
        self.optimizer = optimizer
        # Initialize epoch and base learning rates
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups]
        self.last_epoch = last_epoch
        # Following https://github.com/pytorch/pytorch/issues/20124
        # We would like to ensure that `lr_scheduler.step()` is called after
        # `optimizer.step()`
        def with_counter(method):
            # Wrap optimizer.step so we can detect the scheduler being
            # stepped before the optimizer (see step() below).
            if getattr(method, '_with_counter', False):
                # `optimizer.step()` has already been replaced, return.
                return method
            # Keep a weak reference to the optimizer instance to prevent
            # cyclic references.
            instance_ref = weakref.ref(method.__self__)
            # Get the unbound method for the same purpose.
            func = method.__func__
            cls = instance_ref().__class__
            del method
            @wraps(func)
            def wrapper(*args, **kwargs):
                instance = instance_ref()
                instance._step_count += 1
                wrapped = func.__get__(instance, cls)
                return wrapped(*args, **kwargs)
            # Note that the returned function here is no longer a bound method,
            # so attributes like `__func__` and `__self__` no longer exist.
            wrapper._with_counter = True
            return wrapper
        self.optimizer.step = with_counter(self.optimizer.step)
        self.optimizer._step_count = 0
        self._step_count = 0
        self.verbose = verbose
        # Initial step computes the LR for epoch 0.
        self.step()
    def state_dict(self):
        """Returns the state of the scheduler as a :class:`dict`.
        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
    def load_state_dict(self, state_dict):
        """Loads the schedulers state.
        Args:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)
    def get_last_lr(self):
        """ Return last computed learning rate by current scheduler.
        """
        return self._last_lr
    def get_lr(self):
        # Compute learning rate using chainable form of the scheduler
        raise NotImplementedError
    def print_lr(self, is_verbose, group, lr, epoch=None):
        """Display the current learning rate.
        """
        if is_verbose:
            if epoch is None:
                print('Adjusting learning rate'
                      ' of group {} to {:.4e}.'.format(group, lr))
            else:
                print('Epoch {:5d}: adjusting learning rate'
                      ' of group {} to {:.4e}.'.format(epoch, group, lr))
    def step(self, epoch=None):
        """Advance one step and write the new learning rate(s) into the
        optimizer's param_groups.

        NOTE(review): EPOCH_DEPRECATION_WARNING is never defined in this
        file (it lives in torch.optim.lr_scheduler), so calling
        step(epoch=...) with an explicit epoch raises NameError here —
        confirm and define the constant if the epoch argument is needed.
        """
        # Raise a warning if old pattern is detected
        # https://github.com/pytorch/pytorch/issues/20124
        if self._step_count == 1:
            if not hasattr(self.optimizer.step, "_with_counter"):
                warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
                              "initialization. Please, make sure to call `optimizer.step()` before "
                              "`lr_scheduler.step()`. See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
            # Just check if there were two first lr_scheduler.step() calls before optimizer.step()
            elif self.optimizer._step_count < 1:
                warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
                              "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
                              "`optimizer.step()` before `lr_scheduler.step()`.  Failure to do this "
                              "will result in PyTorch skipping the first value of the learning rate schedule. "
                              "See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
        self._step_count += 1
        class _enable_get_lr_call:
            # Context manager that marks get_lr() as being called from
            # within step(), so subclasses can warn on direct use.
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False
        with _enable_get_lr_call(self):
            if epoch is None:
                self.last_epoch += 1
                values = self.get_lr()
            else:
                warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
                self.last_epoch = epoch
                if hasattr(self, "_get_closed_form_lr"):
                    values = self._get_closed_form_lr()
                else:
                    values = self.get_lr()
        for i, data in enumerate(zip(self.optimizer.param_groups, values)):
            param_group, lr = data
            param_group['lr'] = lr
            self.print_lr(self.verbose, i, lr, epoch)
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
class OneCycleLR(_LRScheduler):
r"""Sets the learning rate of each parameter group according to the
1cycle learning rate policy. The 1cycle policy anneals the learning
rate from an initial learning rate to some maximum learning rate and then
from that maximum learning rate to some minimum learning rate much lower
than the initial learning rate.
This policy was initially described in the paper `Super-Convergence:
Very Fast Training of Neural Networks Using Large Learning Rates`_.
The 1cycle learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This scheduler is not chainable.
Note also that the total number of steps in the cycle can be determined in one
of two ways (listed in order of precedence):
#. A value for total_steps is explicitly provided.
#. A number of epochs (epochs) and a number of steps per epoch
(steps_per_epoch) are provided.
In this case, the number of total steps is inferred by
total_steps = epochs * steps_per_epoch
You must either provide a value for total_steps or provide a value for both
epochs and steps_per_epoch.
The default behaviour of this scheduler follows the fastai implementation of 1cycle, which
claims that "unpublished work has shown even better results by using only two phases". To
mimic the behaviour of the original paper instead, set ``three_phase=True``.
Args:
optimizer (Optimizer): Wrapped optimizer.
max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group.
total_steps (int): The total number of steps in the cycle. Note that
if a value is not provided here, then it must be inferred by providing
a value for epochs and steps_per_epoch.
Default: None
epochs (int): The number of epochs to train for. This is used along
with steps_per_epoch in order to infer the total number of steps in the cycle
if a value for total_steps is not provided.
Default: None
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the
cycle if a value for total_steps is not provided.
Default: None
pct_start (float): The percentage of the cycle (in number of steps) spent
increasing the learning rate.
Default: 0.3
anneal_strategy (str): {'cos', 'linear'}
Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
linear annealing.
Default: 'cos'
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum'.
Default: True
base_momentum (float or list): Lower momentum boundaries in the cycle
for each parameter group. Note that momentum is cycled inversely
to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr'.
Default: 0.85
max_momentum (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_momentum - base_momentum).
Note that momentum is cycled inversely
to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr'
Default: 0.95
div_factor (float): Determines the initial learning rate via
initial_lr = max_lr/div_factor
Default: 25
final_div_factor (float): Determines the minimum learning rate via
min_lr = initial_lr/final_div_factor
Default: 1e4
three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the
learning rate according to 'final_div_factor' instead of modifying the second
phase (the first two phases will be symmetrical about the step indicated by
'pct_start').
last_epoch (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning.
Default: -1
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
Example:
>>> data_loader = torch.utils.data.DataLoader(...)
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
.. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
https://arxiv.org/abs/1708.07120
"""
def __init__(self,
optimizer,
max_lr,
total_steps=None,
epochs=None,
steps_per_epoch=None,
pct_start=0.3,
anneal_strategy='cos',
cycle_momentum=True,
base_momentum=0.85,
max_momentum=0.95,
div_factor=25.,
final_div_factor=1e4,
three_phase=False,
last_epoch=-1,
verbose=False):
# Validate optimizer
#if not isinstance(optimizer, Optimizer):
# raise TypeError('{} is not an Optimizer'.format(
# type(optimizer).__name__))
self.optimizer = optimizer
# Validate total_steps
if total_steps is None and epochs is None and steps_per_epoch is None:
raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)")
elif total_steps is not None:
if total_steps <= 0 or not isinstance(total_steps, int):
raise ValueError("Expected positive integer total_steps, but got {}".format(total_steps))
self.total_steps = total_steps
else:
if epochs <= 0 or not isinstance(epochs, int):
raise ValueError("Expected positive integer epochs, but got {}".format(epochs))
if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int):
raise ValueError("Expected positive integer steps_per_epoch, but got {}".format(steps_per_epoch))
self.total_steps = epochs * steps_per_epoch
if three_phase:
self._schedule_phases = [
{
'end_step': float(pct_start * self.total_steps) - 1,
'start_lr': 'initial_lr',
'end_lr': 'max_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'base_momentum',
},
{
'end_step': float(2 * pct_start * self.total_steps) - 2,
'start_lr': 'max_lr',
'end_lr': 'initial_lr',
'start_momentum': 'base_momentum',
'end_momentum': 'max_momentum',
},
{
'end_step': self.total_steps - 1,
'start_lr': 'initial_lr',
'end_lr': 'min_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'max_momentum',
},
]
else:
self._schedule_phases = [
{
'end_step': float(pct_start * self.total_steps) - 1,
'start_lr': 'initial_lr',
'end_lr': 'max_lr',
'start_momentum': 'max_momentum',
'end_momentum': 'base_momentum',
},
{
'end_step': self.total_steps - 1,
'start_lr': 'max_lr',
'end_lr': 'min_lr',
'start_momentum': 'base_momentum',
'end_momentum': 'max_momentum',
},
]
# Validate pct_start
if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
raise ValueError("Expected float between 0 and 1 pct_start, but got {}".format(pct_start))
# Validate anneal_strategy
if anneal_strategy not in ['cos', 'linear']:
raise ValueError("anneal_strategy must by one of 'cos' or 'linear', instead got {}".format(anneal_strategy))
elif anneal_strategy == 'cos':
self.anneal_func = self._annealing_cos
elif anneal_strategy == 'linear':
self.anneal_func = self._annealing_linear
# Initialize learning rate variables
max_lrs = self._format_param('max_lr', self.optimizer, max_lr)
if last_epoch == -1:
for idx, group in enumerate(self.optimizer.param_groups):
group['initial_lr'] = max_lrs[idx] / div_factor
group['max_lr'] = max_lrs[idx]
group['min_lr'] = group['initial_lr'] / final_div_factor
# Initialize momentum variables
self.cycle_momentum = cycle_momentum
if self.cycle_momentum:
if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults:
raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled')
self.use_beta1 = 'betas' in self.optimizer.defaults
max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
if last_epoch == -1:
for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups):
if self.use_beta1:
_, beta2 = group['betas']
group['betas'] = (m_momentum, beta2)
else:
group['momentum'] = m_momentum
group['max_momentum'] = m_momentum
group['base_momentum'] = b_momentum
super(OneCycleLR, self).__init__(optimizer, last_epoch, verbose)
def _format_param(self, name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError("expected {} values for {}, got {}".format(
len(optimizer.param_groups), name, len(param)))
return param
else:
return [param] * len(optimizer.param_groups)
def _annealing_cos(self, start, end, pct):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = math.cos(math.pi * pct) + 1
return end + (start - end) / 2.0 * cos_out
def _annealing_linear(self, start, end, pct):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return (end - start) * pct + start
    def get_lr(self):
        """Compute the learning rate for every param group at the current step.

        Walks the configured ``_schedule_phases`` to find the phase containing
        ``self.last_epoch``, anneals the lr within that phase, and — when
        ``cycle_momentum`` is enabled — also anneals momentum and writes it
        back into the param groups as a side effect.
        """
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)
        lrs = []
        step_num = self.last_epoch
        if step_num > self.total_steps:
            raise ValueError("Tried to step {} times. The specified number of total steps is {}"
                             .format(step_num + 1, self.total_steps))
        for group in self.optimizer.param_groups:
            start_step = 0
            for i, phase in enumerate(self._schedule_phases):
                end_step = phase['end_step']
                # Always fall into the last phase so pct / computed_lr stay defined.
                if step_num <= end_step or i == len(self._schedule_phases) - 1:
                    pct = (step_num - start_step) / (end_step - start_step)
                    computed_lr = self.anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct)
                    if self.cycle_momentum:
                        computed_momentum = self.anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct)
                    break
                start_step = phase['end_step']
            lrs.append(computed_lr)
            if self.cycle_momentum:
                # Momentum is written back in place; Adam-style optimizers get
                # the annealed value as beta1 (beta2 preserved).
                if self.use_beta1:
                    _, beta2 = group['betas']
                    group['betas'] = (computed_momentum, beta2)
                else:
                    group['momentum'] = computed_momentum
        return lrs
| 19,353 | 43.800926 | 128 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/sparse_update.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../../../')))
from devkit.sparse_ops import SparseConv
class FlowHead(nn.Module):
    """Two sparse convs mapping a hidden state to a 2-channel flow update."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = SparseConv(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = SparseConv(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> ReLU -> conv; final output has 2 channels.
        return self.conv2(self.relu(self.conv1(x)))
class ConvGRU(nn.Module):
    """Convolutional GRU cell built from sparse convolutions."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(ConvGRU, self).__init__()
        self.convz = SparseConv(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convr = SparseConv(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convq = SparseConv(hidden_dim+input_dim, hidden_dim, 3, padding=1)

    def forward(self, h, x):
        # Concatenate hidden state and input along channels.
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(self.convz(hx))  # update gate
        r = torch.sigmoid(self.convr(hx))  # reset gate
        q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)))  # candidate state
        # Blend previous state and candidate by the update gate.
        h = (1-z) * h + z * q
        return h
class SepConvGRU(nn.Module):
    """Separable convolutional GRU (sparse-conv variant): one GRU update with
    1x5 (horizontal) kernels followed by one with 5x1 (vertical) kernels."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(SepConvGRU, self).__init__()
        self.convz1 = SparseConv(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convr1 = SparseConv(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convq1 = SparseConv(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convz2 = SparseConv(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convr2 = SparseConv(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convq2 = SparseConv(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))

    def forward(self, h, x):
        # horizontal
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(self.convz1(hx))  # update gate
        r = torch.sigmoid(self.convr1(hx))  # reset gate
        q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1)))  # candidate
        h = (1-z) * h + z * q
        # vertical
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(self.convz2(hx))
        r = torch.sigmoid(self.convr2(hx))
        q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1)))
        h = (1-z) * h + z * q
        return h
class SmallMotionEncoder(nn.Module):
    """Fuses correlation features and the current flow into motion features
    (sparse-conv variant)."""

    def __init__(self, args):
        super(SmallMotionEncoder, self).__init__()
        # One channel per correlation-pyramid sample: levels * window area.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = SparseConv(cor_planes, 96, 1, padding=0)
        self.convf1 = SparseConv(2, 64, 7, padding=3)
        self.convf2 = SparseConv(64, 32, 3, padding=1)
        self.conv = SparseConv(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        cor = F.relu(self.convc1(corr))
        flo = F.relu(self.convf1(flow))
        flo = F.relu(self.convf2(flo))
        cor_flo = torch.cat([cor, flo], dim=1)
        out = F.relu(self.conv(cor_flo))
        # Append the raw 2-channel flow to the encoded features.
        return torch.cat([out, flow], dim=1)
class BasicMotionEncoder(nn.Module):
    """Full-size motion encoder (sparse-conv variant): separate correlation
    and flow branches merged into 126 channels, plus the raw flow."""

    def __init__(self, args):
        super(BasicMotionEncoder, self).__init__()
        # One channel per correlation-pyramid sample: levels * window area.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = SparseConv(cor_planes, 256, 1, padding=0)
        self.convc2 = SparseConv(256, 192, 3, padding=1)
        self.convf1 = SparseConv(2, 128, 7, padding=3)
        self.convf2 = SparseConv(128, 64, 3, padding=1)
        # 128-2 so that concatenating the 2-channel flow yields 128 channels.
        self.conv = SparseConv(64+192, 128-2, 3, padding=1)

    def forward(self, flow, corr):
        cor = F.relu(self.convc1(corr))
        cor = F.relu(self.convc2(cor))
        flo = F.relu(self.convf1(flow))
        flo = F.relu(self.convf2(flo))
        cor_flo = torch.cat([cor, flo], dim=1)
        out = F.relu(self.conv(cor_flo))
        return torch.cat([out, flow], dim=1)
class SmallUpdateBlock(nn.Module):
    """Lightweight RAFT update module: motion encoder + ConvGRU + flow head."""

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        # Returns (updated hidden state, upsampling mask — always None for the
        # small model, predicted flow delta).
        motion_features = self.encoder(flow, corr)
        inp = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, inp)
        delta_flow = self.flow_head(net)
        return net, None, delta_flow
class BasicUpdateBlock(nn.Module):
    """Full-size RAFT update module (sparse-conv variant): motion encoder +
    separable GRU + flow head + convex-upsampling mask head."""

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        # Predicts 9 weights per 8x8 upsampled pixel (64*9 channels).
        self.mask = nn.Sequential(
            SparseConv(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            SparseConv(256, 64*9, 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True):
        motion_features = self.encoder(flow, corr)
        inp = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, inp)
        delta_flow = self.flow_head(net)
        # scale mask to balance gradients
        mask = .25 * self.mask(net)
        return net, mask, delta_flow
| 5,385 | 36.402778 | 88 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/sparse_raft.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sparse_update import BasicUpdateBlock, SmallUpdateBlock
from sparse_extractor import BasicEncoder, SmallEncoder
from corr import CorrBlock, AlternateCorrBlock
from utils.utils import bilinear_sampler, coords_grid, upflow8
try:
    autocast = torch.cuda.amp.autocast
except AttributeError:
    # torch.cuda.amp.autocast only exists from PyTorch 1.6; provide a no-op
    # stand-in so `with autocast(enabled=...)` still works on older versions.
    # (Was a bare `except:`, which would also swallow unrelated errors such as
    # KeyboardInterrupt.)
    class autocast:
        def __init__(self, enabled):
            pass
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass
class SparseRAFT(nn.Module):
    """RAFT optical-flow network whose update block uses sparse convolutions.

    Iteratively refines a flow field represented as the difference between
    two coordinate grids (coords1 - coords0).
    """

    def __init__(self, args):
        super(SparseRAFT, self).__init__()
        self.args = args
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4
        # `args` supports `in` — presumably an argparse.Namespace-like object;
        # fill in optional settings with defaults.
        if 'dropout' not in self.args:
            self.args.dropout = 0
        if 'alternate_corr' not in self.args:
            self.args.alternate_corr = False
        # feature network, context network, and update block
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)

    def freeze_bn(self):
        # Put every BatchNorm layer in eval mode (e.g. while fine-tuning).
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def initialize_flow(self, img):
        """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
        N, C, H, W = img.shape
        coords0 = coords_grid(N, H//8, W//8, device=img.device)
        coords1 = coords_grid(N, H//8, W//8, device=img.device)
        # optical flow computed as difference: flow = coords1 - coords0
        return coords0, coords1

    def upsample_flow(self, flow, mask):
        """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
        N, _, H, W = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        # Softmax over the 9 neighbors makes the weights a convex combination.
        mask = torch.softmax(mask, dim=2)
        # Flow values are scaled by 8 to match the finer resolution.
        up_flow = F.unfold(8 * flow, [3,3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, 8*H, 8*W)

    def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
        """ Estimate optical flow between pair of frames """
        # Normalize images from [0, 255] to [-1, 1].
        image1 = 2 * (image1 / 255.0) - 1.0
        image2 = 2 * (image2 / 255.0) - 1.0
        image1 = image1.contiguous()
        image2 = image2.contiguous()
        hdim = self.hidden_dim
        cdim = self.context_dim
        # run the feature network
        with autocast(enabled=self.args.mixed_precision):
            fmap1, fmap2 = self.fnet([image1, image2])
        fmap1 = fmap1.float()
        fmap2 = fmap2.float()
        if self.args.alternate_corr:
            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        else:
            corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        # run the context network
        with autocast(enabled=self.args.mixed_precision):
            cnet = self.cnet(image1)
            # Split the context features into GRU hidden state and input.
            net, inp = torch.split(cnet, [hdim, cdim], dim=1)
            net = torch.tanh(net)
            inp = torch.relu(inp)
        coords0, coords1 = self.initialize_flow(image1)
        if flow_init is not None:
            coords1 = coords1 + flow_init
        flow_predictions = []
        for itr in range(iters):
            # Detach so gradients do not flow through earlier iterations.
            coords1 = coords1.detach()
            corr = corr_fn(coords1) # index correlation volume
            flow = coords1 - coords0
            with autocast(enabled=self.args.mixed_precision):
                net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
            # F(t+1) = F(t) + \Delta(t)
            coords1 = coords1 + delta_flow
            # upsample predictions
            if up_mask is None:
                flow_up = upflow8(coords1 - coords0)
            else:
                flow_up = self.upsample_flow(coords1 - coords0, up_mask)
            flow_predictions.append(flow_up)
        if test_mode:
            # Return (low-resolution flow, final upsampled flow).
            return coords1 - coords0, flow_up
        return flow_predictions
| 4,950 | 33.144828 | 102 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/corr.py | import torch
import torch.nn.functional as F
from utils.utils import bilinear_sampler, coords_grid
try:
import alt_cuda_corr
except:
# alt_cuda_corr is not compiled
pass
class CorrBlock:
    """All-pairs correlation volume with an average-pooled pyramid.

    Queried per iteration by sampling a (2r+1)x(2r+1) window around each
    flow coordinate at every pyramid level.
    """

    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        self.corr_pyramid = []
        # all pairs correlation
        corr = CorrBlock.corr(fmap1, fmap2)
        batch, h1, w1, dim, h2, w2 = corr.shape
        corr = corr.reshape(batch*h1*w1, dim, h2, w2)
        self.corr_pyramid.append(corr)
        for i in range(self.num_levels-1):
            # Pool over the target-frame spatial dims to build coarser levels.
            corr = F.avg_pool2d(corr, 2, stride=2)
            self.corr_pyramid.append(corr)

    def __call__(self, coords):
        """Sample correlation features around ``coords`` from every level."""
        r = self.radius
        coords = coords.permute(0, 2, 3, 1)
        batch, h1, w1, _ = coords.shape
        out_pyramid = []
        for i in range(self.num_levels):
            corr = self.corr_pyramid[i]
            # Sampling offsets spanning the (2r+1)x(2r+1) lookup window.
            dx = torch.linspace(-r, r, 2*r+1, device=coords.device)
            dy = torch.linspace(-r, r, 2*r+1, device=coords.device)
            delta = torch.stack(torch.meshgrid(dy, dx), axis=-1)
            # Scale coordinates down for coarser pyramid levels.
            centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i
            delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)
            coords_lvl = centroid_lvl + delta_lvl
            corr = bilinear_sampler(corr, coords_lvl)
            corr = corr.view(batch, h1, w1, -1)
            out_pyramid.append(corr)
        out = torch.cat(out_pyramid, dim=-1)
        return out.permute(0, 3, 1, 2).contiguous().float()

    @staticmethod
    def corr(fmap1, fmap2):
        # Dot product between every pair of feature vectors, scaled by sqrt(dim).
        batch, dim, ht, wd = fmap1.shape
        fmap1 = fmap1.view(batch, dim, ht*wd)
        fmap2 = fmap2.view(batch, dim, ht*wd)
        corr = torch.matmul(fmap1.transpose(1,2), fmap2)
        corr = corr.view(batch, ht, wd, 1, ht, wd)
        return corr / torch.sqrt(torch.tensor(dim).float())
class AlternateCorrBlock:
    """Memory-efficient correlation lookup via the ``alt_cuda_corr`` extension.

    Keeps a feature pyramid instead of the full correlation volume and
    computes windowed correlations on the fly at query time.
    """

    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        self.pyramid = [(fmap1, fmap2)]
        for i in range(self.num_levels):
            fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
            fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
            self.pyramid.append((fmap1, fmap2))

    def __call__(self, coords):
        coords = coords.permute(0, 2, 3, 1)
        B, H, W, _ = coords.shape
        dim = self.pyramid[0][0].shape[1]
        corr_list = []
        for i in range(self.num_levels):
            r = self.radius
            # fmap1 is always taken at full resolution; fmap2 at level i.
            fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous()
            fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous()
            coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()
            # Custom CUDA kernel returns the windowed correlation directly.
            corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r)
            corr_list.append(corr.squeeze(1))
        corr = torch.stack(corr_list, dim=1)
        corr = corr.reshape(B, -1, H, W)
        return corr / torch.sqrt(torch.tensor(dim).float())
| 3,085 | 32.543478 | 74 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/update.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class FlowHead(nn.Module):
    """Two-layer conv head mapping a hidden state to a 2-channel flow update."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return the 2-channel flow delta for feature map ``x``."""
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
class ConvGRU(nn.Module):
    """Single convolutional GRU cell operating on 2-D feature maps."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(ConvGRU, self).__init__()
        self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)

    def forward(self, h, x):
        """Advance the hidden state ``h`` one step given input ``x``."""
        combined = torch.cat([h, x], dim=1)
        update_gate = torch.sigmoid(self.convz(combined))
        reset_gate = torch.sigmoid(self.convr(combined))
        gated = torch.cat([reset_gate * h, x], dim=1)
        candidate = torch.tanh(self.convq(gated))
        # Convex blend of the previous state and the candidate.
        return (1 - update_gate) * h + update_gate * candidate
class SepConvGRU(nn.Module):
    """Separable convolutional GRU: one GRU update with 1x5 (horizontal)
    kernels followed by one with 5x1 (vertical) kernels."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(SepConvGRU, self).__init__()
        self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))

    def _gru_step(self, h, x, convz, convr, convq):
        # One GRU update using the supplied gate convolutions.
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(convz(hx))
        r = torch.sigmoid(convr(hx))
        q = torch.tanh(convq(torch.cat([r * h, x], dim=1)))
        return (1 - z) * h + z * q

    def forward(self, h, x):
        """Run the horizontal (1x5) pass, then the vertical (5x1) pass."""
        h = self._gru_step(h, x, self.convz1, self.convr1, self.convq1)
        h = self._gru_step(h, x, self.convz2, self.convr2, self.convq2)
        return h
class SmallMotionEncoder(nn.Module):
    """Fuses correlation features and the current flow into motion features."""

    def __init__(self, args):
        super(SmallMotionEncoder, self).__init__()
        # One channel per correlation-pyramid sample: levels * window area.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
        self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
        self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv = nn.Conv2d(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        """Return motion features with the raw 2-channel flow appended."""
        corr_feat = F.relu(self.convc1(corr))
        flow_feat = F.relu(self.convf2(F.relu(self.convf1(flow))))
        merged = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        return torch.cat([merged, flow], dim=1)
class BasicMotionEncoder(nn.Module):
    """Full-size motion encoder: separate correlation and flow branches merged
    into 126 channels, plus the raw flow (128 channels total)."""

    def __init__(self, args):
        super(BasicMotionEncoder, self).__init__()
        # One channel per correlation-pyramid sample: levels * window area.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
        self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
        self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
        # 128-2 so the concatenated flow brings the total back to 128.
        self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)

    def forward(self, flow, corr):
        """Return motion features with the raw 2-channel flow appended."""
        corr_feat = F.relu(self.convc2(F.relu(self.convc1(corr))))
        flow_feat = F.relu(self.convf2(F.relu(self.convf1(flow))))
        merged = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        return torch.cat([merged, flow], dim=1)
class SmallUpdateBlock(nn.Module):
    """Lightweight update module: motion encoder + ConvGRU + flow head."""

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        """Return (updated hidden state, mask — always None here, flow delta)."""
        motion_feat = self.encoder(flow, corr)
        gru_input = torch.cat([inp, motion_feat], dim=1)
        net = self.gru(net, gru_input)
        return net, None, self.flow_head(net)
class BasicUpdateBlock(nn.Module):
    """Full-size update module: motion encoder + separable GRU + flow head,
    plus a small head predicting the convex-upsampling mask."""

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        # Predicts 9 weights per 8x8 upsampled pixel (64*9 channels).
        self.mask = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64*9, 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True):
        """Return (updated hidden state, upsampling mask, flow delta)."""
        motion_feat = self.encoder(flow, corr)
        net = self.gru(net, torch.cat([inp, motion_feat], dim=1))
        delta_flow = self.flow_head(net)
        # Scale the mask logits to balance gradient magnitudes.
        mask = .25 * self.mask(net)
        return net, mask, delta_flow
| 5,227 | 36.342857 | 87 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """Residual block: two 3x3 convs with configurable normalization and an
    optional strided 1x1 projection on the skip path.

    Args:
        in_planes: input channel count.
        planes: output channel count.
        norm_fn: one of 'group', 'batch', 'instance', 'none'.
        stride: stride of the first conv (and of the skip projection).
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if not stride == 1:
                self.norm3 = nn.Sequential()
        else:
            # Fail fast: previously an unknown norm_fn left self.norm1 unset
            # and surfaced later as an opaque AttributeError in forward().
            raise ValueError("unknown norm_fn: {}".format(norm_fn))
        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        return self.relu(x+y)
class BottleneckBlock(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, with
    configurable normalization and an optional strided skip projection.

    Args:
        in_planes: input channel count.
        planes: output channel count (internal width is planes//4).
        norm_fn: one of 'group', 'batch', 'instance', 'none'.
        stride: stride of the middle conv (and of the skip projection).
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes//4)
            self.norm2 = nn.BatchNorm2d(planes//4)
            self.norm3 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes//4)
            self.norm2 = nn.InstanceNorm2d(planes//4)
            self.norm3 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            self.norm3 = nn.Sequential()
            if not stride == 1:
                self.norm4 = nn.Sequential()
        else:
            # Fail fast instead of an AttributeError on self.norm1 at forward time.
            raise ValueError("unknown norm_fn: {}".format(norm_fn))
        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)

    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        y = self.relu(self.norm3(self.conv3(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        return self.relu(x+y)
class BasicEncoder(nn.Module):
    """Feature/context encoder producing a 1/8-resolution feature map.

    Accepts a (N, 3, H, W) tensor or a list/tuple of two such tensors (which
    are batched together and split again on output).  Returns
    (N, output_dim, H/8, W/8).
    """

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        else:
            # Fail fast: previously an unknown norm_fn left self.norm1 unset
            # and surfaced later as an opaque AttributeError.
            raise ValueError("unknown norm_fn: {}".format(norm_fn))
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 64
        self.layer1 = self._make_layer(64,  stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)
        # output convolution
        self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        # Two residual blocks; only the first may downsample.
        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)

    def forward(self, x):
        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        # Dropout only applies in training mode.
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        if is_list:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
class SmallEncoder(nn.Module):
    """Small feature/context encoder (bottleneck blocks) producing a
    1/8-resolution feature map.

    Accepts a (N, 3, H, W) tensor or a list/tuple of two such tensors (which
    are batched together and split again on output).  Returns
    (N, output_dim, H/8, W/8).
    """

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        else:
            # Fail fast: previously an unknown norm_fn left self.norm1 unset
            # and surfaced later as an opaque AttributeError.
            raise ValueError("unknown norm_fn: {}".format(norm_fn))
        self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 32
        self.layer1 = self._make_layer(32,  stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        # Two bottleneck blocks; only the first may downsample.
        layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)

    def forward(self, x):
        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        # Dropout only applies in training mode.
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        if is_list:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
| 8,847 | 32.014925 | 93 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/datasets.py | # Data loading based on https://github.com/NVIDIA/flownet2-pytorch
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import os
import math
import random
from glob import glob
import os.path as osp
from utils import frame_utils
from utils.augmentor import FlowAugmentor, SparseFlowAugmentor
class FlowDataset(data.Dataset):
    """Base class for optical-flow datasets.

    Subclasses populate ``image_list`` (pairs of image paths), ``flow_list``
    and, for test splits, ``extra_info``.  ``sparse`` datasets (KITTI/HD1K)
    carry an explicit validity mask alongside the flow.
    """

    def __init__(self, aug_params=None, sparse=False):
        self.augmentor = None
        self.sparse = sparse
        if aug_params is not None:
            if sparse:
                self.augmentor = SparseFlowAugmentor(**aug_params)
            else:
                self.augmentor = FlowAugmentor(**aug_params)
        self.is_test = False
        self.init_seed = False
        self.flow_list = []
        self.image_list = []
        self.extra_info = []

    def __getitem__(self, index):
        # Test mode: return only the image pair plus bookkeeping info.
        if self.is_test:
            img1 = frame_utils.read_gen(self.image_list[index][0])
            img2 = frame_utils.read_gen(self.image_list[index][1])
            img1 = np.array(img1).astype(np.uint8)[..., :3]
            img2 = np.array(img2).astype(np.uint8)[..., :3]
            img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
            img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
            return img1, img2, self.extra_info[index]
        # Seed each dataloader worker once so augmentations differ per worker.
        if not self.init_seed:
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                torch.manual_seed(worker_info.id)
                np.random.seed(worker_info.id)
                random.seed(worker_info.id)
                self.init_seed = True
        # Wrap around so over-sampled (__rmul__'d) datasets index correctly.
        index = index % len(self.image_list)
        valid = None
        if self.sparse:
            flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
        else:
            flow = frame_utils.read_gen(self.flow_list[index])
        img1 = frame_utils.read_gen(self.image_list[index][0])
        img2 = frame_utils.read_gen(self.image_list[index][1])
        flow = np.array(flow).astype(np.float32)
        img1 = np.array(img1).astype(np.uint8)
        img2 = np.array(img2).astype(np.uint8)
        # grayscale images
        if len(img1.shape) == 2:
            img1 = np.tile(img1[...,None], (1, 1, 3))
            img2 = np.tile(img2[...,None], (1, 1, 3))
        else:
            img1 = img1[..., :3]
            img2 = img2[..., :3]
        if self.augmentor is not None:
            if self.sparse:
                img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)
            else:
                img1, img2, flow = self.augmentor(img1, img2, flow)
        img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
        img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
        flow = torch.from_numpy(flow).permute(2, 0, 1).float()
        if valid is not None:
            valid = torch.from_numpy(valid)
        else:
            # Dense flow: mark implausibly large displacements as invalid.
            valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)
        return img1, img2, flow, valid.float()

    def __rmul__(self, v):
        # Support `k * dataset` to over-sample a dataset k times.
        self.flow_list = v * self.flow_list
        self.image_list = v * self.image_list
        return self

    def __len__(self):
        return len(self.image_list)
class MpiSintel(FlowDataset):
    """MPI Sintel dataset for the given render pass ('clean' or 'final')."""

    def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'):
        super(MpiSintel, self).__init__(aug_params)
        flow_root = osp.join(root, split, 'flow')
        image_root = osp.join(root, split, dstype)
        if split == 'test':
            self.is_test = True
        for scene in os.listdir(image_root):
            image_list = sorted(glob(osp.join(image_root, scene, '*.png')))
            # Consecutive frame pairs within each scene.
            for i in range(len(image_list)-1):
                self.image_list += [ [image_list[i], image_list[i+1]] ]
                self.extra_info += [ (scene, i) ] # scene and frame_id
            if split != 'test':
                self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))
class FlyingChairs(FlowDataset):
    """FlyingChairs dataset: consecutive .ppm image pairs with .flo ground truth.

    NOTE(review): the default ``root`` still contains the placeholder 'xxxxx';
    callers are expected to pass a real dataset path.
    """

    def __init__(self, aug_params=None, split='train', root='xxxxx/FlyingChairs_release/data'):
        super(FlyingChairs, self).__init__(aug_params)
        # chairs_split.txt tags each sample 1 (train) or 2 (validation).  The
        # original code only matched the literal strings 'training' and
        # 'validation', so the default split='train' silently produced an
        # empty dataset; accept 'train'/'val' as aliases.
        if split == 'train':
            split = 'training'
        elif split == 'val':
            split = 'validation'
        images = sorted(glob(osp.join(root, '*.ppm')))
        flows = sorted(glob(osp.join(root, '*.flo')))
        # Two images per flow file; raise (not assert, which -O strips).
        if len(images) // 2 != len(flows):
            raise ValueError("expected two images per flow file, got {} images / {} flows"
                             .format(len(images), len(flows)))
        split_list = np.loadtxt('chairs_split.txt', dtype=np.int32)
        for i in range(len(flows)):
            xid = split_list[i]
            if (split=='training' and xid==1) or (split=='validation' and xid==2):
                self.flow_list += [ flows[i] ]
                self.image_list += [ [images[2*i], images[2*i+1]] ]
class FlyingThings3D(FlowDataset):
    """FlyingThings3D (left camera only), with flow in both temporal
    directions."""

    def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'):
        super(FlyingThings3D, self).__init__(aug_params)
        for cam in ['left']:
            for direction in ['into_future', 'into_past']:
                image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))
                image_dirs = sorted([osp.join(f, cam) for f in image_dirs])
                flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))
                flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])
                for idir, fdir in zip(image_dirs, flow_dirs):
                    images = sorted(glob(osp.join(idir, '*.png')) )
                    flows = sorted(glob(osp.join(fdir, '*.pfm')) )
                    for i in range(len(flows)-1):
                        if direction == 'into_future':
                            self.image_list += [ [images[i], images[i+1]] ]
                            self.flow_list += [ flows[i] ]
                        elif direction == 'into_past':
                            # Reversed image pair paired with the backward flow.
                            self.image_list += [ [images[i+1], images[i]] ]
                            self.flow_list += [ flows[i+1] ]
class KITTI(FlowDataset):
    """KITTI flow dataset (sparse ground truth with validity masks)."""

    def __init__(self, aug_params=None, split='training', root='datasets/KITTI'):
        super(KITTI, self).__init__(aug_params, sparse=True)
        if split == 'testing':
            self.is_test = True
        root = osp.join(root, split)
        # *_10.png / *_11.png are the two frames of each KITTI pair.
        images1 = sorted(glob(osp.join(root, 'image_2/*_10.png')))
        images2 = sorted(glob(osp.join(root, 'image_2/*_11.png')))
        for img1, img2 in zip(images1, images2):
            frame_id = img1.split('/')[-1]
            self.extra_info += [ [frame_id] ]
            self.image_list += [ [img1, img2] ]
        if split == 'training':
            self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))
class HD1K(FlowDataset):
    """HD1K benchmark (sparse ground truth), enumerated sequence by sequence."""

    def __init__(self, aug_params=None, root='datasets/HD1k'):
        super(HD1K, self).__init__(aug_params, sparse=True)
        seq_ix = 0
        # Walk sequences 000000, 000001, ... until one has no flow files.
        while 1:
            flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)))
            images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)))
            if len(flows) == 0:
                break
            for i in range(len(flows)-1):
                self.flow_list += [flows[i]]
                self.image_list += [ [images[i], images[i+1]] ]
            seq_ix += 1
def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'):
    """Create the data loader for the corresponding training set.

    ``args.stage`` selects the curriculum stage ('chairs', 'things',
    'sintel', 'kitti'); ``TRAIN_DS`` selects the dataset mix for the
    'sintel' stage.  Raises ValueError for unknown values (previously an
    unknown stage fell through to a NameError on ``train_dataset``).
    """
    if args.stage == 'chairs':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}
        train_dataset = FlyingChairs(aug_params, split='training')
    elif args.stage == 'things':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True}
        clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass')
        final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass')
        train_dataset = clean_dataset + final_dataset
    elif args.stage == 'sintel':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}
        things = FlyingThings3D(aug_params, dstype='frames_cleanpass')
        sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')
        sintel_final = MpiSintel(aug_params, split='training', dstype='final')
        if TRAIN_DS == 'C+T+K+S+H':
            kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True})
            hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True})
            # Integer multipliers over-sample the smaller datasets.
            train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things
        elif TRAIN_DS == 'C+T+K/S':
            train_dataset = 100*sintel_clean + 100*sintel_final + things
        else:
            raise ValueError("unknown TRAIN_DS combination: {}".format(TRAIN_DS))
    elif args.stage == 'kitti':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False}
        train_dataset = KITTI(aug_params, split='training')
    else:
        raise ValueError("unknown training stage: {}".format(args.stage))
    train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size,
        pin_memory=False, shuffle=True, num_workers=4, drop_last=True)
    print('Training with %d image pairs' % len(train_dataset))
    return train_loader
| 9,242 | 38.165254 | 111 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/raft.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from update import BasicUpdateBlock, SmallUpdateBlock
from extractor import BasicEncoder, SmallEncoder
from corr import CorrBlock, AlternateCorrBlock
from utils.utils import bilinear_sampler, coords_grid, upflow8
try:
    autocast = torch.cuda.amp.autocast
except AttributeError:
    # torch.cuda.amp.autocast only exists from PyTorch 1.6; provide a no-op
    # stand-in so `with autocast(enabled=...)` still works on older versions.
    # (Was a bare `except:`, which would also swallow unrelated errors such as
    # KeyboardInterrupt.)
    class autocast:
        def __init__(self, enabled):
            pass
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass
class RAFT(nn.Module):
    """RAFT optical-flow estimator (Recurrent All-Pairs Field Transforms).

    Builds a feature encoder, a context encoder and a recurrent update
    block; ``args.small`` switches between the small and the full model.
    """
    def __init__(self, args):
        super(RAFT, self).__init__()
        self.args = args
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4
        # fill in optional settings when missing from the args namespace
        if 'dropout' not in self.args:
            self.args.dropout = 0
        if 'alternate_corr' not in self.args:
            self.args.alternate_corr = False
        # feature network, context network, and update block
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
    def freeze_bn(self):
        # Put all BatchNorm layers into eval mode (frozen running stats);
        # used when fine-tuning with small batch sizes.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
    def initialize_flow(self, img):
        """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
        N, C, H, W = img.shape
        # grids live at 1/8 resolution, matching the encoder output
        coords0 = coords_grid(N, H//8, W//8, device=img.device)
        coords1 = coords_grid(N, H//8, W//8, device=img.device)
        # optical flow computed as difference: flow = coords1 - coords0
        return coords0, coords1
    def upsample_flow(self, flow, mask):
        """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
        N, _, H, W = flow.shape
        # mask: per-pixel softmax weights over a 3x3 neighborhood for each
        # of the 8x8 upsampled positions
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)
        # flow vectors are scaled by 8 to match the higher resolution
        up_flow = F.unfold(8 * flow, [3,3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, 8*H, 8*W)
    def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
        """ Estimate optical flow between pair of frames """
        # normalize images from [0, 255] to [-1, 1]
        image1 = 2 * (image1 / 255.0) - 1.0
        image2 = 2 * (image2 / 255.0) - 1.0
        image1 = image1.contiguous()
        image2 = image2.contiguous()
        hdim = self.hidden_dim
        cdim = self.context_dim
        # run the feature network
        with autocast(enabled=self.args.mixed_precision):
            fmap1, fmap2 = self.fnet([image1, image2])
        fmap1 = fmap1.float()
        fmap2 = fmap2.float()
        if self.args.alternate_corr:
            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        else:
            corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        # run the context network
        with autocast(enabled=self.args.mixed_precision):
            cnet = self.cnet(image1)
            net, inp = torch.split(cnet, [hdim, cdim], dim=1)
            # hidden state uses tanh, injected context features use relu
            net = torch.tanh(net)
            inp = torch.relu(inp)
        coords0, coords1 = self.initialize_flow(image1)
        if flow_init is not None:
            coords1 = coords1 + flow_init
        flow_predictions = []
        # iterative refinement: each step looks up correlations and
        # predicts a residual flow update
        for itr in range(iters):
            coords1 = coords1.detach()
            corr = corr_fn(coords1) # index correlation volume
            flow = coords1 - coords0
            with autocast(enabled=self.args.mixed_precision):
                net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
            # F(t+1) = F(t) + \Delta(t)
            coords1 = coords1 + delta_flow
            # upsample predictions
            if up_mask is None:
                flow_up = upflow8(coords1 - coords0)
            else:
                flow_up = self.upsample_flow(coords1 - coords0, up_mask)
            flow_predictions.append(flow_up)
        if test_mode:
            return coords1 - coords0, flow_up
        return flow_predictions
| 4,924 | 32.965517 | 102 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/__init__.py | 0 | 0 | 0 | py | |
NM-sparsity | NM-sparsity-main/RAFT/core/sparse_extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../../../')))
from devkit.sparse_ops import SparseConv
class ResidualBlock(nn.Module):
    """Residual block of two 3x3 sparse convolutions.

    ``norm_fn`` selects the normalization ('group', 'batch', 'instance'
    or 'none'); a strided block also builds a 1x1 downsample path so the
    skip connection matches the output shape.
    """
    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = SparseConv(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = SparseConv(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        # norm3 is only needed (and only created) when the block is strided
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if not stride == 1:
                self.norm3 = nn.Sequential()
        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                SparseConv(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        # residual addition followed by the final activation
        return self.relu(x+y)
class BottleneckBlock(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand.

    The intermediate width is ``planes // 4``; a strided block also
    builds a 1x1 downsample path for the skip connection.
    """
    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()
        self.conv1 = SparseConv(in_planes, planes//4, kernel_size=1, padding=0)
        self.conv2 = SparseConv(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = SparseConv(planes//4, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        # norm4 is only needed (and only created) when the block is strided
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes//4)
            self.norm2 = nn.BatchNorm2d(planes//4)
            self.norm3 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes//4)
            self.norm2 = nn.InstanceNorm2d(planes//4)
            self.norm3 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            self.norm3 = nn.Sequential()
            if not stride == 1:
                self.norm4 = nn.Sequential()
        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                SparseConv(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        y = self.relu(self.norm3(self.conv3(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        # residual addition followed by the final activation
        return self.relu(x+y)
class BasicEncoder(nn.Module):
    """Encoder used by the full RAFT model.

    A 7x7 stride-2 stem followed by three residual stages, producing
    features at 1/8 input resolution with ``output_dim`` channels.
    A list of images is concatenated along the batch dimension so both
    frames can be encoded in a single pass.
    """
    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = SparseConv(3, 64, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)
        # output convolution
        self.conv2 = SparseConv(128, output_dim, kernel_size=1)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        # Kaiming initialization for convolutions; unit weight / zero bias
        # for all affine normalization layers.
        for m in self.modules():
            if isinstance(m, SparseConv):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def _make_layer(self, dim, stride=1):
        # Two residual blocks; only the first one may be strided.
        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)
    def forward(self, x):
        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        # split back into the two equally sized inputs
        # (assumes the list contained exactly two tensors)
        if is_list:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
class SmallEncoder(nn.Module):
    """Lightweight encoder used by the small RAFT model.

    Same layout as :class:`BasicEncoder` but with narrower channels and
    bottleneck residual blocks; outputs features at 1/8 resolution.
    """
    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = SparseConv(3, 32, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 32
        self.layer1 = self._make_layer(32, stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        self.conv2 = SparseConv(96, output_dim, kernel_size=1)
        # Kaiming initialization for convolutions; unit weight / zero bias
        # for all affine normalization layers.
        for m in self.modules():
            if isinstance(m, SparseConv):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def _make_layer(self, dim, stride=1):
        # Two bottleneck blocks; only the first one may be strided.
        layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)
    def forward(self, x):
        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        # split back into the two equally sized inputs
        # (assumes the list contained exactly two tensors)
        if is_list:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
| 8,997 | 31.959707 | 94 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/utils/utils.py | import torch
import torch.nn.functional as F
import numpy as np
from scipy import interpolate
class InputPadder:
    """Pad images so both spatial dimensions become multiples of 8."""

    def __init__(self, dims, mode='sintel'):
        self.ht, self.wd = dims[-2:]
        # amount needed to reach the next multiple of 8
        extra_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
        extra_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
        if mode == 'sintel':
            # center the image: split the padding across both sides
            self._pad = [extra_wd // 2, extra_wd - extra_wd // 2,
                         extra_ht // 2, extra_ht - extra_ht // 2]
        else:
            # KITTI-style: width centered, height padded at the bottom only
            self._pad = [extra_wd // 2, extra_wd - extra_wd // 2, 0, extra_ht]

    def pad(self, *inputs):
        """Replicate-pad every tensor in `inputs`; returns a list."""
        padded = []
        for tensor in inputs:
            padded.append(F.pad(tensor, self._pad, mode='replicate'))
        return padded

    def unpad(self, x):
        """Crop a padded tensor back to the original spatial size."""
        ht, wd = x.shape[-2:]
        top, bottom = self._pad[2], ht - self._pad[3]
        left, right = self._pad[0], wd - self._pad[1]
        return x[..., top:bottom, left:right]
def forward_interpolate(flow):
    """Forward-warp a [2, H, W] flow field and fill holes.

    Each pixel's flow vector is carried to the position it points at;
    the scattered samples are then resampled onto the regular grid with
    nearest-neighbor interpolation. Returns a float torch tensor.
    """
    flow = flow.detach().cpu().numpy()
    dx, dy = flow[0], flow[1]
    ht, wd = dx.shape
    x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))
    # target positions of every pixel under the flow
    x1 = (x0 + dx).reshape(-1)
    y1 = (y0 + dy).reshape(-1)
    dx = dx.reshape(-1)
    dy = dy.reshape(-1)
    # keep only samples that land strictly inside the image
    inside = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
    x1, y1 = x1[inside], y1[inside]
    dx, dy = dx[inside], dy[inside]
    flow_x = interpolate.griddata(
        (x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
    flow_y = interpolate.griddata(
        (x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
    warped = np.stack([flow_x, flow_y], axis=0)
    return torch.from_numpy(warped).float()
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
    """Sample `img` at pixel coordinates `coords` via F.grid_sample.

    coords holds (x, y) pixel positions in its last dimension. When
    `mask` is True, additionally returns a float mask of in-bounds
    samples. NOTE: `mode` is accepted for API compatibility but unused;
    grid_sample's default bilinear mode is always applied.
    """
    height, width = img.shape[-2:]
    xs, ys = coords.split([1, 1], dim=-1)
    # map pixel coordinates onto the [-1, 1] range grid_sample expects
    xs = 2 * xs / (width - 1) - 1
    ys = 2 * ys / (height - 1) - 1
    sampled = F.grid_sample(img, torch.cat([xs, ys], dim=-1), align_corners=True)
    if not mask:
        return sampled
    valid = (xs > -1) & (ys > -1) & (xs < 1) & (ys < 1)
    return sampled, valid.float()
def coords_grid(batch, ht, wd, device):
    """Return a [batch, 2, ht, wd] grid of (x, y) pixel coordinates."""
    ys, xs = torch.meshgrid(torch.arange(ht, device=device),
                            torch.arange(wd, device=device))
    # channel 0 carries x (column) coordinates, channel 1 carries y (row)
    grid = torch.stack((xs, ys), dim=0).float()
    return grid[None].repeat(batch, 1, 1, 1)
def upflow8(flow, mode='bilinear'):
    """Upsample a flow field 8x and rescale its vectors accordingly."""
    _, _, h, w = flow.shape
    up = F.interpolate(flow, size=(8 * h, 8 * w), mode=mode, align_corners=True)
    # flow is measured in pixels, so vectors scale with the resolution
    return 8 * up
| 2,489 | 29 | 93 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/utils/augmentor.py | import numpy as np
import random
import math
from PIL import Image
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import torch
from torchvision.transforms import ColorJitter
import torch.nn.functional as F
class FlowAugmentor:
    """Augmentation for image pairs with dense ground-truth flow.

    Applies photometric jitter, random occlusion patches ("eraser"),
    random scaling/stretching, random flips and a random crop, keeping
    the flow field consistent with every spatial transform.
    """
    def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True):
        # spatial augmentation params
        self.crop_size = crop_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.spatial_aug_prob = 0.8
        self.stretch_prob = 0.8
        self.max_stretch = 0.2
        # flip augmentation params
        self.do_flip = do_flip
        self.h_flip_prob = 0.5
        self.v_flip_prob = 0.1
        # photometric augmentation params
        self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14)
        self.asymmetric_color_aug_prob = 0.2
        self.eraser_aug_prob = 0.5
    def color_transform(self, img1, img2):
        """ Photometric augmentation """
        # asymmetric: jitter each frame independently
        if np.random.rand() < self.asymmetric_color_aug_prob:
            img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
            img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
        # symmetric: stack the frames so both receive the identical jitter
        else:
            image_stack = np.concatenate([img1, img2], axis=0)
            image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
            img1, img2 = np.split(image_stack, 2, axis=0)
        return img1, img2
    def eraser_transform(self, img1, img2, bounds=[50, 100]):
        """ Occlusion augmentation """
        # paints 1-2 random rectangles of img2 with its mean color,
        # simulating occlusions between the frames
        ht, wd = img1.shape[:2]
        if np.random.rand() < self.eraser_aug_prob:
            mean_color = np.mean(img2.reshape(-1, 3), axis=0)
            for _ in range(np.random.randint(1, 3)):
                x0 = np.random.randint(0, wd)
                y0 = np.random.randint(0, ht)
                dx = np.random.randint(bounds[0], bounds[1])
                dy = np.random.randint(bounds[0], bounds[1])
                img2[y0:y0+dy, x0:x0+dx, :] = mean_color
        return img1, img2
    def spatial_transform(self, img1, img2, flow):
        # randomly sample scale
        ht, wd = img1.shape[:2]
        # lower bound keeps the scaled image larger than the crop (+8 margin)
        min_scale = np.maximum(
            (self.crop_size[0] + 8) / float(ht),
            (self.crop_size[1] + 8) / float(wd))
        scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
        scale_x = scale
        scale_y = scale
        if np.random.rand() < self.stretch_prob:
            scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
            scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
        scale_x = np.clip(scale_x, min_scale, None)
        scale_y = np.clip(scale_y, min_scale, None)
        if np.random.rand() < self.spatial_aug_prob:
            # rescale the images; flow vectors scale with the resolution
            img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow = flow * [scale_x, scale_y]
        if self.do_flip:
            if np.random.rand() < self.h_flip_prob: # h-flip
                img1 = img1[:, ::-1]
                img2 = img2[:, ::-1]
                flow = flow[:, ::-1] * [-1.0, 1.0]
            if np.random.rand() < self.v_flip_prob: # v-flip
                img1 = img1[::-1, :]
                img2 = img2[::-1, :]
                flow = flow[::-1, :] * [1.0, -1.0]
        # random crop (NOTE(review): randint(0, 0) would raise if the
        # scaled image exactly equals the crop size -- confirm upstream
        # scaling margin always prevents that)
        y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
        x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
        img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        return img1, img2, flow
    def __call__(self, img1, img2, flow):
        img1, img2 = self.color_transform(img1, img2)
        img1, img2 = self.eraser_transform(img1, img2)
        img1, img2, flow = self.spatial_transform(img1, img2, flow)
        # make arrays contiguous after the flipping/cropping views
        img1 = np.ascontiguousarray(img1)
        img2 = np.ascontiguousarray(img2)
        flow = np.ascontiguousarray(flow)
        return img1, img2, flow
class SparseFlowAugmentor:
    """Augmentation for image pairs with *sparse* ground-truth flow.

    Like :class:`FlowAugmentor`, but the flow comes with a validity mask
    (e.g. KITTI), so resizing scatters the valid samples instead of
    interpolating, and only horizontal flips are applied.
    """
    def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False):
        # spatial augmentation params
        self.crop_size = crop_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.spatial_aug_prob = 0.8
        self.stretch_prob = 0.8
        self.max_stretch = 0.2
        # flip augmentation params
        self.do_flip = do_flip
        self.h_flip_prob = 0.5
        self.v_flip_prob = 0.1
        # photometric augmentation params
        self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
        self.asymmetric_color_aug_prob = 0.2
        self.eraser_aug_prob = 0.5
    def color_transform(self, img1, img2):
        # symmetric jitter only: both frames get the identical transform
        image_stack = np.concatenate([img1, img2], axis=0)
        image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
        img1, img2 = np.split(image_stack, 2, axis=0)
        return img1, img2
    def eraser_transform(self, img1, img2):
        # paint 1-2 random rectangles of img2 with its mean color
        ht, wd = img1.shape[:2]
        if np.random.rand() < self.eraser_aug_prob:
            mean_color = np.mean(img2.reshape(-1, 3), axis=0)
            for _ in range(np.random.randint(1, 3)):
                x0 = np.random.randint(0, wd)
                y0 = np.random.randint(0, ht)
                dx = np.random.randint(50, 100)
                dy = np.random.randint(50, 100)
                img2[y0:y0+dy, x0:x0+dx, :] = mean_color
        return img1, img2
    def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
        # Resize sparse flow by scattering: scale the coordinates of the
        # valid samples and splat them onto the resized grid (rounded to
        # the nearest pixel); everything else stays invalid.
        ht, wd = flow.shape[:2]
        coords = np.meshgrid(np.arange(wd), np.arange(ht))
        coords = np.stack(coords, axis=-1)
        coords = coords.reshape(-1, 2).astype(np.float32)
        flow = flow.reshape(-1, 2).astype(np.float32)
        valid = valid.reshape(-1).astype(np.float32)
        coords0 = coords[valid>=1]
        flow0 = flow[valid>=1]
        ht1 = int(round(ht * fy))
        wd1 = int(round(wd * fx))
        coords1 = coords0 * [fx, fy]
        flow1 = flow0 * [fx, fy]
        xx = np.round(coords1[:,0]).astype(np.int32)
        yy = np.round(coords1[:,1]).astype(np.int32)
        v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
        xx = xx[v]
        yy = yy[v]
        flow1 = flow1[v]
        flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
        valid_img = np.zeros([ht1, wd1], dtype=np.int32)
        flow_img[yy, xx] = flow1
        valid_img[yy, xx] = 1
        return flow_img, valid_img
    def spatial_transform(self, img1, img2, flow, valid):
        # randomly sample scale
        ht, wd = img1.shape[:2]
        min_scale = np.maximum(
            (self.crop_size[0] + 1) / float(ht),
            (self.crop_size[1] + 1) / float(wd))
        scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
        scale_x = np.clip(scale, min_scale, None)
        scale_y = np.clip(scale, min_scale, None)
        if np.random.rand() < self.spatial_aug_prob:
            # rescale the images
            img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
        if self.do_flip:
            if np.random.rand() < 0.5: # h-flip
                img1 = img1[:, ::-1]
                img2 = img2[:, ::-1]
                flow = flow[:, ::-1] * [-1.0, 1.0]
                valid = valid[:, ::-1]
        # crop with extra horizontal margin, then clamp back into bounds
        margin_y = 20
        margin_x = 50
        y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
        x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
        y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
        x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
        img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        return img1, img2, flow, valid
    def __call__(self, img1, img2, flow, valid):
        img1, img2 = self.color_transform(img1, img2)
        img1, img2 = self.eraser_transform(img1, img2)
        img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
        # make arrays contiguous after the flipping/cropping views
        img1 = np.ascontiguousarray(img1)
        img2 = np.ascontiguousarray(img2)
        flow = np.ascontiguousarray(flow)
        valid = np.ascontiguousarray(valid)
        return img1, img2, flow, valid
| 9,108 | 35.878543 | 97 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/utils/__init__.py | 0 | 0 | 0 | py | |
NM-sparsity | NM-sparsity-main/RAFT/core/utils/flow_viz.py | # Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization
# MIT License
#
# Copyright (c) 2018 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Tom Runia
# Date Created: 2018-08-03
import numpy as np
def make_colorwheel():
    """
    Generates a color wheel for optical flow visualization as presented in:
        Baker et al. "A Database and Evaluation Methodology for Optical Flow"
        (ICCV, 2007), http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
    Follows Daniel Scharstein's C++ and Deqing Sun's Matlab implementations.
    Returns:
        np.ndarray: Color wheel of shape [55, 3], RGB values in [0, 255]
    """
    # Each hue transition is (count, held channel, ramped channel, direction):
    # the held channel stays at 255 while the ramped one rises (+1) from 0
    # or falls (-1) from 255 across `count` rows.
    transitions = [
        (15, 0, 1, +1),  # red -> yellow   (RY)
        (6,  1, 0, -1),  # yellow -> green (YG)
        (4,  1, 2, +1),  # green -> cyan   (GC)
        (11, 2, 1, -1),  # cyan -> blue    (CB)
        (13, 2, 0, +1),  # blue -> magenta (BM)
        (6,  0, 2, -1),  # magenta -> red  (MR)
    ]
    ncols = sum(count for count, _, _, _ in transitions)
    colorwheel = np.zeros((ncols, 3))
    row = 0
    for count, hold, ramp, direction in transitions:
        ramp_vals = np.floor(255 * np.arange(count) / count)
        colorwheel[row:row + count, hold] = 255
        if direction > 0:
            colorwheel[row:row + count, ramp] = ramp_vals
        else:
            colorwheel[row:row + count, ramp] = 255 - ramp_vals
        row += count
    return colorwheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
    """
    Applies the flow color wheel to (possibly clipped) flow components u and v.
    According to the C++ source code of Daniel Scharstein
    According to the Matlab source code of Deqing Sun
    Args:
        u (np.ndarray): Input horizontal flow of shape [H,W]
        v (np.ndarray): Input vertical flow of shape [H,W]
        convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
    Returns:
        np.ndarray: Flow visualization image of shape [H,W,3]
    """
    flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
    colorwheel = make_colorwheel()  # shape [55x3]
    ncols = colorwheel.shape[0]
    # flow magnitude and angle determine saturation and hue respectively
    rad = np.sqrt(np.square(u) + np.square(v))
    a = np.arctan2(-v, -u)/np.pi
    # map angle in [-1, 1] onto a fractional wheel index in [0, ncols-1]
    fk = (a+1) / 2*(ncols-1)
    k0 = np.floor(fk).astype(np.int32)
    k1 = k0 + 1
    k1[k1 == ncols] = 0
    f = fk - k0
    for i in range(colorwheel.shape[1]):
        tmp = colorwheel[:,i]
        # linear interpolation between the two neighboring wheel colors
        col0 = tmp[k0] / 255.0
        col1 = tmp[k1] / 255.0
        col = (1-f)*col0 + f*col1
        # desaturate towards white for small magnitudes
        idx = (rad <= 1)
        col[idx] = 1 - rad[idx] * (1-col[idx])
        col[~idx] = col[~idx] * 0.75 # out of range
        # Note the 2-i => BGR instead of RGB
        ch_idx = 2-i if convert_to_bgr else i
        flow_image[:,:,ch_idx] = np.floor(255 * col)
    return flow_image
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
    """
    Expects a two dimensional flow image of shape.
    Args:
        flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
        clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
        convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
    Returns:
        np.ndarray: Flow visualization image of shape [H,W,3]
    """
    assert flow_uv.ndim == 3, 'input flow must have three dimensions'
    assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
    if clip_flow is not None:
        # NOTE(review): the lower clip bound of 0 also discards negative
        # flow components (matches the upstream implementation) -- confirm
        # this is intended before relying on clip_flow
        flow_uv = np.clip(flow_uv, 0, clip_flow)
    u = flow_uv[:,:,0]
    v = flow_uv[:,:,1]
    # normalize by the maximum magnitude so colors use the full range
    rad = np.sqrt(np.square(u) + np.square(v))
    rad_max = np.max(rad)
    epsilon = 1e-5
    u = u / (rad_max + epsilon)
    v = v / (rad_max + epsilon)
return flow_uv_to_colors(u, v, convert_to_bgr) | 4,318 | 31.719697 | 90 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/utils/frame_utils.py | import numpy as np
from PIL import Image
from os.path import *
import re
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
    """ Read .flo file in Middlebury format"""
    # Layout: float32 magic (202021.25), int32 width, int32 height, then
    # 2*w*h interleaved float32 (u, v) values.
    # WARNING: works on little-endian architectures (e.g. Intel x86) only!
    with open(fn, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print('Magic number incorrect. Invalid .flo file')
            return None
        w = int(np.fromfile(f, np.int32, count=1))
        h = int(np.fromfile(f, np.int32, count=1))
        data = np.fromfile(f, np.float32, count=2 * w * h)
        # (h, w, 2) layout for visualization; raw file order is row-major
        return np.resize(data, (h, w, 2))
def readPFM(file):
    """Read a PFM (portable float map) image.

    Args:
        file: Path to the .pfm file.

    Returns:
        np.ndarray: Float data of shape (H, W, 3) for color ('PF') or
        (H, W) for grayscale ('Pf'). PFM stores scanlines bottom-to-top,
        so rows are flipped before returning.

    Raises:
        Exception: If the header or the dimension line is malformed.
    """
    # Use a context manager so the file handle is closed on every path;
    # the original opened the file and never closed it.
    with open(file, 'rb') as f:
        header = f.readline().rstrip()
        if header == b'PF':
            color = True
        elif header == b'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(rb'^(\d+)\s(\d+)\s$', f.readline())
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        # a negative scale factor signals little-endian float data
        scale = float(f.readline().rstrip())
        endian = '<' if scale < 0 else '>'
        data = np.fromfile(f, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    return np.flipud(data)
def writeFlow(filename,uv,v=None):
    """ Write optical flow to file.

    If v is None, uv is assumed to contain both u and v channels,
    stacked in depth.
    Original code by Deqing Sun, adapted from Daniel Scharstein.
    """
    nBands = 2
    if v is None:
        assert(uv.ndim == 3)
        assert(uv.shape[2] == 2)
        u = uv[:,:,0]
        v = uv[:,:,1]
    else:
        u = uv
    assert(u.shape == v.shape)
    height,width = u.shape
    f = open(filename,'wb')
    # write the header: magic number, then width and height as int32
    f.write(TAG_CHAR)
    np.array(width).astype(np.int32).tofile(f)
    np.array(height).astype(np.int32).tofile(f)
    # arrange into matrix form: interleave u and v column-wise so the
    # file stores (u, v) pairs per pixel in row-major order
    tmp = np.zeros((height, width*nBands))
    tmp[:,np.arange(width)*2] = u
    tmp[:,np.arange(width)*2 + 1] = v
    tmp.astype(np.float32).tofile(f)
    f.close()
def readFlowKITTI(filename):
    """Read a KITTI 16-bit PNG flow map; returns (flow, valid)."""
    flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
    # cv2 loads BGR; reverse channels so they are (u, v, valid)
    flow = flow[:,:,::-1].astype(np.float32)
    flow, valid = flow[:, :, :2], flow[:, :, 2]
    # KITTI encodes flow as uint16: stored = 64 * flow + 2^15
    flow = (flow - 2**15) / 64.0
    return flow, valid
def readDispKITTI(filename):
    """Read a KITTI disparity PNG and convert it to a flow field."""
    # disparity is stored as uint16 scaled by 256; zero means invalid
    disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0
    valid = disp > 0.0
    # disparity is purely horizontal motion (negative x), zero vertical
    flow = np.stack([-disp, np.zeros_like(disp)], -1)
    return flow, valid
def writeFlowKITTI(filename, uv):
    """Write a flow field using the KITTI uint16 PNG encoding."""
    # KITTI encoding: stored = 64 * flow + 2^15; all pixels marked valid
    uv = 64.0 * uv + 2**15
    valid = np.ones([uv.shape[0], uv.shape[1], 1])
    uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
    # cv2 writes BGR, hence the channel reversal
    cv2.imwrite(filename, uv[..., ::-1])
def read_gen(file_name, pil=False):
    """Dispatch on file extension to the appropriate reader.

    Returns a PIL image for image files, a numpy array for .bin/.raw,
    float32 flow for .flo/.pfm, and [] for unknown extensions.
    NOTE(review): the `pil` parameter is currently unused -- confirm
    whether callers rely on it.
    """
    ext = splitext(file_name)[-1]
    if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
        return Image.open(file_name)
    elif ext == '.bin' or ext == '.raw':
        return np.load(file_name)
    elif ext == '.flo':
        return readFlow(file_name).astype(np.float32)
    elif ext == '.pfm':
        flow = readPFM(file_name).astype(np.float32)
        if len(flow.shape) == 2:
            return flow
        else:
            # drop the last channel of 3-channel PFM flow data
            return flow[:, :, :-1]
return [] | 4,024 | 28.379562 | 109 | py |
NM-sparsity | NM-sparsity-main/RAFT/alt_cuda_corr/setup.py | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Build configuration for the optional `alt_cuda_corr` CUDA extension
# (compiled from correlation.cpp / correlation_kernel.cu with -O3 for nvcc).
setup(
    name='correlation',
    ext_modules=[
        CUDAExtension('alt_cuda_corr',
            sources=['correlation.cpp', 'correlation_kernel.cu'],
            extra_compile_args={'cxx': [], 'nvcc': ['-O3']}),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 381 | 22.875 | 67 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper record bundling one term of a Hamiltonian in an intermediate
    format: a scalar coefficient, its single-qubit operators and the
    qubit positions those operators act on.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this operator string."""
        return self._coeff

    @property
    def operators(self):
        """List of single-qubit operator labels."""
        return self._ops

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._pos
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One rank-4 tensor (bond_left, bond_right, 2, 2) per qubit; bond
        # dimensions start at 1.  The builtin `complex` is used as dtype
        # because the `np.complex` alias was removed in NumPy 1.24.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                        for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write `add_operator` into the tensor of `qubit`.

        set_at: where to put data; either [i, j] to set a whole 2x2
                operator matrix or [i, j, k, l] to set a single entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                    add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the tensor of `qubit`
        and write `add_operator` into the newly created corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1
                    the last two dimensions are always 2x2 only
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # NOTE(review): `+=` mutates the caller's list in place --
                # confirm this side effect is intended
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # builtin `complex` instead of the removed `np.complex` alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the last (newly created) corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO via SVD: a forward sweep followed by a
        backward sweep, truncating singular values below EPS and
        distributing sqrt-weights to both sides.
        """
        n_qubits = len(self.container)
        # flatten the two physical legs into one for matrix SVDs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                    self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # restore the (bond, bond, 2, 2) layout
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build and manage a matrix-product-operator (MPO) representation of a
    qubit Hamiltonian (modelled after tensornetwork's FiniteMPO).

    The Hamiltonian is first converted into an intermediate list of
    ``SubOperator`` objects (one per Pauli string), which is then packed
    into one or more ``MPOContainer`` objects, splitting whenever the bond
    dimension would exceed ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian (or a textual representation).
            backend: an optional backend. NOTE(review): currently unused.
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: an optional name for the MPO. NOTE(review): currently unused.
            maxdim: maximal bond dimension of a single MPO before splitting.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class in the
            # visible source -- confirm it exists elsewhere, otherwise this
            # raises AttributeError when n_qubits is not supplied.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Pauli strings of self.hamiltonian into a list of
        SubOperator(coefficient, operators, positions) entries.

        Raises:
            Exception: if an identity Pauli string appears anywhere but first.
        """
        def get_pauli_matrix(string):
            # ``np.complex`` was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin ``complex`` is the documented replacement.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first string may be the identity (no items); its
            # coefficient is distributed over all sites later.
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Collect the operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Pack Pauli strings starting at index ``j`` into one MPOContainer
        until the bond dimension reaches self.maxdim or the list is exhausted.

        Returns:
            (mpo, j): the filled container and the index of the first
            unconsumed intermediate entry.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First entry: place the first Pauli string at bond position [0, 0],
        # spreading its coefficient evenly over all sites via an n-th root.
        scale = complex(intermediate[j].coefficient) ** (1 / n_qubits)
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q in my_positions:
                op = my_operators[my_positions.index(q)]
            else:
                op = np.eye(2)
            mpo.set_tensor(qubit=q, set_at=[0, 0], add_operator=scale * op)
        # ***********************************************************************
        # All other entries: grow the bond dimension for every further string
        # until the container is full.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            scale = complex(intermediate[j].coefficient) ** (1 / n_qubits)
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # Boundary sites only grow along their single bond;
                # bulk sites grow along both bonds.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # It is guaranteed that every index appears only once in positions
                if q in my_positions:
                    op = my_operators[my_positions.index(q)]
                else:
                    op = np.eye(2)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=scale * op)
            # Periodically compress to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split ``intermediate`` into as many MPOs as maxdim requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        ''' Recover the dense operator, e.g. to compare with the Hamiltonian
        obtained from tq. Returns a tensor of shape (d, d, d, d) with
        d = 2**(n_qubits/2), summed over all MPOs in self.mpo. '''
        mpo = self.mpo
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        H = None
        # Define network nodes
        #    |   |        |   |
        #   -O---O--...---O---O-
        #    |   |        |   |
        for m in mpo:
            assert n_qubits == len(m.container)
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect bond edges (along the horizontal line above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices):
            # left bond, right bond, then all upper and all lower physical legs
            edges = [nodes[0].get_edge(0), nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Bug fix: previously H_m was only assigned for torch tensors, so a
            # non-torch backend raised NameError (or silently reused a stale H_m).
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if H is None:
                H = H_m
            else:
                H = H + H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/do_annealing.py | import tequila as tq
import numpy as np
import pickle
from pathos.multiprocessing import ProcessingPool as Pool
from parallel_annealing import *
#from dummy_par import *
from mutation_options import *
from single_thread_annealing import *
def find_best_instructions(instructions_dict):
    """
    Return the instruction with the best (lowest) fitness.

    args:
        instructions_dict: maps keys to ``(Instructions, fitness)`` tuples;
            an entry may be ``None`` or carry a ``None`` fitness, in which
            case it is skipped.

    Returns:
        (best_instructions, best_fitness): the winning Instructions object
        and its fitness; ``(None, inf)`` if no valid entry exists. Ties are
        resolved in favour of the later entry (``<=`` comparison).
    """
    best_instructions = None
    # float("inf") instead of an arbitrary large sentinel, so even very
    # high-energy members are considered.
    best_fitness = float("inf")
    for key in instructions_dict:
        entry = instructions_dict[key]
        # Bug fix: check the container for None BEFORE subscripting it;
        # the original order raised TypeError on a None entry.
        if entry is None or entry[1] is None:
            continue
        if entry[1] <= best_fitness:
            best_instructions, best_fitness = entry
    return best_instructions, best_fitness
def simulated_annealing(num_population, num_offsprings, actions_ratio,
                        hamiltonian, max_epochs=100, min_epochs=1, tol=1e-6,
                        type_energy_eval="wfn", cluster_circuit=None,
                        patience = 20, num_processors=4, T_0=1.0, alpha=0.9,
                        max_non_cliffords=0, verbose=False, beta=0.5,
                        starting_energy=None):
    """
    Search for the Clifford circuit that best lowers the energy of the
    Hamiltonian using simulated annealing over a population of Instructions.

    params:
    - num_population = number of members in a generation
    - num_offsprings = number of mutations carried on every member
    - num_processors = number of processors to use for parallelization
      (1 selects the single-threaded evolution path)
    - T_0 = initial temperature
    - alpha = temperature decay
    - beta = parameter to adjust temperature on resetting after running out of patience
    - max_epochs = max iterations for optimizing
    - min_epochs = min iterations for optimizing (NOTE(review): currently not
      used by the loop below)
    - tol = minimum change in energy required, otherwise terminate optimization
    - verbose = display energies/decisions at iterations or not
    - hamiltonian = original hamiltonian
    - actions_ratio = The ratio of the different actions for mutations
    - type_energy_eval = keyword specifying the type of optimization method to use for energy minimization
    - cluster_circuit = the reference circuit to which clifford gates are added
    - patience = the number of epochs before resetting the optimization
    - max_non_cliffords = maximal number of non-Clifford gates per member
    - starting_energy = reference energy that the final result should undercut

    Side effects: loads an initial guess from and repeatedly pickles the best
    member to "instruct_gates.pickle", "instruct_positions.pickle" and
    "best_reference_wfn.pickle" in the working directory.
    """
    if verbose:
        print("Starting to Optimize Cluster circuit", flush=True)
    num_qubits = len(hamiltonian.qubits)
    if verbose:
        print("Initializing Generation", flush=True)
    # restart = False if previous_instructions is None\
    #           else True
    # ``restart`` makes the FIRST member attempt to warm-start from pickles of
    # a previous run; every later member is initialized randomly.
    restart = True
    instructions_dict = {}
    instructions_list = []
    current_fitness_list = []
    fitness, wfn = None, None # pre-initialize with None
    for instruction_id in range(num_population):
        instructions = None
        if restart:
            # Warm start: try to resume from a previous run's pickled state.
            try:
                instructions = Instructions(n_qubits=num_qubits, alpha=alpha, T_0=T_0, beta=beta, patience=patience, max_non_cliffords=max_non_cliffords, reference_energy=starting_energy)
                instructions.gates = pickle.load(open("instruct_gates.pickle", "rb"))
                instructions.positions = pickle.load(open("instruct_positions.pickle", "rb"))
                instructions.best_reference_wfn = pickle.load(open("best_reference_wfn.pickle", "rb"))
                print("Added a guess from previous runs", flush=True)
                fitness, wfn = evaluate_fitness(instructions=instructions, hamiltonian=hamiltonian, type_energy_eval=type_energy_eval, cluster_circuit=cluster_circuit)
            except Exception as e:
                # Best-effort: if no pickles exist, fall through with the
                # fresh Instructions object created above.
                print(e)
                # raise Exception("Did not find a guess from previous runs")
                # pass
            restart = False
            #print(instructions._str())
        else:
            # Random member: create, prune, and evaluate until a valid one is found.
            failed = True
            while failed:
                instructions = Instructions(n_qubits=num_qubits, alpha=alpha, T_0=T_0, beta=beta, patience=patience, max_non_cliffords=max_non_cliffords, reference_energy=starting_energy)
                instructions.prune()
                fitness, wfn = evaluate_fitness(instructions=instructions, hamiltonian=hamiltonian, type_energy_eval=type_energy_eval, cluster_circuit=cluster_circuit)
                # if fitness <= starting_energy:
                failed = False
        instructions.set_reference_wfn(wfn)
        current_fitness_list.append(fitness)
        instructions_list.append(instructions)
        instructions_dict[instruction_id] = (instructions, fitness)
    if verbose:
        print("First Generation details: \n", flush=True)
        for key in instructions_dict:
            print("Initial Instructions number: ", key, flush=True)
            instructions_dict[key][0]._str()
            print("Initial fitness values: ", instructions_dict[key][1], flush=True)
    best_instructions, best_energy = find_best_instructions(instructions_dict)
    if verbose:
        print("Best member of the Generation: \n", flush=True)
        print("Instructions: ", flush=True)
        best_instructions._str()
        print("fitness value: ", best_energy, flush=True)
    # Checkpoint the initial best member.
    pickle.dump(best_instructions.gates, open("instruct_gates.pickle", "wb"))
    pickle.dump(best_instructions.positions, open("instruct_positions.pickle", "wb"))
    pickle.dump(best_instructions.best_reference_wfn, open("best_reference_wfn.pickle", "wb"))
    epoch = 0
    previous_best_energy = best_energy
    converged = False
    has_improved_before = False
    dts = []
    #pool = multiprocessing.Pool(processes=num_processors)
    # Main annealing loop: evolve the whole generation once per epoch.
    while (epoch < max_epochs):
        print("Epoch: ", epoch, flush=True)
        import time
        t0 = time.time()
        if num_processors == 1:
            st_evolve_generation(num_offsprings, actions_ratio,
                                instructions_dict,
                                hamiltonian, type_energy_eval,
                                cluster_circuit)
        else:
            evolve_generation(num_offsprings, actions_ratio,
                            instructions_dict,
                            hamiltonian, num_processors, type_energy_eval,
                            cluster_circuit)
        t1 = time.time()
        dts += [t1-t0]
        best_instructions, best_energy = find_best_instructions(instructions_dict)
        if verbose:
            print("Best member of the Generation: \n", flush=True)
            print("Instructions: ", flush=True)
            best_instructions._str()
            print("fitness value: ", best_energy, flush=True)
        # A bit confusing, but:
        # Want that current best energy has improved something previous, is better than
        # some starting energy and achieves some convergence criterion
        # NOTE(review): ``np.abs(...) < 0`` is always False (an absolute value
        # is never negative), so has_improved_before stays False and
        # ``converged`` can never become True here. Presumably the intended
        # test was ``best_energy - previous_best_energy < 0`` -- confirm.
        has_improved_before = True if np.abs(best_energy - previous_best_energy) < 0\
                              else False
        if np.abs(best_energy - previous_best_energy) < tol and has_improved_before:
            if starting_energy is not None:
                converged = True if best_energy < starting_energy else False
            else:
                converged = True
        else:
            converged = False
        if best_energy < previous_best_energy:
            previous_best_energy = best_energy
        epoch += 1
        # Checkpoint the current best member every epoch.
        pickle.dump(best_instructions.gates, open("instruct_gates.pickle", "wb"))
        pickle.dump(best_instructions.positions, open("instruct_positions.pickle", "wb"))
        pickle.dump(best_instructions.best_reference_wfn, open("best_reference_wfn.pickle", "wb"))
    #pool.close()
    if converged:
        print("Converged after ", epoch, " iterations.", flush=True)
    print("Best energy:", best_energy, flush=True)
    print("\t with instructions", best_instructions.gates, best_instructions.positions, flush=True)
    print("\t optimal parameters", best_instructions.best_reference_wfn, flush=True)
    print("average time: ", np.average(dts), flush=True)
    print("overall time: ", np.sum(dts), flush=True)
    # best_instructions.replace_UCCXc_with_UCC(number=max_non_cliffords)
    # Final checkpoint of the best member found.
    pickle.dump(best_instructions.gates, open("instruct_gates.pickle", "wb"))
    pickle.dump(best_instructions.positions, open("instruct_positions.pickle", "wb"))
    pickle.dump(best_instructions.best_reference_wfn, open("best_reference_wfn.pickle", "wb"))
def replace_cliff_with_non_cliff(num_population, num_offsprings, actions_ratio,
                                 hamiltonian, max_epochs=100, min_epochs=1, tol=1e-6,
                                 type_energy_eval="wfn", cluster_circuit=None,
                                 patience = 20, num_processors=4, T_0=1.0, alpha=0.9,
                                 max_non_cliffords=0, verbose=False, beta=0.5,
                                 starting_energy=None):
    """
    Post-processing step after ``simulated_annealing``: load the pickled best
    Clifford circuit and try replacing each (uncontrolled) Clifford gate with
    a non-Clifford gate, one at a time, keeping the best-scoring variant.

    params: see ``simulated_annealing`` -- the population/epoch/temperature
    parameters are accepted for interface symmetry; only ``alpha``, ``T_0``,
    ``beta``, ``patience``, ``max_non_cliffords``, ``starting_energy``,
    ``hamiltonian``, ``type_energy_eval`` and ``cluster_circuit`` are used here.

    Side effects: reads "instruct_gates.pickle", "instruct_positions.pickle"
    and "best_reference_wfn.pickle" from the working directory; prints the
    winning instructions (nothing is written back to disk).
    """
    if verbose:
        print("Starting to replace clifford gates Cluster circuit with non-clifford gates one at a time", flush=True)
    num_qubits = len(hamiltonian.qubits)
    #get the best clifford object
    instructions = Instructions(n_qubits=num_qubits, alpha=alpha, T_0=T_0, beta=beta, patience=patience, max_non_cliffords=max_non_cliffords, reference_energy=starting_energy)
    instructions.gates = pickle.load(open("instruct_gates.pickle", "rb"))
    instructions.positions = pickle.load(open("instruct_positions.pickle", "rb"))
    instructions.best_reference_wfn = pickle.load(open("best_reference_wfn.pickle", "rb"))
    fitness, wfn = evaluate_fitness(instructions=instructions, hamiltonian=hamiltonian, type_energy_eval=type_energy_eval, cluster_circuit=cluster_circuit)
    if verbose:
        print("Initial energy after previous Clifford optimization is",\
              fitness, flush=True)
        print("Starting with instructions", instructions.gates, instructions.positions, flush=True)
    # Key 0 holds the unmodified baseline; key gate_id+1 holds the variant
    # where that gate was replaced.
    instructions_dict = {}
    instructions_dict[0] = (instructions, fitness)
    for gate_id, (gate, position) in enumerate(zip(instructions.gates, instructions.positions)):
        print(gate)
        altered_instructions = copy.deepcopy(instructions)
        # skip if controlled rotation
        if gate[0]=='C':
            continue
        altered_instructions.replace_cg_w_ncg(gate_id)
        # altered_instructions.max_non_cliffords = 1 # TODO why is this set to 1??
        altered_instructions.max_non_cliffords = max_non_cliffords
        #clifford_circuit, init_angles = build_circuit(instructions)
        #print(clifford_circuit, init_angles)
        #folded_hamiltonian = perform_folding(hamiltonian, clifford_circuit)
        #folded_hamiltonian2 = (convert_PQH_to_tq_QH(folded_hamiltonian))()
        #print(folded_hamiltonian)
        #clifford_circuit, init_angles = build_circuit(altered_instructions)
        #print(clifford_circuit, init_angles)
        #folded_hamiltonian = perform_folding(hamiltonian, clifford_circuit)
        #folded_hamiltonian1 = (convert_PQH_to_tq_QH(folded_hamiltonian))(init_angles)
        #print(folded_hamiltonian1 - folded_hamiltonian2)
        #print(folded_hamiltonian)
        #raise Exception("teste")
        # Fitness evaluation can fail stochastically; retry up to 5 times.
        counter = 0
        success = False
        while not success:
            counter += 1
            try:
                fitness, wfn = evaluate_fitness(instructions=altered_instructions, hamiltonian=hamiltonian, type_energy_eval=type_energy_eval, cluster_circuit=cluster_circuit)
                success = True
            except Exception as e:
                print(e)
                if counter > 5:
                    print("This replacement failed more than 5 times")
                    # NOTE(review): on giving up, ``fitness`` still holds the
                    # value from the previous gate's evaluation, so this
                    # variant is stored with a stale fitness -- confirm intent.
                    success = True
        instructions_dict[gate_id+1] = (altered_instructions, fitness)
        #circuit = build_circuit(altered_instructions)
        #tq.draw(circuit,backend="cirq")
    best_instructions, best_energy = find_best_instructions(instructions_dict)
    print("best instrucitons after the non-clifford opimizaton")
    print("Best energy:", best_energy, flush=True)
    print("\t with instructions", best_instructions.gates, best_instructions.positions, flush=True)
| 13,258 | 46.185053 | 187 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient companion of _EvalContainer: called by SciPy with the current
    parameter vector, it returns the gradient of the energy. Per parameter
    there are two possible contributions: the circuit gradient (via tq.grad)
    and the derivative of the parametrized Hamiltonian (via Ham_derivatives).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector of length self.N, cast to complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec has float dtype; assigning a complex derivative
        # below would raise. Presumably each stored derivative is real at this
        # point -- confirm. The final asarray casts to complex64 anyway, so a
        # complex dtype here would be safer.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # Mirror _EvalContainer.__call__: circuit variables stay raw, pure
        # Hamiltonian parameters are cast to complex.
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution 1: gradient of the circuit w.r.t. this variable
            # (kept as an uncompiled-sum tequila objective for now).
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution 2: derivative of the parametrized Hamiltonian,
            # simulated immediately as a number.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # If only numeric contributions accumulated, store directly;
            # otherwise ``derivative`` is still a tequila objective and must
            # be evaluated with the current variables.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer that overrides the expectation and gradient
    container objects so that the Hamiltonian itself may carry variables
    (via the local _EvalContainer / _GradContainer).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            every variable of Hamiltonian and circuit combined.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): self-assignment is a no-op (kept for readability?).
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module's visible imports -- confirm it resolves.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): ``warnings`` / ``TequilaWarning`` are not in
                # this module's visible imports -- confirm they resolve.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into the variables being optimized and those held fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            the (possibly parametrized) Hamiltonian to minimize over.
        unitary:
            the circuit whose expectation value is optimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect the variables of the Hamiltonian AND of the circuit.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # One silent evaluation to populate E.infostring.
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring

        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)

        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods

        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): ``objective`` is undefined in this method
                # (this __call__ takes Hamiltonian/unitary) -- the QNG branch
                # would raise NameError if taken; confirm/port it.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient keyword ('2-point', 'cs', '3-point', ...)
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined ``objective`` as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)

        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False

        if compile_gradient:
            # analytic gradient through the local gradient container
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # one silent evaluation to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring

        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))

        Es = []

        optimizer_instance = self
        # Records the per-iteration state of the containers; SciPy calls it
        # once per accepted iterate (not every evaluation).
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns an unused local; presumably intended
            # to update ``callback.real_iterations`` -- confirm.
            real_iterations = range(len(E.history))

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}

        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down

    Parameters
    ----------
    Hamiltonian:
        The Hamiltonian whose expectation value is to be minimized
    unitary:
        The circuit/unitary used to build the objective
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """

    # normalize user-supplied gradient/hessian dictionaries to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: k[1] was previously wrapped in a list (assign_variable([k[1]])),
            # which produced malformed hessian keys; both tuple members must be
            # converted to Variables directly.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/generate_orbital_optimization_data.py | import tequila as tq
import numpy
def opt_mol(mol, U, guess=None, threshold=1.e-5):
    """Iterate orbital optimization to self-consistency.

    Repeatedly runs tequila's orbital optimizer, feeding the MO coefficients
    of each round back in as the next initial guess, until the energy change
    between consecutive rounds drops below `threshold`. Returns the last
    optimization result object.
    """
    previous_energy = 1.0
    change = 1.0
    while change > threshold:
        opt = tq.chemistry.optimize_orbitals(
            molecule=mol, circuit=U, initial_guess=guess, silent=True)
        guess = opt.mo_coeff
        change = abs(opt.energy - previous_energy)
        previous_energy = opt.energy
    return opt
# Scan parameters for the symmetric BeH2 dissociation curve.
start=1.0
step=0.2
steps=10
# taken from https://github.com/kottmanj/moldata/beh2/
name="/h/332/philipps/software/moldata/beh2/beh2_{0:1.2f}_{0:1.2f}_180"
geometry="be 0.0 0.0 0.0\nH 0.0 0.0 {0:1.2f}\nH 0.0 0.0 -{0:1.2f}"
# MO-coefficient guess is carried over from one bond distance to the next
# so each orbital optimization warm-starts from the previous geometry.
guess = None
energies = []
energies_opt = []
points = []
# for R in ([start + i*step for i in range(steps)]):
for R in [1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]:
    mol=tq.Molecule(name=name.format(R), geometry=geometry.format(R))
    U = mol.make_ansatz(name="SPA")
    E = tq.ExpectationValue(H=mol.make_hamiltonian(), U=U)
    # plain VQE without orbital optimization
    result = tq.minimize(E, silent=True)
    energy0 = result.energy
    # orbital-optimized energy (opt_mol iterates to self-consistency)
    opt = opt_mol(mol=mol, U=U, guess=guess)
    guess = opt.mo_coeff
    fci = mol.compute_energy("fci")
    # errors relative to FCI: without (delta1) / with (delta2) orbital optimization
    delta1 = fci-result.energy
    delta2 = fci-opt.energy
    energies.append(energy0)
    energies_opt.append(opt.energy)
    points.append(R)
    print("{:1.2f} | {:+1.4f} | {:+1.4f}".format(R,delta1,delta2))
print(points)
# print(energies)
print('oo-energies')
print(energies_opt)
| 1,383 | 29.086957 | 103 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/energy_optimization.py | import tequila as tq
import numpy as np
from tequila.objective.objective import Variable
import openfermion
from hacked_openfermion_qubit_operator import ParamQubitHamiltonian
from typing import Union
from vqe_utils import convert_PQH_to_tq_QH, convert_tq_QH_to_PQH,\
fold_unitary_into_hamiltonian
from grad_hacked import grad
from scipy_optimizer import minimize
import scipy
import random # guess we could substitute that with numpy.random #TODO
import argparse
import pickle as pk
import sys
sys.path.insert(0, '../../')
#sys.path.insert(0, '../')
# This needs to be properly integrated somewhere else at some point
# from tn_update.qq import contract_energy, optimize_wavefunctions
from tn_update.my_mpo import *
from tn_update.wfn_optimization import contract_energy_mpo, optimize_wavefunctions_mpo, initialize_wfns_randomly
def energy_from_wfn_opti(hamiltonian: tq.QubitHamiltonian, n_qubits: int, guess_wfns=None, TOL=1e-5) -> tuple:
    '''
    Get energy using a tensornetwork based method ~ power method (here as blackbox)
    '''
    half = n_qubits // 2
    # local dimension of each half-system wavefunction
    d = int(2 ** (n_qubits / 2))

    # build the MPO representation of the Hamiltonian once
    mpo = MyMPO(hamiltonian=hamiltonian, n_qubits=n_qubits, maxdim=500)
    mpo.make_mpo_from_hamiltonian()

    # retry the wavefunction optimization until it returns a result;
    # each retry draws fresh random initial wavefunctions unless an
    # explicit guess was supplied
    attempt = 0
    result = None
    while result is None:
        attempt += 1
        if guess_wfns is not None:
            left, right = guess_wfns[0], guess_wfns[1]
        else:
            left = initialize_wfns_randomly(d, half)
            right = initialize_wfns_randomly(d, half)
        result = optimize_wavefunctions_mpo(mpo,
                                            left,
                                            right,
                                            TOL=TOL,
                                            silent=True)

    # result = (energy, psi_left, psi_right)
    return result[0], [result[1], result[2]]
def combine_two_clusters(H: tq.QubitHamiltonian, subsystems: list, circ_list: list) -> float:
    '''
    E = nuc_rep + \sum_j c_j <0_A | U_A+ sigma_j|A U_A | 0_A><0_B | U_B+ sigma_j|B U_B | 0_B>
    i ) Split up Hamiltonian into vector c = [c_j], all sigmas of system A and system B
    ii ) Two vectors of ExpectationValues E_A=[E(U_A, sigma_j|A)], E_B=[E(U_B, sigma_j|B)]
    iii) Minimize vectors from ii)
    iv ) Perform weighted sum \sum_j c_[j] E_A[j] E_B[j]
    result = nuc_rep + iv)
    Finishing touch inspired by private tequila-repo / pair-separated objective
    This is still rather inefficient/unoptimized
    Can still prune out near-zero coefficients c_j
    '''
    # NOTE(review): QubitHamiltonian, PauliString and ExpectationValue are used
    # unqualified here -- presumably pulled in by the star import of
    # tn_update.my_mpo; confirm they resolve at runtime.
    objective = 0.0
    n_subsystems = len(subsystems)  # (currently unused)
    # Over all Paulistrings in the Hamiltonian
    for p_index, pauli in enumerate(H.paulistrings):
        # Gather coefficient
        coeff = pauli.coeff.real
        # Empty dictionary for operations:
        # to be filled with another dictionary per subsystem, where then
        # e.g. X(0)Z(1)Y(2)X(4) and subsystems=[[0,1,2],[3,4,5]]
        # -> ops={ 0: {0: 'X', 1: 'Z', 2: 'Y'}, 1: {4: 'X'} }
        ops = {}
        for s_index, sys in enumerate(subsystems):
            for k, v in pauli.items():
                if k in sys:
                    if s_index in ops:
                        ops[s_index][k] = v
                    else:
                        ops[s_index] = {k: v}
        # If no ops gathered -> identity -> nuclear repulsion
        if len(ops) == 0:
            #print ("this should only happen ONCE")
            objective += coeff
        # If not just identity:
        # add objective += c_j * prod_subsys( < Paulistring_j_subsys >_{U_subsys} )
        elif len(ops) > 0:
            obj_tmp = coeff
            # factorized expectation value: one ExpectationValue per subsystem,
            # multiplied together and weighted by the Pauli coefficient
            for s_index, sys_pauli in ops.items():
                #print (s_index, sys_pauli)
                H_tmp = QubitHamiltonian.from_paulistrings(PauliString(data=sys_pauli))
                E_tmp = ExpectationValue(U=circ_list[s_index], H=H_tmp)
                obj_tmp *= E_tmp
            objective += obj_tmp
    # two sets of starting values are prepared; only the zero-start is used below
    initial_values = {k: 0.0 for k in objective.extract_variables()}
    random_initial_values = {k: 1e-2*np.random.uniform(-1, 1) for k in objective.extract_variables()}
    method = 'bfgs' # 'bfgs' # 'l-bfgs-b' # 'cobyla' # 'slsqp'
    curr_res = tq.minimize(method=method, objective=objective, initial_values=initial_values,
                           gradient='two-point', backend='qulacs',
                           method_options={"finite_diff_rel_step": 1e-3})
    # returns only the optimized energy (a plain float), not the result object
    return curr_res.energy
def energy_from_tq_qcircuit(hamiltonian: Union[tq.QubitHamiltonian,
                                               ParamQubitHamiltonian],
                            n_qubits: int,
                            circuit: Union[list, tq.QCircuit] = None,
                            subsystems: list = [[0, 1, 2, 3, 4, 5]],
                            initial_guess=None) -> tuple:
    '''
    Get minimal energy using tequila.

    Parameters
    ----------
    hamiltonian:
        Either a plain tq.QubitHamiltonian or a ParamQubitHamiltonian.
    n_qubits:
        Number of qubits (kept for interface symmetry; not used directly here).
    circuit:
        A single tq.QCircuit (or one-element list) for a plain VQE run,
        or a list of circuits -- one per entry of `subsystems`.
        BUGFIX: the old signature used `circuit = Union[...]` (assignment
        instead of annotation), making the default the typing object itself.
    subsystems:
        Qubit partition used when several circuits are handed over.
    initial_guess:
        Optional dict of initial variable values (required for the
        ParamQubitHamiltonian path).

    Returns
    -------
    (energy, angles) tuple; angles is None in the multi-circuit case.
    '''
    result = None
    # If only one circuit handed over, just run simple VQE
    if isinstance(circuit, list) and len(circuit) == 1:
        circuit = circuit[0]
    if isinstance(circuit, tq.QCircuit):
        if isinstance(hamiltonian, tq.QubitHamiltonian):
            E = tq.ExpectationValue(H=hamiltonian, U=circuit)
            if initial_guess is None:
                initial_angles = {k: 0.0 for k in E.extract_variables()}
            else:
                initial_angles = initial_guess
            #print ("optimizing non-param...")
            result = tq.minimize(objective=E, method='l-bfgs-b', silent=True, backend='qulacs',
                                 initial_values=initial_angles)
            #gradient='two-point', backend='qulacs',
            #method_options={"finite_diff_rel_step": 1e-4})
        elif isinstance(hamiltonian, ParamQubitHamiltonian):
            if initial_guess is None:
                raise Exception("Need to provide initial guess for this to work.")
            initial_angles = initial_guess
            #print ("optimizing param...")
            result = minimize(hamiltonian, circuit, method='bfgs', initial_values=initial_angles, backend="qulacs", silent=True)
    # If more circuits handed over, assume subsystems
    else:
        # TODO!! implement initial guess for the combine_two_clusters thing
        # BUGFIX: combine_two_clusters returns a plain float energy, so the
        # previous `result.energy` access raised AttributeError on this path;
        # return the energy directly (no optimized angles to report).
        energy = combine_two_clusters(hamiltonian, subsystems, circuit)
        return energy, None
    return result.energy, result.angles
def mixed_optimization(hamiltonian: ParamQubitHamiltonian, n_qubits: int, initial_guess: Union[dict, list]=None, init_angles: list=None):
    '''
    Minimizes energy using wfn opti and a parametrized Hamiltonian
    min_{psi, theta fixed} <psi | H(theta) | psi> --> min_{t, p fixed} <p | H(t) | p>
    ^---------------------------------------------------^
    until convergence

    Alternates between (a) optimizing the left/right wavefunctions at fixed
    Hamiltonian parameters and (b) a few BFGS steps on the parameters at
    fixed wavefunctions, until the energy change is below 1e-5 or 50 rounds.

    Returns (final energy, final [psi_left, psi_right]).
    '''
    energy, optimal_state = None, None
    # fixed-parameter QubitHamiltonian callable + symbolic derivatives w.r.t. each variable
    H_qh = convert_PQH_to_tq_QH(hamiltonian)
    var_keys, H_derivs = H_qh._construct_derivatives()
    print("var keys", var_keys, flush=True)
    print('var dict', init_angles, flush=True)
    # if not init_angles:
    #     var_vals = [0. for i in range(len(var_keys))] # initialize with 0
    # else:
    #     var_vals = init_angles

    def build_variable_dict(keys, values):
        # zip keys with (complexified) values into the dict form H_qh expects
        assert(len(keys)==len(values))
        out = dict()
        for idx, key in enumerate(keys):
            out[key] = complex(values[idx])
        return out

    # init_angles must be a dict whose insertion order matches var_keys
    var_dict = init_angles
    var_vals = [*init_angles.values()]
    assert(build_variable_dict(var_keys, var_vals) == var_dict)

    def wrap_energy_eval(psi):
        ''' like energy_from_wfn_opti but instead of optimize
        get inner product '''
        def energy_eval_fn(x):
            var_dict = build_variable_dict(var_keys, x)
            H_qh_fix = H_qh(var_dict)
            H_mpo = MyMPO(hamiltonian=H_qh_fix, n_qubits=n_qubits, maxdim=500)
            H_mpo.make_mpo_from_hamiltonian()
            return contract_energy_mpo(H_mpo, psi[0], psi[1])
        return energy_eval_fn

    def wrap_gradient_eval(psi):
        ''' call derivatives with updated variable list '''
        def gradient_eval_fn(x):
            variables = build_variable_dict(var_keys, x)
            deriv_expectations = H_derivs.values() # list of ParamQubitHamiltonian's
            deriv_qhs = [convert_PQH_to_tq_QH(d) for d in deriv_expectations]
            deriv_qhs = [d(variables) for d in deriv_qhs] # list of tq.QubitHamiltonian's
            # print(deriv_qhs)
            deriv_mpos = [MyMPO(hamiltonian=d, n_qubits=n_qubits, maxdim=500)\
                          for d in deriv_qhs]
            # print(deriv_mpos[0].n_qubits)
            for d in deriv_mpos:
                d.make_mpo_from_hamiltonian()
            # deriv_mpos = [d.make_mpo_from_hamiltonian() for d in deriv_mpos]
            return [contract_energy_mpo(d, psi[0], psi[1]) for d in deriv_mpos]
        return gradient_eval_fn

    def do_wfn_opti(values, guess_wfns, TOL):
        # step (a): optimize the wavefunctions at fixed parameter values
        # H_qh = H_qh(H_vars)
        var_dict = build_variable_dict(var_keys, values)
        en, psi = energy_from_wfn_opti(H_qh(var_dict), n_qubits=n_qubits,
                                       guess_wfns=guess_wfns, TOL=TOL)
        return en, psi

    def do_param_opti(psi, x0):
        # step (b): a few BFGS steps on the Hamiltonian parameters at fixed psi
        result = scipy.optimize.minimize(fun=wrap_energy_eval(psi),
                                         jac=wrap_gradient_eval(psi),
                                         method='bfgs',
                                         x0=x0,
                                         options={'maxiter': 4})
        # print(result)
        return result

    e_prev, psi_prev = 123., None
    # print("iguess", initial_guess)
    e_curr, psi_curr = do_wfn_opti(var_vals, initial_guess, 1e-5)
    print('first eval', e_curr, flush=True)

    def converged(e_prev, e_curr, TOL=1e-5):
        return True if np.abs(e_prev-e_curr) < TOL else False

    it = 0
    var_prev = var_vals
    print("vars before comp", var_prev, flush=True)
    while not converged(e_prev, e_curr) and it < 50:
        e_prev, psi_prev = e_curr, psi_curr
        # print('before param opti')
        # NOTE(review): var_prev is never updated inside the loop, so every
        # parameter optimization restarts from the initial angles rather than
        # from the previous iteration's result (var_curr) -- confirm intended.
        res = do_param_opti(psi_curr, var_prev)
        var_curr = res['x']
        print('curr vars', var_curr, flush=True)
        e_curr = res['fun']
        print('en before wfn opti', e_curr, flush=True)
        # print("iiiiiguess", psi_prev)
        e_curr, psi_curr = do_wfn_opti(var_curr, psi_prev, 1e-3)
        print('en after wfn opti', e_curr, flush=True)
        it += 1
        print('at iteration', it, flush=True)
    return e_curr, psi_curr
# optimize parameters with fixed wavefunction
# define/wrap energy function - given |p>, evaluate <p|H(t)|p>
'''
def wrap_gradient(objective: typing.Union[Objective, QTensor], no_compile=False, *args, **kwargs):
def gradient_fn(variable: Variable = None):
return grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs)
return grad_fn
'''
def minimize_energy(hamiltonian: Union[ParamQubitHamiltonian, tq.QubitHamiltonian], n_qubits: int, type_energy_eval: str='wfn', cluster_circuit: tq.QCircuit=None, initial_guess=None, initial_mixed_angles: dict=None) -> float:
    '''
    Dispatch energy minimization to one of two backends:
    'wfn' -- power-method-style wavefunction optimization (with a mixed
             parameter/wavefunction loop for parametrized Hamiltonians), or
    'qc'  -- variational optimization through a unitary circuit.
    Returns the (energy, optimal state/angles) pair of the chosen backend.
    '''
    if type_energy_eval == 'wfn':
        if isinstance(hamiltonian, tq.QubitHamiltonian):
            energy, optimal_state = energy_from_wfn_opti(hamiltonian, n_qubits, initial_guess)
        elif isinstance(hamiltonian, ParamQubitHamiltonian):
            energy, optimal_state = mixed_optimization(hamiltonian=hamiltonian,
                                                       n_qubits=n_qubits,
                                                       initial_guess=initial_guess,
                                                       init_angles=initial_mixed_angles)
        return energy, optimal_state

    if type_energy_eval == 'qc':
        if cluster_circuit is None:
            raise Exception("Need to hand over circuit!")
        return energy_from_tq_qcircuit(hamiltonian, n_qubits, cluster_circuit,
                                       [[0, 1, 2, 3], [4, 5, 6, 7]], initial_guess)

    raise Exception("Option not implemented!")
| 12,442 | 42.968198 | 225 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/parallel_annealing.py | import tequila as tq
import multiprocessing
import copy
from time import sleep
from mutation_options import *
from pathos.multiprocessing import ProcessingPool as Pool
def evolve_population(hamiltonian,
                      type_energy_eval,
                      cluster_circuit,
                      process_id,
                      num_offsprings,
                      actions_ratio,
                      tasks, results):
    """
    This function carries a single step of the simulated
    annealing on a single member of the generation

    args:
        process_id: A unique identifier for each process
        num_offsprings: Number of offsprings every member has
        actions_ratio: The ratio of the different actions for mutations
        tasks: multiprocessing queue to pass family_id, instructions and current_fitness
            family_id: A unique identifier for each family
            instructions: An object consisting of the instructions in the quantum circuit
            current_fitness: Current fitness value of the circuit
        results: multiprocessing queue to pass results back
    """
    #print('[%s] evaluation routine starts' % process_id)
    while True:
        try:
            # getting parameters for mutation and carrying it
            family_id, instructions, current_fitness = tasks.get()
            # check if patience has been exhausted
            # BUGFIX: only reset when a best snapshot actually exists
            # (consistent with st_evolve_population); calling reset_to_best()
            # on an empty best_previous_instructions dict raises a KeyError.
            if instructions.patience == 0:
                if instructions.best_previous_instructions:
                    instructions.reset_to_best()
            # best_child == 0 encodes "no offspring beat the parent"
            best_child = 0
            best_energy = current_fitness
            new_generations = {}
            for off_id in range(1, num_offsprings+1):
                scheduled_ratio = schedule_actions_ratio(epoch=0, steady_ratio=actions_ratio)
                action = get_action(scheduled_ratio)
                # mutate a deep copy so the parent stays intact
                updated_instructions = copy.deepcopy(instructions)
                updated_instructions.update_by_action(action)
                new_fitness, wfn = evaluate_fitness(updated_instructions, hamiltonian, type_energy_eval, cluster_circuit)
                updated_instructions.set_reference_wfn(wfn)
                # prob_acceptance is computed but the Metropolis-style accept
                # step below is currently disabled (see commented lines)
                prob_acceptance = get_prob_acceptance(current_fitness, new_fitness, updated_instructions.T, updated_instructions.reference_energy)
                # need to figure out in case we have two equals
                new_generations[str(off_id)] = updated_instructions
                if best_energy > new_fitness: # now two equals -> "first one" is picked
                    best_child = off_id
                    best_energy = new_fitness
            # decision = np.random.binomial(1, prob_acceptance)
            # if decision == 0:
            if best_child == 0:
                # no improvement -> burn one unit of patience on the parent
                instructions.patience -= 1
                #print('Reduced patience -- now ' + str(updated_instructions.patience))
                if instructions.patience == 0:
                    if instructions.best_previous_instructions:
                        instructions.reset_to_best()
                    else:
                        instructions.update_T()
                # Add result to the queue
                results.put((family_id, instructions, current_fitness))
            else:
                # best offspring replaces the parent in the generation
                if (best_energy < current_fitness):
                    new_generations[str(best_child)].update_best_previous_instructions()
                new_generations[str(best_child)].update_T()
                # Add result to the queue
                results.put((family_id, new_generations[str(best_child)], best_energy))
        except Exception as eeee:
            # the worker is stopped by pushing a sentinel (-1) into `tasks`;
            # unpacking the sentinel fails and lands here, ending the loop
            #print('[%s] evaluation routine quits' % process_id)
            #print(eeee)
            # Indicate finished
            results.put(-1)
            break
    return
def evolve_generation(num_offsprings, actions_ratio,
                      instructions_dict,
                      hamiltonian, num_processors=4,
                      type_energy_eval='wfn',
                      cluster_circuit=None):
    """
    This function does parallel mutation on all the members of the
    generation and updates the generation

    args:
        num_offsprings: Number of offsprings every member has
        actions_ratio: The ratio of the different actions for sampling
        instructions_dict: A dictionary with the "Instructions" objects the corresponding fitness values
                           as values
        num_processors: Number of processors to use for parallel run
    """
    # Define IPC manager
    manager = multiprocessing.Manager()
    # Define a list (queue) for tasks and computation results
    tasks = manager.Queue()
    results = manager.Queue()
    processes = []
    # NOTE(review): this pool is created but never used for work; only
    # plain Processes below do the computation -- possibly leftover code.
    pool = Pool(processes=num_processors)
    for i in range(num_processors):
        process_id = 'P%i' % i
        # Create the process, and connect it to the worker function
        new_process = multiprocessing.Process(target=evolve_population,
                                              args=(hamiltonian,
                                                    type_energy_eval,
                                                    cluster_circuit,
                                                    process_id,
                                                    num_offsprings, actions_ratio,
                                                    tasks, results))
        # Add new process to the list of processes
        processes.append(new_process)
        # Start the process
        new_process.start()
    #print("putting tasks")
    # one task per family: (id, Instructions object, current fitness)
    for family_id in instructions_dict:
        single_task = (family_id, instructions_dict[family_id][0], instructions_dict[family_id][1])
        tasks.put(single_task)
    # Wait while the workers process - change it to something for our case later
    # sleep(5)
    # NOTE(review): this Barrier is constructed but never wait()-ed on by the
    # workers, so it does not actually synchronize anything -- confirm intent.
    multiprocessing.Barrier(num_processors)
    #print("after barrier")
    # Quit the worker processes by sending them -1
    for i in range(num_processors):
        tasks.put(-1)
    # Read calculation results
    num_finished_processes = 0
    while True:
        # Read result
        #print("num fin", num_finished_processes)
        try:
            # workers push either a (family_id, instructions, fitness) triple
            # or the -1 sentinel; unpacking the sentinel raises and is counted
            # as one finished worker below
            family_id, updated_instructions, new_fitness = results.get()
            instructions_dict[family_id] = (updated_instructions, new_fitness)
        except:
            # Process has finished
            num_finished_processes += 1
            if num_finished_processes == num_processors:
                break
    for process in processes:
        process.join()
    pool.close()
| 6,456 | 38.371951 | 146 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/single_thread_annealing.py | import tequila as tq
import copy
from mutation_options import *
def st_evolve_population(hamiltonian,
                         type_energy_eval,
                         cluster_circuit,
                         num_offsprings,
                         actions_ratio,
                         tasks):
    """
    This function carries a single step of the simulated
    annealing on a single member of the generation

    args:
        num_offsprings: Number of offsprings every member has
        actions_ratio: The ratio of the different actions for mutations
        tasks: a tuple of (family_id, instructions and current_fitness)
            family_id: A unique identifier for each family
            instructions: An object consisting of the instructions in the quantum circuit
            current_fitness: Current fitness value of the circuit
    """
    family_id, instructions, current_fitness = tasks
    #check if patience has been exhausted
    if instructions.patience == 0:
        if instructions.best_previous_instructions:
            instructions.reset_to_best()
    # best_child == 0 encodes "no offspring beat the parent"
    best_child = 0
    best_energy = current_fitness
    new_generations = {}
    for off_id in range(1, num_offsprings+1):
        scheduled_ratio = schedule_actions_ratio(epoch=0, steady_ratio=actions_ratio)
        action = get_action(scheduled_ratio)
        # mutate a deep copy so the parent stays intact
        updated_instructions = copy.deepcopy(instructions)
        updated_instructions.update_by_action(action)
        new_fitness, wfn = evaluate_fitness(updated_instructions, hamiltonian, type_energy_eval, cluster_circuit)
        updated_instructions.set_reference_wfn(wfn)
        # NOTE(review): prob_acceptance is computed but unused -- the
        # Metropolis-style acceptance step below is currently disabled.
        prob_acceptance = get_prob_acceptance(current_fitness, new_fitness, updated_instructions.T, updated_instructions.reference_energy)
        # need to figure out in case we have two equals
        new_generations[str(off_id)] = updated_instructions
        if best_energy > new_fitness: # now two equals -> "first one" is picked
            best_child = off_id
            best_energy = new_fitness
    # decision = np.random.binomial(1, prob_acceptance)
    # if decision == 0:
    if best_child == 0:
        # no improvement -> burn one unit of patience on the parent
        instructions.patience -= 1
        #print('Reduced patience -- now ' + str(updated_instructions.patience))
        if instructions.patience == 0:
            if instructions.best_previous_instructions:
                instructions.reset_to_best()
            else:
                instructions.update_T()
        # Add result to the queue
        results = ((family_id, instructions, current_fitness))
    else:
        # best offspring replaces the parent
        if (best_energy < current_fitness):
            new_generations[str(best_child)].update_best_previous_instructions()
        new_generations[str(best_child)].update_T()
        # Add result to the queue
        results = ((family_id, new_generations[str(best_child)], best_energy))
    return results
def st_evolve_generation(num_offsprings, actions_ratio,
                         instructions_dict,
                         hamiltonian,
                         type_energy_eval='wfn',
                         cluster_circuit=None):
    """
    Single-threaded counterpart of evolve_generation: mutates every member
    of the generation in turn and writes the evolved (Instructions, fitness)
    pairs back into instructions_dict.

    args:
        num_offsprings: Number of offsprings every member has
        actions_ratio: The ratio of the different actions for sampling
        instructions_dict: maps family id -> (Instructions object, fitness)
    """
    for fid, (member, fitness) in list(instructions_dict.items()):
        outcome = st_evolve_population(hamiltonian,
                                       type_energy_eval,
                                       cluster_circuit,
                                       num_offsprings,
                                       actions_ratio,
                                       (fid, member, fitness))
        out_id, evolved_member, evolved_fitness = outcome
        instructions_dict[out_id] = (evolved_member, evolved_fitness)
| 4,005 | 40.298969 | 138 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/mutation_options.py | import argparse
import numpy as np
import random
import copy
import tequila as tq
from typing import Union
from collections import Counter
from time import time
from vqe_utils import convert_PQH_to_tq_QH, convert_tq_QH_to_PQH,\
fold_unitary_into_hamiltonian
from energy_optimization import minimize_energy
global_seed = 1
class Instructions:
'''
TODO need to put some documentation here
'''
def __init__(self, n_qubits, mu=2.0, sigma=0.4, alpha=0.9, T_0=1.0,
beta=0.5, patience=10, max_non_cliffords=0,
reference_energy=0., number=None):
# hardcoded values for now
self.num_non_cliffords = 0
self.max_non_cliffords = max_non_cliffords
# ------------------------
self.starting_patience = patience
self.patience = patience
self.mu = mu
self.sigma = sigma
self.alpha = alpha
self.beta = beta
self.T_0 = T_0
self.T = T_0
self.gates = self.get_random_gates(number=number)
self.n_qubits = n_qubits
self.positions = self.get_random_positions()
self.best_previous_instructions = {}
self.reference_energy = reference_energy
self.best_reference_wfn = None
self.noncliff_replacements = {}
def _str(self):
print(self.gates)
print(self.positions)
def set_reference_wfn(self, reference_wfn):
self.best_reference_wfn = reference_wfn
def update_T(self, update_type: str = 'regular', best_temp=None):
# Regular update
if update_type.lower() == 'regular':
self.T = self.alpha * self.T
# Temperature update if patience ran out
elif update_type.lower() == 'patience':
self.T = self.beta*best_temp + (1-self.beta)*self.T_0
def get_random_gates(self, number=None):
'''
Randomly generates a list of gates.
number indicates the number of gates to generate
otherwise, number will be drawn from a log normal distribution
'''
mu, sigma = self.mu, self.sigma
full_options = ['X','Y','Z','S','H','CX', 'CY', 'CZ','SWAP', 'UCC2c', 'UCC4c', 'UCC2', 'UCC4']
clifford_options = ['X','Y','Z','S','H','CX', 'CY', 'CZ','SWAP', 'UCC2c', 'UCC4c']
#non_clifford_options = ['UCC2', 'UCC4']
# Gate distribution, selecting number of gates to add
k = np.random.lognormal(mu, sigma)
k = np.int(k)
if number is not None:
k = number
# Selecting gate types
gates = None
if self.num_non_cliffords < self.max_non_cliffords:
gates = random.choices(full_options, k=k) # with replacement
else:
gates = random.choices(clifford_options, k=k)
new_num_non_cliffords = 0
if "UCC2" in gates:
new_num_non_cliffords += Counter(gates)["UCC2"]
if "UCC4" in gates:
new_num_non_cliffords += Counter(gates)["UCC4"]
if (new_num_non_cliffords+self.num_non_cliffords) <= self.max_non_cliffords:
self.num_non_cliffords += new_num_non_cliffords
else:
extra_cliffords = (new_num_non_cliffords+self.num_non_cliffords) - self.max_non_cliffords
assert(extra_cliffords >= 0)
new_gates = random.choices(clifford_options, k=extra_cliffords)
for g in new_gates:
try:
gates[gates.index("UCC4")] = g
except:
gates[gates.index("UCC2")] = g
self.num_non_cliffords = self.max_non_cliffords
if k == 1:
if gates == "UCC2c" or gates == "UCC4c":
gates = get_string_Cliff_ucc(gates)
if gates == "UCC2" or gates == "UCC4":
gates = get_string_ucc(gates)
else:
for ind, gate in enumerate(gates):
if gate == "UCC2c" or gate == "UCC4c":
gates[ind] = get_string_Cliff_ucc(gate)
if gate == "UCC2" or gate == "UCC4":
gates[ind] = get_string_ucc(gate)
return gates
def get_random_positions(self, gates=None):
'''
Randomly assign gates to qubits.
'''
if gates is None:
gates = self.gates
n_qubits = self.n_qubits
single_qubit = ['X','Y','Z','S','H']
# two_qubit = ['CX','CY','CZ', 'SWAP']
two_qubit = ['CX', 'CY', 'CZ', 'SWAP', 'UCC2c', 'UCC2']
four_qubit = ['UCC4c', 'UCC4']
qubits = list(range(0, n_qubits))
q_positions = []
for gate in gates:
if gate in four_qubit:
p = random.sample(qubits, k=4)
if gate in two_qubit:
p = random.sample(qubits, k=2)
if gate in single_qubit:
p = random.sample(qubits, k=1)
if "UCC2" in gate:
p = random.sample(qubits, k=2)
if "UCC4" in gate:
p = random.sample(qubits, k=4)
q_positions.append(p)
return q_positions
def delete(self, number=None):
'''
Randomly drops some gates from a clifford instruction set
if not specified, the number of gates to drop is sampled from a uniform distribution over all the gates
'''
gates = copy.deepcopy(self.gates)
positions = copy.deepcopy(self.positions)
n_qubits = self.n_qubits
if number is not None:
num_to_drop = number
else:
num_to_drop = random.sample(range(1,len(gates)-1), k=1)[0]
action_indices = random.sample(range(0,len(gates)-1), k=num_to_drop)
for index in sorted(action_indices, reverse=True):
if "UCC2_" in str(gates[index]) or "UCC4_" in str(gates[index]):
self.num_non_cliffords -= 1
del gates[index]
del positions[index]
self.gates = gates
self.positions = positions
#print ('deleted {} gates'.format(num_to_drop))
def add(self, number=None):
'''
adds a random selection of clifford gates to the end of a clifford instruction set
if number is not specified, the number of gates to add will be drawn from a log normal distribution
'''
gates = copy.deepcopy(self.gates)
positions = copy.deepcopy(self.positions)
n_qubits = self.n_qubits
added_instructions = self.get_new_instructions(number=number)
gates.extend(added_instructions['gates'])
positions.extend(added_instructions['positions'])
self.gates = gates
self.positions = positions
#print ('added {} gates'.format(len(added_instructions['gates'])))
def change(self, number=None):
'''
change a random number of gates and qubit positions in a clifford instruction set
if not specified, the number of gates to change is sampled from a uniform distribution over all the gates
'''
gates = copy.deepcopy(self.gates)
positions = copy.deepcopy(self.positions)
n_qubits = self.n_qubits
if number is not None:
num_to_change = number
else:
num_to_change = random.sample(range(1,len(gates)), k=1)[0]
action_indices = random.sample(range(0,len(gates)-1), k=num_to_change)
added_instructions = self.get_new_instructions(number=num_to_change)
for i in range(num_to_change):
gates[action_indices[i]] = added_instructions['gates'][i]
positions[action_indices[i]] = added_instructions['positions'][i]
self.gates = gates
self.positions = positions
#print ('changed {} gates'.format(len(added_instructions['gates'])))
# TODO to be debugged!
def prune(self):
'''
Prune instructions to remove redundant operations:
--> first gate should go beyond subsystems (this assumes expressible enough subsystem-ciruits
#TODO later -> this needs subsystem information in here!
--> 2 subsequent gates that are their respective inverse can be removed
#TODO this might change the number of qubits acted on in theory?
'''
pass
#print ("DEBUG PRUNE FUNCTION!")
# gates = copy.deepcopy(self.gates)
# positions = copy.deepcopy(self.positions)
# for g_index in range(len(gates)-1):
# if (gates[g_index] == gates[g_index+1] and not 'S' in gates[g_index])\
# or (gates[g_index] == 'S' and gates[g_index+1] == 'S-dag')\
# or (gates[g_index] == 'S-dag' and gates[g_index+1] == 'S'):
# print(len(gates))
# if positions[g_index] == positions[g_index+1]:
# self.gates.pop(g_index)
# self.positions.pop(g_index)
def update_by_action(self, action: str):
'''
Updates instruction dictionary
-> Either adds, deletes or changes gates
'''
if action == 'delete':
try:
self.delete()
# In case there are too few gates to delete
except:
pass
elif action == 'add':
self.add()
elif action == 'change':
self.change()
else:
raise Exception("Unknown action type " + action + ".")
self.prune()
def update_best_previous_instructions(self):
''' Overwrites the best previous instructions with the current ones. '''
self.best_previous_instructions['gates'] = copy.deepcopy(self.gates)
self.best_previous_instructions['positions'] = copy.deepcopy(self.positions)
self.best_previous_instructions['T'] = copy.deepcopy(self.T)
def reset_to_best(self):
''' Overwrites the current instructions with best previous ones. '''
#print ('Patience ran out... resetting to best previous instructions.')
self.gates = copy.deepcopy(self.best_previous_instructions['gates'])
self.positions = copy.deepcopy(self.best_previous_instructions['positions'])
self.patience = copy.deepcopy(self.starting_patience)
self.update_T(update_type='patience', best_temp=copy.deepcopy(self.best_previous_instructions['T']))
def get_new_instructions(self, number=None):
    """Build a fresh Clifford instruction set.

    Parameters
    ----------
    number:
        Optional number of gates to draw; forwarded to
        ``get_random_gates``.

    Returns
    -------
    dict
        Dictionary with keys ``'gates'`` (list of gate labels) and
        ``'positions'`` (matching list of qubit positions).
    """
    # Removed unused locals (mu, sigma, n_qubits) that were read but
    # never used, and fixed the docstring.
    gates = self.get_random_gates(number=number)
    q_positions = self.get_random_positions(gates)
    # Every gate needs a matching position entry.
    assert len(q_positions) == len(gates)
    return {'gates': gates, 'positions': q_positions}
def replace_cg_w_ncg(self, gate_id):
    """Swap the Clifford gate at *gate_id* for its non-Clifford counterpart.

    Single- and two-qubit Cliffords map onto (pseudo-)rotational versions,
    Clifford UCC blocks ("UCC2c"/"UCC4c") onto parametrized UCC blocks,
    and SWAP (or any unknown label) is left untouched.
    """
    replacements = {
        'X': 'Rx', 'Y': 'Ry', 'Z': 'Rz',
        'S': 'S_nc', 'H': 'H_nc',
        'CX': 'CRx', 'CY': 'CRy', 'CZ': 'CRz',
    }
    label = self.gates[gate_id]
    key = str(label)
    if label in replacements:
        label = replacements[label]
    elif 'UCC2c' in key:
        # "<pre>_UCC2c_<mid>" -> "<pre>_UCC2_<mid>" (parametrized form)
        label = key.split('_')[0] + '_UCC2_' + key.split('_')[-1]
    elif 'UCC4c' in key:
        label = key.split('_')[0] + '_UCC4_' + key.split('_')[-1]
    self.gates[gate_id] = label
def build_circuit(instructions):
    """Construct a tequila circuit from a (Clifford) instruction set.

    Parameters
    ----------
    instructions:
        Object exposing ``gates`` (list of gate labels) and ``positions``
        (matching list of qubit-index sequences).

    Returns
    -------
    tuple
        ``(circuit, init_angles)`` where ``circuit`` is a ``tq.QCircuit``
        and ``init_angles`` maps variable names of the non-Clifford
        rotations to their initial values.
    """
    global global_seed

    def _fresh_variable():
        """Seed-deterministic unique variable name for a rotation angle."""
        global global_seed
        np.random.seed(global_seed)
        global_seed += 1
        return "var" + str(np.random.rand())

    # Hoisted out of the loop: was redefined on every iteration.
    def get_ucc_init_angles(gate):
        """Initial angle for a non-Clifford UCC gate, from its mid-gate label."""
        mid_gate = gate.split("_")[-1]
        if mid_gate in ('Z', 'S', 'S-dag'):
            return 0.
        raise Exception("This should not happen -- center/mid gate should be Z,S,S_dag.")

    gates = instructions.gates
    q_positions = instructions.positions
    init_angles = {}
    clifford_circuit = tq.QCircuit()
    for i in range(len(gates)):
        if len(q_positions[i]) == 2:
            q1, q2 = q_positions[i]
        elif len(q_positions[i]) == 1:
            # NOTE(review): q1 stays a 1-element sequence here (tequila
            # accepts sequence targets); confirm unpacking is not intended.
            q1 = q_positions[i]
            q2 = None
        elif not len(q_positions[i]) == 4:
            raise Exception("q_positions[i] must have length 1, 2 or 4...")
        # --- Clifford gates --------------------------------------------
        if gates[i] == 'X':
            clifford_circuit += tq.gates.X(q1)
        if gates[i] == 'Y':
            clifford_circuit += tq.gates.Y(q1)
        if gates[i] == 'Z':
            clifford_circuit += tq.gates.Z(q1)
        if gates[i] == 'S':
            clifford_circuit += tq.gates.S(q1)
        if gates[i] == 'H':
            clifford_circuit += tq.gates.H(q1)
        if gates[i] == 'CX':
            clifford_circuit += tq.gates.CX(q1, q2)
        if gates[i] == 'CY':
            # CY decomposed into its generators: S, CX, S-dagger.
            clifford_circuit += tq.gates.S(q2)
            clifford_circuit += tq.gates.CX(q1, q2)
            clifford_circuit += tq.gates.S(q2).dagger()
        if gates[i] == 'CZ':
            # CZ decomposed into its generators: H, CX, H.
            clifford_circuit += tq.gates.H(q2)
            clifford_circuit += tq.gates.CX(q1, q2)
            clifford_circuit += tq.gates.H(q2)
        if gates[i] == 'SWAP':
            # SWAP as three CX gates.
            clifford_circuit += tq.gates.CX(q1, q2)
            clifford_circuit += tq.gates.CX(q2, q1)
            clifford_circuit += tq.gates.CX(q1, q2)
        if "UCC2c" in str(gates[i]) or "UCC4c" in str(gates[i]):
            clifford_circuit += get_clifford_UCC_circuit(gates[i], q_positions[i])
        # --- non-Clifford gates (Clifford + parametrized rotation) -----
        if gates[i] == "S_nc":
            var_name = _fresh_variable()
            init_angles[var_name] = 0.0
            clifford_circuit += tq.gates.S(q1)
            clifford_circuit += tq.gates.Rz(angle=var_name, target=q1)
        if gates[i] == "H_nc":
            var_name = _fresh_variable()
            init_angles[var_name] = 0.0
            clifford_circuit += tq.gates.H(q1)
            clifford_circuit += tq.gates.Ry(angle=var_name, target=q1)
        if gates[i] == "Rx":
            var_name = _fresh_variable()
            init_angles[var_name] = 0.0
            clifford_circuit += tq.gates.X(q1)
            clifford_circuit += tq.gates.Rx(angle=var_name, target=q1)
        if gates[i] == "Ry":
            var_name = _fresh_variable()
            init_angles[var_name] = 0.0
            clifford_circuit += tq.gates.Y(q1)
            clifford_circuit += tq.gates.Ry(angle=var_name, target=q1)
        if gates[i] == "Rz":
            # Bugfix: this branch previously skipped np.random.seed(...),
            # breaking the seed-deterministic variable naming used by
            # every sibling branch (and used 0 instead of 0.0).
            var_name = _fresh_variable()
            init_angles[var_name] = 0.0
            clifford_circuit += tq.gates.Z(q1)
            clifford_circuit += tq.gates.Rz(angle=var_name, target=q1)
        if gates[i] == "CRx":
            var_name = _fresh_variable()
            init_angles[var_name] = np.pi
            clifford_circuit += tq.gates.Rx(angle=var_name, target=q2, control=q1)
        if gates[i] == "CRy":
            var_name = _fresh_variable()
            init_angles[var_name] = np.pi
            clifford_circuit += tq.gates.Ry(angle=var_name, target=q2, control=q1)
        if gates[i] == "CRz":
            var_name = _fresh_variable()
            init_angles[var_name] = np.pi
            clifford_circuit += tq.gates.Rz(angle=var_name, target=q2, control=q1)
        if "UCC2_" in str(gates[i]) or "UCC4_" in str(gates[i]):
            uccc_circuit = get_non_clifford_UCC_circuit(gates[i], q_positions[i])
            clifford_circuit += uccc_circuit
            try:
                var_name = uccc_circuit.extract_variables()[0]
                init_angles[var_name] = get_ucc_init_angles(gates[i])
            except Exception:
                # No variable extracted: keep original fallback of wiping
                # the collected initial angles (was a bare `except:`).
                init_angles = {}
    return clifford_circuit, init_angles
def get_non_clifford_UCC_circuit(gate, positions):
    """Build the parametrized (non-Clifford) UCC-style circuit for *gate*.

    The gate label has the form ``"<pre>_<UCC-type>_<mid>"`` where
    ``<pre>`` is a '#'-separated list of basis-change gates (X/Y/H/I)
    and ``<mid>`` is one of S, S-dag or Z.  Returns the circuit
    ``pre · mid · pre†`` with an extra parametrized Rz on the last qubit.
    """
    global global_seed
    pre_cir_dic = {"X": tq.gates.X, "Y": tq.gates.Y, "H": tq.gates.H, "I": None}
    # Was `split(*"#")`, an obfuscated spelling of split("#").
    pre_gates = gate.split("_")[0].split("#")
    pre_circuit = tq.QCircuit()
    for i, pos in enumerate(positions):
        # Explicit no-op for 'I' (mapped to None), unknown labels and
        # missing entries; previously a bare try/except that also hid
        # genuine gate-construction errors.
        if i < len(pre_gates):
            builder = pre_cir_dic.get(pre_gates[i])
            if builder is not None:
                pre_circuit += builder(pos)
    for i, pos in enumerate(positions[:-1]):
        # CX ladder entangling all positions.
        pre_circuit += tq.gates.CX(pos, positions[i + 1])
    mid_gate = gate.split("_")[-1]
    mid_circuit = tq.QCircuit()
    if mid_gate in ("S", "S-dag", "Z"):
        # Seed-deterministic variable name; global_seed is only advanced
        # for recognized mid gates (matches original behavior).
        np.random.seed(global_seed)
        global_seed += 1
        var_name = "var" + str(np.random.rand())
        if mid_gate == "S":
            mid_circuit += tq.gates.S(positions[-1])
        elif mid_gate == "S-dag":
            mid_circuit += tq.gates.S(positions[-1]).dagger()
        else:
            mid_circuit += tq.gates.Z(positions[-1])
        mid_circuit += tq.gates.Rz(angle=tq.Variable(var_name), target=positions[-1])
    return pre_circuit + mid_circuit + pre_circuit.dagger()
def get_string_Cliff_ucc(gate):
    """Attach randomly drawn basis-change and mid-circuit labels to *gate*.

    For 'UCC2c' two and for 'UCC4c' four basis-change gates are sampled
    (without replacement) from {X, Y, H, I}; the mid gate is one of
    {S, S-dag, Z}.  Returns ``"<p1>#<p2>..._<gate>_<mid>"``.
    """
    basis_pool = ["X", "Y", "H", "I"]
    mid_pool = ["S", "S-dag", "Z"]
    picks = None
    if "UCC2c" in gate:
        picks = random.sample(basis_pool, k=2)
    elif "UCC4c" in gate:
        picks = random.sample(basis_pool, k=4)
    pre_label = "#".join(str(item) for item in picks)
    mid_label = random.sample(mid_pool, k=1)[0]
    return str(pre_label + "_" + gate + "_" + mid_label)
def get_string_ucc(gate):
    """Attach random basis-change labels and a random angle to *gate*.

    For 'UCC2' two and for 'UCC4' four basis-change gates are sampled
    (without replacement) from {X, Y, H, I}; the trailing part is a
    uniform angle in [0, 2*pi) rendered as a string.
    """
    basis_pool = ["X", "Y", "H", "I"]
    picks = None
    if "UCC2" in gate:
        picks = random.sample(basis_pool, k=2)
    elif "UCC4" in gate:
        picks = random.sample(basis_pool, k=4)
    pre_label = "#".join(str(item) for item in picks)
    angle_label = str(random.random() * 2 * np.pi)
    return str(pre_label + "_" + gate + "_" + angle_label)
def get_clifford_UCC_circuit(gate, positions):
    """Build the Clifford-only approximation of a UCC excitation circuit.

    *gate* has the form ``"<pre>_<UCC-type>_<mid>"``: '#'-separated
    basis-change gates (X/Y/H/I) and a mid gate from {S, S-dag, Z}.
    Returns the circuit ``pre · mid · pre†``.
    """
    pre_cir_dic = {"X": tq.gates.X, "Y": tq.gates.Y, "H": tq.gates.H, "I": None}
    # Was `split(*"#")`, an obfuscated spelling of split("#").
    pre_gates = gate.split("_")[0].split("#")
    pre_circuit = tq.QCircuit()
    for i, pos in enumerate(positions):
        # Explicit no-op for 'I' (mapped to None), unknown labels and
        # missing entries; previously a bare try/except that also hid
        # genuine gate-construction errors.
        if i < len(pre_gates):
            builder = pre_cir_dic.get(pre_gates[i])
            if builder is not None:
                pre_circuit += builder(pos)
    for i, pos in enumerate(positions[:-1]):
        # CX ladder entangling all positions.
        pre_circuit += tq.gates.CX(pos, positions[i + 1])
    mid_gate = gate.split("_")[-1]
    mid_circuit = tq.QCircuit()
    if mid_gate == "S":
        mid_circuit += tq.gates.S(positions[-1])
    elif mid_gate == "S-dag":
        mid_circuit += tq.gates.S(positions[-1]).dagger()
    elif mid_gate == "Z":
        mid_circuit += tq.gates.Z(positions[-1])
    return pre_circuit + mid_circuit + pre_circuit.dagger()
def get_UCC_circuit(gate, positions):
    """Build the fully parametrized UCC excitation circuit for *gate*.

    Applies the '#'-separated basis changes, a CX ladder, a parametrized
    Rz on the last qubit, then undoes the basis changes:
    ``pre · Rz(var) · pre†``.
    """
    global global_seed
    pre_cir_dic = {"X": tq.gates.X, "Y": tq.gates.Y, "H": tq.gates.H, "I": None}
    # Was `split(*"#")`, an obfuscated spelling of split("#").
    pre_gates = gate.split("_")[0].split("#")
    pre_circuit = tq.QCircuit()
    for i, pos in enumerate(positions):
        # Explicit no-op for 'I' (mapped to None), unknown labels and
        # missing entries; previously a bare try/except that also hid
        # genuine gate-construction errors.
        if i < len(pre_gates):
            builder = pre_cir_dic.get(pre_gates[i])
            if builder is not None:
                pre_circuit += builder(pos)
    for i, pos in enumerate(positions[:-1]):
        # CX ladder entangling all positions.
        pre_circuit += tq.gates.CX(pos, positions[i + 1])
    # Removed unused local `mid_gate_val` (the mid label is irrelevant
    # for the parametrized circuit).
    # Seed-deterministic unique variable name for the rotation angle.
    np.random.seed(global_seed)
    global_seed += 1
    var_name = "var" + str(np.random.rand())
    mid_circuit = tq.gates.Rz(target=positions[-1], angle=tq.Variable(var_name))
    return pre_circuit + mid_circuit + pre_circuit.dagger()
def schedule_actions_ratio(epoch: int, action_options: Union[tuple, list] = ('delete', 'change', 'add'),
                           decay: int = 30,
                           steady_ratio: Union[tuple, list] = (0.2, 0.6, 0.2)) -> list:
    """Exponentially anneal the delete/change/add probabilities.

    Early epochs favour 'add' (ratio near 1); with time constant *decay*
    the ratios relax towards *steady_ratio* = (delete, change, add).
    Mutable-list defaults replaced with tuples (never mutated, values
    unchanged).

    Note: unknown action names are reported but contribute no entry, so
    the returned list can be shorter than *action_options*.
    """
    delete, change, add = tuple(steady_ratio)
    # Loop-invariant annealing factor: 1 at epoch 0, -> 0 for late epochs.
    anneal = np.exp(-1 * epoch / decay)
    actions_ratio = []
    for action in action_options:
        if action == 'delete':
            actions_ratio += [delete * (1 - anneal)]
        elif action == 'change':
            actions_ratio += [change * (1 - anneal)]
        elif action == 'add':
            actions_ratio += [(1 - add) * anneal + add]
        else:
            print('Action type ', action, ' not defined!')
    return actions_ratio
def get_action(ratio: list = [0.20, 0.60, 0.20]):
    """Randomly draw one action out of ('delete', 'change', 'add').

    *ratio* gives the multinomial probabilities of the three options,
    in that order.
    """
    draw = np.random.multinomial(n=1, pvals=ratio, size=1)[0]
    hit = np.where(draw == 1)[0].item()
    return ['delete', 'change', 'add'][hit]
def get_prob_acceptance(E_curr, E_prev, T, reference_energy):
    """Metropolis-style acceptance probability for a proposed change.

    Accept with probability 1 when the energy decreased and the new
    energy beats *reference_energy*; otherwise accept with the Boltzmann
    factor ``exp(-delta_E / T)`` while still below *reference_energy*.
    Anything at or above the reference is rejected outright (prob 0).

    Removed debug leftovers: the ``delta_E = 12`` sentinel and a
    try/except whose handler recomputed the failing subtraction (and so
    re-raised anyway); a failing subtraction now raises immediately.
    """
    delta_E = E_curr - E_prev
    if E_curr >= reference_energy:
        return 0
    if delta_E < 0:
        return 1
    return np.exp(-delta_E / T)
def perform_folding(hamiltonian, circuit):
    """Fold *circuit* into *hamiltonian*, gate by gate, back to front.

    Converts the QubitHamiltonian into its parametrized form and folds
    each unitary of *circuit* into it in reverse gate order.

    Bugfix: iterate with ``reversed(...)`` instead of calling
    ``circuit.gates.reverse()``, which reversed the caller's circuit
    in place as a side effect.
    """
    param_hamiltonian = convert_tq_QH_to_PQH(hamiltonian)
    for gate in reversed(circuit.gates):
        param_hamiltonian = fold_unitary_into_hamiltonian(gate, param_hamiltonian)
    return param_hamiltonian
def evaluate_fitness(instructions, hamiltonian: tq.QubitHamiltonian, type_energy_eval: str, cluster_circuit: tq.QCircuit=None) -> tuple:
    """Evaluate the objective (energy) for a set of instructions.

    Builds the circuit described by *instructions*, folds it into
    *hamiltonian*, and minimizes the energy of the folded Hamiltonian.

    Returns
    -------
    tuple
        ``(E_new, optimal_state)`` as produced by ``minimize_energy``.
    """
    n_qubits = instructions.n_qubits
    clifford_circuit, init_angles = build_circuit(instructions)
    # NOTE(review): whether cluster_circuit is itself parametrized is not
    # checked here -- confirm against callers.
    folded_hamiltonian = perform_folding(hamiltonian, clifford_circuit)
    initial_guess = None
    if len(clifford_circuit.extract_variables()) > 0:
        # Parametrized circuit: seed the optimizer with a flat guess of
        # 0.1 per variable, or with the best reference wavefunction when
        # one is available.
        variables = [gate.extract_variables() for gate in clifford_circuit.gates
                     if gate.extract_variables()]
        variables = np.array(variables).flatten().tolist()
        initial_guess = {k: 0.1 for k in variables}
        if instructions.best_reference_wfn is not None:
            initial_guess = instructions.best_reference_wfn
    else:
        # Purely Clifford circuit: the folded Hamiltonian carries no
        # parameters and can be converted back to a plain Hamiltonian.
        folded_hamiltonian = (convert_PQH_to_tq_QH(folded_hamiltonian))()
    E_new, optimal_state = minimize_energy(hamiltonian=folded_hamiltonian, n_qubits=n_qubits,
                                           type_energy_eval=type_energy_eval,
                                           cluster_circuit=cluster_circuit,
                                           initial_guess=initial_guess,
                                           initial_mixed_angles=init_angles)
    return E_new, optimal_state
| 26,784 | 33.967363 | 191 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/plot_term_increase.py | import numpy as np
import matplotlib.pyplot as plt
# For now, this is all when varying _one_ Clifford gate only
def find_envelope(in_list):
    """Return the (upper, lower) point-wise envelope of a list of series.

    For each inner sequence, its maximum goes into `upper` and its minimum
    into `lower`; both outputs preserve the order of `in_list`.
    """
    lower = [np.min(series) for series in in_list]
    upper = [np.max(series) for series in in_list]
    return upper, lower
# >>>>>>>>>>>>>>>>>> BEGIN DATA >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
distances_h2 = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0]
distances_beh2 = [1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
distances_n2 = [0.75, 1.0, 1.3, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]
# h2_before_folding = [[62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 
62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62], [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62]]
# h2_after_folding = [[90, 110, 90, 110, 134, 114, 114, 90, 118, 114, 114, 118, 114, 118, 114, 90, 90, 118, 140, 140, 118], [123, 108, 106, 108, 82, 123, 106, 121, 106, 121, 122, 108, 110, 118, 122, 122, 106, 108, 106, 108, 122, 121], [128, 91, 144, 91, 91, 91, 91, 124, 124, 91, 135, 141, 133, 125, 124, 125, 115, 129, 134, 124], [138, 136, 128, 141, 140, 144, 120, 128, 96, 144, 147, 144, 145, 96, 96, 128, 156, 132, 62, 126, 142, 144, 96, 142, 120, 137, 96, 156, 126, 120, 132, 126, 96, 122], [125, 120, 120, 121, 84, 124, 84, 117, 111, 111, 115, 129, 126, 111, 120, 120, 115, 111, 115, 111, 127, 112, 112, 84, 115, 126, 120, 120, 117, 84, 106, 115], [136, 90, 126, 134, 128, 90, 100, 118, 128, 128, 140, 140, 134, 90, 90, 114, 114, 126, 120, 130, 118, 110, 134, 90, 114, 90, 114, 114, 118, 114, 114], [137, 91, 91, 134, 91, 139, 129, 134, 134, 129, 132, 125, 91, 116, 129, 125, 129, 124, 116, 115], [94, 95, 62, 91, 62, 62, 90, 62, 90, 91, 84, 97, 97, 90, 84, 90, 97, 93, 91, 84, 62, 62, 90, 90, 81, 90, 91, 97, 91, 78, 97, 94, 91, 78, 84, 91, 91, 89, 84, 84], [81, 62, 62, 90, 89, 95, 84, 92, 84, 62, 62, 78, 84, 89, 84], [95, 148, 132, 134, 95, 124, 142, 127, 146, 146, 127, 143, 144, 157, 140, 145, 144, 115, 134, 129, 138, 127, 124, 146, 143, 128, 95, 95, 95, 122, 115, 137, 95, 137, 128, 128, 123, 127, 122, 122, 124, 138], [123, 137, 139, 137, 91, 144, 136, 136, 136, 124, 124, 138, 136, 133, 134, 91, 132, 124, 115, 137, 125, 115, 115, 123, 125, 124, 123, 134, 132, 135, 91, 134, 91], [118, 90, 118, 158, 138, 126, 126, 144, 120, 114, 90, 90, 120, 136, 132, 134, 130, 138, 90, 114, 114, 134, 90, 132, 90, 90, 120, 134, 114, 126, 90, 128, 114, 118, 144, 126, 138, 133, 118, 114], [108, 78, 116, 108, 114, 78, 114, 78, 108, 117, 108, 78, 106, 110, 116, 78, 78, 110, 78, 114, 108, 115, 114, 108, 115, 78, 110, 110, 110, 108, 120, 108, 128, 108, 108], [123, 78, 108, 78, 78, 110, 78, 78, 116, 122, 118, 117, 123, 104, 78, 107, 117, 115, 107, 106, 108, 115, 123, 117, 114, 78, 107, 78, 110, 
108, 108, 116, 114, 117, 108, 132, 132, 108, 104, 108, 128, 112, 114, 110, 110, 108, 78], [121, 84, 120, 112, 119, 84, 121, 114, 127, 116, 112, 116, 116, 84, 110, 84, 84, 84, 124, 124], [148, 145, 143, 145, 144, 145, 98, 98, 122, 140, 145, 145, 98, 132, 122, 98], [78, 120, 112, 78, 115, 108, 78, 78, 108, 108, 108, 78, 119, 115, 110, 118, 118, 101, 115, 101, 108, 92, 101, 108, 116, 110, 114, 78, 115, 78, 115, 108, 108, 78, 108, 84], [117, 116, 128, 127, 135, 132, 89, 89, 129, 118, 126, 140, 132], [114, 114, 122, 78, 124, 108, 108, 119, 115, 117, 114, 78, 114, 119, 117, 108, 108, 84, 78, 115, 110, 78, 116, 78, 84, 110, 108, 110, 101, 101, 110], [124, 126, 90, 132, 118, 90, 132, 132, 132, 134, 136, 124, 118, 132, 90, 132, 90, 118, 136, 136, 90, 90, 136, 118, 138, 90, 90, 114, 118, 118, 118, 118, 114, 118, 132, 114, 114, 136, 118, 110, 90, 90, 132, 133, 110, 90, 118], [123, 84, 84, 146, 126, 112, 84, 130, 126, 106, 125, 84, 129, 84, 130, 114, 102, 124, 124, 112, 116, 102, 132, 92, 120, 112, 125, 102, 125, 102, 126, 121, 121], [124, 106, 104, 108, 121, 120, 108, 120, 122, 106, 82, 115, 108, 121, 115, 121, 82, 114, 82, 114, 108, 108, 111, 122, 106, 120, 108, 108, 108], [129, 129, 88, 129, 115, 126, 120, 116, 120, 88, 120, 134, 129, 120, 129, 88, 132, 120, 120, 124, 130, 124, 88, 134, 118, 112, 132, 118, 124, 118, 120, 124, 116, 118, 120, 130, 116, 112, 118, 116, 112, 88, 124, 88, 118, 118], [120, 101, 120, 125, 81, 81, 81, 81, 114, 101, 81, 81, 120, 101, 118, 109, 109, 101, 109], [138, 114, 90, 138, 110, 138, 124, 134, 110, 122, 124, 128, 116, 134, 135, 110, 130, 130, 116, 130], [141, 128, 95, 128, 128, 143, 138, 143, 95, 138, 95, 95, 95, 95, 129, 142, 95, 142, 141, 95, 115, 127, 115, 115, 138, 136, 138, 95, 136, 141]]
# Bond length increase
h2_before_folding = [[2002, 2028, 2002, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2002, 2028, 2028, 2028, 2028, 2028], [2340, 2340, 2340, 2340, 2314, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340], [1742, 1716, 1742, 1742, 1716, 1742, 1742, 1742, 1742, 1716, 1742, 1742, 1742, 1742, 1742, 1742, 1742, 1742, 1742, 1742], [3536, 3536, 3536, 3536, 3536, 3536, 3536, 3536, 3510, 3536, 3536, 3536, 3536, 3510, 3510, 3536, 3536, 3536, 3536, 3536, 3536, 3536, 3510, 3536, 3536, 3536, 3510, 3536, 3536, 3536, 3536, 3536, 3510, 3536], [3744, 3744, 3744, 3744, 3718, 3744, 3718, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3718, 3744, 3744, 3744, 3744, 3744, 3718, 3744, 3744], [3120, 3120, 3120, 3120, 3120, 3094, 3120, 3120, 3120, 3120, 3120, 3120, 3120, 3094, 3094, 3120, 3120, 3120, 3120, 3120, 3120, 3120, 3120, 3094, 3120, 3094, 3120, 3120, 3120, 3120, 3120], [2678, 2652, 2678, 2678, 2652, 2678, 2678, 2678, 2678, 2678, 2678, 2678, 2652, 2678, 2678, 2678, 2678, 2678, 2678, 2678], [2990, 2990, 2964, 2990, 2990, 2964, 2990, 2964, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2964, 2964, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990], [1482, 1456, 1456, 1482, 1482, 1482, 1482, 1482, 1482, 1456, 1456, 1482, 1482, 1482, 1482], [2704, 2720, 2720, 2720, 2704, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2704, 2720, 2704, 2720, 2720, 2720, 2704, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720], [2560, 2560, 2560, 2560, 2544, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2544, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2544, 2560, 2544], [2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2352, 2352, 2368, 2368, 2368, 2368, 
2368, 2368, 2352, 2368, 2368, 2368, 2352, 2368, 2352, 2352, 2368, 2368, 2368, 2368, 2352, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368], [2224, 2208, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2208, 2224, 2224, 2224, 2224, 2208, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2208, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224], [3984, 3968, 3984, 3968, 3968, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3968, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3968, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3968], [2496, 2470, 2496, 2496, 2496, 2470, 2496, 2496, 2496, 2496, 2496, 2496, 2496, 2470, 2496, 2470, 2470, 2470, 2496, 2496], [2600, 2600, 2600, 2600, 2600, 2600, 2574, 2574, 2600, 2600, 2600, 2600, 2574, 2600, 2600, 2574], [3302, 3328, 3328, 3302, 3328, 3328, 3302, 3328, 3328, 3328, 3328, 3302, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3302, 3328, 3302, 3328, 3328, 3328, 3328, 3328, 3328], [1768, 1768, 1768, 1768, 1768, 1768, 1742, 1742, 1768, 1768, 1768, 1768, 1768], [2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2392, 2418, 2418, 2418, 2418, 2392, 2418, 2418, 2418, 2418, 2418, 2418, 2418], [4888, 4888, 4862, 4888, 4888, 4862, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4862, 4888, 4888, 4888, 4888, 4862, 4862, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4862, 4862, 4888, 4888, 4888, 4862, 4888], [3198, 3172, 3172, 3198, 3198, 3198, 3172, 3198, 3198, 3198, 3198, 3172, 3198, 3172, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198], [3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3146, 3172, 3172, 3172, 3172, 3172, 3146, 3172, 3146, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 
3172], [5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5694, 5720, 5720], [2236, 2236, 2236, 2236, 2210, 2210, 2210, 2210, 2236, 2236, 2210, 2210, 2236, 2236, 2236, 2236, 2236, 2236, 2236], [2106, 2106, 2080, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106], [2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2782, 2808, 2782, 2808, 2782, 2782, 2808, 2808, 2782, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2782, 2808, 2808]]
h2_after_folding = [[2002, 2028, 2002, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2028, 2002, 2028, 2028, 2028, 2028, 2028], [2340, 2340,
2340, 2340, 2314, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340, 2340], [1742, 1716, 1742, 1742, 1716, 1742, 1742, 1742, 1742, 1716, 1742, 1742, 1742, 1742, 1742, 1742, 1742, 1742, 1742, 1742], [3536, 3536, 3536, 3536, 3536, 3536, 3536, 3536, 3510, 3536, 3536, 3536, 3536, 3510, 3510, 3536, 3536, 3536, 3536, 3536, 3536, 3536, 3510, 3536, 3536, 3536, 3510, 3536, 3536, 3536, 3536, 3536, 3510, 3536], [3744, 3744, 3744, 3744, 3718, 3744, 3718, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3744, 3718, 3744, 3744, 3744, 3744, 3744, 3718, 3744, 3744], [3120, 3120, 3120, 3120, 3120, 3094, 3120, 3120, 3120, 3120, 3120, 3120, 3120, 3094, 3094, 3120, 3120, 3120, 3120, 3120, 3120, 3120, 3120, 3094, 3120, 3094, 3120, 3120, 3120, 3120, 3120], [2678, 2652, 2678, 2678, 2652, 2678, 2678, 2678, 2678, 2678, 2678, 2678, 2652, 2678, 2678, 2678, 2678, 2678, 2678, 2678], [2990, 2990, 2964, 2990, 2990, 2964, 2990, 2964, 2990, 2990,
2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2964, 2964, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990, 2990], [1482, 1456, 1456, 1482, 1482, 1482, 1482, 1482, 1482, 1456, 1456, 1482, 1482, 1482, 1482], [2704, 2720, 2720, 2720, 2704, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2704, 2720, 2704, 2720, 2720, 2720, 2704, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720, 2720], [2560, 2560, 2560, 2560, 2544, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2544, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2560, 2544, 2560, 2544], [2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2352, 2352, 2368, 2368, 2368, 2368, 2368, 2368, 2352, 2368, 2368, 2368, 2352, 2368, 2352, 2352, 2368, 2368, 2368, 2368, 2352, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368, 2368], [2224, 2208, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2208, 2224, 2224, 2224, 2224, 2208, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2208, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224, 2224], [3984, 3968, 3984, 3968, 3968, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3968, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3968, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3984, 3968], [2496, 2470, 2496, 2496, 2496, 2470, 2496, 2496, 2496, 2496, 2496, 2496, 2496, 2470, 2496, 2470, 2470, 2470, 2496, 2496], [2600, 2600, 2600, 2600, 2600, 2600, 2574, 2574, 2600, 2600, 2600, 2600, 2574, 2600, 2600, 2574], [3302, 3328, 3328, 3302, 3328, 3328, 3302, 3328, 3328, 3328, 3328, 3302, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3328, 3302, 3328, 3302, 3328, 3328, 3328, 3328, 3328, 3328], [1768, 1768, 1768, 1768, 1768, 1768, 1742, 1742, 1768, 1768, 1768, 1768, 1768], [2418, 2418, 
2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2418, 2392, 2418, 2418, 2418, 2418, 2392, 2418, 2418, 2418, 2418, 2418, 2418, 2418], [4888, 4888, 4862, 4888, 4888, 4862, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4862, 4888, 4888, 4888, 4888, 4862, 4862, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4888, 4862, 4862, 4888, 4888, 4888, 4862, 4888], [3198, 3172, 3172, 3198, 3198, 3198, 3172, 3198, 3198, 3198, 3198, 3172, 3198, 3172, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198, 3198], [3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172,
3172, 3172, 3146, 3172, 3172, 3172, 3172, 3172, 3146, 3172, 3146, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172, 3172], [5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720, 5720,
5720, 5720, 5720, 5720, 5720, 5720, 5694, 5720, 5694, 5720, 5720], [2236, 2236, 2236, 2236, 2210, 2210, 2210, 2210, 2236, 2236, 2210, 2210, 2236, 2236, 2236, 2236, 2236, 2236, 2236], [2106, 2106, 2080, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106, 2106], [2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2782, 2808, 2782, 2808, 2782, 2782, 2808, 2808, 2782, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2808, 2782, 2808, 2808]]
# Test difference
# NOTE(review): the h2 sub-lists have different lengths (ragged), so
# np.array(...) on them raises a ValueError under numpy >= 1.24 (older
# versions build a deprecated object array) -- confirm the intended numpy
# version, or compare the plain nested lists with `==` instead.
equal = np.equal(np.array(h2_before_folding), np.array(h2_after_folding))
print("h2 equal???", equal)
# beh2_before_folding = [[361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361], [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361]]
# beh2_after_folding = [[541, 361, 361, 529, 541, 541, 447, 447, 539, 481, 537, 361, 539, 519, 481, 539, 361, 539, 529, 361, 519, 481, 481, 447, 481, 481], [529, 481, 481, 535, 481, 515, 539, 513, 481], [543, 513, 361, 547, 537, 361, 547, 541, 481, 529, 361, 535, 539, 481, 537, 529, 481, 529, 361, 361, 547, 529, 547, 539, 447, 539, 535, 515, 539, 535, 545, 447, 539, 481, 539, 529, 481, 481, 481], [541, 513, 361, 545, 539, 519, 481, 539, 539, 539, 541, 481, 535, 361, 537, 539, 539, 539, 519, 537, 539, 529, 361, 447, 513, 361, 447, 481, 539, 539, 361], [539, 481, 543, 541, 549, 539, 541, 539, 361, 541, 481, 447, 537, 501, 541, 481, 541, 541, 543, 537, 539, 535, 481, 481, 361, 481, 481, 481, 537, 529, 481, 481, 529, 539, 481, 539, 447, 447, 539], [541, 541, 361, 481, 539, 539, 539, 537, 513, 361, 537, 537, 481, 539, 537, 537, 537, 519, 361, 361, 539, 481, 539, 481, 481, 481, 539, 361, 537, 481, 539, 539], [519, 539, 543, 519, 519, 539, 537, 539, 535, 537, 537, 539, 481, 515, 519, 529, 539, 519, 515, 535, 481, 529, 481, 519, 513, 519, 481, 537, 537, 535, 361, 447, 481], [543, 541, 513, 361, 537, 535, 541, 541, 361, 515, 519, 361, 529, 481, 481, 515, 481, 537, 361, 361, 539, 481, 519, 529, 481, 519, 539, 539, 513, 361], [537, 537, 555, 543, 545, 529, 481, 529, 537, 519, 529, 537, 513, 481, 481, 539, 539], [547, 519, 531, 539, 361, 539, 361, 537, 539, 361, 539, 539, 539, 535, 481, 447, 481], [481, 539, 481, 481, 361, 529, 481, 537, 539, 481, 361, 537, 481, 539, 537, 481, 481, 481, 361, 519, 361, 481, 481, 535, 361, 481, 481, 519, 481]]
# ############ bond dimensions ####################
beh2_before_folding = [[4002, 3956, 3956, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 3956, 4002, 4002, 4002, 4002, 3956, 4002, 4002, 3956, 4002, 4002, 4002, 4002, 4002, 4002], [2254, 2254, 2254, 2254, 2254, 2254, 2254, 2254, 2254], [9200, 9200, 9154, 9200, 9200, 9154, 9200, 9200, 9200, 9200, 9154, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9154, 9154, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200], [5934, 5934, 5888, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5888, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5888, 5934, 5934, 5888, 5934, 5934, 5934, 5934, 5888], [6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6900, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6900, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946], [5980, 5980, 5934, 5980, 5980, 5980, 5980, 5980, 5980, 5934, 5980, 5980, 5980, 5980, 5980, 5980, 5980, 5980, 5934, 5934, 5980, 5980, 5980, 5980, 5980, 5980, 5980, 5934, 5980, 5980, 5980, 5980], [4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4830, 4876, 4876], [5612, 5612, 5612, 5566, 5612, 5612, 5612, 5612, 5566, 5612, 5612, 5566, 5612, 5612, 5612, 5612, 5612, 5612, 5566, 5566, 5612, 5612, 5612, 5612, 5612, 5612, 5612, 5612, 5612, 5566], [4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002], [4370, 4370, 4370, 4370, 4324, 4370, 4324, 4370, 4370, 4324, 4370, 4370, 4370, 4370, 4370, 4370, 4370], [4462, 4462, 4462, 4462, 4416, 4462, 4462, 4462, 4462, 4462, 4416, 4462, 4462, 4462, 4462, 4462, 4462, 4462, 4416, 4462, 4416, 4462, 4462, 4462, 4416, 4462, 4462, 4462, 4462]]
beh2_after_folding = [[4002, 3956, 3956, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 3956, 4002, 4002, 4002, 4002, 3956, 4002, 4002, 3956, 4002, 4002, 4002,
4002, 4002, 4002], [2254, 2254, 2254, 2254, 2254, 2254, 2254, 2254, 2254], [9200, 9200, 9154, 9200, 9200, 9154, 9200, 9200, 9200, 9200, 9154, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9154, 9154, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200, 9200], [5934, 5934, 5888, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5888, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5934, 5888, 5934, 5934, 5888, 5934, 5934, 5934, 5934, 5888], [6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6900, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6900, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946, 6946], [5980, 5980, 5934, 5980, 5980, 5980, 5980, 5980, 5980, 5934, 5980, 5980, 5980, 5980, 5980, 5980, 5980, 5980, 5934, 5934, 5980, 5980, 5980, 5980, 5980, 5980, 5980, 5934, 5980, 5980, 5980, 5980], [4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876,
4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4876, 4830, 4876, 4876], [5612, 5612, 5612, 5566, 5612, 5612, 5612, 5612, 5566, 5612, 5612, 5566, 5612, 5612, 5612, 5612, 5612, 5612, 5566, 5566, 5612, 5612, 5612, 5612, 5612, 5612, 5612, 5612, 5612, 5566], [4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002, 4002], [4370, 4370, 4370, 4370, 4324, 4370, 4324, 4370, 4370, 4324, 4370, 4370, 4370, 4370, 4370, 4370, 4370], [4462, 4462, 4462, 4462, 4416, 4462, 4462, 4462, 4462, 4462, 4416, 4462, 4462, 4462, 4462, 4462, 4462, 4462, 4416, 4462, 4416, 4462, 4462, 4462, 4416, 4462, 4462, 4462, 4462]]
# BUG FIX: this check previously compared the h2_* lists while labelling the
# printed output "beh2" (copy-paste slip); it now compares the beh2_* data
# it claims to report.
# NOTE(review): like the h2 check above, the beh2 sub-lists are ragged, so
# np.array(...) raises under numpy >= 1.24 -- confirm the numpy version.
equal = np.equal(np.array(beh2_before_folding), np.array(beh2_after_folding))
print("beh2 equal???", equal)
raise Exception("end here")
# ############ end bond dimensions ####################
n2_before_folding = [[1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791, 1791], [919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919], [943, 943, 943, 943, 943, 943, 943, 943, 943, 943, 943, 943], [919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919, 919], [931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931, 931], [1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255, 1255], [1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371, 1371], [1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435, 1435], [1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391, 1391], [1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747, 1747]]
n2_after_folding = [[2665, 2674, 2631, 2658, 2658, 2584, 2663, 2257, 2584, 2309, 2521, 2673, 1791, 2257, 2257, 1791, 2247, 1791, 2667, 2247, 2257, 2247, 2257, 2257, 2247, 1791, 2495, 2257, 2247], [1069, 919, 1237, 919, 919, 919, 1155, 919, 1367, 1325, 1069, 1155, 919, 1155, 1365, 1155, 1155, 1155, 1237, 1325, 919, 919, 1245, 1311, 1287, 1069], [1351, 1279, 1367, 1411, 1183, 1295, 1319, 1183, 1363, 1363, 1183, 1183], [1369, 1237, 1155, 1155, 1365, 1325, 919, 919, 1287, 1155, 1287, 1155, 1325], [1387, 1167, 1377, 1167, 931, 1297, 1249, 1299, 1377, 1283, 1385, 1389, 1385, 1275, 1337, 931, 1369, 1167, 1337, 1345, 1275, 1299, 1167, 1081, 1345, 1081, 1337, 1167, 1337, 1167, 1337, 1377], [1877, 1687, 1579, 1579, 1687, 1255, 1695, 1255, 1869, 1695, 1847, 1747, 1579, 1579, 1457], [1947, 2484, 2511, 1947, 1703, 1703, 2503, 2484, 1703, 1881, 1703, 2484, 2515, 2359, 1947, 1881], [2119, 1997, 1435, 2146, 1435, 2125, 1789, 1936, 2145, 2147, 1936, 2146, 1435, 2034, 2132, 2158, 1789, 2140, 1435, 1967, 2140, 1997, 2143, 2078, 2097, 1936, 2145, 2134, 2145, 2093, 1789, 1967, 1789, 1435, 1789, 2007, 1789, 2124, 1789, 2007, 1435, 1789, 1789, 1435, 1789, 2123, 1435], [1719, 2055, 2070, 1391, 2050, 1735, 2063, 2037, 2033, 2048, 2060, 1391, 1391, 1735, 1735, 1735, 1735, 2031, 1391, 2037, 1971, 1391, 1391, 1907, 1391, 1391, 1907, 1603, 1603], [2621, 2447, 2607, 2565, 1747, 2609, 2598, 1747, 2612, 2609, 2621, 2598, 2617, 2617, 2467, 1747, 2613, 2553, 2496, 1747, 2419, 2604, 2415, 1747, 2536, 2598, 2598, 2496, 1747, 2415, 2485, 2201, 2201, 2185, 2362, 2598, 2185, 2201]]
# NOTE(review): execution never reaches this point -- the
# `raise Exception("end here")` earlier in the script aborts first, so
# everything below is currently dead code kept from the original plotting
# workflow (and this second raise would stop it again anyway).
raise Exception("last obstacle before plotting")
# One entry per bond distance; per the commented labels above these are
# presumably Hamiltonian term counts before folding -- TODO confirm.
before_h2 = [62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62]
before_beh2 = [361, 361, 361, 361, 361, 361, 361, 361, 361, 361, 361]
before_n2 = [1791, 919, 943, 919, 931, 1255, 1371, 1435, 1391, 1747]
# Counts for the best folded circuit found at each bond distance.
opt_h2 = [90, 82, 91, 96, 84, 90, 91, 62, 62, 95, 91, 90, 78, 78, 84, 98, 78, 89, 78, 90, 84, 82, 88, 81, 90, 95]
opt_beh2 = [539, 481, 529, 541, 447, 481, 481, 515, 513, 531, 481]
opt_n2 = [2247, 919, 1183, 919, 1377, 1695, 1703, 1435, 1391, 2415]
# <<<<<<<<<<<<<<<<<< END DATA <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# beh2_optimal = []
# Per-molecule upper/lower envelopes over all sampled folded circuits.
up_h2, low_h2 = find_envelope(h2_after_folding)
up_beh2, low_beh2 = find_envelope(beh2_after_folding)
up_n2, low_n2 = find_envelope(n2_after_folding)
# Index convention: 0 = H2, 1 = BeH2, 2 = N2.
distances = [distances_h2, distances_beh2, distances_n2]
uppers = [up_h2, up_beh2, up_n2]
lowers = [low_h2, low_beh2, low_n2]
befores = [before_h2, before_beh2, before_n2]
optimals = [opt_h2, opt_beh2, opt_n2]
# Sanity check: every data series must match its distance grid in length.
for i in range(3):
    assert(len(distances[i]) == len(lowers[i]))
    assert(len(uppers[i]) == len(optimals[i]))
# fig, ax = plt.subplots(nrows=1, ncols=3, squeeze=True, figsize=(14,7))
fig, ax = plt.subplots()
# for i in range(3):
# Single-axes variant: only the first molecule (H2) is plotted.
for i in range(1):
    # ax[i].plot(distances[i], befores[i], '*-', label='before folding')
    # ax[i].plot(distances[i], optimals[i], 'D-', label='after folding best circuit')
    # ax[i].fill_between(distances[i], lowers[i], uppers[i], alpha=.2, label='range of folded circuits')
    # ax[i].set_xlabel('Bond distance in Angstrom')
    ax.plot(distances[i], befores[i], '*-', label='before folding')
    ax.plot(distances[i], optimals[i], 'D-', label='after folding best circuit')
    # ax.fill_between(distances[i], lowers[i], uppers[i], alpha=.2, label='range of folded circuits')
    ax.set_xlabel('Bond distance in Angstrom')
# ax[0].set_title('H2')
# ax[1].set_title('BeH2')
# ax[2].set_title('N2')
# ax[2].legend()
# ax[0].set_ylabel('Number of Hamiltonian terms')
fig.savefig('testfig_dimsh2.pdf')
| 29,307 | 286.333333 | 4,705 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/hacked_openfermion_qubit_operator.py | import tequila as tq
import sympy
import copy
#from param_hamiltonian import get_geometry, generate_ucc_ansatz
from hacked_openfermion_symbolic_operator import SymbolicOperator
# Define products of all Pauli operators for symbolic multiplication.
# Maps (left, right) -> (phase, product), e.g. X * Y = 1j * Z.
_PAULI_OPERATOR_PRODUCTS = {
    ('I', 'I'): (1., 'I'),
    ('I', 'X'): (1., 'X'),
    ('X', 'I'): (1., 'X'),
    ('I', 'Y'): (1., 'Y'),
    ('Y', 'I'): (1., 'Y'),
    ('I', 'Z'): (1., 'Z'),
    ('Z', 'I'): (1., 'Z'),
    ('X', 'X'): (1., 'I'),
    ('Y', 'Y'): (1., 'I'),
    ('Z', 'Z'): (1., 'I'),
    ('X', 'Y'): (1.j, 'Z'),
    ('X', 'Z'): (-1.j, 'Y'),
    ('Y', 'X'): (-1.j, 'Z'),
    ('Y', 'Z'): (1.j, 'X'),
    ('Z', 'X'): (1.j, 'Y'),
    ('Z', 'Y'): (-1.j, 'X')
}
# Single-qubit conjugation table for the Hadamard gate:
# P -> (sign, P') with H X H = Z, H Y H = -Y, H Z H = X.
_clifford_h_products = {
    ('I') : (1., 'I'),
    ('X') : (1., 'Z'),
    ('Y') : (-1., 'Y'),
    ('Z') : (1., 'X')
}
# Conjugation table used when folding an S gate through a Pauli.
# NOTE(review): X -> -Y, Y -> X matches S^dagger P S, while the *_s_dag_*
# table below matches S P S^dagger; the naming presumably encodes the folding
# direction -- confirm against the folding routines before relying on it.
_clifford_s_products = {
    ('I') : (1., 'I'),
    ('X') : (-1., 'Y'),
    ('Y') : (1., 'X'),
    ('Z') : (1., 'Z')
}
# Conjugation table for the S^dagger gate (see the note above).
_clifford_s_dag_products = {
    ('I') : (1., 'I'),
    ('X') : (1., 'Y'),
    ('Y') : (-1., 'X'),
    ('Z') : (1., 'Z')
}
# Two-qubit conjugation table for CNOT with key order (control, target):
# (P_c, P_t) -> (sign, P_c', P_t'), e.g. X on the control spreads to X X.
_clifford_cx_products = {
    ('I', 'I'): (1., 'I', 'I'),
    ('I', 'X'): (1., 'I', 'X'),
    ('I', 'Y'): (1., 'Z', 'Y'),
    ('I', 'Z'): (1., 'Z', 'Z'),
    ('X', 'I'): (1., 'X', 'X'),
    ('X', 'X'): (1., 'X', 'I'),
    ('X', 'Y'): (1., 'Y', 'Z'),
    ('X', 'Z'): (-1., 'Y', 'Y'),
    ('Y', 'I'): (1., 'Y', 'X'),
    ('Y', 'X'): (1., 'Y', 'I'),
    ('Y', 'Y'): (-1., 'X', 'Z'),
    ('Y', 'Z'): (1., 'X', 'Y'),
    ('Z', 'I'): (1., 'Z', 'I'),
    ('Z', 'X'): (1., 'Z', 'X'),
    ('Z', 'Y'): (1., 'I', 'Y'),
    ('Z', 'Z'): (1., 'I', 'Z'),
}
# Two-qubit conjugation table for controlled-Y, same key convention.
_clifford_cy_products = {
    ('I', 'I'): (1., 'I', 'I'),
    ('I', 'X'): (1., 'Z', 'X'),
    ('I', 'Y'): (1., 'I', 'Y'),
    ('I', 'Z'): (1., 'Z', 'Z'),
    ('X', 'I'): (1., 'X', 'Y'),
    ('X', 'X'): (-1., 'Y', 'Z'),
    ('X', 'Y'): (1., 'X', 'I'),
    ('X', 'Z'): (-1., 'Y', 'X'),
    ('Y', 'I'): (1., 'Y', 'Y'),
    ('Y', 'X'): (1., 'X', 'Z'),
    ('Y', 'Y'): (1., 'Y', 'I'),
    ('Y', 'Z'): (-1., 'X', 'X'),
    ('Z', 'I'): (1., 'Z', 'I'),
    ('Z', 'X'): (1., 'I', 'X'),
    ('Z', 'Y'): (1., 'Z', 'Y'),
    ('Z', 'Z'): (1., 'I', 'Z'),
}
# Two-qubit conjugation table for controlled-Z (symmetric in the two qubits).
_clifford_cz_products = {
    ('I', 'I'): (1., 'I', 'I'),
    ('I', 'X'): (1., 'Z', 'X'),
    ('I', 'Y'): (1., 'Z', 'Y'),
    ('I', 'Z'): (1., 'I', 'Z'),
    ('X', 'I'): (1., 'X', 'Z'),
    ('X', 'X'): (-1., 'Y', 'Y'),
    ('X', 'Y'): (-1., 'Y', 'X'),
    ('X', 'Z'): (1., 'X', 'I'),
    ('Y', 'I'): (1., 'Y', 'Z'),
    ('Y', 'X'): (-1., 'X', 'Y'),
    ('Y', 'Y'): (1., 'X', 'X'),
    ('Y', 'Z'): (1., 'Y', 'I'),
    ('Z', 'I'): (1., 'Z', 'I'),
    ('Z', 'X'): (1., 'I', 'X'),
    ('Z', 'Y'): (1., 'I', 'Y'),
    ('Z', 'Z'): (1., 'Z', 'Z'),
}
# Scalar types accepted as term coefficients, including symbolic expressions
# and tequila variables for parametrized Hamiltonians.
COEFFICIENT_TYPES = (int, float, complex, sympy.Expr, tq.Variable)
class ParamQubitHamiltonian(SymbolicOperator):
    @property
    def actions(self):
        """The allowed actions: the single-qubit Pauli labels X, Y, Z."""
        return ('X', 'Y', 'Z')
    @property
    def action_strings(self):
        """The string representations of the allowed actions.

        Identical to `actions` here since the Pauli labels are their own
        string form.
        """
        return ('X', 'Y', 'Z')
    @property
    def action_before_index(self):
        """Whether action comes before index in string representations.

        True -- presumably terms render like "X0" rather than "0X"; the
        actual formatting lives in the SymbolicOperator base class.
        """
        return True
@property
def different_indices_commute(self):
"""Whether factors acting on different indices commute."""
return True
def renormalize(self):
"""Fix the trace norm of an operator to 1"""
norm = self.induced_norm(2)
if numpy.isclose(norm, 0.0):
raise ZeroDivisionError('Cannot renormalize empty or zero operator')
else:
self /= norm
def _simplify(self, term, coefficient=1.0):
"""Simplify a term using commutator and anti-commutator relations."""
if not term:
return coefficient, term
term = sorted(term, key=lambda factor: factor[0])
new_term = []
left_factor = term[0]
for right_factor in term[1:]:
left_index, left_action = left_factor
right_index, right_action = right_factor
# Still on the same qubit, keep simplifying.
if left_index == right_index:
new_coefficient, new_action = _PAULI_OPERATOR_PRODUCTS[
left_action, right_action]
left_factor = (left_index, new_action)
coefficient *= new_coefficient
# Reached different qubit, save result and re-initialize.
else:
if left_action != 'I':
new_term.append(left_factor)
left_factor = right_factor
# Save result of final iteration.
if left_factor[1] != 'I':
new_term.append(left_factor)
return coefficient, tuple(new_term)
def _clifford_simplify_h(self, qubit):
"""simplifying the Hamiltonian using the clifford group property"""
fold_ham = {}
for term in self.terms:
#there should be a better way to do this
new_term = []
coeff = 1.0
for left, right in term:
if left == qubit:
coeff, new_pauli = _clifford_h_products[right]
new_term.append(tuple((left, new_pauli)))
else:
new_term.append(tuple((left,right)))
fold_ham[tuple(new_term)] = coeff*self.terms[term]
self.terms = fold_ham
return self
def _clifford_simplify_s(self, qubit):
"""simplifying the Hamiltonian using the clifford group property"""
fold_ham = {}
for term in self.terms:
#there should be a better way to do this
new_term = []
coeff = 1.0
for left, right in term:
if left == qubit:
coeff, new_pauli = _clifford_s_products[right]
new_term.append(tuple((left, new_pauli)))
else:
new_term.append(tuple((left,right)))
fold_ham[tuple(new_term)] = coeff*self.terms[term]
self.terms = fold_ham
return self
def _clifford_simplify_s_dag(self, qubit):
"""simplifying the Hamiltonian using the clifford group property"""
fold_ham = {}
for term in self.terms:
#there should be a better way to do this
new_term = []
coeff = 1.0
for left, right in term:
if left == qubit:
coeff, new_pauli = _clifford_s_dag_products[right]
new_term.append(tuple((left, new_pauli)))
else:
new_term.append(tuple((left,right)))
fold_ham[tuple(new_term)] = coeff*self.terms[term]
self.terms = fold_ham
return self
def _clifford_simplify_control_g(self, axis, control_q, target_q):
"""simplifying the Hamiltonian using the clifford group property"""
fold_ham = {}
for term in self.terms:
#there should be a better way to do this
new_term = []
coeff = 1.0
target = "I"
control = "I"
for left, right in term:
if left == control_q:
control = right
elif left == target_q:
target = right
else:
new_term.append(tuple((left,right)))
new_c = "I"
new_t = "I"
if not (target == "I" and control == "I"):
if axis == "X":
coeff, new_c, new_t = _clifford_cx_products[control, target]
if axis == "Y":
coeff, new_c, new_t = _clifford_cy_products[control, target]
if axis == "Z":
coeff, new_c, new_t = _clifford_cz_products[control, target]
if new_c != "I":
new_term.append(tuple((control_q, new_c)))
if new_t != "I":
new_term.append(tuple((target_q, new_t)))
new_term = sorted(new_term, key=lambda factor: factor[0])
fold_ham[tuple(new_term)] = coeff*self.terms[term]
self.terms = fold_ham
return self
if __name__ == "__main__":
    # Ad-hoc experimentation / smoke-test driver for ParamQubitHamiltonian.
    # The triple-quoted string below is dead code kept for reference: an
    # earlier experiment driving the UCC-ansatz / binary-Hamiltonian path.
    """geometry = get_geometry("H2", 0.714)
    print(geometry)
    basis_set = 'sto-3g'
    ref_anz, uccsd_anz, ham = generate_ucc_ansatz(geometry, basis_set)
    print(ham)
    b_ham = tq.grouping.binary_rep.BinaryHamiltonian.init_from_qubit_hamiltonian(ham)
    c_ham = tq.grouping.binary_rep.BinaryHamiltonian.init_from_qubit_hamiltonian(ham)
    print(b_ham)
    print(b_ham.get_binary())
    print(b_ham.get_coeff())
    param = uccsd_anz.extract_variables()
    print(param)
    for term in b_ham.binary_terms:
        print(term.coeff)
        term.set_coeff(param[0])
        print(term.coeff)
    print(b_ham.get_coeff())
    d_ham = c_ham.to_qubit_hamiltonian() + b_ham.to_qubit_hamiltonian()
    """
    # Build the Pauli word Y0 X2 Z3 with symbolic coefficient 2j * a;
    # tq.Variable supports arithmetic and call-style evaluation.
    term = [(2,'X'), (0,'Y'), (3, 'Z')]
    coeff = tq.Variable("a")
    coeff = coeff *2.j
    print(coeff)
    print(type(coeff))
    print(coeff({"a":1}))
    ham = ParamQubitHamiltonian(term= term, coefficient=coeff)
    print(ham.terms)
    print(str(ham))
    # Evaluate each (symbolic) coefficient at a concrete assignment.
    for term in ham.terms:
        print(ham.terms[term]({"a":1,"b":2}))
    # Second operator Z0 X2 Z3 with coefficient b.
    term = [(2,'X'), (0,'Z'), (3, 'Z')]
    coeff = tq.Variable("b")
    print(coeff({"b":1}))
    b_ham = ParamQubitHamiltonian(term= term, coefficient=coeff)
    print(b_ham.terms)
    print(str(b_ham))
    for term in b_ham.terms:
        print(b_ham.terms[term]({"a":1,"b":2}))
    # A product of two symbolic variables also evaluates pointwise.
    coeff = tq.Variable("a")*tq.Variable("b")
    print(coeff)
    print(coeff({"a":1,"b":2}))
    # In-place operator product: coefficients should multiply symbolically.
    ham *= b_ham
    print(ham.terms)
    print(str(ham))
    for term in ham.terms:
        coeff = (ham.terms[term])
        print(coeff)
        print(coeff({"a":1,"b":2}))
    # Scalar multiplication by a plain float.
    ham = ham*2.
    print(ham.terms)
    print(str(ham))
| 9,918 | 29.614198 | 85 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.