File size: 1,891 Bytes
62d1028
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
from dataclasses import dataclass

from transformers.configuration_utils import PretrainedConfig


class SymTimeConfig(PretrainedConfig):
    """
    Time series encoder configuration for the SymTime model.

    Parameters
    ----------
    num_layers
        The number of layers to be used for the encoder.
    d_model
        The dimension of the model.
    d_ff
        The dimension of the feedforward network.
    num_heads
        The number of heads to be used for the attention mechanism.
    norm
        The normalization to be used for the encoder.
    dropout
        The dropout rate to be used for the encoder.
    act
        The activation function to be used for the encoder.
    pre_norm
        Whether to use pre-norm for the encoder.
    patch_size
        The size of the patch to be used for the input data.
    stride
        The stride of the patch to be used for the input data.
    initializer_factor
        Scaling factor applied during weight initialization.
    """

    # NOTE(review): the previous `@dataclass` decorator was removed — with a
    # hand-written __init__ and no annotated fields, dataclass still injected
    # field-less __repr__/__eq__ that shadowed PretrainedConfig's, making any
    # two SymTimeConfig instances compare equal and hiding the useful repr.

    # Registered model identifier used by the transformers config machinery.
    model_type = "symtime"

    def __init__(
        self,
        num_layers: int = 6,
        d_model: int = 512,
        d_ff: int = 2048,
        num_heads: int = 8,
        norm: str = "BatchNorm",
        dropout: float = 0.1,
        act: str = "gelu",
        pre_norm: bool = False,
        patch_size: int = 16,
        stride: int = 16,
        initializer_factor: float = 0.05,
        **kwargs,
    ) -> None:
        # Patch-embedding settings for the raw time series input.
        self.patch_size = patch_size
        self.stride = stride

        # Transformer encoder hyperparameters.
        self.num_layers = num_layers
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_ff = d_ff
        self.norm = norm
        self.dropout = dropout
        self.act = act
        self.pre_norm = pre_norm
        self.initializer_factor = initializer_factor

        # Forward any remaining HF config kwargs (e.g. name_or_path) to the base class.
        super().__init__(**kwargs)