from typing import Optional

from torch import nn, Tensor

class CategoryValueEncoder(nn.Module):
    """Embeds integer category codes (input is cast to long) and applies LayerNorm."""

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
    ):
        super().__init__()
        self.embedding = nn.Embedding(
            num_embeddings, embedding_dim, padding_idx=padding_idx
        )
        self.enc_norm = nn.LayerNorm(embedding_dim)

    def forward(self, x: Tensor) -> Tensor:
        x = x.long()
        x = self.embedding(x)  # (batch, seq_len, embsize)
        x = self.enc_norm(x)
        return x
    
class GeneEncoder(nn.Module):
    """Embeds gene (token) ids and applies LayerNorm."""

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
    ):
        super().__init__()
        self.embedding = nn.Embedding(
            num_embeddings, embedding_dim, padding_idx=padding_idx
        )
        self.enc_norm = nn.LayerNorm(embedding_dim)

    def forward(self, x: Tensor) -> Tensor:
        x = self.embedding(x)  # (batch, seq_len, embsize)
        x = self.enc_norm(x)
        return x
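
# Usage sketch for the two encoders above (illustrative only; the batch size,
# sequence length, vocabulary sizes, and embedding width below are assumptions,
# not values taken from this repository):
if __name__ == "__main__":
    import torch

    gene_ids = torch.randint(0, 100, (2, 16))   # (batch, seq_len) gene token ids
    value_bins = torch.randint(0, 51, (2, 16))  # (batch, seq_len) category codes

    gene_enc = GeneEncoder(num_embeddings=100, embedding_dim=64, padding_idx=0)
    value_enc = CategoryValueEncoder(num_embeddings=51, embedding_dim=64)

    print(gene_enc(gene_ids).shape)     # torch.Size([2, 16, 64])
    print(value_enc(value_bins).shape)  # torch.Size([2, 16, 64])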

class PerturbationEmbedding(nn.Module):
    """Embeds perturbation token ids (e.g. drug1, drug2) and fuses up to
    max_comb_len of them per condition via an 'mlp' or a 'sum' over the token
    embeddings; with output_matrix=True the fused output is reshaped to an
    (emb_dim, emb_dim) matrix."""

    def __init__(self, num_perturbations, emb_dim, max_comb_len=2, fusion_method='mlp', output_matrix=False):
        super().__init__()
        self.embedding = nn.Embedding(num_perturbations, emb_dim)
        self.fusion_method = fusion_method
        self.max_comb_len = max_comb_len
        self.output_matrix = output_matrix
        self.output_dim = emb_dim if not output_matrix else emb_dim * emb_dim

        if fusion_method == 'mlp':
            self.fusion = nn.Sequential(
                nn.Linear(emb_dim * max_comb_len, emb_dim * 2),
                nn.ReLU(),
                nn.Linear(emb_dim * 2, self.output_dim)
            )
        elif fusion_method == 'sum':
            self.fusion = None
        else:
            raise ValueError(f"Unsupported fusion method: {fusion_method}")

    def forward(self, ids):
        # ids: LongTensor of shape [B, max_comb_len]
        emb = self.embedding(ids)  # [B, C, D]
        
        if self.fusion_method == 'mlp':
            emb = emb.view(emb.size(0), -1)  # [B, C*D]
            fused = self.fusion(emb)         # [B, D] or [B, D*D]

            if self.output_matrix:
                B = fused.size(0)
                D = int(self.output_dim ** 0.5)
                return fused.view(B, D, D)   # [B, D, D]
            else:
                return fused

        elif self.fusion_method == 'sum':
            out = emb.sum(dim=1)  # [B, D]
            if self.output_matrix:
                B = out.size(0)
                D = out.size(1)
                return out.view(B, D, 1).expand(B, D, D)  # dummy expansion: broadcasts the vector, not a true interaction matrix
            return out
        
# Earlier version of PerturbationEmbedding (no output_matrix support), kept
# commented out for reference:
# class PerturbationEmbedding(nn.Module):
#     def __init__(self, num_perturbations, emb_dim, max_comb_len=2, fusion_method='mlp'):
#         """
#         Args:
#             num_perturbations: vocabulary size
#             emb_dim: embedding dimension
#             max_comb_len: maximum number of tokens per condition (e.g. drug1, drug2)
#             fusion_method: 'mlp' or 'sum'
#         """
#         super().__init__()
#         self.embedding = nn.Embedding(num_perturbations, emb_dim)
#         self.fusion_method = fusion_method
#         self.max_comb_len = max_comb_len

#         if fusion_method == 'mlp':
#             self.fusion = nn.Sequential(
#                 nn.Linear(emb_dim * max_comb_len, emb_dim),
#                 nn.ReLU(),
#                 nn.Linear(emb_dim, emb_dim)
#             )
#         elif fusion_method == 'sum':
#             self.fusion = None
#         else:
#             raise ValueError(f"Unsupported fusion method: {fusion_method}")
#     def init_weights(self, m):
#         if isinstance(m, nn.Linear):
#             nn.init.xavier_uniform_(m.weight)
#             nn.init.zeros_(m.bias)
            
#     def initialize_weights(self):
#         self.apply(self.init_weights)
        
#     def forward(self, ids):
#         """
#         Args:
#             ids: LongTensor of shape [B, max_comb_len]
#         Returns:
#             fused: Tensor of shape [B, emb_dim]
#         """
        
#         emb = self.embedding(ids)  # [B, C, D]
        
#         if self.fusion_method == 'mlp':
#             emb = emb.view(emb.size(0), -1)  # [B, C*D]
#             return self.fusion(emb)          # [B, D]

#         elif self.fusion_method == 'sum':
#             if emb.dim() == 2:
#                 return emb.sum(dim=0)            # unbatched [C, D] -> [D]
#             elif emb.dim() == 3:
#                 return emb.sum(dim=1)            # [B, C, D] -> [B, D]
#             else:
#                 raise ValueError(f"Unsupported dimension: {emb.dim()}")
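
# Usage sketch for the active PerturbationEmbedding above (illustrative only;
# the vocabulary size, embedding width, and batch size are assumptions, not
# values taken from this repository):
if __name__ == "__main__":
    import torch

    ids = torch.randint(0, 10, (4, 2))  # [B=4, max_comb_len=2] perturbation ids

    pe_mlp = PerturbationEmbedding(num_perturbations=10, emb_dim=8, fusion_method='mlp')
    print(pe_mlp(ids).shape)  # torch.Size([4, 8])

    pe_sum = PerturbationEmbedding(num_perturbations=10, emb_dim=8, fusion_method='sum')
    print(pe_sum(ids).shape)  # torch.Size([4, 8])

    pe_mat = PerturbationEmbedding(num_perturbations=10, emb_dim=8,
                                   fusion_method='mlp', output_matrix=True)
    print(pe_mat(ids).shape)  # torch.Size([4, 8, 8])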