codewithdark committed
Commit ff99207 · verified · 1 Parent(s): 2a4e4bc

Update Model/prelude_Block.py

Files changed (1):
  1. Model/prelude_Block.py +27 -27
Model/prelude_Block.py CHANGED
@@ -1,28 +1,28 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from typing import Optional, Tuple
- from Model.multi_head_Attention import MultiHeadAttention
-
-
- # Prelude Block (Initial Processing)
- class PreludeBlock(nn.Module):
-     def __init__(self, vocab_size: int, d_model: int, num_heads: int, dropout: float = 0.1):
-         super().__init__()
-         self.token_embedding = nn.Embedding(vocab_size, d_model)
-         self.pos_encoding = nn.Parameter(torch.zeros(1, 1024, d_model))
-         self.attention = MultiHeadAttention(d_model, num_heads, dropout)
-         self.norm1, self.norm2 = nn.LayerNorm(d_model), nn.LayerNorm(d_model)
-         self.feed_forward = nn.Sequential(
-             nn.Linear(d_model, 4 * d_model),
-             nn.GELU(),
-             nn.Linear(4 * d_model, d_model),
-             nn.Dropout(dropout)
-         )
-
-     def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
-         seq_len = x.size(1)
-         x = self.token_embedding(x) + self.pos_encoding[:, :seq_len, :]
-         attended = self.attention(self.norm1(x), mask)
-         x = x + attended
          return x + self.feed_forward(self.norm2(x))

+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from typing import Optional, Tuple
+ from multi_head_Attention import MultiHeadAttention
+
+
+ # Prelude Block (Initial Processing)
+ class PreludeBlock(nn.Module):
+     def __init__(self, vocab_size: int, d_model: int, num_heads: int, dropout: float = 0.1):
+         super().__init__()
+         self.token_embedding = nn.Embedding(vocab_size, d_model)
+         self.pos_encoding = nn.Parameter(torch.zeros(1, 1024, d_model))
+         self.attention = MultiHeadAttention(d_model, num_heads, dropout)
+         self.norm1, self.norm2 = nn.LayerNorm(d_model), nn.LayerNorm(d_model)
+         self.feed_forward = nn.Sequential(
+             nn.Linear(d_model, 4 * d_model),
+             nn.GELU(),
+             nn.Linear(4 * d_model, d_model),
+             nn.Dropout(dropout)
+         )
+
+     def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         seq_len = x.size(1)
+         x = self.token_embedding(x) + self.pos_encoding[:, :seq_len, :]
+         attended = self.attention(self.norm1(x), mask)
+         x = x + attended
          return x + self.feed_forward(self.norm2(x))
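For context, here is a minimal usage sketch of the block this file defines (illustrative only, not part of the commit). It assumes the new top-level import resolves, i.e. that multi_head_Attention.py and prelude_Block.py sit together on the import path, that the module is importable as prelude_Block, and that MultiHeadAttention(d_model, num_heads, dropout) implements forward(x, mask) returning a tensor of shape (batch, seq_len, d_model). The hyperparameter values below are hypothetical.

import torch
from prelude_Block import PreludeBlock  # hypothetical import path, assuming Model/ is on sys.path

# Hypothetical sizes for illustration.
block = PreludeBlock(vocab_size=32000, d_model=512, num_heads=8)

tokens = torch.randint(0, 32000, (2, 128))  # (batch=2, seq_len=128) token ids
out = block(tokens)                         # embed, add learned positions, pre-norm attention + FFN with residuals
print(out.shape)                            # torch.Size([2, 128, 512])

# An attention mask can be passed as the second argument, block(tokens, mask);
# its expected shape depends on MultiHeadAttention's implementation, which this diff does not show.

Note that the positional encoding is a learned parameter of fixed length 1024, so sequences longer than 1024 tokens would fail at the slice self.pos_encoding[:, :seq_len, :]; the example keeps seq_len below that limit.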