Commit 73ca27d (verified) by codewithdark
1 Parent(s): c646f73

Upload 5 files

codaBlock.py ADDED
@@ -0,0 +1,14 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from typing import Optional, Tuple
+
+ # Final Projection Block
+ class CodaBlock(nn.Module):
+     def __init__(self, d_model: int, vocab_size: int):
+         super().__init__()
+         self.norm = nn.LayerNorm(d_model)
+         self.output_proj = nn.Linear(d_model, vocab_size)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.output_proj(self.norm(x))
latent_Recurrent.py ADDED
@@ -0,0 +1,22 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from typing import Optional, Tuple
+ from Model.prelude_Block import PreludeBlock
+ from Model.recurrent_Block import RecurrentBlock
+ from Model.codaBlock import CodaBlock
+
+ # Full Latent Recurrent Depth Model
+ class LatentRecurrentDepthLM(nn.Module):
+     def __init__(self, vocab_size: int, d_model: int, num_heads: int, dropout: float = 0.1):
+         super().__init__()
+         self.prelude = PreludeBlock(vocab_size, d_model, num_heads, dropout)
+         self.recurrent = RecurrentBlock(d_model, num_heads, dropout)
+         self.coda = CodaBlock(d_model, vocab_size)
+
+     def forward(self, x: torch.Tensor, num_iterations: int, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         hidden = self.prelude(x, mask)
+         recurrent_state = torch.zeros_like(hidden)
+         for _ in range(num_iterations):
+             hidden, recurrent_state = self.recurrent(hidden, recurrent_state, mask)
+         return self.coda(hidden)
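
latent_Recurrent.py wires the three blocks together: the prelude embeds the input once, the recurrent block is applied num_iterations times to refine a latent state, and the coda projects the result back to vocabulary logits. A minimal usage sketch, assuming the files live in a Model package as the imports suggest; the sizes below are illustrative, not values from the commit:

import torch
from Model.latent_Recurrent import LatentRecurrentDepthLM

model = LatentRecurrentDepthLM(vocab_size=32000, d_model=512, num_heads=8)  # hypothetical sizes
tokens = torch.randint(0, 32000, (2, 128))   # (batch, seq_len) token ids
logits = model(tokens, num_iterations=4)     # recurrent block unrolled 4 times
print(logits.shape)                          # torch.Size([2, 128, 32000])
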
multi_head_Attention.py ADDED
@@ -0,0 +1,45 @@
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from typing import Optional, Tuple
+
+ # Multi-Head Attention Mechanism
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, d_model: int, num_heads: int, dropout: float = 0.1):
+         super().__init__()
+         assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
+
+         self.d_model = d_model
+         self.num_heads = num_heads
+         self.head_dim = d_model // num_heads
+
+         self.q_proj = nn.Linear(d_model, d_model)
+         self.k_proj = nn.Linear(d_model, d_model)
+         self.v_proj = nn.Linear(d_model, d_model)
+         self.o_proj = nn.Linear(d_model, d_model)
+
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         batch_size, seq_len, d_model = x.shape
+
+         # Project and reshape for multi-head attention
+         q = self.q_proj(x).reshape(batch_size, seq_len, self.num_heads, self.head_dim)
+         k = self.k_proj(x).reshape(batch_size, seq_len, self.num_heads, self.head_dim)
+         v = self.v_proj(x).reshape(batch_size, seq_len, self.num_heads, self.head_dim)
+
+         # Transpose for attention computation
+         q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
+
+         # Compute attention scores
+         scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
+         if mask is not None:
+             scores = scores.masked_fill(mask == 0, float('-inf'))
+
+         attn_weights = F.softmax(scores, dim=-1)
+         attn_weights = self.dropout(attn_weights)
+
+         # Apply attention to values
+         out = torch.matmul(attn_weights, v).transpose(1, 2).reshape(batch_size, seq_len, d_model)
+         return self.o_proj(out)
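
The forward pass fills positions where mask == 0 with -inf before the softmax, so the mask is expected to use 1 for visible positions and to broadcast against the (batch, num_heads, seq_len, seq_len) score tensor. A small sketch of a causal mask, for illustration only (not part of the commit):

import torch
from Model.multi_head_Attention import MultiHeadAttention

attn = MultiHeadAttention(d_model=64, num_heads=4)    # toy sizes
x = torch.randn(2, 10, 64)                            # (batch, seq_len, d_model)
causal = torch.tril(torch.ones(10, 10))               # 1 = attend, 0 = hide future tokens
out = attn(x, mask=causal)                            # (seq_len, seq_len) broadcasts over batch and heads
print(out.shape)                                      # torch.Size([2, 10, 64])
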
prelude_Block.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from typing import Optional, Tuple
+ from Model.multi_head_Attention import MultiHeadAttention
+
+
+ # Prelude Block (Initial Processing)
+ class PreludeBlock(nn.Module):
+     def __init__(self, vocab_size: int, d_model: int, num_heads: int, dropout: float = 0.1):
+         super().__init__()
+         self.token_embedding = nn.Embedding(vocab_size, d_model)
+         self.pos_encoding = nn.Parameter(torch.zeros(1, 1024, d_model))
+         self.attention = MultiHeadAttention(d_model, num_heads, dropout)
+         self.norm1, self.norm2 = nn.LayerNorm(d_model), nn.LayerNorm(d_model)
+         self.feed_forward = nn.Sequential(
+             nn.Linear(d_model, 4 * d_model),
+             nn.GELU(),
+             nn.Linear(4 * d_model, d_model),
+             nn.Dropout(dropout)
+         )
+
+     def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         seq_len = x.size(1)
+         x = self.token_embedding(x) + self.pos_encoding[:, :seq_len, :]
+         attended = self.attention(self.norm1(x), mask)
+         x = x + attended
+         return x + self.feed_forward(self.norm2(x))
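
Note that pos_encoding is a zero-initialised learned parameter with a fixed capacity of 1024 positions, so inputs longer than that would not line up with the slice self.pos_encoding[:, :seq_len, :]. A shape-flow sketch under toy sizes (illustrative only, not part of the commit):

import torch
from Model.prelude_Block import PreludeBlock

prelude = PreludeBlock(vocab_size=1000, d_model=64, num_heads=4)
ids = torch.randint(0, 1000, (1, 16))   # token ids; seq_len must stay <= 1024
hidden = prelude(ids)                   # embed + learned positions, then attention and FFN residuals
print(hidden.shape)                     # torch.Size([1, 16, 64])
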
recurrent_Block.py ADDED
@@ -0,0 +1,25 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from typing import Optional, Tuple
+ from Model.multi_head_Attention import MultiHeadAttention
+
+ # Recurrent Block (Processing Over Time)
+ class RecurrentBlock(nn.Module):
+     def __init__(self, d_model: int, num_heads: int, dropout: float = 0.1):
+         super().__init__()
+         self.attention = MultiHeadAttention(d_model, num_heads, dropout)
+         self.norm1, self.norm2 = nn.LayerNorm(d_model), nn.LayerNorm(d_model)
+         self.feed_forward = nn.Sequential(
+             nn.Linear(d_model, 4 * d_model),
+             nn.GELU(),
+             nn.Linear(4 * d_model, d_model),
+             nn.Dropout(dropout)
+         )
+         self.state_proj = nn.Linear(d_model, d_model)
+
+     def forward(self, x: torch.Tensor, recurrent_state: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
+         recurrent_state = self.state_proj(recurrent_state)
+         x = x + recurrent_state
+         attended = self.attention(self.norm1(x), mask)
+         return x + attended + self.feed_forward(self.norm2(x)), x
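
The block returns an (output, new_state) pair: the output carries the attention and feed-forward residuals, while the new state is the pre-attention stream x (the input plus the projected previous state). The loop in latent_Recurrent.py threads this pair across iterations; a standalone sketch of the same pattern (illustration only, not part of the commit):

import torch
from Model.recurrent_Block import RecurrentBlock

block = RecurrentBlock(d_model=64, num_heads=4)
hidden = torch.randn(1, 16, 64)           # stand-in for the prelude output
state = torch.zeros_like(hidden)          # zero-initialised recurrent state
for _ in range(3):                        # latent depth of 3, chosen arbitrarily
    hidden, state = block(hidden, state)
print(hidden.shape)                       # torch.Size([1, 16, 64])
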