epicDev123 committed
Commit c52f5d3 · verified · 1 Parent(s): ee4ccb7

Create model.py

Files changed (1)
model.py  +72  -0
model.py ADDED
@@ -0,0 +1,72 @@
+# model.py
+import torch
+import torch.nn as nn
+import math
+
+class SelfAttention(nn.Module):
+    def __init__(self, embed_dim, num_heads):
+        super().__init__()
+        assert embed_dim % num_heads == 0
+        self.head_dim = embed_dim // num_heads
+        self.num_heads = num_heads
+
+        self.query = nn.Linear(embed_dim, embed_dim)
+        self.key = nn.Linear(embed_dim, embed_dim)
+        self.value = nn.Linear(embed_dim, embed_dim)
+        self.out_proj = nn.Linear(embed_dim, embed_dim)
+
+    def forward(self, x):
+        B, T, C = x.size()
+        q = self.query(x).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)  # (B, heads, T, head_dim)
+        k = self.key(x).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+        v = self.value(x).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+
+        scores = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)  # (B, heads, T, T)
+        mask = torch.tril(torch.ones(T, T)).to(x.device)
+        scores = scores.masked_fill(mask == 0, float('-inf'))
+        attn = torch.softmax(scores, dim=-1)
+
+        out = attn @ v  # (B, heads, T, head_dim)
+        out = out.transpose(1, 2).contiguous().view(B, T, C)
+        return self.out_proj(out)
+
+class TransformerBlock(nn.Module):
+    def __init__(self, embed_dim, num_heads):
+        super().__init__()
+        self.attn = SelfAttention(embed_dim, num_heads)
+        self.ln1 = nn.LayerNorm(embed_dim)
+        self.ff = nn.Sequential(
+            nn.Linear(embed_dim, embed_dim * 4),
+            nn.GELU(),
+            nn.Linear(embed_dim * 4, embed_dim)
+        )
+        self.ln2 = nn.LayerNorm(embed_dim)
+
+    def forward(self, x):
+        x = x + self.attn(self.ln1(x))
+        x = x + self.ff(self.ln2(x))
+        return x
+
+class TinyTransformer(nn.Module):
+    def __init__(self, vocab_size, max_len, embed_dim=128, num_heads=2, num_layers=1):
+        super().__init__()
+        self.token_embed = nn.Embedding(vocab_size, embed_dim)
+        self.pos_embed = nn.Parameter(torch.zeros(1, max_len, embed_dim))
+        self.blocks = nn.ModuleList([
+            TransformerBlock(embed_dim, num_heads) for _ in range(num_layers)
+        ])
+        self.ln_final = nn.LayerNorm(embed_dim)
+        self.head = nn.Linear(embed_dim, vocab_size)
+
+    def forward(self, x):
+        B, T = x.size()
+        tok_emb = self.token_embed(x)  # (B, T, C)
+        pos_emb = self.pos_embed[:, :T, :]  # (1, T, C)
+        x = tok_emb + pos_emb  # (B, T, C)
+
+        for block in self.blocks:
+            x = block(x)
+
+        x = self.ln_final(x)
+        logits = self.head(x)  # (B, T, vocab_size)
+        return logits
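
A minimal usage sketch of the module added in this commit (not part of the diff itself; the vocab_size, max_len, batch and sequence-length values below are illustrative placeholders, not taken from the commit):

    import torch
    from model import TinyTransformer

    # Hypothetical example sizes; the commit does not fix these values.
    model = TinyTransformer(vocab_size=1000, max_len=64, embed_dim=128, num_heads=2, num_layers=1)
    tokens = torch.randint(0, 1000, (2, 16))  # (batch, seq_len); seq_len must not exceed max_len
    logits = model(tokens)                    # (2, 16, 1000) per-position next-token logits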