MarkProMaster229 committed on
Commit f40edc1 · verified · 1 Parent(s): 8e21f8e

Create app.py

Files changed (1)
  1. app.py +101 -0
app.py ADDED
@@ -0,0 +1,101 @@
+ import gradio as gr
+ import torch
+ import torch.nn as nn
+ from transformers import PreTrainedTokenizerFast
+ from huggingface_hub import hf_hub_download
+
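+ # Fetch the trained weights and tokenizer files from the Hub (cached locally after the first download).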
+ repo_id = "MarkProMaster229/ClassificationSmall"
+
+ weights_path = hf_hub_download(repo_id=repo_id, filename="model_weights.pth")
+ tokenizer_path = hf_hub_download(repo_id=repo_id, filename="tokenizer.json")
+ vocab_path = hf_hub_download(repo_id=repo_id, filename="vocab.txt")  # not needed by the fast tokenizer below
+
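+ # A pre-LayerNorm Transformer encoder block: LayerNorm -> multi-head self-attention -> residual,
+ # then LayerNorm -> feed-forward (4x expansion, GELU) -> residual.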
+ class TransformerBlock(nn.Module):
+     def __init__(self, sizeVector=256, numHeads=8, dropout=0.5):
+         super().__init__()
+         self.ln1 = nn.LayerNorm(sizeVector)
+         self.attn = nn.MultiheadAttention(sizeVector, numHeads, batch_first=True)
+         self.dropout_attn = nn.Dropout(dropout)
+         self.ln2 = nn.LayerNorm(sizeVector)
+         self.ff = nn.Sequential(
+             nn.Linear(sizeVector, sizeVector*4),
+             nn.GELU(),
+             nn.Linear(sizeVector*4, sizeVector),
+             nn.Dropout(dropout)
+         )
+
+     def forward(self, x, attention_mask=None):
+         # nn.MultiheadAttention ignores positions where key_padding_mask is True,
+         # so the HF-style mask (1 = real token) is inverted here.
+         key_padding_mask = ~attention_mask.bool() if attention_mask is not None else None
+         h = self.ln1(x)
+         attn_out, _ = self.attn(h, h, h, key_padding_mask=key_padding_mask)
+         x = x + self.dropout_attn(attn_out)
+         x = x + self.ff(self.ln2(x))
+         return x
+
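+ # Encoder-only classifier: token + learned positional embeddings feed a stack of blocks;
+ # the head sees the first-token state concatenated with mean pooling (2*sizeVector features).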
+ class TransformerRun(nn.Module):
+     def __init__(self, vocabSize=120000, maxLen=100, sizeVector=256, numBlocks=4, numHeads=8, numClasses=3, dropout=0.5):
+         super().__init__()
+         self.token_emb = nn.Embedding(vocabSize, sizeVector)
+         self.pos_emb = nn.Embedding(maxLen, sizeVector)
+         self.layers = nn.ModuleList([
+             TransformerBlock(sizeVector=sizeVector, numHeads=numHeads, dropout=dropout)
+             for _ in range(numBlocks)
+         ])
+         self.dropout = nn.Dropout(dropout)
+         self.ln = nn.LayerNorm(sizeVector*2)
+         self.classifier = nn.Linear(sizeVector*2, numClasses)
+
+     def forward(self, x, attention_mask=None):
+         B, T = x.shape
+         tok = self.token_emb(x)
+         pos = self.pos_emb(torch.arange(T, device=x.device).unsqueeze(0).expand(B, T))
+         h = tok + pos
+         for layer in self.layers:
+             h = layer(h, attention_mask)
+         cls_token = h[:, 0, :]        # first-token representation
+         mean_pool = h.mean(dim=1)     # mean over all positions
+         combined = torch.cat([cls_token, mean_pool], dim=1)
+         combined = self.ln(self.dropout(combined))
+         logits = self.classifier(combined)
+         return logits
+
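+ # vocabSize 119547 matches the bert-base-multilingual-cased vocabulary, which suggests the
+ # tokenizer was derived from that checkpoint (an inference, not stated in this commit).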
+ config_dict = {
+     'vocabSize': 119547,
+     'maxLong': 100,      # maps to the model's maxLen
+     'sizeVector': 256,
+     'numLayers': 4,      # maps to the model's numBlocks
+     'numHeads': 8,
+     'numClasses': 3
+ }
+
+ model = TransformerRun(
+     vocabSize=config_dict['vocabSize'],
+     maxLen=config_dict['maxLong'],
+     sizeVector=config_dict['sizeVector'],
+     numBlocks=config_dict['numLayers'],
+     numHeads=config_dict['numHeads'],
+     numClasses=config_dict['numClasses'],
+     dropout=0.1
+ )
+
+ state_dict = torch.load(weights_path, map_location="cpu")  # weights_only=True is safer on PyTorch >= 1.13
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ # tokenizer.json fully defines a fast tokenizer, so vocab.txt does not need to be passed.
+ tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_path)
+ label_map = {0: "positive", 1: "negative", 2: "neutral"}
+
+ def classify(text):
+     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=config_dict['maxLong'])
+     with torch.no_grad():
+         # Pass the attention mask so padded positions are ignored by self-attention.
+         logits = model(inputs['input_ids'], attention_mask=inputs['attention_mask'])
+     pred_idx = torch.argmax(logits, dim=1).item()
+     return label_map[pred_idx]
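+ # A quick local sanity check (a sketch, not part of the original commit; run before launching):
+ #     print(classify("I really enjoyed this!"))   # expected: "positive" / "negative" / "neutral"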
+
+ demo = gr.Interface(
+     fn=classify,
+     inputs=gr.Textbox(lines=2, placeholder="Enter text..."),
+     outputs="text",
+     title="Text Sentiment Classifier",
+     description="A simple text classification model"
+ )
+
+ demo.launch()
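+ # Possible programmatic use once the app is running (a sketch; the Space id below is
+ # hypothetical and gradio_client must be installed separately):
+ #     from gradio_client import Client
+ #     client = Client("MarkProMaster229/ClassificationSmall")   # hypothetical Space id
+ #     print(client.predict("I really enjoyed this!", api_name="/predict"))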