arcsu1 commited on
Commit
4d0e37d
·
1 Parent(s): 6eeab27
.dockerignore ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Python build artifacts
__pycache__
*.pyc
*.pyo
*.pyd
.Python
*.so
*.egg
*.egg-info
dist
build

# VCS and editor metadata
.git
.gitignore
.vscode
.idea
*.swp
*.swo
*~
.DS_Store

# Docs and Docker metadata not needed inside the image
README.md
.dockerignore
Dockerfile

# Local environments and secrets
.env
venv/
.venv/

# Windows "mark of the web" alternate-data-stream files committed
# alongside the model (e.g. models/.../config.json:Zone.Identifier);
# keep them out of the build context so COPY models/ skips them.
**/*Zone.Identifier
Dockerfile ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

WORKDIR /app

# No .pyc files in the image; unbuffered stdout so container logs stream.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Install system dependencies (compilers needed to build some wheels)
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    g++ \
    && rm -rf /var/lib/apt/lists/*

# Upgrade pip first
RUN pip install --no-cache-dir --upgrade pip

# Copy requirements and install Python dependencies from the single
# source of truth. (Previously requirements.txt was copied but the pins
# were duplicated by hand in separate RUN lines, which risks drift.)
COPY requirements.txt .
RUN pip install --default-timeout=1000 --no-cache-dir -r requirements.txt

# Copy application code
COPY app.py .

# Copy templates folder
COPY templates/ ./templates/

# Copy model files
COPY models/ ./models/

# Expose port
EXPOSE 7860

# Run the application
CMD ["python", "app.py"]
README.md CHANGED
@@ -1,10 +1,114 @@
1
  ---
2
  title: Basic Chatbot
3
- emoji: 🏆
4
  colorFrom: blue
5
- colorTo: yellow
6
  sdk: docker
 
7
  pinned: false
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  title: Basic Chatbot
3
+ emoji: 💬
4
  colorFrom: blue
5
+ colorTo: purple
6
  sdk: docker
7
+ app_port: 7860
8
  pinned: false
9
  ---
10
 
11
+ # AI Chatbot Flask App
12
+
13
+ Flask service for a fine-tuned GPT-2 conversational chatbot model.
14
+
15
+ ## Setup
16
+
17
+ ### Local Development
18
+
19
+ 1. Install dependencies:
20
+ ```bash
21
+ pip install -r requirements.txt
22
+ ```
23
+
24
+ 2. Run the app:
25
+ ```bash
26
+ python app.py
27
+ ```
28
+
29
+ 3. Access the application:
30
+ - **Web Interface**: http://localhost:7860 (Chat directly in your browser!)
31
+ - API Info: http://localhost:7860/api
32
+ - Health check: http://localhost:7860/health
33
+
34
+ ### Docker
35
+
36
+ 1. Build the image:
37
+ ```bash
38
+ docker build -t chatbot-api .
39
+ ```
40
+
41
+ 2. Run the container:
42
+ ```bash
43
+ docker run -p 7860:7860 chatbot-api
44
+ ```
45
+
46
+ ## API Endpoints
47
+
48
+ ### Web Interface
49
+ Visit the root URL to access the interactive chat interface.
50
+
51
+ ### GET `/api`
52
+ Health check and info
53
+ ```bash
54
+ curl http://localhost:7860/api
55
+ ```
56
+
57
+ ### GET `/health`
58
+ Detailed health status
59
+ ```bash
60
+ curl http://localhost:7860/health
61
+ ```
62
+
63
+ ### POST `/chat`
64
+ Generate chatbot response
65
+
66
+ Request body:
67
+ ```json
68
+ {
69
+ "user": ["Hello!", "How are you?"],
70
+ "ai": ["Hi there!"]
71
+ }
72
+ ```
73
+
74
+ Example:
75
+ ```bash
76
+ curl -X POST http://localhost:7860/chat \
77
+ -H "Content-Type: application/json" \
78
+ -d '{"user": ["Hello!"], "ai": []}'
79
+ ```
80
+
81
+ Response:
82
+ ```json
83
+ {
84
+ "response": "Hi there! How can I help you today",
85
+ "device": "cuda:0"
86
+ }
87
+ ```
88
+
89
+ ## Model
90
+
91
+ - **Model**: Fine-tuned GPT-2
92
+ - **Location**: `./models/fine-tuned-gpt2`
93
+ - **Type**: Conversational AI Chatbot
94
+ - **Port**: 7860
95
+
96
+ ## Features
97
+
98
+ - CORS enabled for all origins
99
+ - Automatic GPU detection and usage
100
+ - Conversation history support (last 7 exchanges)
101
+ - Clean chat interface
102
+ - Real-time responses
103
+ - Message history management
104
+ - Typing indicators
105
+
106
+ ## Chat Interface
107
+
108
+ The web interface provides:
109
+ - Real-time chat with the AI
110
+ - Conversation history
111
+ - Typing indicators
112
+ - Clear chat functionality
113
+ - Responsive design for mobile and desktop
114
+
app.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, jsonify, request, render_template
from flask_cors import CORS
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from any origin

# Global variables for model and tokenizer
# Path of the fine-tuned checkpoint bundled with the image (see Dockerfile).
MODEL_PATH = "./models/fine-tuned-gpt2"
# Prefer a GPU when available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Populated by load_chatbot_model() at startup; None until then.
tokenizer = None
model = None
14
+
15
def load_chatbot_model():
    """Load the fine-tuned GPT-2 tokenizer and model into the module
    globals and move the model onto the selected device.

    Idempotent: returns immediately if the model is already loaded.
    """
    global tokenizer, model
    if model is not None:
        return

    print(f"Loading chatbot model from {MODEL_PATH}...")
    print(f"Using device: {device}")

    tokenizer = GPT2Tokenizer.from_pretrained(MODEL_PATH)
    model = GPT2LMHeadModel.from_pretrained(MODEL_PATH)
    model.to(device)

    print("Model loaded successfully!")

# Load model on startup
load_chatbot_model()
30
+
31
+ @app.route("/")
32
+ def index():
33
+ """Serve the chat interface"""
34
+ return render_template('index.html')
35
+
36
+ @app.route("/api")
37
+ def root():
38
+ return jsonify({
39
+ "message": "Chatbot API",
40
+ "status": "running",
41
+ "model": "fine-tuned-gpt2",
42
+ "device": str(device)
43
+ })
44
+
45
+ @app.route("/health")
46
+ def health():
47
+ return jsonify({
48
+ "status": "healthy",
49
+ "model_loaded": model is not None,
50
+ "device": str(device)
51
+ })
52
+
53
+ @app.route("/chat", methods=["POST"])
54
+ def chat():
55
+ """
56
+ Generate a chatbot response based on conversation history
57
+ """
58
+ if model is None or tokenizer is None:
59
+ return jsonify({"error": "Model not loaded"}), 500
60
+
61
+ try:
62
+ data = request.get_json()
63
+ user_messages = data.get("user", [])
64
+ ai_messages = data.get("ai", [])
65
+
66
+ # Build conversation history
67
+ combined_prompt = ""
68
+
69
+ # Limit history to last 7 exchanges
70
+ user_msgs = user_messages[-7:] if len(user_messages) > 7 else user_messages
71
+ ai_msgs = ai_messages[-6:] if len(ai_messages) > 6 else ai_messages
72
+
73
+ # Add conversation history
74
+ for user_message, ai_message in zip(user_msgs[:-1], ai_msgs):
75
+ combined_prompt += f"<user> {user_message}{tokenizer.eos_token}<AI> {ai_message}{tokenizer.eos_token}"
76
+
77
+ # Add current message
78
+ if user_msgs:
79
+ combined_prompt += f"<user> {user_msgs[-1]}{tokenizer.eos_token}<AI>"
80
+
81
+ # Tokenize and generate
82
+ inputs = tokenizer.encode(combined_prompt, return_tensors="pt").to(device)
83
+ attention_mask = torch.ones(inputs.shape, device=device)
84
+
85
+ outputs = model.generate(
86
+ inputs,
87
+ max_new_tokens=50,
88
+ num_beams=5,
89
+ early_stopping=True,
90
+ no_repeat_ngram_size=2,
91
+ temperature=0.7,
92
+ top_k=50,
93
+ top_p=0.95,
94
+ pad_token_id=tokenizer.eos_token_id,
95
+ attention_mask=attention_mask,
96
+ repetition_penalty=1.2
97
+ )
98
+
99
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
100
+ # Extract only the new response
101
+ response = response.replace(combined_prompt, "").split(".")[0].strip()
102
+
103
+ return jsonify({
104
+ "response": response,
105
+ "device": str(device)
106
+ })
107
+
108
+ except Exception as e:
109
+ return jsonify({"error": str(e)}), 500
110
+
111
if __name__ == "__main__":
    # Bind to all interfaces on the port exposed by the Dockerfile (7860).
    app.run(host="0.0.0.0", port=7860, debug=False)
models/fine-tuned-gpt2/config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "gpt2",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "initializer_range": 0.02,
12
+ "layer_norm_epsilon": 1e-05,
13
+ "model_type": "gpt2",
14
+ "n_ctx": 1024,
15
+ "n_embd": 768,
16
+ "n_head": 12,
17
+ "n_inner": null,
18
+ "n_layer": 12,
19
+ "n_positions": 1024,
20
+ "reorder_and_upcast_attn": false,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_by_inverse_layer_idx": false,
23
+ "scale_attn_weights": true,
24
+ "summary_activation": null,
25
+ "summary_first_dropout": 0.1,
26
+ "summary_proj_to_labels": true,
27
+ "summary_type": "cls_index",
28
+ "summary_use_proj": true,
29
+ "task_specific_params": {
30
+ "text-generation": {
31
+ "do_sample": true,
32
+ "max_length": 50
33
+ }
34
+ },
35
+ "torch_dtype": "float32",
36
+ "transformers_version": "4.44.0",
37
+ "use_cache": true,
38
+ "vocab_size": 50257
39
+ }
models/fine-tuned-gpt2/config.json:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [ZoneTransfer]
2
+ ZoneId=3
3
+ HostUrl=https://www.kaggle.com/
models/fine-tuned-gpt2/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 50256,
4
+ "eos_token_id": 50256,
5
+ "transformers_version": "4.44.0"
6
+ }
models/fine-tuned-gpt2/generation_config.json:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [ZoneTransfer]
2
+ ZoneId=3
3
+ HostUrl=https://www.kaggle.com/
models/fine-tuned-gpt2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/fine-tuned-gpt2/merges.txt:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [ZoneTransfer]
2
+ ZoneId=3
3
+ HostUrl=https://www.kaggle.com/
models/fine-tuned-gpt2/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8be8247018b9ae965bcf6d6e3edaa797753fcf42623b65efa34973d31dae6aa3
3
+ size 497774208
models/fine-tuned-gpt2/model.safetensors:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [ZoneTransfer]
2
+ ZoneId=3
3
+ HostUrl=https://www.kaggle.com/
models/fine-tuned-gpt2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|endoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<|endoftext|>",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
models/fine-tuned-gpt2/special_tokens_map.json:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [ZoneTransfer]
2
+ ZoneId=3
3
+ HostUrl=https://www.kaggle.com/
models/fine-tuned-gpt2/tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "50256": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ }
13
+ },
14
+ "bos_token": "<|endoftext|>",
15
+ "clean_up_tokenization_spaces": true,
16
+ "eos_token": "<|endoftext|>",
17
+ "errors": "replace",
18
+ "model_max_length": 1024,
19
+ "pad_token": "<|endoftext|>",
20
+ "tokenizer_class": "GPT2Tokenizer",
21
+ "unk_token": "<|endoftext|>"
22
+ }
models/fine-tuned-gpt2/tokenizer_config.json:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [ZoneTransfer]
2
+ ZoneId=3
3
+ HostUrl=https://www.kaggle.com/
models/fine-tuned-gpt2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
models/fine-tuned-gpt2/vocab.json:Zone.Identifier ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [ZoneTransfer]
2
+ ZoneId=3
3
+ HostUrl=https://www.kaggle.com/
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Python dependencies for the chatbot Flask service.
# NOTE(review): models/fine-tuned-gpt2/config.json records
# transformers_version 4.44.0, while 4.42.4 is pinned here — confirm the
# checkpoint loads correctly under 4.42.4 or align the pin.
flask
flask-cors
transformers==4.42.4
torch==2.3.1
templates/index.html ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>AI Chatbot - Chat with GPT-2</title>
7
+ <style>
8
+ * { margin: 0; padding: 0; box-sizing: border-box; }
9
+
10
+ body {
11
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
12
+ background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%);
13
+ min-height: 100vh;
14
+ padding: 20px;
15
+ display: flex;
16
+ flex-direction: column;
17
+ }
18
+
19
+ .container {
20
+ max-width: 800px;
21
+ margin: 0 auto;
22
+ width: 100%;
23
+ display: flex;
24
+ flex-direction: column;
25
+ height: calc(100vh - 40px);
26
+ }
27
+
28
+ header {
29
+ text-align: center;
30
+ color: white;
31
+ padding: 20px 0;
32
+ }
33
+
34
+ h1 {
35
+ font-size: 2rem;
36
+ font-weight: 700;
37
+ text-shadow: 2px 2px 4px rgba(0,0,0,0.2);
38
+ margin-bottom: 5px;
39
+ }
40
+
41
+ .subtitle {
42
+ opacity: 0.9;
43
+ font-size: 1rem;
44
+ }
45
+
46
+ .chat-container {
47
+ flex: 1;
48
+ background: white;
49
+ border-radius: 16px;
50
+ box-shadow: 0 20px 60px rgba(0,0,0,0.3);
51
+ display: flex;
52
+ flex-direction: column;
53
+ overflow: hidden;
54
+ }
55
+
56
+ .messages {
57
+ flex: 1;
58
+ overflow-y: auto;
59
+ padding: 20px;
60
+ display: flex;
61
+ flex-direction: column;
62
+ gap: 15px;
63
+ }
64
+
65
+ .message {
66
+ max-width: 80%;
67
+ padding: 12px 16px;
68
+ border-radius: 12px;
69
+ word-wrap: break-word;
70
+ animation: slideIn 0.3s ease;
71
+ }
72
+
73
+ @keyframes slideIn {
74
+ from { opacity: 0; transform: translateY(10px); }
75
+ to { opacity: 1; transform: translateY(0); }
76
+ }
77
+
78
+ .message.user {
79
+ align-self: flex-end;
80
+ background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%);
81
+ color: white;
82
+ border-bottom-right-radius: 4px;
83
+ }
84
+
85
+ .message.ai {
86
+ align-self: flex-start;
87
+ background: #f3f4f6;
88
+ color: #1f2937;
89
+ border-bottom-left-radius: 4px;
90
+ }
91
+
92
+ .message.ai::before {
93
+ content: '🤖 ';
94
+ }
95
+
96
+ .message.user::before {
97
+ content: '👤 ';
98
+ }
99
+
100
+ .typing-indicator {
101
+ align-self: flex-start;
102
+ padding: 12px 16px;
103
+ background: #f3f4f6;
104
+ border-radius: 12px;
105
+ border-bottom-left-radius: 4px;
106
+ display: none;
107
+ }
108
+
109
+ .typing-indicator.show {
110
+ display: block;
111
+ }
112
+
113
+ .typing-indicator span {
114
+ height: 8px;
115
+ width: 8px;
116
+ background: #9ca3af;
117
+ border-radius: 50%;
118
+ display: inline-block;
119
+ margin: 0 2px;
120
+ animation: bounce 1.4s infinite ease-in-out;
121
+ }
122
+
123
+ .typing-indicator span:nth-child(1) { animation-delay: -0.32s; }
124
+ .typing-indicator span:nth-child(2) { animation-delay: -0.16s; }
125
+
126
+ @keyframes bounce {
127
+ 0%, 80%, 100% { transform: scale(0); }
128
+ 40% { transform: scale(1); }
129
+ }
130
+
131
+ .input-area {
132
+ padding: 20px;
133
+ border-top: 1px solid #e5e7eb;
134
+ background: #fafafa;
135
+ }
136
+
137
+ .input-container {
138
+ display: flex;
139
+ gap: 10px;
140
+ }
141
+
142
+ #messageInput {
143
+ flex: 1;
144
+ padding: 12px 16px;
145
+ border: 2px solid #e5e7eb;
146
+ border-radius: 24px;
147
+ font-size: 1rem;
148
+ outline: none;
149
+ transition: border-color 0.2s;
150
+ }
151
+
152
+ #messageInput:focus {
153
+ border-color: #3b82f6;
154
+ }
155
+
156
+ #sendBtn {
157
+ padding: 12px 24px;
158
+ background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%);
159
+ color: white;
160
+ border: none;
161
+ border-radius: 24px;
162
+ font-weight: 600;
163
+ cursor: pointer;
164
+ transition: all 0.2s;
165
+ }
166
+
167
+ #sendBtn:hover {
168
+ transform: translateY(-2px);
169
+ box-shadow: 0 5px 15px rgba(59, 130, 246, 0.3);
170
+ }
171
+
172
+ #sendBtn:disabled {
173
+ opacity: 0.5;
174
+ cursor: not-allowed;
175
+ transform: none;
176
+ }
177
+
178
+ .clear-btn {
179
+ text-align: center;
180
+ padding: 10px;
181
+ border-top: 1px solid #e5e7eb;
182
+ background: #fafafa;
183
+ }
184
+
185
+ .clear-btn button {
186
+ padding: 8px 16px;
187
+ background: transparent;
188
+ color: #6b7280;
189
+ border: 1px solid #d1d5db;
190
+ border-radius: 8px;
191
+ font-size: 0.875rem;
192
+ cursor: pointer;
193
+ transition: all 0.2s;
194
+ }
195
+
196
+ .clear-btn button:hover {
197
+ background: #f3f4f6;
198
+ color: #374151;
199
+ }
200
+
201
+ .empty-state {
202
+ text-align: center;
203
+ color: #9ca3af;
204
+ padding: 40px 20px;
205
+ }
206
+
207
+ .empty-state h3 {
208
+ font-size: 1.5rem;
209
+ margin-bottom: 10px;
210
+ }
211
+
212
+ @media (max-width: 640px) {
213
+ h1 { font-size: 1.5rem; }
214
+ .message { max-width: 90%; }
215
+ #sendBtn { padding: 12px 20px; }
216
+ }
217
+ </style>
218
+ </head>
219
+ <body>
220
+ <div class="container">
221
+ <header>
222
+ <h1>💬 AI Chatbot</h1>
223
+ <p class="subtitle">Chat with fine-tuned GPT-2</p>
224
+ </header>
225
+
226
+ <div class="chat-container">
227
+ <div class="messages" id="messages">
228
+ <div class="empty-state">
229
+ <h3>👋 Hello!</h3>
230
+ <p>Start a conversation by typing a message below</p>
231
+ </div>
232
+ </div>
233
+
234
+ <div class="clear-btn">
235
+ <button id="clearBtn">Clear Chat</button>
236
+ </div>
237
+
238
+ <div class="input-area">
239
+ <div class="input-container">
240
+ <input type="text" id="messageInput" placeholder="Type your message..." autocomplete="off">
241
+ <button id="sendBtn">Send</button>
242
+ </div>
243
+ </div>
244
+ </div>
245
+ </div>
246
+
247
+ <script>
248
// Cached DOM references for the chat UI.
const messagesDiv = document.getElementById('messages');
const messageInput = document.getElementById('messageInput');
const sendBtn = document.getElementById('sendBtn');
const clearBtn = document.getElementById('clearBtn');

// Full transcript mirrored to the backend on every /chat request.
// user[i] pairs with ai[i]; user may be one entry longer while a reply
// is pending.
let conversationHistory = { user: [], ai: [] };
254
+
255
// Append a chat bubble for the user (isUser=true) or the AI, removing
// the empty-state placeholder on first use, and keep the view scrolled
// to the newest message. Uses textContent so message text is never
// interpreted as HTML.
function addMessage(text, isUser) {
  const placeholder = messagesDiv.querySelector('.empty-state');
  if (placeholder) {
    placeholder.remove();
  }

  const bubble = document.createElement('div');
  bubble.className = `message ${isUser ? 'user' : 'ai'}`;
  bubble.textContent = text;
  messagesDiv.appendChild(bubble);
  messagesDiv.scrollTop = messagesDiv.scrollHeight;
}
265
+
266
// Show the animated "AI is typing" indicator at the bottom of the chat.
function showTyping() {
  const indicator = document.createElement('div');
  indicator.className = 'typing-indicator show';
  indicator.id = 'typing';
  indicator.innerHTML = '<span></span><span></span><span></span>';
  messagesDiv.appendChild(indicator);
  messagesDiv.scrollTop = messagesDiv.scrollHeight;
}
274
+
275
// Remove the typing indicator if it is currently shown; no-op otherwise.
function hideTyping() {
  document.getElementById('typing')?.remove();
}
279
+
280
// Post the current input plus the full conversation history to /chat
// and render the model's reply. The input is disabled while a request
// is in flight; on failure the optimistically-pushed user turn is
// rolled back so history stays in sync with the server.
async function sendMessage() {
  const text = messageInput.value.trim();
  if (!text) {
    return;
  }

  addMessage(text, true);
  conversationHistory.user.push(text);
  messageInput.value = '';
  sendBtn.disabled = true;
  messageInput.disabled = true;
  showTyping();

  try {
    const res = await fetch('/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(conversationHistory),
    });
    if (!res.ok) {
      throw new Error('Failed to get response');
    }

    const data = await res.json();
    hideTyping();

    const reply = data.response || 'Sorry, I could not generate a response.';
    addMessage(reply, false);
    conversationHistory.ai.push(reply);
  } catch (error) {
    hideTyping();
    addMessage('Sorry, something went wrong. Please try again.', false);
    conversationHistory.user.pop(); // Remove last user message on error
  } finally {
    sendBtn.disabled = false;
    messageInput.disabled = false;
    messageInput.focus();
  }
}
318
+
319
// Wire up UI events.
sendBtn.addEventListener('click', sendMessage);

// 'keydown' instead of the deprecated 'keypress' event; behavior is the
// same (submit on Enter).
messageInput.addEventListener('keydown', (e) => {
  if (e.key === 'Enter') sendMessage();
});

// Reset both the rendered transcript and the history sent to the server.
clearBtn.addEventListener('click', () => {
  messagesDiv.innerHTML = '<div class="empty-state"><h3>👋 Hello!</h3><p>Start a conversation by typing a message below</p></div>';
  conversationHistory = { user: [], ai: [] };
  messageInput.value = '';
  messageInput.focus();
});

messageInput.focus();
332
+ </script>
333
+ </body>
334
+ </html>