"""

Hybrid Chat Endpoint: RAG + Scenario FSM

Routes between scripted scenarios and knowledge retrieval

"""
from datetime import datetime, timezone

from fastapi import HTTPException

# Import scenario handlers
from scenario_handlers.price_inquiry import PriceInquiryHandler
from scenario_handlers.event_recommendation import EventRecommendationHandler
from scenario_handlers.post_event_feedback import PostEventFeedbackHandler
from scenario_handlers.exit_intent_rescue import ExitIntentRescueHandler


async def hybrid_chat_endpoint(
    request,  # ChatRequest
    conversation_service,
    intent_classifier,
    embedding_service,  # For handlers
    qdrant_service,     # For handlers
    tools_service,
    advanced_rag,
    chat_history_collection,
    hf_token,
    lead_storage
):
    """
    Hybrid conversational chatbot: Scenario FSM + RAG

    Flow:
    1. Load session & scenario state
    2. Classify intent (scenario vs RAG)
    3. Route:
       - Scenario: execute the FSM flow with dedicated handlers
       - RAG: knowledge retrieval
       - RAG+Resume: answer the question, then resume the scenario
    4. Save state & history
    """
    try:
        # ===== SESSION MANAGEMENT =====
        session_id = request.session_id
        if not session_id:
            session_id = conversation_service.create_session(
                metadata={"user_agent": "api", "created_via": "hybrid_chat"},
                user_id=request.user_id
            )
            print(f"✓ Created session: {session_id} (user: {request.user_id or 'anon'})")
        else:
            if not conversation_service.session_exists(session_id):
                raise HTTPException(404, detail=f"Session {session_id} not found")
        
        # ===== LOAD SCENARIO STATE =====
        scenario_state = conversation_service.get_scenario_state(session_id) or {}
        
        # ===== INTENT CLASSIFICATION =====
        intent = intent_classifier.classify(request.message, scenario_state)
        print(f"🎯 Intent: {intent}")
        
        # ===== ROUTING =====
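        # Intent labels produced by the classifier, as consumed below:
        #   "scenario:<type>" starts a scenario, "scenario:continue" resumes one,
        #   "rag:with_resume" answers mid-scenario, anything else is treated
        #   as a general RAG query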
        if intent.startswith("scenario:"):
            # Route to dedicated scenario handler
            response_data = await handle_scenario(
                intent,
                request.message,
                session_id,
                scenario_state,
                embedding_service,
                qdrant_service,
                conversation_service,
                lead_storage
            )
        
        elif intent == "rag:with_resume":
            # Answer question but keep scenario active
            response_data = await handle_rag_with_resume(
                request,
                session_id,
                scenario_state,
                embedding_service,
                qdrant_service,
                conversation_service
            )
        
        else:  # rag:general
            # Pure RAG query
            response_data = await handle_pure_rag(
                request,
                session_id,
                advanced_rag,
                embedding_service,
                qdrant_service,
                tools_service,
                chat_history_collection,
                hf_token,
                conversation_service
            )
        
        # ===== SAVE HISTORY =====
        conversation_service.add_message(
            session_id,
            "user",
            request.message,
            metadata={"intent": intent}
        )
        
        conversation_service.add_message(
            session_id,
            "assistant",
            response_data["response"],
            metadata={
                "mode": response_data.get("mode", "unknown"),
                "context_used": response_data.get("context_used", [])[:3]
            }
        )
        
        return {
            "response": response_data["response"],
            "session_id": session_id,
            "mode": response_data.get("mode"),
            "scenario_active": response_data.get("scenario_active", False),
            "timestamp": datetime.utcnow().isoformat()
        }
    
    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 404 above) pass through
        # instead of being re-wrapped as a 500
        raise
    except Exception as e:
        print(f"❌ Error in hybrid_chat: {str(e)}")
        raise HTTPException(500, detail=f"Chat error: {str(e)}")


async def handle_scenario(
    intent,
    user_message,
    session_id,
    scenario_state,
    embedding_service,
    qdrant_service,
    conversation_service,
    lead_storage
):
    """
    Handle scenario-based conversation using dedicated handlers.

    Replaces the old scenario_engine with per-scenario handlers.
    """
    
    # Initialize all scenario handlers
    handlers = {
        'price_inquiry': PriceInquiryHandler(embedding_service, qdrant_service, lead_storage),
        'event_recommendation': EventRecommendationHandler(embedding_service, qdrant_service, lead_storage),
        'post_event_feedback': PostEventFeedbackHandler(embedding_service, qdrant_service, lead_storage),
        'exit_intent_rescue': ExitIntentRescueHandler(embedding_service, qdrant_service, lead_storage)
    }
    
    if intent == "scenario:continue":
        # Continue existing scenario
        scenario_id = scenario_state.get("active_scenario")
        
        if scenario_id not in handlers:
            return {
                "response": f"Xin lỗi, scenario '{scenario_id}' không tồn tại.",
                "mode": "error",
                "scenario_active": False
            }
        
        handler = handlers[scenario_id]
        result = handler.next_step(
            current_step=scenario_state.get("scenario_step", 1),
            user_input=user_message,
            scenario_data=scenario_state.get("scenario_data", {})
        )
    else:
        # Start new scenario
        scenario_type = intent.split(":", 1)[1]
        
        if scenario_type not in handlers:
            return {
                "response": f"Xin lỗi, scenario '{scenario_type}' không tồn tại.",
                "mode": "error",
                "scenario_active": False
            }
        
        handler = handlers[scenario_type]
        
        # Get initial_data from scenario_state (if any)
        initial_data = scenario_state.get("scenario_data", {})
        result = handler.start(initial_data=initial_data)
    
    # Update scenario state
    if result.get("end_scenario") or not result.get("scenario_active", True):
        conversation_service.clear_scenario(session_id)
        scenario_active = False
    elif result.get("new_state"):
        conversation_service.set_scenario_state(session_id, result["new_state"])
        scenario_active = True
    else:
        # new_state is None → stay at same step (e.g., validation failed)
        scenario_active = True
    
    return {
        "response": result.get("message", ""),
        "mode": "scenario",
        "scenario_active": scenario_active,
        "loading_message": result.get("loading_message")  # For UI
    }
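

# ---------------------------------------------------------------------------
# Assumed handler interface. This Protocol is a documentation sketch, not the
# real base class (the concrete handlers live in scenario_handlers/); the
# signatures and result keys below are inferred from how handle_scenario
# constructs the handlers, calls them, and consumes their results.
# ---------------------------------------------------------------------------
from typing import Any, Dict, Protocol


class ScenarioHandler(Protocol):
    def start(self, initial_data: Dict[str, Any]) -> Dict[str, Any]:
        """Begin the scenario; returns the same result shape as next_step."""
        ...

    def next_step(
        self,
        current_step: int,
        user_input: str,
        scenario_data: Dict[str, Any],
    ) -> Dict[str, Any]:
        """
        Advance the FSM one step. Result keys consumed above (all optional):
          message         -- text shown to the user
          new_state       -- dict persisted via set_scenario_state; None means
                             stay at the current step (e.g. validation failed)
          end_scenario    -- True clears the scenario
          scenario_active -- False also clears the scenario
          loading_message -- optional hint for the UI
        """
        ...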


async def handle_rag_with_resume(
    request,
    session_id,
    scenario_state,
    embedding_service,
    qdrant_service,
    conversation_service
):
    """
    Handle a RAG query mid-scenario:
    answer the question properly, then remind the user to continue the scenario.
    """
    # Query RAG with proper search
    context_used = []
    if request.use_rag:
        query_embedding = embedding_service.encode_text(request.message)
        results = qdrant_service.search(
            query_embedding=query_embedding,
            limit=request.top_k,
            score_threshold=request.score_threshold,
            ef=256
        )
        context_used = results
    
    # Build the RAG response from the retrieved context
    if context_used:
        # Format top results nicely
        top_result = context_used[0]
        text = top_result['metadata'].get('text', '')
        
        # Extract most relevant snippet (first 300 chars)
        if text:
            rag_response = text[:300].strip()
            if len(text) > 300:
                rag_response += "..."
        else:
            rag_response = "Tôi tìm thấy thông tin nhưng không thể hiển thị chi tiết."
        
        # If multiple results, add count
        if len(context_used) > 1:
            rag_response += f"\n\n(Tìm thấy {len(context_used)} kết quả liên quan)"
    else:
        rag_response = "Xin lỗi, tôi không tìm thấy thông tin về câu hỏi này trong tài liệu."
    
    # Add resume hint
    resume_hint = "\n\n---\n💬 Vậy nha! Quay lại câu hỏi trước, bạn đã quyết định chưa?"
    
    return {
        "response": rag_response + resume_hint,
        "mode": "rag_with_resume",
        "scenario_active": True,
        "context_used": context_used
    }


async def handle_pure_rag(
    request,
    session_id,
    advanced_rag,
    embedding_service,
    qdrant_service,
    tools_service,
    chat_history_collection,
    hf_token,
    conversation_service
):
    """
    Handle a pure RAG query (falls back to the existing chat_endpoint logic).
    """
    # Import existing chat_endpoint logic
    from chat_endpoint import chat_endpoint
    
    # Call existing endpoint
    result = await chat_endpoint(
        request,
        conversation_service,
        tools_service,
        advanced_rag,
        embedding_service,
        qdrant_service,
        chat_history_collection,
        hf_token
    )
    
    return {
        "response": result["response"],
        "mode": "rag",
        "context_used": result.get("context_used", [])
    }
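

# ---------------------------------------------------------------------------
# Minimal usage sketch (hypothetical). The stub classes and values below are
# illustrative stand-ins for the real services, not the project's actual test
# harness; running this file still requires the scenario_handlers package
# imported at the top. With use_rag=False and a forced "rag:with_resume"
# intent, no embedding or vector search is performed, so the stubs can stay
# trivial.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio
    from types import SimpleNamespace

    class StubConversationService:
        """In-memory stand-in for the real conversation service."""

        def __init__(self):
            self.messages = []

        def create_session(self, metadata=None, user_id=None):
            return "session-demo"

        def session_exists(self, session_id):
            return True

        def get_scenario_state(self, session_id):
            return {}  # no active scenario

        def add_message(self, session_id, role, content, metadata=None):
            self.messages.append((role, content))

    demo_request = SimpleNamespace(
        session_id=None,           # forces create_session
        user_id="demo-user",
        message="How much does a ticket cost?",
        use_rag=False,             # skip vector search on this path
        top_k=5,
        score_threshold=0.5,
    )

    # Force the mid-scenario RAG path regardless of the message content
    stub_classifier = SimpleNamespace(
        classify=lambda message, state: "rag:with_resume"
    )

    result = asyncio.run(hybrid_chat_endpoint(
        demo_request,
        StubConversationService(),
        stub_classifier,
        embedding_service=None,    # unused when use_rag=False
        qdrant_service=None,       # unused when use_rag=False
        tools_service=None,
        advanced_rag=None,
        chat_history_collection=None,
        hf_token=None,
        lead_storage=None,
    ))
    print(result["response"])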