"""
Chatbot wrapper that integrates core chatbot with router, LLM, and context management.
"""
import os
import copy
import logging
import json
import time
import unicodedata
import re
from typing import Dict, Any, Optional
from hue_portal.core.chatbot import Chatbot as CoreChatbot, get_chatbot as get_core_chatbot
from hue_portal.chatbot.router import decide_route, IntentRoute, RouteDecision, DOCUMENT_CODE_PATTERNS
from hue_portal.chatbot.context_manager import ConversationContext
from hue_portal.chatbot.llm_integration import LLMGenerator
from hue_portal.core.models import LegalSection, LegalDocument
from hue_portal.chatbot.exact_match_cache import ExactMatchCache
from hue_portal.chatbot.slow_path_handler import SlowPathHandler
logger = logging.getLogger(__name__)
EXACT_MATCH_CACHE = ExactMatchCache(
max_size=int(os.environ.get("EXACT_MATCH_CACHE_MAX", "256")),
ttl_seconds=int(os.environ.get("EXACT_MATCH_CACHE_TTL_SECONDS", "43200")),
)
DEBUG_LOG_PATH = "/Users/davidtran/Downloads/TryHarDemNayProject/.cursor/debug.log"
DEBUG_SESSION_ID = "debug-session"
DEBUG_RUN_ID = "pre-fix"
#region agent log
def _agent_debug_log(hypothesis_id: str, location: str, message: str, data: Dict[str, Any]):
    """Append one structured debug event to the agent log file (best-effort).

    Every failure (unwritable path, non-serializable ``data``, ...) is
    swallowed on purpose: debug logging must never break request handling.
    """
    record = {
        "sessionId": DEBUG_SESSION_ID,
        "runId": DEBUG_RUN_ID,
        "hypothesisId": hypothesis_id,
        "location": location,
        "message": message,
        "data": data,
        "timestamp": int(time.time() * 1000),
    }
    try:
        line = json.dumps(record, ensure_ascii=False)
        with open(DEBUG_LOG_PATH, "a", encoding="utf-8") as log_file:
            log_file.write(line + "\n")
    except Exception:
        pass
#endregion
class Chatbot(CoreChatbot):
"""
Enhanced chatbot with session support, routing, and RAG capabilities.
"""
    def __init__(self):
        """Initialize the enhanced chatbot: core state, LLM generator, follow-up cache."""
        super().__init__()
        self.llm_generator = None
        # In-memory cache: most recent legal answer per session, used to serve
        # follow-up questions quickly without re-running the search pipeline.
        self._last_legal_answer_by_session: Dict[str, str] = {}
        self._initialize_llm()
def _initialize_llm(self):
"""Initialize LLM generator if needed."""
try:
self.llm_generator = LLMGenerator()
except Exception as e:
print(f"⚠️ LLM generator not available: {e}")
self.llm_generator = None
def generate_response(self, query: str, session_id: Optional[str] = None) -> Dict[str, Any]:
"""
Generate chatbot response with session support and routing.
Args:
query: User query string
session_id: Optional session ID for conversation context
Returns:
Response dictionary with message, intent, results, etc.
"""
query = query.strip()
# Save user message to context
if session_id:
try:
ConversationContext.add_message(
session_id=session_id,
role="user",
content=query
)
except Exception as e:
print(f"⚠️ Failed to save user message: {e}")
session_metadata: Dict[str, Any] = {}
selected_doc_code: Optional[str] = None
if session_id:
try:
session_metadata = ConversationContext.get_session_metadata(session_id)
selected_doc_code = session_metadata.get("selected_document_code")
except Exception:
session_metadata = {}
# Classify intent
intent, confidence = self.classify_intent(query)
# Router decision (using raw intent)
route_decision = decide_route(query, intent, confidence)
# Use forced intent if router suggests it
if route_decision.forced_intent:
intent = route_decision.forced_intent
# Nếu session đã có selected_document_code (user đã chọn văn bản ở wizard)
# thì luôn ép intent về search_legal và route sang SEARCH,
# tránh bị kẹt ở nhánh small-talk/off-topic do nội dung câu hỏi ban đầu.
if selected_doc_code:
intent = "search_legal"
route_decision.route = IntentRoute.SEARCH
route_decision.forced_intent = "search_legal"
# Map tất cả intent tra cứu nội dung về search_legal
domain_search_intents = {
"search_fine",
"search_procedure",
"search_office",
"search_advisory",
"general_query",
}
if intent in domain_search_intents:
intent = "search_legal"
route_decision.route = IntentRoute.SEARCH
route_decision.forced_intent = "search_legal"
# Instant exact-match cache lookup
# ⚠️ Tắt cache cho intent search_legal để luôn đi qua wizard / Slow Path,
# tránh trả lại các câu trả lời cũ không có options.
cached_response = None
if intent != "search_legal":
cached_response = EXACT_MATCH_CACHE.get(query, intent)
if cached_response:
cached_response["_cache"] = "exact_match"
cached_response["_source"] = cached_response.get("_source", "cache")
cached_response.setdefault("routing", route_decision.route.value)
logger.info(
"[CACHE] Hit for intent=%s route=%s source=%s",
intent,
route_decision.route.value,
cached_response["_source"],
)
if session_id:
cached_response["session_id"] = session_id
if session_id:
try:
ConversationContext.add_message(
session_id=session_id,
role="bot",
content=cached_response.get("message", ""),
intent=intent,
)
except Exception as e:
print(f"⚠️ Failed to save cached bot message: {e}")
return cached_response
# Wizard / option-first ngay tại chatbot layer:
# Multi-stage wizard flow:
# Stage 1: Choose document (if no document selected)
# Stage 2: Choose topic/section (if document selected but no topic)
# Stage 3: Choose detail (if topic selected, ask for more details)
# Final: Answer (when user says "Không" or after detail selection)
has_doc_code_in_query = self._query_has_document_code(query)
wizard_stage = session_metadata.get("wizard_stage") if session_metadata else None
selected_topic = session_metadata.get("selected_topic") if session_metadata else None
wizard_depth = session_metadata.get("wizard_depth", 0) if session_metadata else 0
print(f"[WIZARD] Chatbot layer check - intent={intent}, wizard_stage={wizard_stage}, selected_doc_code={selected_doc_code}, selected_topic={selected_topic}, has_doc_code_in_query={has_doc_code_in_query}, query='{query[:50]}'")
# Reset wizard state if new query doesn't have document code and wizard_stage is "answer"
# This handles the case where user asks a new question after completing a previous wizard flow
# CRITICAL: Check conditions and reset BEFORE Stage 1 check
should_reset = (
intent == "search_legal"
and not has_doc_code_in_query
and wizard_stage == "answer"
)
print(f"[WIZARD] Reset check - intent={intent}, has_doc_code={has_doc_code_in_query}, wizard_stage={wizard_stage}, should_reset={should_reset}") # v2.0-fix
if should_reset:
print("[WIZARD] 🔄 New query detected, resetting wizard state for fresh start")
selected_doc_code = None
selected_topic = None
wizard_stage = None
# Update session metadata FIRST before continuing
if session_id:
try:
ConversationContext.update_session_metadata(
session_id,
{
"selected_document_code": None,
"selected_topic": None,
"wizard_stage": None,
"wizard_depth": 0,
}
)
print("[WIZARD] ✅ Wizard state reset in session metadata")
except Exception as e:
print(f"⚠️ Failed to reset wizard state: {e}")
# Also update session_metadata dict for current function scope
if session_metadata:
session_metadata["selected_document_code"] = None
session_metadata["selected_topic"] = None
session_metadata["wizard_stage"] = None
session_metadata["wizard_depth"] = 0
# Stage 1: Choose document (if no document selected and no code in query)
# Use Query Rewrite Strategy from slow_path_handler instead of old LLM suggestions
if intent == "search_legal" and not selected_doc_code and not has_doc_code_in_query:
print("[WIZARD] ✅ Stage 1: Using Query Rewrite Strategy from slow_path_handler")
# Delegate to slow_path_handler which has Query Rewrite Strategy
slow_handler = SlowPathHandler()
response = slow_handler.handle(
query=query,
intent=intent,
session_id=session_id,
selected_document_code=None, # No document selected yet
)
# Ensure response has wizard metadata
if response:
response.setdefault("wizard_stage", "choose_document")
response.setdefault("routing", "legal_wizard")
response.setdefault("type", "options")
# Update session metadata
if session_id:
try:
ConversationContext.update_session_metadata(
session_id,
{
"wizard_stage": "choose_document",
"wizard_depth": 1,
}
)
except Exception as e:
logger.warning("[WIZARD] Failed to update session metadata: %s", e)
# Save bot message to context
if session_id:
try:
bot_message = response.get("message") or response.get("clarification", {}).get("message", "")
ConversationContext.add_message(
session_id=session_id,
role="bot",
content=bot_message,
intent=intent,
)
except Exception as e:
print(f"⚠️ Failed to save wizard bot message: {e}")
return response if response else {
"message": "Xin lỗi, có lỗi xảy ra khi tìm kiếm văn bản.",
"intent": intent,
"results": [],
"count": 0,
}
# Stage 2: Choose topic/section (if document selected but no topic yet)
# Skip if wizard_stage is already "answer" (user wants final answer)
if intent == "search_legal" and selected_doc_code and not selected_topic and not has_doc_code_in_query and wizard_stage != "answer":
print("[WIZARD] ✅ Stage 2 triggered: Choose topic/section")
# Get document title
document_title = selected_doc_code
try:
doc = LegalDocument.objects.filter(code=selected_doc_code).first()
if doc:
document_title = getattr(doc, "title", "") or selected_doc_code
except Exception:
pass
# Extract keywords from query for parallel search
search_keywords_from_query = []
if self.llm_generator:
try:
conversation_context = None
if session_id:
try:
recent_messages = ConversationContext.get_recent_messages(session_id, limit=5)
conversation_context = [
{"role": msg.role, "content": msg.content}
for msg in recent_messages
]
except Exception:
pass
search_keywords_from_query = self.llm_generator.extract_search_keywords(
query=query,
selected_options=None, # No options selected yet
conversation_context=conversation_context,
)
print(f"[WIZARD] Extracted keywords: {search_keywords_from_query[:5]}")
except Exception as exc:
logger.warning("[WIZARD] Keyword extraction failed: %s", exc)
# Fallback to simple keyword extraction
if not search_keywords_from_query:
search_keywords_from_query = self.chatbot.extract_keywords(query)
# Trigger parallel search for document (if not already done)
slow_handler = SlowPathHandler()
prefetched_results = slow_handler._get_prefetched_results(session_id, "document_results")
if not prefetched_results:
# Trigger parallel search now
slow_handler._parallel_search_prepare(
document_code=selected_doc_code,
keywords=search_keywords_from_query,
session_id=session_id,
)
logger.info("[WIZARD] Triggered parallel search for document")
# Get prefetched search results from parallel search (if available)
prefetched_results = slow_handler._get_prefetched_results(session_id, "document_results")
search_results = []
if prefetched_results:
search_results = prefetched_results.get("results", [])
logger.info("[WIZARD] Using prefetched results: %d sections", len(search_results))
else:
# Fallback: search synchronously if prefetch not ready
search_result = slow_handler._search_by_intent(
intent="search_legal",
query=query,
limit=20,
preferred_document_code=selected_doc_code.upper(),
)
search_results = search_result.get("results", [])
logger.info("[WIZARD] Fallback search: %d sections", len(search_results))
# Extract keywords for topic options
conversation_context = None
if session_id:
try:
recent_messages = ConversationContext.get_recent_messages(session_id, limit=5)
conversation_context = [
{"role": msg.role, "content": msg.content}
for msg in recent_messages
]
except Exception:
pass
# Use LLM to generate topic options
topic_options = []
intro_message = f"Bạn muốn tìm điều khoản/chủ đề nào cụ thể trong {document_title}?"
search_keywords = []
if self.llm_generator:
try:
llm_payload = self.llm_generator.suggest_topic_options(
query=query,
document_code=selected_doc_code,
document_title=document_title,
search_results=search_results[:10], # Top 10 for options
conversation_context=conversation_context,
max_options=3,
)
if llm_payload:
intro_message = llm_payload.get("message") or intro_message
topic_options = llm_payload.get("options", [])
search_keywords = llm_payload.get("search_keywords", [])
print(f"[WIZARD] ✅ LLM generated {len(topic_options)} topic options")
except Exception as exc:
logger.warning("[WIZARD] LLM topic suggestion failed: %s", exc)
# Fallback: build options from search results
if not topic_options and search_results:
for result in search_results[:3]:
data = result.get("data", {})
section_title = data.get("section_title") or data.get("title") or ""
article = data.get("article") or data.get("article_number") or ""
if section_title or article:
topic_options.append({
"title": section_title or article,
"article": article,
"reason": data.get("excerpt", "")[:100] or "",
"keywords": [],
})
# If still no options, create generic ones
if not topic_options:
topic_options = [
{
"title": "Các điều khoản liên quan",
"article": "",
"reason": "Tìm kiếm các điều khoản liên quan đến câu hỏi của bạn",
"keywords": [],
}
]
# Trigger parallel search for selected keywords
if search_keywords:
slow_handler._parallel_search_topic(
document_code=selected_doc_code,
topic_keywords=search_keywords,
session_id=session_id,
)
response = {
"message": intro_message,
"intent": intent,
"confidence": confidence,
"results": [],
"count": 0,
"routing": "legal_wizard",
"type": "options",
"wizard_stage": "choose_topic",
"clarification": {
"message": intro_message,
"options": topic_options,
},
"options": topic_options,
}
if session_id:
response["session_id"] = session_id
try:
ConversationContext.add_message(
session_id=session_id,
role="bot",
content=intro_message,
intent=intent,
)
ConversationContext.update_session_metadata(
session_id,
{
"wizard_stage": "choose_topic",
},
)
except Exception as e:
print(f"⚠️ Failed to save Stage 2 bot message: {e}")
return response
# Stage 3: Choose detail (if topic selected, ask if user wants more details)
# Skip if wizard_stage is already "answer" (user wants final answer)
if intent == "search_legal" and selected_doc_code and selected_topic and wizard_stage != "answer":
# Check if user is asking for more details or saying "Không"
query_lower = query.lower()
wants_more = any(kw in query_lower for kw in ["có", "cần", "muốn", "thêm", "chi tiết", "nữa"])
says_no = any(kw in query_lower for kw in ["không", "khong", "thôi", "đủ", "xong"])
if says_no or wizard_depth >= 2:
# User doesn't want more details or already asked twice - proceed to final answer
print("[WIZARD] ✅ User wants final answer, proceeding to slow_path")
# Clear wizard stage to allow normal answer flow
if session_id:
try:
ConversationContext.update_session_metadata(
session_id,
{
"wizard_stage": "answer",
},
)
except Exception:
pass
elif wants_more or wizard_depth == 0:
# User wants more details - generate detail options
print("[WIZARD] ✅ Stage 3 triggered: Choose detail")
# Get conversation context
conversation_context = None
if session_id:
try:
recent_messages = ConversationContext.get_recent_messages(session_id, limit=5)
conversation_context = [
{"role": msg.role, "content": msg.content}
for msg in recent_messages
]
except Exception:
pass
# Use LLM to generate detail options
detail_options = []
intro_message = "Bạn muốn chi tiết gì cho chủ đề này nữa không?"
search_keywords = []
if self.llm_generator:
try:
llm_payload = self.llm_generator.suggest_detail_options(
query=query,
selected_document_code=selected_doc_code,
selected_topic=selected_topic,
conversation_context=conversation_context,
max_options=3,
)
if llm_payload:
intro_message = llm_payload.get("message") or intro_message
detail_options = llm_payload.get("options", [])
search_keywords = llm_payload.get("search_keywords", [])
print(f"[WIZARD] ✅ LLM generated {len(detail_options)} detail options")
except Exception as exc:
logger.warning("[WIZARD] LLM detail suggestion failed: %s", exc)
# Fallback options
if not detail_options:
detail_options = [
{
"title": "Thẩm quyền xử lý",
"reason": "Tìm hiểu về thẩm quyền xử lý kỷ luật",
"keywords": ["thẩm quyền", "xử lý"],
},
{
"title": "Trình tự, thủ tục",
"reason": "Tìm hiểu về trình tự, thủ tục xử lý",
"keywords": ["trình tự", "thủ tục"],
},
{
"title": "Hình thức kỷ luật",
"reason": "Tìm hiểu về các hình thức kỷ luật",
"keywords": ["hình thức", "kỷ luật"],
},
]
# Trigger parallel search for detail keywords
if search_keywords and session_id:
slow_handler = SlowPathHandler()
slow_handler._parallel_search_topic(
document_code=selected_doc_code,
topic_keywords=search_keywords,
session_id=session_id,
)
response = {
"message": intro_message,
"intent": intent,
"confidence": confidence,
"results": [],
"count": 0,
"routing": "legal_wizard",
"type": "options",
"wizard_stage": "choose_detail",
"clarification": {
"message": intro_message,
"options": detail_options,
},
"options": detail_options,
}
if session_id:
response["session_id"] = session_id
try:
ConversationContext.add_message(
session_id=session_id,
role="bot",
content=intro_message,
intent=intent,
)
ConversationContext.update_session_metadata(
session_id,
{
"wizard_stage": "choose_detail",
"wizard_depth": wizard_depth + 1,
},
)
except Exception as e:
print(f"⚠️ Failed to save Stage 3 bot message: {e}")
return response
# Always send legal intent through Slow Path RAG
if intent == "search_legal":
response = self._run_slow_path_legal(
query,
intent,
session_id,
route_decision,
session_metadata=session_metadata,
)
elif route_decision.route == IntentRoute.GREETING:
response = {
"message": "Xin chào! Tôi có thể giúp bạn tra cứu các thông tin liên quan về các văn bản quy định pháp luật về xử lí kỷ luật cán bộ đảng viên",
"intent": "greeting",
"confidence": 0.9,
"results": [],
"count": 0,
"routing": "greeting"
}
elif route_decision.route == IntentRoute.SMALL_TALK:
# Xử lý follow-up questions trong context
follow_up_keywords = [
"có điều khoản",
"liên quan",
"khác",
"nữa",
"thêm",
"tóm tắt",
"tải file",
"tải",
"download",
]
query_lower = query.lower()
is_follow_up = any(kw in query_lower for kw in follow_up_keywords)
#region agent log
_agent_debug_log(
hypothesis_id="H2",
location="chatbot.py:119",
message="follow_up_detection",
data={
"query": query,
"is_follow_up": is_follow_up,
"session_id_present": bool(session_id),
},
)
#endregion
response = None
# Nếu là follow-up question, ưu tiên dùng context legal gần nhất trong session
if is_follow_up and session_id:
previous_answer = self._last_legal_answer_by_session.get(session_id, "")
# Nếu chưa có trong cache in-memory, fallback sang ConversationContext DB
if not previous_answer:
try:
recent_messages = ConversationContext.get_recent_messages(session_id, limit=5)
for msg in reversed(recent_messages):
if msg.role == "bot" and msg.intent == "search_legal":
previous_answer = msg.content or ""
break
except Exception as e:
logger.warning("[FOLLOW_UP] Failed to load context from DB: %s", e)
if previous_answer:
if "tóm tắt" in query_lower:
summary_message = None
if getattr(self, "llm_generator", None):
try:
prompt = (
"Bạn là chuyên gia pháp luật. Hãy tóm tắt ngắn gọn, rõ ràng nội dung chính của đoạn sau "
"(giữ nguyên tinh thần và các mức, tỷ lệ, hình thức kỷ luật nếu có):\n\n"
f"{previous_answer}"
)
summary_message = self.llm_generator.generate_answer(
prompt,
context=None,
documents=None,
)
except Exception as e:
logger.warning("[FOLLOW_UP] LLM summary failed: %s", e)
if summary_message:
message = summary_message
else:
content_preview = (
previous_answer[:400] + "..." if len(previous_answer) > 400 else previous_answer
)
message = "Tóm tắt nội dung chính của điều khoản trước đó:\n\n" f"{content_preview}"
elif "tải" in query_lower:
message = (
"Bạn có thể tải file gốc của văn bản tại mục Quản lý văn bản trên hệ thống "
"hoặc liên hệ cán bộ phụ trách để được cung cấp bản đầy đủ."
)
else:
message = (
"Trong câu trả lời trước, tôi đã trích dẫn điều khoản chính liên quan. "
"Nếu bạn cần điều khoản khác (ví dụ về thẩm quyền, trình tự, hồ sơ), "
"hãy nêu rõ nội dung muốn tìm để tôi trợ giúp nhanh nhất."
)
response = {
"message": message,
"intent": "search_legal",
"confidence": 0.85,
"results": [],
"count": 0,
"routing": "follow_up",
}
# Nếu không phải follow-up hoặc không tìm thấy context, trả về message thân thiện
if response is None:
#region agent log
_agent_debug_log(
hypothesis_id="H1",
location="chatbot.py:193",
message="follow_up_fallback",
data={
"is_follow_up": is_follow_up,
"session_id_present": bool(session_id),
},
)
#endregion
# Detect off-topic questions (nấu ăn, chả trứng, etc.)
off_topic_keywords = ["nấu", "nau", "chả trứng", "cha trung", "món ăn", "mon an", "công thức", "cong thuc",
"cách làm", "cach lam", "đổ chả", "do cha", "trứng", "trung"]
is_off_topic = any(kw in query_lower for kw in off_topic_keywords)
if is_off_topic:
# Ngoài phạm vi → từ chối lịch sự + gợi ý wizard với các văn bản pháp lý chính
intro_message = (
"Xin lỗi, tôi là chatbot chuyên về tra cứu các văn bản quy định pháp luật "
"về xử lí kỷ luật cán bộ đảng viên của Phòng Thanh Tra - Công An Thành Phố Huế.\n\n"
"Tôi không thể trả lời các câu hỏi về nấu ăn, công thức nấu ăn hay các chủ đề khác ngoài phạm vi pháp luật.\n\n"
"Tuy nhiên, tôi có thể giúp bạn tra cứu một số văn bản pháp luật quan trọng. "
"Bạn hãy chọn văn bản muốn xem trước:"
)
clarification_options = [
{
"code": "264-QD-TW",
"title": "Quyết định 264-QĐ/TW về kỷ luật đảng viên",
"reason": "Quy định chung về xử lý kỷ luật đối với đảng viên vi phạm.",
},
{
"code": "QD-69-TW",
"title": "Quy định 69-QĐ/TW về kỷ luật tổ chức đảng, đảng viên",
"reason": "Quy định chi tiết về các hành vi vi phạm và hình thức kỷ luật.",
},
{
"code": "TT-02-CAND",
"title": "Thông tư 02/2021/TT-BCA về điều lệnh CAND",
"reason": "Quy định về điều lệnh, lễ tiết, tác phong trong CAND.",
},
{
"code": "__other__",
"title": "Khác",
"reason": "Tôi muốn hỏi văn bản hoặc chủ đề pháp luật khác.",
},
]
response = {
"message": intro_message,
"intent": intent,
"confidence": confidence,
"results": [],
"count": 0,
"routing": "small_talk_offtopic_wizard",
"type": "options",
"wizard_stage": "choose_document",
"clarification": {
"message": intro_message,
"options": clarification_options,
},
"options": clarification_options,
}
else:
message = (
"Tôi có thể giúp bạn tra cứu các văn bản quy định pháp luật về xử lí kỷ luật cán bộ đảng viên. "
"Bạn muốn tìm gì?"
)
response = {
"message": message,
"intent": intent,
"confidence": confidence,
"results": [],
"count": 0,
"routing": "small_talk",
}
else: # IntentRoute.SEARCH
# Use core chatbot search for other intents
search_result = self.search_by_intent(intent, query, limit=5)
# Generate response message
if search_result["count"] > 0:
template = self._get_response_template(intent)
message = template.format(
count=search_result["count"],
query=query
)
else:
message = f"Xin lỗi, tôi không tìm thấy thông tin liên quan đến '{query}'. Vui lòng thử lại với từ khóa khác."
response = {
"message": message,
"intent": intent,
"confidence": confidence,
"results": search_result["results"],
"count": search_result["count"],
"routing": "search"
}
if session_id and intent == "search_legal":
try:
self._last_legal_answer_by_session[session_id] = response.get("message", "") or ""
except Exception:
pass
# Đánh dấu loại payload cho frontend: answer hay options (wizard)
if response.get("clarification") or response.get("type") == "options":
response.setdefault("type", "options")
else:
response.setdefault("type", "answer")
# Add session_id
if session_id:
response["session_id"] = session_id
# Save bot response to context
if session_id:
try:
bot_message = response.get("message") or response.get("clarification", {}).get("message", "")
ConversationContext.add_message(
session_id=session_id,
role="bot",
content=bot_message,
intent=intent
)
except Exception as e:
print(f"⚠️ Failed to save bot message: {e}")
self._cache_response(query, intent, response)
return response
def _run_slow_path_legal(
self,
query: str,
intent: str,
session_id: Optional[str],
route_decision: RouteDecision,
session_metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Execute Slow Path legal handler (with fast-path + structured output)."""
slow_handler = SlowPathHandler()
selected_doc_code = None
if session_metadata:
selected_doc_code = session_metadata.get("selected_document_code")
response = slow_handler.handle(
query,
intent,
session_id,
selected_document_code=selected_doc_code,
)
response.setdefault("routing", "slow_path")
response.setdefault(
"_routing",
{
"path": "slow_path",
"method": getattr(route_decision, "rationale", "router"),
"confidence": route_decision.confidence,
},
)
# Cập nhật metadata wizard đơn giản: nếu đang hỏi người dùng chọn văn bản
# thì đánh dấu stage = choose_document; nếu đã trả lời thì stage = answer.
if session_id:
try:
if response.get("clarification") or response.get("type") == "options":
ConversationContext.update_session_metadata(
session_id,
{
"wizard_stage": "choose_document",
},
)
else:
ConversationContext.update_session_metadata(
session_id,
{
"wizard_stage": "answer",
"last_answer_type": response.get("intent"),
},
)
except Exception:
# Không để lỗi metadata làm hỏng luồng trả lời chính
pass
logger.info(
"[LEGAL] Slow path response - source=%s count=%s routing=%s",
response.get("_source"),
response.get("count"),
response.get("_routing"),
)
return response
def _cache_response(self, query: str, intent: str, response: Dict[str, Any]) -> None:
"""Store response in exact-match cache if eligible."""
if not self._should_cache_response(intent, response):
logger.debug(
"[CACHE] Skip storing response (intent=%s, results=%s)",
intent,
response.get("count"),
)
return
payload = copy.deepcopy(response)
payload.pop("session_id", None)
payload.pop("_cache", None)
EXACT_MATCH_CACHE.set(query, intent, payload)
logger.info(
"[CACHE] Stored response for intent=%s (results=%s, source=%s)",
intent,
response.get("count"),
response.get("_source"),
)
def _should_cache_response(self, intent: str, response: Dict[str, Any]) -> bool:
"""Determine if response should be cached for exact matches."""
if response.get("clarification"):
return False
cacheable_intents = {
"search_legal",
"search_fine",
"search_procedure",
"search_office",
"search_advisory",
}
if intent not in cacheable_intents:
return False
if response.get("count", 0) <= 0:
return False
if not response.get("results"):
return False
return True
def _query_has_document_code(self, query: str) -> bool:
"""
Check if the raw query string explicitly contains a known document code pattern
(ví dụ: '264/QĐ-TW', 'QD-69-TW', 'TT-02-CAND').
"""
if not query:
return False
# Remove accents để regex đơn giản hơn
normalized = unicodedata.normalize("NFD", query)
normalized = "".join(ch for ch in normalized if unicodedata.category(ch) != "Mn")
normalized = normalized.upper()
for pattern in DOCUMENT_CODE_PATTERNS:
try:
if re.search(pattern, normalized):
return True
except re.error:
continue
return False
    def _handle_legal_query(self, query: str, session_id: Optional[str] = None) -> Dict[str, Any]:
        """
        Handle legal document queries with RAG pipeline.

        Searches LegalSection rows with the ML backend, then builds the answer
        message either with the structured LLM generator (when configured) or
        with a template fallback.

        Args:
            query: User query
            session_id: Optional session ID (currently unused in this path)
        Returns:
            Response dictionary
        """
        # Search legal sections (select_related avoids per-row document queries).
        qs = LegalSection.objects.select_related("document").all()
        text_fields = ["section_title", "section_code", "content"]
        legal_sections = self._search_legal_sections(qs, query, text_fields, top_k=5)
        if not legal_sections:
            # No matches: apologize rather than answer without sources.
            return {
                "message": f"Xin lỗi, tôi không tìm thấy văn bản pháp luật liên quan đến '{query}'.",
                "intent": "search_legal",
                "confidence": 0.5,
                "results": [],
                "count": 0,
                "routing": "search"
            }
        # Try LLM generation if available; fall back to template formatting on error.
        if self.llm_generator and self.llm_generator.provider != "none":
            try:
                answer = self.llm_generator.generate_structured_legal_answer(
                    query=query,
                    documents=legal_sections,
                    max_attempts=2
                )
                message = answer.summary
            except Exception as e:
                print(f"⚠️ LLM generation failed: {e}")
                message = self._format_legal_results(legal_sections, query)
        else:
            # Template-based response
            message = self._format_legal_results(legal_sections, query)
        # Format results for the API payload (content truncated to 500 chars).
        results = []
        for section in legal_sections:
            doc = section.document
            results.append({
                "type": "legal",
                "data": {
                    "id": section.id,
                    "section_code": section.section_code,
                    "section_title": section.section_title or "",
                    "content": section.content[:500] + "..." if len(section.content) > 500 else section.content,
                    "excerpt": section.excerpt or "",
                    "document_code": doc.code if doc else "",
                    "document_title": doc.title if doc else "",
                    "page_start": section.page_start,
                    "page_end": section.page_end,
                    "download_url": f"/api/legal-documents/{doc.id}/download/" if doc and doc.id else None,
                    "source_url": doc.source_url if doc else ""
                }
            })
        return {
            "message": message,
            "intent": "search_legal",
            "confidence": 0.9,
            "results": results,
            "count": len(results),
            "routing": "search"
        }
def _search_legal_sections(self, qs, query: str, text_fields: list, top_k: int = 5):
"""Search legal sections using ML search."""
from hue_portal.core.search_ml import search_with_ml
return search_with_ml(qs, query, text_fields, top_k=top_k, min_score=0.1)
def _format_legal_results(self, sections, query: str) -> str:
"""Format legal sections into response message."""
if not sections:
return f"Xin lỗi, tôi không tìm thấy văn bản pháp luật liên quan đến '{query}'."
doc = sections[0].document
doc_info = f"{doc.code}: {doc.title}" if doc else "Văn bản pháp luật"
message = f"Tôi tìm thấy {len(sections)} điều khoản liên quan đến '{query}' trong {doc_info}:\n\n"
for i, section in enumerate(sections[:3], 1):
section_text = f"{section.section_code}: {section.section_title or ''}\n"
section_text += section.content[:200] + "..." if len(section.content) > 200 else section.content
message += f"{i}. {section_text}\n\n"
if len(sections) > 3:
message += f"... và {len(sections) - 3} điều khoản khác."
return message
def _get_response_template(self, intent: str) -> str:
"""Get response template for intent."""
templates = {
"search_fine": "Tôi tìm thấy {count} mức phạt liên quan đến '{query}':",
"search_procedure": "Tôi tìm thấy {count} thủ tục liên quan đến '{query}':",
"search_office": "Tôi tìm thấy {count} đơn vị liên quan đến '{query}':",
"search_advisory": "Tôi tìm thấy {count} cảnh báo liên quan đến '{query}':",
}
return templates.get(intent, "Tôi tìm thấy {count} kết quả liên quan đến '{query}':")
# Global chatbot instance (lazily created; shared across the process)
_chatbot_instance = None
def get_chatbot() -> Chatbot:
    """Get or create enhanced chatbot instance.

    Returns the module-level singleton, constructing it on first call.
    """
    global _chatbot_instance
    if _chatbot_instance is None:
        _chatbot_instance = Chatbot()
    return _chatbot_instance