"""
Viz LLM - Gradio App
A RAG-powered assistant for data visualization guidance, powered by Jina-CLIP-v2
embeddings and research from the field of information graphics.
"""

import os
from collections import defaultdict
from datetime import datetime, timedelta

import gradio as gr
from dotenv import load_dotenv

from src.rag_pipeline import create_pipeline

# Load environment variables
load_dotenv()

# Rate limiting: track requests per user (IP-based)
# Format: {ip: [timestamp1, timestamp2, ...]}
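# Note: the tracker lives in process memory, so counts reset whenever the app restarts.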
rate_limit_tracker = defaultdict(list)
DAILY_LIMIT = 20

# Initialize the RAG pipeline
print("Initializing Graphics Design Pipeline...")
try:
    pipeline = create_pipeline(
        retrieval_k=5,
        model=os.getenv("LLM_MODEL", "meta-llama/Llama-3.1-8B-Instruct"),
        temperature=float(os.getenv("LLM_TEMPERATURE", "0.2"))
    )
    print("✓ Pipeline initialized successfully")
except Exception as e:
    print(f"✗ Error initializing pipeline: {e}")
    raise


def check_rate_limit(request: gr.Request) -> tuple[bool, int]:
    """Check if user has exceeded rate limit"""
    if request is None:
        return True, DAILY_LIMIT  # Allow if no request object

    user_id = request.client.host
    now = datetime.now()
    cutoff = now - timedelta(days=1)

    # Remove old requests (older than 24 hours)
    rate_limit_tracker[user_id] = [
        ts for ts in rate_limit_tracker[user_id] if ts > cutoff
    ]

    remaining = DAILY_LIMIT - len(rate_limit_tracker[user_id])
    if remaining <= 0:
        return False, 0

    # Add current request
    rate_limit_tracker[user_id].append(now)
    return True, remaining - 1


def recommend_stream(message: str, history: list, request: gr.Request):
    """
    Streaming version of the design recommendation function.

    Args:
        message: User's design query
        history: Chat history
        request: Gradio request object, used for rate limiting

    Yields:
        Progressively longer response strings as chunks arrive
    """
    # Check rate limit
    allowed, remaining = check_rate_limit(request)
    if not allowed:
        yield "⚠️ **Rate limit exceeded.** You've reached the maximum of 20 queries per day. Please try again in 24 hours."
        return

    try:
        response_stream = pipeline.generate_recommendations(message, stream=True)
        full_response = ""
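        # gr.ChatInterface treats each yielded value as the complete message so far,
        # so accumulate chunks and re-yield the growing string rather than yielding deltas.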
        for chunk in response_stream:
            full_response += chunk
            yield full_response

        # Add rate limit info at the end
        if remaining <= 5:
            yield full_response + f"\n\n---\n*You have {remaining} queries remaining today.*"
    except Exception as e:
        yield f"Error generating response: {str(e)}\n\nPlease check your environment variables (HF_TOKEN, SUPABASE_URL, SUPABASE_KEY) and try again."


# Minimal CSS to fix UI artifacts
custom_css = """
/* Hide retry/undo buttons that appear as artifacts */
.chatbot button[aria-label="Retry"],
.chatbot button[aria-label="Undo"] {
    display: none !important;
}
"""

# Create Gradio interface
with gr.Blocks(
    title="Viz LLM",
    css=custom_css
) as demo:
gr.Markdown("""
# π Viz LLM
Get design recommendations for creating effective data visualizations based on research and best practices.
""")

    # Main chat interface
    chatbot = gr.ChatInterface(
        fn=recommend_stream,
        type="messages",
        examples=[
            "What's the best chart type for showing trends over time?",
            "How do I create an effective infographic for complex data?",
            "What are best practices for data visualization accessibility?",
            "How should I design a dashboard for storytelling?",
            "What visualization works best for comparing categories?"
        ],
        cache_examples=False,
        api_name="recommend"
    )

    # Knowledge base section (below chat interface)
    gr.Markdown("""
    ### Knowledge Base

    This assistant draws on research papers, design principles, and examples from the field of information graphics and data visualization.

    **Credits:** Special thanks to the researchers whose work informed this model: Robert Kosara, Edward Segel, Jeffrey Heer, Matthew Conlen, John Maeda, Kennedy Elliott, Scott McCloud, and many others.

    ---

    **Usage Limits:** This service is limited to 20 queries per day per user to manage costs. Responses are optimized for English.

    <div style="text-align: center; margin-top: 20px; opacity: 0.6; font-size: 0.9em;">
        Embeddings: Jina-CLIP-v2
    </div>
    """)

# Launch configuration
if __name__ == "__main__":
    # Check for required environment variables
    required_vars = ["SUPABASE_URL", "SUPABASE_KEY", "HF_TOKEN"]
    missing_vars = [var for var in required_vars if not os.getenv(var)]

    if missing_vars:
        print(f"⚠️ Warning: Missing environment variables: {', '.join(missing_vars)}")
        print("Please set these in your .env file or as environment variables")

    # Launch the app
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_api=True
    )
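
# Example client call (untested sketch): because show_api=True and the chat endpoint is
# registered with api_name="recommend", a running instance can be queried programmatically
# via gradio_client. Exact parameter names can vary with the Gradio version:
#
#   from gradio_client import Client
#
#   client = Client("http://localhost:7860/")
#   print(client.predict(
#       message="What's the best chart type for showing trends over time?",
#       api_name="/recommend",
#   ))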