# .env.example — copy to .env and fill in real values; never commit secrets
# Graphics Guide / Design Assistant - Environment Variables
# =============================================================================
# REQUIRED: Supabase Client Connection
# =============================================================================
# Get these from: Supabase Dashboard > Project Settings > API
SUPABASE_URL=https://[PROJECT-REF].supabase.co
SUPABASE_KEY=[YOUR-ANON-KEY]
# =============================================================================
# REQUIRED: Hugging Face API Token
# =============================================================================
# Get your token from: https://huggingface.co/settings/tokens
# This is used for Inference Providers API access (LLM generation)
HF_TOKEN=hf_your_token_here
# =============================================================================
# REQUIRED: Jina AI API Token
# =============================================================================
# Get your token from: https://jina.ai/
# This is used for Jina-CLIP-v2 embeddings
JINA_API_KEY=jina_your_token_here
# =============================================================================
# REQUIRED: Datawrapper API Token
# =============================================================================
# Get your token from: https://app.datawrapper.de/account/api-tokens
# This is used for creating and publishing charts via Datawrapper API
DATAWRAPPER_ACCESS_TOKEN=your_datawrapper_token_here
# =============================================================================
# OPTIONAL: LLM Configuration
# =============================================================================
# Model to use for generation (default: meta-llama/Llama-3.1-8B-Instruct)
# Other options:
#   - meta-llama/Meta-Llama-3-8B-Instruct
#   - Qwen/Qwen2.5-72B-Instruct
#   - mistralai/Mistral-7B-Instruct-v0.3
LLM_MODEL=meta-llama/Llama-3.1-8B-Instruct
# Temperature for LLM generation (0.0 to 1.0, default: 0.7)
# Lower = more focused/deterministic, Higher = more creative/diverse
LLM_TEMPERATURE=0.7
# Maximum tokens to generate (default: 2000)
LLM_MAX_TOKENS=2000
# =============================================================================
# OPTIONAL: Vector Store Configuration
# =============================================================================
# Number of document chunks to retrieve for context (default: 5)
RETRIEVAL_K=5
# Embedding model for vector search (default: jina-clip-v2)
# Note: Database has been re-embedded with Jina-CLIP-v2 (1024 dimensions)
EMBEDDING_MODEL=jina-clip-v2
# =============================================================================
# OPTIONAL: Gradio Configuration
# =============================================================================
# Port for Gradio app (default: 7860)
GRADIO_PORT=7860
# Server name (default: 0.0.0.0 for all interfaces)
GRADIO_SERVER_NAME=0.0.0.0
# Enable Gradio sharing link (default: False)
GRADIO_SHARE=False