# OSINT Investigation Assistant - Environment Variables
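#
# Copy this file to .env and fill in the required values below
# (assumption: the app loads it with python-dotenv or an equivalent loader).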

# =============================================================================
# REQUIRED: Supabase Database Connection
# =============================================================================
# PostgreSQL connection string for your Supabase database
# Format: postgresql://[user]:[password]@[host]:[port]/[database]
# Get this from: Supabase Dashboard > Project Settings > Database > Connection String
SUPABASE_CONNECTION_STRING=postgresql://postgres:[YOUR-PASSWORD]@db.[PROJECT-REF].supabase.co:5432/postgres
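# Example with hypothetical values, for illustration only:
# SUPABASE_CONNECTION_STRING=postgresql://postgres:s3cr3t-pass@db.abcdefghijkl.supabase.co:5432/postgres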

# =============================================================================
# REQUIRED: Hugging Face API Token
# =============================================================================
# Get your token from: https://huggingface.co/settings/tokens
# This is used for Inference Providers API access
HF_TOKEN=hf_your_token_here
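# A token with read access is typically enough; if you use a fine-grained token,
# make sure it is allowed to call Inference Providers (check your Hugging Face
# token settings, as permission names may change).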

# =============================================================================
# OPTIONAL: LLM Configuration
# =============================================================================
# Model to use for generation (default: meta-llama/Llama-3.1-8B-Instruct)
# Other options:
#   - meta-llama/Meta-Llama-3-8B-Instruct
#   - Qwen/Qwen2.5-72B-Instruct
#   - mistralai/Mistral-7B-Instruct-v0.3
LLM_MODEL=meta-llama/Llama-3.1-8B-Instruct

# Temperature for LLM generation (0.0 to 1.0, default: 0.7)
# Lower = more focused/deterministic, Higher = more creative/diverse
LLM_TEMPERATURE=0.7

# Maximum tokens to generate (default: 2000)
LLM_MAX_TOKENS=2000
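
# Example override (hypothetical values): a more deterministic, shorter-output setup
# LLM_TEMPERATURE=0.2
# LLM_MAX_TOKENS=1000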

# =============================================================================
# OPTIONAL: Vector Store Configuration
# =============================================================================
# Number of tools to retrieve for context (default: 5)
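# Larger values give the model more candidate tools as context, at the cost of
# longer prompts and slower responses.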
RETRIEVAL_K=5

# Embedding model for vector search (default: sentence-transformers/all-mpnet-base-v2)
# Note: the database stores 768-dimensional embeddings, so any replacement model
# must also output 768-dim vectors (all-mpnet-base-v2 does; all-MiniLM-L6-v2, for
# example, outputs 384 and would not match the existing schema)
EMBEDDING_MODEL=sentence-transformers/all-mpnet-base-v2

# =============================================================================
# OPTIONAL: Gradio Configuration
# =============================================================================
# Port for Gradio app (default: 7860)
GRADIO_PORT=7860

# Host/interface for the Gradio server to bind to (default: 0.0.0.0, all interfaces)
GRADIO_SERVER_NAME=0.0.0.0

# Create a public Gradio share link (default: False)
GRADIO_SHARE=False
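
# Example (hypothetical values): expose the app on a different port with a public share link
# GRADIO_PORT=8080
# GRADIO_SHARE=True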