Spaces:
Sleeping
Sleeping
File size: 5,825 Bytes
918983a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
"""Unit tests for custom tools."""
import pytest
from src.tools import (
analyze_content_for_opportunities,
create_engagement_hooks,
extract_key_findings,
format_for_platform,
generate_citations,
generate_seo_keywords,
search_industry_trends,
)
class TestFormatForPlatform:
    """Unit tests covering the format_for_platform tool."""

    @pytest.mark.unit
    def test_format_blog(self):
        """Blog output should succeed, be markdown, and include the topic."""
        outcome = format_for_platform("Test content", "blog", "AI Research")
        assert outcome["status"] == "success"
        assert outcome["platform"] == "blog"
        assert "markdown" in outcome["metadata"]["format"]
        assert "AI Research" in outcome["formatted_content"]

    @pytest.mark.unit
    def test_format_linkedin(self):
        """LinkedIn output should succeed and contain a takeaways section."""
        outcome = format_for_platform("Test content", "linkedin", "ML Topic")
        assert outcome["status"] == "success"
        assert outcome["platform"] == "linkedin"
        assert "Key Takeaways" in outcome["formatted_content"]

    @pytest.mark.unit
    def test_format_twitter(self):
        """Twitter output should succeed and be rendered as a thread."""
        outcome = format_for_platform("Test content", "twitter", "AI News")
        assert outcome["status"] == "success"
        assert outcome["platform"] == "twitter"
        assert "Thread" in outcome["formatted_content"]

    @pytest.mark.unit
    def test_invalid_platform(self):
        """An unknown platform name should yield an error response."""
        outcome = format_for_platform("Test content", "invalid", "Topic")
        assert outcome["status"] == "error"
        assert "Unsupported platform" in outcome["error_message"]
class TestGenerateCitations:
    """Unit tests covering the generate_citations tool."""

    @pytest.mark.unit
    def test_apa_citations(self):
        """A single well-formed source should produce one APA citation."""
        source_list = [
            {
                "title": "Test Paper",
                "authors": "Smith, J.",
                "link": "https://arxiv.org/abs/123",
                "year": "2024",
            }
        ]
        response = generate_citations(source_list, "apa")
        assert response["status"] == "success"
        citations = response["citations"]
        assert len(citations) == 1
        assert "Smith, J." in citations[0]
        assert "(2024)" in citations[0]

    @pytest.mark.unit
    def test_empty_sources(self):
        """Passing an empty source list should yield an error response."""
        response = generate_citations([])
        assert response["status"] == "error"
class TestExtractKeyFindings:
    """Unit tests covering the extract_key_findings tool."""

    @pytest.mark.unit
    def test_extract_findings(self):
        """Extraction should succeed and honor the max_findings cap."""
        sample = "Research found that AI improves efficiency. Studies showed significant results."
        response = extract_key_findings(sample, max_findings=2)
        assert response["status"] == "success"
        assert len(response["findings"]) <= 2

    @pytest.mark.unit
    def test_insufficient_text(self):
        """Text that is too short to analyze should yield an error response."""
        response = extract_key_findings("Too short", max_findings=5)
        assert response["status"] == "error"
class TestGenerateSeoKeywords:
    """Unit tests covering the generate_seo_keywords tool."""

    @pytest.mark.unit
    def test_keyword_generation(self):
        """Keyword buckets should be non-empty and include the persona term."""
        response = generate_seo_keywords("Machine Learning", "AI Consultant")
        assert response["status"] == "success"
        primary = response["primary_keywords"]
        assert primary
        assert response["technical_keywords"]
        assert "AI Consultant" in primary
class TestCreateEngagementHooks:
    """Unit tests covering the create_engagement_hooks tool."""

    @pytest.mark.unit
    def test_opportunities_goal(self):
        """The 'opportunities' goal should return hooks, CTAs, and echo the goal."""
        response = create_engagement_hooks("AI Agents", "opportunities")
        assert response["status"] == "success"
        assert response["opening_hooks"]
        assert response["closing_ctas"]
        assert response["goal"] == "opportunities"

    @pytest.mark.unit
    def test_discussion_goal(self):
        """The 'discussion' goal should return discussion questions."""
        response = create_engagement_hooks("NLP", "discussion")
        assert response["status"] == "success"
        assert response["discussion_questions"]
class TestAnalyzeContentForOpportunities:
    """Unit tests covering the analyze_content_for_opportunities tool."""

    @pytest.mark.unit
    def test_content_analysis(self):
        """Analysis of realistic content should report all scores in range."""
        sample = """
As an AI Consultant specializing in Machine Learning, I've built production systems
using PyTorch and TensorFlow. Let's connect to discuss how AI can solve your business problems.
Check out my GitHub for real-world implementations.
"""
        response = analyze_content_for_opportunities(sample, "AI Consultant")
        assert response["status"] == "success"
        for score_key in ("opportunity_score", "seo_score", "engagement_score"):
            assert score_key in response
        assert 0 <= response["opportunity_score"] <= 100

    @pytest.mark.unit
    def test_short_content_error(self):
        """Content below the minimum length should yield an error response."""
        response = analyze_content_for_opportunities("Too short")
        assert response["status"] == "error"
class TestSearchIndustryTrends:
    """Integration tests covering the search_industry_trends tool."""

    @pytest.mark.integration
    @pytest.mark.slow
    def test_trend_search(self):
        """A live trend search should return trends and hot skills (needs internet)."""
        response = search_industry_trends("Machine Learning", "global", max_results=3)
        assert response["status"] == "success"
        assert "trends" in response
        assert "hot_skills" in response
        assert response["hot_skills"]
|