|
|
import os, sys, subprocess, openai, json
|
|
from youtube_comment_downloader import YoutubeCommentDownloader
|
|
from tavily import TavilyClient |
|
|
from pytrends.request import TrendReq |
|
|
|
|
|
|
|
|
api_key = os.getenv("OPENAI_API_KEY") |
|
|
client = openai.OpenAI(api_key=api_key) |
|
|
tavily_api_key = os.getenv("TAVILY_API_KEY") |
|
|
|
|
|
def download_comments(video_id="9P6H2QywDjM", output_file="9P6H2QywDjM.json", limit=10, sort=1):
    # Run the youtube_comment_downloader CLI to write comments to a JSON Lines file.
    # subprocess arguments must be strings, so the numeric options are cast with str().
    subprocess.run(
        [sys.executable, "-m", "youtube_comment_downloader",
         "--youtubeid", video_id, "--output", output_file,
         "--limit", str(limit), "--sort", str(sort)],
        check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    # Keep only the fields we need from each downloaded comment.
    fields = ['text', 'votes', 'replies', 'heart', 'reply', 'time_parsed']
    with open(output_file, 'r', encoding='utf-8') as f:
        return [{k: comment[k] for k in fields}
                for comment in (json.loads(line) for line in f if line.strip())]
|
|
|
|
|
def download_comments2(video_id="9P6H2QywDjM", limit=10, sort=1):
    # Library-API alternative to download_comments: stream comments directly
    # instead of shelling out to the CLI and reading a file back.
    fields = ['text', 'votes', 'replies', 'heart', 'reply', 'time_parsed']
    comments = []
    for comment in YoutubeCommentDownloader().get_comments_from_url(
            f'https://www.youtube.com/watch?v={video_id}', sort_by=sort):
        comments.append({k: comment.get(k) for k in fields})
        if len(comments) >= limit:
            break
    return comments
|
|
|
|
|
def get_tavily_search(keyword):
    # Search Tavily for recent news about the keyword.
    tavily = TavilyClient(api_key=tavily_api_key)
    return tavily.search(
        query=f"{keyword} latest news",
        search_depth="advanced",
        max_results=5,
        include_answer=True,
    )
|
|
|
|
|
def get_recent_news(keyword):
    # Summarize the Tavily search results for the keyword with the OpenAI client.
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user",
                   "content": f"Please summarize the latest news about '{keyword}'.\n"
                              f"Content: {get_tavily_search(keyword)}"}],
        max_tokens=500,
        temperature=0.3)
    return response.choices[0].message.content
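

# TrendReq is imported above but not used in this section. The helper below is a
# sketch (an assumption, not part of the original) showing the usual pytrends flow
# for pulling related Google Trends queries for a keyword.
def get_trending_queries(keyword):
    pytrends = TrendReq(hl='en-US', tz=360)
    pytrends.build_payload([keyword], timeframe='now 7-d')
    related = pytrends.related_queries().get(keyword, {})
    top = related.get('top')
    return top['query'].tolist() if top is not None else []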
|
|
|
|
|
def summarize_video(video_id="9P6H2QywDjM"):
    # The original leaves this function as an empty stub. A minimal sketch is
    # filled in below, assuming the summary is built from the video's top
    # comments, since this module has no transcript source configured.
    comments = download_comments2(video_id=video_id, limit=30)
    text = "\n".join(c['text'] for c in comments if c.get('text'))
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user",
                   "content": f"Summarize what this YouTube video is about, "
                              f"based on its comments:\n{text}"}],
        max_tokens=500,
        temperature=0.3)
    return response.choices[0].message.content
|
|
|
|
|
def get_main_character(summarization):
    # The original leaves this function as an empty stub. A minimal sketch is
    # filled in below, assuming it should extract the main person or character
    # mentioned in the summary.
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user",
                   "content": f"Name the main person or character in the following "
                              f"summary. Answer with the name only:\n{summarization}"}],
        max_tokens=50,
        temperature=0.0)
    return response.choices[0].message.content.strip()
|
|
|
|
|
|
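# Minimal usage sketch (an assumption, not part of the original): wire the helpers
# together for one video. The video id and limits are placeholders.
if __name__ == "__main__":
    comments = download_comments2(video_id="9P6H2QywDjM", limit=10)
    print(json.dumps(comments, ensure_ascii=False, indent=2))

    summary = summarize_video("9P6H2QywDjM")
    main_character = get_main_character(summary)
    print(summary)
    print(main_character)
    print(get_recent_news(main_character))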