File size: 1,966 Bytes
e7251ed |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
# Stdlib imports
import json
import os
import subprocess
import sys

# Third-party imports
import openai
from youtube_comment_downloader import *
from tavily import TavilyClient
from pytrends.request import TrendReq

# pytrends = TrendReq(hl='en-US', tz=360)

# API credentials are read from the environment so keys never live in
# source control.  `os` was previously used here without being imported,
# which raised NameError at import time — fixed by importing it above.
api_key = os.getenv("OPENAI_API_KEY")
client = openai.OpenAI(api_key=api_key)
tavily_api_key = os.getenv("TAVILY_API_KEY")
def download_comments(video_id="9P6H2QywDjM", output_file="9P6H2QywDjM.json", limit=10, sort=1):
    """Download YouTube comments by invoking the youtube_comment_downloader CLI.

    The downloader runs as a subprocess and writes one JSON object per line
    to *output_file*; that file is then parsed and reduced to a fixed set of
    fields.

    Args:
        video_id: YouTube video id to fetch comments for.
        output_file: Path of the JSON-lines file the downloader writes.
        limit: Maximum number of comments to download.
        sort: Sort-order flag forwarded to the downloader CLI.

    Returns:
        A list of dicts, each with the keys 'text', 'votes', 'replies',
        'heart', 'reply' and 'time_parsed'.

    Raises:
        subprocess.CalledProcessError: if the downloader exits non-zero.
        KeyError: if a downloaded comment lacks one of the expected keys.
    """
    # Every argv entry must be a string: passing the raw ints `limit` and
    # `sort` made subprocess.run raise TypeError before this fix.
    subprocess.run(
        [
            sys.executable, "-m", "youtube_comment_downloader",
            "--youtubeid", video_id,
            "--output", output_file,
            "--limit", str(limit),
            "--sort", str(sort),
        ],
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    wanted = ('text', 'votes', 'replies', 'heart', 'reply', 'time_parsed')
    with open(output_file, 'r', encoding='utf-8') as f:
        # Skip blank lines; each non-blank line is one comment as JSON.
        return [
            {k: json.loads(line)[k] for k in wanted}
            for line in f if line.strip()
        ]
def download_comments2(video_id="9P6H2QywDjM", limit=10, sort=1):
    """Fetch up to *limit* comments for *video_id* using the library API directly.

    Unlike download_comments, this avoids the subprocess round-trip and the
    intermediate file by streaming comments from the downloader object.

    Args:
        video_id: YouTube video id to fetch comments for.
        limit: Maximum number of comments to collect.
        sort: Sort-order value forwarded as `sort_by`.

    Returns:
        A list of dicts, each with the keys 'text', 'votes', 'replies',
        'heart', 'reply' and 'time_parsed' (missing keys become None).
    """
    url = f'https://www.youtube.com/watch?v={video_id}'
    fields = ('text', 'votes', 'replies', 'heart', 'reply', 'time_parsed')
    downloader = YoutubeCommentDownloader()
    collected = []
    for raw in downloader.get_comments_from_url(url, sort_by=sort):
        collected.append({field: raw.get(field) for field in fields})
        # Stop as soon as we have enough — the stream may be much longer.
        if len(collected) >= limit:
            break
    return collected
def get_tavily_search(keyword):
    """Run an advanced Tavily web search for recent news about *keyword*.

    A fresh client is built per call from the module-level `tavily_api_key`.

    Returns:
        The raw Tavily search response (up to 5 results, answer included).
    """
    search_client = TavilyClient(api_key=tavily_api_key)
    return search_client.search(
        query=f"{keyword} ์ต์ ๋ด์ค",
        search_depth="advanced",
        max_results=5,
        include_answer=True,
    )
def get_recent_news(keyword):
    """Summarize recent news about *keyword* via gpt-4o-mini.

    The prompt is grounded on Tavily search results fetched first, then sent
    through the module-level OpenAI `client`.

    Returns:
        The model's summary text.
    """
    search_results = get_tavily_search(keyword)
    prompt = f"'{keyword}' ๊ด๋ จ ์ต์ ๋ด์ค๋ค ์์ฝํด์ฃผ์ธ์\n ๋ด์ฉ: {search_results}"
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
        temperature=0.3,
    )
    return completion.choices[0].message.content
def summarize_video(video_id="9P6H2QywDjM"):
    """Summarize the content of the given video.

    TODO: not implemented yet — currently always returns None regardless
    of *video_id*.
    """
    return None
def get_main_character(summarization):
    """Extract the main character from a video summary.

    TODO: not implemented yet — currently always returns None regardless
    of *summarization*.
    """
    return None
|