Ivenn committed on
Commit
15b7f09
·
verified ·
1 Parent(s): 8c5c24b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +96 -1
app.py CHANGED
@@ -7,6 +7,100 @@ from tools.final_answer import FinalAnswerTool
7
 
8
  from Gradio_UI import GradioUI
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
  @tool
12
 def my_custom_tool(arg1:str, arg2:int)-> str: # it's important to specify the return type
@@ -55,7 +149,8 @@ with open("prompts.yaml", 'r') as stream:
55
 
56
  agent = CodeAgent(
57
  model=model,
58
- tools=[final_answer], ## add your tools here (don't remove final answer)
 
59
  max_steps=6,
60
  verbosity_level=1,
61
  grammar=None,
 
7
 
8
  from Gradio_UI import GradioUI
9
 
10
+ from typing import List
11
+ from bs4 import BeautifulSoup
12
+ import requests
13
+ from smolagents import tool
14
+
15
@tool
def scrape_images_from_url(url: str) -> List[str]:
    """Scrapes image URLs from a given webpage.

    Args:
        url: The webpage URL to scrape from.

    Returns:
        A list of image source URLs found on the page, or a single-element
        list containing an error message on failure.
    """
    try:
        # Timeout prevents the agent from hanging indefinitely on a dead host;
        # raise_for_status surfaces HTTP errors instead of parsing an error page.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, "html.parser")
        # Collect only <img> tags that actually carry a src attribute.
        return [img["src"] for img in soup.find_all("img") if img.get("src")]
    except Exception as e:
        return [f"Error: {str(e)}"]
32
+
33
@tool
def scrape_video_links_from_url(url: str) -> List[str]:
    """Extracts video links from a webpage.

    Args:
        url: The URL to extract video tags or embeds from.

    Returns:
        A list of video or embedded video URLs, or a single-element list
        containing an error message on failure.
    """
    try:
        # Timeout + status check: fail fast and loudly on network/HTTP errors.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, "html.parser")
        video_sources = []

        # Native <video> elements: collect each nested <source src=...>.
        for video in soup.find_all("video"):
            for source in video.find_all("source"):
                if source.get("src"):
                    video_sources.append(source["src"])

        # Embedded players (YouTube / Vimeo iframes).
        for iframe in soup.find_all("iframe"):
            src = iframe.get("src")
            # BUG FIX: src is None for iframes without a src attribute; the
            # original `"youtube" in src` raised TypeError in that case and
            # aborted the entire scrape via the except clause.
            if src and ("youtube" in src or "vimeo" in src):
                video_sources.append(src)

        return video_sources
    except Exception as e:
        return [f"Error: {str(e)}"]
61
+
62
@tool
def scrape_text_content(url: str) -> str:
    """Scrapes main textual content from a webpage.

    Args:
        url: The URL to extract content from.

    Returns:
        A string of readable text from the webpage (truncated to 2000
        characters), or an error message on failure.
    """
    try:
        # Timeout + status check: don't hang forever, and don't extract
        # "readable text" from an HTTP error page.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, "html.parser")

        # Remove elements that contain no human-readable prose.
        for tag in soup(["script", "style", "noscript"]):
            tag.decompose()

        text = soup.get_text(separator=' ', strip=True)
        # Cap the payload so it doesn't blow up the agent's context window.
        return text[:2000] + "..." if len(text) > 2000 else text
    except Exception as e:
        return f"Error: {str(e)}"
84
+
85
@tool
def download_file_from_url(file_url: str, save_as: str) -> str:
    """Downloads a file from a given URL and saves it locally.

    Args:
        file_url: Direct link to the file (image/video/pdf etc.)
        save_as: Filename to save it as (e.g., "image1.jpg")

    Returns:
        A string indicating success or error.
    """
    try:
        # NOTE(review): save_as is used verbatim — a value like "../x" escapes
        # the working directory; confirm callers only pass bare filenames.
        # stream=True + chunked writes avoid loading large files fully into
        # memory; raise_for_status prevents saving an HTML error page as the
        # requested file.
        with requests.get(file_url, stream=True, timeout=30) as r:
            r.raise_for_status()
            with open(save_as, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        return f"Downloaded successfully as {save_as}"
    except Exception as e:
        return f"Download failed: {str(e)}"
103
+
104
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
105
  @tool
106
 def my_custom_tool(arg1:str, arg2:int)-> str: # it's important to specify the return type
 
149
 
150
  agent = CodeAgent(
151
  model=model,
152
+ tools=[final_answer,image_generation_tool, get_current_time_in_timezone,
153
+ download_file_from_url, scrape_text_content, scrape_video_links_from_url, scrape_images_from_url], ## add your tools here (don't remove final answer)
154
  max_steps=6,
155
  verbosity_level=1,
156
  grammar=None,