from __future__ import annotations
import ast
import json
import os
import re
import time
from typing import Any, Dict, List, Optional
import requests
from smolagents.tools import Tool
DATASETS_JSON = r'''{"amazon_product": {"dataset_id": "gd_l7q7dkf244hwjntr0", "description": "Quickly read structured amazon product data.\nRequires a valid product URL with /dp/ in it.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "amazon_product_reviews": {"dataset_id": "gd_le8e811kzy4ggddlq", "description": "Quickly read structured amazon product review data.\nRequires a valid product URL with /dp/ in it.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "amazon_product_search": {"dataset_id": "gd_lwdb4vjm1ehb499uxs", "description": "Quickly read structured amazon product search data.\nRequires a valid search keyword and amazon domain URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["keyword", "url"], "fixed_values": {"pages_to_search": "1"}}, "walmart_product": {"dataset_id": "gd_l95fol7l1ru6rlo116", "description": "Quickly read structured walmart product data.\nRequires a valid product URL with /ip/ in it.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "walmart_seller": {"dataset_id": "gd_m7ke48w81ocyu4hhz0", "description": "Quickly read structured walmart seller data.\nRequires a valid walmart seller URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "ebay_product": {"dataset_id": "gd_ltr9mjt81n0zzdk1fb", "description": "Quickly read structured ebay product data.\nRequires a valid ebay product URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "homedepot_products": {"dataset_id": "gd_lmusivh019i7g97q2n", "description": "Quickly read structured homedepot product data.\nRequires a valid homedepot product URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "zara_products": {"dataset_id": "gd_lct4vafw1tgx27d4o0", "description": "Quickly read structured zara product data.\nRequires a valid zara product URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "etsy_products": {"dataset_id": "gd_ltppk0jdv1jqz25mz", "description": "Quickly read structured etsy product data.\nRequires a valid etsy product URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "bestbuy_products": {"dataset_id": "gd_ltre1jqe1jfr7cccf", "description": "Quickly read structured bestbuy product data.\nRequires a valid bestbuy product URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "linkedin_person_profile": {"dataset_id": "gd_l1viktl72bvl7bjuj0", "description": "Quickly read structured linkedin people profile data.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "linkedin_company_profile": {"dataset_id": "gd_l1vikfnt1wgvvqz95w", "description": "Quickly read structured linkedin company profile data.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "linkedin_job_listings": {"dataset_id": "gd_lpfll7v5hcqtkxl6l", "description": "Quickly read structured linkedin job listings data.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "linkedin_posts": {"dataset_id": "gd_lyy3tktm25m4avu764", "description": "Quickly read structured linkedin posts data.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, 
"linkedin_people_search": {"dataset_id": "gd_m8d03he47z8nwb5xc", "description": "Quickly read structured linkedin people search data.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url", "first_name", "last_name"]}, "crunchbase_company": {"dataset_id": "gd_l1vijqt9jfj7olije", "description": "Quickly read structured crunchbase company data.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "zoominfo_company_profile": {"dataset_id": "gd_m0ci4a4ivx3j5l6nx", "description": "Quickly read structured ZoomInfo company profile data.\nRequires a valid ZoomInfo company URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "instagram_profiles": {"dataset_id": "gd_l1vikfch901nx3by4", "description": "Quickly read structured Instagram profile data.\nRequires a valid Instagram URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "instagram_posts": {"dataset_id": "gd_lk5ns7kz21pck8jpis", "description": "Quickly read structured Instagram post data.\nRequires a valid Instagram URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "instagram_reels": {"dataset_id": "gd_lyclm20il4r5helnj", "description": "Quickly read structured Instagram reel data.\nRequires a valid Instagram URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "instagram_comments": {"dataset_id": "gd_ltppn085pokosxh13", "description": "Quickly read structured Instagram comments data.\nRequires a valid Instagram URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "facebook_posts": {"dataset_id": "gd_lyclm1571iy3mv57zw", "description": "Quickly read structured Facebook post data.\nRequires a valid Facebook post URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "facebook_marketplace_listings": {"dataset_id": "gd_lvt9iwuh6fbcwmx1a", "description": "Quickly read structured Facebook marketplace listing data.\nRequires a valid Facebook marketplace listing URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "facebook_company_reviews": {"dataset_id": "gd_m0dtqpiu1mbcyc2g86", "description": "Quickly read structured Facebook company reviews data.\nRequires a valid Facebook company URL and number of reviews.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url", "num_of_reviews"]}, "facebook_events": {"dataset_id": "gd_m14sd0to1jz48ppm51", "description": "Quickly read structured Facebook events data.\nRequires a valid Facebook event URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "tiktok_profiles": {"dataset_id": "gd_l1villgoiiidt09ci", "description": "Quickly read structured Tiktok profiles data.\nRequires a valid Tiktok profile URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "tiktok_posts": {"dataset_id": "gd_lu702nij2f790tmv9h", "description": "Quickly read structured Tiktok post data.\nRequires a valid Tiktok post URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "tiktok_shop": {"dataset_id": "gd_m45m1u911dsa4274pi", "description": "Quickly read structured Tiktok shop data.\nRequires a valid Tiktok shop product URL.\nThis can be a cache lookup, so it can be more reliable than 
scraping.", "inputs": ["url"]}, "tiktok_comments": {"dataset_id": "gd_lkf2st302ap89utw5k", "description": "Quickly read structured Tiktok comments data.\nRequires a valid Tiktok video URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "google_maps_reviews": {"dataset_id": "gd_luzfs1dn2oa0teb81", "description": "Quickly read structured Google maps reviews data.\nRequires a valid Google maps URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url", "days_limit"], "defaults": {"days_limit": "3"}}, "google_shopping": {"dataset_id": "gd_ltppk50q18kdw67omz", "description": "Quickly read structured Google shopping data.\nRequires a valid Google shopping product URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "google_play_store": {"dataset_id": "gd_lsk382l8xei8vzm4u", "description": "Quickly read structured Google play store data.\nRequires a valid Google play store app URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "apple_app_store": {"dataset_id": "gd_lsk9ki3u2iishmwrui", "description": "Quickly read structured apple app store data.\nRequires a valid apple app store app URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "reuter_news": {"dataset_id": "gd_lyptx9h74wtlvpnfu", "description": "Quickly read structured reuter news data.\nRequires a valid reuter news report URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "github_repository_file": {"dataset_id": "gd_lyrexgxc24b3d4imjt", "description": "Quickly read structured github repository data.\nRequires a valid github repository file URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "yahoo_finance_business": {"dataset_id": "gd_lmrpz3vxmz972ghd7", "description": "Quickly read structured yahoo finance business data.\nRequires a valid yahoo finance business URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "x_posts": {"dataset_id": "gd_lwxkxvnf1cynvib9co", "description": "Quickly read structured X post data.\nRequires a valid X post URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "zillow_properties_listing": {"dataset_id": "gd_lfqkr8wm13ixtbd8f5", "description": "Quickly read structured zillow properties listing data.\nRequires a valid zillow properties listing URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "booking_hotel_listings": {"dataset_id": "gd_m5mbdl081229ln6t4a", "description": "Quickly read structured booking hotel listings data.\nRequires a valid booking hotel listing URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "youtube_profiles": {"dataset_id": "gd_lk538t2k2p1k3oos71", "description": "Quickly read structured youtube profiles data.\nRequires a valid youtube profile URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "youtube_comments": {"dataset_id": "gd_lk9q0ew71spt1mxywf", "description": "Quickly read structured youtube comments data.\nRequires a valid youtube video URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url", "num_of_comments"], "defaults": {"num_of_comments": "10"}}, "reddit_posts": {"dataset_id": 
"gd_lvz8ah06191smkebj4", "description": "Quickly read structured reddit posts data.\nRequires a valid reddit post URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}, "youtube_videos": {"dataset_id": "gd_lk56epmy2i5g7lzu0k", "description": "Quickly read structured YouTube videos data.\nRequires a valid YouTube video URL.\nThis can be a cache lookup, so it can be more reliable than scraping.", "inputs": ["url"]}}'''
# The embedded catalog above is one JSON document that wraps across source lines; strip the
# stray line breaks before parsing, since raw newlines are not valid inside JSON string literals.
DATASETS: Dict[str, Any] = json.loads(DATASETS_JSON.replace("\n", " "))
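# DATASET_FIELDS maps each dataset key to its required inputs, e.g.
# DATASET_FIELDS["amazon_product_search"] == ["keyword", "url"] and
# DATASET_FIELDS["google_maps_reviews"] == ["url", "days_limit"];
# DATASET_CHOICES is the sorted list of dataset keys.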
DATASET_FIELDS: Dict[str, List[str]] = {key: value["inputs"] for key, value in DATASETS.items()}
DATASET_CHOICES = sorted(DATASETS.keys())


class BrightDataDatasetTool(Tool):
    name = "brightdata_dataset_fetch"
    description = "Trigger a Bright Data dataset collection and poll until the snapshot is ready."
    output_type = "string"

    def __init__(self, datasets: Optional[Dict[str, Any]] = None) -> None:
        self.datasets = datasets or DATASETS
        self.inputs = {
            "dataset": {
                "type": "string",
                "description": "Dataset key",
                "enum": sorted(self.datasets.keys()),
            },
            "url": {
                "type": "string",
                "description": "URL for the dataset",
                "nullable": True,
            },
            "keyword": {
                "type": "string",
                "description": "Search keyword",
                "nullable": True,
            },
            "first_name": {
                "type": "string",
                "description": "First name",
                "nullable": True,
            },
            "last_name": {
                "type": "string",
                "description": "Last name",
                "nullable": True,
            },
            "days_limit": {
                "type": "string",
                "description": "Days limit",
                "nullable": True,
            },
            "num_of_reviews": {
                "type": "string",
                "description": "Number of reviews",
                "nullable": True,
            },
            "num_of_comments": {
                "type": "string",
                "description": "Number of comments",
                "nullable": True,
            },
        }
        super().__init__()

    def forward(
        self,
        dataset: str,
        url: Optional[str] = None,
        keyword: Optional[str] = None,
        first_name: Optional[str] = None,
        last_name: Optional[str] = None,
        days_limit: Optional[str] = None,
        num_of_reviews: Optional[str] = None,
        num_of_comments: Optional[str] = None,
    ) -> str:
        try:
            # Debug logging
            import sys
            print(f"[DEBUG forward] Received url parameter: {url!r} (type: {type(url).__name__})", file=sys.stderr)
            url = self._coerce_url_input(url)
            print(f"[DEBUG forward] After coerce: {url!r}", file=sys.stderr)
            api_token = os.getenv("BRIGHT_DATA_API_TOKEN")
            if not api_token:
                raise ValueError("BRIGHT_DATA_API_TOKEN not found in environment variables")
            if dataset not in self.datasets:
                raise ValueError(
                    f"Unknown dataset '{dataset}'. Valid options: {', '.join(sorted(self.datasets.keys()))}"
                )
            params = self._build_params(
                url=url,
                keyword=keyword,
                first_name=first_name,
                last_name=last_name,
                days_limit=days_limit,
                num_of_reviews=num_of_reviews,
                num_of_comments=num_of_comments,
            )
            payload = self._prepare_payload(dataset, params)
            snapshot_id = self._trigger_snapshot(dataset, payload, api_token)
            data = self._poll_snapshot(snapshot_id, api_token)
            return json.dumps(data, indent=2)
        except requests.exceptions.RequestException as exc:
            details = exc.response.text if getattr(exc, "response", None) is not None else ""
            return json.dumps({"error": str(exc), "details": details, "payload": payload, "coerced_url": url})
        except Exception as exc:
            return json.dumps({"error": str(exc)})

    def _build_params(
        self,
        url: Optional[str],
        keyword: Optional[str],
        first_name: Optional[str],
        last_name: Optional[str],
        days_limit: Optional[str],
        num_of_reviews: Optional[str],
        num_of_comments: Optional[str],
    ) -> Dict[str, str]:
        params: Dict[str, str] = {}
        if url is not None:
            params["url"] = url
        if keyword is not None:
            params["keyword"] = keyword
        if first_name is not None:
            params["first_name"] = first_name
        if last_name is not None:
            params["last_name"] = last_name
        if days_limit is not None:
            params["days_limit"] = days_limit
        if num_of_reviews is not None:
            params["num_of_reviews"] = num_of_reviews
        if num_of_comments is not None:
            params["num_of_comments"] = num_of_comments
        return params

    def _prepare_payload(self, dataset_key: str, params: Dict[str, str]) -> Dict[str, str]:
        config = self.datasets[dataset_key]
        payload: Dict[str, str] = {}
        defaults = config.get("defaults", {})
        fixed_values = config.get("fixed_values", {})
        for field in config["inputs"]:
            if field in params:
                payload[field] = params[field]
            elif field in defaults:
                payload[field] = defaults[field]
            else:
                raise ValueError(f"Missing required field '{field}' for dataset '{dataset_key}'")
        payload.update(fixed_values)
        return payload

    def _trigger_snapshot(self, dataset_key: str, payload: Dict[str, str], api_token: str) -> str:
        dataset_id = self.datasets[dataset_key]["dataset_id"]
        trigger_url = "https://api.brightdata.com/datasets/v3/trigger"
        response = requests.post(
            trigger_url,
            params={"dataset_id": dataset_id, "include_errors": "true"},
            json=[payload],
            headers={
                "Authorization": f"Bearer {api_token}",
                "Content-Type": "application/json",
            },
            timeout=60,
        )
        response.raise_for_status()
        snapshot_id = response.json().get("snapshot_id")
        if not snapshot_id:
            raise RuntimeError("No snapshot ID returned from Bright Data.")
        return snapshot_id

    def _poll_snapshot(self, snapshot_id: str, api_token: str) -> Any:
        snapshot_url = f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}"
        max_attempts = 600
        attempts = 0
        while attempts < max_attempts:
            response = requests.get(
                snapshot_url,
                params={"format": "json"},
                headers={"Authorization": f"Bearer {api_token}"},
                timeout=30,
            )
            # Surface HTTP errors (bad request, auth failure, etc.) instead of returning them as data.
            if response.status_code >= 400:
                response.raise_for_status()
            data = response.json()
            if isinstance(data, list):
                return data
            status = data.get("status") if isinstance(data, dict) else None
            if status not in {"running", "building"}:
                return data
            attempts += 1
            time.sleep(1)
        raise TimeoutError(f"Timeout waiting for snapshot {snapshot_id} after {max_attempts} attempts")

    def _coerce_url_input(self, raw: Optional[Any]) -> Optional[str]:
        import sys
        print(f"[DEBUG _coerce_url_input] Input: {raw!r} (type: {type(raw).__name__})", file=sys.stderr)
        if raw is None:
            return None
        if isinstance(raw, str):
            if raw.strip().startswith("{") and "orig_name" in raw:
                parsed = self._parse_file_dict_string(raw)
                if parsed:
                    raw = parsed
                else:
                    return self._extract_url_from_text(raw)
            else:
                return self._extract_url_from_text(raw)
        if isinstance(raw, dict):
            # Check if this is a Gradio FileData with a path to read
            path_value = raw.get("path")
            if isinstance(path_value, str) and os.path.isfile(path_value):
                # Read the file content (smolagents writes URL as file content)
                file_content = self._read_text_file(path_value)
                print(f"[DEBUG _coerce_url_input] File content from {path_value}: {file_content!r}", file=sys.stderr)
                if file_content:
                    extracted = self._extract_url_from_text(file_content)
                    print(f"[DEBUG _coerce_url_input] Extracted URL: {extracted!r}", file=sys.stderr)
                    if extracted:
                        return extracted
            # Check for direct url field (common in Gradio FileData from smolagents)
            url_value = raw.get("url")
            if isinstance(url_value, str):
                if url_value.startswith(("http://", "https://")):
                    return url_value
                if url_value.startswith("/gradio_api/file="):
                    # Do not parse HTML/CSS file contents; treat as no URL.
                    return None
                extracted = self._extract_url_from_text(url_value)
                if extracted:
                    return extracted
            # Fallback: check original text name fields if present
            for key in ("orig_name", "name"):
                candidate = raw.get(key)
                if isinstance(candidate, str) and candidate:
                    extracted = self._extract_url_from_text(candidate)
                    if extracted:
                        return extracted
            return None
        return None

    def _ensure_scheme(self, url: str) -> str:
        if url.startswith(("http://", "https://")):
            return url
        return f"https://{url}"

    def _parse_file_dict_string(self, value: str) -> Optional[dict]:
        try:
            parsed = ast.literal_eval(value)
            return parsed if isinstance(parsed, dict) else None
        except (ValueError, SyntaxError):
            return None

    def _read_text_file(self, path: str) -> Optional[str]:
        if not os.path.isfile(path):
            return None
        try:
            with open(path, "r", encoding="utf-8", errors="ignore") as fh:
                return fh.read()
        except OSError:
            return None

    def _extract_url_from_text(self, text: str) -> Optional[str]:
        if not text:
            return None
        # If text looks like HTML, try to extract canonical URL first
        if text.strip().startswith(("<!doctype", "<!DOCTYPE", "<html", "<HTML")):
            # Look for canonical URL in HTML
            canonical_match = re.search(r'<link\s+rel=["\']canonical["\']\s+href=["\'](https?://[^"\']+)["\']', text, re.IGNORECASE)
            if canonical_match:
                return canonical_match.group(1)
            # Look for og:url meta tag
            og_url_match = re.search(r'<meta\s+property=["\']og:url["\']\s+content=["\'](https?://[^"\']+)["\']', text, re.IGNORECASE)
            if og_url_match:
                return og_url_match.group(1)
        # direct http/https - find first URL
        match = re.search(r"(https?://[^\s\"'<>]+)", text)
        if match:
            return match.group(1)
        # domain/path without scheme
        match_domain = re.search(r"\b([A-Za-z0-9.-]+\.[A-Za-z]{2,}(?:/[^\s\"'<>]*)?)", text)
        if match_domain:
            return self._ensure_scheme(match_domain.group(1))
        return None

    def _get_gradio_app_code(self, tool_module_name: str = "tool") -> str:
        choices = sorted(self.datasets.keys())
        dataset_fields = {key: value["inputs"] for key, value in self.datasets.items()}
        return f"""import gradio as gr
import importlib

BrightDataDatasetTool = importlib.import_module("{tool_module_name}").BrightDataDatasetTool
tool = BrightDataDatasetTool()

DATASET_FIELDS = {dataset_fields}
CHOICES = {choices}


def toggle_fields(selected):
    inputs = ["url", "keyword", "first_name", "last_name", "days_limit", "num_of_reviews", "num_of_comments"]
    wanted = set(DATASET_FIELDS.get(selected, []))

    def vis(name):
        return gr.update(visible=name in wanted)

    return tuple(vis(name) for name in inputs)


def run(dataset, url, keyword, first_name, last_name, days_limit, num_of_reviews, num_of_comments):
    return tool(
        dataset=dataset,
        url=url,
        keyword=keyword,
        first_name=first_name,
        last_name=last_name,
        days_limit=days_limit,
        num_of_reviews=num_of_reviews,
        num_of_comments=num_of_comments,
    )


with gr.Blocks() as demo:
    gr.Markdown("### Bright Data dataset fetch")
    dataset = gr.Dropdown(choices=CHOICES, label="Dataset", value=CHOICES[0])
    url = gr.Textbox(label="URL", placeholder="https://...", visible=True)
    keyword = gr.Textbox(label="Keyword", visible=False)
    first_name = gr.Textbox(label="First name", visible=False)
    last_name = gr.Textbox(label="Last name", visible=False)
    days_limit = gr.Textbox(label="Days limit (e.g. 3)", visible=False)
    num_of_reviews = gr.Textbox(label="Number of reviews", visible=False)
    num_of_comments = gr.Textbox(label="Number of comments", visible=False)
    dataset.change(
        toggle_fields,
        inputs=[dataset],
        outputs=[url, keyword, first_name, last_name, days_limit, num_of_reviews, num_of_comments],
    )
    run_btn = gr.Button("Run")
    output = gr.Textbox(label="Output", lines=12)
    run_btn.click(
        run,
        inputs=[dataset, url, keyword, first_name, last_name, days_limit, num_of_reviews, num_of_comments],
        outputs=output,
    )

demo.launch()
"""