broadfield-dev committed on
Commit 2ba24d4 · verified · 1 Parent(s): 48124ac

Update app.py

Files changed (1)
  1. app.py +88 -139
app.py CHANGED
@@ -1,17 +1,14 @@
-# app.py - Your Flask API Server
-
 import os
 import re
 import urllib.parse
 import asyncio
 from typing import Dict, Optional
 from itertools import cycle
-#os.system("playwright install")
-os.system("playwright install")
+
 # Install playwright if not present
-'''if os.getenv("PLAYWRIGHT_INSTALL_RUN", "false").lower() != "true":
-    os.system("playwright install --with-deps")
-    os.environ["PLAYWRIGHT_INSTALL_RUN"] = "true"'''
+if os.getenv("PLAYWRIGHT_INSTALL_RUN", "false").lower() != "true":
+    os.system("playwright install")
+    os.environ["PLAYWRIGHT_INSTALL_RUN"] = "true"
 
 from flask import Flask, request, jsonify
 from bs4 import BeautifulSoup, NavigableString
@@ -20,12 +17,12 @@ from playwright.async_api import async_playwright
 # --- Flask App Initialization ---
 app = Flask(__name__)
 
-# --- Credential and State Management (unchanged) ---
+# --- Credential and State Management ---
 class CredentialRevolver:
+    # ... (this class is unchanged)
     def __init__(self, proxy_string: str):
         self.proxies = self._parse_proxies(proxy_string)
         self.proxy_cycler = cycle(self.proxies) if self.proxies else None
-
     def _parse_proxies(self, proxy_string: str):
         proxies = []
         if not proxy_string: return proxies
@@ -40,53 +37,26 @@ class CredentialRevolver:
                 proxies.append(proxy_dict)
             except Exception: pass
         return proxies
-
     def get_next(self) -> Optional[Dict]:
         return next(self.proxy_cycler) if self.proxy_cycler else None
-
    def count(self) -> int:
         return len(self.proxies)
 
-PLAYWRIGHT_STATE: Dict = {}
+# <<< REMOVED >>> The global PLAYWRIGHT_STATE is removed to prevent event loop conflicts.
+# PLAYWRIGHT_STATE: Dict = {}
 REVOLVER = CredentialRevolver(os.getenv("PROXY_LIST", ""))
 
 SEARCH_ENGINES = {
-    "Google": "https://www.google.com/search?q={query}&hl=en",
-    "DuckDuckGo": "https://duckduckgo.com/html/?q={query}",
-    "Bing": "https://www.bing.com/search?q={query}",
-    "Brave": "https://search.brave.com/search?q={query}",
-    "Ecosia": "https://www.ecosia.org/search?q={query}",
-    "Yahoo": "https://search.yahoo.com/search?p={query}",
-    "Startpage": "https://www.startpage.com/sp/search?q={query}",
-    "Qwant": "https://www.qwant.com/?q={query}",
-    "Swisscows": "https://swisscows.com/web?query={query}",
-    "You.com": "https://you.com/search?q={query}",
-    "SearXNG": "https://searx.be/search?q={query}",
-    "MetaGer": "https://metager.org/meta/meta.ger-en?eingabe={query}",
-    "Yandex": "https://yandex.com/search/?text={query}",
-    "Baidu": "https://www.baidu.com/s?wd={query}",
-    "Perplexity": "https://www.perplexity.ai/search?q={query}",
+    # ... (this dictionary is unchanged)
+    "Google": "https://www.google.com/search?q={query}&hl=en", "DuckDuckGo": "https://duckduckgo.com/html/?q={query}", "Bing": "https://www.bing.com/search?q={query}", "Brave": "https://search.brave.com/search?q={query}", "Ecosia": "https://www.ecosia.org/search?q={query}", "Yahoo": "https://search.yahoo.com/search?p={query}", "Startpage": "https://www.startpage.com/sp/search?q={query}", "Qwant": "https://www.qwant.com/?q={query}", "Swisscows": "https://swisscows.com/web?query={query}", "You.com": "https://you.com/search?q={query}", "SearXNG": "https://searx.be/search?q={query}", "MetaGer": "https://metager.org/meta/meta.ger-en?eingabe={query}", "Yandex": "https://yandex.com/search/?text={query}", "Baidu": "https://www.baidu.com/s?wd={query}", "Perplexity": "https://www.perplexity.ai/search?q={query}",
 }
 
 # --- HTML to Markdown Conversion (unchanged) ---
 class HTML_TO_MARKDOWN_CONVERTER:
-    def __init__(self, soup: BeautifulSoup, base_url: str):
-        self.soup = soup
-        self.base_url = base_url
-
-    def _cleanup_html(self):
-        selectors_to_remove = ['nav', 'footer', 'header', 'aside', 'form', 'script', 'style', 'svg', 'button', 'input', 'textarea', '[role="navigation"]', '[role="search"]', '[id*="comment"]', '[class*="comment-"]', '[id*="sidebar"]', '[class*="sidebar"]', '[id*="related"]', '[class*="related"]', '[id*="share"]', '[class*="share"]', '[id*="social"]', '[class*="social"]', '[id*="cookie"]', '[class*="cookie"]', '[aria-hidden="true"]']
-        for selector in selectors_to_remove:
-            for element in self.soup.select(selector):
-                element.decompose()
-
-    def convert(self):
-        self._cleanup_html()
-        content_node = self.soup.find('main') or self.soup.find('article') or self.soup.find('body')
-        if not content_node: return ""
-        md = self._process_node(content_node)
-        return re.sub(r'\n{3,}', '\n\n', md).strip()
-
+    # ... (this class is unchanged)
+    def __init__(self, soup: BeautifulSoup, base_url: str): self.soup = soup; self.base_url = base_url
+    def _cleanup_html(self): selectors_to_remove = ['nav', 'footer', 'header', 'aside', 'form', 'script', 'style', 'svg', 'button', 'input', 'textarea', '[role="navigation"]', '[role="search"]', '[id*="comment"]', '[class*="comment-"]', '[id*="sidebar"]', '[class*="sidebar"]', '[id*="related"]', '[class*="related"]', '[id*="share"]', '[class*="share"]', '[id*="social"]', '[class*="social"]', '[id*="cookie"]', '[class*="cookie"]', '[aria-hidden="true"]']; [element.decompose() for selector in selectors_to_remove for element in self.soup.select(selector)]
+    def convert(self): self._cleanup_html(); content_node = self.soup.find('main') or self.soup.find('article') or self.soup.find('body'); return re.sub(r'\n{3,}', '\n\n', self._process_node(content_node)).strip() if content_node else ""
     def _process_node(self, element):
         if isinstance(element, NavigableString): return re.sub(r'\s+', ' ', element.strip())
         if element.name is None or not element.name: return ''
@@ -100,124 +70,103 @@ class HTML_TO_MARKDOWN_CONVERTER:
         if element.name in ['ul', 'ol']: return f"\n{inner_md}\n"
         if element.name == 'blockquote': return f"> {inner_md.replace(chr(10), chr(10) + '> ')}\n\n"
         if element.name == 'hr': return "\n\n---\n\n"
-        if element.name == 'table':
-            header = " | ".join(f"**{th.get_text(strip=True)}**" for th in element.select('thead th, tr th'))
-            separator = " | ".join(['---'] * len(header.split('|')))
-            rows = [" | ".join(td.get_text(strip=True) for td in tr.find_all('td')) for tr in element.select('tbody tr')]
-            return f"\n\n{header}\n{separator}\n" + "\n".join(rows) + "\n\n"
+        if element.name == 'table': header = " | ".join(f"**{th.get_text(strip=True)}**" for th in element.select('thead th, tr th')); separator = " | ".join(['---'] * len(header.split('|'))); rows = [" | ".join(td.get_text(strip=True) for td in tr.find_all('td')) for tr in element.select('tbody tr')]; return f"\n\n{header}\n{separator}\n" + "\n".join(rows) + "\n\n"
         if element.name == 'pre': return f"\n```\n{element.get_text(strip=True)}\n```\n\n"
         if element.name == 'code': return f"`{inner_md}`"
         if element.name in ['strong', 'b']: return f"**{inner_md}**"
         if element.name in ['em', 'i']: return f"*{inner_md}*"
-        if element.name == 'a':
-            href = element.get('href', '')
-            full_href = urllib.parse.urljoin(self.base_url, href)
-            return f"[{inner_md}]({full_href})"
-        if element.name == 'img':
-            src = element.get('src', '')
-            alt = element.get('alt', 'Image').strip()
-            full_src = urllib.parse.urljoin(self.base_url, src)
-            return f"\n\n![{alt}]({full_src})\n\n"
+        if element.name == 'a': href = element.get('href', ''); full_href = urllib.parse.urljoin(self.base_url, href); return f"[{inner_md}]({full_href})"
+        if element.name == 'img': src = element.get('src', ''); alt = element.get('alt', 'Image').strip(); full_src = urllib.parse.urljoin(self.base_url, src); return f"\n\n![{alt}]({full_src})\n\n"
         return inner_md
 
 # --- Core Web Browsing Logic ---
-# UPDATED: The function signature now uses `browser` and `search_engine` for consistency.
-async def perform_web_browse(action: str, query: str, browser: str, search_engine: str):
-    browser_key = browser.lower()
-    if "playwright" not in PLAYWRIGHT_STATE:
-        PLAYWRIGHT_STATE["playwright"] = await async_playwright().start()
+async def perform_web_browse(action: str, query: str, browser_name: str, search_engine: str):
+    playwright = None
+    browser = None
+    # <<< CHANGE >>> Use a try/finally block to ensure resources are always cleaned up.
+    try:
+        # <<< CHANGE >>> Start Playwright inside the function.
+        playwright = await async_playwright().start()
+
+        browser_key = browser_name.lower()
+        browser_map = {'firefox': playwright.firefox, 'chromium': playwright.chromium, 'webkit': playwright.webkit}
+        browser_launcher = browser_map.get(browser_key)
+
+        if not browser_launcher:
+            raise ValueError(f"Invalid browser name: {browser_name}")
+
+        # <<< CHANGE >>> Launch the browser inside the function.
+        launch_args = ['--no-sandbox'] if browser_key == 'chromium' else []
+        browser = await browser_launcher.launch(headless=True, args=launch_args)
+
+        if action == "Scrape URL":
+            url = query if query.startswith(('http://', 'https://')) else f"http://{query}"
+        else: # action == "Search"
+            url_template = SEARCH_ENGINES.get(search_engine)
+            if not url_template:
+                return {"status": "error", "query": query, "error_message": f"Invalid search engine: '{search_engine}'."}
+            url = url_template.format(query=urllib.parse.quote_plus(query))
+
+        proxy_config = REVOLVER.get_next()
+        proxy_server_used = proxy_config["server"] if proxy_config else "Direct Connection"
+
+        context_args = {'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36', 'java_script_enabled': True, 'ignore_https_errors': True, 'bypass_csp': True, 'accept_downloads': False}
+        if proxy_config: context_args['proxy'] = proxy_config
+
+        context = await browser.new_context(**context_args)
+        page = await context.new_page()
 
-    if browser_key not in PLAYWRIGHT_STATE:
         try:
-            p = PLAYWRIGHT_STATE["playwright"]
-            browser_map = {'firefox': p.firefox, 'chromium': p.chromium, 'webkit': p.webkit}
-            browser_launcher = browser_map.get(browser_key)
-            if not browser_launcher:
-                raise ValueError(f"Invalid browser name: {browser}")
-            launch_args = ['--no-sandbox'] if browser_key == 'chromium' else []
-            browser_instance = await browser_launcher.launch(headless=True, args=launch_args)
-            PLAYWRIGHT_STATE[browser_key] = browser_instance
+            response = await page.goto(url, wait_until='domcontentloaded', timeout=25000)
+            html_content = await page.content()
+            if any(phrase in html_content for phrase in ["unusual traffic", "CAPTCHA", "are you human", "not a robot"]):
+                raise Exception(f"Anti-bot measure detected on {page.url}. Try another search engine or proxy.")
+            final_url, title = page.url, await page.title() or "No Title"
+            soup = BeautifulSoup(html_content, 'lxml')
+            converter = HTML_TO_MARKDOWN_CONVERTER(soup, base_url=final_url)
+            markdown_text = converter.convert()
+            status_code = response.status if response else 0
+            return {"status": "success", "query": query, "action": action, "final_url": final_url, "page_title": title, "http_status": status_code, "proxy_used": proxy_server_used, "markdown_content": markdown_text}
         except Exception as e:
-            return {"status": "error", "query": query, "error_message": f"Failed to launch '{browser_key}'. Error: {str(e).splitlines()[0]}"}
-
-    browser_instance = PLAYWRIGHT_STATE[browser_key]
-
-    if action == "Scrape URL":
-        url = query if query.startswith(('http://', 'https://')) else f"http://{query}"
-    else: # action == "Search"
-        url_template = SEARCH_ENGINES.get(search_engine)
-        if not url_template:
-            return {"status": "error", "query": query, "error_message": f"Invalid search engine: '{search_engine}'."}
-        url = url_template.format(query=urllib.parse.quote_plus(query))
-
-    proxy_config = REVOLVER.get_next()
-    proxy_server_used = proxy_config["server"] if proxy_config else "Direct Connection"
-
-    context_args = {'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36', 'java_script_enabled': True, 'ignore_https_errors': True, 'bypass_csp': True, 'accept_downloads': False}
-    if proxy_config: context_args['proxy'] = proxy_config
-
-    context = await browser_instance.new_context(**context_args)
-    page = await context.new_page()
-
-    try:
-        response = await page.goto(url, wait_until='domcontentloaded', timeout=25000)
-        html_content = await page.content()
-        if any(phrase in html_content for phrase in ["unusual traffic", "CAPTCHA", "are you human", "not a robot"]):
-            raise Exception(f"Anti-bot measure detected on {page.url}. Try another search engine or proxy.")
-        final_url, title = page.url, await page.title() or "No Title"
-        soup = BeautifulSoup(html_content, 'lxml')
-        converter = HTML_TO_MARKDOWN_CONVERTER(soup, base_url=final_url)
-        markdown_text = converter.convert()
-        status_code = response.status if response else 0
-        return {"status": "success", "query": query, "action": action, "final_url": final_url, "page_title": title, "http_status": status_code, "proxy_used": proxy_server_used, "markdown_content": markdown_text}
+            error_message = str(e).splitlines()[0]
+            if "Timeout" in error_message:
+                return {"status": "error", "query": query, "proxy_used": proxy_server_used, "error_message": f"Navigation Timeout: The page for '{query}' took too long to load."}
+            return {"status": "error", "query": query, "proxy_used": proxy_server_used, "error_message": error_message}
+        finally:
+            if 'page' in locals() and not page.is_closed(): await page.close()
+            if 'context' in locals(): await context.close()
+
+    # <<< CHANGE >>> This block now handles errors during browser launch as well.
     except Exception as e:
-        error_message = str(e).splitlines()[0]
-        if "Timeout" in error_message:
-            return {"status": "error", "query": query, "proxy_used": proxy_server_used, "error_message": f"Navigation Timeout: The page for '{query}' took too long to load."}
-        return {"status": "error", "query": query, "proxy_used": proxy_server_used, "error_message": error_message}
+        app.logger.error(f"A critical error occurred in perform_web_browse: {e}", exc_info=True)
+        return {"status": "error", "query": query, "error_message": f"Failed to initialize browser resources: {str(e).splitlines()[0]}"}
+
+    # <<< CHANGE >>> Ensure browser and playwright are always closed down.
     finally:
-        if 'page' in locals() and not page.is_closed(): await page.close()
-        if 'context' in locals(): await context.close()
+        if browser:
+            await browser.close()
+        if playwright:
+            await playwright.stop()
 
-# --- API Endpoint Definitions ---
 
+# --- API Endpoint Definitions (unchanged) ---
 @app.route('/', methods=['GET'])
 def index():
-    """Root endpoint to provide API status and usage instructions."""
-    return jsonify({
-        "status": "online",
-        "message": "Welcome to the Web Browse API!",
-        "api_endpoint": "/web_browse",
-        "instructions": "Send a POST request to /web_browse with a JSON payload to use the service.",
-        "payload_format": {
-            "action": "string (required: 'Search' or 'Scrape URL')",
-            "query": "string (required: a search term or a full URL)",
-            "browser": "string (optional, default: 'firefox'; options: 'firefox', 'chromium', 'webkit')",
-            "search_engine": "string (optional, default: 'DuckDuckGo'; see code for all options)"
-        },
-        "example_curl": """curl -X POST YOUR_SPACE_URL/web_browse -H "Content-Type: application/json" -d '{"action": "Search", "query": "latest news on AI", "browser": "webkit"}'"""
-    })
+    # ... (this function is unchanged)
+    return jsonify({ "status": "online", "message": "Welcome to the Web Browse API!", "api_endpoint": "/web_browse", "instructions": "Send a POST request to /web_browse with a JSON payload to use the service.", "payload_format": { "action": "string (required: 'Search' or 'Scrape URL')", "query": "string (required: a search term or a full URL)", "browser": "string (optional, default: 'firefox'; options: 'firefox', 'chromium', 'webkit')", "search_engine": "string (optional, default: 'DuckDuckGo'; see code for all options)" }, "example_curl": """curl -X POST YOUR_SPACE_URL/web_browse -H "Content-Type: application/json" -d '{"action": "Search", "query": "latest news on AI", "browser": "webkit"}'""" })
 
 @app.route('/web_browse', methods=['POST'])
 def web_browse():
-    """API endpoint to perform a web search or scrape a URL."""
-    if not request.is_json:
-        return jsonify({"status": "error", "error_message": "Invalid input: payload must be JSON"}), 400
-
+    # ... (this function is unchanged)
+    if not request.is_json: return jsonify({"status": "error", "error_message": "Invalid input: payload must be JSON"}), 400
     data = request.get_json()
     action = data.get('action')
     query = data.get('query')
-    # UPDATED: Reading `browser` and `search_engine` from the payload.
     browser = data.get('browser', 'firefox')
     search_engine = data.get('search_engine', 'DuckDuckGo')
-
-    if not action or not query:
-        return jsonify({"status": "error", "error_message": "Missing required parameters: 'action' and 'query' are mandatory."}), 400
-    if action not in ["Search", "Scrape URL"]:
-        return jsonify({"status": "error", "error_message": "Invalid 'action'. Must be 'Search' or 'Scrape URL'."}), 400
-
+    if not action or not query: return jsonify({"status": "error", "error_message": "Missing required parameters: 'action' and 'query' are mandatory."}), 400
+    if action not in ["Search", "Scrape URL"]: return jsonify({"status": "error", "error_message": "Invalid 'action'. Must be 'Search' or 'Scrape URL'."}), 400
     try:
-        # UPDATED: Passing the new variable names to the function.
        result = asyncio.run(perform_web_browse(action, query, browser, search_engine))
         response_status_code = 200 if result.get("status") == "success" else 500
         return jsonify(result), response_status_code
@@ -225,8 +174,8 @@ def web_browse():
         app.logger.error(f"An unexpected server error occurred: {e}", exc_info=True)
         return jsonify({"status": "error", "query": query, "error_message": f"An unexpected server error occurred: {str(e)}"}), 500
 
-# --- Main Application Runner ---
+# --- Main Application Runner (unchanged) ---
 if __name__ == "__main__":
     port = int(os.environ.get("PORT", 7860))
     print(f"Flask server starting on port {port}... {REVOLVER.count()} proxies loaded.")
-    app.run(host='0.0.0.0', port=port, debug=True)
+    app.run(host='0.0.0.0', port=port)
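
For reference, a minimal client-side sketch of how the /web_browse endpoint introduced by this commit can be exercised. This is not part of the commit; it assumes the `requests` library is installed and uses a placeholder base URL (a locally started server on port 7860), and only the payload fields documented by the index() endpoint.

# Hypothetical usage sketch for the /web_browse endpoint (not part of app.py).
import requests

BASE_URL = "http://localhost:7860"  # assumption: app.py running locally on its default port

payload = {
    "action": "Search",              # required: "Search" or "Scrape URL"
    "query": "latest news on AI",    # required: a search term or a full URL
    "browser": "firefox",            # optional: "firefox", "chromium", or "webkit"
    "search_engine": "DuckDuckGo",   # optional: any key of SEARCH_ENGINES
}

# The server runs a full headless-browser session per request, so allow a generous timeout.
resp = requests.post(f"{BASE_URL}/web_browse", json=payload, timeout=120)
result = resp.json()
print(result.get("status"), result.get("final_url"))
print(result.get("markdown_content") or result.get("error_message"))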