async def fetch_url_async(client, url):
    """Issue a single async GET to *url* and report its outcome.

    Args:
        client: an ``httpx.AsyncClient`` used to perform the request.
        url: the target URL (string).

    Returns:
        The integer HTTP status code on success, or ``None`` if the
        request failed at the transport level (``httpx.RequestError``).

    NOTE(review): ``raise_for_status()`` raises ``httpx.HTTPStatusError``
    for 4xx/5xx responses, which is NOT caught by the ``RequestError``
    handler below and will propagate to the caller — confirm this is the
    intended behavior for the benchmark (a failing server would abort the
    whole run rather than count as a failed request).
    """
    try:
        # Timeout raised from 10 to 20 seconds to tolerate server load
        # spikes during high-concurrency benchmark runs.
        response = await client.get(url, timeout=20)
        response.raise_for_status()
        return response.status_code
    except httpx.RequestError as e:
        # Verbose diagnostics: include type, str, and repr because some
        # httpx transport errors stringify to an empty message.
        print(f"Request to {url} failed. Type: {type(e)}, Str: {str(e)}, Repr: {repr(e)}", flush=True)
        return None
async def run_fastapi_benchmark_async(num_requests):
start_time = time.perf_counter()
results_list = [] # To store results for final count
- async with httpx.AsyncClient() as client:
+ # Configure httpx limits
+ limits = httpx.Limits(max_connections=500, max_keepalive_connections=50)
+ async with httpx.AsyncClient(limits=limits) as client:
tasks = [fetch_url_async(client, FASTAPI_URL) for _ in range(num_requests)]
- # Print a message indicating all tasks are launched and now awaiting completion.
- # This might be redundant if the parent script already says "launched X/X tasks"
- # but can be useful for the benchmark script's own log.
- # print(f"FASTAPI_INFO: All {num_requests} tasks launched, awaiting completion...", flush=True)
completed_count = 0
for i, task_future in enumerate(asyncio.as_completed(tasks)):
final_summary_line = line
if progress_line_printed:
print("\r" + " " * 80 + "\r", end="", flush=True)
+ else:
+ # Print any other lines from the subprocess (e.g., error messages)
+ if progress_line_printed:
+ # Clear the progress line before printing the unexpected line
+ print("\r" + " " * 80 + "\r", end="", flush=True)
+ print(line, flush=True) # Print the actual error line
+ if progress_line_printed:
+ # Reprint the progress line after the error line
+ print(f"\rFastAPI progress: Completed {requests_done_count}/{NUM_REQUESTS_EXPECTED} tasks...", end="", flush=True)
process.stdout.close()