17
author    Sam Mirazi <sasan345@gmail.com>
Mon, 2 Jun 2025 04:51:00 +0000 (21:51 -0700)
committer Sam Mirazi <sasan345@gmail.com>
Mon, 2 Jun 2025 04:51:00 +0000 (21:51 -0700)
app_fastapi/__pycache__/app.cpython-312.pyc
app_fastapi/__pycache__/app_no_delay.cpython-312.pyc [new file with mode: 0644]
app_fastapi/app_no_delay.py [new file with mode: 0644]
app_flask/flask_application_no_delay.py [new file with mode: 0644]
benchmark/run_benchmark.py
run_benchmark_NO_RESTRICTIONS.py [new file with mode: 0644]
run_benchmark_table.py

diff --git a/app_fastapi/__pycache__/app.cpython-312.pyc b/app_fastapi/__pycache__/app.cpython-312.pyc
index 1e2f5cebeeafec93fbd86c8d6bfd34f46d91d960..1334a0e21affbf525563b3cf92235b3f86199f6f 100644 (file)
Binary files a/app_fastapi/__pycache__/app.cpython-312.pyc and b/app_fastapi/__pycache__/app.cpython-312.pyc differ
diff --git a/app_fastapi/__pycache__/app_no_delay.cpython-312.pyc b/app_fastapi/__pycache__/app_no_delay.cpython-312.pyc
new file mode 100644 (file)
index 0000000..8cc371b
Binary files /dev/null and b/app_fastapi/__pycache__/app_no_delay.cpython-312.pyc differ
diff --git a/app_fastapi/app_no_delay.py b/app_fastapi/app_no_delay.py
new file mode 100644 (file)
index 0000000..36959b4
--- /dev/null
+++ b/app_fastapi/app_no_delay.py
@@ -0,0 +1,13 @@
+# app_fastapi/app_no_delay.py
+from fastapi import FastAPI, Response
+# import asyncio # No longer needed for sleep
+
+app = FastAPI()
+
+@app.get("/")
+async def home():
+    # await asyncio.sleep(0.3) # Removed delay
+    html = "<h1>FastAPI Server: No Artificial Delay</h1>"
+    return Response(content=html, media_type="text/html")
+
+# To run this app (for testing): uvicorn app_fastapi.app_no_delay:app --reload --port 8000 
\ No newline at end of file
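Note: the new FastAPI app above simply drops the asyncio.sleep(0.3) call and returns a static HTML response. Before benchmarking, the endpoint can be sanity-checked with a small script along these lines (a sketch, not part of the commit; it assumes the server is already running on http://127.0.0.1:8000/ and that httpx, already used by the benchmark, is installed):

    # smoke_check_fastapi.py (hypothetical helper, illustrative only)
    import httpx

    def check_fastapi_no_delay(url="http://127.0.0.1:8000/"):
        response = httpx.get(url, timeout=5)
        response.raise_for_status()
        # The handler above returns this heading in its HTML body.
        assert "No Artificial Delay" in response.text
        print(f"OK: {url} returned {response.status_code}")

    if __name__ == "__main__":
        check_fastapi_no_delay()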
diff --git a/app_flask/flask_application_no_delay.py b/app_flask/flask_application_no_delay.py
new file mode 100644 (file)
index 0000000..a7b8ba0
--- /dev/null
+++ b/app_flask/flask_application_no_delay.py
@@ -0,0 +1,33 @@
+from flask import Flask, Response
+# import time # No longer needed for sleep
+# import webbrowser # Not needed here
+from datetime import datetime
+
+app = Flask(__name__)
+
+request_counter = 0
+
+@app.route("/")
+def home():
+    global request_counter
+    request_id = request_counter + 1
+    request_counter = request_id
+
+    start_time = datetime.now()
+    # print(f"[Flask No-Delay Server] Request {request_id} received at {start_time.strftime('%Y-%m-%d %H:%M:%S.%f')}") # Optional: keep for debugging if needed
+    
+    # time.sleep(0.3) # Removed delay
+    
+    end_time = datetime.now()
+    processing_time = (end_time - start_time).total_seconds()
+    # print(f"[Flask No-Delay Server] Request {request_id} finishing at {end_time.strftime('%Y-%m-%d %H:%M:%S.%f')}, processed in {processing_time:.2f}s") # Optional
+    
+    html = f"<h1>Flask Server (No Delay, Threaded): Request {request_id} processed in {processing_time:.6f}s</h1>"
+    return Response(html, mimetype="text/html")
+
+if __name__ == "__main__":
+    print("[Flask No-Delay Server] Starting server on http://127.0.0.1:3000...")
+    # Running with threaded=True to allow Werkzeug to handle requests concurrently
+    app.run(host="0.0.0.0", port=3000, threaded=True)
+
+# To run this app (for testing): python app_flask/flask_application_no_delay.py 
\ No newline at end of file
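Note: with threaded=True, Werkzeug can serve requests on several threads, so the unsynchronized read-then-write on request_counter may occasionally assign two requests the same id. That is harmless for timing purposes, but if exact ids mattered, a lock-guarded counter would look roughly like this (illustrative sketch, not part of the commit):

    import threading

    _counter_lock = threading.Lock()
    request_counter = 0

    def next_request_id():
        """Return a unique, monotonically increasing request id (thread-safe)."""
        global request_counter
        with _counter_lock:
            request_counter += 1
            return request_counter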
diff --git a/benchmark/run_benchmark.py b/benchmark/run_benchmark.py
index 9975e2dac0e68ce4f64076887fd3e5cfe344c10a..5a1c50fc145344d986a25225290080f1b7ffbaa9 100644 (file)
--- a/benchmark/run_benchmark.py
+++ b/benchmark/run_benchmark.py
@@ -10,68 +10,68 @@ import sys # Added for sys.stdout.flush()
 # Server URLs (ensure these match your running servers)
 FLASK_URL = "http://127.0.0.1:3000/"
 FASTAPI_URL = "http://127.0.0.1:8000/"
-NUM_REQUESTS = 100
+# NUM_REQUESTS = 100 # Removed, will be passed as argument
 
 def fetch_url_sync(url):
     try:
-        response = requests.get(url, timeout=5) # Added timeout to prevent hanging
-        response.raise_for_status() # Raise an exception for bad status codes
+        response = requests.get(url, timeout=10) # Increased timeout for potentially more requests
+        response.raise_for_status()
         return response.status_code
     except requests.exceptions.RequestException as e:
-        print(f"Request to {url} failed: {e}")
+        # print(f"Request to {url} failed: {e}") # Silenced for cleaner benchmark output
         return None
 
-def run_flask_benchmark():
-    print(f"Starting Flask benchmark: {NUM_REQUESTS} requests to {FLASK_URL}...")
-    print("[DIAG-BRB-FLASK] Running requests SEQUENTIALLY for diagnosis.", flush=True)
+def run_flask_benchmark(num_requests):
+    print(f"Starting Flask benchmark: {num_requests} requests to {FLASK_URL}...")
+    # print("[DIAG-BRB-FLASK] Running requests SEQUENTIALLY for diagnosis.", flush=True) # Can be verbose
     start_time = time.perf_counter()
     
     results_list = []
-    successful_so_far = 0
+    # successful_so_far = 0 # Not strictly needed here
 
-    for i in range(NUM_REQUESTS):
+    for i in range(num_requests):
         try:
-            status_code = fetch_url_sync(FLASK_URL) # Direct call
-            print(f"REQ_STATUS:{status_code}", flush=True) # New progress line
+            status_code = fetch_url_sync(FLASK_URL)
+            print(f"REQ_STATUS:{status_code}", flush=True) # Progress for the calling script to count
             results_list.append(status_code)
-            if status_code == 200:
-                successful_so_far += 1
-            print(f"[DIAG-BRB-FLASK] Request {i+1}/{NUM_REQUESTS} result: {status_code}", flush=True)
+            # if status_code == 200:     # successful_so_far is no longer initialized above;
+            #     successful_so_far += 1 # the success count is derived from results_list below
+            # print(f"[DIAG-BRB-FLASK] Request {i+1}/{num_requests} result: {status_code}", flush=True)
         except Exception as e:
-            print(f"[DIAG-BRB-FLASK] Request {i+1}/{NUM_REQUESTS} failed with exception: {e}", flush=True)
+            # print(f"[DIAG-BRB-FLASK] Request {i+1}/{num_requests} failed with exception: {e}", flush=True)
             results_list.append(None)
 
     end_time = time.perf_counter()
     total_time = end_time - start_time
     successful_requests = sum(1 for r in results_list if r == 200)
-    print(f"Final Flask benchmark summary: {successful_requests}/{NUM_REQUESTS} successful requests in {total_time:.2f} seconds.")
+    print(f"Final Flask benchmark summary: {successful_requests}/{num_requests} successful requests in {total_time:.2f} seconds.")
     return total_time
 
 async def fetch_url_async(client, url):
     try:
-        response = await client.get(url) # REMOVED timeout=10
+        response = await client.get(url, timeout=10) # Increased timeout
         response.raise_for_status()
         return response.status_code
     except httpx.RequestError as e:
-        print(f"Request to {url} failed: {e}")
+        # print(f"Request to {url} failed: {e}") # Silenced
         return None
 
-async def run_fastapi_benchmark_async():
-    print(f"Starting FastAPI benchmark: {NUM_REQUESTS} requests to {FASTAPI_URL}...")
+async def run_fastapi_benchmark_async(num_requests):
+    print(f"Starting FastAPI benchmark: {num_requests} requests to {FASTAPI_URL}...")
     start_time = time.perf_counter()
     
     async with httpx.AsyncClient() as client:
-        tasks = [fetch_url_async(client, FASTAPI_URL) for _ in range(NUM_REQUESTS)]
+        tasks = [fetch_url_async(client, FASTAPI_URL) for _ in range(num_requests)]
         results = await asyncio.gather(*tasks)
     
     end_time = time.perf_counter()
     total_time = end_time - start_time
     successful_requests = sum(1 for r in results if r == 200)
-    print(f"FastAPI benchmark: {successful_requests}/{NUM_REQUESTS} successful requests in {total_time:.2f} seconds.")
+    print(f"FastAPI benchmark: {successful_requests}/{num_requests} successful requests in {total_time:.2f} seconds.")
     return total_time
 
-def run_fastapi_benchmark():
-    return asyncio.run(run_fastapi_benchmark_async())
+def run_fastapi_benchmark(num_requests):
+    return asyncio.run(run_fastapi_benchmark_async(num_requests))
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Run web server benchmarks.")
@@ -80,11 +80,16 @@ if __name__ == "__main__":
         choices=["flask", "fastapi"], 
         help="Specify the framework to benchmark (flask or fastapi)"
     )
+    parser.add_argument(
+        "num_requests",
+        type=int,
+        help="Number of requests to perform"
+    )
     args = parser.parse_args()
 
     if args.framework == "flask":
-        run_flask_benchmark()
+        run_flask_benchmark(args.num_requests)
     elif args.framework == "fastapi":
-        run_fastapi_benchmark()
+        run_fastapi_benchmark(args.num_requests)
     else:
         print("Invalid framework specified. Choose 'flask' or 'fastapi'.") 
\ No newline at end of file
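Note: with the NUM_REQUESTS constant replaced by a positional argument, the script is now run as, for example, python benchmark/run_benchmark.py flask 1000. The orchestration scripts below invoke it the same way through subprocess; a minimal sketch of such a call (argument values are illustrative):

    import subprocess, sys

    # Run the benchmark script with the new positional num_requests argument.
    cmd = [sys.executable, "benchmark/run_benchmark.py", "fastapi", "1000"]
    result = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
    print(result.stdout.strip().splitlines()[-1])  # final summary line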
diff --git a/run_benchmark_NO_RESTRICTIONS.py b/run_benchmark_NO_RESTRICTIONS.py
new file mode 100644 (file)
index 0000000..c351ab1
--- /dev/null
+++ b/run_benchmark_NO_RESTRICTIONS.py
@@ -0,0 +1,215 @@
+import subprocess
+import time
+import re
+import requests  # pip install requests
+import webbrowser
+from rich.console import Console
+from rich.table import Table
+import sys
+import os
+
+# --- Configuration ------------------------------------------------------
+FLASK_SERVER_URL = "http://127.0.0.1:3000/"
+FASTAPI_SERVER_URL = "http://127.0.0.1:8000/"
+BENCHMARK_SCRIPT_PATH = "benchmark/run_benchmark.py"  # This script sends requests, delays are in apps
+NUM_REQUESTS_EXPECTED = 1000
+PYTHON_EXE = sys.executable
+
+# ------------------------------------------------------------------------
+console = Console()
+
+# -------------------------- helpers -------------------------------------
+def start_server(command_args, health_check_url, server_name, cwd=None):
+    """Start server and wait until a 200 health check is returned."""
+    console.print(f"[yellow]Starting {server_name} server (No Restrictions Test)...[/yellow]")
+    popen_kwargs = dict(cwd=cwd, text=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
+    if "uvicorn" in command_args[0] and not command_args[0].endswith(".exe"):
+        process = subprocess.Popen([PYTHON_EXE, "-m"] + command_args, **popen_kwargs)
+    else:
+        process = subprocess.Popen(command_args, **popen_kwargs)
+
+    max_wait = 30
+    start_t = time.time()
+    while time.time() - start_t < max_wait:
+        try:
+            if requests.get(health_check_url, timeout=3).status_code == 200:
+                console.print(f"[green]{server_name} ready.[/green]")
+                return process
+        except requests.RequestException:
+            time.sleep(0.3)
+    console.print(f"[red]{server_name} failed to start within {max_wait}s.[/red]")
+    process.terminate()
+    return None
+
+def stop_server(proc, name):
+    if not proc:
+        return
+    console.print(f"[yellow]Stopping {name}…[/yellow]")
+    proc.terminate()
+    try:
+        proc.wait(timeout=8)
+    except subprocess.TimeoutExpired:
+        proc.kill()
+    console.print(f"[green]{name} stopped.[/green]")
+
+def run_benchmark_script(framework_arg):
+    # This function remains largely the same, as benchmark/run_benchmark.py handles the requests.
+    # The "no restrictions" aspect is handled by running different app_*.py files.
+    console.print(f"Running benchmark for [bold]{framework_arg}[/bold] (No Restrictions Test) with {NUM_REQUESTS_EXPECTED} requests...")
+    cmd = [PYTHON_EXE, BENCHMARK_SCRIPT_PATH, framework_arg, str(NUM_REQUESTS_EXPECTED)]
+    
+    # The stdout/stderr handling can be simplified if live progress isn't strictly needed for this version,
+    # or kept if useful. For now, keeping the detailed Flask progress handling.
+    if framework_arg.lower() == "flask":
+        final_summary_line = None
+        requests_done_count = 0
+        progress_line_printed = False
+        try:
+            process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True, encoding='utf-8')
+            if process.stdout:
+                for line in iter(process.stdout.readline, ''):
+                    line = line.strip()
+                    if not line: continue
+                    if line.startswith("REQ_STATUS:"):
+                        requests_done_count += 1
+                        print(f"\rFlask progress: Handled {requests_done_count}/{NUM_REQUESTS_EXPECTED} requests...", end="", flush=True)
+                        progress_line_printed = True
+                    elif line.startswith("[DIAG-BRB-FLASK]"):
+                        if progress_line_printed: print("\r" + " " * 80 + "\r", end="", flush=True)
+                        print(line, flush=True)
+                        if progress_line_printed: print(f"\rFlask progress: Handled {requests_done_count}/{NUM_REQUESTS_EXPECTED} requests...", end="", flush=True)
+                    elif "Final Flask benchmark summary:" in line:
+                        final_summary_line = line
+                        if progress_line_printed: print("\r" + " " * 80 + "\r", end="", flush=True)
+                process.stdout.close()
+            if progress_line_printed and not final_summary_line: print("\r" + " " * 80 + "\r", end="", flush=True)
+
+            stderr_output_list = []
+            if process.stderr:
+                for line in iter(process.stderr.readline, ''):
+                    line = line.strip()
+                    if line: stderr_output_list.append(line)
+                process.stderr.close()
+            process.wait(timeout=600)
+            if process.returncode != 0:
+                console.print(f"[red]{framework_arg} benchmark script failed (code {process.returncode})[/red]")
+                if stderr_output_list: console.print("[red]STDERR:[/red]"); [console.print(f"[red]{err_line}[/red]") for err_line in stderr_output_list]
+                return None
+            if final_summary_line: return final_summary_line
+            else: 
+                console.print(f"[red]No summary line for {framework_arg}.[/red]")
+                if stderr_output_list: console.print("[red]STDERR:[/red]"); [console.print(f"[red]{err_line}[/red]") for err_line in stderr_output_list]
+                return None
+        except subprocess.TimeoutExpired:
+            console.print(f"[red]Benchmark for {framework_arg} timed out.[/red]")
+            if process.poll() is None: process.kill(); process.wait()
+            return None
+        except Exception as e:
+            console.print(f"[red]Error running Popen benchmark for {framework_arg}: {e}[/red]")
+            return None
+    else: # For FastAPI
+        try:
+            result = subprocess.run(cmd, text=True, capture_output=True, timeout=600, check=False, encoding='utf-8')
+            if result.returncode != 0:
+                console.print(f"[red]{framework_arg} benchmark failed with subprocess.run.[/red]")
+                if result.stderr: console.print(f"STDERR:\n{result.stderr.strip()}")
+                return None
+            if result.stdout and result.stdout.strip():
+                lines = result.stdout.strip().splitlines()
+                if lines: return lines[-1]
+                else: console.print(f"[red]No stdout lines from {framework_arg}.[/red]"); return None
+            else:
+                console.print(f"[red]No stdout from {framework_arg}.[/red]")
+                if result.stderr and result.stderr.strip(): console.print(f"STDERR:\n{result.stderr.strip()}")
+                return None
+        except subprocess.TimeoutExpired:
+            console.print(f"[red]Benchmark for {framework_arg} (subprocess.run) timed out.[/red]")
+            return None
+        except Exception as e:
+            console.print(f"[red]Error running subprocess.run benchmark for {framework_arg}: {e}[/red]")
+            return None
+
+def parse_benchmark(line):
+    m = re.search(r"(\d+)/(\d+) successful requests in ([\d.]+) seconds", line)
+    if not m:
+        return None
+    succ, total, tsec = map(float, m.groups())
+    return {"successful": f"{int(succ)}/{int(total)}", "total_time": tsec}
+
+def display_table(rows):
+    tbl = Table(title="Benchmark Summary - NO RESTRICTIONS", show_lines=True, header_style="bold magenta")
+    tbl.add_column("Framework", style="cyan")
+    tbl.add_column("Server Config", style="white")
+    tbl.add_column("Artificial Delay", style="green")
+    tbl.add_column("#Reqs", justify="right")
+    tbl.add_column("Success", justify="right")
+    tbl.add_column("Total s", justify="right", style="yellow")
+    tbl.add_column("Avg s/req", justify="right", style="blue")
+    for r in rows:
+        avg_time = r["total_time"] / NUM_REQUESTS_EXPECTED if NUM_REQUESTS_EXPECTED > 0 else 0
+        tbl.add_row(r["framework"], r["config"], r["delay"],
+                    str(NUM_REQUESTS_EXPECTED), r["successful"],
+                    f"{r['total_time']:.2f}", f"{avg_time:.4f}") # Increased precision for avg
+    console.print(tbl)
+
+# --------------------------- scenarios ----------------------------------
+SCENARIOS = [
+    {
+        "name": "FastAPI (No Delay)",
+        "config": "Uvicorn, async (default)",
+        "delay": "None",
+        "cmd": ["uvicorn", "app_fastapi.app_no_delay:app", "--host", "0.0.0.0",
+                "--port", "8000", "--log-level", "warning"],
+        "url": FASTAPI_SERVER_URL,
+        "bench_arg": "fastapi", # benchmark/run_benchmark.py uses this to pick the URL/method
+    },
+    {
+        "name": "Flask (No Delay, Threaded)",
+        "config": "Werkzeug (threaded=True)",
+        "delay": "None",
+        "cmd": [PYTHON_EXE, "app_flask/flask_application_no_delay.py"],
+        "url": FLASK_SERVER_URL,
+        "bench_arg": "flask", # benchmark/run_benchmark.py uses this to pick the URL/method
+    }
+]
+
+# ----------------------------- main -------------------------------------
+if __name__ == "__main__":
+    console.print("[bold underline]Automated Web Framework Benchmark (NO RESTRICTIONS)[/bold underline]\n")
+    results = []
+    root = os.getcwd()
+
+    for i, sc in enumerate(SCENARIOS, 1):
+        console.rule(f"[cyan]Scenario {i}/{len(SCENARIOS)} – {sc['name']}[/cyan]")
+        srv = start_server(sc["cmd"], sc["url"], sc["name"], cwd=root)
+        if not srv:
+            console.print(f"[red]Skipping benchmark for {sc['name']} as server failed to start.[/red]")
+            continue
+        try:
+            # No artificial grace period needed as apps have no sleep()
+            # if sc["name"].lower().startswith("flask"):
+            #     time.sleep(2) 
+            line = run_benchmark_script(sc["bench_arg"])
+            parsed = parse_benchmark(line) if line else None
+            if parsed:
+                results.append({"framework": sc["name"], "config": sc["config"],
+                                "delay": sc["delay"], **parsed})
+                # Optionally, open browser after benchmark. Keeping it for consistency.
+                try:
+                    console.print(f"[blue]Opening {sc['name']} page at {sc['url']} in browser...[/blue]")
+                    webbrowser.open(sc["url"])
+                    console.print(f"[blue]Keeping server alive for 3 seconds to view the page...[/blue]")
+                    time.sleep(3)  # Reduced delay as pages should load faster
+                except Exception as e:
+                    console.print(f"[yellow]Could not open browser for {sc['name']}: {e}[/yellow]")
+            else:
+                console.print(f"[yellow]No parsed benchmark results for {sc['name']}.[/yellow]")
+        finally:
+            stop_server(srv, sc["name"])
+        console.print() # Newline after each scenario
+
+    if results:
+        display_table(results)
+    else:
+        console.print("[yellow]No benchmark results were collected.[/yellow]")
+    console.print("\n[bold]No Restrictions Benchmark run finished.[/bold]")
diff --git a/run_benchmark_table.py b/run_benchmark_table.py
index 7af1733f4450d07ea7271066c07580c78d630df9..47e56dd7500c9c5fbccb08f18bc6f03f4bf63521 100644 (file)
--- a/run_benchmark_table.py
+++ b/run_benchmark_table.py
@@ -59,8 +59,8 @@ def stop_server(proc, name):
     console.print(f"[green]{name} stopped.[/green]")
 
 def run_benchmark_script(framework_arg):
-    console.print(f"Running benchmark for [bold]{framework_arg}[/bold]")
-    cmd = [PYTHON_EXE, BENCHMARK_SCRIPT_PATH, framework_arg]
+    console.print(f"Running benchmark for [bold]{framework_arg}[/bold] with {NUM_REQUESTS_EXPECTED} requests...")
+    cmd = [PYTHON_EXE, BENCHMARK_SCRIPT_PATH, framework_arg, str(NUM_REQUESTS_EXPECTED)]
 
     if framework_arg.lower() == "flask":
         final_summary_line = None