mirror of
https://github.com/aljazceru/enclava.git
synced 2025-12-17 23:44:24 +01:00
122 lines
3.7 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Performance Analysis - Identify bottlenecks in the platform
|
|
"""
|
|
|
|
import time
|
|
import requests
|
|
import asyncio
|
|
import aiohttp
|
|
from contextlib import asynccontextmanager
|
|
|
|
def analyze_platform_bottlenecks(baseline_seconds: float = 0.218):
    """Measure each platform component's latency and rank the bottlenecks.

    Runs a timing probe for every known request-pipeline component, prints a
    per-component report, then a summary sorted by cost (slowest first).

    Args:
        baseline_seconds: End-to-end request time used as the 100% reference
            when computing each component's share. Defaults to 0.218s, the
            previously measured 218ms baseline.

    Returns:
        dict mapping component name to measured seconds, or None for
        components whose probe raised an exception.
    """
    print("🔍 PERFORMANCE BOTTLENECK ANALYSIS")
    print("=" * 60)

    # Probe functions for the individual pipeline stages.
    components = {
        "Authentication": test_auth_time,
        "Database Lookup": test_db_lookup_time,
        "API Key Validation": test_api_key_validation_time,
        "Budget Check": test_budget_check_time,
        "Audit Logging": test_audit_logging_time,
        "Response Processing": test_response_processing_time
    }

    results = {}

    for component, test_func in components.items():
        print(f"\nTesting {component}...")
        try:
            execution_time = test_func()
            results[component] = execution_time
            print(f" ⏱️ {component}: {execution_time:.3f}s")
        except Exception as e:
            # One failed probe should not abort the whole analysis;
            # record None so it is excluded from the ranking below.
            print(f" ❌ {component}: Failed - {e}")
            results[component] = None

    print("\n" + "=" * 60)
    print("BOTTLENECK SUMMARY")
    print("=" * 60)

    # Rank successfully-measured components, slowest first.
    sorted_results = sorted([(k, v) for k, v in results.items() if v is not None],
                            key=lambda x: x[1], reverse=True)

    for component, timing in sorted_results:
        # Share of the end-to-end baseline (parameterized; was hard-coded 0.218).
        percentage = (timing / baseline_seconds) * 100
        print(f"{component:20} {timing:.3f}s ({percentage:.1f}% of total)")

    return results
|
|
|
|
def test_auth_time():
    """Simulate authentication overhead.

    Placeholder probe: sleeps for a fixed interval standing in for the real
    auth work (header parsing, token validation, permission checks).

    Returns:
        Elapsed seconds for the simulated authentication path.
    """
    # perf_counter is monotonic and high-resolution, unlike time.time(),
    # so short intervals are measured reliably.
    start = time.perf_counter()

    # Simulate typical auth operations:
    # - Header parsing
    # - Token validation
    # - Permission checks
    time.sleep(0.01)  # Placeholder for actual auth time

    return time.perf_counter() - start
|
|
|
|
def test_db_lookup_time():
    """Measure database lookup latency via one authenticated API call.

    Issues a single GET against the local api-keys endpoint to capture
    connection plus query overhead. Falls back to a rough estimate when the
    service is unreachable.

    Returns:
        Elapsed seconds, or 0.05 as a fallback estimate on request failure.
    """
    start = time.perf_counter()

    try:
        # NOTE(review): hard-coded bearer token and port — move to
        # config/env before this script is shared or committed.
        requests.get("http://localhost:58000/api/v1/api-keys/",
                     headers={"Authorization": "Bearer ce_mMJNyEznKHJRvvNyyuwuQotuWJ2BvdD8"},
                     timeout=5)
        return time.perf_counter() - start
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP errors are expected.
        return 0.05  # Fallback estimate
|
|
|
|
def test_api_key_validation_time():
    """Measure the crypto cost of API key validation.

    Times one bcrypt hashpw + checkpw round trip; bcrypt is deliberately
    slow, so this is expected to dominate API-key validation latency.

    Returns:
        Elapsed seconds for the bcrypt hash-and-verify cycle.
    """
    # Monotonic, high-resolution clock for short-interval timing.
    start = time.perf_counter()

    # bcrypt is a third-party dependency; imported lazily so the rest of the
    # probes still run if it is missing.
    import bcrypt
    test_key = "test_key_for_timing"
    hashed = bcrypt.hashpw(test_key.encode(), bcrypt.gensalt())
    bcrypt.checkpw(test_key.encode(), hashed)

    return time.perf_counter() - start
|
|
|
|
def test_budget_check_time():
    """Simulate budget enforcement overhead.

    Placeholder probe: sleeps for a fixed interval standing in for budget
    calculations and their database queries.

    Returns:
        Elapsed seconds for the simulated budget check.
    """
    # Monotonic, high-resolution clock for short-interval timing.
    start = time.perf_counter()

    # Simulate budget calculations and database queries
    time.sleep(0.005)  # Placeholder for budget check operations

    return time.perf_counter() - start
|
|
|
|
def test_audit_logging_time():
    """Simulate audit logging overhead.

    Placeholder probe: sleeps for a fixed interval standing in for the audit
    log database write.

    Returns:
        Elapsed seconds for the simulated audit write.
    """
    # Monotonic, high-resolution clock for short-interval timing.
    start = time.perf_counter()

    # Simulate audit log database write
    time.sleep(0.002)  # Placeholder for audit write operations

    return time.perf_counter() - start
|
|
|
|
def test_response_processing_time():
    """Measure response serialization overhead.

    Times JSON serialization of a representative 20-entry models payload.

    Returns:
        Elapsed seconds for building and serializing the payload.
    """
    # Monotonic, high-resolution clock for short-interval timing.
    start = time.perf_counter()

    # Simulate JSON serialization and response processing
    import json
    data = {"models": [{"id": f"model_{i}", "name": f"Model {i}"} for i in range(20)]}
    json.dumps(data)

    return time.perf_counter() - start
|
|
|
|
if __name__ == "__main__":
    # Script entry point: run the full bottleneck analysis and print the report.
    analyze_platform_bottlenecks()