PerfGuard
Mission: Conducts performance testing and real-time monitoring, flagging resource bottlenecks, concurrency issues, and memory leaks.
Concurrent HTTP Stress Testing with a Weighted Load Distribution
import logging
import time
import random
import threading
import concurrent.futures

import requests

logging.basicConfig(level=logging.INFO)
class PerfGuardAgent:
    def __init__(self, concurrency: int = 50, test_duration: int = 60):
        self.concurrency = concurrency
        self.test_duration = test_duration
        self._stats_lock = threading.Lock()  # stats are updated from many worker threads
        self.stats = {
            'total_requests': 0,
            'successful': 0,
            'failed': 0,
            'latencies': []
        }
    def run_stress_test(self, base_url: str, endpoints: dict):
        """
        endpoints maps HTTP methods to endpoint lists, e.g.
        {'GET': ['/api/v1/users', '/api/v1/transactions'], 'POST': ['/api/v1/create']}.
        Requests are spread across the configured methods and endpoints by random
        selection (see the weighted-selection sketch below for biasing the mix).
        """
        start_time = time.time()
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.concurrency) as executor:
            while time.time() - start_time < self.test_duration:
                # Submit one batch per iteration and wait for it to finish, so the
                # executor queue stays bounded instead of growing without limit.
                batch = [
                    executor.submit(self._make_request, base_url, endpoints)
                    for _ in range(self.concurrency)
                ]
                concurrent.futures.wait(batch)
        self.report()
    def _make_request(self, base_url: str, endpoints: dict):
        http_method = random.choice(list(endpoints.keys()))
        endpoint = random.choice(endpoints[http_method])
        full_url = base_url + endpoint
        t_start = time.time()
        try:
            if http_method == 'GET':
                response = requests.get(full_url, timeout=10)
            elif http_method == 'POST':
                response = requests.post(full_url, data={"sample": "data"}, timeout=10)
            else:
                # Covers PUT, DELETE, etc. if they are added to the endpoints map
                response = requests.request(http_method, full_url, timeout=10)
            latency = time.time() - t_start
            with self._stats_lock:
                self.stats['total_requests'] += 1
                self.stats['latencies'].append(latency)
                if response.status_code < 400:
                    self.stats['successful'] += 1
                else:
                    self.stats['failed'] += 1
            if response.status_code >= 400:
                logging.warning(f"Request to {full_url} failed with status {response.status_code}")
        except Exception as e:
            with self._stats_lock:
                self.stats['total_requests'] += 1
                self.stats['failed'] += 1
            logging.error(f"Request to {full_url} raised exception: {e}")
    def report(self):
        total = self.stats['total_requests']
        success = self.stats['successful']
        fail = self.stats['failed']
        latencies = self.stats['latencies']
        avg_latency = sum(latencies) / len(latencies) if latencies else 0
        logging.info("PerfGuard Stress Test Completed.")
        logging.info(f"Total Requests: {total}, Successful: {success}, Failed: {fail}")
        logging.info(f"Average Latency: {round(avg_latency, 3)} seconds")
# Usage:
# endpoints_map = {
# 'GET': ['/api/v1/users', '/api/v1/transactions'],
# 'POST': ['/api/v1/create']
# }
# perf_guard = PerfGuardAgent(concurrency=100, test_duration=30)
# perf_guard.run_stress_test("https://myapp.example.com", endpoints_map)
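The section title refers to a weighted load distribution, while _make_request above draws methods and endpoints uniformly at random. Below is a minimal sketch of a weighted variant; the (path, weight) pair structure, the weighted_endpoints map, and the pick_weighted helper are illustrative assumptions, not part of the agent above.

import random

# Sketch: weighted endpoint selection (assumed extension of the agent above).
# Each method maps to (path, weight) pairs; a higher weight means more traffic.
weighted_endpoints = {
    'GET': [('/api/v1/users', 8), ('/api/v1/transactions', 2)],
    'POST': [('/api/v1/create', 1)],
}

def pick_weighted(endpoints: dict):
    # Flatten into parallel lists of (method, path) choices and their weights,
    # then let random.choices perform the weighted draw.
    choices, weights = [], []
    for method, entries in endpoints.items():
        for path, weight in entries:
            choices.append((method, path))
            weights.append(weight)
    return random.choices(choices, weights=weights, k=1)[0]

# Example: inside _make_request, replace the two random.choice calls with:
# http_method, endpoint = pick_weighted(weighted_endpoints)

With this shape, each endpoint receives traffic roughly in proportion to its weight, which is usually closer to a production mix than a uniform draw.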