updated redis impl

2025-04-20 14:58:55 +03:00
parent cc19cb7e6d
commit 6ab4410a82
4 changed files with 264 additions and 6 deletions

@@ -1,4 +1,9 @@
from Controllers.Redis.database import RedisActions
import threading
import time
import random
import uuid
import concurrent.futures


def example_set_json() -> None:
@@ -106,5 +111,158 @@ def run_all_examples() -> None:
    example_resolve_expires_at()


def run_concurrent_test(num_threads=100):
    """Run a comprehensive concurrent test with multiple threads to verify Redis connection handling."""
    print(f"\nStarting comprehensive Redis concurrent test with {num_threads} threads...")

    # Results tracking with detailed metrics
    results = {
        "passed": 0,
        "failed": 0,
        "retried": 0,
        "errors": [],
        "operation_times": [],
        "retry_count": 0,
        "max_retries": 3,
        "retry_delay": 0.1,
    }
    results_lock = threading.Lock()
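    # The lock serializes writes to the shared `results` dict from worker threads.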

    def worker(thread_id):
        # Track operation timing
        start_time = time.time()
        retry_count = 0
        success = False
        error_message = None
        while retry_count <= results["max_retries"] and not success:
            try:
                # Generate unique key for this thread
                unique_id = str(uuid.uuid4())[:8]
                full_key = f"test:concurrent:{thread_id}:{unique_id}"
                # Simple string operations instead of JSON
                test_value = f"test-value-{thread_id}-{time.time()}"
                # Set data in Redis with pipeline for efficiency
                from Controllers.Redis.database import redis_cli
                # Use pipeline to reduce network overhead
                with redis_cli.pipeline() as pipe:
                    pipe.set(full_key, test_value)
                    pipe.get(full_key)
                    pipe.delete(full_key)
                    results_list = pipe.execute()
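                    # execute() sends the queued commands in one round trip and
                    # returns their replies in order: [SET result, GET result, DELETE count]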
                # Check results
                set_ok = results_list[0]
                retrieved_value = results_list[1]
                if isinstance(retrieved_value, bytes):
                    retrieved_value = retrieved_value.decode('utf-8')
                # Verify data
                success = set_ok and retrieved_value == test_value
                if success:
                    break
                else:
                    error_message = f"Data verification failed: set_ok={set_ok}, value_match={retrieved_value == test_value}"
                    retry_count += 1
                    with results_lock:
                        results["retry_count"] += 1
                    time.sleep(results["retry_delay"] * (2 ** retry_count))  # Exponential backoff
            except Exception as e:
                error_message = str(e)
                retry_count += 1
                with results_lock:
                    results["retry_count"] += 1
                # Check if it's a connection error and retry
                if "Too many connections" in str(e) or "Connection" in str(e):
                    # Exponential backoff for connection issues
                    backoff_time = results["retry_delay"] * (2 ** retry_count)
                    time.sleep(backoff_time)
                else:
                    # For other errors, use a smaller delay
                    time.sleep(results["retry_delay"])
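
        # The loop exits on success or after max_retries + 1 attempts, sleeping
        # retry_delay * 2 ** retry_count seconds between connection-error retries.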
        # Record operation time
        operation_time = time.time() - start_time
        # Update results
        with results_lock:
            if success:
                results["passed"] += 1
                results["operation_times"].append(operation_time)
                if retry_count > 0:
                    results["retried"] += 1
            else:
                results["failed"] += 1
                if error_message:
                    results["errors"].append(f"Thread {thread_id} failed after {retry_count} retries: {error_message}")
                else:
                    results["errors"].append(f"Thread {thread_id} failed after {retry_count} retries with unknown error")

    # Create and start threads using a thread pool
    start_time = time.time()
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
        futures = [executor.submit(worker, i) for i in range(num_threads)]
        concurrent.futures.wait(futures)
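        # wait() blocks until every submitted future has completed
        # (the default return_when=ALL_COMPLETED)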

    # Calculate execution time and performance metrics
    execution_time = time.time() - start_time
    ops_per_second = num_threads / execution_time if execution_time > 0 else 0

    # Calculate additional metrics if we have successful operations
    avg_op_time = 0
    min_op_time = 0
    max_op_time = 0
    p95_op_time = 0
    if results["operation_times"]:
        avg_op_time = sum(results["operation_times"]) / len(results["operation_times"])
        min_op_time = min(results["operation_times"])
        max_op_time = max(results["operation_times"])
        # Calculate 95th percentile
        sorted_times = sorted(results["operation_times"])
        p95_index = int(len(sorted_times) * 0.95)
        p95_op_time = sorted_times[p95_index] if p95_index < len(sorted_times) else sorted_times[-1]

    # Print detailed results
    print("\nConcurrent Redis Test Results:")
    print(f"Total threads: {num_threads}")
    print(f"Passed: {results['passed']}")
    print(f"Failed: {results['failed']}")
    print(f"Operations with retries: {results['retried']}")
    print(f"Total retry attempts: {results['retry_count']}")
    print(f"Success rate: {(results['passed'] / num_threads) * 100:.2f}%")

    print("\nPerformance Metrics:")
    print(f"Total execution time: {execution_time:.2f} seconds")
    print(f"Operations per second: {ops_per_second:.2f}")
    if results["operation_times"]:
        print(f"Average operation time: {avg_op_time * 1000:.2f} ms")
        print(f"Minimum operation time: {min_op_time * 1000:.2f} ms")
        print(f"Maximum operation time: {max_op_time * 1000:.2f} ms")
        print(f"95th percentile operation time: {p95_op_time * 1000:.2f} ms")

    # Print errors (limited to 10 for readability)
    if results["errors"]:
        print("\nErrors:")
        for error in results["errors"][:10]:
            print(f"- {error}")
        if len(results["errors"]) > 10:
            print(f"- ... and {len(results['errors']) - 10} more errors")

    # Return results for potential further analysis
    return results


if __name__ == "__main__":
    # Run basic examples
    run_all_examples()
    # Run enhanced concurrent test
    run_concurrent_test(10000)