#!/usr/bin/env python3
"""Test script to verify L total bug fixes."""
import sys

# Project packages live under apps/backend/src; the path insert must run
# BEFORE the analytics/core imports below, so it stays at module level.
sys.path.insert(0, 'apps/backend/src')

from analytics.jackson import JacksonAnalyzer
from analytics.comparison import compare_results
from core.simulation import Simulator, SimulationConfig


def _check(actual: float, expected: float, ok_msg: str, fail_msg: str) -> None:
    """Report pass/fail for a single bug-fix verification (0.01 abs tolerance)."""
    if abs(actual - expected) < 0.01:
        print(ok_msg)
    else:
        print(f"{fail_msg}: Got {actual}, expected {expected}")


def main() -> None:
    """Run the analytical model and the simulation, then verify both L-total fixes."""
    # Configuration from the user's JSON file
    config = SimulationConfig(
        arrival_rate=0.006,                # λ₀
        coordinator_service_rate=0.1,      # μc
        coordinator_exit_probability=0.5,  # p
        server_service_rates=[0.00833],    # μ₁
        server_routing_probs=[0.5],        # q₁
        warmup_time=1000,
        simulation_time=10000,             # Shorter for quick test
        random_seed=42,
    )

    print("=" * 80)
    print("TESTING L TOTAL BUG FIXES")
    print("=" * 80)

    # 1. Analytical (Jackson network) results.
    print("\n1. Running analytical analysis...")
    analyzer = JacksonAnalyzer(
        external_arrival_rate=config.arrival_rate,
        coordinator_service_rate=config.coordinator_service_rate,
        coordinator_exit_prob=config.coordinator_exit_probability,
        server_service_rates=config.server_service_rates,
        server_routing_probs=config.server_routing_probs,
    )
    analytical = analyzer.analyze()
    print(f" λ₀ (external) = {analytical.external_arrival_rate:.6f}")
    print(f" γ₀ (coordinator effective) = {analytical.coordinator.arrival_rate:.6f}")
    print(f" Analytical L_total = {analytical.total_average_customers:.4f}")
    print(f" Analytical W_total = {analytical.total_average_time:.4f}")

    # 2. Discrete-event simulation results.
    print("\n2. Running simulation...")
    simulator = Simulator(config)
    results = simulator.run()
    print(f" Simulation W = {results.average_system_time:.4f}")
    print(f" Simulation L = {results.average_customers_in_system:.4f} ← Should NOT be 0!")

    # Bug Fix #1: average_customers_in_system must satisfy Little's law, L = λ₀ × W.
    expected_L = config.arrival_rate * results.average_system_time
    print(f"\n ✓ Expected L (λ₀ × W) = {expected_L:.4f}")
    _check(
        results.average_customers_in_system,
        expected_L,
        " ✅ BUG FIX #1 VERIFIED: average_customers_in_system is calculated correctly",
        " ❌ BUG FIX #1 FAILED",
    )

    # 3. Analytical-vs-simulation comparison.
    print("\n3. Running comparison...")
    comparison = compare_results(analytical, results)
    print(f" Analytical L_total = {comparison.analytical_total_L:.4f}")
    print(f" Simulation L_total = {comparison.simulation_total_L:.4f}")
    print(f" Difference = {comparison.total_L_diff_percent:.2f}%")

    # Bug Fix #2: the comparison must use the external rate λ₀, not the
    # coordinator's effective rate γ₀, when applying Little's law.
    expected_comparison_L = analytical.external_arrival_rate * results.average_system_time
    print(f"\n ✓ Expected comparison L (λ₀ × W) = {expected_comparison_L:.4f}")
    _check(
        comparison.simulation_total_L,
        expected_comparison_L,
        " ✅ BUG FIX #2 VERIFIED: Comparison uses λ₀ (not γ₀)",
        " ❌ BUG FIX #2 FAILED",
    )

    # Final summary: the two independently computed L values should agree.
    print("\n" + "=" * 80)
    print("SUMMARY")
    print("=" * 80)
    print(f"λ₀ (external arrival rate) = {analytical.external_arrival_rate:.6f}")
    print(f"γ₀ (coordinator effective rate) = {analytical.coordinator.arrival_rate:.6f}")
    print(f"Ratio γ₀/λ₀ = {analytical.coordinator.arrival_rate / analytical.external_arrival_rate:.2f} (should be 1/p = {1/config.coordinator_exit_probability:.2f})")
    print()
    print(f"Simulation L = {results.average_customers_in_system:.4f}")
    print(f"Comparison L = {comparison.simulation_total_L:.4f}")
    print(f"Analytical L = {comparison.analytical_total_L:.4f}")
    print()
    if abs(results.average_customers_in_system - comparison.simulation_total_L) < 0.01:
        print("✅ Both L values match!")
    else:
        print(f"⚠️ L values differ: simulation={results.average_customers_in_system:.4f} vs comparison={comparison.simulation_total_L:.4f}")
    print("\n" + "=" * 80)


if __name__ == "__main__":
    main()