"""
Tests for the simulation engine.
"""
import pytest
from src.core.simulation import Simulator, SimulationConfig
from src.core.random_utils import set_seed


class TestSimulationConfig:
    """Tests for simulation configuration."""

    def test_valid_config(self):
        """Test that a valid configuration passes validation."""
        config = SimulationConfig(
            arrival_rate=0.1,
            coordinator_service_rate=0.15,
            coordinator_exit_probability=0.5,
            server_service_rates=[0.2],
            server_routing_probs=[0.5],
            simulation_time=1000.0
        )
        config.validate()  # Should not raise

    def test_invalid_arrival_rate(self):
        """Test that an invalid arrival rate raises an error."""
        config = SimulationConfig(
            arrival_rate=0,  # Invalid
            coordinator_service_rate=0.15,
            coordinator_exit_probability=0.5,
            server_service_rates=[0.2],
            server_routing_probs=[0.5]
        )
        with pytest.raises(ValueError):
            config.validate()

    def test_probability_conservation(self):
        """Test that exit and routing probabilities must sum to 1.0."""
        config = SimulationConfig(
            arrival_rate=0.1,
            coordinator_service_rate=0.15,
            coordinator_exit_probability=0.5,
            server_service_rates=[0.2],
            server_routing_probs=[0.3]  # 0.5 + 0.3 = 0.8; must sum to 1.0
        )
        with pytest.raises(ValueError):
            config.validate()


class TestSimulator:
    """Tests for the simulation engine."""

    def test_simple_simulation(self):
        """Test a simple stable system simulation."""
        config = SimulationConfig(
            arrival_rate=0.08,                 # λ = 0.08 (mean inter-arrival = 12.5)
            coordinator_service_rate=0.1,      # μc = 0.1 (mean service = 10)
            coordinator_exit_probability=0.5,  # p = 0.5
            server_service_rates=[0.1],        # μ1 = 0.1
            server_routing_probs=[0.5],        # q1 = 0.5
            warmup_time=1000.0,
            simulation_time=5000.0,
            random_seed=42
        )
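        # Back-of-envelope load check. This arithmetic rests on an assumption
        # about the network topology (requests leave after at most one server
        # visit, no feedback to the coordinator), which the simulator itself
        # does not confirm:
        #   coordinator load  ρ_c ≈ λ / μc        = 0.08 / 0.1 = 0.8
        #   server 1 load     ρ_1 ≈ (λ · q1) / μ1 = 0.04 / 0.1 = 0.4
        # Both are below 1, so the system should be stable under that reading.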
        simulator = Simulator(config)
        results = simulator.run()

        # Basic sanity checks
        assert results.total_requests_arrived > 0
        assert results.total_requests_completed > 0
        assert results.average_system_time > 0

        # Check that utilizations are reasonable (≤ 1 for a stable system,
        # with a small tolerance: they may slightly exceed 1.0 due to
        # discrete measurement over a finite horizon).
        assert results.coordinator_stats["utilization"] < 1.1
        assert results.server_stats["server_1"]["utilization"] < 1.1

        print("\nSimulation results:")
        print(f"  Requests arrived: {results.total_requests_arrived}")
        print(f"  Requests completed: {results.total_requests_completed}")
        print(f"  Average system time: {results.average_system_time:.2f}")
        print(f"  Coordinator utilization: {results.coordinator_stats['utilization']:.3f}")
        print(f"  Server 1 utilization: {results.server_stats['server_1']['utilization']:.3f}")

    def test_unstable_system(self):
        """Test detection of an unstable system (high utilization)."""
        config = SimulationConfig(
            arrival_rate=0.15,                 # High arrival rate
            coordinator_service_rate=0.1,      # Lower service rate
            coordinator_exit_probability=0.2,  # Low exit probability
            server_service_rates=[0.1],
            server_routing_probs=[0.8],        # High routing to the server
            warmup_time=500.0,
            simulation_time=2000.0,
            random_seed=42
        )
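        # Quick arithmetic on the offered load: the coordinator alone sees
        # ρ_c ≈ λ / μc = 0.15 / 0.1 = 1.5 > 1, so its queue grows without
        # bound no matter how the routing probabilities are set.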
        simulator = Simulator(config)
        results = simulator.run()

        # In an unstable system, utilization should approach 1.0
        # (or exceed it in simulation due to the finite horizon).
        print("\nUnstable system results:")
        print(f"  Coordinator utilization: {results.coordinator_stats['utilization']:.3f}")
        print(f"  Server 1 utilization: {results.server_stats['server_1']['utilization']:.3f}")

        # At least one queue should show very high utilization
        assert (results.coordinator_stats["utilization"] > 0.9 or
                results.server_stats["server_1"]["utilization"] > 0.9)

    def test_multiple_servers(self):
        """Test simulation with multiple servers."""
        config = SimulationConfig(
            arrival_rate=0.1,
            coordinator_service_rate=0.15,
            coordinator_exit_probability=0.4,
            server_service_rates=[0.2, 0.15, 0.1],  # 3 servers
            server_routing_probs=[0.2, 0.2, 0.2],   # Equal routing
            warmup_time=1000.0,
            simulation_time=5000.0,
            random_seed=42
        )
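        # Expected per-server load (under the same single-visit assumption as
        # in test_simple_simulation): each server sees λ · q_i = 0.1 × 0.2 =
        # 0.02, giving utilizations of roughly 0.02/0.2 = 0.10,
        # 0.02/0.15 ≈ 0.13, and 0.02/0.1 = 0.20.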
        simulator = Simulator(config)
        results = simulator.run()

        # Check that all 3 servers have statistics
        assert len(results.server_stats) == 3
        assert "server_1" in results.server_stats
        assert "server_2" in results.server_stats
        assert "server_3" in results.server_stats

        # All servers should have processed some requests
        for server_id, stats in results.server_stats.items():
            assert stats["total_arrivals"] > 0
            assert stats["total_departures"] > 0
            print(f"  {server_id}: {stats['total_departures']} requests, "
                  f"utilization={stats['utilization']:.3f}")

    def test_reproducibility(self):
        """Test that the same seed produces the same results."""
        config = SimulationConfig(
            arrival_rate=0.1,
            coordinator_service_rate=0.15,
            coordinator_exit_probability=0.5,
            server_service_rates=[0.2],
            server_routing_probs=[0.5],
            warmup_time=500.0,
            simulation_time=2000.0,
            random_seed=123
        )
        sim1 = Simulator(config)
        results1 = sim1.run()

        sim2 = Simulator(config)
        results2 = sim2.run()

        # The same seed should produce identical results
        assert results1.total_requests_arrived == results2.total_requests_arrived
        assert results1.total_requests_completed == results2.total_requests_completed
        assert abs(results1.average_system_time - results2.average_system_time) < 0.01
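

# A companion check sketched by analogy with test_reproducibility above. It is
# not part of the original suite and assumes only the SimulationConfig fields
# and Simulator/results attributes already exercised in this file: distinct
# seeds should almost surely diverge, which guards against a seed that is
# accepted but silently ignored.
def test_different_seeds_diverge():
    """Sketch: different seeds should (almost surely) produce different runs."""
    def run_with_seed(seed):
        config = SimulationConfig(
            arrival_rate=0.1,
            coordinator_service_rate=0.15,
            coordinator_exit_probability=0.5,
            server_service_rates=[0.2],
            server_routing_probs=[0.5],
            warmup_time=500.0,
            simulation_time=2000.0,
            random_seed=seed
        )
        return Simulator(config).run()

    results_a = run_with_seed(1)
    results_b = run_with_seed(2)
    # If every statistic matched exactly across different seeds, the seed
    # would most likely not be driving the random streams at all.
    assert (results_a.total_requests_arrived != results_b.total_requests_arrived
            or abs(results_a.average_system_time - results_b.average_system_time) > 1e-9)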