 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-from vllm.v1.metrics.stats import IterationStats
+from vllm.v1.engine import FinishReason
+from vllm.v1.metrics.stats import IterationStats, RequestStateStats
 
 
 def test_iteration_stats_repr():
     iteration_stats = IterationStats()
     assert repr(iteration_stats).startswith("IterationStats(")
+
+
+def test_prefill_kv_computed_with_cache():
+    """Test that prefill KV compute correctly excludes cached tokens."""
+    iteration_stats = IterationStats()
+    req_stats = RequestStateStats(arrival_time=0.0)
+    req_stats.scheduled_ts = 0.1
+    req_stats.first_token_ts = 0.5
+    req_stats.last_token_ts = 5.0
+    req_stats.num_generation_tokens = 50
+
+    # Case 1: With prefix cache (1200 tokens cached)
+    iteration_stats.update_from_finished_request(
+        finish_reason=FinishReason.STOP,
+        num_prompt_tokens=10000,
+        max_tokens_param=100,
+        req_stats=req_stats,
+        num_cached_tokens=1200,
+    )
+
+    finished_req = iteration_stats.finished_requests[0]
+    assert finished_req.num_prompt_tokens == 10000
+    assert finished_req.num_cached_tokens == 1200
+
+    # Verify calculation: prefill KV = prompt tokens - cached tokens
+    prefill_kv_computed = finished_req.num_prompt_tokens - max(
+        finished_req.num_cached_tokens, 0
+    )
+    assert prefill_kv_computed == 8800  # 10000 - 1200
+
+
+def test_prefill_kv_computed_no_cache():
+    """Test prefill KV compute without prefix caching."""
+    iteration_stats = IterationStats()
+    req_stats = RequestStateStats(arrival_time=0.0)
+    req_stats.scheduled_ts = 0.1
+    req_stats.first_token_ts = 0.5
+    req_stats.last_token_ts = 2.0
+    req_stats.num_generation_tokens = 10
+
+    # Case 2: No prefix cache
+    iteration_stats.update_from_finished_request(
+        finish_reason=FinishReason.STOP,
+        num_prompt_tokens=2000,
+        max_tokens_param=100,
+        req_stats=req_stats,
+        num_cached_tokens=0,
+    )
+
+    finished_req = iteration_stats.finished_requests[0]
+    assert finished_req.num_prompt_tokens == 2000
+    assert finished_req.num_cached_tokens == 0
+
+    # Verify calculation: prefill KV = full prompt when no cache
+    prefill_kv_computed = finished_req.num_prompt_tokens - max(
+        finished_req.num_cached_tokens, 0
+    )
+    assert prefill_kv_computed == 2000
+
+
+def test_prefill_kv_computed_edge_cases():
+    """Test edge cases for prefill KV compute calculation."""
+    iteration_stats = IterationStats()
+    req_stats = RequestStateStats(arrival_time=0.0)
+    req_stats.scheduled_ts = 0.1
+    req_stats.first_token_ts = 0.5
+    req_stats.last_token_ts = 1.0
+    req_stats.num_generation_tokens = 1
+
+    # Case 3: Negative num_cached_tokens (shouldn't happen, but handle gracefully)
+    iteration_stats.update_from_finished_request(
+        finish_reason=FinishReason.STOP,
+        num_prompt_tokens=100,
+        max_tokens_param=10,
+        req_stats=req_stats,
+        num_cached_tokens=-1,
+    )
+
+    finished_req = iteration_stats.finished_requests[0]
+    # max() should handle negative values
+    prefill_kv_computed = finished_req.num_prompt_tokens - max(
+        finished_req.num_cached_tokens, 0
+    )
+    assert prefill_kv_computed == 100  # Should treat negative as 0
+
+    # Case 4: All tokens cached (shouldn't happen in practice)
+    iteration_stats2 = IterationStats()
+    iteration_stats2.update_from_finished_request(
+        finish_reason=FinishReason.STOP,
+        num_prompt_tokens=100,
+        max_tokens_param=10,
+        req_stats=req_stats,
+        num_cached_tokens=100,
+    )
+
+    finished_req2 = iteration_stats2.finished_requests[0]
+    prefill_kv_computed2 = finished_req2.num_prompt_tokens - max(
+        finished_req2.num_cached_tokens, 0
+    )
+    assert prefill_kv_computed2 == 0  # All cached, nothing computed
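
Each test derives the checked quantity inline from the finished-request fields rather than reading it from the stats object. As a minimal sketch of that arithmetic (the helper name prefill_kv_computed is illustrative only, not part of the vLLM API), the calculation the assertions exercise is:

def prefill_kv_computed(num_prompt_tokens: int, num_cached_tokens: int) -> int:
    # Prompt tokens whose KV had to be computed during prefill: prefix-cache
    # hits are excluded, and a negative cache count is clamped to zero so the
    # result never exceeds the prompt length.
    return num_prompt_tokens - max(num_cached_tokens, 0)

assert prefill_kv_computed(10000, 1200) == 8800  # partial prefix-cache hit
assert prefill_kv_computed(2000, 0) == 2000      # no cache
assert prefill_kv_computed(100, -1) == 100       # negative count clamped to 0
assert prefill_kv_computed(100, 100) == 0        # fully cached prompt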