fix: VRAM eviction cascades through all tiers for large LLM loads

The original eviction logic blocked ASR eviction even when an LLM
genuinely needed all 16GB VRAM (e.g., gpt-oss-20b at 13GB). Now uses
two-pass eviction: first evicts lower/same priority, then cascades to
higher priority as a last resort. Added tests for the ASR-survives and
full-cascade scenarios.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
tlg
2026-04-04 09:22:14 +02:00
parent d7a091df8c
commit 813bbe0ad0
2 changed files with 71 additions and 18 deletions

View File

@@ -65,19 +65,53 @@ async def test_evict_llm_first(manager):
@pytest.mark.asyncio
async def test_evict_cascade_asr_survives(manager):
    """When the LLM fits alongside ASR after evicting LLM+TTS, ASR survives.

    Budget walkthrough (16 GB card): ASR(4) + TTS(2) + LLM(4) = 10 GB used.
    Loading a 12 GB LLM triggers pass-1 eviction of same/lower priority:
    LLM(4) -> 10 free, TTS(2) -> 12 free. 12 + ASR's 4 = 16, so the new
    model fits without touching ASR.
    """
    backend = FakeBackend()
    await manager.load_model("cohere-transcribe", model_type="asr", vram_gb=4.0, backend=backend)
    await manager.load_model("chatterbox-multilingual", model_type="tts", vram_gb=2.0, backend=backend)
    await manager.load_model("qwen3.5-4b", model_type="llm", vram_gb=4.0, backend=backend)
    # 10 GB used. Need 12GB. Evict LLM(4)->free=10. Evict TTS(2)->free=12. ASR+12=16, fits.
    await manager.load_model("large-llm", model_type="llm", vram_gb=12.0, backend=backend)
    assert not manager.is_loaded("qwen3.5-4b")
    assert not manager.is_loaded("chatterbox-multilingual")
    assert manager.is_loaded("cohere-transcribe")  # ASR survives
    assert manager.is_loaded("large-llm")
@pytest.mark.asyncio
async def test_evict_cascade_full_for_huge_llm(manager):
    """A 13 GB LLM cannot coexist with the 4 GB ASR on 16 GB: all tiers evict.

    Pass 1 clears same/lower priority (LLM 4, TTS 2 -> 12 GB free); still
    short of 13 GB, so the cascade reaches the ASR tier as a last resort
    (ASR 4 -> 16 GB free) and the new model loads alone.
    """
    fake = FakeBackend()
    for name, mtype, gb in (
        ("cohere-transcribe", "asr", 4.0),
        ("chatterbox-multilingual", "tts", 2.0),
        ("qwen3.5-4b", "llm", 4.0),
    ):
        await manager.load_model(name, model_type=mtype, vram_gb=gb, backend=fake)
    # 10 GB in use; 13 GB requested -> full cascade (see docstring).
    await manager.load_model("gpt-oss-20b", model_type="llm", vram_gb=13.0, backend=fake)
    for evicted in ("qwen3.5-4b", "chatterbox-multilingual", "cohere-transcribe"):
        assert not manager.is_loaded(evicted)
    assert manager.is_loaded("gpt-oss-20b")
@pytest.mark.asyncio
async def test_tts_cannot_evict_asr(manager):
    """A TTS load never evicts ASR; it displaces same-tier TTS (or LLM) instead."""
    fake = FakeBackend()
    await manager.load_model("cohere-transcribe", model_type="asr", vram_gb=4.0, backend=fake)
    await manager.load_model("qwen3.5-9b-fp8", model_type="llm", vram_gb=9.0, backend=fake)
    # 13 GB used, 3 GB free: the 2 GB TTS fits without evicting anything.
    await manager.load_model("chatterbox", model_type="tts", vram_gb=2.0, backend=fake)
    for resident in ("cohere-transcribe", "qwen3.5-9b-fp8", "chatterbox"):
        assert manager.is_loaded(resident)
    # Swap in a different TTS: 15 GB used, 1 GB free, 2 GB needed.
    # Evicting the old TTS (2 GB) is sufficient; ASR stays resident.
    await manager.load_model("chatterbox-ml", model_type="tts", vram_gb=2.0, backend=fake)
    assert manager.is_loaded("cohere-transcribe")  # ASR must survive
    assert manager.is_loaded("chatterbox-ml")
@pytest.mark.asyncio
async def test_asr_evicts_llm_not_reversed(manager):
"""When ASR request arrives and LLM is loaded, evict LLM (lower priority)."""