Fix auto mode model selection issues

parent 0a02552e
......@@ -3753,12 +3753,22 @@ def select_best_model(gen_type, models, vram_gb=24, prefer_quality=True, return_
# Check VRAM compatibility using base model requirements
# LoRAs add a small overhead (~1-2GB)
vram_est = parse_vram_estimate(base_model_info.get("vram", "~10 GB")) + 2
if vram_est > vram_gb * 1.1: # Allow 10% margin
if vram_est > vram_gb: # No margin - only allow models that fit within available VRAM
continue
# Get capabilities from base model
base_caps = detect_model_type(base_model_info)
# Check if base model supports the required generation type
if gen_type_str == "t2v" and not base_caps["t2v"]:
continue
elif gen_type_str == "i2v" and not base_caps["i2v"]:
continue
elif gen_type_str == "t2i" and not base_caps["t2i"]:
continue
elif gen_type_str == "i2i" and not base_caps["i2i"]:
continue
# Score the LoRA
score = 0
reasons = []
......@@ -3895,7 +3905,7 @@ def select_best_model(gen_type, models, vram_gb=24, prefer_quality=True, return_
# Non-LoRA model handling (original logic)
# Check VRAM compatibility
vram_est = parse_vram_estimate(info.get("vram", "~10 GB"))
if vram_est > vram_gb * 1.1: # Allow 10% margin
if vram_est > vram_gb: # No margin - only allow models that fit within available VRAM
continue
# Check model capabilities
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment