Fix Wan 2.2 I2V base model detection and VRAM estimation

- Add correct base model for Wan 2.2 I2V: Wan-AI/Wan2.2-I2V-A14B-Diffuser
- Add specific VRAM estimate for Wan 2.2 I2V MoE models (~14GB)
- Apply more conservative VRAM calculation for models with weights/LoRAs
- Fix indentation error in add_model_from_hf function
parent 1fdeb905
@@ -1429,7 +1429,12 @@ def add_model_from_hf(model_id_or_url, name=None, hf_token=None, debug=False):
     else:
         # Try to infer base model from LoRA name
         if "wan" in model_id.lower():
-            base_model = "Wan-AI/Wan2.1-I2V-14B-Diffusers" if is_i2v else "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+            if "wan2.2" in model_id.lower():
+                # Wan 2.2 models - use the new MoE base
+                base_model = "Wan-AI/Wan2.2-I2V-A14B-Diffuser" if is_i2v else "Wan-AI/Wan2.2-T2V-14B-Diffuser"
+            else:
+                # Wan 2.1 and earlier
+                base_model = "Wan-AI/Wan2.1-I2V-14B-Diffusers" if is_i2v else "Wan-AI/Wan2.1-T2V-14B-Diffusers"
         elif "svd" in model_id.lower() or "stable-video" in model_id.lower():
             base_model = "stabilityai/stable-video-diffusion-img2vid-xt-1-1"
         elif "flux" in model_id.lower():
@@ -3861,10 +3866,12 @@ def select_best_model(gen_type, models, vram_gb=24, prefer_quality=True, return_
                 # Try to infer base model from LoRA name
                 lora_id = info.get("id", "").lower()
                 if "wan" in lora_id:
-                    if info.get("supports_i2v"):
-                        base_model_id = "Wan-AI/Wan2.1-I2V-14B-Diffusers"
-                    else:
-                        base_model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+                    if "wan2.2" in lora_id:
+                        # Wan 2.2 models - use the new MoE base
+                        base_model_id = "Wan-AI/Wan2.2-I2V-A14B-Diffuser" if info.get("supports_i2v") else "Wan-AI/Wan2.2-T2V-14B-Diffuser"
+                    else:
+                        # Wan 2.1 and earlier
+                        base_model_id = "Wan-AI/Wan2.1-I2V-14B-Diffusers" if info.get("supports_i2v") else "Wan-AI/Wan2.1-T2V-14B-Diffusers"
                 elif "svd" in lora_id or "stable-video" in lora_id:
                     base_model_id = "stabilityai/stable-video-diffusion-img2vid-xt-1-1"
                 elif "sdxl" in lora_id:
@@ -3887,7 +3894,10 @@ def select_best_model(gen_type, models, vram_gb=24, prefer_quality=True, return_
             # If base model not in our database, create a minimal info dict
             if not base_model_info:
                 # Estimate VRAM based on base model type
-                if "wan" in base_model_id.lower():
+                # Wan 2.2 I2V MoE model - 14B parameters with mixture of experts
+                if "wan2.2" in base_model_id.lower() and "i2v" in base_model_id.lower():
+                    base_vram_est = 14.0  # Wan 2.2 I2V MoE is ~14GB
+                elif "wan" in base_model_id.lower():
                     base_vram_est = 24.0
                 elif "svd" in base_model_id.lower():
                     base_vram_est = 16.0
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment