Fix loading of models without model_index.json (I2V models)

When a model has component folders (transformer, vae, etc.) but no model_index.json
at the root level, the loading would fail. This fix adds:

1. Base model fallback strategy:
   - Detect model type from model ID (ltx, wan, svd, cogvideo, mochi)
   - Load the known base model first
   - Then attempt to load fine-tuned components from the target model

2. Component detection and loading:
   - List files in the repo to find component folders
   - Load transformer, VAE components from the fine-tuned model
   - Fall back to base model if component loading fails

3. Better error messages:
   - Clear indication of what went wrong
   - Suggestions for alternative models

This fixes loading of models like Muinez/ltxvideo-2b-nsfw which have
all component folders but are missing the model_index.json file.
parent ebf80ab6
......@@ -7349,17 +7349,105 @@ def main(args):
print(f"\n⚠️ I2V model model_index.json not found at root level")
print(f" Attempting alternative loading strategies...")
# Try with DiffusionPipeline (generic loader)
try:
print(f" Trying generic DiffusionPipeline for I2V model...")
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained(model_id_to_load, **pipe_kwargs)
print(f" ✅ Successfully loaded I2V model with DiffusionPipeline")
PipelineClass = DiffusionPipeline
except Exception as generic_e:
if debug:
print(f" [DEBUG] Generic loader also failed: {generic_e}")
raise e # Re-raise if fallback failed
# Strategy 1: Try loading from base model and then fine-tuned weights
base_model_fallbacks = {
"ltx": "Lightricks/LTX-Video",
"ltxvideo": "Lightricks/LTX-Video",
"wan": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
"svd": "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
"cogvideo": "THUDM/CogVideoX-5b",
"mochi": "genmo/mochi-1-preview",
}
loaded_with_base = False
model_id_lower = model_id_to_load.lower()
for key, base_model in base_model_fallbacks.items():
if key in model_id_lower:
print(f" Trying to load base model first: {base_model}")
print(f" Then loading fine-tuned weights from: {model_id_to_load}")
try:
# Load base model
pipe = PipelineClass.from_pretrained(base_model, **pipe_kwargs)
print(f" ✅ Base model loaded")
# Now try to load the fine-tuned components
# This works for models that have component folders but no model_index.json
try:
from huggingface_hub import hf_hub_download, list_repo_files
# List files in the repo to see what components exist
repo_files = list_repo_files(model_id_to_load, token=hf_token)
# Check for component folders
component_folders = set()
for f in repo_files:
parts = f.split('/')
if len(parts) > 1:
component_folders.add(parts[0])
print(f" Found component folders: {component_folders}")
# Load each component that exists
components_loaded = []
for component in ['transformer', 'unet', 'vae', 'text_encoder', 'text_encoder_2']:
if component in component_folders:
try:
if component == 'transformer':
from diffusers import LTXVideoTransformer3DModel
pipe.transformer = LTXVideoTransformer3DModel.from_pretrained(
model_id_to_load, subfolder="transformer",
torch_dtype=pipe_kwargs.get("torch_dtype", torch.float16)
)
components_loaded.append(component)
elif component == 'vae':
from diffusers import AutoencoderKLLTXVideo
pipe.vae = AutoencoderKLLTXVideo.from_pretrained(
model_id_to_load, subfolder="vae",
torch_dtype=pipe_kwargs.get("torch_dtype", torch.float16)
)
components_loaded.append(component)
except Exception as comp_e:
if debug:
print(f" [DEBUG] Could not load {component}: {comp_e}")
if components_loaded:
print(f" ✅ Loaded components: {components_loaded}")
loaded_with_base = True
else:
print(f" ⚠️ No components could be loaded from fine-tuned model")
print(f" Using base model: {base_model}")
loaded_with_base = True # Still use base model
except Exception as ft_e:
if debug:
print(f" [DEBUG] Fine-tuned loading failed: {ft_e}")
print(f" Using base model: {base_model}")
loaded_with_base = True
break
except Exception as base_e:
if debug:
print(f" [DEBUG] Base model loading failed: {base_e}")
continue
# Strategy 2: Try with DiffusionPipeline (generic loader)
if not loaded_with_base:
try:
print(f" Trying generic DiffusionPipeline for I2V model...")
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained(model_id_to_load, **pipe_kwargs)
print(f" ✅ Successfully loaded I2V model with DiffusionPipeline")
PipelineClass = DiffusionPipeline
loaded_with_base = True
except Exception as generic_e:
if debug:
print(f" [DEBUG] Generic loader also failed: {generic_e}")
if not loaded_with_base:
print(f"\n❌ Could not load I2V model: {model_id_to_load}")
print(f" The model may be in an unsupported format or require manual setup.")
print(f" Try using a different model with --model <name>")
raise e # Re-raise if all fallbacks failed
else:
raise e # Re-raise for non-404 errors
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment