Fix pipeline fallback error handling - nest error messages inside failure check

parent efa4dfd3
Pipeline #232 canceled with stages
...@@ -3651,60 +3651,60 @@ def main(args): ...@@ -3651,60 +3651,60 @@ def main(args):
print(f"\n❌ All model retries exhausted ({retry_count}/{max_retries} attempts)") print(f"\n❌ All model retries exhausted ({retry_count}/{max_retries} attempts)")
# Print detailed error message for the user # Print detailed error message for the user
if "404" in error_str or "Entry Not Found" in error_str: if "404" in error_str or "Entry Not Found" in error_str:
print(f"❌ Model not found on HuggingFace: {model_id_to_load}") print(f"❌ Model not found on HuggingFace: {model_id_to_load}")
print(f" This model may have been removed or the ID is incorrect.") print(f" This model may have been removed or the ID is incorrect.")
if debug: if debug:
print(f"\n [DEBUG] Troubleshooting:") print(f"\n [DEBUG] Troubleshooting:")
print(f" - Check if the model exists: https://huggingface.co/{model_id_to_load}") print(f" - Check if the model exists: https://huggingface.co/{model_id_to_load}")
print(f" - Verify the model ID spelling") print(f" - Verify the model ID spelling")
print(f" - The model may have been renamed or moved") print(f" - The model may have been renamed or moved")
print(f"\n 💡 Try searching for an alternative:") print(f"\n 💡 Try searching for an alternative:")
print(f" videogen --search-models ltxvideo") print(f" videogen --search-models ltxvideo")
print(f"\n 💡 Or use the official LTX Video model:") print(f"\n 💡 Or use the official LTX Video model:")
print(f" videogen --model ltx_video --prompt 'your prompt' ...") print(f" videogen --model ltx_video --prompt 'your prompt' ...")
elif "401" in error_str or "Unauthorized" in error_str: elif "401" in error_str or "Unauthorized" in error_str:
print(f"❌ Model requires authentication: {model_id_to_load}") print(f"❌ Model requires authentication: {model_id_to_load}")
print(f" Set your HuggingFace token:") print(f" Set your HuggingFace token:")
print(f" export HF_TOKEN=your_token_here") print(f" export HF_TOKEN=your_token_here")
print(f" huggingface-cli login") print(f" huggingface-cli login")
if debug: if debug:
print(f"\n [DEBUG] To get a token:") print(f"\n [DEBUG] To get a token:")
print(f" 1. Go to https://huggingface.co/settings/tokens") print(f" 1. Go to https://huggingface.co/settings/tokens")
print(f" 2. Create a new token with 'read' permissions") print(f" 2. Create a new token with 'read' permissions")
print(f" 3. Export it: export HF_TOKEN=hf_xxx") print(f" 3. Export it: export HF_TOKEN=hf_xxx")
elif "gated" in error_str.lower(): elif "gated" in error_str.lower():
print(f"❌ This is a gated model: {model_id_to_load}") print(f"❌ This is a gated model: {model_id_to_load}")
print(f" You need to accept the license on HuggingFace:") print(f" You need to accept the license on HuggingFace:")
print(f" https://huggingface.co/{model_id_to_load}") print(f" https://huggingface.co/{model_id_to_load}")
print(f" Then set HF_TOKEN and run again.") print(f" Then set HF_TOKEN and run again.")
elif "connection" in error_str.lower() or "timeout" in error_str.lower(): elif "connection" in error_str.lower() or "timeout" in error_str.lower():
print(f"❌ Network error loading model: {model_id_to_load}") print(f"❌ Network error loading model: {model_id_to_load}")
print(f" Check your internet connection and try again.") print(f" Check your internet connection and try again.")
if debug: if debug:
print(f"\n [DEBUG] Network troubleshooting:") print(f"\n [DEBUG] Network troubleshooting:")
print(f" - Check if you can access: https://huggingface.co/{model_id_to_load}") print(f" - Check if you can access: https://huggingface.co/{model_id_to_load}")
print(f" - Try with a VPN if HuggingFace is blocked") print(f" - Try with a VPN if HuggingFace is blocked")
print(f" - Check if HF_ENDPOINT is set (for China mirror): {os.environ.get('HF_ENDPOINT', 'not set')}") print(f" - Check if HF_ENDPOINT is set (for China mirror): {os.environ.get('HF_ENDPOINT', 'not set')}")
elif "FrozenDict" in error_str or "scale_factor" in error_str or "has no attribute" in error_str: elif "FrozenDict" in error_str or "scale_factor" in error_str or "has no attribute" in error_str:
print(f"❌ Pipeline compatibility error: {model_id_to_load}") print(f"❌ Pipeline compatibility error: {model_id_to_load}")
print(f" This model uses a pipeline architecture incompatible with your diffusers version.") print(f" This model uses a pipeline architecture incompatible with your diffusers version.")
print(f" The model may require a specific diffusers version or different pipeline class.") print(f" The model may require a specific diffusers version or different pipeline class.")
if debug: if debug:
print(f"\n [DEBUG] Compatibility troubleshooting:") print(f"\n [DEBUG] Compatibility troubleshooting:")
print(f" - Try updating diffusers: pip install --upgrade git+https://github.com/huggingface/diffusers.git") print(f" - Try updating diffusers: pip install --upgrade git+https://github.com/huggingface/diffusers.git")
print(f" - Check the model's documentation for required versions") print(f" - Check the model's documentation for required versions")
print(f" - The model may be incorrectly configured in models.json") print(f" - The model may be incorrectly configured in models.json")
print(f"\n 💡 Try a different model with --model <name>") print(f"\n 💡 Try a different model with --model <name>")
else: else:
print(f"Model loading failed: {e}") print(f"Model loading failed: {e}")
if debug: if debug:
import traceback import traceback
print(f"\n [DEBUG] Full traceback:") print(f"\n [DEBUG] Full traceback:")
traceback.print_exc() traceback.print_exc()
print(f"\n 💡 Try searching for alternative models: videogen --search-models <query>") print(f"\n 💡 Try searching for alternative models: videogen --search-models <query>")
sys.exit(1) sys.exit(1)
timing.end_step() # model_loading timing.end_step() # model_loading
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment