Fix auto mode retry logic and improve error handling

- Fix retry logic bug: only run auto mode once (check for _auto_mode flag)
- Prevent infinite retry loops by preserving retry count across recursive calls
- Add better error handling for pipeline compatibility issues (FrozenDict, scale_factor errors)
- Add helpful troubleshooting messages for diffusers version incompatibilities
- Show retry exhaustion message when all alternative models fail
parent 83ea5872
Pipeline #224 canceled with stages
...@@ -2866,7 +2866,8 @@ def main(args): ...@@ -2866,7 +2866,8 @@ def main(args):
parser.error("the following arguments are required: --prompt") parser.error("the following arguments are required: --prompt")
# Handle auto mode with retry support # Handle auto mode with retry support
if getattr(args, 'auto', False): # Only run auto mode if this is not a retry (retry count not set yet)
if getattr(args, 'auto', False) and not hasattr(args, '_auto_mode'):
if not args.prompt: if not args.prompt:
parser.error("--auto requires --prompt to analyze") parser.error("--auto requires --prompt to analyze")
args = run_auto_mode(args, MODELS) args = run_auto_mode(args, MODELS)
...@@ -3109,6 +3110,16 @@ def main(args): ...@@ -3109,6 +3110,16 @@ def main(args):
print(f" - Check if you can access: https://huggingface.co/{model_id_to_load}") print(f" - Check if you can access: https://huggingface.co/{model_id_to_load}")
print(f" - Try with a VPN if HuggingFace is blocked") print(f" - Try with a VPN if HuggingFace is blocked")
print(f" - Check if HF_ENDPOINT is set (for China mirror): {os.environ.get('HF_ENDPOINT', 'not set')}") print(f" - Check if HF_ENDPOINT is set (for China mirror): {os.environ.get('HF_ENDPOINT', 'not set')}")
elif "FrozenDict" in error_str or "scale_factor" in error_str or "has no attribute" in error_str:
print(f"❌ Pipeline compatibility error: {model_id_to_load}")
print(f" This model uses a pipeline architecture incompatible with your diffusers version.")
print(f" The model may require a specific diffusers version or different pipeline class.")
if debug:
print(f"\n [DEBUG] Compatibility troubleshooting:")
print(f" - Try updating diffusers: pip install --upgrade git+https://github.com/huggingface/diffusers.git")
print(f" - Check the model's documentation for required versions")
print(f" - The model may be incorrectly configured in models.json")
print(f"\n 💡 Try a different model with --model <name>")
else: else:
print(f"Model loading failed: {e}") print(f"Model loading failed: {e}")
if debug: if debug:
...@@ -3116,6 +3127,16 @@ def main(args): ...@@ -3116,6 +3127,16 @@ def main(args):
print(f"\n [DEBUG] Full traceback:") print(f"\n [DEBUG] Full traceback:")
traceback.print_exc() traceback.print_exc()
# If we've exhausted all retries, exit with error
if getattr(args, '_auto_mode', False):
retry_count = getattr(args, '_retry_count', 0)
max_retries = getattr(args, '_max_retries', 3)
alternative_models = getattr(args, '_auto_alternative_models', [])
if retry_count >= max_retries or not alternative_models:
print(f"\n❌ All model retries exhausted ({retry_count}/{max_retries} attempts)")
print(f" Try searching for alternative models: videogen --search-models <query>")
sys.exit(1) sys.exit(1)
timing.end_step() # model_loading timing.end_step() # model_loading
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment