Fix: Wrap LoRA loading and offloading in defer_i2v_loading check

When defer_i2v_loading=True (I2V mode without a provided image), the code
set pipe=None but then attempted to call pipe.load_lora_weights() and
pipe.enable_model_cpu_offload() on None, causing an AttributeError.

This fix wraps the LoRA loading and offloading configuration blocks
inside an 'if not defer_i2v_loading:' condition so they are skipped
when the I2V model loading is deferred until after image generation.
parent 0ccc1d52
Pipeline #237 failed with stages
......@@ -3938,61 +3938,63 @@ def main(args):
timing.end_step() # model_loading
# Apply LoRA if this is a LoRA model
if is_lora and lora_id:
timing.begin_step("lora_loading")
print(f" Loading LoRA adapter: {lora_id}")
try:
# Load LoRA weights
pipe.load_lora_weights(lora_id)
print(f" ✅ LoRA applied successfully")
except Exception as e:
print(f" ⚠️ LoRA loading failed: {e}")
print(f" Continuing with base model...")
timing.end_step() # lora_loading
# Only apply LoRA and offloading if we actually loaded the model (not deferred)
if not defer_i2v_loading:
# Apply LoRA if this is a LoRA model
if is_lora and lora_id:
timing.begin_step("lora_loading")
print(f" Loading LoRA adapter: {lora_id}")
try:
# Load LoRA weights
pipe.load_lora_weights(lora_id)
print(f" ✅ LoRA applied successfully")
except Exception as e:
print(f" ⚠️ LoRA loading failed: {e}")
print(f" Continuing with base model...")
timing.end_step() # lora_loading
if args.no_filter and hasattr(pipe, "safety_checker"):
pipe.safety_checker = None
if args.no_filter and hasattr(pipe, "safety_checker"):
pipe.safety_checker = None
# Offloading
off = args.offload_strategy
if off == "auto_map":
pipe.enable_model_cpu_offload()
elif off == "sequential":
pipe.enable_sequential_cpu_offload()
elif off == "group":
try:
pipe.enable_group_offload(group_size=args.offload_group_size)
except:
print("Group offload unavailable → model offload fallback")
# Offloading
off = args.offload_strategy
if off == "auto_map":
pipe.enable_model_cpu_offload()
elif off == "model":
pipe.enable_model_cpu_offload()
else:
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
pipe.enable_attention_slicing("max")
try:
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()
except:
pass
elif off == "sequential":
pipe.enable_sequential_cpu_offload()
elif off == "group":
try:
pipe.enable_group_offload(group_size=args.offload_group_size)
except:
print("Group offload unavailable → model offload fallback")
pipe.enable_model_cpu_offload()
elif off == "model":
pipe.enable_model_cpu_offload()
else:
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
pipe.enable_attention_slicing("max")
try:
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()
except:
pass
if "wan" in args.model and hasattr(pipe, "scheduler"):
try:
pipe.scheduler = UniPCMultistepScheduler.from_config(
pipe.scheduler.config,
prediction_type="flow_prediction",
flow_shift=extra.get("flow_shift", 3.0)
)
except:
pass
if torch.cuda.is_available():
try:
pipe.enable_xformers_memory_efficient_attention()
except:
pass
if "wan" in args.model and hasattr(pipe, "scheduler"):
try:
pipe.scheduler = UniPCMultistepScheduler.from_config(
pipe.scheduler.config,
prediction_type="flow_prediction",
flow_shift=extra.get("flow_shift", 3.0)
)
except:
pass
# ─── Generation ────────────────────────────────────────────────────────────
seed = args.seed if args.seed >= 0 else random.randint(0, 2**31 - 1)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment