Commit e35fd018 authored by Your Name

Improve error message when GGUF loading fails

Tell user to update llama.cpp instead of falling back to diffusers.
parent 963aa5d5
......@@ -4594,7 +4594,13 @@ def main():
print(f"GGUF image model loaded successfully: {original_model_name}")
except Exception as llama_error:
print(f"llama.cpp load error: {llama_error}")
print(f"Will try loading image model on first request instead")
print(f"This usually means llama.cpp does not support this model architecture.")
print(f"Try updating llama.cpp: pip install llama-cpp-python --upgrade")
print(f"Or rebuild with latest ggml:")
print(f" pip install llama-cpp-python --force-reinstall --no-cache-dir \\")
print(f" CMAKE_ARGS=\"-DGGML_VULKAN=ON\" \\")
print(f" FORCE_CMAKE=1")
print(f"Image model will load on first request")
else:
print(f"Could not load GGUF image model: no valid model path")
......@@ -4992,7 +4998,13 @@ def main():
print(f"GGUF image model loaded successfully: {original_model_name}")
except Exception as llama_error:
print(f"llama.cpp load error: {llama_error}")
print(f"Will try loading image model on first request instead")
print(f"This usually means llama.cpp does not support this model architecture.")
print(f"Try updating llama.cpp: pip install llama-cpp-python --upgrade")
print(f"Or rebuild with latest ggml:")
print(f" pip install llama-cpp-python --force-reinstall --no-cache-dir \\")
print(f" CMAKE_ARGS=\"-DGGML_VULKAN=ON\" \\")
print(f" FORCE_CMAKE=1")
print(f"Image model will load on first request")
else:
print(f"Could not load GGUF image model: no valid model path")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment