Add --yes flag for auto-confirmation of cache deletion

Added --yes / -y argument that automatically answers 'yes' to confirmation
prompts when deleting cached models or clearing the entire cache.

Usage:
  videogen --remove-cached-model MODEL_ID --yes
  videogen --clear-cache --yes
parent 506faa85
@@ -780,8 +780,13 @@ def list_cached_models():
         return False
 
-def remove_cached_model(model_id):
-    """Remove a specific model from the local HuggingFace cache"""
+def remove_cached_model(model_id, yes=False):
+    """Remove a specific model from the local HuggingFace cache
+
+    Args:
+        model_id: The model ID to remove
+        yes: If True, skip confirmation prompt and auto-delete
+    """
     try:
         from huggingface_hub import scan_cache_dir, HfApi
         import shutil
@@ -806,11 +811,14 @@ def remove_cached_model(model_id):
         for repo in matching_repos:
             print(f"  - {repo.repo_id} ({repo.size_on_disk / (1024 ** 3):.2f} GB)")
 
-        # Confirm deletion
-        confirm = input("\n⚠️ Are you sure you want to delete these models? (y/N): ").strip().lower()
-        if confirm != 'y' and confirm != 'yes':
-            print("✅ Aborted - models not deleted")
-            return False
+        # Confirm deletion (skip if --yes flag is set)
+        if not yes:
+            confirm = input("\n⚠️ Are you sure you want to delete these models? (y/N): ").strip().lower()
+            if confirm != 'y' and confirm != 'yes':
+                print("✅ Aborted - models not deleted")
+                return False
+        else:
+            print("  ⚠️ Auto-confirming deletion due to --yes flag")
 
         # Delete matching repos
         deleted_count = 0
@@ -833,8 +841,12 @@ def remove_cached_model(model_id):
         return False
 
-def clear_cache():
-    """Clear the entire local HuggingFace cache"""
+def clear_cache(yes=False):
+    """Clear the entire local HuggingFace cache
+
+    Args:
+        yes: If True, skip confirmation prompt and auto-delete
+    """
     try:
         from huggingface_hub import scan_cache_dir
         import shutil
@@ -847,11 +859,14 @@ def clear_cache():
         total_size = cache_info.size_on_disk / (1024 ** 3)
         print(f"⚠️ Cache contains {len(cache_info.repos)} model(s) taking {total_size:.2f} GB")
 
-        # Confirm deletion
-        confirm = input("Are you sure you want to CLEAR THE ENTIRE CACHE? (y/N): ").strip().lower()
-        if confirm != 'y' and confirm != 'yes':
-            print("✅ Aborted - cache not cleared")
-            return False
+        # Confirm deletion (skip if --yes flag is set)
+        if not yes:
+            confirm = input("Are you sure you want to CLEAR THE ENTIRE CACHE? (y/N): ").strip().lower()
+            if confirm != 'y' and confirm != 'yes':
+                print("✅ Aborted - cache not cleared")
+                return False
+        else:
+            print("  ⚠️ Auto-confirming cache clear due to --yes flag")
 
         # Get cache directory path
         cache_dir = cache_info.repos.pop().repo_path.parent
@@ -7783,11 +7798,13 @@ def main(args):
         sys.exit(0)
 
     if args.remove_cached_model:
-        success = remove_cached_model(args.remove_cached_model)
+        yes_flag = getattr(args, 'yes', False)
+        success = remove_cached_model(args.remove_cached_model, yes=yes_flag)
         sys.exit(0 if success else 1)
 
     if args.clear_cache:
-        success = clear_cache()
+        yes_flag = getattr(args, 'yes', False)
+        success = clear_cache(yes=yes_flag)
         sys.exit(0 if success else 1)
 
     # Handle model disable/enable
# Handle model disable/enable # Handle model disable/enable
@@ -9888,6 +9905,8 @@ List TTS voices:
                         help="Allow models larger than available VRAM by using system RAM for offloading (implies --offload_strategy sequential)")
     parser.add_argument("--clear-cache", action="store_true",
                         help="Clear the entire local HuggingFace cache")
+    parser.add_argument("--yes", "-y", action="store_true",
+                        help="Automatically answer yes to confirmation prompts (for cache deletion)")
     parser.add_argument("--update-models", action="store_true",
                         help="Search HuggingFace and update model database with I2V, T2V, and NSFW models")
...
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment