Add --allow-bigger-models option to web interface and MCP server

parent e0c0485f
......@@ -328,6 +328,7 @@ async function handleGenerate(e) {
params.sync_audio = form.querySelector('#sync_audio')?.checked || false;
params.lip_sync = form.querySelector('#lip_sync')?.checked || false;
params.no_filter = form.querySelector('#no_filter')?.checked || false;
params.allow_bigger_models = form.querySelector('#allow_bigger_models')?.checked || false;
params.debug = form.querySelector('#debug')?.checked || false;
params.voice_clone = form.querySelector('#voice_clone')?.checked || false;
params.create_subtitles = form.querySelector('#create_subtitles')?.checked || false;
......
......@@ -433,6 +433,10 @@
<input type="checkbox" id="no_filter" name="no_filter">
<span>Disable NSFW Filter</span>
</label>
<label class="checkbox-label">
<input type="checkbox" id="allow_bigger_models" name="allow_bigger_models">
<span>Allow Bigger Models (Use RAM)</span>
</label>
<label class="checkbox-label">
<input type="checkbox" id="debug" name="debug">
<span>Debug Mode</span>
......
......@@ -716,6 +716,65 @@ async def list_tools() -> list:
}
),
Tool(
name="videogen_allow_bigger_models",
description="Allow models larger than available VRAM by using system RAM for offloading (implies sequential offload strategy).",
inputSchema={
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "Description of the video to generate"
},
"output": {
"type": "string",
"description": "Output filename",
"default": "output"
},
"model": {
"type": "string",
"description": "Specific model to use (optional, auto-selected if not provided)"
},
"length": {
"type": "number",
"description": "Video duration in seconds",
"default": 5.0
},
"width": {
"type": "integer",
"description": "Video width in pixels",
"default": 832
},
"height": {
"type": "integer",
"description": "Video height in pixels",
"default": 480
},
"fps": {
"type": "integer",
"description": "Frames per second",
"default": 15
},
"seed": {
"type": "integer",
"description": "Random seed for reproducibility (-1 for random)",
"default": -1
},
"auto": {
"type": "boolean",
"description": "Use automatic mode (recommended)",
"default": True
},
"no_filter": {
"type": "boolean",
"description": "Disable NSFW filter",
"default": False
}
},
"required": ["prompt"]
}
),
Tool(
name="videogen_list_tts_voices",
description="List all available TTS voices for audio generation.",
......@@ -1206,6 +1265,36 @@ async def call_tool(name: str, arguments: dict) -> list:
output, code = run_videogen_command(args)
return [TextContent(type="text", text=output)]
elif name == "videogen_allow_bigger_models":
# Build the videogen CLI argument list from the tool's input schema.
# Flags are only emitted for parameters the caller actually supplied,
# except --auto (defaults on) and --allow-bigger-models (always on for
# this tool — it is the tool's whole purpose).
args = []
# "auto" defaults to True per the schema; opt out requires an explicit False.
if arguments.get("auto", True):
args.append("--auto")
if arguments.get("model"):
args.extend(["--model", arguments["model"]])
# "prompt" is schema-required, so direct indexing is safe here.
args.extend(["--prompt", arguments["prompt"]])
args.extend(["--output", arguments.get("output", "output")])
# NOTE(review): truthiness checks mean an explicit 0 for length/width/
# height/fps is silently dropped and the CLI default is used instead —
# presumably intentional since 0 is not a valid value; confirm.
if arguments.get("length"):
args.extend(["--length", str(arguments["length"])])
if arguments.get("width"):
args.extend(["--width", str(arguments["width"])])
if arguments.get("height"):
args.extend(["--height", str(arguments["height"])])
if arguments.get("fps"):
args.extend(["--fps", str(arguments["fps"])])
# Negative seed (schema default -1) means "random": omit --seed entirely.
if arguments.get("seed", -1) >= 0:
args.extend(["--seed", str(arguments["seed"])])
if arguments.get("no_filter"):
# NOTE(review): "--no_filter" uses an underscore while
# "--allow-bigger-models" below uses dashes — verify against the
# videogen CLI that both spellings are the ones it actually accepts.
args.append("--no_filter")
# Unconditional: this tool always permits RAM offload of oversized models.
args.append("--allow-bigger-models")
output, code = run_videogen_command(args)
# Only the command output is returned; the exit code is discarded here.
return [TextContent(type="text", text=output)]
elif name == "videogen_list_tts_voices":
args = ["--tts-list"]
output, code = run_videogen_command(args)
......
......@@ -350,6 +350,10 @@ def build_command(params: Dict) -> List[str]:
if params.get('vram_limit'):
cmd.extend(['--vram-limit', str(params['vram_limit'])])
# Allow bigger models
if params.get('allow_bigger_models'):
cmd.append('--allow-bigger-models')
# Debug
if params.get('debug'):
cmd.append('--debug')
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment