Fix unpacking error in model.generate() calls by handling variable tuple lengths

parent 2a6d39b2
...@@ -155,11 +155,18 @@ def analyze_single_image(image_path, prompt, model): ...@@ -155,11 +155,18 @@ def analyze_single_image(image_path, prompt, model):
gen_result = model.generate({"messages": messages}, max_new_tokens=128) gen_result = model.generate({"messages": messages}, max_new_tokens=128)
if isinstance(gen_result, tuple): if isinstance(gen_result, tuple):
result, tokens_used = gen_result if len(gen_result) >= 2:
result, tokens_used = gen_result[0], gen_result[1]
elif len(gen_result) == 1:
result = gen_result[0]
tokens_used = 0
else:
result = ""
tokens_used = 0
else: else:
result = gen_result result = gen_result
tokens_used = 0 tokens_used = 0
return result return result, tokens_used
# For now, estimate tokens (could be improved with actual token counting) # For now, estimate tokens (could be improved with actual token counting)
estimated_tokens = len(result.split()) + len(prompt.split()) estimated_tokens = len(result.split()) + len(prompt.split())
return result, estimated_tokens return result, estimated_tokens
...@@ -320,7 +327,14 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None, ...@@ -320,7 +327,14 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
messages = [{"role": "user", "content": [{"type": "text", "text": summary_prompt}]}] messages = [{"role": "user", "content": [{"type": "text", "text": summary_prompt}]}]
gen_result = model.generate({"messages": messages}, max_new_tokens=256) gen_result = model.generate({"messages": messages}, max_new_tokens=256)
if isinstance(gen_result, tuple): if isinstance(gen_result, tuple):
summary, summary_tokens = gen_result if len(gen_result) >= 2:
summary, summary_tokens = gen_result[0], gen_result[1]
elif len(gen_result) == 1:
summary = gen_result[0]
summary_tokens = 0
else:
summary = ""
summary_tokens = 0
else: else:
summary = gen_result summary = gen_result
summary_tokens = 0 summary_tokens = 0
...@@ -328,7 +342,14 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None, ...@@ -328,7 +342,14 @@ def analyze_media(media_path, prompt, model_path, interval=10, job_id_int=None,
# Use text-only model for summary # Use text-only model for summary
gen_result = model.generate(f"Summarize the video based on frame descriptions: {' '.join(descriptions)}", max_new_tokens=256) gen_result = model.generate(f"Summarize the video based on frame descriptions: {' '.join(descriptions)}", max_new_tokens=256)
if isinstance(gen_result, tuple): if isinstance(gen_result, tuple):
summary, summary_tokens = gen_result if len(gen_result) >= 2:
summary, summary_tokens = gen_result[0], gen_result[1]
elif len(gen_result) == 1:
summary = gen_result[0]
summary_tokens = 0
else:
summary = ""
summary_tokens = 0
else: else:
summary = gen_result summary = gen_result
summary_tokens = 0 summary_tokens = 0
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment