Fix model loading to use AutoModelForCausalLM for Qwen2.5-Omni-7B

parent 0d6f3fcb
 import argparse
 import torch
-from transformers import AutoProcessor, AutoModel, BitsAndBytesConfig
+from transformers import AutoProcessor, AutoModelForCausalLM, BitsAndBytesConfig
 from resemblyzer import VoiceEncoder
 from sklearn.cluster import AgglomerativeClustering
 import webrtcvad
@@ -76,7 +76,7 @@ def main():
 # Load Qwen2.5-Omni-7B model with 4-bit quantization
 quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
-model = AutoModel.from_pretrained("Qwen/Qwen2.5-Omni-7B", quantization_config=quantization_config, device_map="auto")
+model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Omni-7B", quantization_config=quantization_config, device_map="auto")
 # Load audio
 audio, sr = librosa.load(audio_file, sr=16000)
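
For reference, a minimal self-contained version of the loading path as it stands after this change. The bnb_4bit_compute_dtype setting and the audio file name are illustrative additions, not part of the diff:

import librosa
import torch
from transformers import AutoProcessor, AutoModelForCausalLM, BitsAndBytesConfig

# Quantize weights to 4 bits via bitsandbytes; fp16 compute keeps matmuls fast.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # illustrative; the diff uses the default
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Omni-7B",
    quantization_config=quantization_config,
    device_map="auto",  # let accelerate place layers across available devices
)

# 16 kHz mono matches what the downstream VAD and speaker encoder expect.
audio, sr = librosa.load("input.wav", sr=16000)  # placeholder file name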
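The rest of main() is collapsed in this diff, but the imports suggest a VAD-filter, embed, cluster diarization flow. A hypothetical sketch of how those pieces typically fit together (file name, frame and window sizes, and speaker count are all illustrative, not taken from this commit):

import numpy as np
import librosa
import webrtcvad
from resemblyzer import VoiceEncoder
from sklearn.cluster import AgglomerativeClustering

audio, sr = librosa.load("meeting.wav", sr=16000)  # placeholder file name

# 1) Keep only voiced 30 ms frames; WebRTC VAD expects 16-bit mono PCM.
vad = webrtcvad.Vad(2)                              # aggressiveness 0 (lenient) to 3 (strict)
pcm = (audio * 32767).astype(np.int16)
frame = int(sr * 0.03)                              # 480 samples at 16 kHz
voiced = [pcm[i:i + frame] for i in range(0, len(pcm) - frame, frame)
          if vad.is_speech(pcm[i:i + frame].tobytes(), sr)]

# 2) Embed ~1.6 s windows of the voiced audio with Resemblyzer.
speech = np.concatenate(voiced).astype(np.float32) / 32767
encoder = VoiceEncoder()
win = int(sr * 1.6)
embeddings = np.stack([encoder.embed_utterance(speech[i:i + win])
                       for i in range(0, len(speech) - win, win)])

# 3) Group the window embeddings into speakers (two speakers assumed here).
labels = AgglomerativeClustering(n_clusters=2).fit_predict(embeddings)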