---
license: mit
language:
- en
base_model:
- Qwen/Qwen2.5-VL-7B-Instruct
pipeline_tag: reinforcement-learning
tags:
- Image Quality Assessment (IQA)
- Vision-Language Model (VLM)
- Reasoning-Induced
- Pytorch
---
The latest version is available at VisualQuality-R1-7B.

# VisualQuality-R1-7B Preview

This is a demo version of VisualQuality-R1, jointly trained on the KADID-10K, TID2013, and KONIQ-10K datasets. Its base model is Qwen2.5-VL-7B-Instruct.

Paper link: arXiv

## Quick Start

The snippet below loads the model, scores a single image, and returns both the reasoning trace and the final quality score.
```python
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import json
import numpy as np
import torch
import random
import re
import os


def score_image(model_path, image_path):
    # Load the model and processor.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
        device_map=device,
    )
    processor = AutoProcessor.from_pretrained(model_path)
    processor.tokenizer.padding_side = "left"

    PROMPT = (
        "You are doing the image quality assessment task. Here is the question: "
        "What is your overall rating on the quality of this picture? The rating should be "
        "a float between 1 and 5, rounded to two decimal places, with 1 representing "
        "very poor quality and 5 representing excellent quality."
    )

    x = {
        "image": [image_path],
        "question": PROMPT,
    }

    QUESTION_TEMPLATE = (
        "{Question} First output the thinking process in <think> </think> tags, "
        "and then output only the final score in <answer> </answer> tags."
    )

    message = [
        {
            "role": "user",
            "content": [
                *({"type": "image", "image": img_path} for img_path in x["image"]),
                {"type": "text", "text": QUESTION_TEMPLATE.format(Question=x["question"])},
            ],
        }
    ]
    batch_messages = [message]

    # Prepare the inputs for inference.
    text = [
        processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True, add_vision_id=True)
        for msg in batch_messages
    ]
    image_inputs, video_inputs = process_vision_info(batch_messages)
    inputs = processor(
        text=text,
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to(device)

    # Generate the response and strip the prompt tokens from the output.
    generated_ids = model.generate(**inputs, use_cache=True, max_new_tokens=256, do_sample=True)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    batch_output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    # Extract the reasoning trace and the final score from the tagged output.
    reasoning = re.findall(r"<think>(.*?)</think>", batch_output_text[0], re.DOTALL)
    reasoning = reasoning[-1].strip()

    model_output_matches = re.findall(r"<answer>(.*?)</answer>", batch_output_text[0], re.DOTALL)
    model_answer = model_output_matches[-1].strip()
    score = float(re.search(r"\d+(\.\d+)?", model_answer).group())

    return reasoning, score


random.seed(42)
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
model_path = ""   # path to the model weights
image_path = ""   # path to the image to be scored

reasoning, score = score_image(
    model_path=model_path,
    image_path=image_path,
)

print(reasoning)
print(score)
```
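
To compare the quality of several images, one simple option is to call `score_image` once per image and sort by the returned score. The sketch below is a minimal example under that assumption; it is not part of the official model card, the image file names are hypothetical placeholders, and `model_path` is reused from the example above. Note that `score_image` reloads the model on every call, so for larger batches you may want to hoist the model and processor loading out of the function.

```python
# Minimal ranking sketch (not from the official model card).
# The image paths below are hypothetical placeholders.
candidate_images = ["image_a.png", "image_b.png", "image_c.png"]

results = []
for path in candidate_images:
    reasoning, score = score_image(model_path=model_path, image_path=path)
    results.append((path, score))

# Print the images from highest to lowest predicted quality.
for path, score in sorted(results, key=lambda item: item[1], reverse=True):
    print(f"{path}: {score:.2f}")
```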