模型训练与验证minicpm-v

minicpm-v 模型进行微调并进行验证

训练使用混合数据集进行训练,对minicpm-v进行lora微调,微调后使用llama3_1对输出结果与标签值进行比对,计算准确率。
整体流程验证代码为:

# URL = https://swift.readthedocs.io/zh-cn/latest/LLM/VLLM%E6%8E%A8%E7%90%86%E5%8A%A0%E9%80%9F%E4%B8%8E%E9%83%A8%E7%BD%B2.html

'''
使用swift进行部署的示例
model: llama3_1-8b-instruct
CUDA_VISIBLE_DEVICES=2 swift deploy --max_model_len 4096 --model_type llama3_1-8b-instruct --model_id_or_path /nas/share/model/huggingface/models--meta-llama--Meta-Llama-3.1-8B-Instruct/snapshots/8c22764a7e3675c50d4c7c9a4edb474456022b16
'''

# 客户端
# example of using swift client

import os
import json
from tqdm import tqdm
from swift.llm import get_model_list_client, XRequestConfig, inference_client

# Query the running swift deployment for its served models and use the first
# entry as the judge model id (presumably the llama3_1-8b-instruct instance
# started by the `swift deploy` command shown above — confirm against the
# actual deployment).
model_list = get_model_list_client()
model_type = model_list.data[0].id


def get_data_jsonl(data_path):
    """Load a JSONL file and return its records as a list.

    Args:
        data_path: Path to a .jsonl file (one JSON object per line).

    Returns:
        list: The parsed JSON objects, in file order.
    """
    datas = []
    # Read as UTF-8 explicitly so non-ASCII content (e.g. Chinese labels)
    # loads correctly regardless of the platform default encoding.
    with open(data_path, 'r', encoding='utf-8') as f:
        # Stream line by line instead of readlines() + index loop;
        # skip blank lines (e.g. a trailing newline) that would crash
        # json.loads.
        for line in f:
            line = line.strip()
            if line:
                datas.append(json.loads(line))
    return datas

def save_data_jsonl(save_path: str, datas: list):
    """Write records to a JSONL file, one JSON object per line.

    Args:
        save_path: Destination file path (overwritten if it exists).
        datas: Records (JSON-serializable objects) to write.

    Returns:
        str: The path written to, for caller convenience.
    """
    # encoding='utf-8' is required: ensure_ascii=False emits raw non-ASCII
    # characters, which can fail under a platform-default codec.
    with open(save_path, 'w', encoding='utf-8') as f:
        for data in datas:
            json.dump(data, f, ensure_ascii=False)
            f.write('\n')
    return save_path


def get_save_path(data_path: str):
    """Derive the per-item evaluation output path next to the input file.

    foo/bar.jsonl -> foo/bar_llm_eval.jsonl. Ensures the parent directory
    exists.

    Args:
        data_path: Path of the source .jsonl file.

    Returns:
        str: The derived output path.
    """
    save_path = os.path.splitext(data_path)[0] + '_llm_eval.jsonl'
    parent = os.path.dirname(save_path)
    # Guard against a bare filename: os.makedirs('') raises FileNotFoundError.
    # exist_ok=True also removes the check-then-create race of the original
    # exists()/makedirs() pair.
    if parent:
        os.makedirs(parent, exist_ok=True)
    return save_path
    
def get_result_correct_path(data_path: str):
    """Derive the aggregate-precision output path next to the input file.

    foo/bar.jsonl -> foo/bar_llm_eval_correct.json. Ensures the parent
    directory exists.

    Args:
        data_path: Path of the source .jsonl file.

    Returns:
        str: The derived output path.
    """
    result_correct_path = os.path.splitext(data_path)[0] + '_llm_eval_correct.json'
    parent = os.path.dirname(result_correct_path)
    # Same guard as get_save_path: os.makedirs('') raises for bare
    # filenames, and exist_ok=True avoids the exists()/makedirs() race.
    if parent:
        os.makedirs(parent, exist_ok=True)
    return result_correct_path

def save_result_correct(data_path: str, precision: float):
    """Persist the evaluation precision as a small JSON file and print it.

    NOTE(review): despite its name, `data_path` is the *output* path of the
    result file — callers pass the path from get_result_correct_path. The
    name is kept for interface compatibility.

    Args:
        data_path: Destination path for the result JSON (overwritten).
        precision: Fraction of judged-correct items, in [0, 1].

    Returns:
        dict: The result record that was written.
    """
    result = {
        'data_path': data_path,
        'precision': precision,
    }
    # Explicit UTF-8 for consistency with the other writers in this file.
    with open(data_path, 'w', encoding='utf-8') as f:
        json.dump(result, f, ensure_ascii=False)
    print(f'precision: {precision*100}%')
    return result


def main():
    """Judge model responses against labels with an LLM and save the results.

    For every evaluation file: ask the deployed judge model whether each
    `response` matches its `reply` label (expects a literal YES/NO answer),
    record the judgement per item, and persist the aggregate precision.
    """

    def build_question(response, reply):
        # Prompt asking the judge LLM whether the two answers agree.
        # The template text is preserved verbatim (it is runtime behavior).
        question_template = '''你可以作为一个语言专家,判断一下两个回答是否相同吗 \n
                "response": "{}",
                "reply": "{}",
                ----
                如果是,返回"YES",否则返回"NO"。
                '''.format(response, reply)
        return question_template

    # TODO: fill in the evaluation file list. The original source had an
    # incomplete assignment here (`data_paths = ` — a syntax error),
    # presumably redacted paths.
    data_paths = []

    # Sampling config is loop-invariant; build it once instead of per item.
    request_config = XRequestConfig(max_tokens=32, temperature=0.1, seed=42)

    for data_path in tqdm(data_paths):
        datas = get_data_jsonl(data_path)

        save_data = []
        total_correct = 0
        total = 0
        for data in tqdm(datas):
            response = data['response']
            reply = data['reply']

            question = build_question(response, reply)
            resp = inference_client(model_type, question, request_config=request_config)
            llm_response = resp.choices[0].message.content
            data['llm_response'] = llm_response
            save_data.append(data)

            # Strip whitespace so answers like "YES\n" still count as correct.
            if llm_response.strip().lower() == 'yes':
                total_correct += 1
            total += 1

        # Compute once after the loop; guard against an empty file, which
        # previously produced a NameError (precision never bound).
        precision = total_correct / total if total else 0.0

        # Persist the per-item judgements.
        save_path = get_save_path(data_path)
        save_data_jsonl(save_path, save_data)

        # Persist the aggregate precision.
        result_correct_path = get_result_correct_path(data_path)
        save_result_correct(result_correct_path, precision)


if __name__ == '__main__':
    main()
  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值