大致思路:
先请求接口获取 img 图片的 base64 数据,BeanShell 调用 py 文件将其保存到本地并进行 OCR 识别,再把识别出的验证码回传给 JMeter 变量
jmeter BeanShell 脚本
// JMeter BeanShell: shell out to a Python OCR script and capture its stdout.
// The Python script prints the recognized captcha text, one value per line.
String pythonScript = "C:\\Users\\Administrator\\PycharmProjects\\杂乱\\1.py";
String argument = vars.get("img");//获取jmeter的变量 (base64 image payload)
// 构建完整的命令 — argument list form, so no shell quoting/injection issues
String[] command = {"python", pythonScript, argument};
// 执行命令
ProcessBuilder processBuilder = new ProcessBuilder(command);
Process process = processBuilder.start();
// 获取Python脚本的输出结果
BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
try {
    String line;
    while ((line = reader.readLine()) != null) {
        System.out.println("获取验证码"+line);
        vars.put("yzm", line);//塞入jmeter变量 (last printed line wins)
        log.info("获取验证码"+line);
    }
} finally {
    // Original leaked the reader and never reaped the child process,
    // leaving zombie python processes behind on repeated samplers.
    reader.close();
    process.waitFor();
}
// NOTE(review): stderr is not read — if the Python script fails, "yzm"
// silently stays unset; consider processBuilder.redirectErrorStream(true).
调用的py文件
# encoding:utf-8
# @author: Qin
# @file: TIM.py
# @time: 2023/10/12 17:50
# @desc:
import ddddocr
from PIL import Image
import io
import argparse
import base64
from datetime import datetime
import random
# Unique per-run name for the temporary captcha image: current Unix
# timestamp plus a random suffix, e.g. "1697100000.12345667890.png".
filename = '{}{}.png'.format(datetime.now().timestamp(), random.randint(1, 100000))
def saveimg(data, path=None):
    """Decode a base64-encoded image and write the raw bytes to *path*.

    Args:
        data: Base64 string of the image bytes (as passed in from JMeter).
        path: Destination file; defaults to the module-level ``filename``.

    The original round-tripped the bytes through PIL (b64decode ->
    Image.open -> Image.save), which re-encodes the image for no benefit
    and can alter the bytes; writing the decoded bytes directly preserves
    the image exactly and drops the PIL dependency from this path.
    """
    if path is None:
        path = filename  # module-level per-run temp name
    # 解码Base64字符串为字节数组
    image_bytes = base64.b64decode(data)
    with open(path, 'wb') as f:
        f.write(image_bytes)
def yzm(a):
    """Save the base64 captcha image, OCR it, and print the recognized text.

    Args:
        a: Base64-encoded captcha image string.

    Returns:
        The recognized captcha string. Printing to stdout is the contract
        with the BeanShell caller, which reads the process output; the
        return value is added (backward-compatible) so the function is also
        usable when imported directly.
    """
    saveimg(a)
    # 创建 OCR 模型对象 — fresh model per call; fine for a one-shot CLI script.
    ocr = ddddocr.DdddOcr()
    with open(filename, 'rb') as f:
        img_bytes = f.read()
    res = ocr.classification(img_bytes)
    # Keep the bare print: the BeanShell side parses stdout lines.
    print(res)
    return res
def _main():
    """CLI entry point: parse the base64 image argument and recognize it."""
    parser = argparse.ArgumentParser(description='脚本描述')
    parser.add_argument('arg1', type=str, help='参数1的说明')
    args = parser.parse_args()
    yzm(args.arg1)


# The original ran argparse at import time, so merely importing this module
# (e.g. to reuse yzm/saveimg) would crash on missing CLI arguments.
# Behavior when executed as a script is unchanged.
if __name__ == '__main__':
    _main()