#!/usr/bin/env python3
在搭建计算机视觉服务的时候,在server端会遇到base64图片文件需要转化到np array,然后才能送入模型的情况,而在client端,则是需要将图片转化成base64发送request到服务,这个中间的转换我记录一下。
import base64
import cv2
import numpy as np
1.本地的base64 string to numpy array
def base64_to_np(image_string):
    """Decode a base64-encoded image string into a BGR numpy array.

    Args:
        image_string: base64 text (or bytes) of an encoded image file
            (JPEG/PNG/...), e.g. taken from request_data['image'] on the
            server side.

    Returns:
        numpy array of shape (h, w, 3) in BGR order, or None when the bytes
        cannot be decoded (cv2.imdecode returns None on bad input).
    """
    img_data = base64.b64decode(image_string)
    # np.fromstring is deprecated (and removed for binary input in modern
    # numpy); np.frombuffer is the supported zero-copy replacement.
    nparr = np.frombuffer(img_data, np.uint8)
    return cv2.imdecode(nparr, cv2.IMREAD_COLOR)
2. 本地的numpy array to base64 string
def np_to_base64(pic_img, ext='.jpg'):
    """Encode a numpy image array (BGR, the cv2 convention) to a base64 string.

    Args:
        pic_img: numpy image array, e.g. shape (h, w, 3).
        ext: target format passed to cv2.imencode; default '.jpg' matches
            the original snippet.

    Returns:
        str: base64 text, ready to embed in a JSON payload.

    Raises:
        ValueError: if cv2.imencode reports failure (the original snippet
            silently ignored the retval flag).
    """
    retval, buffer = cv2.imencode(ext, pic_img)
    if not retval:
        raise ValueError('cv2.imencode failed for format {}'.format(ext))
    # b64encode returns bytes; decode to str so json.dumps accepts it.
    return base64.b64encode(buffer).decode()
3. image to base64
def image_to_base64(img_path):
    """Read an image file from disk and return its contents as a base64 string.

    Args:
        img_path: path to the image file.

    Returns:
        str: base64 text of the raw file bytes.  (This targets Python 3,
        where b64encode returns bytes, hence the .decode(); on Python 2 the
        decode step was unnecessary.)
    """
    with open(img_path, 'rb') as rf:
        img_data = rf.read()
    return base64.b64encode(img_data).decode()
最终版本。
post request using the base64 string as part of json parameters
客户端
import requests
import json
def post_image_request(url, img_path, img_type=0):
    """POST an image to an inference service as base64 inside a JSON body.

    Args:
        url: endpoint of the vision service.
        img_path: path of the image file to send.
        img_type: value for the 'type' field of the payload; default 0
            matches the original snippet.

    Returns:
        requests.Response: the raw response; parse the body with
        json.loads(response.text).
    """
    with open(img_path, "rb") as image_file:
        encoded_image = base64.b64encode(image_file.read())
    req_file = encoded_image.decode()
    # JSON-format parameters
    post_dict = {"image": req_file, "type": img_type}
    headers = {
        'Content-Type': 'application/json',  # This is important
    }
    return requests.post(url, data=json.dumps(post_dict), headers=headers)
接收result
# Parse the JSON body of the response and pull out the service payload.
# BUG FIX: the original used the undefined name `json_res`; the object
# created by requests.post above is called `response`.
json.loads(response.text)['data']
以fastapi为例子
# the host returns the result as JSON (jsonify in Flask, JSONResponse in FastAPI)
解码方法1,用cv2
def decode_base64_cv2(base64_string):
    """Decode method 1 (cv2): base64 string -> numpy image array.

    Args:
        base64_string: base64 text (or bytes) of an encoded image.

    Returns:
        numpy image array ([h, w, 3] for a colour input), or None when
        cv2.imdecode cannot parse the bytes.
    """
    img_bytes = base64.b64decode(base64_string)          # text -> raw bytes
    img_flat = np.frombuffer(img_bytes, dtype=np.uint8)  # bytes -> [n] uint8
    return cv2.imdecode(img_flat, cv2.IMREAD_ANYCOLOR)   # -> image array
##解码方法2,用PIL pillow
import PIL.Image as Image


def decode_base64_pil(base64_string):
    """Decode method 2 (PIL/pillow): base64 string -> PIL.Image.

    Args:
        base64_string: base64 text (or bytes) of an encoded image.

    Returns:
        PIL.Image.Image: the decoded (lazily loaded) image.
    """
    # BUG FIX: `io` was never imported anywhere in the original file, so
    # io.BytesIO raised NameError; import it here.
    import io
    raw = base64.b64decode(base64_string)
    return Image.open(io.BytesIO(raw))  # PIL-format image
注意有些图片,因为摄像的时候本身带了太多图片之外的信息,
比如图片是横着拍摄,进入linux之后会显示横图,我们用软件看的时候是正常的,
但是经过base64和request进入server端之后,图片又变成了横图,
所以要用pil获取图片信息判断是不是横图,如果是,要纠正。
def correct_orientation(pil_image):
    """Fix camera-orientation metadata and convert a PIL image to a BGR array.

    Some photos carry an EXIF orientation entry (tag 274): viewers rotate
    them on the fly, but after base64 transport and server-side decoding the
    raw pixels are still in the sensor orientation, so the image can arrive
    sideways.  This checks the EXIF orientation tag and physically rotates
    the pixel data when needed.

    Args:
        pil_image: PIL.Image as decoded on the server.

    Returns:
        numpy array in BGR channel order (cv2 convention), rotated 90
        degrees clockwise when EXIF orientation == 6, unchanged otherwise.

    NOTE(review): the original fragment returned several undefined timing
    variables (st/end1/end2/end3) and had a bare ``return`` outside any
    visible function; those are dropped here.  It also only handled
    orientation value 6 -- orientations 3 and 8 are still unhandled,
    confirm whether they are needed.  (Its ``h, w = pil_image.size`` was
    additionally wrong: PIL's .size is (width, height) -- but that value
    fed only commented-out code and is removed.)
    """
    exifdata = pil_image.getexif()
    print('exif data {}'.format(exifdata))
    # PIL gives RGB; convert to BGR (cv2 convention) before any rotation.
    image = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
    if exifdata.get(274) == 6:
        # Orientation 6: pixels need a 90-degree clockwise rotation.
        return cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
    return image