基于Flask的人工智能模型微服务
写Docker服务分两步:
第一步:进入容器自己构建,记录自己的操作命令
第二步:将记录的命令集成到Dockerfile
第一步:进入容器自己构建,记录自己的操作命令
sudo docker run -itd -v /home/csp/place_for_docker:/opt -p 9000:5000 ubuntu:18.04
apt-get update && apt-get install -y --no-install-recommends python3 python3-pip python3-dev llvm llvm-dev
apt install vim
pip3 install torch
pip3 install flask
pip3 install flask-restful
apt install gcc
pip3 install setuptools
pip3 install uwsgi
uwsgi uwsgi.ini
[uwsgi]
# Serve HTTP directly on port 5000 (container-internal; mapped to host 9000 above)
http=:5000
# Script containing the WSGI application
wsgi-file=/usr/src/test_for_flask.py
# Name of the WSGI callable inside wsgi-file (the Flask `app` object)
callable=app
# 4 worker processes x 2 threads each
processes=4
threads=2
# Run in the background, appending logs to this file
daemonize=/usr/src/flask.log
第二步:将记录的命令集成到Dockerfile
# Base image matching the container used for the manual build above.
FROM ubuntu:18.04
LABEL maintainer major
# Helper script placed on PATH.
COPY run.sh /usr/bin
# ADD auto-extracts the zip archive into /usr/src.
ADD test.zip /usr/src
# System packages: Python 3 toolchain plus LLVM (required by some pip builds).
RUN apt-get update && apt-get install -y --no-install-recommends python3 python3-pip python3-dev llvm llvm-dev
# Python dependencies for the model microservice.
RUN pip3 install torch torchvision flask flask-restful
WORKDIR /usr/src
# Port the Flask/uwsgi service listens on inside the container.
EXPOSE 5000
# Example environment variable (placeholder).
ENV AAA=111
# NOTE(review): keeps the container alive for interactive debugging only;
# replace with the real service command (e.g. `uwsgi uwsgi.ini`) for production.
CMD ["sleep","36000"]
第三步 build构建镜像
sudo docker build -t major:v1 .
1.系统环境
2.python环境
3.python库环境
# GPU-enabled base image with the CUDA runtime.
# NOTE(review): untagged `nvidia/cuda` pulls "latest", which changes over time —
# pin an explicit tag (e.g. nvidia/cuda:11.x-runtime-ubuntu18.04) for repeatable builds.
FROM nvidia/cuda
# Python 3 toolchain plus LLVM (required by some pip builds).
RUN apt-get update && apt-get install -y --no-install-recommends python3 python3-pip python3-dev llvm llvm-dev
# rembg: background-removal tool; pip pulls its Python dependencies.
RUN pip3 install rembg
# Run rembg as the entry point; extra `docker run` arguments become rembg arguments.
ENTRYPOINT ["rembg"]
CMD []
Dockerfile
编写外部文件requirements.txt
flask的server文件
from flask import Flask, request,url_for
from flask_restful import Api,Resource,reqparse

# Flask application and its flask-restful API wrapper; Resource classes
# below are registered against `api` at the bottom of this section.
app = Flask(__name__)
api = Api(app)
class TestForGetView(Resource):
    """Health-check style endpoint: GET returns a fixed greeting."""

    def get(self):
        # Static payload; flask-restful serializes the dict to JSON.
        return {"res": "hello world"}
class TestForGetView2(Resource):
    """Echo endpoint: parses three named fields from the request and returns them."""

    def get(self):
        # 1. Collect the expected fields from the incoming request
        #    (JSON body / query string / form data).
        parser = reqparse.RequestParser()
        for field in ("source", "operation", "destination"):
            parser.add_argument(field)
        args = parser.parse_args()

        # 2. Debug dump of what arrived.
        print(args)
        print(args.source)
        print(args.operation)
        print(args.destination)

        # 3. Echo the parsed arguments back as JSON.
        return args
class TestForPost(Resource):
    """Accepts a raw request body and persists it to disk as `11.pdf`."""

    def post(self):
        # Raw, unparsed request body bytes.
        payload = request.get_data()
        with open("11.pdf", "wb") as out:
            out.write(payload)
        return 'OK'
class PredictForClassificationView(Resource):
    """Image-classification endpoint.

    POST the raw bytes of an image; it is written to disk, run through a
    ResNet18 CIFAR-10 classifier, and the predicted class name is returned
    as JSON: {"res": "<class>"}.
    """

    # CIFAR-10 class labels, indexed by the network's argmax output.
    CLASSES = ["airplane", "automobile", "bird", "cat", "deer", "dog",
               "frog", "horse", "ship", "truck"]

    # Trained ResNet18 state dict (mounted into the container at /opt).
    MODEL_STATE_PATH = "/opt/best_model_res18_0.99.pth"

    def post(self):
        # Persist the raw request body so predict_image can read it from disk.
        file = request.get_data()
        with open("test_for_classification.png", "wb") as f:
            f.write(file)

        # Imports deferred so the lightweight test endpoints work even when
        # torch / the model code are not importable.
        from classification import predict_image
        from net.classification.ResNet import ResNet18
        import torch

        # Rebuild the network and load the trained parameters.
        # map_location="cpu" lets a GPU-saved checkpoint load on a CPU-only host
        # (without it, torch.load raises when CUDA is unavailable).
        net = ResNet18(num_classes=10, num_linear=512)
        net.load_state_dict(torch.load(self.MODEL_STATE_PATH, map_location="cpu"))
        # BUG FIX: switch to inference mode so BatchNorm/Dropout use their
        # evaluation behavior; without .eval(), single-image predictions are
        # computed with training-mode statistics and can be wrong.
        net.eval()

        index = predict_image(net, "test_for_classification.png").item()
        return {"res": self.CLASSES[index]}
# class PredictForSegementationView(Resource):
# def post(self):
# file = request.get_data()
# # print(file)
# # print(type(file))
# with open("test_for_segementation.png", "wb") as f:
# f.write(file)
#
# return 'OK'
#
# class PredictForDetectionView(Resource):
# def post(self):
# file = request.get_data()
# # print(file)
# # print(type(file))
# with open("test_for_detection.jpg", "wb") as f:
# f.write(file)
# return 'OK'
# URL routing: map each Resource class to its endpoint path.
api.add_resource(TestForGetView,'/test_for_get')
api.add_resource(TestForGetView2,'/test_for_get2')
api.add_resource(TestForPost,'/test_for_post')
api.add_resource(PredictForClassificationView,'/predict_for_classification')
# api.add_resource(PredictForSegementationView,'/predict_for_segementation')
# api.add_resource(PredictForDetectionView,'/predict_for_detection')

# Development entry point; inside the container the app is served by uwsgi instead.
if __name__ == "__main__":
    app.run(host="0.0.0.0",port=5000)
# ---------------------------------------------------------------------------
# Second service: rembg background-removal HTTP server (separate module).
# ---------------------------------------------------------------------------
import os
import glob
import argparse
from io import BytesIO
from urllib.parse import unquote_plus
from urllib.request import urlopen

from flask import Flask, request, send_file
from waitress import serve

# Relative import: `remove` is the core background-removal routine of the
# enclosing package.
from ..bg import remove

app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
    """Remove the background from an image.

    POST: multipart form with a 'file' field containing the image bytes.
    GET:  query param 'url' pointing at the image to download.

    Optional tuning params (query/form): 'model' (u2net | u2netp |
    u2net_human_seg), 'a' (presence enables alpha matting), and the
    alpha-matting knobs 'af'/'ab' (fg/bg thresholds), 'ae' (erode
    structure size), 'az' (base size).

    Returns the processed image as PNG, or a JSON error with 4xx/5xx.
    """
    # BUG FIX: uploaded/downloaded content is bytes, so the original
    # `file_content == ""` emptiness check could never match; initialize
    # with b"" and test truthiness instead.
    file_content = b""
    if request.method == "POST":
        if "file" not in request.files:
            return {"error": "missing post form param 'file'"}, 400
        file_content = request.files["file"].read()
    if request.method == "GET":
        url = request.args.get("url", type=str)
        if url is None:
            return {"error": "missing query param 'url'"}, 400
        # SECURITY(review): fetching a caller-supplied URL is an SSRF vector —
        # restrict schemes/hosts before exposing this endpoint publicly.
        file_content = urlopen(unquote_plus(url)).read()
    if not file_content:
        return {"error": "File content is empty"}, 400

    alpha_matting = "a" in request.values
    af = request.values.get("af", type=int, default=240)
    ab = request.values.get("ab", type=int, default=10)
    ae = request.values.get("ae", type=int, default=10)
    az = request.values.get("az", type=int, default=1000)

    model = request.args.get("model", type=str, default="u2net")
    # Directory holding the downloaded model weights; overridable via env.
    model_path = os.environ.get(
        "U2NETP_PATH",
        os.path.expanduser(os.path.join("~", ".u2net")),
    )
    # Advertise whatever models are actually on disk; fall back to the
    # known set when the directory is empty or missing.
    model_choices = [os.path.splitext(os.path.basename(x))[0] for x in set(glob.glob(model_path + "/*"))]
    if len(model_choices) == 0:
        model_choices = ["u2net", "u2netp", "u2net_human_seg"]
    if model not in model_choices:
        return {"error": f"invalid query param 'model'. Available options are {model_choices}"}, 400

    try:
        return send_file(
            BytesIO(
                remove(
                    file_content,
                    model_name=model,
                    alpha_matting=alpha_matting,
                    alpha_matting_foreground_threshold=af,
                    alpha_matting_background_threshold=ab,
                    alpha_matting_erode_structure_size=ae,
                    alpha_matting_base_size=az,
                )
            ),
            mimetype="image/png",
        )
    except Exception as e:
        # Log the full traceback server-side; return an opaque error to the client.
        app.logger.exception(e, exc_info=True)
        return {"error": "oops, something went wrong!"}, 500
def main():
    """CLI entry point: parse the bind address/port and serve with waitress."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-a", "--addr",
        type=str,
        default="0.0.0.0",
        help="The IP address to bind to.",
    )
    parser.add_argument(
        "-p", "--port",
        type=int,
        default=5000,
        help="The port to bind to.",
    )
    options = parser.parse_args()
    # waitress: production-grade, pure-Python WSGI server (blocks until killed).
    serve(app, host=options.addr, port=options.port)


if __name__ == "__main__":
    main()
pip3 --no-cache-dir install torch==1.2 -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com