In several earlier articles we used PaddlePaddle's PaddleX to train various models, then exported deployment models and ran inference with them. For example, see our introductory cat-vs-dog classification project: 2. Artificial Intelligence - Image Classification.
Paddle Inference is PaddlePaddle's native inference library, designed for high-performance server-side and cloud inference.
Inference Workflow
This article demonstrates, with a concrete example, how to run inference with a deployment model.
First download the ResNet50 model from https://paddle-inference-dist.bj.bcebos.com/Paddle-Inference-Demo/resnet50.tgz; extracting the archive yields a model in Paddle inference format.
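If you prefer to script the download, something like the following works (a minimal sketch using only the Python standard library; downloading with wget and extracting with tar works just as well):

import tarfile
import urllib.request

# Download and extract the ResNet50 inference model (URL from the text above)
url = "https://paddle-inference-dist.bj.bcebos.com/Paddle-Inference-Demo/resnet50.tgz"
urllib.request.urlretrieve(url, "resnet50.tgz")
with tarfile.open("resnet50.tgz") as tar:
    tar.extractall(".")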
# The model directory and files after extraction:
resnet50/
├── inference.pdmodel        (model structure file)
├── inference.pdiparams.info
└── inference.pdiparams      (model parameters file)
ResNet50 here is a 1000-class model trained on ImageNet. The ImageNet class mapping table is available at https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a; part of it looks like this:
0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
…………
The complete inference script is as follows:
import numpy as np
import cv2
# import the Paddle Inference library
import paddle.inference as paddle_infer
def init_predictor():
    # paths to the Paddle model files
    model_file = "infer-model/resnet50/inference.pdmodel"
    params_file = "infer-model/resnet50/inference.pdiparams"
    # build the config and create the predictor
    config = paddle_infer.Config(model_file, params_file)
    # enable memory optimization
    config.enable_memory_optim()
    # set the number of CPU math-library threads
    config.set_cpu_math_library_num_threads(4)
    # enable MKL-DNN (oneDNN) acceleration on CPU
    config.enable_mkldnn()
    predictor = paddle_infer.create_predictor(config)
    return predictor
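# If a CUDA build of PaddlePaddle is installed, the same Config can target the
# GPU instead. A hypothetical variant (not part of the original demo; assumes
# GPU device 0 exists):
def init_gpu_predictor():
    config = paddle_infer.Config("infer-model/resnet50/inference.pdmodel",
                                 "infer-model/resnet50/inference.pdiparams")
    # reserve a 100 MB initial GPU memory pool on device 0
    config.enable_use_gpu(100, 0)
    return paddle_infer.create_predictor(config)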
# resize the short side of the image to target_size, keeping the aspect ratio
def resize_short(img, target_size):
    """resize_short"""
    percent = float(target_size) / min(img.shape[0], img.shape[1])
    resized_width = int(round(img.shape[1] * percent))
    resized_height = int(round(img.shape[0] * percent))
    resized = cv2.resize(img, (resized_width, resized_height))
    return resized
# crop a target_size x target_size patch, either centered or at a random offset
def crop_image(img, target_size, center):
    """crop_image"""
    height, width = img.shape[:2]
    size = target_size
    if center:
        w_start = (width - size) / 2
        h_start = (height - size) / 2
    else:
        w_start = np.random.randint(0, width - size + 1)
        h_start = np.random.randint(0, height - size + 1)
    w_end = w_start + size
    h_end = h_start + size
    img = img[int(h_start):int(h_end), int(w_start):int(w_end), :]
    return img
# image preprocessing: resize, center-crop, BGR->RGB, HWC->CHW, normalize
def preprocess(img):
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    img = resize_short(img, 224)
    img = crop_image(img, 224, True)
    # BGR -> RGB and HWC -> CHW, scaled to [0, 1]
    img = img[:, :, ::-1].astype('float32').transpose((2, 0, 1)) / 255
    img_mean = np.array(mean).reshape((3, 1, 1))
    img_std = np.array(std).reshape((3, 1, 1))
    img -= img_mean
    img /= img_std
    # add a batch dimension: (3, 224, 224) -> (1, 3, 224, 224)
    return img[np.newaxis, :]
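# A small sanity-check sketch (an addition, never called by the script):
# preprocess() should return a float32 batch of shape (1, 3, 224, 224) for
# any readable image.
def check_preprocess(path="img/16.jpg"):
    x = preprocess(cv2.imread(path))
    print(x.shape, x.dtype)  # expected: (1, 3, 224, 224) float32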
def run(predictor, img):
    # copy the input data into the model's input tensors
    input_names = predictor.get_input_names()
    for i, name in enumerate(input_names):
        input_tensor = predictor.get_input_handle(name)
        input_tensor.reshape(img[i].shape)
        input_tensor.copy_from_cpu(img[i].copy())
    # run inference
    predictor.run()
    results = []
    # fetch the results from the output tensors
    output_names = predictor.get_output_names()
    for i, name in enumerate(output_names):
        output_tensor = predictor.get_output_handle(name)
        output_data = output_tensor.copy_to_cpu()
        results.append(output_data)
    return results
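# Optional helper sketch (an addition, not in the original demo): if the
# exported model returns raw scores, a softmax converts the (1, 1000) output
# into probabilities so the five best classes can be inspected.
def top5(scores):
    scores = scores.flatten()
    probs = np.exp(scores - scores.max())  # numerically stable softmax
    probs /= probs.sum()
    idx = probs.argsort()[-5:][::-1]
    return [(int(i), float(probs[i])) for i in idx]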
# read imagenet1000_cls.txt and build the {index: class name} mapping
def get_class_info(file_path):
    class_info = {}
    with open(file_path, 'r') as f:
        for line in f:
            line = line.strip()
            # split only on the first colon; strip quotes and the trailing comma
            kv = line.split(":", 1)
            class_info[kv[0].strip()] = kv[1].strip().rstrip(',').strip("'")
    return class_info
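# Usage example (assumes the gist above was saved verbatim as imagenet1000_cls.txt):
#   get_class_info('infer-model/resnet50/imagenet1000_cls.txt')['0']
#   -> 'tench, Tinca tinca'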
if __name__ == '__main__':
    class_info = get_class_info('infer-model/resnet50/imagenet1000_cls.txt')
    # print(class_info)
    pred = init_predictor()
    # load the test image
    imgpath = "img/16.jpg"
    img = cv2.imread(imgpath)
    img0 = img.copy()
    img = preprocess(img)
    result = run(pred, [img])
    class_idx = np.argmax(result[0][0])
    print("class index:", class_idx)
    print("class name:", class_info[str(class_idx)])
    # draw the predicted class name on the original image and display it
    cv2.putText(img0, class_info[str(class_idx)], (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.imshow("img", img0)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
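Batching is a natural extension: the input tensor is reshaped to whatever array is copied in, so several preprocessed images can be stacked along the batch axis. A minimal sketch with a hypothetical run_batch helper (not part of the original demo):

def run_batch(predictor, paths):
    # stack N preprocessed images into one (N, 3, 224, 224) batch
    batch = np.concatenate([preprocess(cv2.imread(p)) for p in paths], axis=0)
    scores = run(predictor, [batch])[0]  # (N, 1000) class scores
    return np.argmax(scores, axis=1)     # predicted class index per image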
[Figure: inference result 1]
[Figure: inference result 2]
References: PaddlePaddle/Paddle-Inference-Demo (github.com)