1 Environment
CPU: i5-12500    Python: 3.8.18
2 Installing OpenVINO and ONNXRuntime
2.1 OpenVINO Overview
OpenVINO is a partially open-source toolkit developed by Intel for optimizing and deploying AI inference; its main purpose is to accelerate deep-learning inference.
OpenVINO bundles OpenCV and TensorFlow-related components, and beyond that it provides a powerful Plugin development framework that lets developers optimize the inference process on top of OpenVINO.
The overall architecture is: OpenVINO frontend → Plugin middle layer → backend. OpenVINO's strength is that it hides the backend interfaces behind a unified frontend API, so developers do not need to care about the backend implementation. The backend can be, for example, TensorFlow, Keras, or Arm NN, exposed to the frontend through Plugins. In other words, one set of code written against OpenVINO can run on top of multiple inference engines; OpenVINO acts like an aggregation layer over them.
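To make the unified frontend API concrete, here is a minimal OpenVINO sketch (the model path and the dummy input are placeholders I added, not part of the original article) that loads an ONNX model and runs it on the CPU, using the same Core / read_model / compile_model calls as the full script in section 4.1:

import numpy as np
from openvino.runtime import Core

core = Core()
model = core.read_model(model="yolov5s.onnx")                   # parse the ONNX model
compiled = core.compile_model(model=model, device_name="CPU")   # choose the backend/device
output_layer = compiled.output(0)

dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)            # placeholder NCHW input
result = compiled([dummy])[output_layer]                        # run inference
print(result.shape)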
2.2 ONNXRuntime Overview
ONNX Runtime is an inference framework released by Microsoft that makes it very convenient to run an ONNX model. It supports multiple execution backends, including CPU, GPU, TensorRT, DML, and others; it is fair to say that ONNX Runtime offers the most native support for ONNX models.
Although ONNX is most often used as an intermediate representation (a model is exported from PyTorch to ONNX and then fed into a backend framework such as TensorRT or MNN), that does not change the fact that ONNX Runtime is an excellent inference framework. Moreover, because it focuses on inference (although the latest ONNX Runtime can even do training), reading its source code is a manageable way to understand some core mechanisms of a deep-learning framework: operator registration, memory management, execution logic, and so on. Overall, an ONNX Runtime run can be divided into three stages: session construction, model loading and initialization, and execution. As with all other mainstream frameworks, the most commonly used language for ONNX Runtime is Python, while the code that actually executes the framework is C++.
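Those three stages map directly onto the Python API. Below is a minimal ONNX Runtime sketch (the model path and dummy input are placeholders I added) using the same InferenceSession / run calls that appear in the full script in section 4.1:

import numpy as np
import onnxruntime as ort

# Stages 1 and 2: session construction, model loading and initialization
session = ort.InferenceSession("yolov5s.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name                  # e.g. "images" for YOLOv5 exports

# Stage 3: run
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)       # placeholder NCHW input
outputs = session.run(None, {input_name: dummy})
print(outputs[0].shape)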
2.3 Installation
pip install openvino -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install onnxruntime -i https://pypi.tuna.tsinghua.edu.cn/simple
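After installation, a quick sanity check (a small sketch of my own, not from the original post; the printed values will differ per machine) confirms that both runtimes import correctly and shows the available devices/providers:

from openvino.runtime import Core
import onnxruntime as ort

print("OpenVINO devices:", Core().available_devices)            # e.g. ['CPU']
print("ONNXRuntime version:", ort.__version__)
print("ONNXRuntime providers:", ort.get_available_providers())  # e.g. ['CPUExecutionProvider']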
3 YOLOv5 Introduction
For a detailed explanation of YOLOv5, see the official GitHub repository: https://github.com/ultralytics/yolov5
4 Inference with OpenVINO and ONNXRuntime
The code below implements the full pipeline: preprocessing → inference → post-processing → visualization. Assuming the image is resized to 640×640:
Preprocessing output shape: (1, 3, 640, 640).
Inference output shape: (1, 8400, 85), where 85 is the 4 box coordinates + the confidence score + 80 class probabilities, and 8400 corresponds to 80×80 + 40×40 + 20×20 (unlike v8 and v9, which take the largest class probability as the confidence score).
Post-processing output shape: (5, 6), where the first dimension (5) is the number of objects detected in bus.jpg and the second dimension (6) is (x1, y1, x2, y2, conf, cls).
Note: the output layout is identical to YOLOv6, so the same code can be reused. A short decoding sketch follows.
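To make the shape flow concrete, the following NumPy sketch (the random tensor stands in for a real model output; the threshold is illustrative) reduces a (1, 8400, 85) prediction to (N, 6) candidate boxes, mirroring the filtering and argmax steps in the postprocess() method of the full script:

import numpy as np

preds = np.random.rand(1, 8400, 85).astype(np.float32)  # stand-in for the model output

x = preds[0]                        # (8400, 85)
x = x[x[:, 4] > 0.4]                # keep rows whose confidence score passes the threshold
boxes = x[:, :4]                    # cx, cy, w, h
conf = x[:, 4]                      # confidence score (column 4 for v5/v6)
cls = np.argmax(x[:, 5:], axis=-1)  # index of the largest of the 80 class probabilities
out = np.c_[boxes, conf, cls]       # (N, 6): cx, cy, w, h, conf, cls, before NMS and rescaling
print(out.shape)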
4.1 Full Code
import argparse
import time
import cv2
import numpy as np
from openvino.runtime import Core # pip install openvino -i https://pypi.tuna.tsinghua.edu.cn/simple
import onnxruntime as ort  # ONNX Runtime inference; pip install onnxruntime installs the CPU build by default

# COCO's default 80 classes
CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
           'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
           'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
           'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
           'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
           'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
           'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']


class OpenvinoInference(object):
    def __init__(self, onnx_path):
        self.onnx_path = onnx_path
        ie = Core()
        self.model_onnx = ie.read_model(model=self.onnx_path)
        self.compiled_model_onnx = ie.compile_model(model=self.model_onnx, device_name="CPU")
        self.output_layer_onnx = self.compiled_model_onnx.output(0)

    def predirts(self, datas):
        predict_data = self.compiled_model_onnx([datas])[self.output_layer_onnx]
        return predict_data


class YOLOv5:
    """YOLOv5 object detection model class for handling inference and visualization."""

    def __init__(self, onnx_model, imgsz=(640, 640), infer_tool='openvino'):
        """
        Initialization.

        Args:
            onnx_model (str): Path to the ONNX model.
        """
        self.infer_tool = infer_tool
        if self.infer_tool == 'openvino':
            # Build the OpenVINO inference engine
            self.openvino = OpenvinoInference(onnx_model)
            self.ndtype = np.single
        else:
            # Build the ONNXRuntime inference engine
            self.ort_session = ort.InferenceSession(onnx_model,
                                                    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
                                                    if ort.get_device() == 'GPU' else ['CPUExecutionProvider'])

            # Numpy dtype: support both FP32 and FP16 onnx model
            self.ndtype = np.half if self.ort_session.get_inputs()[0].type == 'tensor(float16)' else np.single

        self.classes = CLASSES  # model class names
        self.model_height, self.model_width = imgsz[0], imgsz[1]  # image resize size
        self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))  # one color per class

    def __call__(self, im0, conf_threshold=0.4, iou_threshold=0.45):
        """
        The whole pipeline: pre-process -> inference -> post-process.

        Args:
            im0 (Numpy.ndarray): original input image.
            conf_threshold (float): confidence threshold for filtering predictions.
            iou_threshold (float): iou threshold for NMS.

        Returns:
            boxes (List): list of bounding boxes.
        """
        # Pre-process
        t1 = time.time()
        im, ratio, (pad_w, pad_h) = self.preprocess(im0)
        print('Preprocessing time: {:.3f}s'.format(time.time() - t1))

        # Inference
        t2 = time.time()
        if self.infer_tool == 'openvino':
            preds = self.openvino.predirts(im)
        else:
            preds = self.ort_session.run(None, {self.ort_session.get_inputs()[0].name: im})[0]
        print('Inference time: {:.2f}s'.format(time.time() - t2))

        # Post-process
        t3 = time.time()
        boxes = self.postprocess(preds,
                                 im0=im0,
                                 ratio=ratio,
                                 pad_w=pad_w,
                                 pad_h=pad_h,
                                 conf_threshold=conf_threshold,
                                 iou_threshold=iou_threshold,
                                 )
        print('Post-processing time: {:.3f}s'.format(time.time() - t3))

        return boxes

    # Pre-processing: resize, pad, HWC to CHW, BGR to RGB, normalize, add batch dim CHW -> BCHW
    def preprocess(self, img):
        """
        Pre-processes the input image.

        Args:
            img (Numpy.ndarray): image about to be processed.

        Returns:
            img_process (Numpy.ndarray): image preprocessed for inference.
            ratio (tuple): width, height ratios in letterbox.
            pad_w (float): width padding in letterbox.
            pad_h (float): height padding in letterbox.
        """
        # Resize and pad input image using letterbox() (Borrowed from Ultralytics)
        shape = img.shape[:2]  # original image shape
        new_shape = (self.model_height, self.model_width)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        ratio = r, r
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        pad_w, pad_h = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding
        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
        left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))  # pad with gray

        # Transforms: HWC to CHW -> BGR to RGB -> div(255) -> contiguous -> add axis(optional)
        img = np.ascontiguousarray(np.einsum('HWC->CHW', img)[::-1], dtype=self.ndtype) / 255.0
        img_process = img[None] if len(img.shape) == 3 else img
        return img_process, ratio, (pad_w, pad_h)

    # Post-processing: confidence-threshold filtering and NMS
    def postprocess(self, preds, im0, ratio, pad_w, pad_h, conf_threshold, iou_threshold):
        """
        Post-process the prediction.

        Args:
            preds (Numpy.ndarray): predictions come from ort.session.run().
            im0 (Numpy.ndarray): [h, w, c] original input image.
            ratio (tuple): width, height ratios in letterbox.
            pad_w (float): width padding in letterbox.
            pad_h (float): height padding in letterbox.
            conf_threshold (float): conf threshold.
            iou_threshold (float): iou threshold.

        Returns:
            boxes (List): list of bounding boxes.
        """
        # (Batch_size, Num_anchors, xywh_score_conf_cls): for v5 and v6 1.0 the [..., 4] column is the confidence
        # score, while v8/v9 use the largest class probability as the confidence score
        x = preds  # outputs: predictions (1, 8400, 85)

        # Predictions filtering by conf-threshold
        x = x[x[..., 4] > conf_threshold]

        # Create a new matrix which merge these(box, score, cls) into one
        # For more details about numpy.c_(): https://numpy.org/doc/1.26/reference/generated/numpy.c_.html
        x = np.c_[x[..., :4], x[..., 4], np.argmax(x[..., 5:], axis=-1)]

        # NMS filtering
        # after NMS: np.array([[x, y, w, h, conf, cls], ...]), shape=(-1, 4 + 1 + 1)
        x = x[cv2.dnn.NMSBoxes(x[:, :4], x[:, 4], conf_threshold, iou_threshold)]

        # Rescale the bounding boxes back to the original image for drawing
        if len(x) > 0:
            # Bounding boxes format change: cxcywh -> xyxy
            x[..., [0, 1]] -= x[..., [2, 3]] / 2
            x[..., [2, 3]] += x[..., [0, 1]]

            # Rescales bounding boxes from model shape(model_height, model_width) to the shape of original image
            x[..., :4] -= [pad_w, pad_h, pad_w, pad_h]
            x[..., :4] /= min(ratio)

            # Bounding boxes boundary clamp
            x[..., [0, 2]] = x[:, [0, 2]].clip(0, im0.shape[1])
            x[..., [1, 3]] = x[:, [1, 3]].clip(0, im0.shape[0])

            return x[..., :6]  # boxes
        else:
            return []

    # Draw boxes
    def draw_and_visualize(self, im, bboxes, vis=False, save=True):
        """
        Draw and visualize results.

        Args:
            im (np.ndarray): original image, shape [h, w, c].
            bboxes (numpy.ndarray): [n, 6], n is number of bboxes.
            vis (bool): imshow using OpenCV.
            save (bool): save image annotated.

        Returns:
            None
        """
        # Draw rectangles
        for (*box, conf, cls_) in bboxes:
            # draw bbox rectangle
            cv2.rectangle(im, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),
                          self.color_palette[int(cls_)], 1, cv2.LINE_AA)
            cv2.putText(im, f'{self.classes[int(cls_)]}: {conf:.3f}', (int(box[0]), int(box[1] - 9)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, self.color_palette[int(cls_)], 2, cv2.LINE_AA)

        # Show image
        if vis:
            cv2.imshow('demo', im)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        # Save image
        if save:
            cv2.imwrite('demo.jpg', im)


if __name__ == '__main__':
    # Create an argument parser to handle command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='yolov5s.onnx', help='Path to ONNX model')
    parser.add_argument('--source', type=str, default=str('bus.jpg'), help='Path to input image')
    parser.add_argument('--imgsz', type=tuple, default=(640, 640), help='Image input size')
    parser.add_argument('--conf', type=float, default=0.25, help='Confidence threshold')
    parser.add_argument('--iou', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--infer_tool', type=str, default='openvino', choices=("openvino", "onnxruntime"), help='Inference engine to use')
    args = parser.parse_args()

    # Build model
    model = YOLOv5(args.model, args.imgsz, args.infer_tool)

    # Read image by OpenCV
    img = cv2.imread(args.source)

    # Inference
    boxes = model(img, conf_threshold=args.conf, iou_threshold=args.iou)

    # Visualize
    if len(boxes) > 0:
        model.draw_and_visualize(img, boxes, vis=False, save=True)
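A typical invocation looks like the following (the filename yolov5_openvino_onnx.py is just an assumed name for the script above; yolov5s.onnx and bus.jpg are the defaults set in the argument parser). The annotated result is written to demo.jpg because draw_and_visualize() is called with save=True:

python yolov5_openvino_onnx.py --model yolov5s.onnx --source bus.jpg --infer_tool openvino
python yolov5_openvino_onnx.py --model yolov5s.onnx --source bus.jpg --infer_tool onnxruntime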
4.2 Results
Detailed time consumption (at 640×640 input):
Preprocessing: 0.005 s (including padding)
Inference: 0.04~0.05 s (OpenVINO)
Inference: 0.08~0.09 s (ONNXRuntime)
Post-processing: 0.001 s