3、一个脚本搞定yolov5和yolov8在c++环境中加载onnx模型格式仅调用opencv部署

news/2024/7/10 23:30:15 标签: YOLO, c++, opencv

一、重要说明:
本脚本参考官方代码直接整理在一个.cpp文件中完成,仅仅使用cv接口加载onnx模型完成yolov5和yolov8模型的推理过程,即只在你的c++开发环境中配置一个opencv就可以了。opencv如果要在gpu上使用,需要在编译的时候,在编译选项里面选定cuda相关的选项,配置成功即可。
二、看完别忘提示
用完不点赞的,上班族我祝您加班到12点,上学族我祝愿你毕业困难!不客气!!
三、完整代码如下:

#include <fstream>
#include <vector>
#include <string>
#include <random>

// OpenCV / DNN / Inference
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace std;

// One detected object: class index/name, confidence score, a random
// display color, and the bounding box in original-image pixel coordinates.
struct Detection
{
	int class_id{ 0 };          // index into the class-name list
	std::string className{};    // human-readable class name
	float confidence{ 0.0 };    // detection score in [0, 1]
	cv::Scalar color{};         // BGR color used when drawing this box
	cv::Rect box{};             // axis-aligned bounding box (pixels)
};

// Loads a YOLOv5 or YOLOv8 ONNX model through cv::dnn and runs detection.
// The two model families are distinguished at runtime by their output
// tensor layout: v5 is (1, 25200, 4+1+nc), v8 is (1, 4+nc, 8400).
class Inference
{
public:
	// onnxModelPath  : path to the .onnx weights file
	// modelInputShape: network input size, e.g. {640, 640}
	// classesTxtFile : optional text file with one class name per line;
	//                  call loadClassesFromFile() to use it, otherwise the
	//                  built-in COCO-80 names below are used
	// runWithCuda    : true -> CUDA backend/target, false -> CPU
	Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape, const std::string &classesTxtFile, const bool &runWithCuda)
	{
		modelPath = onnxModelPath;
		modelShape = modelInputShape;
		classesPath = classesTxtFile;
		cudaEnabled = runWithCuda;

		loadOnnxNetwork();
	}
private:
	std::string modelPath;
	std::string classesPath;
	bool cudaEnabled;
	cv::Size2f modelShape;      // float so the rescale factors below keep sub-pixel precision
	cv::dnn::Net net;
	bool letterBoxForSquare = true; // pad the input to a square before resizing

	// Default COCO-80 class names; replaced by loadClassesFromFile() if used.
	std::vector<std::string> classes{ "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush" };
	float modelConfidenceThreshold{ 0.25 }; // yolov5 objectness cutoff
	float modelScoreThreshold{ 0.45 };      // per-class score cutoff
	float modelNMSThreshold{ 0.50 };        // NMS IoU threshold


private:
	// Reads the ONNX model and selects the backend/target.
	// Note: readNetFromONNX throws cv::Exception if the file is missing/invalid.
	void loadOnnxNetwork()
	{
		net = cv::dnn::readNetFromONNX(modelPath);
		if (cudaEnabled)
		{
			std::cout << "\nRunning on CUDA" << std::endl;
			net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
			net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);
		}
		else
		{
			std::cout << "\nRunning on CPU" << std::endl;
			net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
			net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
		}
	}
	// Letterbox: copy the image into the top-left corner of a black square
	// whose side is max(width, height), so the aspect ratio is preserved
	// when blobFromImage later resizes it to the (square) model input.
	cv::Mat formatToSquare(const cv::Mat &source)
	{
		int col = source.cols;
		int row = source.rows;
		int _max = MAX(col, row);
		cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3);
		source.copyTo(result(cv::Rect(0, 0, col, row)));
		return result;
	}
public:
	// Runs the network on one BGR image and returns the post-NMS detections
	// with boxes mapped back to the coordinates of `input`.
	std::vector<Detection> runInference(const cv::Mat &input)
	{
		cv::Mat modelInput = input;
		if (letterBoxForSquare && modelShape.width == modelShape.height)
			modelInput = formatToSquare(modelInput);

		// Normalize to [0,1], resize to model input, swap BGR->RGB, no crop.
		cv::Mat blob;
		cv::dnn::blobFromImage(modelInput, blob, 1.0 / 255.0, modelShape, cv::Scalar(), true, false);
		net.setInput(blob);

		std::vector<cv::Mat> outputs;
		net.forward(outputs, net.getUnconnectedOutLayersNames());

		int rows = outputs[0].size[1];
		int dimensions = outputs[0].size[2];

		bool yolov8 = false;
		// yolov5 output: (batch, 25200, 4 + 1 + numClasses)  box + objectness + scores
		// yolov8 output: (batch, 4 + numClasses, 8400)       box + scores (no objectness)
		if (dimensions > rows) // shape[2] > shape[1] identifies yolov8
		{
			yolov8 = true;
			rows = outputs[0].size[2];
			dimensions = outputs[0].size[1];

			// Transpose (attrs x anchors) -> (anchors x attrs) so each row
			// is one candidate, matching the yolov5 layout.
			outputs[0] = outputs[0].reshape(1, dimensions);
			cv::transpose(outputs[0], outputs[0]);
		}
		float *data = (float *)outputs[0].data;

		// FIX: derive the class count from the tensor layout instead of
		// classes.size(), so models trained with a different number of
		// classes don't read past the end of each row.
		const int numClasses = yolov8 ? (dimensions - 4) : (dimensions - 5);

		// Factors mapping model-input coordinates back to image coordinates.
		float x_factor = modelInput.cols / modelShape.width;
		float y_factor = modelInput.rows / modelShape.height;

		std::vector<int> class_ids;
		std::vector<float> confidences;
		std::vector<cv::Rect> boxes;

		for (int i = 0; i < rows; ++i)
		{
			if (yolov8)
			{
				float *classes_scores = data + 4;

				cv::Mat scores(1, numClasses, CV_32FC1, classes_scores);
				cv::Point class_id;
				double maxClassScore;

				minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);

				if (maxClassScore > modelScoreThreshold)
				{
					confidences.push_back(maxClassScore);
					class_ids.push_back(class_id.x);

					// Box is stored as center-x, center-y, width, height.
					float x = data[0];
					float y = data[1];
					float w = data[2];
					float h = data[3];

					int left = int((x - 0.5 * w) * x_factor);
					int top = int((y - 0.5 * h) * y_factor);

					int width = int(w * x_factor);
					int height = int(h * y_factor);

					boxes.push_back(cv::Rect(left, top, width, height));
				}
			}
			else // yolov5
			{
				float confidence = data[4]; // objectness score

				if (confidence >= modelConfidenceThreshold)
				{
					float *classes_scores = data + 5;

					cv::Mat scores(1, numClasses, CV_32FC1, classes_scores);
					cv::Point class_id;
					double max_class_score;

					minMaxLoc(scores, 0, &max_class_score, 0, &class_id);

					if (max_class_score > modelScoreThreshold)
					{
						confidences.push_back(confidence);
						class_ids.push_back(class_id.x);

						float x = data[0];
						float y = data[1];
						float w = data[2];
						float h = data[3];

						int left = int((x - 0.5 * w) * x_factor);
						int top = int((y - 0.5 * h) * y_factor);

						int width = int(w * x_factor);
						int height = int(h * y_factor);

						boxes.push_back(cv::Rect(left, top, width, height));
					}
				}
			}

			data += dimensions; // advance to the next candidate row
		}

		// Non-maximum suppression over the surviving candidates.
		std::vector<int> nms_result;
		cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result);

		// FIX: seed one RNG for the whole call instead of reconstructing
		// random_device/mt19937 for every kept detection.
		std::random_device rd;
		std::mt19937 gen(rd());
		std::uniform_int_distribution<int> dis(100, 255);

		std::vector<Detection> detections{};
		for (unsigned long i = 0; i < nms_result.size(); ++i)
		{
			int idx = nms_result[i];

			Detection result;
			result.class_id = class_ids[idx];
			result.confidence = confidences[idx];

			result.color = cv::Scalar(dis(gen),
				dis(gen),
				dis(gen));

			// FIX: guard the name lookup — a custom model may have more
			// classes than the name list provides.
			result.className = (result.class_id >= 0 && result.class_id < (int)classes.size())
				? classes[result.class_id]
				: std::to_string(result.class_id);
			result.box = boxes[idx];

			detections.push_back(result);
		}

		return detections;
	}

	// Replaces the built-in class names with those read from classesPath
	// (one name per line). On failure to open, the current names are kept.
	// NOTE: the constructor does not call this; invoke it explicitly.
	void loadClassesFromFile()
	{
		std::ifstream inputFile(classesPath);
		if (inputFile.is_open())
		{
			// FIX: clear the COCO defaults first — the original appended,
			// leaving 80 stale names ahead of the loaded ones and breaking
			// the class_id -> name mapping.
			classes.clear();
			std::string classLine;
			while (std::getline(inputFile, classLine))
				classes.push_back(classLine);
			inputFile.close();
		}
	}
};

int main(int argc, char **argv)
{
	std::string projectBasePath = "E:\\mycode\\c++projects\\vs2017code\\sourcefile";
	bool runOnGPU = true;

	Inference inf(projectBasePath + "/yolov8s.onnx", cv::Size(640, 640), "classes.txt", runOnGPU);
	
	std::vector<std::string> imageNames;
	imageNames.push_back(projectBasePath + "/bus.jpg");


	for (int i = 0; i < imageNames.size(); ++i)
	{
		cv::Mat frame = cv::imread(imageNames[i]);

		// Inference starts here...
		std::vector<Detection> output = inf.runInference(frame);

		int detections = output.size();
		std::cout << "Number of detections:" << detections << std::endl;

		for (int i = 0; i < detections; ++i)
		{
			Detection detection = output[i];

			cv::Rect box = detection.box;
			cv::Scalar color = detection.color;

			// Detection box
			cv::rectangle(frame, box, color, 2);

			// Detection box text
			std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);
			cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
			cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);

			cv::rectangle(frame, textBox, color, cv::FILLED);
			cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
		}
		// Inference ends here...

		// This is only for preview purposes
		float scale = 0.8;
		cv::resize(frame, frame, cv::Size(frame.cols*scale, frame.rows*scale));
		cv::imshow("Inference", frame);

		cv::waitKey(-1);
	}
	std::cout << "-----------------------------" << std::endl;
	std::cout << "dd"<< std::endl;
	return 0;
}

http://www.niftyadmin.cn/n/5363456.html

相关文章

Nicn的刷题日常之杨氏矩阵(三种方法求解,逐级递增详解,手把手教学,建议三连收藏)

目录 1.杨氏矩阵知识普及:什么是杨氏矩阵 2.题目描述 3.解题 3.1暴力求解,遍历法 3.2巧妙解题:对角元素法 3.3将巧解法封装为函数 4.结语 1.杨氏矩阵知识普及:什么是杨氏矩阵 杨氏矩阵,是对组合表示理论和…

前端工程化之:webpack2-2(内置插件)

目录 一、内置插件 1.DefinePlugin 2.BannerPlugin 3.ProvidePlugin 一、内置插件 所有的 webpack 内置插件都作为 webpack 的静态属性存在的,使用下面的方式即可创建一个插件对象: const webpack = require("webpack") new webpack.插件…

前端封装websocket类,实现消息注册和全局回调

实现消息注册和回调函数,实现全局使用同一个 websocket 对象,并实现断线重连和心跳连接等功能,可以实现全局使用唯一实例,可以另外进行拓展配置 // WebSocket类对象 class WebSocketCli {// 构造函数 constructor(url: string, opts…

单片机学习笔记---定时器和中断系统如何连起来工作

前面两节我们分别讲了中断系统和定时器,这节我们看看这两者连起来工作的原理。 说明:看这一节之前一定要先把前两节给看明白了再仔细琢磨这一节的每一张图! 前两节: 单片机学习笔记---中断系统(含外部中断)…

javascript实现深度拷贝

在JavaScript中,深拷贝是指创建一个新对象,并将原对象的所有属性值复制到新对象中。这样,即使修改新对象的属性值,也不会影响到原对象。 1.使用JSON.parse()和JSON.stringify()方法: function deepClone(obj) {return…

JSR303参数校验-SpringMVC

文章目录 JSR303技术标准简介JSR303标准几个具体实现框架validation-apijakarta.validation-apihibernate-validatorspring-boot-starter-validation Spring Validationjavax.validation.constraints包下提供的注解org.hibernate.validator.constraints包扩展的注解校验注解默认…

【Eclipse插件开发】3工作台workbench探索

3工作台workbench探索 文章目录 3工作台workbench探索前言视图编辑器一、工作台Workbench入门工作台页透视图视图和编辑器二、使用命令的基本工作台扩展点2.1 org.eclipse.ui.views2.2 org.eclipse.ui.editors编辑器和内容大纲2.3 org.eclipse.ui.comm

【开源】基于JAVA+Vue+SpringBoot的河南软件客服系统

目录 一、摘要 1.1 项目介绍 1.2 项目录屏 二、功能模块 2.1 系统管理人员 2.2 业务操作人员 三、系统展示 四、核心代码 4.1 查询客户 4.2 新增客户跟进情况 4.3 查询客户历史 4.4 新增服务派单 4.5 新增客户服务费 五、免责说明 一、摘要 1.1 项目介绍 基于JAVA+Vue+SpringBoot+MySQL的河…