/*
* @Author: your name
* @Date: 2022-04-20 15:49:50
* @LastEditTime: 2025-09-06 22:23:08
* @LastEditors: xiewenji 527774126@qq.com
* @Description: Open koroFileHeader to configure this header template: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
* @FilePath: /ZCXD_MonitorPlatform/src/CoreLogicModule/include/CamDeal.h
*              NOTE(review): this @FilePath looks stale — the include guard below is AIModel_Impl_H_, so the file is presumably AIModel_Impl.h; verify.
*/
#ifndef AIModel_Impl_H_
#define AIModel_Impl_H_
#include <atomic>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "AI_Factory.h"
#include "Engine.h"
using namespace std;
// 模型输入输出最大允许的 节点数
#define MAX_MODEL_NODE_NUM 5
class AIModel_Impl : public AIModel_Base
{
public:
enum AI_Buffer_Type
{
AI_Buffer_Type_INPUT,
AI_Buffer_Type_OUTPUT,
AI_Buffer_Type_Count,
};
// 模型节点参数 一个模型包括多个节点,输入 输出。
struct Node_Config : public AI_Image
{
int type;
std::string name;
int ucharsize;
int floatsize;
int datalength;
Node_Config()
{
channel = 0;
width = 0;
height = 0;
type = AI_Buffer_Type_INPUT;
name = "";
ucharsize = 0;
floatsize = 0;
datalength = 0;
}
void copy(Node_Config tem)
{
this->type = tem.type;
this->channel = tem.channel;
this->width = tem.width;
this->height = tem.height;
this->name = tem.name;
this->ucharsize = tem.ucharsize;
this->floatsize = tem.floatsize;
this->datalength = tem.datalength;
}
void CalDataSize()
{
ucharsize = channel * width * height * sizeof(unsigned char);
floatsize = channel * width * height * sizeof(float);
datalength = channel * width * height;
}
};
// 流上的 节点参数信息。
struct Stream_Node_Config
{
Node_Config nodeConfig;
void *gpu_buffers;
void *gpu_ImgData; // uchar
float *cpu_floatData;
Stream_Node_Config()
{
gpu_buffers = NULL;
gpu_ImgData = NULL;
cpu_floatData = NULL;
}
~Stream_Node_Config()
{
if (gpu_buffers != NULL)
{
cudaFree(gpu_buffers);
gpu_buffers = NULL;
}
if (gpu_ImgData != NULL)
{
cudaFree(gpu_ImgData);
gpu_ImgData = NULL;
}
if (cpu_floatData != NULL)
{
delete[] cpu_floatData;
cpu_floatData = NULL;
}
}
};
// cuda上 一个流 包括的所有参数
struct Cuda_Stream_Config
{
int nstreamIdx;
cudaStream_t stream;
std::unique_ptr<nvinfer1::IExecutionContext> context;
std::shared_ptr<Stream_Node_Config> pstreamNode_input_0;
std::shared_ptr<Stream_Node_Config> pstreamNode_input_1;
std::shared_ptr<Stream_Node_Config> pstreamNode_output_0;
std::shared_ptr<Stream_Node_Config> pstreamNode_output_1;
std::shared_ptr<Stream_Node_Config> pstreamNode_output_2;
std::vector<std::shared_ptr<Stream_Node_Config>> streamConfigList;
Cuda_Stream_Config()
{
nstreamIdx = 0;
streamConfigList.clear();
pstreamNode_input_0 = nullptr;
pstreamNode_input_1 = nullptr;
pstreamNode_output_0 = nullptr;
pstreamNode_output_1 = nullptr;
pstreamNode_output_2 = nullptr;
}
~Cuda_Stream_Config()
{
cudaStreamDestroy(stream);
}
};
// GPU 上封装的参数信息
struct GPU_Engine
{
bool bsucc; // 是否初始化成功
int nGPUIdx; // GPU id号
std::shared_ptr<Engine> engine; // AI 模型 引擎
std::vector<std::shared_ptr<Cuda_Stream_Config>> cudaSteams; // 所有流
GPU_Engine()
{
nGPUIdx = 0;
cudaSteams.clear();
bsucc = false;
}
};
// AI 检测 顺序调用 gpu stream 流。
struct Det_GPU_Stram
{
std::mutex AI_mutex;
int nGPUIdx;
std::shared_ptr<Engine> engine; // AI 模型 引擎
std::shared_ptr<Cuda_Stream_Config> cuda_stream;
Det_GPU_Stram()
{
nGPUIdx = 0;
}
};
public:
AIModel_Impl();
~AIModel_Impl();
// 初始化函数
int Init(AIModelRun_Config config);
int AIDet(const cv::Mat &inImg, cv::Mat &outimg);
int AIDet(const cv::Mat &inImg, cv::Mat &outimg0, cv::Mat &outimg1);
int AIClass(const cv::Mat &inImg, float *fmaxScore);
private:
// 运行参数检查
int ModelRunConfigCheck(AIModelRun_Config &runConfig);
// 每个gpu 载入模型
int LoadEngine(int ngpuIdx);
// 解析 模型的信息
int GetEngineInfo1(std::shared_ptr<GPU_Engine> &pgpuengine);
int AI_Det_In_1_Out_1(Node_Config *pConfig_in, Node_Config *pConfig_out, const unsigned char *p_indata_0, unsigned char *p_outdata_1);
int AI_Det_In_1_Out_1_class(unsigned char *p_indata_0, float *fmaxScore);
int F2softmaxId(float *data, int class_num, float *fmaxScore);
private:
// 调用相关函数
int GetStream(std::shared_ptr<Det_GPU_Stram> &pdetStream);
cv::Mat InitMat(int channel, int w, int h);
private:
// 模型参数
AIModelRun_Config m_modelRun_Config;
// 模型的输入输出list
std::vector<Node_Config> m_modelNodeList;
// 输入 输出节点
Node_Config *m_pNode_input_0;
Node_Config *m_pNode_input_1;
Node_Config *m_pNode_output_0;
Node_Config *m_pNode_output_1;
Node_Config *m_pNode_output_2;
// 引擎列表,一个 gpu 一个引擎
std::vector<std::shared_ptr<GPU_Engine>> m_GPU_Engine;
// 检测信息
std::vector<std::shared_ptr<Det_GPU_Stram>> m_DetGPUStream;
int m_nALLStreamNum;
private:
// 上次使用的GPU stream Idx;
std::atomic<int> m_nLast_GPUStreamIdx;
};
#endif