gst_license_plate_recognition/include/YOLOv5/DetectorCOCO.h
nqthai199@gmail.com df3dd9a705 first commit
2022-09-15 09:26:49 +07:00

69 lines
2.3 KiB
C++

#pragma once // this is a header (DetectorCOCO.h); guard was missing, risking multiple-definition errors

#include <iostream>
#include <chrono>
#include <cmath>
#include "cuda_utils.h"
#include "logging.h"
#include "utils.h"
#include "preprocess.h"
#include "structs.h"
#include <fstream>
#include <map>
#include <sstream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "yololayer.h"

// NOTE(review): `using namespace` in a header leaks nvinfer1 into every
// translation unit that includes this file. Kept because the class below
// refers to IRuntime/ICudaEngine/IExecutionContext unqualified; prefer
// qualifying those names and removing this directive in a follow-up.
using namespace nvinfer1;

#define USE_FP32 // TensorRT precision mode: set USE_INT8 or USE_FP16 or USE_FP32
#define DEVICE 0 // GPU id
#define NMS_THRESH_YOLO 0.4        // IoU threshold used by non-max suppression
#define CONF_THRESH_YOLO_HEAD 0.75 // minimum confidence kept for head detections
#define CONF_THRESH_YOLO_CHAIR 0.8 // minimum confidence kept for "chair" model detections
#define BATCH_SIZE_YOLO 1
// Parenthesized so the macro behaves as a single value inside larger
// expressions: the original `3000 * 3000` would make
// `x / MAX_IMAGE_INPUT_SIZE_THRESH` expand to `x / 3000 * 3000`.
#define MAX_IMAGE_INPUT_SIZE_THRESH (3000 * 3000) // ensure it exceeds the maximum size of the input images!
// Singleton holding two TensorRT YOLOv5 engines — a crowd-human head model
// and an 80-class COCO model ("chair") — plus the pre/post-processing
// helpers (letterbox rect mapping, IoU, NMS) used around inference.
class DetectorCOCO{
public:
    /// Meyers-singleton accessor: one shared detector per process.
    static DetectorCOCO &getInst()
    {
        static DetectorCOCO instance;
        return instance;
    }

    // A singleton must not be copied: copying would alias the raw TensorRT
    // runtime/engine/context pointers held below.
    DetectorCOCO(const DetectorCOCO&) = delete;
    DetectorCOCO& operator=(const DetectorCOCO&) = delete;

    /// One-time setup (engine deserialization etc. — defined in the .cpp;
    /// exact behavior not visible here, confirm against the implementation).
    void _init();

    // NOTE(review): a public constructor defeats the singleton — callers can
    // still create extra instances. Kept public so existing callers compile;
    // consider making it private once call sites are audited.
    DetectorCOCO();

    /// Maps a raw network-space bbox [x, y, w, h] back onto `img` coordinates.
    cv::Rect get_rect_yolov5(cv::Mat& img, float bbox[4]);

    /// Intersection-over-union of two [x, y, w, h] boxes.
    float iou_yolov5(float lbox[4], float rbox[4]);

    /// Ordering predicate for detections (used when sorting before NMS).
    static bool cmp_yolov5(const Yolo::Detection& a, const Yolo::Detection& b);

    /// Non-max suppression over the raw network output buffer; survivors with
    /// confidence >= conf_thresh and pairwise IoU < nms_thresh land in `res`.
    void nms_yolov5(std::vector<Yolo::Detection>& res, float *output, float conf_thresh, float nms_thresh);

    /// Runs one enqueued inference on `stream` using the given execution
    /// context and device buffers, writing results into `output`.
    void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* output, int batchSize);

    /// Full pipeline: preprocess `inputImage`, run the engine selected by
    /// `type`, and return post-processed detections.
    std::vector<ObjectDetect> predict(cv::Mat inputImage, std::string type);

private:
    const int INPUT_H = Yolo::INPUT_H;
    const int INPUT_W = Yolo::INPUT_W;
    const int CLASS_NUM = Yolo::CLASS_NUM;
    // Output buffer length in floats: all detection structs plus one leading
    // count slot.
    const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1;
    const char* INPUT_BLOB_NAME = "data";
    const char* OUTPUT_BLOB_NAME = "prob";
    Logger gLogger;

    // Hard-coded serialized-engine paths (machine-specific; presumably
    // overridden per deployment — verify before shipping).
    std::string engine_name_head = "/opt/nvidia/deepstream/deepstream-6.0/sources/apps/sample_apps/deepstream-app/Model_Classify/verify/crowdhuman_0-body_1-head__yolov5m.engine";
    std::string engine_name_chair = "/media/thai/A0B6A6B3B6A688FC2/code/PROJECT_BI/CLONE/tensorrtx/yolov5/build/20220615_weight_80class.engine";

    // Raw TensorRT handles, populated by _init(). Initialized to nullptr so
    // an accidental use before _init() fails loudly instead of reading
    // indeterminate pointers.
    IRuntime* runtime_head = nullptr;
    IRuntime* runtime_chair = nullptr;
    ICudaEngine* engine_head = nullptr;
    ICudaEngine* engine_chair = nullptr;
    IExecutionContext* context_head = nullptr;
    IExecutionContext* context_chair = nullptr;
};