first commit

This commit is contained in:
nqthai199@gmail.com 2022-09-15 14:33:44 +07:00
commit 24cca83c2e
13 changed files with 563 additions and 0 deletions

51
config_ana.txt Normal file
View File

@ -0,0 +1,51 @@
[property]
enable=1
#Width height used for configuration to which below configs are configured
config-width=1920
config-height=1080
#osd-mode 0: Dont display any lines, rois and text
# 1: Display only lines, rois and static text i.e. labels
# 2: Display all info from 1 plus information about counts
osd-mode=2
#Set OSD font size that has to be displayed
display-font-size=12
[roi-filtering-stream-0]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
roi-RF=0;0;1920;0;1920;1080;0;1080
#remove objects in the ROI
inverse-roi=0
class-id=-1
[roi-filtering-stream-1]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
roi-RF=0;0;1920;0;1920;1080;0;1080
#remove objects in the ROI
inverse-roi=0
class-id=-1
[roi-filtering-stream-2]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
roi-RF=40;509;835;349;1822;756;1903;871;1897;1047;557;1065
#remove objects in the ROI
inverse-roi=0
class-id=-1
[roi-filtering-stream-3]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
roi-RF=453;71;1216;48;1526;995;249;1016
#remove objects in the ROI
inverse-roi=0
class-id=-1

View File

@ -0,0 +1,161 @@
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=1
[tiled-display]
enable=1
rows=2
columns=2
width=1600
height=900
gpu-id=0
nvbuf-memory-type=0
#rtsp://192.168.1.121:8554/test
[source0]
enable=1
# 1:camera(v4l2) 2: single uri 3:multi uri 4:rtsp 5 camera(CSI) only for Jetson
type=3
#uri=file:///home/thai/manyPlates.mp4
uri=file:///home/thai/manyPlates.mp4
#uri=rtsp://18.179.9.206:55544/test
num-sources=1
gpu-id=0
cudadec-memtype=0
[source1]
enable=0
# 1:camera(v4l2) 2: single uri 3:multi uri 4:rtsp 5 camera(CSI) only for Jetson
type=3
uri=file:///home/thai/manyPlates.mp4
#uri=rtsp://192.168.1.121:8554/test
#uri=rtsp://18.179.9.206:55544/test
num-sources=1
gpu-id=0
cudadec-memtype=0
[source2]
enable=0
# 1:camera(v4l2) 2: single uri 3:multi uri 4:rtsp 5 camera(CSI) only for Jetson
type=3
#uri=file:///home/thai/manyPlates.mp4
uri=file:///home/thai/Desktop/TestOto.mp4
#uri=rtsp://18.179.9.206:55544/test
# NOTE(review): was `num-sources=11920`, an apparent typo (the value 1 fused
# with the width 1920); every other source section uses 1.
num-sources=1
[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0
[source3]
enable=0
# 1:camera(v4l2) 2: single uri 3:multi uri 4:rtsp 5 camera(CSI) only for Jetson
type=3
#uri=file:///home/thai/manyPlates.mp4
uri=file:///home/thai/Desktop/TestOto.mp4
#uri=rtsp://18.179.9.206:55544/test
# NOTE(review): was `num-sources=11920`, an apparent typo (the value 1 fused
# with the width 1920); every other source section uses 1.
num-sources=1
# NOTE(review): duplicate [sink0] section — another [sink0] is defined above;
# this second instance likely should be [sink1]. Verify before enabling
# multiple display sinks.
[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0
[osd]
enable=1
gpu-id=0
border-width=5
text-size=20
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Serif
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0
[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
batch-size=1
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
[primary-gie]
enable=1
gpu-id=0
gie-unique-id=1
nvbuf-memory-type=0
batch-size=1
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
config-file=license_plate_detection/config_infer_primary_YOLOX.txt
[secondary-gie0]
enable=1
gpu-id=0
gie-unique-id=2
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=license_plate_classification/config_infer_secondary_classify_licenseplate.txt
[tracker]
enable=1
# For NvDCF and DeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=640
tracker-height=384
ll-lib-file=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so
# ll-config-file required to set different tracker types
# ll-config-file=../../samples/configs/deepstream-app/config_tracker_IOU.yml
ll-config-file=../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
# ll-config-file=../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
# ll-config-file=../../samples/configs/deepstream-app/config_tracker_DeepSORT.yml
gpu-id=0
enable-batch-process=1
enable-past-frame=1
display-tracking-id=1
[nvds-analytics]
enable=1
config-file=config_ana.txt
[tests]
file-loop=0
[ds-example]
enable=0
full-frame=1
processing-width=1280
processing-height=720
gpu-id=0
unique-id=999
#[nvds-preprocess]
#enable=1
#gpu-id=0
#config-file=config_preprocess.txt
#unique_id=998

View File

@ -0,0 +1,17 @@
[property]
network-input-order=0
model-engine-file=03-0.9769-weights-2class-licenseplate-20220618.trt
labelfile-path=labels.txt
gpu-id=0
# NOTE(review): a net-scale-factor of 0 would zero every input pixel; typical
# values are 1 or 0.00392156862745098 (1/255) — confirm this is intentional.
net-scale-factor=0
model-color-format=0
force-implicit-batch-dim=1
batch-size=1
# 0=FP32 and 1=INT8 mode
network-mode=0
network-type=1
process-mode=2
classifier-async-mode=0
secondary-reinfer-interval=1
classifier-threshold=0.85

View File

@ -0,0 +1 @@
long;square

View File

@ -0,0 +1,38 @@
[property]
gpu-id=0
net-scale-factor=1
# 0:RGB 1:BGR
model-color-format=1
model-engine-file=license-plate-detection.trt
labelfile-path=labels-custom.txt
num-detected-classes=1
batch-size=1
interval=0
gie-unique-id=1
# primary
process-mode=1
# Detector
network-type=0
# FP16
network-mode=2
# Cluster mode 0=Group Rectangles 1=DBSCAN 2=NMS 3=DBSCAN+NMS 4=None
cluster-mode=2
maintain-aspect-ratio=1
scaling-filter=1
scaling-compute-hw=0
parse-bbox-func-name=NvDsInferParseCustomYolox
custom-lib-path=nvdsinfer_custom_impl_yolox/libnvdsinfer_custom_impl_yolox.so
[class-attrs-all]
pre-cluster-threshold=0.6
nms-iou-threshold=0.4

View File

@ -0,0 +1 @@
license plate

Binary file not shown.

View File

@ -0,0 +1,54 @@
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#CUDA_VER?=
# Use a conditional assignment so the caller can override the CUDA version
# (e.g. `make CUDA_VER=11.4`). The original hard `=` assignment made the
# empty-check below unreachable dead code.
CUDA_VER?=11.2
ifeq ($(CUDA_VER),)
$(error "CUDA_VER is not set")
endif

CC:= g++
NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc

CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations
CFLAGS+= -I../../../includes -I/usr/local/cuda-$(CUDA_VER)/include

# CXXFLAGS:= -fopenmp
CXXFLAGS = -std=c++11

LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs
LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group

INCS:= $(wildcard *.h)
SRCFILES:= nvdsparsebbox_yolox.cpp
TARGET_LIB:= libnvdsinfer_custom_impl_yolox.so

TARGET_OBJS:= $(SRCFILES:.cpp=.o)
TARGET_OBJS:= $(TARGET_OBJS:.cu=.o)

all: $(TARGET_LIB)

%.o: %.cpp $(INCS) Makefile
	$(CC) -c -o $@ $(CFLAGS) $(CXXFLAGS) $<

%.o: %.cu $(INCS) Makefile
	$(NVCC) -c -o $@ --compiler-options '-fPIC' $<

$(TARGET_LIB) : $(TARGET_OBJS)
	$(CC) -o $@ $(TARGET_OBJS) $(LFLAGS)

clean:
	rm -rf $(TARGET_LIB)
	rm -rf $(TARGET_OBJS)

View File

@ -0,0 +1,240 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstring>
#include <fstream>
#include <vector>
#include <map>
#include <iostream>
// #include <omp.h>
// #include <opencv2/opencv.hpp>
#include "nvdsinfer_custom_impl.h"
#define NMS_THRESH 0.1
#define BBOX_CONF_THRESH 0.1

// Detector geometry: network input resolution, and the source image
// resolution that parsed boxes are mapped back to.
static const int NUM_CLASSES = 1;
static const int INPUT_W = 416;
static const int INPUT_H = 416;
static const int IMAGE_W = 1920;
static const int IMAGE_H = 1080;
const char* INPUT_BLOB_NAME = "images";
const char* OUTPUT_BLOB_NAME = "output";
static constexpr int LOCATIONS = 4;

// One decoded candidate box in center format, with its score and class.
struct alignas(float) Detection{
    // center_x, center_y, w, h
    float bbox[LOCATIONS];
    float conf;      // bbox_conf * cls_conf
    float class_id;
};

// One anchor position: cell (grid0, grid1) of the feature map whose
// downsampling factor is `stride`.
struct GridAndStride
{
    int grid0;
    int grid1;
    int stride;
};

// Enumerate every anchor position of every feature-map level.
// For each stride s the head predicts on an (INPUT_W/s) x (INPUT_H/s) grid;
// one GridAndStride entry is appended per grid cell, in row-major order.
static void generate_grids_and_stride(std::vector<int>& strides, std::vector<GridAndStride>& grid_strides)
{
    for (const int s : strides)
    {
        const int cells_y = INPUT_H / s;
        const int cells_x = INPUT_W / s;
        for (int gy = 0; gy < cells_y; gy++)
        {
            for (int gx = 0; gx < cells_x; gx++)
            {
                // Position of this anchor on the current feature level.
                grid_strides.push_back({gx, gy, s});
            }
        }
    }
}
static void generate_yolox_proposals(std::vector<GridAndStride> grid_strides, float* feat_blob, float prob_threshold, std::vector<Detection>& objects)
{
const int num_anchors = grid_strides.size(); // 8400
for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
{
const int grid0 = grid_strides[anchor_idx].grid0;
const int grid1 = grid_strides[anchor_idx].grid1;
const int stride = grid_strides[anchor_idx].stride;
const int basic_pos = anchor_idx * (NUM_CLASSES + 5);
// yolox/models/yolo_head.py decode logic
// decode之后的bbox信息
float x_center = (feat_blob[basic_pos+0] + grid0) * stride;
float y_center = (feat_blob[basic_pos+1] + grid1) * stride;
float w = exp(feat_blob[basic_pos+2]) * stride;
float h = exp(feat_blob[basic_pos+3]) * stride;
float box_objectness = feat_blob[basic_pos+4];
for (int class_idx = 0; class_idx < NUM_CLASSES; class_idx++)
{
float box_cls_score = feat_blob[basic_pos + 5 + class_idx];
float box_prob = box_objectness * box_cls_score;
if (box_prob > prob_threshold)
{
Detection obj;
obj.bbox[0] = x_center;
obj.bbox[1] = y_center;
obj.bbox[2] = w;
obj.bbox[3] = h;
obj.class_id = class_idx;
obj.conf = box_prob;
objects.push_back(obj);
}
} // class loop
} // point anchor loop
}
bool cmp(Detection& a, Detection& b)
{
return a.conf > b.conf;
}
// Intersection-over-union of two boxes given in center format
// (cx, cy, w, h).  Returns 0 when the boxes do not overlap at all.
float iou(float lbox[4], float rbox[4])
{
    const float left   = std::max(lbox[0] - lbox[2] / 2.f, rbox[0] - rbox[2] / 2.f);
    const float right  = std::min(lbox[0] + lbox[2] / 2.f, rbox[0] + rbox[2] / 2.f);
    const float top    = std::max(lbox[1] - lbox[3] / 2.f, rbox[1] - rbox[3] / 2.f);
    const float bottom = std::min(lbox[1] + lbox[3] / 2.f, rbox[1] + rbox[3] / 2.f);

    // No horizontal or vertical overlap -> IoU is zero.
    if (top > bottom || left > right)
        return 0.0f;

    const float inter = (right - left) * (bottom - top);
    // union = areaA + areaB - intersection
    return inter / (lbox[2] * lbox[3] + rbox[2] * rbox[3] - inter);
}
void nms_bboxes(std::vector<Detection>& proposals, std::vector<Detection>& res,float nms_thresh)
{
// int det_size = sizeof(Detection) / sizeof(float);
std::map<float, std::vector<Detection>> m;
for (unsigned int i = 0; i < proposals.size(); i++)
{
Detection det = proposals[i];
if (m.count(det.class_id) == 0) m.emplace(det.class_id, std::vector<Detection>());
m[det.class_id].push_back(det);
}
for (auto it = m.begin(); it != m.end(); it++)
{
auto& dets = it->second;
std::sort(dets.begin(), dets.end(), cmp);
for (size_t m = 0; m < dets.size(); ++m)
{
auto& item = dets[m];
res.push_back(item);
for (size_t n = m + 1; n < dets.size(); ++n)
{
if (iou(item.bbox, dets[n].bbox) > nms_thresh)
{
dets.erase(dets.begin()+n);
--n;
}
}
}
}
}
// Turn one raw network output buffer into the final, NMS-filtered
// detection list.  `scale`, `img_w` and `img_h` are accepted for interface
// compatibility but are not used here — rescaling to image coordinates
// happens in the caller.
static void decode_outputs(float* prob, std::vector<Detection>& objects, float scale, const int img_w, const int img_h) {
    // Build the anchor table for the three YOLOX feature levels.
    std::vector<int> strides = {8, 16, 32};
    std::vector<GridAndStride> grid_strides;
    generate_grids_and_stride(strides, grid_strides);

    // Decode and threshold raw predictions.
    std::vector<Detection> candidates;
    generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, candidates);

    // Suppress overlapping same-class boxes.
    nms_bboxes(candidates, objects, NMS_THRESH);
}
/* Bounding box parsing function for the YOLOX detector model.
 *
 * Decodes the single output layer into detections, converts each
 * center-format box to top-left/width/height form, then inflates it by
 * 25% of its width on each side and 40% of its height on each side,
 * clamped to the image bounds — presumably to give the downstream
 * license-plate classifier extra context; confirm with model authors.
 *
 * NOTE(review): `networkInfo` and `detectionParams` are unused; input and
 * image sizes come from the file-level INPUT_* / IMAGE_* constants instead.
 */
static bool NvDsInferParseYolox(
    std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
    NvDsInferNetworkInfo const& networkInfo,
    NvDsInferParseDetectionParams const& detectionParams,
    std::vector<NvDsInferParseObjectInfo>& objectList)
{
    // Raw buffer of the first (and only) output layer.
    float* prob = (float*)outputLayersInfo[0].buffer;
    std::vector<Detection> objects;
    // Letterbox scale from image space (1920x1080) to network input (416x416).
    float scale = std::min(INPUT_W / (IMAGE_W*1.0), INPUT_H / (IMAGE_H*1.0));
    decode_outputs(prob, objects, scale, IMAGE_W, IMAGE_H);
    // Fractions by which each box is widened / heightened below.
    float scale_bbox_width = 0.25f;
    float scale_bbox_height = 0.4f;
    for(auto& r : objects) {
        NvDsInferParseObjectInfo oinfo;
        oinfo.classId = r.class_id;
        // Center format (cx, cy, w, h) -> top-left corner format.
        // NOTE(review): unsigned truncation — a box whose edge decodes to a
        // slightly negative coordinate wraps to a huge value; confirm inputs
        // keep boxes inside the network input area.
        oinfo.left = static_cast<unsigned int>(r.bbox[0]-r.bbox[2]*0.5f);
        oinfo.top = static_cast<unsigned int>(r.bbox[1]-r.bbox[3]*0.5f);
        oinfo.width = static_cast<unsigned int>(r.bbox[2]);
        oinfo.height = static_cast<unsigned int>(r.bbox[3]);
        oinfo.detectionConfidence = r.conf;
        // Map from network-input space to image space...
        float x1 = oinfo.left / scale;
        float y1 = oinfo.top / scale;
        float width = oinfo.width / scale;
        float height = oinfo.height / scale;
        float x2 = x1 + width;
        float y2 = y1 + height;
        // ...inflate the box, clamping to the image rectangle...
        x1 = ((x1 - scale_bbox_width * width) >= 0) ? (x1 - scale_bbox_width * width) : 0;
        y1 = ((y1 - scale_bbox_height * height) >= 0) ? (y1 - scale_bbox_height * height) : 0;
        x2 = ((x2 + scale_bbox_width * width) <= IMAGE_W) ? (x2 + scale_bbox_width * width) : IMAGE_W;
        y2 = ((y2 + scale_bbox_height * height) <= IMAGE_H) ? (y2 + scale_bbox_height * height) : IMAGE_H;
        // ...and map back to network-input space for DeepStream.
        oinfo.left = (float) x1 * scale;
        oinfo.top = (float) y1 * scale;
        oinfo.width = (float) (x2 - x1) * scale;
        oinfo.height = (float) (y2 - y1) * scale;
        objectList.push_back(oinfo);
    }
    return true;
}
/* C-ABI entry point loaded by nvinfer via `parse-bbox-func-name` in the
 * detector config (config_infer_primary_YOLOX.txt); simply forwards to the
 * internal C++ parser above. */
extern "C" bool NvDsInferParseCustomYolox(
    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
    NvDsInferNetworkInfo const &networkInfo,
    NvDsInferParseDetectionParams const &detectionParams,
    std::vector<NvDsInferParseObjectInfo> &objectList)
{
    return NvDsInferParseYolox(
        outputLayersInfo, networkInfo, detectionParams, objectList);
}

/* Check that the custom function has been defined correctly */
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYolox);